pax_global_header00006660000000000000000000000064146710067470014526gustar00rootroot0000000000000052 comment=d4b3c2cf287a9a93ffeb1d9329dfb9de1fc6c914 python-picologging-0.9.4/000077500000000000000000000000001467100674700153605ustar00rootroot00000000000000python-picologging-0.9.4/.devcontainer/000077500000000000000000000000001467100674700201175ustar00rootroot00000000000000python-picologging-0.9.4/.devcontainer/Dockerfile000066400000000000000000000024341467100674700221140ustar00rootroot00000000000000# See here for image contents: https://github.com/microsoft/vscode-dev-containers/tree/v0.234.0/containers/python-3/.devcontainer/base.Dockerfile # [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster ARG VARIANT="3.10-bullseye" FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} # [Choice] Node.js version: none, lts/*, 16, 14, 12, 10 ARG NODE_VERSION="none" RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi # [Optional] If your pip requirements rarely change, uncomment this section to add them to the image. # COPY requirements.txt /tmp/pip-tmp/ # RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ # && rm -rf /tmp/pip-tmp RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ && apt-get -y install --no-install-recommends build-essential libssl-dev gdb cmake # [Optional] Uncomment this line to install global node packages. # RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g " 2>&1python-picologging-0.9.4/.devcontainer/devcontainer.json000066400000000000000000000043671467100674700235050ustar00rootroot00000000000000// For format details, see https://aka.ms/devcontainer.json. 
For config options, see the README at: // https://github.com/microsoft/vscode-dev-containers/tree/v0.234.0/containers/python-3 { "name": "Python 3", "build": { "dockerfile": "Dockerfile", "context": "..", "args": { // Update 'VARIANT' to pick a Python version: 3, 3.10, 3.9, 3.8, 3.7, 3.6 // Append -bullseye or -buster to pin to an OS version. // Use -bullseye variants on local on arm64/Apple Silicon. "VARIANT": "3.10", // Options "NODE_VERSION": "none" } }, // Set *default* container specific settings.json values on container create. "customizations": { "vscode": { "settings": { "python.defaultInterpreterPath": "/usr/local/bin/python", "python.linting.enabled": true, "python.linting.pylintEnabled": true, "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", "python.formatting.blackPath": "/usr/local/py-utils/bin/black", "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf", "python.linting.banditPath": "/usr/local/py-utils/bin/bandit", "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle", "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint", "python.testing.pytestArgs": ["tests"], "python.testing.cwd": "${workspaceFolder}", "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true }, // Add the IDs of extensions you want installed when the container is created. "extensions": [ "ms-python.python", "ms-python.vscode-pylance", "ms-vscode.cpptools" ] } }, // Use 'forwardPorts' to make a list of ports inside the container available locally. // "forwardPorts": [], // Use 'postCreateCommand' to run commands after the container is created. 
"postCreateCommand": "python -m pip install -U pip setuptools wheel scikit-build pytest && python setup.py build_ext --inplace --build Debug", // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. "remoteUser": "vscode" } python-picologging-0.9.4/.github/000077500000000000000000000000001467100674700167205ustar00rootroot00000000000000python-picologging-0.9.4/.github/dependabot.yml000066400000000000000000000011261467100674700215500ustar00rootroot00000000000000# To get started with Dependabot version updates, you'll need to specify which # package ecosystems to update and where the package manifests are located. # Please see the documentation for all configuration options: # https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates version: 2 updates: - package-ecosystem: "pip" # See documentation for possible values directory: "/" # Location of package manifests schedule: interval: "daily" - package-ecosystem: "github-actions" directory: "/" schedule: interval: "weekly"python-picologging-0.9.4/.github/workflows/000077500000000000000000000000001467100674700207555ustar00rootroot00000000000000python-picologging-0.9.4/.github/workflows/hypothesis.yaml000066400000000000000000000017171467100674700240460ustar00rootroot00000000000000name: Slow tests on: push: branches: [ main ] jobs: test_package: name: Test ${{ matrix.os }} Python ${{ matrix.python_version }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: ["macos-11", "ubuntu-20.04", "windows-latest"] python_version: ["3.7", "3.8", "3.9", "3.10", "3.11"] steps: - name: Setup python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python_version }} architecture: x64 - name: Get latest CMake and ninja # Using 'latest' branch, the most recent CMake and ninja are installed. 
uses: lukka/get-cmake@latest - uses: actions/checkout@v4 - name: Install requirements and package run: | python -m pip install -U scikit-build python -m pip install -v . - name: Run pytest on hypothesis tests run: | python -m pip install pytest hypothesis flaky python -m pytest slowtestspython-picologging-0.9.4/.github/workflows/pages.yml000066400000000000000000000033031467100674700225760ustar00rootroot00000000000000name: Deploy Sphinx with GitHub Pages dependencies preinstalled on: # Runs on pushes targeting the default branch push: branches: ["main"] # Allows you to run this workflow manually from the Actions tab workflow_dispatch: # Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages permissions: contents: read pages: write id-token: write # Allow one concurrent deployment concurrency: group: "pages" cancel-in-progress: true jobs: # Build job build: runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Setup python uses: actions/setup-python@v5 with: python-version: "3.10" architecture: x64 - name: Get latest CMake and ninja # Using 'latest' branch, the most recent CMake and ninja are installed. uses: lukka/get-cmake@latest - uses: actions/checkout@v4 - name: Install requirements and package run: | python -m pip install -U pip python -m pip install scikit-build python -m pip install -v . 
- name: Setup Pages uses: actions/configure-pages@v4 - name: Build HTML with sphinx run: | python -m pip install -r requirements.txt make html working-directory: docs/ - name: Upload artifact uses: actions/upload-pages-artifact@v2 with: path: docs/build/html # Deployment job deploy: environment: name: github-pages url: ${{ steps.deployment.outputs.page_url }} runs-on: ubuntu-latest needs: build steps: - name: Deploy to GitHub Pages id: deployment uses: actions/deploy-pages@v4 python-picologging-0.9.4/.github/workflows/quality.yml000066400000000000000000000127521467100674700231770ustar00rootroot00000000000000# For most projects, this workflow file will not need changing; you simply need # to commit it to your repository. # # You may wish to alter this file to override the set of languages analyzed, # or to provide custom queries or build logic. # # ******** NOTE ******** # We have attempted to detect the languages in your repository. Please check # the `language` matrix defined below to confirm you have the correct set of # supported CodeQL languages. # name: "CodeQL" on: push: branches: [ main ] pull_request: branches: [ main ] schedule: - cron: '34 9 * * 2' env: BUILD_TYPE: Debug jobs: analyze: name: Analyze c++ runs-on: ubuntu-latest strategy: fail-fast: false matrix: language: [ 'cpp' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} - name: Setup python uses: actions/setup-python@v5 with: python-version: "3.10" architecture: x64 - name: Get latest CMake and ninja # Using 'latest' branch, the most recent CMake and ninja are installed. 
uses: lukka/get-cmake@latest - name: Install and build package run: | python -m pip install -U pip python -m pip install scikit-build python -m pip install -v . # â„šī¸ Command-line programs to run using the OS shell. # 📚 https://git.io/JvXDl # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 analyze-python: name: Analyze Python Code runs-on: ubuntu-latest strategy: fail-fast: false matrix: language: [ 'python' ] # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ] # Learn more: # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed steps: - name: Checkout repository uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. # By default, queries listed here will override any specified in a config file. # Prefix the list here with "+" to use these queries and those in the config file. # queries: ./path/to/local/query, your-org/your-repo/queries@main - name: Autobuild uses: github/codeql-action/autobuild@v3 # â„šī¸ Command-line programs to run using the OS shell. 
# 📚 https://git.io/JvXDl # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines # and modify them (or add more) to build your code if your project # uses a compiled language #- run: | # make bootstrap # make release - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v3 coverage: name: Coverage runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup python uses: actions/setup-python@v5 with: python-version: "3.10" architecture: x64 - name: Get latest CMake and ninja # Using 'latest' branch, the most recent CMake and ninja are installed. uses: lukka/get-cmake@latest - name: Install dependencies run: python -m pip install -U pip setuptools wheel scikit-build ninja cmake pytest gcovr pytest-cov - name: Run Coverage run: | python -m pip install -v -e . python setup.py build_ext --inplace --build-type Debug -DCOVERAGE=ON python -m pytest --cov=src tests coverage xml -o coverage_python.xml gcov _skbuild/linux-x86_64-3.10/cmake-build/CMakeFiles/_picologging.dir/src/picologging/*.gcda gcovr -g -r . --xml coverage_cpp.xml - uses: codecov/codecov-action@v3 with: verbose: true files: ./coverage_cpp.xml,./coverage_python.xml format: name: Check Formatting runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v4 - name: Setup python uses: actions/setup-python@v5 with: python-version: "3.10" architecture: x64 - name: Install dependencies run: | python -m pip install -U pip black pyupgrade isort sudo apt install -y clang-tidy - name: Check Python Formatting run: black --check . - name: Check Python import sorting run: isort --check . 
- name: Check pyupgrade run: pyupgrade --py37-plus src/picologging/*.py tests/**/*.py - name: Check clang-tidy run: clang-tidy src/picologging/*.cxx -- -I/usr/include/python3.10/ -std=c++17 python-picologging-0.9.4/.github/workflows/test.yml000066400000000000000000000030771467100674700224660ustar00rootroot00000000000000name: Test on: push: branches: [ main ] pull_request: branches: [ main ] jobs: test_package: name: Test ${{ matrix.os }} Python ${{ matrix.python_version }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: ["macos-11", "ubuntu-20.04", "windows-latest"] python_version: ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] steps: - name: Setup python uses: actions/setup-python@v5 with: python-version: ${{ matrix.python_version }} architecture: x64 - name: Get latest CMake and ninja # Using 'latest' branch, the most recent CMake and ninja are installed. uses: lukka/get-cmake@latest - uses: actions/checkout@v4 - name: Install requirements and package run: | python -m pip install -U scikit-build python -m pip install -v .[dev] - name: Run pytest run: | python -m pip install -r tests/integration/requirements.txt python -m pytest tests - name: Run pytest with repeat run: | python -m pip install pytest-repeat python -m pytest --count=10 tests - name: Run pytest with memray if: matrix.python_version == '3.11' && matrix.os == 'ubuntu-20.04' run: | python -m pip install -v .[memray] python -m pytest tests/unit/ --force-flaky --max-runs=4 --min-passes=3 --memray --stacks=7 --native - name: Run benchmarks if: matrix.python_version == '3.11' && matrix.os == 'ubuntu-20.04' run: | python -m pip install richbench richbench benchmarks/ python-picologging-0.9.4/.github/workflows/wheel.yml000066400000000000000000000012611467100674700226040ustar00rootroot00000000000000name: Wheels on: push: branches: [ main ] pull_request: branches: [ main ] jobs: build_wheels: name: Build wheels on ${{ matrix.os }} runs-on: ${{ matrix.os }} strategy: fail-fast: false matrix: os: 
["macos-11", ubuntu-20.04, "windows-latest"] steps: - uses: actions/checkout@v4 - name: Set up QEMU if: runner.os == 'Linux' uses: docker/setup-qemu-action@v3 with: platforms: all - name: Build wheels uses: pypa/cibuildwheel@v2.20.0 env: CIBW_PRERELEASE_PYTHONS: True - uses: actions/upload-artifact@v4 with: path: ./wheelhouse/*.whl python-picologging-0.9.4/.gitignore000066400000000000000000000003101467100674700173420ustar00rootroot00000000000000*.egg-info/ *.pyc __pycache__/ [.][v]env*/ build dist .cache .coverage .eggs .tox .vscode _skbuild *.so *.prof *.pstats benchmarks/.profiles coverage/ cov_report crash-* oom-* .vs/ .idea/ .hypothesis/python-picologging-0.9.4/.pre-commit-config.yaml000066400000000000000000000007451467100674700216470ustar00rootroot00000000000000repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.3.0 hooks: - id: check-yaml - repo: https://github.com/psf/black rev: 22.10.0 hooks: - id: black args: ['--config=./pyproject.toml'] - repo: https://github.com/asottile/pyupgrade rev: v3.2.0 hooks: - id: pyupgrade args: ['--py37-plus'] - repo: https://github.com/pycqa/isort rev: 5.10.1 hooks: - id: isort name: isort (python) python-picologging-0.9.4/CHANGELOG.md000066400000000000000000000260161467100674700171760ustar00rootroot00000000000000# Changelog ## 0.9.4 * Improve the performance of logging records by inlining log creation to remove temporary Python objects into the logger. 
by @tonybaloney in https://github.com/microsoft/picologging/pull/186 * Improve the performance of logger methods with fastcalls by @tonybaloney in https://github.com/microsoft/picologging/pull/188 * Improve perf and memory usage of log records with const level names by @tonybaloney in https://github.com/microsoft/picologging/pull/190 * Add example using picologging and django by @tonybaloney in https://github.com/microsoft/picologging/pull/194 * Fix setLogRecordFactory exception message by @GabrielCappelli in https://github.com/microsoft/picologging/pull/196 * Fix asctime formatting, don't print uninitialized buffer's content (issue #203) by @tarasko in https://github.com/microsoft/picologging/pull/208 * Add a test for issue Using `asctime` in formatter by @tonybaloney in https://github.com/microsoft/picologging/pull/210 ## 0.9.3 * Support for Python 3.12 * Module can be used in sub-interpreters (3.12+) * Allow LogRecord to be copied by @aminalaee in https://github.com/microsoft/picologging/pull/164 ### Bug fixes * Fix memory leak in LogRecord.__dict__. 
Use Py_NewRef and Py_CLEAR patterns for cleaner code by @tonybaloney in https://github.com/microsoft/picologging/pull/173 * Fix `io` text_encoding on Python 3.9 and older by @aminalaee in https://github.com/microsoft/picologging/pull/152 * Add tests for override formatter formatException by @aminalaee in https://github.com/microsoft/picologging/pull/140 * Fix formatting issues by @aminalaee in https://github.com/microsoft/picologging/pull/160 * Add FileHandler errors argument by @aminalaee in https://github.com/microsoft/picologging/pull/161 * Move filepath cache to within free-able module state by @tonybaloney in https://github.com/microsoft/picologging/pull/166 * Bump pypa/cibuildwheel from 2.12.3 to 2.15.0 by @dependabot in https://github.com/microsoft/picologging/pull/158 * Use a module state attribute to support subinterpreters by @tonybaloney in https://github.com/microsoft/picologging/pull/167 * Cleanup all danging unicode references. by @tonybaloney in https://github.com/microsoft/picologging/pull/169 * Adopt the memray trace leaks test markers by @tonybaloney in https://github.com/microsoft/picologging/pull/137 * Fix unclosed resources in tests by @aminalaee in https://github.com/microsoft/picologging/pull/171 * Verify the integration of coloredlogs is fixed by @tonybaloney in https://github.com/microsoft/picologging/pull/174 ## 0.9.2 * Upgrade pre-commit hooks versions by @sadikkuzu in https://github.com/microsoft/picologging/pull/133 * Compile x86 for linux and windows. 
Compile aarch64 for linux by @tonybaloney in https://github.com/microsoft/picologging/pull/135 * moves devcontainer vscode settings by @kjaymiller in https://github.com/microsoft/picologging/pull/143 * Fix Formatter __repr__ by @aminalaee in https://github.com/microsoft/picologging/pull/141 * Fix leaks in __dict__ and asctime fields by @tonybaloney in https://github.com/microsoft/picologging/pull/145 * Add Logger isEnabledFor by @aminalaee in https://github.com/microsoft/picologging/pull/139 * Support Logger.setLevel from string input by @tonybaloney in https://github.com/microsoft/picologging/pull/146 ## 0.9.1 * Add Windows Arm64 wheels by @aminalaee in https://github.com/microsoft/picologging/pull/130 * Remove #22 as limitation since it's been fixed by @tonybaloney in https://github.com/microsoft/picologging/pull/128 * Remove QueueListener test by @aminalaee in https://github.com/microsoft/picologging/pull/101 ## 0.9.0 * Fix formatting with black by @aminalaee in https://github.com/microsoft/picologging/pull/71 * Improve test coverage by @tonybaloney in https://github.com/microsoft/picologging/pull/72 * Fix FileHandler flaky test by @aminalaee in https://github.com/microsoft/picologging/pull/69 * Update project classifiers by @aminalaee in https://github.com/microsoft/picologging/pull/74 * Uplift test coverage by @aminalaee in https://github.com/microsoft/picologging/pull/76 * Making LogRecord class derivable by @pamelafox in https://github.com/microsoft/picologging/pull/77 * Properly raise error from Handler.emit by @pamelafox in https://github.com/microsoft/picologging/pull/81 * Properly handle return value for setStream by @pamelafox in https://github.com/microsoft/picologging/pull/82 * Fixing Stream.__repr__ to match CPython by @pamelafox in https://github.com/microsoft/picologging/pull/84 * Add Handler repr by @aminalaee in https://github.com/microsoft/picologging/pull/88 * Allow handle method to accept LogRecord subclasses by @aminalaee in 
https://github.com/microsoft/picologging/pull/85 * Fix Handler.handle return type by @aminalaee in https://github.com/microsoft/picologging/pull/86 * Make config fix by @pamelafox in https://github.com/microsoft/picologging/pull/97 * Add failing Queue Listener Handler Test by @Goldziher in https://github.com/microsoft/picologging/pull/89 * Add local development instructions to readme by @pamelafox in https://github.com/microsoft/picologging/pull/104 * Add launch.json instructions to README by @pamelafox in https://github.com/microsoft/picologging/pull/106 * Create py.typed by @Goldziher in https://github.com/microsoft/picologging/pull/92 * Add config.pyi stub by @aminalaee in https://github.com/microsoft/picologging/pull/100 * Add pylint config by @pamelafox in https://github.com/microsoft/picologging/pull/107 * 3.7+ compatible code by @tonybaloney in https://github.com/microsoft/picologging/pull/114 * Document the memray tests and add a script for automation by @tonybaloney in https://github.com/microsoft/picologging/pull/113 * Adding regression test for LogRecord args issue by @pamelafox in https://github.com/microsoft/picologging/pull/105 * Fix leaks identified memray by @tonybaloney in https://github.com/microsoft/picologging/pull/115 * Set logger parents and levels by @pamelafox in https://github.com/microsoft/picologging/pull/108 * Adding parameters to flaky decorator to increase reruns by @pamelafox in https://github.com/microsoft/picologging/pull/118 * Adding precommit with black and pyupgr by @pamelafox in https://github.com/microsoft/picologging/pull/119 * Call dealloc of base types by @pamelafox in https://github.com/microsoft/picologging/pull/120 * Add isort by @pamelafox in https://github.com/microsoft/picologging/pull/121 * Test on 3.11 vs 3.11-dev by @pamelafox in https://github.com/microsoft/picologging/pull/127 * Make QueueHandler call format by @pamelafox in https://github.com/microsoft/picologging/pull/122 ## 0.8.1 * Fix `dictconfig` resetting 
child loggers by @aminalaee in https://github.com/microsoft/picologging/pull/70 * Add `formatException` method in `Formatter` by @aminalaee in https://github.com/microsoft/picologging/pull/68 ## 0.8.0 * Add `dictConfig` method https://github.com/microsoft/picologging/pull/61 * Add `DatagramHandler` https://github.com/microsoft/picologging/pull/64 ## 0.7.2 * Fix leak on levelname and formatted message https://github.com/microsoft/picologging/pull/59 * Fixes a leak in the `asctime` field of log record https://github.com/microsoft/picologging/pull/60 * Fixes a leak in stream writer write() and flush() calls https://github.com/microsoft/picologging/pull/60 * Fixes a leak in stream close() https://github.com/microsoft/picologging/pull/60 ## 0.7.1 * Add a basic documentation site by @tonybaloney in https://github.com/microsoft/picologging/pull/55 * Fix Logger setLevel not resetting other levels by @aminalaee in https://github.com/microsoft/picologging/pull/57 * Fixes reference leaks in filterer 'filter' string and relative time by @tonybaloney in https://github.com/microsoft/picologging/pull/58 ## 0.7.0 * Add `MemoryHandler` by @aminalaee in https://github.com/microsoft/picologging/pull/46 * Make `getLevelName()` work level name arguments by @aminalaee in https://github.com/microsoft/picologging/pull/52 * Add `makeLogRecord()` by @aminalaee in https://github.com/microsoft/picologging/pull/50 * Add `SocketHandler` by @aminalaee in https://github.com/microsoft/picologging/pull/48 ## 0.6.0 * Implements QueueListener and QueueHandler in `picologging.handlers` module for non-blocking logging by @tonybaloney in https://github.com/microsoft/picologging/pull/44 * Fix a crash on ubuntu by @tonybaloney in https://github.com/microsoft/picologging/pull/42 * Add BufferingHandler by @aminalaee in https://github.com/microsoft/picologging/pull/45 * Fixes a crash on string literal within a loop in certain cases ## 0.5.1 * Fixes a crash in 32-bit Linux wheels crashing when `validate=True` 
flag is used for the Formatter type ## 0.5.0 * String format `{field}` is now supported using `Formatter(style='{')` * Logger now supports `sinfo=True` to add stack info to log messages * Fixed a bug where formatters using the `created` field were not correctly formatted * Fixed a bug where formatters using the thread id, `thread` field were formatted as signed int instead of unsigned long * Fixed a bug in the `__repr__` method of PercentStyle * Fixed a bug that the Logger.exception() method wasn't checking the log level for ERROR * Fixed a bug in the formatting of exception messages * Setting the parent of a logger which has a NOTSET level will now update it's logging level to adopt the next set parent's level ## 0.4.0 * Add Fuzzing and coverage configuration for Clang/GCC by @tonybaloney in https://github.com/microsoft/picologging/pull/26 * Add WatchedFileHandler by @aminalaee in https://github.com/microsoft/picologging/pull/23 * Expand test suite with error cases by @tonybaloney in https://github.com/microsoft/picologging/pull/27 * Refactor type field initializers to `__new__` by @tonybaloney in https://github.com/microsoft/picologging/pull/28 * Add tests for FileHandler delay by @aminalaee in https://github.com/microsoft/picologging/pull/24 * Add BaseRotatingHandler by @aminalaee in https://github.com/microsoft/picologging/pull/25 ## 0.3.0 * Added `FileHandler` to picologging * Fixed an issue with the `Logger.setLevel()` method not correctly resetting level flags in the logger * Fixed a memory leak in the logging and handler types ## 0.2.0 (23rd June 2022) * Adds `.close()`, `.flush()`, `.createLock()` abstract methods to Handler https://github.com/microsoft/picologging/pull/9 * Corrected type stubs for Handler `__init__` * Added simple `handleError` method to base Handler type to print the exception on sys.stderr * Added `.get_name()` and `.set_name()` methods to Handler * Fixes a bug in stream handler swallowing error message when `.write()` failed on the 
underlying stream * Repeat all tests and isolate reference bugs in formatter and handler https://github.com/microsoft/picologging/pull/12 * Fix root logger instantiation with wrong arguments https://github.com/microsoft/picologging/pull/15 * Fix getlevelname missing from module https://github.com/microsoft/picologging/pull/16 * Fixes StreamHandler not defaulting to sys.stderr when stream argument is None https://github.com/microsoft/picologging/pull/18 ## 0.1.0 (22nd June 2022) * Initial release * Handler base class * Stream Handler support python-picologging-0.9.4/CMakeLists.txt000066400000000000000000000035361467100674700201270ustar00rootroot00000000000000cmake_minimum_required(VERSION 3.4...3.22) option(COVERAGE "Enable coverage reporting" OFF) option(CACHE_FILEPATH "Enable cache filepath" ON) project(picologging) find_package(PythonExtensions REQUIRED) add_library(_picologging MODULE src/picologging/_picologging.cxx src/picologging/logrecord.cxx src/picologging/formatstyle.cxx src/picologging/formatter.cxx src/picologging/logger.cxx src/picologging/handler.cxx src/picologging/filterer.cxx src/picologging/streamhandler.cxx src/picologging/filepathcache.cxx) if (MSVC) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++latest") else() set_target_properties(_picologging PROPERTIES CXX_STANDARD 17) endif (MSVC) if (CACHE_FILEPATH) add_definitions(-DPICOLOGGING_CACHE_FILEPATH) endif (CACHE_FILEPATH) if(COVERAGE) message(STATUS "Enabling coverage") # Add required flags (GCC & LLVM/Clang) if (CMAKE_CXX_COMPILER_ID MATCHES "GNU") target_compile_options(_picologging PRIVATE -O0 # no optimization -g # generate debug info --coverage # sets all required flags -fprofile-abs-path ) target_link_options(_picologging PRIVATE --coverage) elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") target_compile_options(_picologging PRIVATE -O0 # no optimization -g # generate debug info -fprofile-instr-generate -fcoverage-mapping # coverage flags ) target_link_options(_picologging PRIVATE 
--coverage) endif() endif(COVERAGE) if(FUZZING) target_compile_options(_picologging PRIVATE -fsanitize=address,fuzzer) target_link_options(_picologging PRIVATE -fsanitize=address,fuzzer) endif(FUZZING) python_extension_module(_picologging) install(TARGETS _picologging LIBRARY DESTINATION src/picologging) python-picologging-0.9.4/CODE_OF_CONDUCT.md000066400000000000000000000006741467100674700201660ustar00rootroot00000000000000# Microsoft Open Source Code of Conduct This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). Resources: - [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/) - [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) - Contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with questions or concerns python-picologging-0.9.4/LICENSE000066400000000000000000000021651467100674700163710ustar00rootroot00000000000000 MIT License Copyright (c) Microsoft Corporation. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE python-picologging-0.9.4/MANIFEST.in000066400000000000000000000014211467100674700171140ustar00rootroot00000000000000include .devcontainer/Dockerfile include .devcontainer/devcontainer.json include .github/dependabot.yml include .github/workflows/pages.yml include .github/workflows/quality.yml include .github/workflows/test.yml include .github/workflows/wheel.yml include .gitignore include CHANGELOG.md include CMakeLists.txt include CODE_OF_CONDUCT.md include LICENSE include README.md include SECURITY.md include SUPPORT.md recursive-include benchmarks *.py include docs/Makefile include docs/make.bat include docs/requirements.txt include docs/source/conf.py recursive-include docs/source *.rst include pyproject.toml include setup.py graft src/picologging recursive-include tests *.py exclude MANIFEST.in exclude _skbuild global-exclude *.py[cod] global-exclude *.so recursive-include tests *.mdpython-picologging-0.9.4/README.md000066400000000000000000000145771467100674700166550ustar00rootroot00000000000000# picologging [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/picologging)](https://pypi.org/project/picologging/) [![PyPI](https://img.shields.io/pypi/v/picologging)](https://pypi.org/project/picologging/) [![Anaconda-Server Badge](https://anaconda.org/conda-forge/picologging/badges/version.svg)](https://anaconda.org/conda-forge/picologging) [![codecov](https://codecov.io/gh/microsoft/picologging/branch/main/graph/badge.svg?token=KHs6FpQlVW)](https://codecov.io/gh/microsoft/picologging) > **Warning** > This project is in *beta*. > There are some incomplete features (see [Limitations](https://microsoft.github.io/picologging/limitations.html)). 
Picologging is a high-performance logging library for Python. picologging is 4-17x faster than the `logging` module in the standard library. Picologging is designed to be used as a *drop-in* replacement for applications which already use logging, and supports the same API as the `logging` module. Check out the [Documentation](https://microsoft.github.io/picologging/) for more. ## Installation Picologging can be installed from PyPi using pip: ```console pip install picologging ``` Or from conda forge using conda: ```console conda install -c conda-forge picologging ``` ## Usage Import `picologging as logging` to use picologging instead of the standard library logging module. This patches all the loggers registered to use picologging loggers and formatters. ```python import picologging as logging logging.basicConfig() logger = logging.getLogger() logger.info("A log message!") logger.warning("A log message with %s", "arguments") ``` ## Benchmarks Run `richbench benchmarks/ --markdown` with the richbench CLI to see the benchmarks, here is a sample on macOS 11: | Benchmark | Min | Max | Mean | Min (+) | Max (+) | Mean (+) | |---------------------------------------|---------|---------|---------|-----------------|-----------------|-----------------| | Logger(level=DEBUG).debug() | 0.569 | 0.600 | 0.578 | 0.031 (18.3x) | 0.035 (17.0x) | 0.033 (17.7x) | | Logger(level=DEBUG).debug() with args | 0.591 | 0.607 | 0.601 | 0.047 (12.5x) | 0.050 (12.2x) | 0.048 (12.4x) | | Logger(level=INFO).debug() | 0.013 | 0.014 | 0.013 | 0.003 (5.0x) | 0.003 (4.4x) | 0.003 (4.8x) | | Logger(level=INFO).debug() with args | 0.013 | 0.014 | 0.013 | 0.003 (4.6x) | 0.003 (4.2x) | 0.003 (4.4x) | ## Limitations See [Limitations](https://microsoft.github.io/picologging/limitations.html) ## Contributing This project welcomes contributions and suggestions. 
Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit [cla.opensource.microsoft.com](https://cla.opensource.microsoft.com). When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. ## Local development This project comes bundled with a dev container which sets up an appropriate environment. If you install the Dev Containers extension for VS Code, then opening this project in VS Code should prompt it to open it in the dev container. Once opened in the dev container, run: ```console pip install -e ".[dev]" pre-commit install python setup.py build_ext --inplace --build-type Debug ``` Run the build command whenever you make changes to the files. 
It's also helpful to create a `.vscode/launch.json` file like this one: ```json { "version": "0.2.0", "configurations": [ { "name": "(gdb) Launch pytest", "type": "cppdbg", "request": "launch", "program": "/usr/local/bin/python", "args": ["-m", "pytest", "tests"], "stopAtEntry": false, "cwd": "${workspaceFolder}", "environment": [], "externalConsole": false, "MIMode": "gdb", "setupCommands": [ { "description": "Enable pretty-printing for gdb", "text": "-enable-pretty-printing", "ignoreFailures": true }, { "description": "Set Disassembly Flavor to Intel", "text": "-gdb-set disassembly-flavor intel", "ignoreFailures": true }, ] } } ``` Now you can press the "Run and debug" button to run `pytest` from the `gdb` debugger and use breakpoint debugging in the C code. If you would like to be able to dive into the CPython code while debugging, then: 1. Do a git checkout of the tagged branch for the devcontainer's Python version into the devcontainer's `/workspaces/` directory. You may need to `sudo`. 2. Follow the instructions in the CPython README to compile the code. 3. Add the following key to the the configuration in `launch.json`: ```json "sourceFileMap": { "/usr/src/python": "/workspaces/cpython" }, ``` 4. Add the following command to the `setupCommands` in `launch.json`: ```json { "description": "Find CPython source code", "text": "-gdb-set auto-load safe-path /workspaces/cpython" }, ``` ## Trademarks Some components of this Python package are from CPython 3.11 logging library for compatibility reasons. CPython 3.11 is licensed under the PSF license. The logging module is Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved. This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks/usage/general). 
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. python-picologging-0.9.4/SECURITY.md000066400000000000000000000053051467100674700171540ustar00rootroot00000000000000 ## Security Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/). If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below. ## Reporting Security Issues **Please do not report security vulnerabilities through public GitHub issues.** Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report). If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey). You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc). 
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue: * Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.) * Full paths of source file(s) related to the manifestation of the issue * The location of the affected source code (tag/branch/commit or direct URL) * Any special configuration required to reproduce the issue * Step-by-step instructions to reproduce the issue * Proof-of-concept or exploit code (if possible) * Impact of the issue, including how an attacker might exploit the issue This information will help us triage your report more quickly. If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs. ## Preferred Languages We prefer all communications to be in English. ## Policy Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd). python-picologging-0.9.4/SUPPORT.md000066400000000000000000000007221467100674700170570ustar00rootroot00000000000000# Support ## How to file issues and get help This project uses GitHub Issues to track bugs and feature requests. Please search the existing issues before filing new issues to avoid duplicates. For new issues, file your bug or feature request as a new Issue. For help and questions about using this project, please consult the documentation. ## Microsoft Support Policy Support for this project is limited to the resources listed above. 
\HI python-picologging-0.9.4/benchmarks/000077500000000000000000000000001467100674700174755ustar00rootroot00000000000000python-picologging-0.9.4/benchmarks/bench_handlers.py000066400000000000000000000121651467100674700230130ustar00rootroot00000000000000import io import logging import logging.handlers as logging_handlers import queue import tempfile import picologging import picologging.handlers as picologging_handlers def filehandler_logging(): with tempfile.NamedTemporaryFile() as f: logger = logging.Logger("test", logging.DEBUG) handler = logging.FileHandler(f.name) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a logging issue") handler.close() def filehandler_picologging(): with tempfile.NamedTemporaryFile() as f: logger = picologging.Logger("test", picologging.DEBUG) handler = picologging.FileHandler(f.name) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a picologging issue") handler.close() def watchedfilehandler_logging(): with tempfile.NamedTemporaryFile() as f: logger = logging.Logger("test", logging.DEBUG) handler = logging_handlers.WatchedFileHandler(f.name) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a logging issue") handler.close() def watchedfilehandler_picologging(): with tempfile.NamedTemporaryFile() as f: logger = picologging.Logger("test", picologging.DEBUG) handler = picologging_handlers.WatchedFileHandler(f.name) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a picologging issue") handler.close() def rotatingfilehandler_logging(): with tempfile.NamedTemporaryFile() as f: logger = logging.Logger("test", logging.DEBUG) handler = logging_handlers.RotatingFileHandler( f.name, maxBytes=10_000, backupCount=5 ) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a logging issue") handler.close() def rotatingfilehandler_picologging(): with tempfile.NamedTemporaryFile() as 
f: logger = picologging.Logger("test", picologging.DEBUG) handler = picologging_handlers.RotatingFileHandler( f.name, maxBytes=10_000, backupCount=5 ) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a picologging issue") handler.close() def queuehandler_logging(): logger = logging.Logger("test", picologging.DEBUG) q = queue.Queue() handler = logging_handlers.QueueHandler(q) logger.addHandler(handler) for _ in range(10_000): logger.debug("test") def queuehandler_picologging(): logger = picologging.Logger("test", picologging.DEBUG) q = queue.Queue() handler = picologging_handlers.QueueHandler(q) logger.addHandler(handler) for _ in range(10_000): logger.debug("test") def queue_listener_logging(): logger = logging.Logger("test", picologging.DEBUG) stream = io.StringIO() stream_handler = logging.StreamHandler(stream) q = queue.Queue() listener = logging_handlers.QueueListener(q, stream_handler) listener.start() handler = logging_handlers.QueueHandler(q) logger.addHandler(handler) for _ in range(1_000): logger.debug("test") listener.stop() def queue_listener_picologging(): logger = picologging.Logger("test", picologging.DEBUG) stream = io.StringIO() stream_handler = picologging.StreamHandler(stream) q = queue.Queue() listener = picologging_handlers.QueueListener(q, stream_handler) listener.start() handler = picologging_handlers.QueueHandler(q) logger.addHandler(handler) for _ in range(1_000): logger.debug("test") listener.stop() def memoryhandler_logging(): with tempfile.NamedTemporaryFile() as f: logger = logging.Logger("test", logging.DEBUG) target = logging.FileHandler(f.name) handler = logging_handlers.MemoryHandler(capacity=100, target=target) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a logging issue") handler.close() def memoryhandler_picologging(): with tempfile.NamedTemporaryFile() as f: logger = picologging.Logger("test", picologging.DEBUG) target = picologging.FileHandler(f.name) 
handler = picologging_handlers.MemoryHandler(capacity=100, target=target) logger.handlers.append(handler) for _ in range(1_000): logger.debug("There has been a picologging issue") handler.close() __benchmarks__ = [ (filehandler_logging, filehandler_picologging, "FileHandler()"), ( watchedfilehandler_logging, watchedfilehandler_picologging, "WatchedFileHandler()", ), ( rotatingfilehandler_logging, rotatingfilehandler_picologging, "RotatingFileHandler()", ), (queuehandler_logging, queuehandler_picologging, "QueueHandler()"), ( queue_listener_logging, queue_listener_picologging, "QueueListener() + QueueHandler()", ), (memoryhandler_logging, memoryhandler_picologging, "MemoryHandler()"), ] python-picologging-0.9.4/benchmarks/bench_logger.py000066400000000000000000000105201467100674700224630ustar00rootroot00000000000000import logging from io import StringIO import picologging logging.basicConfig() picologging.basicConfig() def record_factory_logging(): for _ in range(10_000): logging.LogRecord( "hello", logging.INFO, "/serv/", 123, "bork bork bork", (), None ) def record_factory_picologging(): for _ in range(10_000): picologging.LogRecord( "hello", logging.INFO, "/serv/", 123, "bork bork bork", (), None ) def format_record_logging(): f = logging.Formatter() record = logging.LogRecord( "hello", logging.INFO, "/serv/", 123, "bork bork bork", (), None ) for _ in range(10_000): f.format(record) def format_record_picologging(): f = picologging.Formatter() record = picologging.LogRecord( "hello", logging.INFO, "/serv/", 123, "bork bork bork", (), None ) for _ in range(10_000): f.format(record) def format_record_with_date_logging(): f = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") record = logging.LogRecord( "hello", logging.INFO, "/serv/", 123, "bork bork bork", (), None ) for _ in range(10_000): f.format(record) def format_record_with_date_picologging(): f = picologging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") record = 
picologging.LogRecord( "hello", logging.INFO, "/serv/", 123, "bork bork bork", (), None ) for _ in range(10_000): f.format(record) def log_debug_logging(level=logging.DEBUG): logger = logging.Logger("test", level) tmp = StringIO() handler = logging.StreamHandler(tmp) handler.setLevel(level) formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) logger.handlers.append(handler) for _ in range(10_000): logger.debug("There has been a logging issue") def log_debug_logging_with_args(level=logging.DEBUG): logger = logging.Logger("test", level) tmp = StringIO() handler = logging.StreamHandler(tmp) handler.setLevel(level) formatter = logging.Formatter("%(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) logger.handlers.append(handler) for _ in range(10_000): logger.debug("There has been a logging issue %s %s %s", 1, 2, 3) def log_debug_picologging(level=logging.DEBUG): logger = picologging.Logger("test", level) tmp = StringIO() handler = picologging.StreamHandler(tmp) handler.setLevel(level) formatter = picologging.Formatter("%(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) logger.handlers.append(handler) for _ in range(10_000): logger.debug("There has been a picologging issue") def log_debug_picologging_with_args(level=logging.DEBUG): logger = picologging.Logger("test", level) tmp = StringIO() handler = picologging.StreamHandler(tmp) handler.setLevel(level) formatter = picologging.Formatter("%(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) logger.handlers.append(handler) for _ in range(10_000): logger.debug("There has been a picologging issue %s %s %s", 1, 2, 3) def log_debug_outofscope_logging(): log_debug_logging(logging.INFO) def log_debug_outofscope_picologging(): log_debug_picologging(logging.INFO) def log_debug_outofscope_logging_with_args(): log_debug_logging_with_args(logging.INFO) def log_debug_outofscope_picologging_with_args(): 
log_debug_picologging_with_args(logging.INFO) __benchmarks__ = [ (record_factory_logging, record_factory_picologging, "LogRecord()"), (format_record_logging, format_record_picologging, "Formatter().format()"), ( format_record_with_date_logging, format_record_with_date_picologging, "Formatter().format() with date", ), (log_debug_logging, log_debug_picologging, "Logger(level=DEBUG).debug()"), ( log_debug_logging_with_args, log_debug_picologging_with_args, "Logger(level=DEBUG).debug() with args", ), ( log_debug_outofscope_logging, log_debug_outofscope_picologging, "Logger(level=INFO).debug()", ), ( log_debug_outofscope_logging_with_args, log_debug_outofscope_picologging_with_args, "Logger(level=INFO).debug() with args", ), ] python-picologging-0.9.4/docs/000077500000000000000000000000001467100674700163105ustar00rootroot00000000000000python-picologging-0.9.4/docs/Makefile000066400000000000000000000011761467100674700177550ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line, and also # from the environment for the first two. SPHINXOPTS ?= SPHINXBUILD ?= sphinx-build SOURCEDIR = source BUILDDIR = build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) python-picologging-0.9.4/docs/make.bat000066400000000000000000000014011467100674700177110ustar00rootroot00000000000000@ECHO OFF pushd %~dp0 REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set SOURCEDIR=source set BUILDDIR=build %SPHINXBUILD% >NUL 2>NUL if errorlevel 9009 ( echo. echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx echo.installed, then set the SPHINXBUILD environment variable to point echo.to the full path of the 'sphinx-build' executable. Alternatively you echo.may add the Sphinx directory to PATH. echo. echo.If you don't have Sphinx installed, grab it from echo.https://www.sphinx-doc.org/ exit /b 1 ) if "%1" == "" goto help %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% goto end :help %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% :end popd python-picologging-0.9.4/docs/requirements.txt000066400000000000000000000000041467100674700215660ustar00rootroot00000000000000furopython-picologging-0.9.4/docs/source/000077500000000000000000000000001467100674700176105ustar00rootroot00000000000000python-picologging-0.9.4/docs/source/conf.py000066400000000000000000000035021467100674700211070ustar00rootroot00000000000000# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = "picologging" copyright = "2022, Microsoft" author = "Microsoft" # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"] # Add any paths that contain templates here, relative to this directory. 
templates_path = ["_templates"] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = "furo" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] python-picologging-0.9.4/docs/source/django.rst000066400000000000000000000031141467100674700216030ustar00rootroot00000000000000.. _django: Using with Django ================= Django has a default logging configuration that executes when the application initializes. This configuration is defined in the ``LOGGING`` variable in the ``settings.py`` file. By default, Django will configure a set of streaming loggers from the standard library logging module. To use picologging with Django, you need to change some settings. 1. Set ``LOGGING_CONFIG`` to ``None`` in ``settings.py``. This will prevent Django from configuring the default logging system. 2. Change the handler classes from ``logging.xxx`` to ``picologging.xxx``, e.g. ``logging.StreamHandler`` to ``picologging.StreamHandler``. 3. Call ``picologging.config.dictConfig(LOGGING)`` in ``settings.py`` to configure picologging. 4. Change your imports where logging is used to ```import picologging as logging``. Here is a complete example of ``settings.py`` .. 
code-block: python LOGGING = { "version": 1, "disable_existing_loggers": False, "handlers": { "console": { "class": "picologging.StreamHandler", }, }, "root": { "handlers": ["console"], "level": "WARNING", }, } LOGGING_CONFIG = None import picologging.config picologging.config.dictConfig(LOGGING) Then in a view to use those loggers: .. code-block: python import picologging as logging logger = logging.getLogger(__name__) def my_view(request, arg1, arg): logger.info("Logging in my_view") return ...python-picologging-0.9.4/docs/source/examples.rst000066400000000000000000000023061467100674700221610ustar00rootroot00000000000000.. _examples: Examples ======== Basic logging ------------- The most basic usage for picologging is to call the debug, info, warning, error, critical and exception functions directly on the picologging module: .. code-block:: python import picologging picologging.basicConfig(level=picologging.DEBUG) picologging.debug("This is a debug message") picologging.info("This is an info message") picologging.warning("This is a warning message") picologging.error("This is an error message") picologging.critical("This is a critical message") This will use the default handler and formatter. You can specify a different formatter with the formatter keyword argument: .. code-block:: python import picologging picologging.basicConfig(level=picologging.DEBUG, formatter=picologging.Formatter("%(levelname)s:%(message)s")) picologging.debug("This is a debug message") # Output: # DEBUG:This is a debug message Using custom handlers --------------------- Picologging has custom handlers beyond the StreamHandler and FileHandler. You can write your own handler by implementing the Handler class. There are a collection of pre-built handlers in the :ref:`handlers` module. python-picologging-0.9.4/docs/source/handlers.rst000066400000000000000000000034511467100674700221450ustar00rootroot00000000000000.. _handlers: Handlers ======== Base Handler ------------ .. 
autoclass:: picologging.Handler :members: :member-order: bysource Watched File Handler -------------------- .. autoclass:: picologging.handlers.WatchedFileHandler :members: :member-order: bysource Base Rotating Handler --------------------- .. autoclass:: picologging.handlers.BaseRotatingHandler :members: :member-order: bysource Rotating File Handler --------------------- .. autoclass:: picologging.handlers.RotatingFileHandler :members: :member-order: bysource Timed Rotating File Handler --------------------------- .. autoclass:: picologging.handlers.TimedRotatingFileHandler :members: :member-order: bysource Queue Handler ------------- .. autoclass:: picologging.handlers.QueueHandler :members: :member-order: bysource Queue Listener -------------- The queue listener and queue handler can be combined for non-blocking logging, for example: .. code-block:: python logger = picologging.Logger("test", picologging.DEBUG) stream = io.StringIO() stream_handler = picologging.StreamHandler(stream) q = queue.Queue() listener = QueueListener(q, stream_handler) listener.start() handler = QueueHandler(q) logger.addHandler(handler) logger.debug("test") listener.stop() assert stream.getvalue() == "test\n" .. autoclass:: picologging.handlers.QueueListener :members: :member-order: bysource Buffering Handler ----------------- .. autoclass:: picologging.handlers.BufferingHandler :members: :member-order: bysource Memory Handler -------------- .. autoclass:: picologging.handlers.MemoryHandler :members: :member-order: bysource Socket Handler -------------- .. autoclass:: picologging.handlers.SocketHandler :members: :member-order: bysource python-picologging-0.9.4/docs/source/index.rst000066400000000000000000000056771467100674700214700ustar00rootroot00000000000000.. picologging documentation master file, created by sphinx-quickstart on Fri Aug 5 15:34:22 2022. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. 
picologging =========== **Status** This project is in *beta*. There are some incomplete features (see Issues). Picologging is a high-performance logging library for Python. picologging is 4-10x faster than the `logging` module in the standard library. Picologging is designed to be used as a *drop-in* replacement for applications which already use logging, and supports the same API as the `logging` module. Installation ------------ .. code-block:: console $ pip install picologging Usage ----- Import `picologging as logging` to use picologging instead of the standard library logging module. This patches all the loggers registered to use picologging loggers and formatters. .. code-block:: python import picologging as logging logging.basicConfig() logger = logging.getLogger() logger.info("A log message!") logger.warning("A log message with %s", "arguments") Limitations ----------- See :ref:`limitations`. Contributing ------------ This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the `Microsoft Open Source Code of Conduct `_. For more information see the `Code of Conduct FAQ `_ or contact `opencode@microsoft.com `_ with any additional questions or comments. Trademarks ---------- Some components of this Python package are from CPython 3.11 logging library for compatibility reasons. CPython 3.11 is licensed under the PSF license. The logging module is Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved. 
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow `Microsoft's Trademark & Brand Guidelines `_. Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party's policies. .. toctree:: :glob: :maxdepth: 3 :hidden: logging handlers examples django limitations python-picologging-0.9.4/docs/source/limitations.rst000066400000000000000000000017601467100674700227020ustar00rootroot00000000000000.. _limitations: Limitations =========== Formatter Interface ------------------- * Overriding `.formatStack()` is not supported * Formatting any object other than `picologging.LogRecord` is not supported LogRecord --------- * Process name is not captured, `processName` will always be None (Process ID is captured). * Thread name is not captured, `threadName` will always be None. (Thread ID is captured). * LogRecord does not observe the `logging.logThreads`, `logging.logMultiprocessing`, or `logging.logProcesses` globals. It will *always* capture process and thread ID because the check is slower than the capture. Logger ------ * Custom logging levels are not supported. * There is no Log Record Factory, picologging will always use LogRecord. * Logger will always default to the `sys.stderr` and not observe an (undocumented) `logging.emittedNoHandlerWarning` flag in the Python standard library. Configuration ------------- * The option `incremental` in `dictConfig` method is not supported. python-picologging-0.9.4/docs/source/logging.rst000066400000000000000000000001551467100674700217710ustar00rootroot00000000000000.. _logging: Picologging API ~~~~~~~~~~~~~~~ .. 
automodule:: picologging :members: :undoc-members: python-picologging-0.9.4/pyproject.toml000066400000000000000000000013231467100674700202730ustar00rootroot00000000000000[build-system] requires = [ "setuptools>=65.4.1", "scikit-build>=0.17.0", "cmake>=3.18", "ninja", ] build-backend = "setuptools.build_meta" [tool.cibuildwheel] # skip musl and pypy skip = ["*-musllinux*", "pp*", "*-win_arm64"] test-requires = "pytest" test-command = "python -X dev -m pytest {project}/tests/unit" test-skip = ["*-win_arm64", "*-macosx_universal2:arm64"] [tool.cibuildwheel.macos] environment = { MACOSX_DEPLOYMENT_TARGET = "10.15" } archs = ["x86_64", "universal2"] [tool.cibuildwheel.linux] archs = ["i686", "x86_64", "aarch64"] [tool.cibuildwheel.windows] archs = ["x86", "AMD64", "ARM64"] [tool.pylint.messages_control] disable = "C0114,C0115,C0116" [tool.isort] profile = "black" python-picologging-0.9.4/setup.py000066400000000000000000000034221467100674700170730ustar00rootroot00000000000000from setuptools import find_packages from skbuild import setup with open("./README.md") as fh: long_description = fh.read() setup( name="picologging", packages=find_packages(where="src"), package_dir={"": "src"}, package_data={ "picologging": ["py.typed", "__init__.pyi", "config.pyi", "handlers.pyi"] }, version="0.9.4", author="Microsoft", description="A fast and lightweight logging library for Python", long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/microsoft/picologging", license="MIT License", classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Topic :: System :: Logging", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 
3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", ], install_requires=[], python_requires=">=3.7", extras_require={ "dev": [ "rich", "pytest", "pytest-cov", "hypothesis", "flaky", "black", "pre-commit", ], "memray": [ "memray>=1.10.0", "pytest-memray>=1.5.0", ], }, project_urls={ "Source": "https://github.com/microsoft/picologging", "Documentation": "https://microsoft.github.io/picologging/", }, ) python-picologging-0.9.4/slowtests/000077500000000000000000000000001467100674700174275ustar00rootroot00000000000000python-picologging-0.9.4/slowtests/test_logrecordhyp.py000066400000000000000000000106201467100674700235400ustar00rootroot00000000000000import logging import logging.handlers import queue import sys from flaky import flaky from hypothesis import given from hypothesis import strategies as st import picologging import picologging.handlers c_integers = st.integers().filter(lambda x: x < 2147483648 and x > -2147483649) @given( name=st.text(), level=c_integers, lineno=c_integers, msg=st.text().filter(lambda t: t.find("%") < 0), extra_arg=st.text(), func=st.text(), sinfo=st.text(), ) def test_hypothesis_logrecord_constructor( name, level, lineno, msg, extra_arg, func, sinfo ): args = (extra_arg,) # Create an exception tuple exc_info = None try: 10 / 0 except ZeroDivisionError: exc_info = sys.exc_info() pico_record = picologging.LogRecord( name, level, __file__, lineno, msg + " %s", args, exc_info, func, sinfo ) stdl_record = logging.LogRecord( name, level, __file__, lineno, msg + " %s", args, exc_info, func, sinfo ) assert pico_record.name == stdl_record.name assert pico_record.msg == stdl_record.msg assert pico_record.levelno == stdl_record.levelno assert pico_record.lineno == stdl_record.lineno assert pico_record.module == stdl_record.module assert pico_record.args == stdl_record.args assert abs(pico_record.created - stdl_record.created) < 0.5 assert pico_record.getMessage() == stdl_record.getMessage() @flaky(max_runs=4, 
min_passes=1) @given( name=st.text(), level=c_integers, lineno=c_integers, msg=st.text().filter(lambda t: t.find("%") < 0), extra_arg=st.text(), func=st.text(), sinfo=st.text(), ) def test_hypothesis_logrecord_filename( name, level, lineno, msg, extra_arg, func, sinfo ): args = (extra_arg,) pico_record = picologging.LogRecord( name, level, __file__, lineno, msg + " %s", args, None, func, sinfo ) stdl_record = logging.LogRecord( name, level, __file__, lineno, msg + " %s", args, None, func, sinfo ) # Filename sometimes reported without extension on Windows assert pico_record.filename == stdl_record.filename @given(args=st.lists(st.text(), min_size=0, max_size=10).map(tuple)) def test_hypothesis_logrecord_args(args): msg = " %s " * len(args) pico_record = picologging.LogRecord("", 10, __file__, 10, msg, args, None) stdl_record = logging.LogRecord("", 10, __file__, 10, msg, args, None) assert pico_record.msg == stdl_record.msg assert pico_record.args == stdl_record.args assert pico_record.getMessage() == stdl_record.getMessage() @given( name=st.text(), level=c_integers, lineno=c_integers, msg=st.text().filter(lambda t: t.find("%") < 0), extra_arg=st.text(), func=st.text(), sinfo=st.text(), ) def test_hypothesis_queuehandler_prepare( name, level, lineno, msg, extra_arg, func, sinfo ): """This test ensures the robustness of the prepare() method, which may be unstable in how it copies LogRecord using positional arguments (since it is currently not possible to use copy.copy). 
""" args = (extra_arg,) # Create an exception tuple exc_info = None try: 10 / 0 except ZeroDivisionError: exc_info = sys.exc_info() pico_record = picologging.LogRecord( name, level, __file__, lineno, msg + " %s", args, exc_info, func, sinfo ) stdl_record = logging.LogRecord( name, level, __file__, lineno, msg + " %s", args, exc_info, func, sinfo ) pico_handler = picologging.handlers.QueueHandler(queue.Queue()) stdl_handler = logging.handlers.QueueHandler(queue.Queue()) pico_record2 = pico_handler.prepare(pico_record) stdl_record2 = stdl_handler.prepare(stdl_record) assert ( pico_record2.name == pico_record.name == stdl_record.name == stdl_record2.name ) assert ( pico_record2.msg == stdl_record2.msg == stdl_handler.format(stdl_record) == pico_handler.format(pico_record) ) assert ( pico_record2.levelno == pico_record.levelno == stdl_record.levelno == stdl_record2.levelno ) assert ( pico_record2.lineno == pico_record.lineno == stdl_record.lineno == stdl_record2.lineno ) assert ( pico_record2.module == pico_record.module == stdl_record.module == stdl_record2.module ) assert pico_record2.getMessage() == stdl_record2.getMessage() python-picologging-0.9.4/src/000077500000000000000000000000001467100674700161475ustar00rootroot00000000000000python-picologging-0.9.4/src/picologging/000077500000000000000000000000001467100674700204505ustar00rootroot00000000000000python-picologging-0.9.4/src/picologging/__init__.py000066400000000000000000000413701467100674700225660ustar00rootroot00000000000000import io import os import sys import warnings from logging import BufferingFormatter, Filter, StringTemplateStyle, _checkLevel # NOQA from ._picologging import Handler # NOQA from ._picologging import ( # NOQA Filterer, FormatStyle, Formatter, Logger, LogRecord, StreamHandler, getLevelName, ) __version__ = "0.9.4" CRITICAL = 50 FATAL = CRITICAL ERROR = 40 WARNING = 30 WARN = WARNING INFO = 20 DEBUG = 10 NOTSET = 0 BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s" if hasattr(io, 
"text_encoding"): text_encoding = io.text_encoding else: def text_encoding(encoding) -> str: if encoding is not None: return encoding if sys.flags.utf8_mode: return "utf-8" return "locale" class PercentStyle(FormatStyle): def __new__(cls, *args, **kwargs): kwargs["style"] = "%" return super().__new__(cls, *args, **kwargs) def __init__(self, fmt, defaults=None): super().__init__(fmt, defaults, style="%") class StrFormatStyle(FormatStyle): def __new__(cls, *args, **kwargs): kwargs["style"] = "{" return super().__new__(cls, *args, **kwargs) def __init__(self, fmt, defaults=None): super().__init__(fmt, defaults, style="{") _STYLES = { "%": (PercentStyle, BASIC_FORMAT), "{": (StrFormatStyle, "{levelname}:{name}:{message}"), "$": (StringTemplateStyle, "${levelname}:${name}:${message}"), } class _Placeholder: """ _Placeholder instances are used in the Manager logger hierarchy to take the place of nodes for which no loggers have been defined. This class is intended for internal use only and not as part of the public API. """ def __init__(self, alogger): """ Initialize with the specified logger being a child of this placeholder. """ self.loggerMap = {alogger: None} def append(self, alogger): """ Add the specified logger as a child of this placeholder. """ if alogger not in self.loggerMap: self.loggerMap[alogger] = None class Manager: """ There is [under normal circumstances] just one Manager instance, which holds the hierarchy of loggers. """ def __init__(self, rootnode, cls=None): """ Initialize the manager with the root node of the logger hierarchy. """ self.root = rootnode self.disable = 0 self.emittedNoHandlerWarning = False self.loggerDict = {} if not cls: self.cls = Logger else: self.cls = cls @property def disable(self): return self._disable @disable.setter def disable(self, value): self._disable = _checkLevel(value) def getLogger(self, name): """ Get a logger with the specified name (channel name), creating it if it doesn't yet exist. 
This name is a dot-separated hierarchical name, such as "a", "a.b", "a.b.c" or similar. """ if name in self.loggerDict: rv = self.loggerDict[name] if isinstance(rv, _Placeholder): ph = rv rv = self.cls(name) rv.manager = self self.loggerDict[name] = rv self._fixupChildren(ph, rv) self._fixupParents(rv) else: rv = self.cls(name) rv.manager = self self.loggerDict[name] = rv self._fixupParents(rv) return rv def _fixupParents(self, alogger): """ Ensure that there are either loggers or placeholders all the way from the specified logger to the root of the logger hierarchy. """ name = alogger.name i = name.rfind(".") logger_parent = None while (i > 0) and not logger_parent: substr = name[:i] if substr not in self.loggerDict: self.loggerDict[substr] = _Placeholder(alogger) else: obj = self.loggerDict[substr] if isinstance(obj, Logger): logger_parent = obj else: assert isinstance(obj, _Placeholder) obj.append(alogger) i = name.rfind(".", 0, i - 1) if not logger_parent: logger_parent = self.root alogger.parent = logger_parent def _fixupChildren(self, ph, alogger): """ Ensure that children of the placeholder ph are connected to the specified logger. """ name = alogger.name namelen = len(name) for c in ph.loggerMap.keys(): # The if means ... if not c.parent.name.startswith(nm) if c.parent.name[:namelen] != name: alogger.parent = c.parent c.parent = alogger def setLoggerClass(self, klass): self.cls = klass def setLogRecordFactory(self, factory): raise NotImplementedError( "setLogRecordFactory is not supported in picologging." ) root = Logger(name="root", level=WARNING) root.manager = Manager(root) def basicConfig(**kwargs): """ Do basic configuration for the logging system. This function does nothing if the root logger already has handlers configured, unless the keyword argument *force* is set to ``True``. It is a convenience method intended for use by simple scripts to do one-shot configuration of the logging package. 
The default behaviour is to create a StreamHandler which writes to sys.stderr, set a formatter using the BASIC_FORMAT format string, and add the handler to the root logger. A number of optional keyword arguments may be specified, which can alter the default behaviour. filename Specifies that a FileHandler be created, using the specified filename, rather than a StreamHandler. filemode Specifies the mode to open the file, if filename is specified (if filemode is unspecified, it defaults to 'a'). format Use the specified format string for the handler. datefmt Use the specified date/time format. style If a format string is specified, use this to specify the type of format string (possible values '%', '{', '$', for %-formatting, :meth:`str.format` and :class:`string.Template` - defaults to '%'). level Set the root logger level to the specified level. stream Use the specified stream to initialize the StreamHandler. Note that this argument is incompatible with 'filename' - if both are present, 'stream' is ignored. handlers If specified, this should be an iterable of already created handlers, which will be added to the root handler. Any handler in the list which does not have a formatter assigned will be assigned the formatter created in this function. force If this keyword is specified as true, any existing handlers attached to the root logger are removed and closed, before carrying out the configuration as specified by the other arguments. encoding If specified together with a filename, this encoding is passed to the created FileHandler, causing it to be used when the file is opened. errors If specified together with a filename, this value is passed to the created FileHandler, causing it to be used when the file is opened in text mode. If not specified, the default value is `backslashreplace`. Note that you could specify a stream created using open(filename, mode) rather than passing the filename and mode in. 
However, it should be remembered that StreamHandler does not close its stream (since it may be using sys.stdout or sys.stderr), whereas FileHandler closes its stream when the handler is closed. .. versionchanged:: 3.2 Added the ``style`` parameter. .. versionchanged:: 3.3 Added the ``handlers`` parameter. A ``ValueError`` is now thrown for incompatible arguments (e.g. ``handlers`` specified together with ``filename``/``filemode``, or ``filename``/``filemode`` specified together with ``stream``, or ``handlers`` specified together with ``stream``. .. versionchanged:: 3.8 Added the ``force`` parameter. .. versionchanged:: 3.9 Added the ``encoding`` and ``errors`` parameters. """ # Add thread safety in case someone mistakenly calls # basicConfig() from multiple threads force = kwargs.pop("force", False) encoding = kwargs.pop("encoding", None) errors = kwargs.pop("errors", "backslashreplace") if force: for h in root.handlers[:]: root.removeHandler(h) h.close() if len(root.handlers) == 0: handlers = kwargs.pop("handlers", None) if handlers is None: if "stream" in kwargs and "filename" in kwargs: raise ValueError( "'stream' and 'filename' should not be " "specified together" ) else: if "stream" in kwargs or "filename" in kwargs: raise ValueError( "'stream' or 'filename' should not be " "specified together with 'handlers'" ) if handlers is None: filename = kwargs.pop("filename", None) mode = kwargs.pop("filemode", "a") if filename: if "b" in mode: errors = None else: encoding = text_encoding(encoding) h = FileHandler(filename, mode, encoding=encoding, errors=errors) else: stream = kwargs.pop("stream", sys.stderr) h = StreamHandler(stream) handlers = [h] dfs = kwargs.pop("datefmt", None) style = kwargs.pop("style", "%") if style not in _STYLES: raise ValueError("Style must be one of: %s" % ",".join(_STYLES.keys())) fs = kwargs.pop("format", _STYLES[style][1]) fmt = Formatter(fs, dfs, style) for h in handlers: if h.formatter is None: h.setFormatter(fmt) root.addHandler(h) 
level = kwargs.pop("level", None) if level is not None: root.setLevel(level) if kwargs: keys = ", ".join(kwargs.keys()) raise ValueError("Unrecognised argument(s): %s" % keys) def getLogger(name=None): """ Return a logger with the specified name, creating it if necessary. If no name is specified, return the root logger. """ if not name or isinstance(name, str) and name == root.name: return root return root.manager.getLogger(name) def critical(msg, *args, **kwargs): """ Log a message with severity 'CRITICAL' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.critical(msg, *args, **kwargs) def fatal(msg, *args, **kwargs): """ Don't use this function, use critical() instead. """ critical(msg, *args, **kwargs) def error(msg, *args, **kwargs): """ Log a message with severity 'ERROR' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.error(msg, *args, **kwargs) def exception(msg, *args, exc_info=True, **kwargs): """ Log a message with severity 'ERROR' on the root logger, with exception information. If the logger has no handlers, basicConfig() is called to add a console handler with a pre-defined format. """ error(msg, *args, exc_info=exc_info, **kwargs) def warning(msg, *args, **kwargs): """ Log a message with severity 'WARNING' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.warning(msg, *args, **kwargs) def warn(msg, *args, **kwargs): warnings.warn( "The 'warn' function is deprecated, " "use 'warning' instead", DeprecationWarning, 2, ) warning(msg, *args, **kwargs) def info(msg, *args, **kwargs): """ Log a message with severity 'INFO' on the root logger. 
If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.info(msg, *args, **kwargs) def debug(msg, *args, **kwargs): """ Log a message with severity 'DEBUG' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.debug(msg, *args, **kwargs) def log(level, msg, *args, **kwargs): """ Log 'msg % args' with the integer severity 'level' on the root logger. If the logger has no handlers, call basicConfig() to add a console handler with a pre-defined format. """ if len(root.handlers) == 0: basicConfig() root.log(level, msg, *args, **kwargs) def disable(level=CRITICAL): """ Disable all logging calls of severity 'level' and below. """ root.manager.disable = level root.manager._clear_cache() class NullHandler(Handler): """ This handler does nothing. It's intended to be used to avoid the "No handlers could be found for logger XXX" one-off warning. This is important for library code, which may contain code to log events. If a user of the library does not configure logging, the one-off warning might be produced; to avoid this, the library developer simply needs to instantiate a NullHandler and add it to the top-level logger of the library module or package. """ def handle(self, record): """Stub.""" def emit(self, record): """Stub.""" class FileHandler(StreamHandler): """ A handler class which writes formatted logging records to disk files. """ def __init__(self, filename, mode="a", encoding=None, delay=False, errors=None): """ Open the specified file and use it as the stream for logging. 
""" # Issue #27493: add support for Path objects to be passed in filename = os.fspath(filename) # keep the absolute path, otherwise derived classes which use this # may come a cropper when the current directory changes self.baseFilename = os.path.abspath(filename) self.mode = mode self.encoding = encoding self.delay = delay self.errors = errors if delay: # We don't open the stream, but we still need to call the # Handler constructor to set level, formatter, lock etc. Handler.__init__(self) self.stream = None else: StreamHandler.__init__(self, self._open()) def close(self): """ Closes the stream. """ self.acquire() try: try: if self.stream: try: self.flush() finally: stream = self.stream self.stream = None if hasattr(stream, "close"): stream.close() finally: # Issue #19523: call unconditionally to # prevent a handler leak when delay is set StreamHandler.close(self) finally: self.release() def _open(self): """ Open the current base file with the (original) mode and encoding. Return the resulting stream. """ return open( self.baseFilename, self.mode, encoding=self.encoding, errors=self.errors ) def emit(self, record): """ Emit a record. If the stream was not opened because 'delay' was specified in the constructor, open it before calling the superclass's emit. """ if self.stream is None: self.stream = self._open() StreamHandler.emit(self, record) def __repr__(self): level = getLevelName(self.level) return f"<{self.__class__.__name__} {self.baseFilename} ({level})>" def makeLogRecord(dict): """ Make a LogRecord whose attributes are defined by the specified dictionary, This function is useful for converting a logging event received over a socket connection (which is sent as a dictionary) into a LogRecord instance. 
""" rv = LogRecord("", NOTSET, "", 0, "", None, None) for k, v in dict.items(): setattr(rv, k, v) return rv python-picologging-0.9.4/src/picologging/__init__.pyi000066400000000000000000000250111467100674700227310ustar00rootroot00000000000000import sys from collections.abc import Callable, Iterable, Mapping from io import TextIOWrapper from multiprocessing import Manager from string import Template from types import TracebackType from typing import Any, Generic, Optional, Pattern, TextIO, TypeVar, Union, overload from _typeshed import StrPath, SupportsWrite from typing_extensions import Literal, TypeAlias __version__: str CRITICAL: int FATAL: int ERROR: int WARNING: int WARN: int INFO: int DEBUG: int NOTSET: int _SysExcInfoType: TypeAlias = Union[ tuple[type[BaseException], BaseException, TracebackType | None], tuple[None, None, None], ] _ExcInfoType: TypeAlias = None | bool | _SysExcInfoType | BaseException _ArgsType: TypeAlias = tuple[object, ...] | Mapping[str, object] _Level: TypeAlias = int # Not | str like it is in logging _FormatStyle: TypeAlias = Literal["%", "{", "$"] class LogRecord: """ A LogRecord instance represents an event being logged. LogRecord instances are created every time something is logged. They contain all the information pertinent to the event being logged. The main information passed in is in msg and args, which are combined using str(msg) % args to create the message field of the record. The record also includes information such as when the record was created, the source line where the logging call was made, and any exception information to be logged. 
""" name: str levelno: int levelname: str msg: str args: Iterable[Any] pathname: str filename: str module: str lineno: int funcName: str created: float msecs: int relativeCreated: float thread: Optional[int] threadName: Optional[str] processName: Optional[str] process: Optional[int] exc_info: Any exc_text: Optional[str] stack_info: Optional[Any] message: str asctime: str def __init__( self, name: str, level: int, pathname: str, lineno: int, msg: str, args: Iterable[Any], exc_info: Any, func=None, sinfo=None, **kwargs, ): ... def getMessage(self) -> str: """ Return the message for this LogRecord. Return the message for this LogRecord after merging any user-supplied arguments with the message. """ ... class Formatter: datefmt: str def __init__( self, fmt: str | None = ..., datefmt: str | None = ..., style: _FormatStyle = ..., validate: bool = ..., *, defaults: Mapping[str, Any] | None = ..., ) -> None: ... def format(self, record: LogRecord) -> str: ... def formatMessage(self, record: LogRecord) -> str: ... # undocumented def formatStack(self, stack_info: str) -> str: ... def formatException(self, ei: _SysExcInfoType) -> str: ... def usesTime(self) -> bool: ... # undocumented _FilterType: TypeAlias = Filter | Callable[[LogRecord], int] class Filterer: filters: list[Filter] def __init__(self) -> None: ... def addFilter(self, filter: _FilterType) -> None: ... def removeFilter(self, filter: _FilterType) -> None: ... def filter(self, record: LogRecord) -> bool: ... class Handler(Filterer): level: int # undocumented formatter: Formatter | None # undocumented name: str | None # undocumented def __init__( self, name: Optional[str] = None, level: Optional[int] = NOTSET ) -> None: ... def acquire(self) -> None: ... def release(self) -> None: ... def setLevel(self, level: int) -> None: ... def setFormatter(self, fmt: Formatter | None) -> None: ... def filter(self, record: LogRecord) -> bool: ... def handle(self, record: LogRecord) -> bool: ... 
def format(self, record: LogRecord) -> str: ... def emit(self, record: LogRecord) -> None: ... def flush(self) -> None: ... def close(self) -> None: ... def handleError(self, record: LogRecord) -> None: ... def get_name(self) -> str: ... def set_name(self, name: str) -> None: ... def createLock(self) -> None: ... class Logger(Filterer): propagate: bool name: str level: int parent: Logger handlers: list[Handler] disabled: bool manager: Optional[Manager] def __init__(self, name: str, level: _Level = ...) -> None: ... def setLevel(self, level: _Level) -> None: ... def getEffectiveLevel(self) -> int: ... def isEnabledFor(self, level: int) -> bool: ... def debug( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def info( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def warning( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def warn( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def error( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def exception( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def critical( self, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... 
def log( self, level: int, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... fatal = critical def filter(self, record: LogRecord) -> bool: ... def addHandler(self, hdlr: Handler) -> None: ... def removeHandler(self, hdlr: Handler) -> None: ... def handle(self, record: LogRecord) -> None: ... class Filter: name: str # undocumented nlen: int # undocumented def __init__(self, name: str = ...) -> None: ... def filter(self, record: LogRecord) -> bool: ... def getLogger(name: str | None = ...) -> Logger: ... def debug( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def info( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def warning( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def warn( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def error( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def critical( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def exception( msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... 
def log( level: int, msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., stacklevel: int = ..., extra: Mapping[str, object] | None = ..., ) -> None: ... def basicConfig( *, filename: StrPath | None = ..., filemode: str = ..., format: str = ..., datefmt: str | None = ..., style: _FormatStyle = ..., level: _Level | None = ..., stream: SupportsWrite[str] | None = ..., handlers: Iterable[Handler] | None = ..., force: bool | None = ..., encoding: str | None = ..., errors: str | None = ..., ) -> None: ... lastResort: StreamHandler[Any] | None _StreamT = TypeVar("_StreamT", bound=SupportsWrite[str]) class StreamHandler(Handler, Generic[_StreamT]): stream: _StreamT # undocumented @overload def __init__(self: StreamHandler[TextIO], stream: None = ...) -> None: ... @overload def __init__(self: StreamHandler[_StreamT], stream: _StreamT) -> None: ... def setStream(self, stream: _StreamT) -> _StreamT | None: ... class FileHandler(StreamHandler[TextIOWrapper]): baseFilename: str # undocumented mode: str # undocumented encoding: str | None # undocumented delay: bool # undocumented if sys.version_info >= (3, 9): errors: str | None # undocumented def __init__( self, filename: StrPath, mode: str = "a", encoding: str | None = None, delay: bool = False, errors: str | None = None, ) -> None: ... else: def __init__( self, filename: StrPath, mode: str = "a", encoding: str | None = None, delay: bool = False, ) -> None: ... class NullHandler(Handler): ... root: Logger class PercentStyle: def __init__( self, fmt: str, *, defaults: Mapping[str, Any] | None = ... ) -> None: ... def usesTime(self) -> bool: ... def validate(self) -> None: ... def format(self, record: Any) -> str: ... class StrFormatStyle(PercentStyle): # undocumented fmt_spec: Pattern[str] field_spec: Pattern[str] class StringTemplateStyle(PercentStyle): # undocumented _tpl: Template _STYLES: dict[str, tuple[PercentStyle, str]] BASIC_FORMAT: str def getLevelName(level: _Level) -> Any: ... 
def makeLogRecord(dict: Mapping[str, object]) -> LogRecord: ... python-picologging-0.9.4/src/picologging/_picologging.cxx000066400000000000000000000161001467100674700236320ustar00rootroot00000000000000// Python includes #include // STD includes #include #include #include "picologging.hxx" #include "logrecord.hxx" #include "formatter.hxx" #include "formatstyle.hxx" #include "logger.hxx" #include "handler.hxx" #include "streamhandler.hxx" const std::unordered_map LEVELS_TO_NAMES = { {LOG_LEVEL_DEBUG, "DEBUG"}, {LOG_LEVEL_INFO, "INFO"}, {LOG_LEVEL_WARNING, "WARNING"}, {LOG_LEVEL_ERROR, "ERROR"}, {LOG_LEVEL_CRITICAL, "CRITICAL"}, {LOG_LEVEL_NOTSET, "NOTSET"}, }; const std::unordered_map NAMES_TO_LEVELS = { {"DEBUG", LOG_LEVEL_DEBUG}, {"INFO", LOG_LEVEL_INFO}, {"WARNING", LOG_LEVEL_WARNING}, {"ERROR", LOG_LEVEL_ERROR}, {"CRITICAL", LOG_LEVEL_CRITICAL}, {"NOTSET", LOG_LEVEL_NOTSET}, }; static inline picologging_state* get_picologging_state(PyObject* module) { void *state = PyModule_GetState(module); assert(state != NULL); return (picologging_state*)state; } std::string _getLevelName(short level) { std::unordered_map::const_iterator it; it = LEVELS_TO_NAMES.find(level); if (it == LEVELS_TO_NAMES.end()){ return ""; } return it->second; } short getLevelByName(std::string levelName) { std::unordered_map::const_iterator it; it = NAMES_TO_LEVELS.find(levelName); if (it == NAMES_TO_LEVELS.end()){ return -1; } return it->second; } static PyObject *getLevelName(PyObject *self, PyObject *level) { if (PyLong_Check(level)) { short levelValue = (short)PyLong_AsLong(level); std::string levelName = _getLevelName(levelValue); if (levelName.length() > 0) { return PyUnicode_FromString(levelName.c_str()); } PyErr_Format(PyExc_ValueError, "Invalid level value: %d", levelValue); return nullptr; } if (PyUnicode_Check(level)) { short levelValue = getLevelByName(PyUnicode_AsUTF8(level)); if (levelValue >= 0) { return PyLong_FromLong(levelValue); } PyErr_Format(PyExc_ValueError, "Invalid level 
value: %U", level); return nullptr; } PyErr_SetString(PyExc_TypeError, "level must be an integer or a string."); return nullptr; } //----------------------------------------------------------------------------- static PyMethodDef picologging_methods[] = { {"getLevelName", (PyCFunction)getLevelName, METH_O, "Get level name by level number."}, {NULL, NULL, 0, NULL} /* Sentinel */ }; //----------------------------------------------------------------------------- static int picologging_clear(PyObject *module) { picologging_state *state = get_picologging_state(module); if (state && state->g_filepathCache) { delete state->g_filepathCache; state->g_filepathCache = nullptr; Py_DECREF(state->g_const_CRITICAL); Py_DECREF(state->g_const_ERROR); Py_DECREF(state->g_const_WARNING); Py_DECREF(state->g_const_INFO); Py_DECREF(state->g_const_DEBUG); Py_DECREF(state->g_const_NOTSET); } return 0; } static void picologging_free(void *module) { picologging_clear((PyObject *)module); } struct PyModuleDef _picologging_module = { .m_base = PyModuleDef_HEAD_INIT, .m_name = "_picologging", .m_doc = "Internal \"_picologging\" module", .m_size = sizeof(picologging_state), .m_methods = picologging_methods, .m_slots = nullptr, // slots .m_traverse = nullptr, // traverse .m_clear = picologging_clear, // clear .m_free = (freefunc)picologging_free // free }; /* LCOV_EXCL_START */ PyMODINIT_FUNC PyInit__picologging(void) { if (PyType_Ready(&LogRecordType) < 0) return NULL; if (PyType_Ready(&FormatStyleType) < 0) return NULL; if (PyType_Ready(&FormatterType) < 0) return NULL; if (PyType_Ready(&FiltererType) < 0) return NULL; LoggerType.tp_base = &FiltererType; if (PyType_Ready(&LoggerType) < 0) return NULL; HandlerType.tp_base = &FiltererType; if (PyType_Ready(&HandlerType) < 0) return NULL; StreamHandlerType.tp_base = &HandlerType; if (PyType_Ready(&StreamHandlerType) < 0) return NULL; PyObject* m = PyModule_Create(&_picologging_module); if (m == NULL) return NULL; // Initialize module state 
picologging_state *state = get_picologging_state(m); state->g_filepathCache = new FilepathCache(); state->g_const_CRITICAL = PyUnicode_FromString("CRITICAL"); state->g_const_ERROR = PyUnicode_FromString("ERROR"); state->g_const_WARNING = PyUnicode_FromString("WARNING"); state->g_const_INFO = PyUnicode_FromString("INFO"); state->g_const_DEBUG = PyUnicode_FromString("DEBUG"); state->g_const_NOTSET = PyUnicode_FromString("NOTSET"); Py_INCREF(&LogRecordType); Py_INCREF(&FormatStyleType); Py_INCREF(&FormatterType); Py_INCREF(&FiltererType); Py_INCREF(&LoggerType); Py_INCREF(&HandlerType); Py_INCREF(&StreamHandlerType); if (PyModule_AddObject(m, "LogRecord", (PyObject *)&LogRecordType) < 0){ Py_DECREF(&LogRecordType); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "FormatStyle", (PyObject *)&FormatStyleType) < 0){ Py_DECREF(&FormatStyleType); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "Formatter", (PyObject *)&FormatterType) < 0){ Py_DECREF(&FormatterType); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "Filterer", (PyObject *)&FiltererType) < 0){ Py_DECREF(&FiltererType); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "Logger", (PyObject *)&LoggerType) < 0){ Py_DECREF(&LoggerType); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "Handler", (PyObject *)&HandlerType) < 0){ Py_DECREF(&HandlerType); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "StreamHandler", (PyObject *)&StreamHandlerType) < 0){ Py_DECREF(&StreamHandlerType); Py_DECREF(m); return NULL; } if (PyModule_AddStringConstant(m, "default_fmt", "%(message)s") < 0){ Py_DECREF(m); return NULL; } if (PyModule_AddStringConstant(m, "default_datefmt", "%Y-%m-%d %H:%M:%S") < 0){ Py_DECREF(m); return NULL; } if (PyModule_AddStringConstant(m, "default_style", "%") < 0){ Py_DECREF(m); return NULL; } PyObject* traceback = PyImport_ImportModule("traceback"); if (traceback == NULL) return NULL; PyObject* print_exception = PyObject_GetAttrString(traceback, "print_exception"); 
if (print_exception == NULL) return NULL; PyObject* print_stack = PyObject_GetAttrString(traceback, "print_stack"); if (print_stack == NULL) return NULL; Py_DECREF(traceback); if (PyModule_AddObject(m, "print_exception", print_exception) < 0){ Py_DECREF(print_exception); Py_DECREF(m); return NULL; } if (PyModule_AddObject(m, "print_stack", print_stack) < 0){ Py_DECREF(print_stack); Py_DECREF(m); return NULL; } PyObject* io = PyImport_ImportModule("io"); if (io == NULL) return NULL; PyObject* stringio = PyObject_GetAttrString(io, "StringIO"); if (stringio == NULL) return NULL; Py_DECREF(io); if (PyModule_AddObject(m, "StringIO", stringio) < 0){ Py_DECREF(stringio); Py_DECREF(m); return NULL; } return m; } /* LCOV_EXCL_STOP */ python-picologging-0.9.4/src/picologging/compat.hxx000066400000000000000000000045421467100674700224710ustar00rootroot00000000000000/* * from CPython 3.10 object.h * See https://www.python.org/doc/copyright/ */ #include #ifndef COMPAT_H #define COMPAT_H #ifndef _PyObject_CAST #define _PyObject_CAST(op) ((PyObject*)(op)) #endif #ifndef _PyObject_CAST_CONST #define _PyObject_CAST_CONST(op) ((const PyObject*)(op)) #endif #ifndef _PyVarObject_CAST #define _PyVarObject_CAST(op) ((PyVarObject*)(op)) #endif #ifndef _PyVarObject_CAST_CONST #define _PyVarObject_CAST_CONST(op) ((const PyVarObject*)(op)) #endif #ifndef Py_IS_TYPE static inline int _Py_IS_TYPE(const PyObject *ob, const PyTypeObject *type) { return ob->ob_type == type; } #define Py_IS_TYPE(ob, type) _Py_IS_TYPE(_PyObject_CAST_CONST(ob), type) #endif #ifndef Py_SET_SIZE static inline void _Py_SET_SIZE(PyVarObject *ob, Py_ssize_t size) { ob->ob_size = size; } #define Py_SET_SIZE(ob, size) _Py_SET_SIZE(_PyVarObject_CAST(ob), size) #endif #define PYUNICODE_ENDSWITH(ob, suffix) (PyUnicode_Tailmatch(ob, suffix, PyUnicode_GET_LENGTH(ob) - 1, PyUnicode_GET_LENGTH(ob), +1) > 0) #if PY_VERSION_HEX >= 0x03090000 #define PyObject_CallMethod_ONEARG(ob, name, arg) PyObject_CallMethodOneArg(ob, name, arg) 
#else #define PyObject_CallMethod_ONEARG(ob, name, arg) PyObject_CallMethodObjArgs(ob, name, arg, NULL) #endif #if PY_VERSION_HEX >= 0x03090000 #define PyObject_CallMethod_NOARGS(ob, name) PyObject_CallMethodNoArgs(ob, name) #else #define PyObject_CallMethod_NOARGS(ob, name) PyObject_CallMethodObjArgs(ob, name, NULL) #endif #if PY_VERSION_HEX >= 0x030b0000 // Python 3.11.0 #define PyFrame_GETBACK(f) PyFrame_GetBack(f) #define PyFrame_GETCODE(f) PyFrame_GetCode(f) #define PyFrame_GETLINENO(f) PyFrame_GetLineNumber(f) #else #define PyFrame_GETBACK(f) f->f_back #define PyFrame_GETCODE(f) f->f_code #define PyFrame_GETLINENO(f) f->f_lineno #endif #if PY_VERSION_HEX < 0x03080000 // Python 3.7 and below #define PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) static inline Py_ssize_t PyVectorcall_NARGS(size_t n) { return n & ~PY_VECTORCALL_ARGUMENTS_OFFSET; } #endif #ifndef Py_NewRef # define Py_NewRef(obj) _Py_NewRef((PyObject*)obj) # define Py_XNewRef(obj) _Py_XNewRef(PyObject*)(obj)) static inline PyObject* _Py_NewRef(PyObject *obj) { Py_INCREF(obj); return obj; } static inline PyObject* _Py_XNewRef(PyObject *obj) { Py_XINCREF(obj); return obj; } #endif #endif // COMPAT_Hpython-picologging-0.9.4/src/picologging/config.py000066400000000000000000000312341467100674700222720ustar00rootroot00000000000000import re from logging.config import BaseConfigurator import picologging from picologging.handlers import MemoryHandler IDENTIFIER = re.compile("^[a-z_][a-z0-9_]*$", re.I) def valid_ident(s): m = IDENTIFIER.match(s) if not m: raise ValueError("Not a valid Python identifier: %r" % s) return True def _resolve(name): """Resolve a dotted name to a global object.""" name = name.split(".") used = name.pop(0) found = __import__(used) for n in name: used = used + "." 
+ n try: found = getattr(found, n) except AttributeError: __import__(used) found = getattr(found, n) return found def _handle_existing_loggers(existing, child_loggers, disable_existing): """ When (re)configuring logging, handle loggers which were in the previous configuration but are not in the new configuration. There's no point deleting them as other threads may continue to hold references to them; and by disabling them, you stop them doing any logging. However, don't disable children of named loggers, as that's probably not what was intended by the user. Also, allow existing loggers to NOT be disabled if disable_existing is false. """ root = picologging.root for log in existing: logger = root.manager.loggerDict[log] if log in child_loggers: logger.setLevel(picologging.NOTSET) logger.handlers = [] logger.propagate = True else: logger.disabled = disable_existing class DictConfigurator(BaseConfigurator): """ Configure logging using a dictionary-like object to describe the configuration. 
""" def configure(self): config = self.config if "version" not in config: raise ValueError("dictionary doesn't specify a version") if config["version"] != 1: raise ValueError("Unsupported version: %s" % config["version"]) incremental = config.pop("incremental", False) if incremental: raise ValueError("Incremental option is not supported.") EMPTY_DICT = {} disable_existing = config.pop("disable_existing_loggers", True) # Do formatters first - they don't refer to anything else formatters = config.get("formatters", EMPTY_DICT) for name in formatters: try: formatters[name] = self.configure_formatter(formatters[name]) except Exception as e: raise ValueError("Unable to configure " "formatter %r" % name) from e # Next, do filters - they don't refer to anything else, either filters = config.get("filters", EMPTY_DICT) for name in filters: try: filters[name] = self.configure_filter(filters[name]) except Exception as e: raise ValueError("Unable to configure " "filter %r" % name) from e # Next, do handlers - they refer to formatters and filters # As handlers can refer to other handlers, sort the keys # to allow a deterministic order of configuration handlers = config.get("handlers", EMPTY_DICT) deferred = [] for name in sorted(handlers): try: handler = self.configure_handler(handlers[name]) handler.name = name handlers[name] = handler except Exception as e: if "target not configured yet" in str(e.__cause__): deferred.append(name) else: raise ValueError("Unable to configure handler " "%r" % name) from e # Now do any that were deferred for name in deferred: try: handler = self.configure_handler(handlers[name]) handler.name = name handlers[name] = handler except Exception as e: raise ValueError("Unable to configure handler " "%r" % name) from e # Next, do loggers - they refer to handlers and filters # we don't want to lose the existing loggers, # since other threads may have pointers to them. 
# existing is set to contain all existing loggers, # and as we go through the new configuration we # remove any which are configured. At the end, # what's left in existing is the set of loggers # which were in the previous configuration but # which are not in the new configuration. root = picologging.root existing = list(root.manager.loggerDict.keys()) # The list needs to be sorted so that we can # avoid disabling child loggers of explicitly # named loggers. With a sorted list it is easier # to find the child loggers. existing.sort() # We'll keep the list of existing loggers # which are children of named loggers here... child_loggers = [] # now set up the new ones... loggers = config.get("loggers", EMPTY_DICT) for name in loggers: if name in existing: i = existing.index(name) + 1 # look after name prefixed = name + "." pflen = len(prefixed) num_existing = len(existing) while i < num_existing: if existing[i][:pflen] == prefixed: child_loggers.append(existing[i]) i += 1 existing.remove(name) try: self.configure_logger(name, loggers[name]) except Exception as e: raise ValueError("Unable to configure logger " "%r" % name) from e # Disable any old loggers. There's no point deleting # them as other threads may continue to hold references # and by disabling them, you stop them doing any logging. # However, don't disable children of named loggers, as that's # probably not what was intended by the user. _handle_existing_loggers(existing, child_loggers, disable_existing) # And finally, do the root logger root = config.get("root", None) if root: try: self.configure_root(root) except Exception as e: raise ValueError("Unable to configure root " "logger") from e def configure_formatter(self, config): """Configure a formatter from a dictionary.""" if "()" in config: factory = config["()"] # for use in exception handler try: result = self.configure_custom(config) except TypeError as te: if "'format'" not in str(te): raise # Name of parameter changed from fmt to format. 
# Retry with old name. # This is so that code can be used with older Python versions # (e.g. by Django) config["fmt"] = config.pop("format") config["()"] = factory result = self.configure_custom(config) else: fmt = config.get("format", None) dfmt = config.get("datefmt", None) style = config.get("style", "%") cname = config.get("class", None) if not cname: c = picologging.Formatter else: c = _resolve(cname) # A TypeError would be raised if "validate" key is passed in with a formatter callable # that does not accept "validate" as a parameter if ( "validate" in config ): # if user hasn't mentioned it, the default will be fine result = c(fmt, dfmt, style, config["validate"]) else: result = c(fmt, dfmt, style) return result def configure_filter(self, config): """Configure a filter from a dictionary.""" if "()" in config: result = self.configure_custom(config) else: name = config.get("name", "") result = picologging.Filter(name) return result def add_filters(self, filterer, filters): """Add filters to a filterer from a list of names.""" for f in filters: try: filterer.addFilter(self.config["filters"][f]) except Exception as e: raise ValueError("Unable to add filter %r" % f) from e def configure_handler(self, config): """Configure a handler from a dictionary.""" config_copy = dict(config) # for restoring in case of error formatter = config.pop("formatter", None) if formatter: try: formatter = self.config["formatters"][formatter] except Exception as e: raise ValueError("Unable to set formatter " "%r" % formatter) from e level = config.pop("level", None) filters = config.pop("filters", None) if "()" in config: c = config.pop("()") if not callable(c): c = self.resolve(c) factory = c else: cname = config.pop("class") klass = self.resolve(cname) # Special case for handler which refers to another handler if issubclass(klass, MemoryHandler) and "target" in config: try: th = self.config["handlers"][config["target"]] if not isinstance(th, picologging.Handler): 
config.update(config_copy) # restore for deferred cfg raise TypeError("target not configured yet") config["target"] = th except Exception as e: raise ValueError( "Unable to set target handler " "%r" % config["target"] ) from e # elif ( # issubclass(klass, picologging.handlers.SMTPHandler) # and "mailhost" in config # ): # config["mailhost"] = self.as_tuple(config["mailhost"]) # elif ( # issubclass(klass, picologging.handlers.SysLogHandler) # and "address" in config # ): # config["address"] = self.as_tuple(config["address"]) factory = klass props = config.pop(".", None) kwargs = {k: config[k] for k in config if valid_ident(k)} try: result = factory(**kwargs) except TypeError as te: if "'stream'" not in str(te): raise # The argument name changed from strm to stream # Retry with old name. # This is so that code can be used with older Python versions # (e.g. by Django) kwargs["strm"] = kwargs.pop("stream") result = factory(**kwargs) if formatter: result.setFormatter(formatter) if level is not None: result.setLevel(picologging._checkLevel(level)) if filters: self.add_filters(result, filters) if props: for name, value in props.items(): setattr(result, name, value) return result def add_handlers(self, logger, handlers): """Add handlers to a logger from a list of names.""" for h in handlers: try: logger.addHandler(self.config["handlers"][h]) except Exception as e: raise ValueError("Unable to add handler %r" % h) from e def common_logger_config(self, logger, config): """ Perform configuration which is common to root and non-root loggers. 
""" level = config.get("level", None) if level is not None: logger.setLevel(picologging._checkLevel(level)) # Remove any existing handlers for h in logger.handlers[:]: logger.removeHandler(h) handlers = config.get("handlers", None) if handlers: self.add_handlers(logger, handlers) filters = config.get("filters", None) if filters: self.add_filters(logger, filters) def configure_logger(self, name, config): """Configure a non-root logger from a dictionary.""" logger = picologging.getLogger(name) self.common_logger_config(logger, config) logger.disabled = False propagate = config.get("propagate", None) if propagate is not None: logger.propagate = propagate def configure_root(self, config): """Configure a root logger from a dictionary.""" root = picologging.getLogger() self.common_logger_config(root, config) dictConfigClass = DictConfigurator def dictConfig(config): """Configure logging using a dictionary.""" dictConfigClass(config).configure() python-picologging-0.9.4/src/picologging/config.pyi000066400000000000000000000026041467100674700224420ustar00rootroot00000000000000import sys from collections.abc import Sequence from typing import Any from . 
import _Level if sys.version_info >= (3, 8): from typing import Literal, TypedDict else: from typing_extensions import Literal, TypedDict class _RootLoggerConfiguration(TypedDict, total=False): level: _Level filters: Sequence[str] handlers: Sequence[str] class _LoggerConfiguration(_RootLoggerConfiguration, TypedDict, total=False): propagate: bool class _OptionalDictConfigArgs(TypedDict, total=False): # these two can have custom factories (key: `()`) which can have extra keys formatters: dict[str, dict[str, Any]] filters: dict[str, dict[str, Any]] # type checkers would warn about extra keys if this was a TypedDict handlers: dict[str, dict[str, Any]] loggers: dict[str, _LoggerConfiguration] root: _RootLoggerConfiguration | None incremental: bool disable_existing_loggers: bool class _DictConfigArgs(_OptionalDictConfigArgs, TypedDict): version: Literal[1] # Accept dict[str, Any] to avoid false positives if called with a dict # type, since dict types are not compatible with TypedDicts. # # Also accept a TypedDict type, to allow callers to use TypedDict # types, and for somewhat stricter type checking of dict literals. def dictConfig(config: _DictConfigArgs | dict[str, Any]) -> None: ... def valid_ident(s: str) -> Literal[True]: ... # undocumented python-picologging-0.9.4/src/picologging/filepathcache.cxx000066400000000000000000000024401467100674700237540ustar00rootroot00000000000000#include #include "filepathcache.hxx" namespace fs = std::filesystem; const FilepathCacheEntry& FilepathCache::lookup(PyObject* pathname){ /* * Notes: A vector ended up being significantly faster than an unordered_map, * even though an unordered_map should probably be used. * TODO #3 : Cap vector size or decide on a better map type. 
*/ Py_hash_t hash = PyObject_Hash(pathname); for (auto& entry : cache){ if (entry.first == hash){ return entry.second; } } FilepathCacheEntry* entry = new FilepathCacheEntry(); fs::path fs_path = fs::path(PyUnicode_AsUTF8(pathname)); #ifdef WIN32 const wchar_t* filename_wchar = fs_path.filename().c_str(); const wchar_t* modulename = fs_path.stem().c_str(); entry->filename = PyUnicode_FromWideChar(filename_wchar, wcslen(filename_wchar)), entry->module = PyUnicode_FromWideChar(modulename, wcslen(modulename)); #else entry->filename = PyUnicode_FromString(fs_path.filename().c_str()); entry->module = PyUnicode_FromString(fs_path.stem().c_str()); #endif cache.push_back({hash, *entry}); return *entry; } FilepathCache::~FilepathCache(){ for (auto& entry : cache){ Py_CLEAR(entry.second.filename); Py_CLEAR(entry.second.module); } }python-picologging-0.9.4/src/picologging/filepathcache.hxx000066400000000000000000000007131467100674700237620ustar00rootroot00000000000000#include #include #include #include #ifndef PICOLOGGING_FILEPATHCACHE_H #define PICOLOGGING_FILEPATHCACHE_H typedef struct { PyObject* filename; PyObject* module; } FilepathCacheEntry; class FilepathCache { std::vector> cache; public: const FilepathCacheEntry& lookup(PyObject* filepath); ~FilepathCache(); }; #endif // PICOLOGGING_FILEPATHCACHE_Hpython-picologging-0.9.4/src/picologging/filterer.cxx000066400000000000000000000115411467100674700230120ustar00rootroot00000000000000#include "filterer.hxx" #include "compat.hxx" PyObject* Filterer_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { Filterer* self = (Filterer*)type->tp_alloc(type, 0); if (self != NULL) { self->filters = PyList_New(0); if (self->filters == NULL) return nullptr; self->_const_filter = PyUnicode_FromString("filter"); self->_const_remove = PyUnicode_FromString("remove"); } return (PyObject*)self; } int Filterer_init(Filterer *self, PyObject *args, PyObject *kwds) { return 0; } PyObject* Filterer_addFilter(Filterer* self, PyObject *filter) 
{ // Equivalent to `if not (filter in self.filters):` if (PySequence_Contains(self->filters, filter) == 0){ PyList_Append(self->filters, filter); } Py_RETURN_NONE; } PyObject* Filterer_removeFilter(Filterer* self, PyObject *filter) { if (PySequence_Contains(self->filters, filter) == 1){ return PyObject_CallMethod_ONEARG(self->filters, self->_const_remove, filter); } Py_RETURN_NONE; } PyObject* Filterer_filter(Filterer* self, PyObject *record) { bool ret = true; for (int i = 0; i < PyList_GET_SIZE(self->filters); i++) { PyObject *result = Py_None; PyObject *filter = PyList_GET_ITEM(self->filters, i); // borrowed ref if (PyObject_HasAttr(filter, self->_const_filter)) { result = PyObject_CallMethod_ONEARG(filter, self->_const_filter, record); if (result == nullptr) return nullptr; } else { result = PyObject_CallFunctionObjArgs(filter, record, NULL); } if (result == Py_False || result == Py_None) { ret = false; break; } } if (ret) Py_RETURN_TRUE; Py_RETURN_FALSE; } PyObject* Filterer_dealloc(Filterer *self) { Py_CLEAR(self->filters); Py_CLEAR(self->_const_filter); Py_CLEAR(self->_const_remove); Py_TYPE(self)->tp_free((PyObject*)self); return NULL; } static PyMethodDef Filterer_methods[] = { {"addFilter", (PyCFunction)Filterer_addFilter, METH_O, "Add a filter to the logger."}, {"removeFilter", (PyCFunction)Filterer_removeFilter, METH_O, "Remove a filter from the logger."}, {"filter", (PyCFunction)Filterer_filter, METH_O, "Filter a record."}, NULL }; static PyMemberDef Filterer_members[] = { {"filters", T_OBJECT_EX, offsetof(Filterer, filters), 0, "Filters"}, {NULL} }; PyTypeObject FiltererType = { PyObject_HEAD_INIT(NULL) "picologging.Filterer", /* tp_name */ sizeof(Filterer), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)Filterer_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ 0, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 
0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , /* tp_flags */ PyDoc_STR("Filterer interface."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Filterer_methods, /* tp_methods */ Filterer_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Filterer_init, /* tp_init */ 0, /* tp_alloc */ Filterer_new, /* tp_new */ PyObject_Del, /* tp_free */ }; python-picologging-0.9.4/src/picologging/filterer.hxx000066400000000000000000000010041467100674700230100ustar00rootroot00000000000000#include #include #ifndef PICOLOGGING_FILTERER_H #define PICOLOGGING_FILTERER_H typedef struct { PyObject_HEAD PyObject *filters; PyObject *_const_filter; PyObject *_const_remove; } Filterer; int Filterer_init(Filterer *self, PyObject *args, PyObject *kwds); PyObject* Filterer_filter(Filterer* self, PyObject *record); PyObject* Filterer_dealloc(Filterer *self); extern PyTypeObject FiltererType; #define Filterer_CheckExact(op) Py_IS_TYPE(op, &FiltererType) #endif python-picologging-0.9.4/src/picologging/formatstyle.cxx000066400000000000000000000436211467100674700235530ustar00rootroot00000000000000#include "formatstyle.hxx" #include "logrecord.hxx" #include "picologging.hxx" #include #include std::regex const fragment_search_percent("\\%\\(\\w+\\)[diouxefgcrsa%]"); std::regex const fragment_search_string_format("\\{\\w+\\}"); std::regex const fragment_search_string_template("\\$\\{\\w+\\}"); FieldMap field_map = { {"name", Field_Name}, {"msg", Field_Msg}, {"args", Field_Args}, {"levelno", Field_LevelNo}, {"levelname", Field_LevelName}, {"pathname", Field_Pathname}, {"filename", Field_Filename}, {"module", Field_Module}, {"lineno", Field_Lineno}, {"funcname", Field_FuncName}, {"created", 
Field_Created}, {"msecs", Field_Msecs}, {"relativeCreated", Field_RelativeCreated}, {"thread", Field_Thread}, {"threadName", Field_ThreadName}, {"processName", Field_ProcessName}, {"process", Field_Process}, {"exc_info", Field_ExcInfo}, {"exc_text", Field_ExcText}, {"stack_info", Field_StackInfo}, {"message", Field_Message}, {"asctime", Field_Asctime}, }; #define APPEND_STRING(field) \ if (PyUnicode_Check(log_record->field )) { \ if (_PyUnicodeWriter_WriteStr(&writer, log_record->field ) != 0) { \ _PyUnicodeWriter_Dealloc(&writer); \ return nullptr; \ } \ } else { \ PyObject* strRepr = PyObject_Str(log_record->field ); \ if (_PyUnicodeWriter_WriteStr(&writer, strRepr) != 0) { \ _PyUnicodeWriter_Dealloc(&writer); \ Py_DECREF(strRepr); \ return nullptr; \ } \ Py_DECREF(strRepr); \ } #define APPEND_INT(field) {\ PyObject* field = PyUnicode_FromFormat("%d", log_record->field ); \ if (_PyUnicodeWriter_WriteStr(&writer, field) != 0) { \ _PyUnicodeWriter_Dealloc(&writer); \ Py_DECREF(field); \ return nullptr; \ } \ Py_DECREF(field); }\ int FormatStyle_init(FormatStyle *self, PyObject *args, PyObject *kwds){ PyObject *fmt = nullptr, *defaults = Py_None; int style = '%'; static const char *kwlist[] = {"fmt", "defaults", "style", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OC", const_cast(kwlist), &fmt, &defaults, &style)) return -1; if (fmt == Py_None) { PyObject* mod = PICOLOGGING_MODULE(); // borrowed reference if (mod == nullptr){ PyErr_SetString(PyExc_TypeError, "Could not find _picologging module"); return -1; } fmt = PyDict_GetItemString(PyModule_GetDict(mod), "default_fmt"); // borrowed reference self->usesDefaultFmt = true; } else { if (!PyUnicode_Check(fmt)) { PyErr_SetString(PyExc_TypeError, "fmt must be a string"); return -1; } self->usesDefaultFmt = false; } std::regex fragment_search; switch(self->style){ case '%': fragment_search = fragment_search_percent; break; case '{': fragment_search = fragment_search_string_format; break; default: 
PyErr_SetString(PyExc_ValueError, "Unknown style"); return -1; } self->fmt = Py_NewRef(fmt); std::string const format_string(PyUnicode_AsUTF8(fmt)); auto fragments_begin = std::sregex_iterator(format_string.begin(), format_string.end(), fragment_search); auto fragments_end = std::sregex_iterator(); int idx = 0; int cursor = 0; for (std::sregex_iterator i = fragments_begin; i != fragments_end; ++i) { std::smatch match = *i; std::string match_str = match.str(); std::string field_name; switch(self->style) { case '%': field_name = match_str.substr(2, match_str.size() - 4); break; case '{': field_name = match_str.substr(1, match_str.size() - 2); break; } if (match.position() != cursor){ // Add literal fragment self->fragments[idx].field = LiteralFragment; self->fragments[idx].fragment = PyUnicode_FromString(format_string.substr(cursor, match.position() - cursor).c_str()); idx ++; } auto it = field_map.find(field_name); if (it != field_map.end()) { self->fragments[idx].field = it->second; self->fragments[idx].fragment = nullptr; } else { self->fragments[idx].field = Field_Unknown; self->fragments[idx].fragment = PyUnicode_FromString(field_name.c_str()); } cursor = match.position() + match.length(); idx ++; } // Add literal fragment at the end if the cursor isn't at the end of the string. 
if (format_string.size() > cursor){ self->fragments[idx].field = LiteralFragment; self->fragments[idx].fragment = PyUnicode_FromString(format_string.substr(cursor, format_string.size() - cursor).c_str()); idx ++; } self->defaults = Py_NewRef(defaults); self->_const_format = PyUnicode_FromString("format"); self->_const__dict__ = PyUnicode_FromString("__dict__"); return 0; } PyObject* FormatStyle_usesTime(FormatStyle *self){ if (self->usesDefaultFmt) Py_RETURN_FALSE; int ret = 0; PyObject* asctime = nullptr; switch (self->style){ case '%': asctime = PyUnicode_FromString("%(asctime)"); break; case '{': asctime = PyUnicode_FromString("{asctime}"); break; default: PyErr_SetString(PyExc_ValueError, "Invalid style value"); return nullptr; } ret = PyUnicode_Find(self->fmt, asctime, 0, PyUnicode_GET_LENGTH(self->fmt), 1); Py_XDECREF(asctime); if (ret >= 0){ Py_RETURN_TRUE; } else if (ret == -1){ Py_RETURN_FALSE; } else { // -2 // Encountered error . return nullptr; } } PyObject* FormatStyle_validate(FormatStyle *self){ /// TODO: #6 #5 Implement percentage style validation. Py_RETURN_NONE; } PyObject* FormatStyle_format(FormatStyle *self, PyObject *record){ if (self->defaults == Py_None){ if (LogRecord_CheckExact(record) || LogRecord_Check(record)){ _PyUnicodeWriter writer; _PyUnicodeWriter_Init(&writer); LogRecord* log_record = reinterpret_cast(record); for (int i = 0 ; i < self->ob_base.ob_size ; i++){ switch (self->fragments[i].field){ /* _PyUnicodeWriter_WriteStr doesn't do any type check (causes segfault) so use the APPEND_STRING macro to use a fast-path if the field is string, otherwise do a PyObject_Str first... TODO: #7 Consider %d, %f format strings.. 
*/ case Field_Name: APPEND_STRING(name) break; case Field_Msg: APPEND_STRING(msg) break; case Field_Args: APPEND_STRING(args) break; case Field_LevelNo: APPEND_INT(levelno) break; case Field_LevelName: APPEND_STRING(levelname) break; case Field_Pathname: APPEND_STRING(pathname) break; case Field_Filename: APPEND_STRING(filename) break; case Field_Module: APPEND_STRING(module) break; case Field_Lineno: APPEND_INT(lineno) break; case Field_FuncName: APPEND_STRING(funcName) break; case Field_Created: { PyObject *asDouble = PyFloat_FromDouble(log_record->created); PyObject *created = PyUnicode_FromFormat("%S", asDouble); Py_DECREF(asDouble); if (_PyUnicodeWriter_WriteStr(&writer, created) != 0) { _PyUnicodeWriter_Dealloc(&writer); Py_DECREF(created); return nullptr; } Py_DECREF(created); } break; case Field_Msecs: APPEND_INT(msecs) break; case Field_RelativeCreated: APPEND_STRING(relativeCreated) break; case Field_Thread: { PyObject* field = PyUnicode_FromFormat("%lu", log_record->thread ); if (_PyUnicodeWriter_WriteStr(&writer, field) != 0) { _PyUnicodeWriter_Dealloc(&writer); Py_DECREF(field); return nullptr; } Py_DECREF(field); } break; case Field_ThreadName: APPEND_STRING(threadName) break; case Field_ProcessName: APPEND_STRING(processName) break; case Field_Process: APPEND_INT(process) break; case Field_ExcInfo: APPEND_STRING(excInfo) break; case Field_ExcText: APPEND_STRING(excText) break; case Field_StackInfo: APPEND_STRING(stackInfo) break; case Field_Message: APPEND_STRING(message) break; case Field_Asctime: APPEND_STRING(asctime) break; case LiteralFragment: if (_PyUnicodeWriter_WriteStr(&writer, self->fragments[i].fragment) != 0) { _PyUnicodeWriter_Dealloc(&writer); return nullptr; } break; case Field_Unknown: { PyObject* attr = PyObject_GetAttr(record, self->fragments[i].fragment); if (attr == nullptr){ _PyUnicodeWriter_Dealloc(&writer); return nullptr; } if (_PyUnicodeWriter_WriteStr(&writer, PyObject_Str(attr)) != 0){ _PyUnicodeWriter_Dealloc(&writer); 
Py_DECREF(attr); return nullptr; } Py_DECREF(attr); break; } default: PyErr_SetString(PyExc_ValueError, "Unknown field"); _PyUnicodeWriter_Dealloc(&writer); return nullptr; } } return _PyUnicodeWriter_Finish(&writer); } else { PyObject* recordDict = PyObject_GetAttr(record, self->_const__dict__); if (recordDict == nullptr) return nullptr; PyObject* result = nullptr; switch (self->style){ case '%': result = PyUnicode_Format(self->fmt, recordDict); break; case '{': result = PyObject_CallMethod_ONEARG(self->fmt, self->_const_format, recordDict); break; } Py_DECREF(recordDict); return result; } } PyObject* dict = PyObject_GetAttr(record, self->_const__dict__); if (PyDict_Merge(dict, self->defaults, 1) < 0){ Py_DECREF(dict); return nullptr; } PyObject* result = nullptr; switch (self->style){ case '%': result = PyUnicode_Format(self->fmt, dict); break; case '{': PyObject* formatMethod = PyObject_GetAttr(self->fmt, self->_const_format); PyObject* args = PyTuple_New(0); result = PyObject_Call(formatMethod, args, dict); Py_DECREF(args); Py_DECREF(formatMethod); break; } Py_DECREF(dict); return result; } PyObject * FormatStyle_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { PyObject *fmt = nullptr, *defaults = Py_None; static const char *kwlist[] = {"fmt", "defaults", "style", NULL}; int style = '%'; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OC", const_cast(kwlist), &fmt, &defaults, &style)) return NULL; int fragmentLen = 0; std::regex fragment_search; switch(style){ case '%': fragment_search = fragment_search_percent; break; case '{': fragment_search = fragment_search_string_format; break; default: PyErr_SetString(PyExc_ValueError, "Unknown style"); return nullptr; } if (fmt != nullptr && fmt != Py_None && PyUnicode_Check(fmt)){ std::string const format_string(PyUnicode_AsUTF8(fmt)); auto fragments_begin = std::sregex_iterator(format_string.begin(), format_string.end(), fragment_search); auto fragments_end = std::sregex_iterator(); int idx = 0; int cursor = 
0; for (std::sregex_iterator i = fragments_begin; i != fragments_end; ++i) { std::smatch match = *i; // If there is a literal fragment before this one, add it to the list if (match.position() != cursor){ fragmentLen++; } cursor = match.position() + match.length(); fragmentLen++; } // Capture last literal fragment if (cursor != format_string.length()){ fragmentLen++; } } else { // Number of format fragments in DEFAULT_FMT fragmentLen = 1; } FormatStyle* self; self = (FormatStyle*)type->tp_alloc(type, fragmentLen); if (self){ self->style = style; Py_SET_SIZE(self, fragmentLen); } else { PyErr_NoMemory(); return nullptr; } return (PyObject*)self; } PyObject* FormatStyle_dealloc(FormatStyle *self){ Py_CLEAR(self->fmt); Py_CLEAR(self->defaults); Py_CLEAR(self->_const_format); Py_CLEAR(self->_const__dict__); for (int i = 0 ; i < self->ob_base.ob_size; i++){ Py_CLEAR(self->fragments[i].fragment); } Py_TYPE(self)->tp_free((PyObject*)self); return NULL; } PyObject* FormatStyle_repr(FormatStyle *self){ return PyUnicode_FromFormat("", self->fmt, self->style); } static PyMethodDef FormatStyle_methods[] = { {"usesTime", (PyCFunction)FormatStyle_usesTime, METH_NOARGS, "Get message"}, {"validate", (PyCFunction)FormatStyle_validate, METH_NOARGS, "Get message"}, {"format", (PyCFunction)FormatStyle_format, METH_O, "Get message"}, {NULL} }; PyTypeObject FormatStyleType = { PyVarObject_HEAD_INIT(NULL, 0) "picologging.FormatStyle", /* tp_name */ offsetof(FormatStyle, fragments), /* tp_basicsize */ sizeof(FormatFragment), /* tp_itemsize */ (destructor)FormatStyle_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ (reprfunc)FormatStyle_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , /* 
tp_flags */ PyDoc_STR("Formatter for log records."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ FormatStyle_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)FormatStyle_init, /* tp_init */ 0, /* tp_alloc */ FormatStyle_new, /* tp_new */ PyObject_Del, /* tp_free */ };python-picologging-0.9.4/src/picologging/formatstyle.hxx000066400000000000000000000031571467100674700235600ustar00rootroot00000000000000#include #include #include #include #include #include "compat.hxx" #ifndef PICOLOGGING_FORMATSTYLE_H #define PICOLOGGING_FORMATSTYLE_H enum FragmentType { Field_Name = 1, // Skip 0 incase there's an offset error Field_Msg, Field_Args, Field_LevelNo, Field_LevelName, Field_Pathname, Field_Filename, Field_Module, Field_Lineno, Field_FuncName, Field_Created, Field_Msecs, Field_RelativeCreated, Field_Thread, Field_ThreadName, Field_ProcessName, Field_Process, Field_ExcInfo, Field_ExcText, Field_StackInfo, Field_Message, Field_Asctime, // Special field Field_Unknown, LiteralFragment, }; typedef struct { FragmentType field; PyObject *fragment; } FormatFragment; typedef struct { PyObject_VAR_HEAD PyObject *fmt; PyObject *defaults; bool usesDefaultFmt; int style; PyObject* _const_format; PyObject* _const__dict__; FormatFragment fragments[1]; } FormatStyle; int FormatStyle_init(FormatStyle *self, PyObject *args, PyObject *kwds); PyObject* FormatStyle_usesTime(FormatStyle *self); PyObject* FormatStyle_validate(FormatStyle *self); PyObject* FormatStyle_format(FormatStyle *self, PyObject *record); PyObject* FormatStyle_dealloc(FormatStyle *self); PyObject* FormatStyle_new(PyTypeObject *type, PyObject *args, PyObject *kwds); extern PyTypeObject FormatStyleType; #define FormatStyle_CheckExact(op) Py_IS_TYPE(op, &FormatStyleType) typedef std::unordered_map 
FieldMap; #endif // PICOLOGGING_FORMATSTYLE_Hpython-picologging-0.9.4/src/picologging/formatter.cxx000066400000000000000000000346511467100674700232100ustar00rootroot00000000000000#include #include #include "picologging.hxx" #include "formatter.hxx" #include "formatstyle.hxx" #include "logrecord.hxx" PyObject* Formatter_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { Formatter* self = (Formatter*)type->tp_alloc(type, 0); if (self != NULL) { self->fmt = Py_None; self->dateFmt = Py_None; self->style = Py_None; self->_const_line_break = PyUnicode_FromString("\n"); self->_const_close = PyUnicode_FromString("close"); self->_const_getvalue = PyUnicode_FromString("getvalue"); self->_const_usesTime = PyUnicode_FromString("usesTime"); self->_const_format = PyUnicode_FromString("format"); } return (PyObject*)self; } int Formatter_init(Formatter *self, PyObject *args, PyObject *kwds){ PyObject *fmt = nullptr, *dateFmt = nullptr; int style = '%'; int validate = 1; static const char *kwlist[] = {"fmt", "datefmt", "style", "validate", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOCp", const_cast(kwlist), &fmt, &dateFmt, &style, &validate)) return -1; PyObject* styleType = nullptr; if (style == 0) style = '%'; switch (style){ case '%': case '{': /* Call the class object. 
*/ styleType = (PyObject*)&FormatStyleType; break; case '$': PyErr_Format(PyExc_NotImplementedError, "String Templates are not supported in picologging."); return -1; default: PyErr_Format(PyExc_ValueError, "Unknown style '%c'", style); return -1; } if (fmt == nullptr) fmt = Py_None; if (dateFmt == nullptr) dateFmt = Py_None; PyObject * style_c = PyUnicode_FromFormat("%c", style); PyObject * styleCls = PyObject_CallFunctionObjArgs(styleType, fmt, Py_None, style_c, NULL); Py_DECREF(style_c); if (styleCls == nullptr){ //PyErr_Format(PyExc_ValueError, "Could not initialize Style formatter class."); return -1; } self->style = styleCls; self->fmt = Py_NewRef(((FormatStyle*)(self->style))->fmt); self->usesTime = (FormatStyle_usesTime((FormatStyle*)self->style) == Py_True); self->dateFmt = Py_NewRef(dateFmt); if (dateFmt != Py_None) { self->dateFmtStr = PyUnicode_AsUTF8(self->dateFmt); if (self->dateFmtStr == nullptr) { return -1; } } else { self->dateFmtStr = nullptr; } if (validate){ if (PyObject_CallMethod(self->style, "validate", NULL) == nullptr){ Py_CLEAR(self->style); Py_CLEAR(self->fmt); Py_CLEAR(self->dateFmt); return -1; } } return 0; } PyObject* Formatter_format(Formatter *self, PyObject *record){ if (LogRecord_CheckExact(record) || LogRecord_Check(record)){ LogRecord* logRecord = (LogRecord*)record; if (LogRecord_writeMessage(logRecord) == -1){ return nullptr; } if (self->usesTime){ PyObject * asctime = Py_None; double createdInt; int createdFrac = std::modf(logRecord->created, &createdInt) * 1e3; std::time_t created = static_cast(createdInt); std::tm *ct = localtime(&created); if (self->dateFmt != Py_None){ char buf[100]; size_t len = strftime(buf, sizeof(buf), self->dateFmtStr, ct); asctime = PyUnicode_FromStringAndSize(buf, len); } else { char buf[100]; size_t len = strftime(buf, sizeof(buf), "%F %T" , ct); len += snprintf(buf + len, sizeof(buf) - len, ",%03d", createdFrac); asctime = PyUnicode_FromStringAndSize(buf, len); } Py_XDECREF(logRecord->asctime); 
logRecord->asctime = asctime; } PyObject* result = nullptr; if (FormatStyle_CheckExact(self->style)){ result = FormatStyle_format((FormatStyle*)self->style, record); } else { result = PyObject_CallMethod_ONEARG(self->style, self->_const_format, record); } if (result == nullptr) return nullptr; if (logRecord->excInfo != Py_None && logRecord->excText == Py_None){ if (!PyTuple_Check(logRecord->excInfo)) { PyErr_Format(PyExc_TypeError, "LogRecord.excInfo must be a tuple."); return nullptr; } PyObject* mod = PICOLOGGING_MODULE(); // borrowed reference PyObject* modDict = PyModule_GetDict(mod); // borrowed reference PyObject* print_exception = Py_NewRef(PyDict_GetItemString(modDict, "print_exception")); PyObject* sio_cls = Py_NewRef(PyDict_GetItemString(modDict, "StringIO")); PyObject* sio = PyObject_CallFunctionObjArgs(sio_cls, NULL); if (sio == nullptr){ Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in StringIO.__init__() } // TODO: Validate length of logRecord->excInfo is >=3 if (PyObject_CallFunctionObjArgs( print_exception, PyTuple_GetItem(logRecord->excInfo, 0), PyTuple_GetItem(logRecord->excInfo, 1), PyTuple_GetItem(logRecord->excInfo, 2), Py_None, sio, NULL) == nullptr) { Py_XDECREF(sio); Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in print_exception() } PyObject* s = PyObject_CallMethod_NOARGS(sio, self->_const_getvalue); if (s == nullptr){ Py_XDECREF(sio); Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in StringIO.getvalue() } if (PyObject_CallMethod_NOARGS(sio, self->_const_close) == nullptr){ Py_DECREF(s); Py_XDECREF(sio); Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in StringIO.close() } Py_DECREF(sio); Py_DECREF(sio_cls); Py_DECREF(print_exception); if (PYUNICODE_ENDSWITH(s, self->_const_line_break)){ PyObject* s2 = PyUnicode_Substring(s, 0, PyUnicode_GetLength(s) - 1); Py_DECREF(s); s = s2; } 
Py_XDECREF(logRecord->excText); logRecord->excText = s; // Use borrowed ref } if (logRecord->excText != Py_None){ if (!PYUNICODE_ENDSWITH(result, self->_const_line_break)){ PyUnicode_Append(&result, self->_const_line_break); if (result == nullptr) return nullptr; } PyUnicode_Append(&result, logRecord->excText); if (result == nullptr) return nullptr; } if (logRecord->stackInfo != Py_None && logRecord->stackInfo != Py_False ) { if (PyUnicode_Check(logRecord->stackInfo) ) { if (PyUnicode_GET_LENGTH(logRecord->stackInfo) > 0) { if (!PYUNICODE_ENDSWITH(result, self->_const_line_break)) { PyUnicode_Append(&result, self->_const_line_break); if (result == nullptr) return nullptr; } PyUnicode_Append(&result, logRecord->stackInfo); if (result == nullptr) return nullptr; } } else { PyObject* s = PyObject_Str(logRecord->stackInfo); if (s == nullptr){ return nullptr; // Got exception in str(stackInfo) } if (!PYUNICODE_ENDSWITH(result, self->_const_line_break)){ PyUnicode_Append(&result, self->_const_line_break); if (result == nullptr) return nullptr; } PyUnicode_Append(&result, s); if (result == nullptr) return nullptr; Py_DECREF(s); } } return result; } else { PyErr_SetString(PyExc_TypeError, "Argument must be a LogRecord"); return nullptr; } } PyObject* Formatter_usesTime(Formatter *self) { if (FormatStyle_CheckExact(self->style)){ return FormatStyle_usesTime((FormatStyle*)self->style); } else { return PyObject_CallMethod_NOARGS(self->style, self->_const_usesTime); } } PyObject* Formatter_formatMessage(Formatter *self, PyObject* record){ return PyObject_CallMethod_ONEARG(self->style, self->_const_format, record); } PyObject* Formatter_formatStack(Formatter *self, PyObject *stackInfo) { // The base implementation just returns the value passed in. 
return Py_NewRef(stackInfo); } PyObject* Formatter_formatException(Formatter *self, PyObject *excInfo) { PyObject* mod = PICOLOGGING_MODULE(); // borrowed reference PyObject* modDict = PyModule_GetDict(mod); // borrowed reference PyObject* print_exception = PyDict_GetItemString(modDict, "print_exception"); // PyDict_GetItemString returns a borrowed reference Py_XINCREF(print_exception); PyObject* sio_cls = PyDict_GetItemString(modDict, "StringIO"); Py_XINCREF(sio_cls); PyObject* sio = PyObject_CallFunctionObjArgs(sio_cls, NULL); if (sio == nullptr){ Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in StringIO.__init__() } if (PyObject_CallFunctionObjArgs( print_exception, PyTuple_GetItem(excInfo, 0), PyTuple_GetItem(excInfo, 1), PyTuple_GetItem(excInfo, 2), Py_None, sio, NULL) == nullptr) { Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in print_exception() } PyObject* s = PyObject_CallMethod_NOARGS(sio, self->_const_getvalue); if (s == nullptr){ Py_XDECREF(sio); Py_XDECREF(sio_cls); Py_XDECREF(print_exception); return nullptr; // Got exception in StringIO.getvalue() } PyObject_CallMethod_NOARGS(sio, self->_const_close); Py_DECREF(sio); Py_DECREF(sio_cls); Py_DECREF(print_exception); if (PYUNICODE_ENDSWITH(s, self->_const_line_break)){ PyObject* s2 = PyUnicode_Substring(s, 0, PyUnicode_GetLength(s) - 1); Py_DECREF(s); s = s2; } return s; } PyObject* Formatter_repr(Formatter *self) { return PyUnicode_FromFormat("<%s: fmt='%U'>", _PyType_Name(Py_TYPE(self)), self->fmt); } PyObject* Formatter_dealloc(Formatter *self) { Py_CLEAR(self->fmt); Py_CLEAR(self->dateFmt); Py_CLEAR(self->style); Py_CLEAR(self->_const_line_break); Py_CLEAR(self->_const_close); Py_CLEAR(self->_const_getvalue); Py_CLEAR(self->_const_usesTime); Py_CLEAR(self->_const_format); Py_TYPE(self)->tp_free((PyObject*)self); return NULL; } static PyMethodDef Formatter_methods[] = { {"format", (PyCFunction)Formatter_format, METH_O, "Format 
record into log event string"}, {"usesTime", (PyCFunction)Formatter_usesTime, METH_NOARGS, "Return True if the format uses the creation time of the record."}, {"formatMessage", (PyCFunction)Formatter_formatMessage, METH_O, "Format the message for a record."}, {"formatStack", (PyCFunction)Formatter_formatStack, METH_O, "Format the stack for a record."}, {"formatException", (PyCFunction)Formatter_formatException, METH_O, "Format and return the specified exception information as a string."}, {NULL} }; static PyMemberDef Formatter_members[] = { {"_fmt", T_OBJECT_EX, offsetof(Formatter, fmt), 0, "Format string"}, {"_style", T_OBJECT_EX, offsetof(Formatter, style), 0, "String style formatter"}, {"datefmt", T_OBJECT_EX, offsetof(Formatter, dateFmt), 0, "Date format string"}, {NULL} }; PyTypeObject FormatterType = { PyObject_HEAD_INIT(NULL) "picologging.Formatter", /* tp_name */ sizeof(Formatter), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)Formatter_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ (reprfunc)Formatter_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , /* tp_flags */ PyDoc_STR("Formatter for log records."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Formatter_methods, /* tp_methods */ Formatter_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Formatter_init, /* tp_init */ 0, /* tp_alloc */ Formatter_new, /* tp_new */ PyObject_Del, /* tp_free */ }; 
python-picologging-0.9.4/src/picologging/formatter.hxx000066400000000000000000000017051467100674700232070ustar00rootroot00000000000000#include #include #include #include "compat.hxx" #ifndef PICOLOGGING_FORMATTER_H #define PICOLOGGING_FORMATTER_H typedef struct { PyObject_HEAD PyObject *fmt; PyObject *dateFmt; PyObject *style; bool usesTime; const char* dateFmtStr; PyObject *_const_line_break; PyObject *_const_close; PyObject *_const_getvalue; PyObject *_const_usesTime; PyObject *_const_format; } Formatter; int Formatter_init(Formatter *self, PyObject *args, PyObject *kwds); PyObject* Formatter_format(Formatter *self, PyObject *record); PyObject* Formatter_dealloc(Formatter *self); PyObject* Formatter_usesTime(Formatter *self); PyObject* Formatter_formatMessage(Formatter *self, PyObject *record); PyObject* Formatter_formatStack(Formatter *self, PyObject *stackInfo); extern PyTypeObject FormatterType; #define Formatter_CheckExact(op) Py_IS_TYPE(op, &FormatterType) #endif // PICOLOGGING_FORMATTER_Hpython-picologging-0.9.4/src/picologging/handler.cxx000066400000000000000000000206101467100674700226100ustar00rootroot00000000000000#include #include "handler.hxx" #include "picologging.hxx" #include "formatter.hxx" #include "streamhandler.hxx" PyObject* Handler_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { Handler* self = (Handler*)FiltererType.tp_new(type, args, kwds); if (self != NULL) { self->lock = new std::recursive_mutex(); self->_const_emit = PyUnicode_FromString("emit"); self->_const_format = PyUnicode_FromString("format"); self->name = Py_None; self->formatter = Py_NewRef(Py_None); } return (PyObject*)self; } int Handler_init(Handler *self, PyObject *args, PyObject *kwds){ if (FiltererType.tp_init((PyObject *) self, args, kwds) < 0) return -1; PyObject *name = Py_None; unsigned short level = LOG_LEVEL_NOTSET; static const char *kwlist[] = {"name", "level", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OH", const_cast(kwlist), &name, &level)){ 
return -1; } self->name = Py_NewRef(name); self->level = level; return 0; } PyObject* Handler_dealloc(Handler *self) { Py_CLEAR(self->name); Py_CLEAR(self->formatter); Py_CLEAR(self->_const_emit); Py_CLEAR(self->_const_format); delete self->lock; FiltererType.tp_dealloc((PyObject *)self); return nullptr; } PyObject* Handler_emit(Handler *self, PyObject *record){ PyErr_SetString(PyExc_NotImplementedError, "emit must be implemented by Handler subclasses"); return NULL; } PyObject* Handler_handle(Handler *self, PyObject *record) { if (Filterer_filter(&self->filterer, (PyObject*)record) != Py_True) Py_RETURN_NONE; try { self->lock->lock(); } catch (const std::exception& e) { PyErr_Format(PyExc_RuntimeError, "Cannot acquire thread lock, %s", e.what()); return nullptr; } PyObject* result = nullptr; if (StreamHandler_CheckExact(((PyObject*)self))){ PyObject* args[1] = {record}; result = StreamHandler_emit((StreamHandler*)self, args, 1); } else { result = PyObject_CallMethod_ONEARG((PyObject*)self, self->_const_emit, record); } self->lock->unlock(); return result == nullptr ? nullptr : Py_True; } PyObject* Handler_setLevel(Handler *self, PyObject *level){ if (PyLong_Check(level)){ self->level = PyLong_AsUnsignedLong(level); Py_RETURN_NONE; } else { PyErr_SetString(PyExc_TypeError, "level must be an integer"); return nullptr; } } PyObject* Handler_format(Handler *self, PyObject *record){ if (self->formatter == Py_None){ // Lazily initialize default formatter.. 
Py_DECREF(self->formatter); self->formatter = PyObject_CallFunctionObjArgs((PyObject*)&FormatterType, NULL); if (self->formatter == nullptr){ // Reset to none if we failed to initialize self->formatter = Py_None; Py_INCREF(self->formatter); return nullptr; } } if (Formatter_CheckExact(self->formatter)) { return Formatter_format((Formatter*) self->formatter, record); } else { return PyObject_CallMethod_ONEARG(self->formatter, self->_const_format, record); } } PyObject* Handler_setFormatter(Handler *self, PyObject *formatter) { Py_XDECREF(self->formatter); self->formatter = Py_NewRef(formatter); Py_RETURN_NONE; } PyObject* Handler_acquire(Handler *self){ self->lock->lock(); Py_RETURN_NONE; } PyObject* Handler_release(Handler *self){ self->lock->unlock(); Py_RETURN_NONE; } PyObject* Handler_flush(Handler *self){ //Abstract method. does nothing. Py_RETURN_NONE; } PyObject* Handler_close(Handler *self){ // TODO: Decide if we want a global dictionary of handlers. Py_RETURN_NONE; } PyObject* Handler_handleError(Handler *self, PyObject *record){ // TODO: Develop this behaviour further. 
PyErr_Print(); Py_RETURN_NONE; } PyObject* Handler_getName(Handler *self){ Py_INCREF(self->name); return self->name; } PyObject* Handler_setName(Handler *self, PyObject *name){ Py_XDECREF(self->name); self->name = Py_NewRef(name); Py_RETURN_NONE; } PyObject* Handler_createLock(Handler *self){ // Lock is instantiated by constructor, just have this method for compatibility with logging.Handler Py_RETURN_NONE; } PyObject* Handler_repr(Handler *self) { std::string level = _getLevelName(self->level); return PyUnicode_FromFormat("<%s (%s)>", _PyType_Name(Py_TYPE(self)), level.c_str()); } static PyMethodDef Handler_methods[] = { {"setLevel", (PyCFunction)Handler_setLevel, METH_O, "Set the level of the handler."}, {"setFormatter", (PyCFunction)Handler_setFormatter, METH_O, "Set the formatter of the handler."}, {"handle", (PyCFunction)Handler_handle, METH_O, "Handle a record."}, {"emit", (PyCFunction)Handler_emit, METH_O, "Emit a record."}, {"format", (PyCFunction)Handler_format, METH_O, "Format a record."}, {"acquire", (PyCFunction)Handler_acquire, METH_NOARGS, "Acquire the lock."}, {"release", (PyCFunction)Handler_release, METH_NOARGS, "Release the lock."}, {"flush", (PyCFunction)Handler_flush, METH_NOARGS, "Ensure all logging output has been flushed."}, {"close", (PyCFunction)Handler_close, METH_NOARGS, "Tidy up any resources used by the handler."}, {"handleError", (PyCFunction)Handler_handleError, METH_O, "Handle an error during an emit()."}, {"get_name", (PyCFunction)Handler_getName, METH_NOARGS, "Get the name of the handler."}, {"set_name", (PyCFunction)Handler_setName, METH_O, "Set the name of the handler."}, {"createLock", (PyCFunction)Handler_createLock, METH_NOARGS, "Create a new lock instance."}, {NULL} }; static PyMemberDef Handler_members[] = { {"name", T_OBJECT_EX, offsetof(Handler, name), 0, "Handler name"}, {"level", T_USHORT, offsetof(Handler, level), 0, "Handler level"}, {"formatter", T_OBJECT_EX, offsetof(Handler, formatter), 0, "Handler formatter"}, 
{NULL} }; PyTypeObject HandlerType = { PyObject_HEAD_INIT(NULL) "picologging.Handler", /* tp_name */ sizeof(Handler), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)Handler_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ (reprfunc)Handler_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , /* tp_flags */ PyDoc_STR("Handler interface."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Handler_methods, /* tp_methods */ Handler_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Handler_init, /* tp_init */ 0, /* tp_alloc */ Handler_new, /* tp_new */ PyObject_Del, /* tp_free */ }; python-picologging-0.9.4/src/picologging/handler.hxx000066400000000000000000000017721467100674700226250ustar00rootroot00000000000000#include #include "filterer.hxx" #include #ifndef PICOLOGGING_HANDLER_H #define PICOLOGGING_HANDLER_H typedef struct { Filterer filterer; PyObject *name; unsigned short level; PyObject *formatter; std::recursive_mutex *lock; PyObject* _const_emit; PyObject* _const_format; } Handler; int Handler_init(Handler *self, PyObject *args, PyObject *kwds); PyObject* Handler_dealloc(Handler *self); PyObject* Handler_emit(Handler *self, PyObject *record); PyObject* Handler_handle(Handler *self, PyObject *record); PyObject* Handler_setLevel(Handler *self, PyObject *level); PyObject* Handler_setFormatter(Handler *self, PyObject *formatter); PyObject* Handler_format(Handler *self, PyObject *record); PyObject* Handler_acquire(Handler *self); PyObject* Handler_release(Handler 
*self); extern PyTypeObject HandlerType; #define Handler_CheckExact(op) Py_IS_TYPE(op, &HandlerType) #define Handler_Check(op) PyObject_TypeCheck(op, &HandlerType) #endif // PICOLOGGING_HANDLER_H python-picologging-0.9.4/src/picologging/handlers.py000066400000000000000000001122601467100674700226240ustar00rootroot00000000000000import os import pickle import queue import re import socket import struct import threading import time import picologging _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day class WatchedFileHandler(picologging.FileHandler): """ A handler for logging to a file, which watches the file to see if it has changed while in use. This can happen because of usage of programs such as newsyslog and logrotate which perform log file rotation. This handler, intended for use under Unix, watches the file to see if it has changed since the last emit. (A file has changed if its device or inode have changed.) If it has changed, the old file stream is closed, and the file opened to get a new stream. This handler is not appropriate for use under Windows, because under Windows open files cannot be moved or renamed - logging opens the files with exclusive locks - and so there is no need for such a handler. Furthermore, ST_INO is not supported under Windows; stat always returns zero for this value. """ def __init__(self, filename, mode="a", encoding=None, delay=False): picologging.FileHandler.__init__( self, filename, mode=mode, encoding=encoding, delay=delay ) self.dev, self.ino = -1, -1 self._statstream() def _statstream(self): if self.stream: sres = os.fstat(self.stream.fileno()) self.dev, self.ino = sres.st_dev, sres.st_ino def reopenIfNeeded(self): """ Reopen log file if needed. Checks if the underlying file has changed, and if it has, close the old stream and reopen the file to get the current stream. 
""" try: sres = os.stat(self.baseFilename) except FileNotFoundError: sres = None if not sres or sres.st_dev != self.dev or sres.st_ino != self.ino: if self.stream is not None: self.stream.flush() self.stream.close() self.stream = None # See Issue #21742: _open () might fail. self.stream = self._open() self._statstream() def emit(self, record): """ Emit a record. If underlying file has changed, reopen the file before emitting the record to it. """ self.reopenIfNeeded() picologging.FileHandler.emit(self, record) class BaseRotatingHandler(picologging.FileHandler): """ Base class for handlers that rotate log files at a certain point. Not meant to be instantiated directly. Instead, use RotatingFileHandler or TimedRotatingFileHandler. """ namer = None rotator = None def __init__(self, filename, mode, encoding=None, delay=False): """ Use the specified filename for streamed logging """ picologging.FileHandler.__init__( self, filename, mode=mode, encoding=encoding, delay=delay ) self.mode = mode self.encoding = encoding def shouldRollover(self, record): """ Determine if rollover should occur. Should be implemented in inherited classes. """ def doRollover(self, record): """ Do a rollover. Should be implemented in inherited classes. """ def emit(self, record): """ Emit a record. Output the record to the file, catering for rollover as described in doRollover(). """ try: if self.shouldRollover(record): self.doRollover() picologging.FileHandler.emit(self, record) except Exception: self.handleError(record) def rotation_filename(self, default_name): """ Modify the filename of a log file when rotating. This is provided so that a custom filename can be provided. The default implementation calls the 'namer' attribute of the handler, if it's callable, passing the default name to it. If the attribute isn't callable (the default is None), the name is returned unchanged. :param default_name: The default name for the log file. 
""" if not callable(self.namer): result = default_name else: result = self.namer(default_name) return result def rotate(self, source, dest): """ When rotating, rotate the current log. The default implementation calls the 'rotator' attribute of the handler, if it's callable, passing the source and dest arguments to it. If the attribute isn't callable (the default is None), the source is simply renamed to the destination. :param source: The source filename. This is normally the base filename, e.g. 'test.log' :param dest: The destination filename. This is normally what the source is rotated to, e.g. 'test.log.1'. """ if not callable(self.rotator): # Issue: https://bugs.python.org/issue18940 # A file may not have been created if delay is True. if os.path.exists(source): os.rename(source, dest) else: self.rotator(source, dest) class RotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a set of files, which switches from one file to the next when the current file reaches a certain size. """ def __init__( self, filename, mode="a", maxBytes=0, backupCount=0, encoding=None, delay=False ): """ Open the specified file and use it as the stream for logging. By default, the file grows indefinitely. You can specify particular values of maxBytes and backupCount to allow the file to rollover at a predetermined size. Rollover occurs whenever the current log file is nearly maxBytes in length. If backupCount is >= 1, the system will successively create new files with the same pathname as the base file, but with extensions ".1", ".2" etc. appended to it. For example, with a backupCount of 5 and a base file name of "app.log", you would get "app.log", "app.log.1", "app.log.2", ... through to "app.log.5". The file being written to is always "app.log" - when it gets filled up, it is closed and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. exist, then they are renamed to "app.log.2", "app.log.3" etc. respectively. 
If maxBytes is zero, rollover never occurs. """ # If rotation/rollover is wanted, it doesn't make sense to use another # mode. If for example 'w' were specified, then if there were multiple # runs of the calling application, the logs from previous runs would be # lost if the 'w' is respected, because the log file would be truncated # on each run. if maxBytes > 0: mode = "a" BaseRotatingHandler.__init__( self, filename, mode, encoding=encoding, delay=delay ) self.maxBytes = maxBytes self.backupCount = backupCount def doRollover(self): """ Do a rollover, as described in __init__(). """ if self.stream: self.stream.close() self.stream = None if self.backupCount > 0: for i in range(self.backupCount - 1, 0, -1): sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) dfn = self.rotation_filename("%s.%d" % (self.baseFilename, i + 1)) if os.path.exists(sfn): if os.path.exists(dfn): os.remove(dfn) os.rename(sfn, dfn) dfn = self.rotation_filename(self.baseFilename + ".1") if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if not self.delay: self.stream = self._open() def shouldRollover(self, record): """ Determine if rollover should occur. Basically, see if the supplied record would cause the file to exceed the size limit we have. """ # See bpo-45401: Never rollover anything other than regular files if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename): return False if self.stream is None: self.stream = self._open() if self.maxBytes > 0: msg = "%s\n" % self.format(record) self.stream.seek(0, 2) # Due to non-posix-compliant Windows feature if self.stream.tell() + len(msg) >= self.maxBytes: return True return False class TimedRotatingFileHandler(BaseRotatingHandler): """ Handler for logging to a file, rotating the log file at certain timed intervals. If backupCount is > 0, when rollover is done, no more than backupCount files are kept - the oldest ones are deleted. 
""" def __init__( self, filename, when="h", interval=1, backupCount=0, encoding=None, delay=False, utc=False, atTime=None, ): BaseRotatingHandler.__init__( self, filename, "a", encoding=encoding, delay=delay ) self.when = when.upper() self.backupCount = backupCount self.utc = utc self.atTime = atTime # Calculate the real rollover interval, which is just the number of # seconds between rollovers. Also set the filename suffix used when # a rollover occurs. Current 'when' events supported: # S - Seconds # M - Minutes # H - Hours # D - Days # midnight - roll over at midnight # W{0-6} - roll over on a certain day; 0 - Monday # # Case of the 'when' specifier is not important; lower or upper case # will work. if self.when == "S": self.interval = 1 # one second self.suffix = "%Y-%m-%d_%H-%M-%S" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$" elif self.when == "M": self.interval = 60 # one minute self.suffix = "%Y-%m-%d_%H-%M" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$" elif self.when == "H": self.interval = 60 * 60 # one hour self.suffix = "%Y-%m-%d_%H" self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$" elif self.when == "D" or self.when == "MIDNIGHT": self.interval = 60 * 60 * 24 # one day self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" elif self.when.startswith("W"): self.interval = 60 * 60 * 24 * 7 # one week if len(self.when) != 2: raise ValueError( "You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when ) if self.when[1] < "0" or self.when[1] > "6": raise ValueError( "Invalid day specified for weekly rollover: %s" % self.when ) self.dayOfWeek = int(self.when[1]) self.suffix = "%Y-%m-%d" self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" else: raise ValueError("Invalid rollover interval specified: %s" % self.when) self.extMatch = re.compile(self.extMatch, re.ASCII) self.interval = self.interval * interval # multiply by units requested # The following line added because the filename 
passed in could be a # path object (see Issue #27493), but self.baseFilename will be a string filename = self.baseFilename if os.path.exists(filename): t = os.stat(filename).st_mtime else: t = int(time.time()) self.rollover_at = self.computeRollover(t) def computeRollover(self, current_time): """ Work out the rollover time based on the specified time. """ result = current_time + self.interval # If we are rolling over at midnight or weekly, then the interval is already known. # What we need to figure out is WHEN the next interval is. In other words, # if you are rolling over at midnight, then your base interval is 1 day, # but you want to start that one day clock at midnight, not now. So, we # have to fudge the rollover_at value in order to trigger the first rollover # at the right time. After that, the regular interval will take care of # the rest. Note that this code doesn"t care about leap seconds. :) if self.when == "MIDNIGHT" or self.when.startswith("W"): # This could be done with less code, but I wanted it to be clear if self.utc: t = time.gmtime(current_time) else: t = time.localtime(current_time) current_hour = t[3] current_minute = t[4] current_second = t[5] current_day = t[6] # r is the number of seconds left between now and the next rotation if self.atTime is None: rotate_ts = _MIDNIGHT else: rotate_ts = ( self.atTime.hour * 60 + self.atTime.minute ) * 60 + self.atTime.second r = rotate_ts - ((current_hour * 60 + current_minute) * 60 + current_second) if r < 0: # Rotate time is before the current time (for example when # self.rotateAt is 13:45 and it now 14:15), rotation is # tomorrow. r += _MIDNIGHT current_day = (current_day + 1) % 7 result = current_time + r # If we are rolling over on a certain day, add in the number of days until # the next rollover, but offset by 1 since we just calculated the time # until the next day starts. 
There are three cases: # Case 1) The day to rollover is today; in this case, do nothing # Case 2) The day to rollover is further in the interval (i.e., today is # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to # next rollover is simply 6 - 2 - 1, or 3. # Case 3) The day to rollover is behind us in the interval (i.e., today # is day 5 (Saturday) and rollover is on day 3 (Thursday). # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the # number of days left in the current week (1) plus the number # of days in the next week until the rollover day (3). # The calculations described in 2) and 3) above need to have a day added. # This is because the above time calculation takes us to midnight on this # day, i.e. the start of the next day. if self.when.startswith("W"): day = current_day # 0 is Monday if day != self.dayOfWeek: if day < self.dayOfWeek: daysToWait = self.dayOfWeek - day else: daysToWait = 6 - day + self.dayOfWeek + 1 now_rollover_at = result + (daysToWait * (60 * 60 * 24)) if not self.utc: dst_now = t[-1] dstAtRollover = time.localtime(now_rollover_at)[-1] if dst_now != dstAtRollover: if ( not dst_now ): # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 now_rollover_at += addend result = now_rollover_at return result def shouldRollover(self, record): """ Determine if rollover should occur. record is not used, as we are just comparing times, but it is needed so the method signatures are the same """ # See bpo-45401: Never rollover anything other than regular files if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename): return False t = int(time.time()) if t >= self.rollover_at: return True return False def getFilesToDelete(self): """ Determine the files to delete when rolling over. More specific than the earlier method, which just used glob.glob(). 
""" dir_name, base_name = os.path.split(self.baseFilename) file_names = os.listdir(dir_name) result = [] # See bpo-44753: Don't use the extension when computing the prefix. n, e = os.path.splitext(base_name) prefix = n + "." plen = len(prefix) for file_name in file_names: if self.namer is None: # Our files will always start with base_name if not file_name.startswith(base_name): continue else: # Our files could be just about anything after custom naming, but # likely candidates are of the form # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log if ( not file_name.startswith(base_name) and file_name.endswith(e) and len(file_name) > (plen + 1) and not file_name[plen + 1].isdigit() ): continue if file_name[:plen] == prefix: suffix = file_name[plen:] # See bpo-45628: The date/time suffix could be anywhere in the # filename parts = suffix.split(".") for part in parts: if self.extMatch.match(part): result.append(os.path.join(dir_name, file_name)) break if len(result) < self.backupCount: result = [] else: result.sort() result = result[: len(result) - self.backupCount] return result def doRollover(self): """ do a rollover; in this case, a date/time stamp is appended to the filename when the rollover happens. However, you want the file to be named for the start of the interval, not the current time. If there is a backup count, then we have to get a list of matching filenames, sort them and remove the one with the oldest suffix. """ if self.stream: self.stream.close() self.stream = None # get the time that this sequence started at and make it a TimeTuple current_time = int(time.time()) dst_now = time.localtime(current_time)[-1] t = self.rollover_at - self.interval if self.utc: time_tuple = time.gmtime(t) else: time_tuple = time.localtime(t) dst_then = time_tuple[-1] if dst_now != dst_then: if dst_now: addend = 3600 else: addend = -3600 time_tuple = time.localtime(t + addend) dfn = self.rotation_filename( self.baseFilename + "." 
+ time.strftime(self.suffix, time_tuple) ) if os.path.exists(dfn): os.remove(dfn) self.rotate(self.baseFilename, dfn) if self.backupCount > 0: for s in self.getFilesToDelete(): os.remove(s) if not self.delay: self.stream = self._open() now_rollover_at = self.computeRollover(current_time) while now_rollover_at <= current_time: now_rollover_at = now_rollover_at + self.interval # If DST changes and midnight or weekly rollover, adjust for this. if (self.when == "MIDNIGHT" or self.when.startswith("W")) and not self.utc: dstAtRollover = time.localtime(now_rollover_at)[-1] if dst_now != dstAtRollover: if ( not dst_now ): # DST kicks in before next rollover, so we need to deduct an hour addend = -3600 else: # DST bows out before next rollover, so we need to add an hour addend = 3600 now_rollover_at += addend self.rollover_at = now_rollover_at class QueueHandler(picologging.Handler): """ This handler sends events to a queue. Typically, it would be used together with a multiprocessing Queue to centralise logging to file in one process (in a multi-process application), so as to avoid file write contention between processes. This code is new in Python 3.2, but this class can be copy pasted into user code for use with earlier Python versions. """ def __init__(self, queue): """ Initialise an instance, using the passed queue. """ super().__init__() self.queue = queue def enqueue(self, record): """ Enqueue a record. The base implementation uses put_nowait. You may want to override this method if you want to use blocking, timeouts or custom queue implementations. """ self.queue.put_nowait(record) def prepare(self, record): """ Prepare a record for queuing. The object returned by this method is enqueued. The base implementation formats the record to merge the message and arguments, and removes unpickleable items from the record in-place. 
Specifically, it overwrites the record's `msg` and `message` attributes with the merged message (obtained by calling the handler's `format` method), and sets the `args`, `exc_info` and `exc_text` attributes to None. You might want to override this method if you want to convert the record to a dict or JSON string, or send a modified copy of the record while leaving the original intact. """ # The format operation gets traceback text into record.exc_text # (if there's exception data), and also returns the formatted # message. We can then use this to replace the original # msg + args, as these might be unpickleable. We also zap the # exc_info, exc_text and stack_info attributes, as they are no longer # needed and, if not None, will typically not be pickleable. msg = self.format(record) # bpo-35726: make copy of record to avoid affecting other handlers in the chain. record = picologging.LogRecord( record.name, record.levelno, record.pathname, record.lineno, msg, None, None, record.funcName, record.stack_info, ) return record def emit(self, record: picologging.LogRecord): """ Emit a record. Writes the LogRecord to the queue, copying it first. """ try: self.enqueue(self.prepare(record)) except Exception: self.handleError(record) class QueueListener: """ This class implements an internal threaded listener which watches for LogRecords being added to a queue, removes them and passes them to a list of handlers for processing. """ _sentinel = None def __init__(self, queue, *handlers, respect_handler_level=False): """ Initialise an instance with the specified queue and handlers. """ self.queue = queue self.handlers = handlers self._thread = None self.respect_handler_level = respect_handler_level def dequeue(self, block): """ Dequeue a record and return it, optionally blocking. The base implementation uses get. You may want to override this method if you want to use timeouts or work with custom queue implementations. 
""" return self.queue.get(block) def start(self): """ Start the listener. This starts up a background thread to monitor the queue for LogRecords to process. """ self._thread = t = threading.Thread(target=self._monitor) t.daemon = True t.start() def prepare(self, record): """ Prepare a record for handling. This method just returns the passed-in record. You may want to override this method if you need to do any custom marshalling or manipulation of the record before passing it to the handlers. """ return record def handle(self, record): """ Handle a record. This just loops through the handlers offering them the record to handle. """ record = self.prepare(record) for handler in self.handlers: if not self.respect_handler_level: process = True else: process = record.levelno >= handler.level if process: handler.handle(record) def _monitor(self): """ Monitor the queue for records, and ask the handler to deal with them. This method runs on a separate, internal thread. The thread will terminate if it sees a sentinel object in the queue. """ q = self.queue has_task_done = hasattr(q, "task_done") while True: try: record = self.dequeue(True) if record is self._sentinel: if has_task_done: q.task_done() break self.handle(record) if has_task_done: q.task_done() except queue.Empty: break def enqueue_sentinel(self): """ This is used to enqueue the sentinel record. The base implementation uses put_nowait. You may want to override this method if you want to use timeouts or work with custom queue implementations. """ self.queue.put_nowait(self._sentinel) def stop(self): """ Stop the listener. This asks the thread to terminate, and then waits for it to do so. Note that if you don't call this before your application exits, there may be some records still left on the queue, which won't be processed. """ self.enqueue_sentinel() self._thread.join() self._thread = None class BufferingHandler(picologging.Handler): """ A handler class which buffers logging records in memory. 
Whenever each record is added to the buffer, a check is made to see if the buffer should be flushed. If it should, then flush() is expected to do what's needed. """ def __init__(self, capacity): """ Initialize the handler with the buffer size. """ picologging.Handler.__init__(self) self.capacity = capacity self.buffer = [] def emit(self, record): """ Emit a record. Append the record and call flush() if criteria is met. """ self.buffer.append(record) if len(self.buffer) >= self.capacity: self.flush() def flush(self): """ Override to implement custom flushing behaviour. This version just zaps the buffer to empty. """ self.acquire() try: self.buffer.clear() finally: self.release() def close(self): """ Close the handler. This version just flushes and chains to the parent class' close(). """ try: self.flush() finally: picologging.Handler.close(self) class MemoryHandler(BufferingHandler): """ A handler class which buffers logging records in memory, periodically flushing them to a target handler. Flushing occurs whenever the buffer is full, or when an event of a certain severity or greater is seen. """ def __init__( self, capacity, flushLevel=picologging.ERROR, target=None, flushOnClose=True ): """ Initialize the handler with the buffer size, the level at which flushing should occur and an optional target. Note that without a target being set either here or via setTarget(), a MemoryHandler is no use to anyone! The ``flushOnClose`` argument is ``True`` for backward compatibility reasons - the old behaviour is that when the handler is closed, the buffer is flushed, even if the flush level hasn't been exceeded nor the capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. """ BufferingHandler.__init__(self, capacity) self.flushLevel = flushLevel self.target = target # See Issue #26559 for why this has been added self.flushOnClose = flushOnClose def setTarget(self, target): """ Set the target handler for this handler. 
""" self.acquire() try: self.target = target finally: self.release() def flush(self): """ For a MemoryHandler, flushing means just sending the buffered records to the target, if there is one. Override if you want different behaviour. The record buffer is also cleared by this operation. """ self.acquire() try: if self.target: for record in self.buffer: self.target.handle(record) self.buffer.clear() finally: self.release() def close(self): """ Flush, if appropriately configured, set the target to None and lose the buffer. """ try: if self.flushOnClose: self.flush() finally: self.acquire() try: self.target = None BufferingHandler.close(self) finally: self.release() def emit(self, record): """ Emit a record. Append the record and call flush() if criteria is met. """ self.buffer.append(record) if (len(self.buffer) >= self.capacity) or (record.levelno >= self.flushLevel): self.flush() class SocketHandler(picologging.Handler): """ A handler class which writes logging records, in pickle format, to a streaming socket. The socket is kept open across logging calls. If the peer resets it, an attempt is made to reconnect on the next call. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. When the attribute *closeOnError* is set to True - if a socket error occurs, the socket is silently closed and then reopened on the next logging call. """ picologging.Handler.__init__(self) self.host = host self.port = port if port is None: self.address = host else: self.address = (host, port) self.sock = None self.closeOnError = False self.retryTime = None # Exponential backoff parameters. 
self.retryStart = 1.0 self.retryMax = 30.0 self.retryFactor = 2.0 def makeSocket(self, timeout=1): """ A factory method which allows subclasses to define the precise type of socket they want. """ if self.port is not None: result = socket.create_connection(self.address, timeout=timeout) else: result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) result.settimeout(timeout) try: result.connect(self.address) except OSError: result.close() # Issue 19182 raise return result def createSocket(self): """ Try to create a socket, using an exponential backoff with a max retry time. """ now = time.time() # Either retryTime is None, in which case this # is the first time back after a disconnect, or # we've waited long enough. if self.retryTime is None: attempt = True else: attempt = now >= self.retryTime if attempt: try: self.sock = self.makeSocket() self.retryTime = None # next time, no delay before trying except OSError: # Creation failed, so set the retry time and return. if self.retryTime is None: self.retryPeriod = self.retryStart else: self.retryPeriod = self.retryPeriod * self.retryFactor if self.retryPeriod > self.retryMax: self.retryPeriod = self.retryMax self.retryTime = now + self.retryPeriod def send(self, s): """ Send a pickled string to the socket. This function allows for partial sends which can happen when the network is busy. """ if self.sock is None: self.createSocket() # self.sock can be None either because we haven't reached the retry # time yet, or because we have reached the retry time and retried, # but are still unable to connect. if self.sock: try: self.sock.sendall(s) except OSError: # pragma: no cover self.sock.close() self.sock = None # so we can call createSocket next time def makePickle(self, record): """ Pickles the record in binary format with a length prefix, and returns it ready for transmission across the socket. """ ei = record.exc_info if ei: # just to get traceback text into record.exc_text ... 
self.format(record) # See issue #14436: If msg or args are objects, they may not be # available on the receiving end. So we convert the msg % args # to a string, save it as msg and zap the args. d = dict(record.__dict__) d["msg"] = record.getMessage() d["args"] = None d["exc_info"] = None # Issue #25685: delete 'message' if present: redundant with 'msg' d.pop("message", None) s = pickle.dumps(d, 1) slen = struct.pack(">L", len(s)) return slen + s def handleError(self, record): """ Handle an error during logging. An error has occurred during logging. Most likely cause - connection lost. Close the socket so that we can retry on the next event. """ if self.closeOnError and self.sock: self.sock.close() self.sock = None # try to reconnect next time else: picologging.Handler.handleError(self, record) def emit(self, record): """ Emit a record. Pickles the record and writes it to the socket in binary format. If there is an error with the socket, silently drop the packet. If there was a problem with the socket, re-establishes the socket. """ try: s = self.makePickle(record) self.send(s) except Exception: self.handleError(record) def close(self): """ Closes the socket. """ self.acquire() try: sock = self.sock if sock: self.sock = None sock.close() picologging.Handler.close(self) finally: self.release() class DatagramHandler(SocketHandler): """ A handler class which writes logging records, in pickle format, to a datagram socket. The pickle which is sent is that of the LogRecord's attribute dictionary (__dict__), so that the receiver does not need to have the logging module installed in order to process the logging event. To unpickle the record at the receiving end into a LogRecord, use the makeLogRecord function. """ def __init__(self, host, port): """ Initializes the handler with a specific host address and port. 
""" SocketHandler.__init__(self, host, port) self.closeOnError = False def makeSocket(self): """ The factory method of SocketHandler is here overridden to create a UDP socket (SOCK_DGRAM). """ if self.port is None: family = socket.AF_UNIX else: family = socket.AF_INET s = socket.socket(family, socket.SOCK_DGRAM) return s def send(self, s): """ Send a pickled string to a socket. This function no longer allows for partial sends which can happen when the network is busy - UDP does not guarantee delivery and can deliver packets out of sequence. """ if self.sock is None: self.createSocket() self.sock.sendto(s, self.address) python-picologging-0.9.4/src/picologging/handlers.pyi000066400000000000000000000110331467100674700227710ustar00rootroot00000000000000from datetime import datetime from queue import Queue, SimpleQueue from socket import socket from typing import Any, Callable, Pattern from _typeshed import StrPath from picologging import FileHandler, Handler, LogRecord class WatchedFileHandler(FileHandler): baseFilename: str # undocumented mode: str # undocumented encoding: str | None # undocumented delay: bool # undocumented errors: str | None # undocumented def __init__( self, filename: StrPath, mode: str = ..., encoding: str | None = ..., delay: bool = ..., errors: str | None = ..., ) -> None: ... class BaseRotatingHandler(FileHandler): namer: Callable[[str], str] | None rotator: Callable[[str, str], None] | None def __init__( self, filename: StrPath, mode: str, encoding: str | None = ..., delay: bool = ..., errors: str | None = ..., ) -> None: ... def rotation_filename(self, default_name: str) -> str: ... def rotate(self, source: str, dest: str) -> None: ... class RotatingFileHandler(BaseRotatingHandler): maxBytes: str # undocumented backupCount: int # undocumented def __init__( self, filename: StrPath, mode: str = ..., maxBytes: int = ..., backupCount: int = ..., encoding: str | None = ..., delay: bool = ..., errors: str | None = ..., ) -> None: ... 
def doRollover(self) -> None: ... def shouldRollover(self, record: LogRecord) -> int: ... # undocumented class TimedRotatingFileHandler(BaseRotatingHandler): when: str # undocumented backupCount: int # undocumented utc: bool # undocumented atTime: datetime.time | None # undocumented interval: int # undocumented suffix: str # undocumented dayOfWeek: int # undocumented rolloverAt: int # undocumented extMatch: Pattern[str] # undocumented def __init__( self, filename: StrPath, when: str = ..., interval: int = ..., backupCount: int = ..., encoding: str | None = ..., delay: bool = ..., utc: bool = ..., atTime: datetime.time | None = ..., errors: str | None = ..., ) -> None: ... def doRollover(self) -> None: ... def shouldRollover(self, record: LogRecord) -> int: ... # undocumented def computeRollover(self, currentTime: int) -> int: ... # undocumented def getFilesToDelete(self) -> list[str]: ... # undocumented class QueueHandler(Handler): queue: SimpleQueue[Any] | Queue[Any] # undocumented def __init__(self, queue: SimpleQueue[Any] | Queue[Any]) -> None: ... class QueueListener: handlers: tuple[Handler, ...] # undocumented respect_handler_level: bool # undocumented queue: SimpleQueue[Any] | Queue[Any] # undocumented def __init__( self, queue: SimpleQueue[Any] | Queue[Any], *handlers: Handler, respect_handler_level: bool = ... ) -> None: ... def dequeue(self, block: bool) -> LogRecord: ... def prepare(self, record: LogRecord) -> Any: ... def start(self) -> None: ... def stop(self) -> None: ... def enqueue_sentinel(self) -> None: ... def handle(self, record: LogRecord) -> None: ... class BufferingHandler(Handler): capacity: int # undocumented buffer: list[LogRecord] # undocumented def __init__(self, capacity: int) -> None: ... 
class MemoryHandler(BufferingHandler): flushLevel: int # undocumented target: Handler | None # undocumented flushOnClose: bool # undocumented def __init__( self, capacity: int, flushLevel: int = ..., target: Handler | None = ..., flushOnClose: bool = ..., ) -> None: ... def setTarget(self, target: Handler | None) -> None: ... class SocketHandler(Handler): host: str # undocumented port: int | None # undocumented address: tuple[str, int] | str # undocumented sock: socket | None # undocumented closeOnError: bool # undocumented retryTime: float | None # undocumented retryStart: float # undocumented retryFactor: float # undocumented retryMax: float # undocumented def __init__(self, host: str, port: int | None) -> None: ... def makeSocket(self, timeout: float = ...) -> socket: ... # timeout is undocumented def makePickle(self, record: LogRecord) -> bytes: ... def send(self, s: bytes) -> None: ... def createSocket(self) -> None: ... class DatagramHandler(SocketHandler): def makeSocket(self) -> socket: ... 
python-picologging-0.9.4/src/picologging/logger.cxx000066400000000000000000000601551467100674700224620ustar00rootroot00000000000000#include "logger.hxx" #include "logrecord.hxx" #include "compat.hxx" #include #include "picologging.hxx" #include "filterer.hxx" #include "handler.hxx" int findEffectiveLevelFromParents(Logger* self) { PyObject* logger = (PyObject*)self; while (logger != Py_None) { if (!Logger_CheckExact(logger)) { PyErr_SetString(PyExc_TypeError, "logger is not a picologging.Logger"); return -1; } if (((Logger*)logger)->level > 0){ return ((Logger*)logger)->level; } logger = ((Logger*)logger)->parent; continue; } return LOG_LEVEL_NOTSET; } void setEnabledBasedOnEffectiveLevel(Logger* logger) { logger->enabledForDebug = false; logger->enabledForInfo = false; logger->enabledForWarning = false; logger->enabledForError = false; logger->enabledForCritical = false; switch (logger->effective_level){ case LOG_LEVEL_DEBUG: logger->enabledForDebug = true; case LOG_LEVEL_INFO: logger->enabledForInfo = true; case LOG_LEVEL_WARNING: logger->enabledForWarning = true; case LOG_LEVEL_ERROR: logger->enabledForError = true; case LOG_LEVEL_CRITICAL: logger->enabledForCritical = true; } } void setEffectiveLevelOfChildren(Logger* logger, unsigned short level) { for (int i = 0; i < PyList_GET_SIZE(logger->children); i++) { PyObject *child_logger = PyList_GET_ITEM(logger->children, i); // borrowed ref if (((Logger*)child_logger)->level == LOG_LEVEL_NOTSET) { ((Logger*)child_logger)->effective_level = level; setEnabledBasedOnEffectiveLevel((Logger*)child_logger); setEffectiveLevelOfChildren((Logger*)child_logger, level); } } } PyObject* Logger_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { Logger* self = (Logger*)FiltererType.tp_new(type, args, kwds); if (self != NULL) { self->name = Py_NewRef(Py_None); self->parent = Py_NewRef(Py_None); self->children = PyList_New(0); if (self->children == NULL) return nullptr; self->propagate = true; self->handlers = 
PyList_New(0); if (self->handlers == NULL){ Py_CLEAR(self->name); Py_CLEAR(self->parent); return nullptr; } self->disabled = false; self->manager = Py_NewRef(Py_None); self->_fallback_handler = (StreamHandler*)PyObject_CallFunctionObjArgs((PyObject *)&StreamHandlerType, NULL); if (self->_fallback_handler == nullptr){ Py_CLEAR(self->name); Py_CLEAR(self->parent); Py_CLEAR(self->handlers); Py_CLEAR(self->manager); return nullptr; } self->_const_handle = PyUnicode_FromString("handle"); self->_const_level = PyUnicode_FromString("level"); self->_const_unknown = PyUnicode_FromString(""); self->_const_exc_info = PyUnicode_FromString("exc_info"); self->_const_extra = PyUnicode_FromString("extra"); self->_const_stack_info = PyUnicode_FromString("stack_info"); self->_const_line_break = PyUnicode_FromString("\n"); self->_const_getvalue = PyUnicode_FromString("getvalue"); self->_const_close = PyUnicode_FromString("close"); } return (PyObject*)self; } int Logger_init(Logger *self, PyObject *args, PyObject *kwds) { if (FiltererType.tp_init((PyObject *) self, args, kwds) < 0) return -1; PyObject *name = NULL; unsigned short level = 0; static const char *kwlist[] = {"name", "level", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|H", const_cast(kwlist), &name, &level)) return -1; self->name = Py_NewRef(name); self->level = level; self->effective_level = findEffectiveLevelFromParents(self); setEnabledBasedOnEffectiveLevel(self); return 0; } PyObject* Logger_dealloc(Logger *self) { Py_CLEAR(self->name); Py_CLEAR(self->parent); Py_CLEAR(self->children); Py_CLEAR(self->handlers); Py_CLEAR(self->manager); Py_CLEAR(self->_const_handle); Py_CLEAR(self->_const_level); Py_CLEAR(self->_const_unknown); Py_CLEAR(self->_const_exc_info); Py_CLEAR(self->_const_extra); Py_CLEAR(self->_const_stack_info); Py_CLEAR(self->_const_line_break); Py_CLEAR(self->_const_getvalue); Py_CLEAR(self->_const_close); Py_CLEAR(self->_fallback_handler); FiltererType.tp_dealloc((PyObject *)self); return NULL; 
} PyObject* Logger_repr(Logger *self) { std::string level = _getLevelName(self->effective_level); return PyUnicode_FromFormat("", self->name, level.c_str()); } PyObject* Logger_setLevel(Logger *self, PyObject *level) { if (PyLong_Check(level)) { self->level = (unsigned short)PyLong_AsUnsignedLongMask(level); } else if (PyUnicode_Check(level)){ short levelValue = getLevelByName(PyUnicode_AsUTF8(level)); if (levelValue < 0) { PyErr_Format(PyExc_ValueError, "Invalid level value: %U", level); return nullptr; } self->level = levelValue; } else { PyErr_SetString(PyExc_TypeError, "level must be an integer"); return NULL; } self->effective_level = self->level; setEnabledBasedOnEffectiveLevel(self); setEffectiveLevelOfChildren(self, self->level); Py_RETURN_NONE; } PyObject* Logger_getEffectiveLevel(Logger *self){ int level = self->effective_level; if (level == -1) return nullptr; return PyLong_FromLong(level); } LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObject *msg, PyObject *args, PyObject * exc_info, PyObject *extra, PyObject *stack_info, int stacklevel){ PyFrameObject* frame = PyEval_GetFrame(); if (frame == NULL) { PyErr_SetString(PyExc_RuntimeError, "Could not get frame"); return nullptr; } PyFrameObject *f = PyFrame_GETBACK(frame); PyFrameObject *orig_f = f; while (f != NULL && stacklevel > 1) { f = PyFrame_GETBACK(f); stacklevel--; } if (f == NULL) { f = orig_f; } PyObject *co_filename = f != nullptr ? PyFrame_GETCODE(f)->co_filename : self->_const_unknown; long lineno = f != nullptr ? PyFrame_GETLINENO(f) : 0; PyObject *co_name = f != nullptr ? 
PyFrame_GETCODE(f)->co_name : self->_const_unknown; if (stack_info == Py_True){ PyObject* mod = PICOLOGGING_MODULE(); // borrowed reference PyObject* modDict = PyModule_GetDict(mod); // borrowed reference PyObject* print_stack = PyDict_GetItemString(modDict, "print_stack"); // PyDict_GetItemString returns a borrowed reference if (print_stack == nullptr){ PyErr_SetString(PyExc_RuntimeError, "Could not get print_stack"); return nullptr; } Py_XINCREF(print_stack); PyObject* sio_cls = PyDict_GetItemString(modDict, "StringIO"); Py_XINCREF(sio_cls); PyObject* sio = PyObject_CallFunctionObjArgs(sio_cls, NULL); if (sio == nullptr){ Py_XDECREF(sio_cls); Py_XDECREF(print_stack); return nullptr; // Got exception in StringIO.__init__() } PyObject* printStackResult = PyObject_CallFunctionObjArgs( print_stack, Py_None, Py_None, sio, NULL); if (printStackResult == nullptr) { Py_XDECREF(sio_cls); Py_XDECREF(print_stack); return nullptr; // Got exception in print_stack() } Py_DECREF(printStackResult); PyObject* s = PyObject_CallMethod_NOARGS(sio, self->_const_getvalue); if (s == nullptr){ Py_XDECREF(sio); Py_XDECREF(sio_cls); Py_XDECREF(print_stack); return nullptr; // Got exception in StringIO.getvalue() } Py_XDECREF(PyObject_CallMethod_NOARGS(sio, self->_const_close)); Py_DECREF(sio); Py_DECREF(sio_cls); Py_DECREF(print_stack); if (PYUNICODE_ENDSWITH(s, self->_const_line_break)){ PyObject* s2 = PyUnicode_Substring(s, 0, PyUnicode_GetLength(s) - 1); Py_DECREF(s); s = s2; } stack_info = s; } LogRecord* record = (LogRecord*) (&LogRecordType)->tp_alloc(&LogRecordType, 0); if (record == NULL) { PyErr_NoMemory(); return nullptr; } return LogRecord_create( record, self->name, msg, args, level, co_filename, lineno, exc_info, co_name, stack_info ); } inline PyObject* PyArg_GetKeyword(PyObject *const *args, Py_ssize_t npargs, PyObject *kwnames, PyObject* keyword){ if (kwnames == nullptr) return nullptr; for (int i = 0; i < PyTuple_GET_SIZE(kwnames); i++){ if 
(PyUnicode_Compare(PyTuple_GET_ITEM(kwnames, i), keyword) == 0){ return args[npargs + i]; } } return nullptr; } PyObject* Logger_logAndHandle(Logger *self, PyObject *const *args, Py_ssize_t nfargs, PyObject *kwnames, unsigned short level){ if (PyVectorcall_NARGS(nfargs) == 0) { PyErr_SetString(PyExc_TypeError, "log requires a message argument"); return nullptr; } PyObject *msg = args[0]; Py_ssize_t npargs = PyVectorcall_NARGS(nfargs); PyObject *args_ = PyTuple_New(npargs - 1); if (args_ == nullptr) return nullptr; for (int i = 1; i < npargs; i++) { PyTuple_SET_ITEM(args_, i - 1, args[i]); Py_INCREF(args[i]); } PyObject* exc_info = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_exc_info) : nullptr; if (exc_info == nullptr){ exc_info = Py_NewRef(Py_None); } else { if (PyExceptionInstance_Check(exc_info)){ PyObject * unpackedExcInfo = PyTuple_New(3); PyObject * excType = (PyObject*)Py_TYPE(exc_info); PyTuple_SET_ITEM(unpackedExcInfo, 0, excType); Py_INCREF(excType); PyTuple_SET_ITEM(unpackedExcInfo, 1, exc_info); Py_INCREF(exc_info); PyObject* traceback = PyObject_GetAttrString(exc_info, "__traceback__"); PyTuple_SET_ITEM(unpackedExcInfo, 2, traceback); Py_INCREF(traceback); exc_info = unpackedExcInfo; } else if (!PyTuple_CheckExact(exc_info)){ // Probably Py_TRUE, fetch current exception as tuple PyObject * unpackedExcInfo = PyTuple_New(3); PyErr_GetExcInfo(&PyTuple_GET_ITEM(unpackedExcInfo, 0), &PyTuple_GET_ITEM(unpackedExcInfo, 1), &PyTuple_GET_ITEM(unpackedExcInfo, 2)); exc_info = unpackedExcInfo; } } PyObject* extra = kwnames != nullptr ? PyArg_GetKeyword(args, npargs, kwnames, self->_const_extra) : nullptr; if (extra == nullptr){ extra = Py_NewRef(Py_None); } PyObject* stack_info = kwnames != nullptr ? 
PyArg_GetKeyword(args, npargs, kwnames, self->_const_stack_info) : nullptr; if (stack_info == nullptr){ stack_info = Py_NewRef(Py_False); } LogRecord *record = Logger_logMessageAsRecord( self, level, msg, args_, exc_info, extra, stack_info, 1); Py_DECREF(args_); Py_DECREF(exc_info); Py_DECREF(extra); Py_DECREF(stack_info); if (record == nullptr) return nullptr; if (Filterer_filter(&self->filterer, (PyObject*)record) != Py_True) { Py_DECREF(record); Py_RETURN_NONE; } int found = 0; Logger* cur = self; bool has_parent = true; while (has_parent){ for (int i = 0; i < PyList_GET_SIZE(cur->handlers) ; i++){ found ++; PyObject* handler = PyList_GET_ITEM(cur->handlers, i); // borrowed if (Handler_CheckExact(handler) || Handler_Check(handler)){ if (record->levelno >= ((Handler*)handler)->level){ if (Handler_handle((Handler*)handler, (PyObject*)record) == nullptr){ Py_DECREF(record); return nullptr; } } } else { PyObject* handlerLevel = PyObject_GetAttr(handler, self->_const_level); if (handlerLevel == nullptr){ Py_DECREF(record); PyErr_SetString(PyExc_TypeError, "Handler has no level attribute"); return nullptr; } if (record->levelno >= PyLong_AsLong(handlerLevel)){ if (PyObject_CallMethod_ONEARG(handler, self->_const_handle, (PyObject*)record) == nullptr){ Py_DECREF(handlerLevel); Py_DECREF(record); return nullptr; } } Py_DECREF(handlerLevel); } } if (!cur->propagate || cur->parent == Py_None) { has_parent = false; } else { if (!Logger_CheckExact(cur->parent)) { Py_DECREF(record); PyErr_SetString(PyExc_TypeError, "Logger's parent is not an instance of picologging.Logger"); return nullptr; } cur = (Logger*)cur->parent; } } if (found == 0){ if (record->levelno >= ((Handler*)self->_fallback_handler)->level){ if (Handler_handle((Handler*)self->_fallback_handler, (PyObject*)record) == nullptr){ Py_DECREF(record); return nullptr; } } } Py_DECREF(record); Py_RETURN_NONE; } PyObject* Logger_debug(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) { if 
(self->disabled || !self->enabledForDebug) { Py_RETURN_NONE; } if (PyVectorcall_NARGS(nargs) < 1) { PyErr_SetString(PyExc_TypeError, "debug() requires 1 positional argument"); return nullptr; } return Logger_logAndHandle(self, args, nargs, kwnames, LOG_LEVEL_DEBUG); } PyObject* Logger_info(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames){ if (self->disabled || !self->enabledForInfo) { Py_RETURN_NONE; } if (PyVectorcall_NARGS(nargs) < 1) { PyErr_SetString(PyExc_TypeError, "info() requires 1 positional argument"); return nullptr; } return Logger_logAndHandle(self, args, nargs, kwnames, LOG_LEVEL_INFO); } PyObject* Logger_warning(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames){ if (self->disabled || !self->enabledForWarning) { Py_RETURN_NONE; } if (PyVectorcall_NARGS(nargs) < 1) { PyErr_SetString(PyExc_TypeError, "warning() requires 1 positional argument"); return nullptr; } return Logger_logAndHandle(self, args, nargs, kwnames, LOG_LEVEL_WARNING); } PyObject* Logger_fatal(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames){ return Logger_critical(self, args, nargs, kwnames); } PyObject* Logger_error(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames){ if (self->disabled || !self->enabledForError) { Py_RETURN_NONE; } if (PyVectorcall_NARGS(nargs) < 1) { PyErr_SetString(PyExc_TypeError, "error() requires 1 positional argument"); return nullptr; } return Logger_logAndHandle(self, args, nargs, kwnames, LOG_LEVEL_ERROR); } PyObject* Logger_critical(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames){ if (self->disabled || !self->enabledForCritical) { Py_RETURN_NONE; } if (PyVectorcall_NARGS(nargs) < 1) { PyErr_SetString(PyExc_TypeError, "critical() requires 1 positional argument"); return nullptr; } return Logger_logAndHandle(self, args, nargs, kwnames, LOG_LEVEL_CRITICAL); } PyObject* Logger_exception(Logger *self, PyObject *const *args, Py_ssize_t nargs, 
PyObject *kwnames){
    // exception(msg, *args, **kwargs): log at ERROR level with exception info
    // attached.  Implemented by injecting ``exc_info=True`` into the
    // vectorcall arguments before delegating to Logger_logAndHandle.
    if (self->disabled || !self->enabledForError) { Py_RETURN_NONE; }
    if (kwnames == nullptr){
        // No keyword arguments from the caller: build a fresh one-element
        // kwnames tuple ("exc_info",).  This function owns that reference.
        kwnames = Py_BuildValue("(O)", self->_const_exc_info);
    } else {
        // NOTE(review): _PyTuple_Resize requires a tuple with refcount 1,
        // but kwnames is borrowed from the vectorcall caller here — confirm
        // this cannot resize (or fail on) a shared tuple.
        _PyTuple_Resize(&kwnames, PyTuple_GET_SIZE(kwnames) + 1);
        PyTuple_SET_ITEM(kwnames, PyTuple_GET_SIZE(kwnames) - 1, self->_const_exc_info);
        Py_INCREF(self->_const_exc_info); // Add extra ref for kwnames tuple
        Py_INCREF(kwnames); // Add extra ref for kwnames tuple (gets decrefed at the end of this function)
    }
    // Push True to the end of args
    // (vectorcall layout: nargs positional values followed by one value per
    // kwnames entry, so the appended Py_True is the value for "exc_info").
    PyObject** args_ = (PyObject**)PyMem_Malloc((PyVectorcall_NARGS(nargs) + 1) * sizeof(PyObject*));
    if (args_ == nullptr)
        return nullptr;
    for (int i = 0; i < PyVectorcall_NARGS(nargs); i++) {
        args_[i] = args[i];
    }
    args_[PyVectorcall_NARGS(nargs)] = Py_True;
    PyObject* result = Logger_logAndHandle(self, args_, nargs, kwnames, LOG_LEVEL_ERROR);
    Py_XDECREF(kwnames);
    PyMem_Free(args_);
    return result;
}

// log(level, msg, *args, **kwargs): generic logging entry point taking an
// explicit integer level as the first positional argument.
PyObject* Logger_log(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames){
    if (PyVectorcall_NARGS(nargs) < 2){
        PyErr_SetString(PyExc_TypeError, "log() requires at least 2 positional arguments");
        return nullptr;
    }
    if (!PyLong_Check(args[0])){
        PyErr_SetString(PyExc_TypeError, "log() requires a level argument");
        return nullptr;
    }
    unsigned short level = PyLong_AsUnsignedLongMask(args[0]);
    if (self->disabled || (self->effective_level > level)) {
        Py_RETURN_NONE;
    }
    // Copy the arguments minus the leading level so logAndHandle sees the
    // usual (msg, *args) layout.
    PyObject** args_ = (PyObject**)PyMem_Malloc((nargs - 1) * sizeof(PyObject*));
    if (args_ == nullptr)
        return nullptr;
    for (int i = 1; i < PyVectorcall_NARGS(nargs); i++) {
        args_[i - 1] = args[i];
    }
    PyObject* result = Logger_logAndHandle(self, args_, nargs - 1, kwnames, level);
    PyMem_Free(args_);
    return result;
}

// addHandler(handler): append a handler unless it is already registered.
PyObject* Logger_addHandler(Logger *self, PyObject *handler) {
    // NOTE(review): PySequence_Contains and PyList_Append can fail (-1);
    // their results are not checked here — confirm errors cannot leak out
    // as a pending exception.
    if (PySequence_Contains(self->handlers, handler)) {
        Py_RETURN_NONE;
    }
    PyList_Append(self->handlers, handler);
    Py_RETURN_NONE;
}

// removeHandler(handler): remove a previously registered handler; no-op if
// the handler is not present.
PyObject* Logger_removeHandler(Logger *self, PyObject *handler) {
    if
(PySequence_Contains(self->handlers, handler)) {
        // Delegate to self.handlers.remove(handler); the membership test
        // above guarantees list.remove cannot raise ValueError.
        PyObject* remove = PyUnicode_FromString("remove");
        PyObject* result = PyObject_CallMethod_ONEARG(self->handlers, remove, handler);
        Py_DECREF(remove);
        return result;
    }
    Py_RETURN_NONE;
}

// Getter for Logger.parent; returns None for a root/detached logger.
static PyObject *
Logger_get_parent(Logger *self, void *closure)
{
    if (self->parent == nullptr) {
        Py_RETURN_NONE;
    }
    return Py_NewRef(self->parent);
}

// Setter for Logger.parent.  Registers self as a child of the new parent
// and recomputes the cached effective level, which may change once the
// parent chain changes.
static int
Logger_set_parent(Logger *self, PyObject *value, void *Py_UNUSED(ignored))
{
    if (value == nullptr) {
        PyErr_SetString(PyExc_TypeError, "Cannot delete parent");
        return -1;
    }
    if (!Logger_Check(value)) {
        PyErr_Format(PyExc_TypeError, "parent must be a Logger, not %s", Py_TYPE(value)->tp_name);
        return -1;
    }
    // Replace the old parent reference (incref new before decref old).
    Py_XINCREF(value);
    Py_XDECREF(self->parent);
    self->parent = value;
    // Register self in the parent's children list exactly once.
    if (PySequence_Contains(((Logger*)self->parent)->children, (PyObject*)self) == 0){
        PyList_Append(((Logger*)self->parent)->children, (PyObject*)self);
    }
    // Rescan parent levels.
    self->effective_level = findEffectiveLevelFromParents(self);
    setEnabledBasedOnEffectiveLevel(self);
    return 0;
}

// isEnabledFor(level): True when this logger would process a record at
// ``level`` (i.e. not disabled and level >= effective level).
PyObject* Logger_isEnabledFor(Logger *self, PyObject *level) {
    if (!PyLong_Check(level)) {
        PyErr_SetString(PyExc_TypeError, "level must be an integer");
        return NULL;
    }
    if (self->disabled || (unsigned short)PyLong_AsUnsignedLongMask(level) < self->effective_level) {
        Py_RETURN_FALSE;
    }
    Py_RETURN_TRUE;
}

// Method table for picologging.Logger.
static PyMethodDef Logger_methods[] = {
    {"setLevel", (PyCFunction)Logger_setLevel, METH_O, "Set the level of the logger."},
    {"getEffectiveLevel", (PyCFunction)Logger_getEffectiveLevel, METH_NOARGS, "Get the effective level of the logger."},
    {"addHandler", (PyCFunction)Logger_addHandler, METH_O, "Add a handler to the logger."},
    {"removeHandler", (PyCFunction)Logger_removeHandler, METH_O, "Remove a handler from the logger."},
    {"isEnabledFor", (PyCFunction)Logger_isEnabledFor, METH_O, "Check if logger enabled for this level."},
    // Logging methods
    {"debug", (PyCFunction)Logger_debug, METH_FASTCALL | METH_KEYWORDS, "Log a message at level
DEBUG."}, {"info", (PyCFunction)Logger_info, METH_FASTCALL | METH_KEYWORDS, "Log a message at level INFO."}, {"warning", (PyCFunction)Logger_warning, METH_FASTCALL | METH_KEYWORDS, "Log a message at level WARNING."}, {"error", (PyCFunction)Logger_error, METH_FASTCALL | METH_KEYWORDS, "Log a message at level ERROR."}, {"critical", (PyCFunction)Logger_critical, METH_FASTCALL | METH_KEYWORDS, "Log a message at level CRITICAL."}, {"exception", (PyCFunction)Logger_exception, METH_FASTCALL | METH_KEYWORDS, "Log a message at level ERROR."}, {"fatal", (PyCFunction)Logger_fatal, METH_FASTCALL | METH_KEYWORDS, "Log a message at level FATAL."}, {"log", (PyCFunction)Logger_log, METH_FASTCALL | METH_KEYWORDS, "Log a message at the specified level."}, {NULL} }; static PyMemberDef Logger_members[] = { {"name", T_OBJECT_EX, offsetof(Logger, name), 0, "Logger name"}, {"level", T_USHORT, offsetof(Logger, level), 0, "Logger level"}, {"propagate", T_BOOL, offsetof(Logger, propagate), 0, "Logger propagate"}, {"handlers", T_OBJECT_EX, offsetof(Logger, handlers), 0, "Logger handlers"}, {"disabled", T_BOOL, offsetof(Logger, disabled), 0, "Logger disabled"}, {"manager", T_OBJECT_EX, offsetof(Logger, manager), 0, "Logger manager"}, {NULL} }; static PyGetSetDef Logger_getsets[] = { {"parent", (getter)Logger_get_parent, (setter)Logger_set_parent, "Logger parent"}, {NULL, NULL, NULL, NULL } /* sentinel */ }; PyTypeObject LoggerType = { PyObject_HEAD_INIT(NULL) "picologging.Logger", /* tp_name */ sizeof(Logger), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)Logger_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ (reprfunc)Logger_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , /* 
tp_flags */ PyDoc_STR("Logging interface."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Logger_methods, /* tp_methods */ Logger_members, /* tp_members */ Logger_getsets, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Logger_init, /* tp_init */ 0, /* tp_alloc */ Logger_new, /* tp_new */ PyObject_Del, /* tp_free */ }; python-picologging-0.9.4/src/picologging/logger.hxx000066400000000000000000000051351467100674700224640ustar00rootroot00000000000000#include #include #include #include "compat.hxx" #include "logrecord.hxx" #include "filterer.hxx" #include #include "streamhandler.hxx" #ifndef PICOLOGGING_LOGGER_H #define PICOLOGGING_LOGGER_H typedef struct LoggerT { Filterer filterer; PyObject *name; unsigned short level; unsigned short effective_level; PyObject *parent; PyObject *children; bool propagate; PyObject *handlers; PyObject *manager; bool disabled; bool enabledForCritical = false; bool enabledForError = false; bool enabledForWarning = false; bool enabledForInfo = false; bool enabledForDebug = false; // Constant strings. 
PyObject* _const_handle; PyObject* _const_level; PyObject* _const_unknown; PyObject* _const_exc_info; PyObject* _const_extra; PyObject* _const_stack_info; PyObject* _const_line_break; PyObject* _const_close; PyObject* _const_getvalue; StreamHandler* _fallback_handler; } Logger ; int Logger_init(Logger *self, PyObject *args, PyObject *kwds); PyObject* Logger_setLevel(Logger *self, PyObject *args); PyObject* Logger_getEffectiveLevel(Logger *self); PyObject* Logger_dealloc(Logger *self); PyObject* Logger_addHandler(Logger *self, PyObject *handler); PyObject* Logger_isEnabledFor(Logger *self, PyObject *level); PyObject* Logger_logAndHandle(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames, unsigned short level); PyObject* Logger_debug(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_info(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_warning(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_fatal(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_error(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_critical(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_exception(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); PyObject* Logger_log(Logger *self, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames); LogRecord* Logger_logMessageAsRecord(Logger* self, unsigned short level, PyObject *msg, PyObject *args, PyObject * exc_info, PyObject *extra, PyObject *stack_info, int stacklevel=1); extern PyTypeObject LoggerType; #define Logger_CheckExact(op) Py_IS_TYPE(op, &LoggerType) #define Logger_Check(op) PyObject_TypeCheck(op, &LoggerType) #endif // 
PICOLOGGING_LOGGER_Hpython-picologging-0.9.4/src/picologging/logrecord.cxx000066400000000000000000000366341467100674700231700ustar00rootroot00000000000000#include #include #include "logrecord.hxx" #include "compat.hxx" #include "picologging.hxx" namespace fs = std::filesystem; _PyTime_t startTime = current_time(); static PyObject* _PyFloat_FromPyTime(_PyTime_t t) { double d = _PyTime_AsSecondsDouble(t); return PyFloat_FromDouble(d); } _PyTime_t current_time() { _PyTime_t t; if (_PyTime_GetSystemClockWithInfo(&t, NULL) < 0) { return -1; } return t; } PyObject* LogRecord_new(PyTypeObject* type, PyObject *initargs, PyObject *kwds) { PyObject *name = nullptr, *exc_info = nullptr, *sinfo = nullptr, *msg = nullptr, *args = nullptr, *levelname = nullptr, *pathname = nullptr, *filename = nullptr, *module = nullptr, *funcname = nullptr; int levelno, lineno; long msecs; static const char *kwlist[] = { "name", "level", "pathname", "lineno", "msg", "args", "exc_info", "func", "sinfo", NULL}; if (!PyArg_ParseTupleAndKeywords(initargs, kwds, "OiOiOOO|OO", const_cast(kwlist), &name, &levelno, &pathname, &lineno, &msg, &args, &exc_info, &funcname, &sinfo)) return NULL; LogRecord* self = (LogRecord*)type->tp_alloc(type, 0); if (self == NULL) { PyErr_NoMemory(); return NULL; } return (PyObject*)LogRecord_create(self, name, msg, args, levelno, pathname, lineno, exc_info, funcname, sinfo); } LogRecord* LogRecord_create(LogRecord* self, PyObject* name, PyObject* msg, PyObject* args, int levelno, PyObject* pathname, int lineno, PyObject* exc_info, PyObject* funcname, PyObject* sinfo) { self->name = Py_NewRef(name); self->msg = Py_NewRef(msg); // This is a copy of the behaviour in the Python class // if (args and len(args) == 1 and isinstance(args[0], collections.abc.Mapping) // and args[0]): // args = args[0] Py_ssize_t argsLen = 0; if (args != Py_None){ argsLen = PyObject_Length(args); } if (argsLen == 1 && PySequence_Check(args)) { PyObject* firstValue = PySequence_GetItem(args, 0); 
if (PyDict_Check(firstValue)) { args = firstValue; } Py_DECREF(firstValue); } if (argsLen == 0) { self->hasArgs = false; } else { self->hasArgs = true; } self->args = Py_NewRef(args); self->levelno = levelno; picologging_state *state = GET_PICOLOGGING_STATE(); PyObject* levelname = nullptr; switch (levelno) { case LOG_LEVEL_CRITICAL: levelname = Py_NewRef(state->g_const_CRITICAL); break; case LOG_LEVEL_ERROR: levelname = Py_NewRef(state->g_const_ERROR); break; case LOG_LEVEL_WARNING: levelname = Py_NewRef(state->g_const_WARNING); break; case LOG_LEVEL_INFO: levelname = Py_NewRef(state->g_const_INFO); break; case LOG_LEVEL_DEBUG: levelname = Py_NewRef(state->g_const_DEBUG); break; case LOG_LEVEL_NOTSET: levelname = Py_NewRef(state->g_const_NOTSET); break; default: levelname = PyUnicode_FromFormat("%d", levelno); break; } self->levelname = levelname; self->pathname = Py_NewRef(pathname); #ifdef PICOLOGGING_CACHE_FILEPATH if (state && state->g_filepathCache != nullptr) { auto filepath = state->g_filepathCache->lookup(pathname); self->filename = Py_NewRef(filepath.filename); self->module = Py_NewRef(filepath.module); } else { // Manual lookup - TODO Raise warning? 
fs::path fs_path = fs::path(PyUnicode_AsUTF8(pathname)); #ifdef WIN32 const wchar_t* filename_wchar = fs_path.filename().c_str(); const wchar_t* modulename = fs_path.stem().c_str(); self->filename = PyUnicode_FromWideChar(filename_wchar, wcslen(filename_wchar)), self->module = PyUnicode_FromWideChar(modulename, wcslen(modulename)); #else self->filename = PyUnicode_FromString(fs_path.filename().c_str()); self->module = PyUnicode_FromString(fs_path.stem().c_str()); #endif } #else // PICOLOGGING_CACHE_FILEPATH fs::path fs_path = fs::path(PyUnicode_AsUTF8(pathname)); #ifdef WIN32 const wchar_t* filename_wchar = fs_path.filename().c_str(); const wchar_t* modulename = fs_path.stem().c_str(); self->filename = PyUnicode_FromWideChar(filename_wchar, wcslen(filename_wchar)), self->module = PyUnicode_FromWideChar(modulename, wcslen(modulename)); #else self->filename = PyUnicode_FromString(fs_path.filename().c_str()); self->module = PyUnicode_FromString(fs_path.stem().c_str()); #endif #endif // PICOLOGGING_CACHE_FILEPATH self->excInfo = Py_NewRef(exc_info); self->excText = Py_NewRef(Py_None); if (sinfo != NULL){ self->stackInfo = Py_NewRef(sinfo); } else { self->stackInfo = Py_NewRef(Py_None); } self->lineno = lineno; if (funcname != NULL){ self->funcName = Py_NewRef(funcname); } else { self->funcName = Py_NewRef(Py_None); } _PyTime_t ctime = current_time(); if (ctime == -1){ goto error; } self->created = _PyTime_AsSecondsDouble(ctime); self->msecs = _PyTime_AsMilliseconds(ctime, _PyTime_ROUND_CEILING); self->relativeCreated = _PyFloat_FromPyTime((ctime - startTime) * 1000); self->thread = PyThread_get_thread_ident(); // Only supported in Python 3.7+, if big demand for 3.6 patch this out for the old API. // TODO #2 : See if there is a performant way to get the thread name. self->threadName = Py_NewRef(Py_None); // TODO #1 : See if there is a performant way to get the process name. 
    self->processName = Py_NewRef(Py_None);
    self->process = getpid();
    // message/asctime are populated lazily (getMessage / formatter).
    self->message = Py_NewRef(Py_None);
    self->asctime = Py_NewRef(Py_None);
    return self;
error:
    // tp_alloc zero-initialises the struct, so any field not yet assigned
    // on this path is NULL and Py_XDECREF is safe.
    Py_XDECREF(self->name);
    Py_XDECREF(self->msg);
    Py_XDECREF(self->args);
    Py_XDECREF(self->levelname);
    Py_XDECREF(self->pathname);
    Py_XDECREF(self->filename);
    Py_XDECREF(self->module);
    Py_XDECREF(self->funcName);
    Py_XDECREF(self->relativeCreated);
    Py_XDECREF(self->threadName);
    Py_XDECREF(self->processName);
    Py_XDECREF(self->excInfo);
    Py_XDECREF(self->excText);
    Py_XDECREF(self->stackInfo);
    Py_XDECREF(self->message);
    Py_XDECREF(self->asctime);
    if (!PyErr_Occurred()) {
        PyErr_Format(PyExc_ValueError, "Could not create LogRecord, unknown error.");
    }
    return nullptr;
}

// Destructor: release every owned field then free the object.
// NOTE(review): declared as returning PyObject* but used as a tp_dealloc
// destructor via cast — confirm the return value is never consumed.
PyObject* LogRecord_dealloc(LogRecord *self) {
    Py_CLEAR(self->name);
    Py_CLEAR(self->msg);
    Py_CLEAR(self->args);
    Py_CLEAR(self->levelname);
    Py_CLEAR(self->pathname);
    Py_CLEAR(self->filename);
    Py_CLEAR(self->module);
    Py_CLEAR(self->funcName);
    Py_CLEAR(self->relativeCreated);
    Py_CLEAR(self->threadName);
    Py_CLEAR(self->processName);
    Py_CLEAR(self->excInfo);
    Py_CLEAR(self->excText);
    Py_CLEAR(self->stackInfo);
    Py_CLEAR(self->message);
    Py_CLEAR(self->asctime);
    Py_CLEAR(self->dict);
    ((PyObject*)self)->ob_type->tp_free((PyObject*)self);
    return nullptr;
}

// tp_init: all initialisation happens in tp_new / LogRecord_create.
int LogRecord_init(LogRecord *self, PyObject *args, PyObject *kwds)
{
    return 0;
}

// Render ``msg % args`` into self->message.
// Returns 0 on success, -1 with an exception set on failure.
int LogRecord_writeMessage(LogRecord *self)
{
    PyObject *msg = nullptr;
    PyObject *args = self->args;
    if (PyUnicode_Check(self->msg)){
        // Add new reference for return value, all other code paths return a new object
        msg = Py_NewRef(self->msg);
    } else {
        msg = PyObject_Str(self->msg);
        if (msg == nullptr) {
            return -1;
        }
    }
    if (!self->hasArgs) {
        // No format arguments: the (stringified) msg is the message.
        Py_DECREF(self->message);
        self->message = msg;
        return 0;
    } else {
        PyObject * formatted = PyUnicode_Format(msg, args);
        Py_DECREF(msg);
        if (formatted == nullptr){
            return -1;
        } else {
            Py_DECREF(self->message);
            self->message = formatted;
            return 0;
        }
    }
}

/**
 * Update the message attribute of the object
and return the field */ PyObject* LogRecord_getMessage(LogRecord *self) { if (LogRecord_writeMessage(self) == -1) return nullptr; return Py_NewRef(self->message); } PyObject* LogRecordLogRecord_getnewargs(LogRecord *self) { return Py_BuildValue("OlOlOOOOO", self->name, self->levelno, self->pathname, self->lineno, self->msg, self->args, self->excInfo, self->funcName, self->stackInfo); } PyObject* LogRecord_repr(LogRecord *self) { return PyUnicode_FromFormat("", self->name, self->levelno, self->pathname, self->lineno, self->msg); } PyObject * LogRecord_getDict(PyObject *obj, void *context) { PyObject* dict = PyObject_GenericGetDict(obj, context); PyDict_SetItemString(dict, "name", ((LogRecord*)obj)->name); PyDict_SetItemString(dict, "msg", ((LogRecord*)obj)->msg); PyDict_SetItemString(dict, "args", ((LogRecord*)obj)->args); PyObject * levelno = PyLong_FromLong(((LogRecord*)obj)->levelno); PyDict_SetItemString(dict, "levelno", levelno); Py_DECREF(levelno); PyDict_SetItemString(dict, "levelname", ((LogRecord*)obj)->levelname); PyDict_SetItemString(dict, "pathname", ((LogRecord*)obj)->pathname); PyDict_SetItemString(dict, "filename", ((LogRecord*)obj)->filename); PyDict_SetItemString(dict, "module", ((LogRecord*)obj)->module); PyDict_SetItemString(dict, "funcName", ((LogRecord*)obj)->funcName); PyObject *lineno = PyLong_FromLong(((LogRecord*)obj)->lineno); PyDict_SetItemString(dict, "lineno", lineno); Py_DECREF(lineno); PyObject *created = PyFloat_FromDouble(((LogRecord*)obj)->created); PyDict_SetItemString(dict, "created", created); Py_DECREF(created); PyObject *msecs = PyLong_FromLong(((LogRecord*)obj)->msecs); PyDict_SetItemString(dict, "msecs", msecs); Py_DECREF(msecs); PyDict_SetItemString(dict, "relativeCreated", ((LogRecord*)obj)->relativeCreated); PyObject *thread = PyLong_FromUnsignedLong(((LogRecord*)obj)->thread); PyDict_SetItemString(dict, "thread", thread); Py_DECREF(thread); PyDict_SetItemString(dict, "threadName", ((LogRecord*)obj)->threadName); 
PyDict_SetItemString(dict, "processName", ((LogRecord*)obj)->processName); PyObject *process = PyLong_FromLong(((LogRecord*)obj)->process); PyDict_SetItemString(dict, "process", process); Py_DECREF(process); PyDict_SetItemString(dict, "exc_info", ((LogRecord*)obj)->excInfo); PyDict_SetItemString(dict, "exc_text", ((LogRecord*)obj)->excText); PyDict_SetItemString(dict, "stack_info", ((LogRecord*)obj)->stackInfo); PyDict_SetItemString(dict, "message", ((LogRecord*)obj)->message); PyDict_SetItemString(dict, "asctime", ((LogRecord*)obj)->asctime); return dict; } static PyMemberDef LogRecord_members[] = { {"name", T_OBJECT_EX, offsetof(LogRecord, name), 0, "Logger name"}, {"msg", T_OBJECT_EX, offsetof(LogRecord, msg), 0, "Message (string)"}, {"args", T_OBJECT_EX, offsetof(LogRecord, args), 0, "Arguments (tuple)"}, {"levelno", T_INT, offsetof(LogRecord, levelno), 0, "Level number"}, {"levelname", T_OBJECT_EX, offsetof(LogRecord, levelname), 0, "Level name"}, {"pathname", T_OBJECT_EX, offsetof(LogRecord, pathname), 0, "File pathname"}, {"filename", T_OBJECT_EX, offsetof(LogRecord, filename), 0, "File name"}, {"module", T_OBJECT_EX, offsetof(LogRecord, module), 0, "Module name"}, {"lineno", T_INT, offsetof(LogRecord, lineno), 0, "Line number"}, {"funcName", T_OBJECT_EX, offsetof(LogRecord, funcName), 0, "Function name"}, {"created", T_DOUBLE, offsetof(LogRecord, created), 0, "Created"}, {"msecs", T_LONG, offsetof(LogRecord, msecs), 0, "Milliseconds"}, {"relativeCreated", T_OBJECT_EX, offsetof(LogRecord, relativeCreated), 0, "Relative created"}, {"thread", T_ULONG, offsetof(LogRecord, thread), 0, "Thread"}, {"threadName", T_OBJECT_EX, offsetof(LogRecord, threadName), 0, "Thread name"}, {"processName", T_OBJECT_EX, offsetof(LogRecord, processName), 0, "Process name"}, {"process", T_INT, offsetof(LogRecord, process), 0, "Process"}, {"exc_info", T_OBJECT_EX, offsetof(LogRecord, excInfo), 0, "Exception info"}, {"exc_text", T_OBJECT_EX, offsetof(LogRecord, excText), 0, 
"Exception text"}, {"stack_info", T_OBJECT_EX, offsetof(LogRecord, stackInfo), 0, "Stack info"}, {"message", T_OBJECT_EX, offsetof(LogRecord, message), 0, "Message"}, {"asctime", T_OBJECT_EX, offsetof(LogRecord, asctime), 0, "Asctime"}, {NULL} }; static PyMethodDef LogRecord_methods[] = { {"getMessage", (PyCFunction)LogRecord_getMessage, METH_NOARGS, "Get message"}, {"__getnewargs__", (PyCFunction)LogRecordLogRecord_getnewargs, METH_NOARGS, "Picke LogRecord"}, {NULL} }; static PyGetSetDef LogRecord_getset[] = { {"__dict__", LogRecord_getDict, PyObject_GenericSetDict}, {NULL} }; PyTypeObject LogRecordType = { PyObject_HEAD_INIT(NULL) "picologging.LogRecord", /* tp_name */ sizeof(LogRecord), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)LogRecord_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ (reprfunc)LogRecord_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ PyDoc_STR("LogRecord objects are used to hold information about log events."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ LogRecord_methods, /* tp_methods */ LogRecord_members, /* tp_members */ LogRecord_getset, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ offsetof(LogRecord, dict), /* tp_dictoffset */ (initproc)LogRecord_init, /* tp_init */ 0, /* tp_alloc */ LogRecord_new, /* tp_new */ PyObject_Del, /* tp_free */ };python-picologging-0.9.4/src/picologging/logrecord.hxx000066400000000000000000000027021467100674700231620ustar00rootroot00000000000000#include #include #include #include #include "compat.hxx" #ifndef PICOLOGGING_LOGRECORD_H 
#define PICOLOGGING_LOGRECORD_H typedef struct { PyObject_HEAD PyObject *name; PyObject *msg; PyObject *args; int levelno; PyObject *levelname; PyObject *pathname; PyObject *filename; PyObject *module; int lineno; PyObject *funcName; double created; long msecs; PyObject *relativeCreated; unsigned long thread; PyObject *threadName; int process; PyObject *processName; PyObject *excInfo; PyObject *excText; PyObject *stackInfo; PyObject *message; bool hasArgs; PyObject *asctime; PyObject *dict; } LogRecord; int LogRecord_init(LogRecord *self, PyObject *args, PyObject *kwds); LogRecord* LogRecord_create(LogRecord* self, PyObject* name, PyObject* msg, PyObject* args, int levelno, PyObject* pathname, int lineno, PyObject* exc_info, PyObject* funcname, PyObject* sinfo) ; PyObject* LogRecord_dealloc(LogRecord *self); int LogRecord_writeMessage(LogRecord *self); PyObject* LogRecord_getMessage(LogRecord *self); PyObject* LogRecord_repr(LogRecord *self); PyObject* LogRecord_getDict(PyObject *, void *); _PyTime_t current_time(); extern PyTypeObject LogRecordType; #define LogRecord_CheckExact(op) Py_IS_TYPE(op, &LogRecordType) #define LogRecord_Check(op) PyObject_TypeCheck(op, &LogRecordType) #endif // PICOLOGGING_LOGRECORD_Hpython-picologging-0.9.4/src/picologging/picologging.hxx000066400000000000000000000015021467100674700235000ustar00rootroot00000000000000#include #include #include "filepathcache.hxx" #ifndef PICOLOGGING_H #define PICOLOGGING_H typedef struct { FilepathCache* g_filepathCache; PyObject* g_const_CRITICAL; PyObject* g_const_ERROR; PyObject* g_const_WARNING; PyObject* g_const_INFO; PyObject* g_const_DEBUG; PyObject* g_const_NOTSET; } picologging_state; extern struct PyModuleDef _picologging_module; std::string _getLevelName(short); short getLevelByName(std::string levelName); #define PICOLOGGING_MODULE() PyState_FindModule(&_picologging_module) #define GET_PICOLOGGING_STATE() (picologging_state *)PyModule_GetState(PICOLOGGING_MODULE()) #define LOG_LEVEL_CRITICAL 
50 #define LOG_LEVEL_ERROR 40 #define LOG_LEVEL_WARNING 30 #define LOG_LEVEL_INFO 20 #define LOG_LEVEL_DEBUG 10 #define LOG_LEVEL_NOTSET 0 #endif // PICOLOGGING_Hpython-picologging-0.9.4/src/picologging/py.typed000066400000000000000000000000011467100674700221360ustar00rootroot00000000000000 python-picologging-0.9.4/src/picologging/streamhandler.cxx000066400000000000000000000155121467100674700240310ustar00rootroot00000000000000#include #include "streamhandler.hxx" #include "handler.hxx" #include "compat.hxx" #include "picologging.hxx" PyObject* StreamHandler_new(PyTypeObject* type, PyObject* args, PyObject* kwds) { StreamHandler* self = (StreamHandler*)HandlerType.tp_new(type, args, kwds); if (self != NULL) { self->terminator = PyUnicode_FromString("\n"); self->_const_write = PyUnicode_FromString("write"); self->_const_flush = PyUnicode_FromString("flush"); self->stream = Py_None; self->stream_has_flush = false; } return (PyObject*)self; } int StreamHandler_init(StreamHandler *self, PyObject *args, PyObject *kwds){ if (HandlerType.tp_init((PyObject *) self, args, kwds) < 0) return -1; PyObject *stream = NULL; static const char *kwlist[] = {"stream", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|O", const_cast(kwlist), &stream)){ return -1; } if (stream == NULL || stream == Py_None){ stream = PySys_GetObject("stderr"); } self->stream = Py_NewRef(stream); self->stream_has_flush = (PyObject_HasAttr(self->stream, self->_const_flush) == 1); return 0; } PyObject* StreamHandler_dealloc(StreamHandler *self) { Py_CLEAR(self->stream); Py_CLEAR(self->terminator); Py_CLEAR(self->_const_write); Py_CLEAR(self->_const_flush); HandlerType.tp_dealloc((PyObject *)self); return nullptr; } PyObject* flush (StreamHandler* self){ if (!self->stream_has_flush) Py_RETURN_NONE; Handler_acquire(&self->handler); PyObject* result = PyObject_CallMethod_NOARGS(self->stream, self->_const_flush); Py_XDECREF(result); Handler_release(&self->handler); Py_RETURN_NONE; } PyObject* 
StreamHandler_emit(StreamHandler* self, PyObject* const* args, Py_ssize_t nargs){ PyObject* writeResult = nullptr; if (nargs < 1){ PyErr_SetString(PyExc_ValueError, "emit() takes at least 1 argument"); return nullptr; } PyObject* msg = Handler_format(&self->handler, args[0]); if (msg == nullptr) return nullptr; if (!PyUnicode_CheckExact(msg)){ PyErr_SetString(PyExc_TypeError, "Result of self.handler.format() must be a string"); goto error; } PyUnicode_Append(&msg, self->terminator); if (msg == nullptr) { // PyUnicode_Append sets *pleft to null on error. Error is extremely unlikely goto error; } writeResult = PyObject_CallMethod_ONEARG(self->stream, self->_const_write, msg); if (writeResult == nullptr){ if (!PyErr_Occurred()) PyErr_SetString(PyExc_RuntimeError, "Cannot write to stream"); goto error; } flush(self); Py_XDECREF(msg); Py_XDECREF(writeResult); Py_RETURN_NONE; error: // TODO: #4 handle error path (see handleError(record)) Py_XDECREF(msg); return nullptr; } PyObject* StreamHandler_setStream(StreamHandler* self, PyObject* stream){ // If stream would be unchanged, do nothing and return None if (self->stream == stream) { Py_RETURN_NONE; } // Otherwise flush current stream PyObject* result = self->stream; flush(self); Py_XDECREF(self->stream); // And set new stream self->stream = stream; Py_INCREF(self->stream); self->stream_has_flush = (PyObject_HasAttr(self->stream, self->_const_flush) == 1); // Return previous stream (now flushed) return result; } PyObject* StreamHandler_flush(StreamHandler* self, PyObject* const* args, Py_ssize_t nargs) { flush(self); Py_RETURN_NONE; } PyObject* StreamHandler_repr(StreamHandler *self) { std::string level = _getLevelName(self->handler.level); PyObject* streamName = PyObject_GetAttrString(self->stream, "name"); PyObject* repr = PyUnicode_FromFormat("<%s %S (%s)>", _PyType_Name(Py_TYPE(self)), streamName, level.c_str()); Py_CLEAR(streamName); return repr; } static PyMethodDef StreamHandler_methods[] = { {"emit", 
(PyCFunction)StreamHandler_emit, METH_FASTCALL, "Emit a record."}, {"flush", (PyCFunction)StreamHandler_flush, METH_FASTCALL, "Flush the stream."}, {"setStream", (PyCFunction)StreamHandler_setStream, METH_O, "Set the stream to write to."}, {NULL} }; static PyMemberDef StreamHandler_members[] = { {"stream", T_OBJECT_EX, offsetof(StreamHandler, stream), 0, "Stream"}, {NULL} }; PyTypeObject StreamHandlerType = { PyObject_HEAD_INIT(NULL) "picologging.StreamHandler", /* tp_name */ sizeof(StreamHandler), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)StreamHandler_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ 0, /* tp_setattr */ 0, /* tp_as_async */ (reprfunc)StreamHandler_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ PyObject_GenericSetAttr, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE , /* tp_flags */ PyDoc_STR("StreamHandler interface."), /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ StreamHandler_methods, /* tp_methods */ StreamHandler_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)StreamHandler_init, /* tp_init */ 0, /* tp_alloc */ StreamHandler_new, /* tp_new */ PyObject_Del, /* tp_free */ }; python-picologging-0.9.4/src/picologging/streamhandler.hxx000066400000000000000000000010401467100674700240250ustar00rootroot00000000000000#include #include "handler.hxx" #ifndef PICOLOGGING_STREAMHANDLER_H #define PICOLOGGING_STREAMHANDLER_H typedef struct { Handler handler; PyObject* stream; PyObject* terminator; PyObject* _const_write; PyObject* _const_flush; bool stream_has_flush; } StreamHandler; PyObject* StreamHandler_emit(StreamHandler* self, PyObject* const* args, 
Py_ssize_t nargs); extern PyTypeObject StreamHandlerType; #define StreamHandler_CheckExact(op) Py_IS_TYPE(op, &StreamHandlerType) #endif // PICOLOGGING_STREAMHANDLER_Hpython-picologging-0.9.4/tests/000077500000000000000000000000001467100674700165225ustar00rootroot00000000000000python-picologging-0.9.4/tests/fuzzing/000077500000000000000000000000001467100674700202165ustar00rootroot00000000000000python-picologging-0.9.4/tests/fuzzing/README.md000066400000000000000000000034641467100674700215040ustar00rootroot00000000000000# Fuzz testing Requirements: - pip install atheris Compilation: ```console $ CC="/usr/bin/clang" CXX="/usr/bin/clang++" python setup.py build_ext --inplace --build-type Debug -DCOVERAGE=ON -DFUZZING=ON $ ASAN_OPTIONS=detect_leaks=0 LD_PRELOAD="$(python -c "import atheris; print(atheris.path())")/asan_with_fuzzer.so" python tests/fuzzing/fuzz_atheris.py INFO: Instrumenting picologging INFO: Instrumenting traceback INFO: Instrumenting linecache INFO: Instrumenting tokenize INFO: Instrumenting token INFO: Instrumenting logging INFO: Instrumenting weakref INFO: Instrumenting _weakrefset INFO: Instrumenting string INFO: Instrumenting threading INFO: Using preloaded libfuzzer INFO: Running with entropic power schedule (0xFF, 100). 
INFO: Seed: 3598005196 INFO: Loaded 1 modules (7006 inline 8-bit counters): 7006 [0x7f9c39ecbac8, 0x7f9c39ecd626), INFO: Loaded 1 PC tables (7006 PCs): 7006 [0x7f9c39ecd628,0x7f9c39ee8c08), INFO: -max_len is not provided; libFuzzer will not generate inputs larger than 4096 bytes INFO: A corpus is not provided, starting from an empty corpus #2 INITED cov: 125 ft: 126 corp: 1/1b exec/s: 0 rss: 60Mb #65536 pulse cov: 125 ft: 126 corp: 1/1b lim: 652 exec/s: 32768 rss: 132Mb #131072 pulse cov: 125 ft: 126 corp: 1/1b lim: 1300 exec/s: 43690 rss: 197Mb #200000 NEW cov: 126 ft: 127 corp: 2/68b lim: 1990 exec/s: 40000 rss: 270Mb L: 67/67 MS: 1 InsertRepeatedBytes- #262144 pulse cov: 126 ft: 127 corp: 2/68b lim: 2600 exec/s: 43690 rss: 331Mb #524288 pulse cov: 126 ft: 127 corp: 2/68b lim: 4096 exec/s: 40329 rss: 598Mb #1048576 pulse cov: 126 ft: 127 corp: 2/68b lim: 4096 exec/s: 43690 rss: 693Mb #2097152 pulse cov: 126 ft: 127 corp: 2/68b lim: 4096 exec/s: 42799 rss: 834Mb #4194304 pulse cov: 126 ft: 127 corp: 2/68b lim: 4096 exec/s: 42799 rss: 1031Mb ``` python-picologging-0.9.4/tests/fuzzing/fuzz_atheris.py000066400000000000000000000006631467100674700233120ustar00rootroot00000000000000import io import atheris with atheris.instrument_imports(): import sys import picologging picologging.basicConfig() logger = picologging.Logger("fuzz", picologging.DEBUG) tmp = io.StringIO() logger.addHandler(picologging.StreamHandler(tmp)) def TestOneInput(data): fdp = atheris.FuzzedDataProvider(data) logger.warning(fdp.ConsumeUnicode(1)) atheris.Setup(sys.argv, TestOneInput) atheris.Fuzz() 
python-picologging-0.9.4/tests/integration/000077500000000000000000000000001467100674700210455ustar00rootroot00000000000000python-picologging-0.9.4/tests/integration/requirements.txt000066400000000000000000000000131467100674700243230ustar00rootroot00000000000000coloredlogspython-picologging-0.9.4/tests/integration/test_coloredlogs.py000066400000000000000000000013561467100674700247770ustar00rootroot00000000000000import io try: import coloredlogs has_libs = True except ImportError: has_libs = False import pytest import picologging as logging @pytest.mark.skipif(not has_libs, reason="Missing libraries") def test_coloredlogs_logger(): # Setup colored logger stream = io.StringIO() logger = logging.getLogger() logger.addHandler(logging.StreamHandler(stream)) coloredlogs.install(logger=logger, level="INFO") logger.info("Info message") logger.warning("Warning message") logger.error("Error message") logger.critical("Critical message") log = stream.getvalue() assert "Info message" in log assert "Warning message" in log assert "Error message" in log assert "Critical message" in log python-picologging-0.9.4/tests/memray/000077500000000000000000000000001467100674700200145ustar00rootroot00000000000000python-picologging-0.9.4/tests/memray/.gitignore000066400000000000000000000000131467100674700217760ustar00rootroot00000000000000.profiles*/python-picologging-0.9.4/tests/memray/Makefile000066400000000000000000000016361467100674700214620ustar00rootroot00000000000000profiles: mkdir -p .profiles PYTHONMALLOC=malloc memray run --trace-python-allocators -o .profiles/memray_logrecord.py.bin -f --native memray_logrecord.py PYTHONMALLOC=malloc memray run --trace-python-allocators -o .profiles/memray_format_exception.py.bin -f --native memray_format_exception.py PYTHONMALLOC=malloc memray run --trace-python-allocators -o .profiles/memray_logger.py.bin -f --native memray_logger.py PYTHONMALLOC=malloc memray run --trace-python-allocators -o .profiles/memray_litestar.py.bin -f --native 
memray_litestar.py memray flamegraph --leaks -f .profiles/memray_logrecord.py.bin memray flamegraph --leaks -f .profiles/memray_format_exception.py.bin memray flamegraph --leaks -f .profiles/memray_logger.py.bin memray flamegraph --leaks -f .profiles/memray_litestar.py.bin python -m http.server --directory .profiles 8000 clean: rm -rf .profiles snapshot: cp -R .profiles .profiles-snapshot python-picologging-0.9.4/tests/memray/README.md000066400000000000000000000010631467100674700212730ustar00rootroot00000000000000# Memray tests Install memray (only macOS and Linux supported): ```console $ sudo apt-get install libunwind-dev $ sudo apt-get install liblz4-dev $ pip install memray ``` Run memray on each file and start the HTTP server on `localhost:8000`: ```console $ make profiles ``` Once the web server is running, look at the HTML files to identify any leaks. To create a copy of the profiles (e.g before making fixes): ```console $ make snapshot ``` This will copy the `.profiles` folder to `.profiles-snapshot` To cleanup profiles: ```console $ make clean ``` python-picologging-0.9.4/tests/memray/memray_format_exception.py000066400000000000000000000010101467100674700252760ustar00rootroot00000000000000import sys from picologging import Formatter def test_format_exception(): pico_f = Formatter("%(message)s") try: raise Exception("error") except Exception: ei = sys.exc_info() result = pico_f.formatException(ei) assert result.startswith("Traceback (most recent call last):") assert result.endswith( 'test_format_exception\n raise Exception("error")\nException: error' ) if __name__ == "__main__": for _ in range(100_000): test_format_exception() python-picologging-0.9.4/tests/memray/memray_litestar.py000066400000000000000000000014541467100674700235730ustar00rootroot00000000000000from litestar import Litestar, Request, get from litestar.logging import LoggingConfig from litestar.testing import TestClient logging_config = LoggingConfig( loggers={ "app": { "level": "DEBUG", 
"handlers": ["queue_listener"], "propagate": False, } } ) @get("/") def hello_world(request: Request) -> dict[str, str]: """Handler function that returns a greeting dictionary.""" request.logger.info("No results in response") request.logger.debug("doing things...") return {"hello": "world"} app = Litestar( route_handlers=[hello_world], logging_config=logging_config, debug=True, ) if __name__ == "__main__": with TestClient(app=app) as client: for _ in range(100_000): response = client.get("/") python-picologging-0.9.4/tests/memray/memray_logger.py000066400000000000000000000013201467100674700232130ustar00rootroot00000000000000""" Create 100,000 logger instances and run a log test on them """ from io import StringIO import picologging def log(level=picologging.INFO): logger = picologging.Logger("test", level) tmp = StringIO() handler = picologging.StreamHandler(tmp) handler.setLevel(level) formatter = picologging.Formatter( "%(created)f/%(asctime)s %(pathname)s:%(module)s:%(filename)s:%(lineno)d %(funcName)s %(levelno)d %(name)s - %(levelname)s %(process)d %(thread)d- %(message)s" ) handler.setFormatter(formatter) logger.handlers.append(handler) logger.debug("There has been a picologging issue") repr(logger) if __name__ == "__main__": for _ in range(100_000): log() python-picologging-0.9.4/tests/memray/memray_logrecord.py000066400000000000000000000016141467100674700237220ustar00rootroot00000000000000from io import StringIO import picologging def log(level=picologging.INFO): logger = picologging.Logger("test", level) tmp = StringIO() handler = picologging.StreamHandler(tmp) handler.setLevel(level) formatter = picologging.Formatter( "%(created)f/%(asctime)s %(pathname)s:%(module)s:%(filename)s:%(lineno)d %(funcName)s %(levelno)d %(name)s - %(levelname)s %(process)d %(thread)d- %(message)s" ) handler.setFormatter(formatter) logger.handlers.append(handler) for _ in range(100_000): logger.debug("There has been a picologging issue") logger.debug("There has been a 
picologging issue %s %s %s", 1, 2, 3) logger.info("There has been a picologging issue %s %s %s", 1, 2, 3) logger.warning("There has been a picologging issue %s %s %s", 1, 2, 3) assert len(tmp.getvalue()) > 100_000 if __name__ == "__main__": log() python-picologging-0.9.4/tests/memray/memray_logrecord_cls.py000066400000000000000000000006451467100674700245660ustar00rootroot00000000000000import picologging from picologging import LogRecord def log(): for _ in range(100_000): record = LogRecord( "hello", picologging.WARNING, __file__, 123, "bork %s", None, None ) assert record.message is None assert record.getMessage() == "bork %s" assert record.message == "bork %s" assert "name" in record.__dict__ if __name__ == "__main__": log() python-picologging-0.9.4/tests/memray/memray_style.py000066400000000000000000000010071467100674700230760ustar00rootroot00000000000000import picologging def test(): perc = picologging.PercentStyle("%(msg)s %(levelno)d %(name)s") record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "hello", (), None, None, None ) assert perc.format(record) == "hello 20 test" try: perc.format(None) except: pass try: perc.format("") except: pass try: perc.format({}) except: pass if __name__ == "__main__": for _ in range(1000): test() python-picologging-0.9.4/tests/memray/requirements-asgitest.txt000066400000000000000000000000111467100674700251110ustar00rootroot00000000000000litestar python-picologging-0.9.4/tests/profile/000077500000000000000000000000001467100674700201625ustar00rootroot00000000000000python-picologging-0.9.4/tests/profile/profile.py000066400000000000000000000010341467100674700221720ustar00rootroot00000000000000from io import StringIO import picologging def run_profile(level=picologging.DEBUG): logger = picologging.Logger("test", level) tmp = StringIO() handler = picologging.StreamHandler(tmp) handler.setLevel(level) formatter = picologging.Formatter("%(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) 
logger.handlers.append(handler) for _ in range(1_000_000): logger.debug("There has been a picologging issue %s %s %s", 1, 2, 3) assert tmp.getvalue() != "" run_profile() python-picologging-0.9.4/tests/unit/000077500000000000000000000000001467100674700175015ustar00rootroot00000000000000python-picologging-0.9.4/tests/unit/test_bufferinghandler.py000066400000000000000000000026111467100674700244170ustar00rootroot00000000000000import pytest from utils import filter_gc import picologging from picologging.handlers import BufferingHandler, MemoryHandler @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_buffering_handler(): logger = picologging.Logger("test", picologging.DEBUG) handler = BufferingHandler(capacity=1) logger.addHandler(handler) logger.debug("test") handler.close() assert handler.buffer == [] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_memory_handler(tmp_path): log_file = tmp_path / "log.txt" target = picologging.FileHandler(log_file) logger = picologging.Logger("test", picologging.DEBUG) handler = MemoryHandler(capacity=1, target=target) logger.addHandler(handler) logger.debug("test") handler.close() target.close() with open(log_file) as f: assert f.read() == "test\n" assert handler.buffer == [] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_memory_handler_set_target(tmp_path): log_file = tmp_path / "log.txt" target = picologging.FileHandler(log_file) logger = picologging.Logger("test", picologging.DEBUG) handler = MemoryHandler(capacity=1) handler.setTarget(target) logger.addHandler(handler) logger.debug("test") handler.close() target.close() with open(log_file) as f: assert f.read() == "test\n" assert handler.buffer == [] python-picologging-0.9.4/tests/unit/test_config.py000066400000000000000000000224311467100674700223610ustar00rootroot00000000000000import pytest from utils import filter_gc import picologging from picologging.config import dictConfig, valid_ident @pytest.mark.limit_leaks("512B", 
filter_fn=filter_gc) def test_dictconfig(): class TestFilter(picologging.Filter): def __init__(self, param=None): self.param = param def filter(self, record): return True config = { "version": 1, "root": {"handlers": ["console"], "level": "DEBUG"}, "loggers": { "test_config": { "handlers": ["console"], "level": "INFO", "propagate": True, }, }, "filters": { "test_filter": {"()": TestFilter}, "example_filter": {}, }, "formatters": { "test_formatter": {"()": "picologging.Formatter"}, "example_formatter": {"class": "picologging.Formatter"}, "standard": {"format": "%(asctime)s [%(levelname)s] %(name)s: %(message)s"}, }, "handlers": { "console": { "class": "picologging.StreamHandler", "filters": ["test_filter"], "level": picologging.DEBUG, }, "test": { "()": "picologging.StreamHandler", ".": {"level": picologging.DEBUG}, }, "console_formatted": { "class": "picologging.StreamHandler", "formatter": "standard", }, }, } dictConfig(config) root = picologging.getLogger() assert root.name == "root" assert root.level == picologging.DEBUG assert root.handlers[0].name == "console" assert isinstance(root.handlers[0], picologging.StreamHandler) logger = picologging.getLogger("test_config") assert logger.name == "test_config" assert logger.level == picologging.INFO assert logger.handlers[0].name == "console" assert isinstance(logger.handlers[0], picologging.StreamHandler) # Reset root logger picologging.root.handlers = [] picologging.root.setLevel(picologging.WARNING) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_dictconfig_clear_existing_loggers(tmp_path): log_file = tmp_path / "log.txt" handler = picologging.FileHandler(log_file) logger = picologging.getLogger("test_config") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) config = { "version": 1, "loggers": { "test_config": { "handlers": ["console"], "level": "INFO", }, }, "handlers": { "console": { "class": "picologging.StreamHandler", }, }, } dictConfig(config) logger = 
picologging.getLogger("test_config") assert logger.name == "test_config" assert logger.level == picologging.INFO assert len(logger.handlers) == 1 assert logger.handlers[0].name == "console" assert isinstance(logger.handlers[0], picologging.StreamHandler) handler.close() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_dictconfig_config_exceptions(): with pytest.raises(ValueError): dictConfig({}) with pytest.raises(ValueError): dictConfig({"version": 0}) config = { "version": 1, "handlers": { "console": { "class": "picologging.SomeHandler", }, }, } with pytest.raises(ValueError): dictConfig(config) config = { "version": 1, "loggers": { "test_config": {"handlers": ["console"]}, }, } with pytest.raises(ValueError): dictConfig(config) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_config_exception_invalid_filter_for_handler(): config = { "version": 1, "handlers": { "console": { "class": "picologging.StreamHandler", "filters": ["test_filter"], "level": picologging.DEBUG, }, }, } with pytest.raises(ValueError): dictConfig(config) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_dictconfig_incremental_not_supported(): config = {"version": 1, "incremental": True} with pytest.raises(ValueError): dictConfig(config) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_dictconfig_formatters_exception(): config = { "version": 1, "formatters": { "example_formatter": {"class": "picologging.NoFormatter"}, }, } with pytest.raises(ValueError): dictConfig(config) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_dictconfig_filters_exception(): config = { "version": 1, "filters": { "example_filters": {"()": "picologging.NoFilter"}, }, } with pytest.raises(ValueError): dictConfig(config) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_reconfigure_dictconfig_with_child_loggers(): logger = picologging.getLogger("test_config") logger.addHandler(picologging.StreamHandler()) config = { "version": 1, "loggers": 
{ "test_config.module": { "handlers": ["console"], "level": "INFO", }, }, "handlers": { "console": { "class": "picologging.StreamHandler", }, }, } dictConfig(config) logger = picologging.getLogger("test_config.module") assert logger.name == "test_config.module" assert logger.level == picologging.INFO assert len(logger.handlers) == 1 assert logger.handlers[0].name == "console" assert isinstance(logger.handlers[0], picologging.StreamHandler) config = { "version": 1, "loggers": { "test_config": { "handlers": ["console"], "level": "INFO", }, }, "handlers": { "console": { "class": "picologging.StreamHandler", }, }, } dictConfig(config) logger = picologging.getLogger("test_config") assert logger.name == "test_config" assert logger.level == picologging.INFO assert len(logger.handlers) == 1 assert logger.handlers[0].name == "console" assert isinstance(logger.handlers[0], picologging.StreamHandler) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_valid_ident(): assert valid_ident("test") with pytest.raises(ValueError): valid_ident("test.test") with pytest.raises(ValueError): valid_ident("test test") with pytest.raises(ValueError): valid_ident("test-test") @pytest.mark.limit_leaks("200B", filter_fn=filter_gc) def test_configure_with_filters(): config = { "version": 1, "loggers": { "test_config": { "handlers": ["console"], "level": "INFO", }, }, "formatters": { "standard": { "format": "%(asctime)s %(levelname)s %(name)s::%(message)s", "validate": True, }, }, "handlers": { "console": { "class": "picologging.StreamHandler", "filters": ["test_filter"], "formatter": "standard", }, }, "filters": { "test_filter": { "()": "picologging.Filter", "name": "test_filter", }, }, } dictConfig(config) logger = picologging.getLogger("test_config") assert logger.name == "test_config" assert logger.level == picologging.INFO assert len(logger.handlers) == 1 assert logger.handlers[0].name == "console" assert isinstance(logger.handlers[0], picologging.StreamHandler) assert 
len(logger.handlers[0].filters) == 1 assert logger.handlers[0].filters[0].name == "test_filter" assert isinstance(logger.handlers[0].filters[0], picologging.Filter) assert ( logger.handlers[0].formatter._fmt == "%(asctime)s %(levelname)s %(name)s::%(message)s" ) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_configure_with_non_defined_handlers(): config = { "version": 1, "loggers": { "test_config": { "handlers": ["potato"], "level": "INFO", }, }, } with pytest.raises(ValueError): dictConfig(config) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_config_existing_disabled_logger_90195(): # See gh-90195 config = { "version": 1, "disable_existing_loggers": False, "handlers": { "console": { "level": "DEBUG", "class": "logging.StreamHandler", }, }, "loggers": {"a": {"level": "DEBUG", "handlers": ["console"]}}, } logger = picologging.getLogger("a") assert logger.disabled is False dictConfig(config) assert logger.disabled is False # Should disable all loggers ... dictConfig({"version": 1}) assert logger.disabled is True del config["disable_existing_loggers"] dictConfig(config) # Logger should be enabled, since explicitly mentioned assert logger.disabled is False python-picologging-0.9.4/tests/unit/test_filehandler.py000066400000000000000000000245341467100674700233770ustar00rootroot00000000000000import os import platform import time from datetime import datetime, timedelta import pytest from utils import filter_gc import picologging from picologging.handlers import ( RotatingFileHandler, TimedRotatingFileHandler, WatchedFileHandler, ) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_filehandler(tmp_path): log_file = tmp_path / "log.txt" handler = picologging.FileHandler(log_file) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.close() with open(log_file) as f: assert f.read() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def 
test_filehandler_delay(tmp_path): log_file = tmp_path / "log.txt" handler = picologging.FileHandler(log_file, delay=True) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.close() with open(log_file) as f: assert f.read() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.skipif(platform.system() == "Windows", reason="Not supported on Windows.") def test_watchedfilehandler(tmp_path): log_file = tmp_path / "log.txt" handler = WatchedFileHandler(log_file) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.close() with open(log_file) as f: assert f.read() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.skipif(platform.system() == "Windows", reason="Not supported on Windows.") def test_watchedfilehandler_file_changed(tmp_path): log_file = tmp_path / "log.txt" handler = WatchedFileHandler(log_file) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) os.remove(log_file) with open(log_file, "w"): ... 
logger.warning("test") handler.close() with open(log_file) as f: assert f.read() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.skipif(platform.system() == "Windows", reason="Not supported on Windows.") def test_watchedfilehandler_file_removed(tmp_path): log_file = tmp_path / "log.txt" handler = WatchedFileHandler(log_file) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) os.remove(log_file) logger.warning("test") handler.close() with open(log_file) as f: assert f.read() == "test\n" @pytest.mark.limit_leaks("300B", filter_fn=filter_gc) def test_rotatingfilehandler(tmp_path): log_file = tmp_path / "log.txt" handler = RotatingFileHandler(log_file, maxBytes=1, backupCount=2) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) for _ in range(5): logger.warning("test") handler.close() with open(log_file, encoding="utf-8") as f: assert f.read() == "test\n" for i in range(1, 3): log_file = tmp_path / f"log.txt.{i}" with open(log_file, encoding="utf-8") as f: assert f.read() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_rotatingfilehandler_avoids_non_regular_files(tmp_path, monkeypatch): log_file = tmp_path / "log.txt" handler = RotatingFileHandler(log_file, maxBytes=1, backupCount=2) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") monkeypatch.setattr(os.path, "isfile", lambda _: False) logger.warning("test") handler.close() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_rotatingfilehandler_without_maxbytes(tmp_path): log_file = tmp_path / "log.txt" handler = RotatingFileHandler(log_file) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.close() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def 
test_baserotatinghandler_callable_rotator(tmp_path): log_file = tmp_path / "log.txt" handler = RotatingFileHandler(log_file, maxBytes=1, backupCount=1) handler.rotator = lambda src, dst: os.rename(src, dst) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") logger.warning("test") handler.close() assert sorted(os.listdir(tmp_path)) == ["log.txt", "log.txt.1"] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_baserotatinghandler_callable_namer(tmp_path): log_file = tmp_path / "log.txt" handler = RotatingFileHandler(log_file, maxBytes=1, backupCount=1) handler.namer = lambda name: name + ".5" logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") logger.warning("test") handler.close() assert sorted(os.listdir(tmp_path)) == ["log.txt", "log.txt.1.5"] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_filehandler_repr(tmp_path): log_file = tmp_path / "log.txt" handler = picologging.FileHandler(log_file) assert repr(handler) == f"" handler.close() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.parametrize("utc", [False, True]) def test_timed_rotatingfilehandler_rollover(tmp_path, utc): log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, when="S", backupCount=1, utc=utc) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.rollover_at = time.time() - 1 logger.warning("test") handler.close() files = os.listdir(tmp_path) assert len(files) == 2 for file_name in files: with open(tmp_path / file_name) as file: assert file.read() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_rollover_removes_old_files(tmp_path): with open(tmp_path / "log.txt.1970-11-01_00-00-00", "w"): ... 
log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, when="S", backupCount=1) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.rollover_at = time.time() - 1 logger.warning("test") handler.close() assert len(os.listdir(tmp_path)) == 2 @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_rollover_keeps_non_related_files(tmp_path): with open(tmp_path / "normal_file.txt", "w"): ... log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, when="S", backupCount=1) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") handler.rollover_at = time.time() - 1 logger.warning("test") handler.close() assert len(os.listdir(tmp_path)) == 3 @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_rollover_removes_existing_log(tmp_path, monkeypatch): existing_log_file = tmp_path / "log.txt.2022-07-01_16-00-00" with open(existing_log_file, "w"): ... 
log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, when="S", backupCount=1) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") monkeypatch.setattr(handler, "rotation_filename", lambda _: existing_log_file) handler.rollover_at = time.time() - 1 logger.warning("test") handler.close() assert len(os.listdir(tmp_path)) == 2 @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_non_existing_file(tmp_path, monkeypatch): monkeypatch.setattr(os.path, "exists", lambda _: False) log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, when="S", backupCount=1) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) handler.close() @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_when_intervals(tmp_path): when_interval = [ ("S", 1), ("M", 60), ("H", 60 * 60), ("D", 60 * 60 * 24), ("MIDNIGHT", 60 * 60 * 24), ("W1", 60 * 60 * 24 * 7), ] log_file = tmp_path / "log.txt" for when, interval in when_interval: handler = TimedRotatingFileHandler(log_file, when=when) assert handler.interval == interval handler.close() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_invalid_when(tmp_path): log_file = tmp_path / "log.txt" with pytest.raises(ValueError): TimedRotatingFileHandler(log_file, when="W", delay=True) with pytest.raises(ValueError): TimedRotatingFileHandler(log_file, when="W7", delay=True) with pytest.raises(ValueError): TimedRotatingFileHandler(log_file, when="X", delay=True) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_utc(tmp_path): log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, when="MIDNIGHT", utc=True) handler.close() assert handler.rollover_at > time.time() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def 
test_timed_rotatingfilehandler_atime(tmp_path): log_file = tmp_path / "log.txt" at_time = (datetime.now() - timedelta(hours=1)).time() handler = TimedRotatingFileHandler(log_file, when="MIDNIGHT", atTime=at_time) handler.close() assert handler.rollover_at > time.time() @pytest.mark.limit_leaks("200B", filter_fn=filter_gc) def test_timed_rotatingfilehandler_avoids_non_regular_files(tmp_path, monkeypatch): log_file = tmp_path / "log.txt" handler = TimedRotatingFileHandler(log_file, backupCount=1) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.warning("test") monkeypatch.setattr(os.path, "isfile", lambda _: False) logger.warning("test") handler.close() python-picologging-0.9.4/tests/unit/test_formatter.py000066400000000000000000000235401467100674700231210ustar00rootroot00000000000000import datetime import io import logging import sys import traceback from logging import Formatter as LoggingFormatter import pytest from utils import filter_gc from picologging import Formatter, LogRecord @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_default_fmt(): f = Formatter() assert f.datefmt is None record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) s = f.format(record) assert s == "bork bork bork" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_custom_fmt(): f = Formatter("%(name)s %(levelname)s %(message)s") assert f.datefmt is None record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) s = f.format(record) assert s == "hello WARNING bork bork bork" assert f.usesTime() is False assert f.formatMessage(record) == s @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_default_fmt_against_builtin(): pico_f = Formatter() logging_f = logging.Formatter() assert pico_f.datefmt == logging_f.datefmt pico_record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), 
None ) logging_record = logging.LogRecord( "hello", logging.WARNING, "/serv/", 123, "bork bork bork", (), None ) assert pico_f.format(pico_record) == logging_f.format(logging_record) assert pico_f.format(pico_record) == logging_f.format(pico_record) assert pico_f.formatMessage(pico_record) == logging_f.formatMessage(pico_record) @pytest.mark.limit_leaks("256B", filter_fn=filter_gc) def test_formatter_default_fmt_exc_info_against_builtin(): pico_f = Formatter() logging_f = logging.Formatter() try: raise Exception("error") except Exception: exc_info = sys.exc_info() pico_record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), exc_info, "", "" ) logging_record = logging.LogRecord( "hello", logging.WARNING, "/serv/", 123, "bork bork bork", (), exc_info, "", "" ) assert pico_f.format(pico_record) == logging_f.format(logging_record) assert pico_f.format(pico_record) == logging_f.format(pico_record) assert pico_f.formatMessage(pico_record) == logging_f.formatMessage(pico_record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_custom_datefmt(): f = Formatter("%(name)s %(levelname)s %(message)s", datefmt="%Y-%m-%d") assert f.datefmt == "%Y-%m-%d" record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) s = f.format(record) assert s == "hello WARNING bork bork bork" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_explicit_none_datefmt_style(): f = Formatter("%(name)s %(levelname)s %(message)s", None, "%") assert f.datefmt is None record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) s = f.format(record) assert s == "hello WARNING bork bork bork" possible_format_strings = [ "%(message)s", "%(message)s is a potato", "%(name)s", "%(msg)s", "%(args)s", "%(levelname)s", "%(levelno)s", "%(pathname)s", "%(filename)s", "%(module)s", "%(funcName)s", "%(lineno)d", "%(threadName)s", "%(process)d", "%(processName)s", "%(stack_info)s", 
"%(exc_info)s", "%(exc_text)s", ] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.parametrize("field", possible_format_strings) def test_format_field(field): pico_f = Formatter(field) log_f = logging.Formatter(field) record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) assert pico_f.format(record) == log_f.format(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_format_time(): pico_f = Formatter("%(msecs)d %(relativeCreated)d") record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) assert pico_f.format(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_asctime_field(): pico_f = Formatter("%(asctime)s") record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) assert pico_f.format(record) assert pico_f.usesTime() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_asctime_field_buffer(): pico_f = Formatter("%(asctime)s") record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) logging_f = LoggingFormatter("%(asctime)s") assert pico_f.format(record).split(",")[0] == logging_f.format(record).split(",")[0] assert pico_f.usesTime() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_record_with_stack_info(): pico_f = Formatter("%(message)s") record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None, None, "hello", ) assert pico_f.format(record) == "bork bork bork\nhello" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_record_with_non_str_sstack_info(): pico_f = Formatter("%(message)s") record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None, None, ["hello", "world"], ) assert pico_f.format(record) == "bork bork bork\n['hello', 'world']" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_format_stack(): pico_f = Formatter("%(message)s") assert pico_f.formatStack([1, 2, 
3]) == [1, 2, 3] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_delete_formatter(): pico_f = Formatter("%(message)s") del pico_f @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_bad_init_args(): with pytest.raises(TypeError): Formatter(dog="good boy") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_bad_style(): with pytest.raises(ValueError): Formatter(style="!") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_bad_style_type(): with pytest.raises(TypeError): Formatter(style=123) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_bad_fmt_type(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) with pytest.raises((TypeError, ValueError)): f = Formatter(fmt=123) assert f.format(record) == "bork bork bork" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_with_validate_flag_and_invalid_fmt(): f = Formatter(fmt="%(message ", validate=True) record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) assert f.format(record) == "%(message " @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_datefmt_bad_type(): with pytest.raises(TypeError): Formatter(datefmt=123) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_format_with_custom_datefmt(): actual_date = datetime.datetime.now().strftime("%Y-%m-%d") f = Formatter("%(name)s %(levelname)s %(message)s %(asctime)s", datefmt="%Y-%m-%d") assert f.datefmt == "%Y-%m-%d" record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) s = f.format(record) assert s == f"hello WARNING bork bork bork {actual_date}" assert f.usesTime() is True assert f.formatMessage(record) == s @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_repr(): f = Formatter("%(message)s") assert repr(f) == "" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def 
test_exc_info_invalid_type(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), (1, 2, 3) ) f = Formatter() with pytest.raises(AttributeError): f.format(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_exc_info_invalid_value_types(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), [1, 2, 3] ) f = Formatter() with pytest.raises(TypeError): f.format(record) # TODO #41 : test defaults are propagating to string formatters @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_formatter_templates(): # Not supported, so check it doesn't just crash with pytest.raises(NotImplementedError): Formatter("%(message)s", style="$") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_format_exception(): pico_f = Formatter("%(message)s") try: raise Exception("error") except Exception: ei = sys.exc_info() result = pico_f.formatException(ei) assert result.startswith("Traceback (most recent call last):") assert result.endswith( 'test_format_exception\n raise Exception("error")\nException: error' ) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_override_format_exception(): class CustomFormatter(Formatter): def formatException(self, ei) -> str: sio = io.StringIO() tb = ei[2] traceback.print_exception(ei[0], ei[1], tb, None, sio) s = sio.getvalue() sio.close() if s[-1:] == "\n": s = s[:-1] return "Custom " + s formatter = CustomFormatter("%(message)s") try: raise Exception("error") except Exception: ei = sys.exc_info() result = formatter.formatException(ei) assert result.startswith("Custom Traceback (most recent call last):") assert result.endswith( 'test_override_format_exception\n raise Exception("error")\nException: error' ) python-picologging-0.9.4/tests/unit/test_handler.py000066400000000000000000000101251467100674700225260ustar00rootroot00000000000000import pytest from utils import filter_gc import picologging @pytest.mark.limit_leaks("192B", 
filter_fn=filter_gc) def test_basic_handler(): handler = picologging.Handler() record = picologging.LogRecord( "test", picologging.INFO, "test", 1, "test", (), None, None, None ) with pytest.raises(NotImplementedError): handler.handle(record) with pytest.raises(NotImplementedError): handler.emit(None) @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_custom_handler(): class CustomHandler(picologging.Handler): def __init__(self): super().__init__() self.records = [] def emit(self, record): self.records.append(record) handler = CustomHandler() record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "test", (), None, None, None ) formatter = picologging.Formatter("%(message)s") handler.setFormatter(formatter) handler.handle(record) assert len(handler.records) == 1 assert handler.records[0] == record @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_delete_handler(): handler = picologging.Handler() del handler @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_add_acquire_release(): handler = picologging.Handler() handler.acquire() assert handler.release() is None @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_init_with_name(): handler = picologging.Handler(name="test") assert handler.name == "test" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_init_with_level(): handler = picologging.Handler(level=picologging.DEBUG) assert handler.level == picologging.DEBUG @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_get_set_name(): handler = picologging.Handler(name="test") assert handler.get_name() == "test" handler.set_name("foo") assert handler.name == "foo" assert handler.get_name() == "foo" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_flush(): handler = picologging.Handler() assert not handler.flush() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_close(): handler = picologging.Handler() assert not handler.close() 
@pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_createLock(): handler = picologging.Handler() assert not handler.createLock() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_filtered_out(): def filter_out(f): return False class CustomHandler(picologging.Handler): def __init__(self): super().__init__() self.records = [] def emit(self, record): raise Exception("This should not be called") handler = CustomHandler() handler.addFilter(filter_out) record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "test", (), None, None, None ) assert not handler.handle(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_set_level_nonint(): handler = picologging.Handler() with pytest.raises(TypeError): handler.setLevel("potato") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_custom_formatter(): class CustomFormatter: def format(self, record): return "foo" handler = picologging.Handler() handler.setFormatter(CustomFormatter()) record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "test", (), None, None, None ) assert handler.format(record) == "foo" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_handle_error(): handler = picologging.Handler() record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "test", (), None, None, None ) assert not handler.handleError(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_handler_repr(): handler = picologging.Handler() assert repr(handler) == "" handler = picologging.Handler(level=picologging.WARNING) assert repr(handler) == "" python-picologging-0.9.4/tests/unit/test_logger.py000066400000000000000000000607251467100674700224030ustar00rootroot00000000000000import io import logging import uuid import pytest from utils import filter_gc import picologging @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_logger_attributes(): logger = picologging.Logger("test") assert logger.name == "test" 
assert logger.level == logging.NOTSET assert logger.parent is None assert logger.propagate is True assert logger.handlers == [] assert logger.disabled is False assert logger.propagate is True level_names = [ "DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL", "NOTSET", ] levels = [ logging.DEBUG, logging.INFO, logging.WARNING, logging.ERROR, logging.CRITICAL, logging.NOTSET, ] @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) @pytest.mark.parametrize("level", levels) def test_logging_custom_level(level): logger = picologging.Logger("test", level) assert logger.level == level @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_custom_logger_has_no_parent(): logger = picologging.Logger("test") assert logger.parent is None @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_remove_non_existent_handler(): logger = picologging.Logger("test") assert logger.removeHandler("handler") is None @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_set_level(): logger = picologging.Logger("test") logger.setLevel(logging.DEBUG) assert logger.level == logging.DEBUG @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_set_level_from_string(): logger = picologging.Logger("test") logger.setLevel("DEBUG") assert logger.level == logging.DEBUG logger.setLevel("INFO") assert logger.level == logging.INFO logger.setLevel("WARNING") assert logger.level == logging.WARNING logger.setLevel("ERROR") assert logger.level == logging.ERROR logger.setLevel("CRITICAL") assert logger.level == logging.CRITICAL logger.setLevel("NOTSET") assert logger.level == logging.NOTSET @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_disabled_logger(): logger = picologging.Logger("test", logging.DEBUG) logger.disabled = True stream = io.StringIO() logger.handlers.append(picologging.StreamHandler(stream)) assert logger.debug("Hello World") is None result = stream.getvalue() assert result == "" ex = Exception("arghhh!!") logger.exception("Hello World", ex) 
result = stream.getvalue() assert result == "" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_logger_with_logging_handler(): logger = picologging.Logger("test", logging.DEBUG) stream = io.StringIO() handler = logging.StreamHandler(stream) handler.setFormatter(logging.Formatter("%(message)s")) logger.addHandler(handler) assert logger.debug("Hello World") is None result = stream.getvalue() assert result == "Hello World\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_get_effective_level(): logger = picologging.Logger("test") parent = picologging.Logger("parent", logging.DEBUG) logger.parent = parent assert logger.getEffectiveLevel() == logging.DEBUG assert logger.level == logging.NOTSET logger.setLevel(logging.WARNING) assert logger.getEffectiveLevel() == logging.WARNING @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_dodgy_parents(): logger = picologging.Logger("test") parent = "potato" with pytest.raises(TypeError): logger.parent = parent with pytest.raises(TypeError): logger.parent = logging.getLogger("test") logger.getEffectiveLevel() with pytest.raises(TypeError): del logger.parent @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_add_filter(): logger = picologging.Logger("test") filter = picologging.Filter("filter1") logger.addFilter(filter) assert logger.filters == [filter] filter2 = picologging.Filter("filter2") logger.addFilter(filter2) assert logger.filters == [filter, filter2] @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_remove_filter(): logger = picologging.Logger("test") filter = picologging.Filter("filter1") logger.addFilter(filter) assert logger.filters == [filter] logger.removeFilter(filter) assert logger.filters == [] logger.removeFilter(filter) assert logger.filters == [] @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_delete_filter(): filter = picologging.Filter("filter1") del filter @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def 
test_filterer_direct_type(): filterable = picologging.Filterer() assert filterable.filters == [] filter = picologging.Filter("filter1") filterable.addFilter(filter) assert filterable.filters == [filter] filter2 = picologging.Filter("filter2") filterable.addFilter(filter2) assert filterable.filters == [filter, filter2] filterable.removeFilter(filter) assert filterable.filters == [filter2] filterable.removeFilter(filter2) assert filterable.filters == [] del filterable @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_no_filter(): logger = picologging.Logger("test") record = picologging.LogRecord("test", logging.INFO, "test", 1, "test", (), {}) assert logger.filter(record) is True @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_filter_record(): logger = picologging.Logger("test") filter = picologging.Filter("hello") logger.addFilter(filter) record = picologging.LogRecord("hello", logging.INFO, "test", 1, "test", (), {}) record2 = picologging.LogRecord("goodbye", logging.INFO, "test", 1, "test", (), {}) assert logger.filter(record) is True assert logger.filter(record2) is False logger.removeFilter(filter) assert logger.filter(record) is True assert logger.filter(record2) is True @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_filter_callable(): logger = picologging.Logger("test") def filter(record): return record.name == "hello" logger.addFilter(filter) record = picologging.LogRecord("hello", logging.INFO, "test", 1, "test", (), {}) assert logger.filter(record) is True record = picologging.LogRecord("goodbye", logging.INFO, "test", 1, "test", (), {}) assert logger.filter(record) is False @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_log_debug(): logger = picologging.Logger("test", logging.DEBUG) stream = io.StringIO() handler = picologging.StreamHandler(stream) handler.setFormatter(picologging.Formatter("%(message)s")) logger.addHandler(handler) assert logger.debug("Hello World") is None result = 
stream.getvalue() assert result == "Hello World\n" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_log_debug_info_level_logger(): logger = picologging.Logger("test", logging.INFO) stream = io.StringIO() logger.handlers.append(picologging.StreamHandler(stream)) assert logger.debug("Hello World") is None result = stream.getvalue() assert result == "" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_log_debug_info_level_logger_logging_handler(): logger = picologging.Logger("test", logging.INFO) stream = io.StringIO() logger.handlers.append(logging.StreamHandler(stream)) assert logger.debug("Hello World") is None result = stream.getvalue() assert result == "" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) @pytest.mark.parametrize("level", levels) def test_log_log(level): logger = picologging.Logger("test", level) stream = io.StringIO() handler = picologging.StreamHandler(stream) handler.setFormatter(picologging.Formatter("%(message)s")) logger.addHandler(handler) assert logger.log(level, "Hello World") is None result = stream.getvalue() assert result == "Hello World\n" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_logger_with_explicit_level(capsys): logger = picologging.Logger("test", logging.DEBUG) tmp = io.StringIO() handler = picologging.StreamHandler(tmp) handler.setLevel(logging.DEBUG) formatter = picologging.Formatter("%(name)s - %(levelname)s - %(message)s") handler.setFormatter(formatter) logger.handlers.append(handler) logger.debug("There has been a picologging issue") result = tmp.getvalue() assert result == "test - DEBUG - There has been a picologging issue\n" cap = capsys.readouterr() assert cap.out == "" assert cap.err == "" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_exception_capture(): logger = picologging.Logger("test", logging.DEBUG) tmp = io.StringIO() handler = picologging.StreamHandler(tmp) logger.addHandler(handler) try: 1 / 0 except ZeroDivisionError: logger.exception("bork") 
result = tmp.getvalue() assert "bork" in result assert "ZeroDivisionError: division by zero" in result @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_getlogger_no_args(): logger = logging.getLogger() assert logger.name == "root" assert logger.level == logging.WARNING assert logger.parent is None picologger = picologging.getLogger() assert picologger.name == "root" assert picologger.level == logging.WARNING assert picologger.parent is None @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_logger_init_bad_args(): with pytest.raises(TypeError): picologging.Logger("goo", 10, dog=1) with pytest.raises(TypeError): picologging.Logger(name="test", level="potato") @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) @pytest.mark.parametrize("level", levels) def test_logger_repr(level): logger = picologging.Logger("test", level) assert repr(logger) == f"" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_logger_repr_effective_level(): logger = picologging.Logger("test") logger.parent = picologging.Logger("parent", picologging.WARNING) assert repr(logger) == "" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_logger_repr_invalid_level(): logger = picologging.Logger("test", level=100) assert repr(logger) == "" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_set_level_bad_type(): logger = picologging.Logger("goo", picologging.DEBUG) with pytest.raises(TypeError): logger.setLevel(3.14) @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_set_level_invalid_name(): logger = picologging.Logger("goo", picologging.DEBUG) with pytest.raises(ValueError): logger.setLevel("POTATO") @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_add_remove_handlers(): logger = picologging.Logger("goo", picologging.DEBUG) assert logger.handlers == [] test_handler = picologging.Handler() logger.addHandler(test_handler) # add it twice should have no effect logger.addHandler(test_handler) assert 
len(logger.handlers) == 1 assert test_handler in logger.handlers logger.removeHandler(test_handler) assert test_handler not in logger.handlers assert logger.handlers == [] @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) @pytest.mark.parametrize("level_config", levels) def test_log_and_handle(level_config): logger = picologging.Logger("test", level=level_config) tmp = io.StringIO() logger.addHandler(picologging.StreamHandler(tmp)) assert not logger.info("info_message") assert not logger.debug("debug_message") assert not logger.warning("warning_message") assert not logger.error("error_message") assert not logger.fatal("fatal_message") assert not logger.critical("critical_message") assert not logger.log(level_config, "log_message") tmp_value = tmp.getvalue() if level_config <= picologging.DEBUG and level_config != picologging.NOTSET: assert "debug_message" in tmp_value else: assert "debug_message" not in tmp_value if level_config <= picologging.INFO and level_config != picologging.NOTSET: assert "info_message" in tmp_value else: assert "info_message" not in tmp_value if level_config <= picologging.WARNING and level_config != picologging.NOTSET: assert "warning_message" in tmp_value else: assert "warning_message" not in tmp_value if level_config <= picologging.ERROR and level_config != picologging.NOTSET: assert "error_message" in tmp_value else: assert "error_message" not in tmp_value if level_config <= picologging.FATAL and level_config != picologging.NOTSET: assert "fatal_message" in tmp_value else: assert "fatal_message" not in tmp_value if level_config <= picologging.CRITICAL and level_config != picologging.NOTSET: assert "critical_message" in tmp_value else: assert "critical_message" not in tmp_value assert "log_message" in tmp_value @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_log_xx_bad_arguments(): logger = picologging.Logger("test", level=picologging.DEBUG) with pytest.raises(TypeError): logger.info() with pytest.raises(TypeError): 
logger.debug() with pytest.raises(TypeError): logger.warning() with pytest.raises(TypeError): logger.error() with pytest.raises(TypeError): logger.fatal() with pytest.raises(TypeError): logger.critical() @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_log_bad_arguments(): logger = picologging.Logger("test") with pytest.raises(TypeError): logger.log("potato", "message") with pytest.raises(TypeError): logger.log() @pytest.mark.limit_leaks("256B", filter_fn=filter_gc) def test_notset_parent_level_match(): logger_child = picologging.Logger("child", picologging.NOTSET) logger_parent = picologging.Logger("parent", picologging.DEBUG) logger_child.parent = logger_parent parent_io = io.StringIO() child_io = io.StringIO() logger_child.addHandler(picologging.StreamHandler(child_io)) logger_parent.addHandler(picologging.StreamHandler(parent_io)) logger_child.info("child message") logger_parent.info("parent message") parent_value = parent_io.getvalue() child_value = child_io.getvalue() assert "child message" in child_value assert "child message" in parent_value assert "parent message" in parent_value assert "parent message" not in child_value @pytest.mark.limit_leaks("256B", filter_fn=filter_gc) def test_error_parent_level(): logger_child = picologging.Logger("child", picologging.WARNING) logger_parent = picologging.Logger("parent", picologging.ERROR) logger_child.parent = logger_parent parent_io = io.StringIO() child_io = io.StringIO() logger_child.addHandler(picologging.StreamHandler(child_io)) logger_parent.addHandler(picologging.StreamHandler(parent_io)) logger_child.info("info message") logger_child.warning("warning message") logger_child.error("error message") parent_value = parent_io.getvalue() child_value = child_io.getvalue() assert "info message" not in child_value assert "info message" not in parent_value assert "warning message" in child_value assert "warning message" in parent_value assert "error message" in child_value assert "error message" in 
parent_value @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_nested_frame_stack(): logger = picologging.Logger("test", level=picologging.DEBUG) tmp = io.StringIO() logger.addHandler(picologging.StreamHandler(tmp)) def f(): def g(): logger.info("message", stack_info=True) g() f() result = tmp.getvalue() assert "message" in result assert " in g\n" in result assert " in f\n" in result @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_exception_object_as_exc_info(): e = Exception("arghhh!!") logger = picologging.Logger("test", level=picologging.DEBUG) tmp = io.StringIO() logger.addHandler(picologging.StreamHandler(tmp)) logger.info("message", exc_info=e) result = tmp.getvalue() assert "message" in result assert "arghhh!!" in result @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_logger_setlevel_resets_other_levels(): stream = io.StringIO() handler = picologging.StreamHandler(stream) logger = picologging.getLogger("test") logger.addHandler(handler) logger.setLevel(picologging.WARNING) logger.debug("test") assert stream.getvalue() == "" logger.warning("test") assert stream.getvalue() == "test\n" logger.setLevel(picologging.ERROR) logger.warning("test") assert stream.getvalue() == "test\n" logger.error("test") assert stream.getvalue() == "test\ntest\n" @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_getlogger_root_level(): # Check that the root logger defaults to WARNING logger = picologging.getLogger() assert logger.getEffectiveLevel() == picologging.WARNING @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_getlogger_nonroot_levels(): """ Check that descendant loggers get the root level on construction. 
Before: | Logger | Level | Effective Level | |--------------|-----------|-----------------| | root | 30 | 30 | After: | child | 0 | 30 | | grandchild | 0 | 30 | """ child_name = str(uuid.uuid4()) child_logger = picologging.getLogger(str(uuid.uuid4())) assert child_logger.level == picologging.NOTSET assert child_logger.getEffectiveLevel() == picologging.WARNING grandchild_name = f"{child_name}.str(uuid.uuid4())" grandchild_logger = picologging.getLogger(grandchild_name) assert grandchild_logger.level == picologging.NOTSET assert grandchild_logger.getEffectiveLevel() == picologging.WARNING @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_getlogger_parentchild_levels(): """ Check interaction of setLevel with logger hierarchy Before: | Logger | Level | Effective Level | |--------------|-----------|-----------------| | root | 30 | 30 | | parent | 0 | 30 | After parent.setLevel(INFO): | parent | 20 | 20 | After construction of child logger: | child | 0 | 20 | After child.setLevel(WARNING): | child | 30 | 30 | """ parent_name = str(uuid.uuid4()) parent_logger = picologging.getLogger(parent_name) assert parent_logger.getEffectiveLevel() == picologging.WARNING parent_logger.setLevel(picologging.INFO) assert parent_logger.getEffectiveLevel() == picologging.INFO child_name = f"{parent_name}.{uuid.uuid4()}" child_logger = picologging.getLogger(child_name) assert child_logger.getEffectiveLevel() == picologging.INFO child_logger.setLevel(picologging.WARNING) assert child_logger.getEffectiveLevel() == picologging.WARNING @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_getlogger_setlevel_after(): """ Check for setting parent level after child construction Before: | Logger | Level | Effective Level | |--------------|-----------|-----------------| | root | 30 | 30 | | parent | 0 | 30 | After parent.setLevel(INFO): | parent | 20 | 20 | After construction of child logger: | child | 0 | 20 | After parent.setLevel(DEBUG): | parent | 10 | 10 | | child | 0 | 
10 | """ parent_name = str(uuid.uuid4()) parent_logger = picologging.getLogger(parent_name) parent_logger.setLevel(picologging.WARNING) child_name = f"{parent_name}.{uuid.uuid4()}" child_logger = picologging.getLogger(child_name) parent_logger.setLevel(picologging.DEBUG) assert child_logger.getEffectiveLevel() == picologging.DEBUG @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_getlogger_setlevel_after_multiple_children(): """ Check for setting parent level after child construction Before: | Logger | Level | Effective Level | |--------------|-----------|-----------------| | root | 30 | 30 | | parent | 0 | 30 | After parent.setLevel(WARNING): | parent | 30 | 30 | After construction of child loggers: | child1 | 0 | 30 | | child2 | 0 | 30 | After parent.setLevel(DEBUG): | parent | 10 | 10 | | child1 | 0 | 10 | | child2 | 0 | 10 | """ parent_name = str(uuid.uuid4()) parent_logger = picologging.getLogger(parent_name) parent_logger.setLevel(picologging.WARNING) child1_name = f"{parent_name}.{uuid.uuid4()}" child2_name = f"{parent_name}.{uuid.uuid4()}" child1_logger = picologging.getLogger(child1_name) child2_logger = picologging.getLogger(child2_name) parent_logger.setLevel(picologging.DEBUG) assert child1_logger.getEffectiveLevel() == picologging.DEBUG assert child2_logger.getEffectiveLevel() == picologging.DEBUG @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_getlogger_setlevel_message_handled(): """ Check for child creation before parent creation and appropriate handling of messages. 
Before: | Logger | Level | Effective Level | |--------------|-----------|-----------------| | root | 30 | 30 | | child | 0 | 30 | After construction of parent logger: | parent | 0 | 30 | | child | 0 | 30 | After parent.setLevel(DEBUG): | parent | 10 | 10 | | child | 0 | 10 | """ parent_name = str(uuid.uuid4()) child_name = f"{parent_name}.{uuid.uuid4()}" child_logger = picologging.getLogger(child_name) assert child_logger.level == picologging.NOTSET assert child_logger.getEffectiveLevel() == picologging.WARNING parent_logger = picologging.getLogger(parent_name) stream = io.StringIO() handler = picologging.StreamHandler(stream) child_logger.addHandler(handler) child_logger.log(picologging.DEBUG, "Hello World") assert stream.getvalue() == "" parent_logger.setLevel(picologging.DEBUG) assert child_logger.getEffectiveLevel() == picologging.DEBUG child_logger.log(picologging.DEBUG, "Hello World") assert stream.getvalue() == "Hello World\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_getlogger_with_placeholder_parent(): # Logging levels when some parent does not exist yet. 
stream = io.StringIO() handler = picologging.StreamHandler(stream) top_logger = picologging.getLogger("A") top_logger.addHandler(handler) bottom1_logger = picologging.getLogger("A.B.C") bottom2_logger = picologging.getLogger("A.B.D") top_logger.setLevel(picologging.INFO) assert top_logger.getEffectiveLevel() == picologging.INFO assert bottom1_logger.getEffectiveLevel() == picologging.INFO assert bottom2_logger.getEffectiveLevel() == picologging.INFO # These logs should be handled top_logger.log(picologging.WARN, "TLW") top_logger.info("TLI") bottom1_logger.log(picologging.WARN, "BL1W") bottom1_logger.info("BL1I") bottom2_logger.log(picologging.WARN, "BL2W") bottom2_logger.info("BL2I") # These should not top_logger.log(picologging.DEBUG, "TLD") bottom1_logger.debug("BLD") assert stream.getvalue() == "TLW\nTLI\nBL1W\nBL1I\nBL2W\nBL2I\n" # Now breathe life into the placeholder middle_logger = picologging.getLogger("A.B") assert middle_logger.getEffectiveLevel() == picologging.INFO @pytest.mark.limit_leaks("128B", filter_fn=filter_gc) def test_is_enabled_for(): logger = picologging.Logger("test") logger.setLevel(logging.INFO) assert logger.isEnabledFor(logging.INFO) is True assert logger.isEnabledFor(logging.WARNING) is True assert logger.isEnabledFor(logging.DEBUG) is False with pytest.raises(TypeError): logger.isEnabledFor("INFO") python-picologging-0.9.4/tests/unit/test_logrecord.py000066400000000000000000000142401467100674700230730ustar00rootroot00000000000000import copy import logging import os import threading import pytest from utils import filter_gc import picologging from picologging import LogRecord @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_standard(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork bork bork", (), None ) assert record.name == "hello" assert record.msg == "bork bork bork" assert record.levelno == logging.WARNING assert record.levelname == "WARNING" assert record.pathname == __file__ assert 
record.module == "test_logrecord" assert record.filename == "test_logrecord.py" assert record.args == () assert record.created @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_args(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork %s", ("boom"), None ) assert record.name == "hello" assert record.msg == "bork %s" assert record.args == ("boom") assert record.message is None @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_getmessage_with_args(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork %s", ("boom"), None ) assert record.message is None assert record.getMessage() == "bork boom" assert record.message == "bork boom" assert record.message == "bork boom" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_getmessage_no_args(): record = LogRecord("hello", logging.WARNING, __file__, 123, "bork boom", (), None) assert record.message is None assert record.getMessage() == "bork boom" assert record.message == "bork boom" assert record.message == "bork boom" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_args_format_mismatch(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork boom %s %s", (0,), None ) assert record.message is None with pytest.raises(TypeError): record.getMessage() @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_args_len_mismatch(): record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork boom %s", (0, 1, 2), None ) assert record.message is None with pytest.raises(TypeError): record.getMessage() @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_no_args(): record = LogRecord("hello", logging.WARNING, __file__, 123, "bork boom", None, None) assert record.message is None assert record.getMessage() == "bork boom" assert record.message == "bork boom" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_no_args_and_format(): record = LogRecord("hello", logging.WARNING, 
__file__, 123, "bork %s", None, None) assert record.message is None assert record.getMessage() == "bork %s" assert record.message == "bork %s" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_single_string_arg(): record = LogRecord("", picologging.WARNING, "", 12, " %s", "\U000b6fb2", None) assert record.args == "\U000b6fb2" assert record.getMessage() == " \U000b6fb2" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_single_empty_string_in_tuple_arg(): record = LogRecord("", 0, "", 0, " %s", ("",), None) assert record.args == ("",) assert record.getMessage() == " " @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_single_dict_in_tuple_arg(): record = LogRecord("", 0, "", 0, "%(key)s", ({"key": "val"},), None) assert record.args == {"key": "val"} assert record.getMessage() == "val" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_nested_tuple_arg(): record = LogRecord("", 0, "", 0, "%d %s", ((10, "bananas"),), None) assert record.args == ((10, "bananas"),) with pytest.raises(TypeError): record.getMessage() @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_repr(): record = LogRecord("hello", logging.WARNING, __file__, 123, "bork %s", (0,), None) assert repr(record) == f"" @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_mapping_dict(): args = { "a": "b", } record = LogRecord( "hello", logging.WARNING, __file__, 123, "bork %s", (args,), None ) assert record.args == {"a": "b"} @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_threading_info(): record = LogRecord("hello", logging.WARNING, __file__, 123, "bork", (), None) assert record.thread == threading.get_ident() assert record.threadName is None # Not supported @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_process_info(): record = LogRecord("hello", logging.WARNING, __file__, 123, "bork", (), None) assert record.process == os.getpid() assert record.processName is None 
# Not supported @pytest.mark.limit_leaks("1.5KB", filter_fn=filter_gc) def test_logrecord_subclass(): class DerivedLogRecord(LogRecord): # Leaks 1 ref (CPython implementation detail) pass record = DerivedLogRecord( "hello", logging.WARNING, __file__, 123, "bork boom", (), None ) assert DerivedLogRecord.__base__ is LogRecord assert record.message is None assert record.getMessage() == "bork boom" assert record.message == "bork boom" assert record.message == "bork boom" handler = picologging.StreamHandler() handler.emit(record) @pytest.mark.limit_leaks("512B", filter_fn=filter_gc) def test_logrecord_copy(): record = LogRecord("hello", logging.WARNING, __file__, 123, "bork boom", (), None) copied_record = copy.copy(record) assert copied_record.name == record.name assert copied_record.levelno == record.levelno assert copied_record.levelname == record.levelname assert copied_record.pathname == record.pathname assert copied_record.lineno == record.lineno assert copied_record.msg == record.msg assert copied_record.message == record.message assert copied_record.args == record.args assert copied_record.exc_info == record.exc_info assert copied_record.funcName == record.funcName assert copied_record.stack_info == record.stack_info python-picologging-0.9.4/tests/unit/test_percentstyle.py000066400000000000000000000061751467100674700236440ustar00rootroot00000000000000import logging import threading import pytest from utils import filter_gc from picologging import INFO, LogRecord, PercentStyle @pytest.mark.limit_leaks("168B", filter_fn=filter_gc) def test_percentstyle(): perc = PercentStyle("%(msg)s %(levelno)d %(name)s") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test" @pytest.mark.limit_leaks("225B", filter_fn=filter_gc) def test_percentstyle_format_bad_argument(): perc = PercentStyle("%(msg)s %(levelno)d %(name)s") with pytest.raises(AttributeError): perc.format(None) with pytest.raises(AttributeError): 
perc.format("") with pytest.raises(AttributeError): perc.format({}) @pytest.mark.limit_leaks("223B", filter_fn=filter_gc) def test_custom_attribute(): perc = PercentStyle("%(custom)s") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) record.custom = "custom" assert perc.format(record) == "custom" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_percentstyle_bad_init_args(): with pytest.raises(TypeError): PercentStyle(dog="good boy") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_funcname_format_string(): perc = PercentStyle("%(funcname)s") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, "superfunc", None) record.funcName = "superFunc" assert perc.format(record) == "superFunc" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_thread_id(): perc = PercentStyle("%(thread)d") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert record.thread == threading.get_ident() assert perc.format(record) == str(record.thread) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_record_created(): perc = PercentStyle("%(created)f") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == str(record.created) def test_custom_field_not_an_attribute(): perc = PercentStyle("%(custom)s") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) with pytest.raises(AttributeError): assert perc.format(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_percentstyle_repr(): perc = PercentStyle("%(msg)s %(levelno)d %(name)s") assert repr(perc) == "" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_record_with_defaults(): perc = PercentStyle( "%(msg)s %(levelno)d %(name)s %(fruit)s", defaults={"fruit": "banana"} ) record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test banana" 
@pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_format_logging_record(): perc = PercentStyle( "%(msg)s %(levelno)d %(name)s %(fruit)s", defaults={"fruit": "banana"} ) record = logging.LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test banana" python-picologging-0.9.4/tests/unit/test_picologging.py000066400000000000000000000115631467100674700234210ustar00rootroot00000000000000import sys import pytest from utils import filter_gc import picologging levels = [ (picologging.DEBUG, "DEBUG"), (picologging.INFO, "INFO"), (picologging.WARNING, "WARNING"), (picologging.ERROR, "ERROR"), (picologging.CRITICAL, "CRITICAL"), (picologging.NOTSET, "NOTSET"), ] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.parametrize("level, level_name", levels) def test_getlevelname(level, level_name): assert picologging.getLevelName(level) == level_name assert picologging.getLevelName(level_name) == level @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_value_error_invalid_string_names(): with pytest.raises(ValueError): assert picologging.getLevelName("EXample") == "Level EXample" junk_level_names = [None, 3.2, (), [], {}, 100] @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.parametrize("level", junk_level_names) def test_getlevelname_invalid_level(level): with pytest.raises((TypeError, ValueError)): picologging.getLevelName(level) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_critical(capsys): picologging.root.handlers = [] picologging.critical("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "CRITICAL:root:test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_fatal(capsys): picologging.root.handlers = [] picologging.fatal("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "CRITICAL:root:test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def 
test_root_logger_error(capsys): picologging.root.handlers = [] picologging.error("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "ERROR:root:test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_exception(capsys): picologging.root.handlers = [] picologging.exception("test", exc_info=Exception("bork bork bork")) cap = capsys.readouterr() assert cap.out == "" assert cap.err == "ERROR:root:test\nException: bork bork bork\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_warning(capsys): picologging.root.handlers = [] picologging.warning("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "WARNING:root:test\n" @pytest.mark.filterwarnings("ignore:The 'warn' function is deprecated") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_warn(capsys): picologging.root.handlers = [] picologging.warn("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "WARNING:root:test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_info(capsys): picologging.root.handlers = [] picologging.info("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_debug(capsys): picologging.root.handlers = [] picologging.debug("test") cap = capsys.readouterr() assert cap.out == "" assert cap.err == "" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_root_logger_log(): picologging.root.handlers = [] picologging.log(picologging.DEBUG, "test") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_basic_config_with_stream_and_filename_without_handlers(): picologging.root.handlers = [] with pytest.raises(ValueError): picologging.basicConfig(stream=sys.stderr, filename="log.txt") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_basic_config_with_stream_or_filename_with_handlers(): handler = 
picologging.StreamHandler(sys.stderr) with pytest.raises(ValueError): picologging.basicConfig(handlers=[handler], stream=sys.stdout) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_basic_config_invalid_style(): with pytest.raises(ValueError): picologging.basicConfig(style="!") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_basic_config_with_level(): picologging.basicConfig(level=picologging.INFO) assert picologging.root.level == picologging.INFO @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_basic_config_invalid_arguments(): picologging.root.handlers = [] with pytest.raises(ValueError): picologging.basicConfig(invalid_argument="value") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_make_log_record(): log_record = picologging.makeLogRecord({"levelno": picologging.WARNING}) assert log_record.levelno == picologging.WARNING @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) @pytest.mark.parametrize("encoding", ["utf-8", None]) def test_basic_config_encoding(encoding): picologging.basicConfig(filename="test.txt", encoding=encoding) python-picologging-0.9.4/tests/unit/test_queuehandler.py000066400000000000000000000034421467100674700235770ustar00rootroot00000000000000import io import queue import picologging from picologging.handlers import QueueHandler, QueueListener def test_queue_handler_dispatch(): logger = picologging.Logger("test", picologging.DEBUG) q = queue.Queue() handler = QueueHandler(q) logger.addHandler(handler) logger.debug("test") record = q.get(block=False) assert record assert record.levelno == picologging.DEBUG assert record.name == "test" assert record.msg == "test" assert record.args is None assert record.exc_info is None def test_queue_listener(): logger = picologging.Logger("test", picologging.DEBUG) stream = io.StringIO() stream_handler = picologging.StreamHandler(stream) q = queue.Queue() listener = QueueListener(q, stream_handler) listener.start() handler = QueueHandler(q) 
logger.addHandler(handler) logger.debug("test") listener.stop() assert stream.getvalue() == "test\n" def test_queue_handler_handle_exception(): logger = picologging.Logger("test", picologging.DEBUG) q = queue.Queue(maxsize=1) handler = QueueHandler(q) logger.addHandler(handler) handler.queue = None logger.debug("test") def test_queue_handler_format(): logger = picologging.getLogger("picologging_test") logger.setLevel(picologging.INFO) stream = io.StringIO() stream_handler = picologging.StreamHandler(stream) q = queue.Queue() listener = QueueListener(q, stream_handler) listener.start() handler = QueueHandler(q) handler.setLevel(picologging.DEBUG) handler.setFormatter( picologging.Formatter("%(levelname)s - %(name)s - %(message)s") ) logger.addHandler(handler) logger.info("Testing now!") listener.stop() assert stream.getvalue() == "INFO - picologging_test - Testing now!\n" python-picologging-0.9.4/tests/unit/test_sockethandler.py000066400000000000000000000167651467100674700237570ustar00rootroot00000000000000import os import pickle import socket import struct import tempfile import threading from socketserver import ( DatagramRequestHandler, StreamRequestHandler, ThreadingTCPServer, ThreadingUDPServer, ) import pytest import picologging from picologging.handlers import DatagramHandler, SocketHandler class ControlMixin: def __init__(self, handler, poll_interval): self._thread = None self._handler = handler self.poll_interval = poll_interval self.ready = threading.Event() def start(self): self._thread = threading.Thread( target=self.serve_forever, args=(self.poll_interval,) ) self._thread.daemon = True self._thread.start() def serve_forever(self, poll_interval): self.ready.set() super().serve_forever(poll_interval) def stop(self): self.shutdown() if self._thread is not None: self._thread.join() self._thread = None self.server_close() self.ready.clear() class TCPServer(ControlMixin, ThreadingTCPServer): allow_reuse_address = True def __init__(self, addr, 
poll_interval=0.5, bind_and_activate=True): class DelegatingTCPRequestHandler(StreamRequestHandler): def handle(self): self.server._handler(self) ThreadingTCPServer.__init__( self, addr, DelegatingTCPRequestHandler, bind_and_activate ) ControlMixin.__init__(self, self.handle_socket, poll_interval) self.log_output = "" self.handled = threading.Semaphore(0) def server_bind(self): super().server_bind() self.port = self.socket.getsockname()[1] def handle_socket(self, request): conn = request.connection while True: chunk = conn.recv(4) if len(chunk) < 4: break slen = struct.unpack(">L", chunk)[0] chunk = conn.recv(slen) while len(chunk) < slen: chunk = chunk + conn.recv(slen - len(chunk)) obj = pickle.loads(chunk) record = picologging.makeLogRecord(obj) self.log_output += record.msg + "\n" self.handled.release() class UDPServer(ControlMixin, ThreadingUDPServer): def __init__(self, addr, poll_interval=0.5, bind_and_activate=True): class DelegatingUDPRequestHandler(DatagramRequestHandler): def handle(self): self.server._handler(self) def finish(self): data = self.wfile.getvalue() if data: try: super().finish() except OSError: if not self.server._closed: raise ThreadingUDPServer.__init__( self, addr, DelegatingUDPRequestHandler, bind_and_activate ) ControlMixin.__init__(self, self.handle_socket, poll_interval) self._closed = False self.log_output = "" self.handled = threading.Semaphore(0) def handle_socket(self, request): slen = struct.pack(">L", 0) # length of prefix packet = request.packet[len(slen) :] obj = pickle.loads(packet) record = picologging.makeLogRecord(obj) self.log_output += record.msg + "\n" self.handled.release() def server_bind(self): super().server_bind() self.port = self.socket.getsockname()[1] def server_close(self): super().server_close() self._closed = True if hasattr(socket, "AF_UNIX"): class UnixStreamServer(TCPServer): address_family = socket.AF_UNIX class UnixDatagramServer(UDPServer): address_family = socket.AF_UNIX def test_sockethandler(): 
server = TCPServer(("localhost", 0), 0.01) server.start() server.ready.wait() handler = SocketHandler("localhost", server.port) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.error("test") server.handled.acquire() logger.debug("test") server.handled.acquire() assert server.log_output == "test\ntest\n" handler.close() server.stop() @pytest.mark.skipif(not hasattr(socket, "AF_UNIX"), reason="Unix sockets required") def test_unix_sockethandler(): address = tempfile.NamedTemporaryFile(prefix="picologging_", suffix=".sock").name server = UnixStreamServer(address, 0.01) server.start() server.ready.wait() handler = SocketHandler(address, None) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.error("test") server.handled.acquire() logger.debug("test") server.handled.acquire() assert server.log_output == "test\ntest\n" handler.close() server.stop() os.remove(address) @pytest.mark.skipif(not hasattr(socket, "AF_UNIX"), reason="Unix sockets required") def test_unix_sockethandler_connect_exception(monkeypatch): def mock_os_error(*args): raise OSError() monkeypatch.setattr(socket.socket, "connect", mock_os_error) address = tempfile.NamedTemporaryFile(prefix="picologging_", suffix=".sock").name handler = SocketHandler(address, None) handler.retryMax = 1 logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.debug("test") handler.retryTime -= 5 logger.debug("test") assert handler.retryTime is not None assert handler.sock is None handler.close() @pytest.mark.skipif(not hasattr(socket, "AF_UNIX"), reason="Unix sockets required") def test_unix_sockethandler_emit_exception(monkeypatch): def mock_exception(*args): raise Exception() monkeypatch.setattr(socket.socket, "sendall", mock_exception) address = tempfile.NamedTemporaryFile(prefix="picologging_", suffix=".sock").name server = UnixStreamServer(address, 0.01) 
server.start() server.ready.wait() handler = SocketHandler(address, None) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.debug("test") try: 1 / 0 except ZeroDivisionError: logger.exception("error") handler.closeOnError = True logger.debug("test") assert handler.sock is None handler.close() server.stop() os.remove(address) def test_datagramhandler(): server = UDPServer(("localhost", 0), 0.01) server.start() server.ready.wait() handler = DatagramHandler("localhost", server.port) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.error("test") server.handled.acquire() logger.debug("test") server.handled.acquire() assert server.log_output == "test\ntest\n" handler.close() server.stop() @pytest.mark.skipif(not hasattr(socket, "AF_UNIX"), reason="Unix sockets required") def test_unix_datagramhandler(): address = tempfile.NamedTemporaryFile(prefix="picologging_", suffix=".sock").name server = UnixDatagramServer(address, 0.01) server.start() server.ready.wait() handler = DatagramHandler(address, None) logger = picologging.getLogger("test") logger.setLevel(picologging.DEBUG) logger.addHandler(handler) logger.error("test") server.handled.acquire() logger.debug("test") server.handled.acquire() assert server.log_output == "test\ntest\n" handler.close() server.stop() os.remove(address) python-picologging-0.9.4/tests/unit/test_streamhandler.py000066400000000000000000000113111467100674700237400ustar00rootroot00000000000000import io import logging import sys import pytest from utils import filter_gc import picologging @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_stream_handler(): stream = io.StringIO() handler = picologging.StreamHandler(stream) record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "test", (), None, None, None ) formatter = picologging.Formatter("%(message)s") handler.setFormatter(formatter) assert 
handler.formatter == formatter handler.handle(record) assert stream.getvalue() == "test\n" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_stream_handler_defaults_to_stderr(): handler = picologging.StreamHandler() assert handler.stream == sys.stderr handler = picologging.StreamHandler(None) assert handler.stream == sys.stderr @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_stream_handler_bad_init_args(): with pytest.raises(TypeError): picologging.StreamHandler(1, 2, 3, 4) with pytest.raises(TypeError): picologging.StreamHandler(dog=1) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_stream_handler_invalid_stream_type(): handler = picologging.StreamHandler("potato") record = picologging.LogRecord( "test", picologging.INFO, __file__, 1, "test", (), None, None, None ) with pytest.raises(AttributeError): handler.handle(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_non_flushable_stream(): class TestStream: def write(self, data): pass handler = picologging.StreamHandler(TestStream()) assert not handler.flush() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_emit_no_args(): handler = picologging.StreamHandler() with pytest.raises(ValueError): handler.emit() @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_emit_invalid_args_type(): handler = picologging.StreamHandler() with pytest.raises(TypeError): handler.emit(1234) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_stream_write_raises_error(): class TestStream: def write(self, data): raise Exception("blerg") handler = picologging.StreamHandler(TestStream()) with pytest.raises(Exception): handler.emit("foo 123") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_set_stream(): class TestStream: def write(self, data): pass def flush(self): pass a = TestStream() handler = picologging.StreamHandler(a) assert handler.stream is a b = TestStream() handler.setStream(b) assert handler.stream is b 
@pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_set_stream_return_value(): h = picologging.StreamHandler() stream = io.StringIO() old = h.setStream(stream) assert old is sys.stderr actual = h.setStream(old) assert actual is stream # test that setting to existing value returns None actual = h.setStream(old) assert actual is None @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_streamhandler_repr(): class StreamWithName: level = picologging.NOTSET name = "beyonce" handler = picologging.StreamHandler(StreamWithName()) assert repr(handler) == "" class StreamWithIntName: level = picologging.NOTSET name = 2 handler = picologging.StreamHandler(StreamWithIntName()) assert repr(handler) == "" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_streamhandler_handle_return_value(): handler = picologging.StreamHandler() record = picologging.LogRecord( "test", picologging.WARNING, __file__, 1, "test", (), None, None, None ) assert handler.handle(record) is True assert handler.emit(record) is None class TestFilter(picologging.Filter): def filter(self, record): return False handler.addFilter(TestFilter()) assert handler.handle(record) is None assert handler.emit(record) is None @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_emit_subclass(): class DerivedLogRecord(picologging.LogRecord): pass record = DerivedLogRecord( "hello", logging.WARNING, __file__, 123, "bork boom", (), None ) assert DerivedLogRecord.__base__ is picologging.LogRecord assert record.message is None assert record.getMessage() == "bork boom" assert record.message == "bork boom" assert record.message == "bork boom" stream = io.StringIO() handler = picologging.StreamHandler(stream) handler.emit(record) assert stream.getvalue() == "bork boom\n" python-picologging-0.9.4/tests/unit/test_strformatstyle.py000066400000000000000000000070431467100674700242200ustar00rootroot00000000000000import logging import threading import pytest from utils import filter_gc from 
picologging import INFO, Formatter, LogRecord, StrFormatStyle @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_strformatstyle(): perc = StrFormatStyle("{msg} {levelno} {name}") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_strformatstyle_from_formatter(): perc = Formatter("{msg} {levelno} {name}", style="{") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_strformatstyle_format_bad_argument(): perc = StrFormatStyle("{msg} {levelno} {name}") with pytest.raises(AttributeError): perc.format(None) with pytest.raises(AttributeError): perc.format("") with pytest.raises(AttributeError): perc.format({}) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_custom_attribute(): perc = StrFormatStyle("{custom}") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) record.custom = "custom" assert perc.format(record) == "custom" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_strformatstyle_bad_init_args(): with pytest.raises(TypeError): StrFormatStyle(dog="good boy") @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_funcname_format_string(): perc = StrFormatStyle("{funcname}") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, "superfunc", None) record.funcName = "superFunc" assert perc.format(record) == "superFunc" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_thread_id(): perc = StrFormatStyle("{thread}") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert record.thread == threading.get_ident() assert perc.format(record) == str(record.thread) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_record_created(): perc = StrFormatStyle("{created}") record = 
LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == str(record.created) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_custom_field_not_an_attribute(): perc = StrFormatStyle("{custom}") record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) with pytest.raises(AttributeError): assert perc.format(record) @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_strformatstyle_repr(): perc = StrFormatStyle("{msg} {levelno} {name}") assert repr(perc) == "" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_record_with_defaults(): perc = StrFormatStyle( "{msg} {levelno} {name} {fruit}", defaults={"fruit": "banana"} ) assert repr(perc) == "" record = LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test banana" @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_format_logging_record(): perc = StrFormatStyle( "{msg} {levelno} {name} {fruit}", defaults={"fruit": "banana"} ) record = logging.LogRecord("test", INFO, __file__, 1, "hello", (), None, None, None) assert perc.format(record) == "hello 20 test banana" python-picologging-0.9.4/tests/unit/test_threading.py000066400000000000000000000011071467100674700230560ustar00rootroot00000000000000import io import threading import pytest from utils import filter_gc import picologging from picologging import Logger, StreamHandler @pytest.mark.limit_leaks("192B", filter_fn=filter_gc) def test_threaded_execution(): logger = Logger("test", picologging.DEBUG) tmp = io.StringIO() handler = StreamHandler(tmp) logger.addHandler(handler) def _log_message(): logger.debug("from thread") t = threading.Thread(target=_log_message) t.start() t.join() result = tmp.getvalue() assert result == "from thread\n" python-picologging-0.9.4/tests/unit/utils.py000066400000000000000000000002611467100674700212120ustar00rootroot00000000000000def filter_gc(stack): for frame in 
stack.frames[:4]: if "picologging" in frame.filename and "test_" not in frame.filename: return True return False