pax_global_header00006660000000000000000000000064147111575260014523gustar00rootroot0000000000000052 comment=76c091dc8841de1d1a1cd6511bb509fe4f058de6 watchdog-6.0.0/000077500000000000000000000000001471115752600133265ustar00rootroot00000000000000watchdog-6.0.0/.cirrus.yml000066400000000000000000000012711471115752600154370ustar00rootroot00000000000000task: matrix: freebsd_instance: image_family: freebsd-13-0 freebsd_instance: image_family: freebsd-12-2 install_script: - pkg install -y python39 py39-sqlite3 # Print the Python version, only to be sure we are running the version we want - python3.9 -c 'import platform; print("Python", platform.python_version())' # Check SQLite3 is installed - python3.9 -c 'import sqlite3; print("SQLite3", sqlite3.version)' setup_script: - python3.9 -m ensurepip - python3.9 -m pip install -U pip - python3.9 -m pip install -r requirements-tests.txt lint_script: - python3.9 -m ruff src tests_script: - python3.9 -bb -m pytest tests watchdog-6.0.0/.gitattributes000066400000000000000000000004061471115752600162210ustar00rootroot00000000000000# Language aware diff headers # https://tekin.co.uk/2020/10/better-git-diff-output-for-ruby-python-elixir-and-more # https://gist.github.com/tekin/12500956bd56784728e490d8cef9cb81 # https://github.com/git/git/blob/master/userdiff.c *.c diff=cpp *.py diff=python watchdog-6.0.0/.github/000077500000000000000000000000001471115752600146665ustar00rootroot00000000000000watchdog-6.0.0/.github/FUNDING.yml000066400000000000000000000000431471115752600165000ustar00rootroot00000000000000github: [BoboTiG] polar: tiger-222 watchdog-6.0.0/.github/dependabot.yml000066400000000000000000000002021471115752600175100ustar00rootroot00000000000000version: 2 updates: # GitHub Actions - package-ecosystem: github-actions directory: / schedule: interval: daily 
watchdog-6.0.0/.github/workflows/000077500000000000000000000000001471115752600167235ustar00rootroot00000000000000watchdog-6.0.0/.github/workflows/build-and-publish.yml000066400000000000000000000105331471115752600227530ustar00rootroot00000000000000# Because this library provides extension modules for macOS, but not for other # platforms, we want to provide built distributions for each macOS platform, but we # explicitly DON'T want to provide a cross-platform pure-Python wheel to fall back on. # # This is because in the event that a new Python version is released or a new # macOS platform is released, macOS users won't be able to install the built # distributions we've provided and should fall back to the source distribution, # but pip's behavior is to prefer a pure-Python wheel first, which will be # missing the extension modules. # # However, to provide built distributions for Linux and Windows (which don't # have extension modules) we can just build a pure-Python wheel on that # platform and override the platform name manually via wheel's --plat-name # feature, to provide a platform-specific wheel for all platforms. 
name: Build & Publish on: push: branches: - master pull_request: branches: - "**" workflow_dispatch: inputs: branch: description: "The branch, tag or SHA to release from" required: true default: "master" concurrency: group: ${{ github.ref }}-${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name != 'pull_request' && github.sha || '' }} cancel-in-progress: true jobs: macos-built-distributions: name: Build macOS wheels runs-on: macos-latest timeout-minutes: 20 steps: - name: Checkout uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.branch }} - name: Install Python uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install build dependencies run: python -m pip install cibuildwheel - name: Build wheels run: python -m cibuildwheel env: CIBW_ARCHS_MACOS: "x86_64 universal2 arm64" - name: Artifacts list run: ls -l wheelhouse - uses: actions/upload-artifact@v4 with: name: python-package-distributions-macos path: ./wheelhouse/*.whl pure-built-distributions: name: Build pure wheels runs-on: ubuntu-latest timeout-minutes: 5 steps: - name: Checkout uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.branch }} - name: Install Python uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install build dependencies run: python -m pip install -U setuptools wheel - name: Build wheels run: | for platform in 'manylinux2014_x86_64' 'manylinux2014_i686' 'manylinux2014_aarch64' 'manylinux2014_armv7l' 'manylinux2014_ppc64' 'manylinux2014_ppc64le' 'manylinux2014_s390x' 'win32' 'win_amd64' 'win_ia64' do python setup.py bdist_wheel --plat-name $platform done - name: Artifacts list run: ls -l dist - uses: actions/upload-artifact@v4 with: name: python-package-distributions-pure-wheels path: ./dist/*.whl source-distribution: name: Build source distribution runs-on: ubuntu-latest timeout-minutes: 5 steps: - name: Checkout uses: actions/checkout@v4 with: ref: ${{ github.event.inputs.branch }} - name: Install Python uses: 
actions/setup-python@v5 with: python-version: "3.11" - name: Build source distribution run: python setup.py sdist - name: Artifacts list run: ls -l dist - name: Store the source distribution uses: actions/upload-artifact@v4 with: name: python-package-distributions-source path: dist/*.tar.gz publish: needs: - macos-built-distributions - pure-built-distributions - source-distribution runs-on: ubuntu-latest timeout-minutes: 5 steps: - name: Download all the dists uses: actions/download-artifact@v4 with: pattern: python-package-distributions-* merge-multiple: true path: dist/ - name: What will we publish? run: ls -l dist - name: Publish if: github.event.inputs.branch != '' uses: pypa/gh-action-pypi-publish@release/v1 with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} skip-existing: true watchdog-6.0.0/.github/workflows/tests.yml000066400000000000000000000037231471115752600206150ustar00rootroot00000000000000name: Tests on: push: branches: - master pull_request: concurrency: group: ${{ github.ref }}-${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name != 'pull_request' && github.sha || '' }} cancel-in-progress: true jobs: quality: name: 🧑‍🏭 Quality & Docs runs-on: ubuntu-latest steps: - name: Checkout uses: actions/checkout@v4 - name: Set up Python uses: actions/setup-python@v5 with: python-version: "3.13" cache: pip - name: Install dependencies run: python -m pip install tox - name: Run linters run: python -m tox -q -e types,lint - name: Build the documentation run: python -m tox -q -e docs tox: name: ${{ matrix.tox.name }} ${{ matrix.os.emoji }} ${{ matrix.os.name }} ${{ matrix.python }} runs-on: ${{ matrix.os.runs-on }} timeout-minutes: 15 strategy: fail-fast: false matrix: os: - name: Linux matrix: linux emoji: 🐧 runs-on: [ubuntu-latest] - name: macOS matrix: macos emoji: 🍎 runs-on: [macos-latest] - name: Windows matrix: windows emoji: 🪟 runs-on: [windows-latest] python: - "3.9" - "3.10" - "3.11" - "3.12" - "3.13" - "pypy-3.9" exclude: - 
os: matrix: macos python: "pypy-3.9" - os: matrix: windows python: "pypy-3.9" steps: - name: Checkout uses: actions/checkout@v4 - name: Set up Python ${{ matrix.python }} uses: actions/setup-python@v5 with: python-version: ${{ matrix.python }} cache: pip - name: Install dependencies run: python -m pip install tox - name: Run tests run: python -m tox -q -e py watchdog-6.0.0/.gitignore000066400000000000000000000007731471115752600153250ustar00rootroot00000000000000# Ignore temporary files. *.bak *.bkp *.log *.py[co] *.swp *~ .DS_Store .\#* ._* *.o *.so Desktop.ini Thumbs.db \#*\# __MACOSX__ # Ignore generated files and directories. *.egg-info/ *.egg .installed.cfg build/ develop-eggs/ dist/ eggs/ parts/ __pycache__/ MANIFEST # Project files for VS Code, idea, eclipse, and netbeans nbproject/ .idea/ .settings/ .vscode/ # Generated by tests. .coverage .coverage.* htmlcov/ .tox/ .cache/ .pytest_cache/ # From virtualenv. include/ lib/ /bootstrap.py venv/ .venv/ watchdog-6.0.0/.well-known/000077500000000000000000000000001471115752600155015ustar00rootroot00000000000000watchdog-6.0.0/.well-known/funding-manifest-urls000066400000000000000000000000461471115752600216450ustar00rootroot00000000000000https://www.tiger-222.fr/funding.json watchdog-6.0.0/AUTHORS000066400000000000000000000055601471115752600144040ustar00rootroot00000000000000Original Project Lead: ---------------------- Yesudeep Mangalapilly Current Project Lead: --------------------- Mickaël Schoentgen Contributors in alphabetical order: ----------------------------------- Adrian Tejn Kern Andrew Schaaf Danilo de Jesus da Silva Bellini David LaPalomento dvogel Filip Noetzel Gary van der Merwe gfxmonk Gora Khargosh Hannu Valtonen Jesse Printz Kurt McKee Léa Klein Luke McCarthy Lukáš Lalinský Malthe Borch Martin Kreichgauer Martin Kreichgauer Mike Lundy Nicholas Hairs Raymond Hettinger Roman Ovchinnikov Rotem Yaari Ryan Kelly Senko Rasic Senko Rašić Shane Hathaway Simon Pantzare Simon Pantzare Steven Samuel Cole 
Stéphane Klein Thomas Guest Thomas Heller Tim Cuthbertson Todd Whiteman Will McGugan Yesudeep Mangalapilly Yesudeep Mangalapilly We would like to thank these individuals for ideas: --------------------------------------------------- Tim Golden Sebastien Martini Initially we used the flask theme for the documentation which was written by ---------------------------------------------------------------------------- Armin Ronacher Watchdog also includes open source libraries or adapted code from the following projects: - MacFSEvents - https://github.com/malthe/macfsevents - watch_directory.py - http://timgolden.me.uk/python/downloads/watch_directory.py - pyinotify - https://github.com/seb-m/pyinotify - fsmonitor - https://github.com/shaurz/fsmonitor - echo - http://wordaligned.org/articles/echo - Lukáš Lalinský's ordered set queue implementation: https://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue - Armin Ronacher's flask-sphinx-themes for the documentation: https://github.com/mitsuhiko/flask-sphinx-themes - pyfilesystem - https://github.com/PyFilesystem/pyfilesystem - get_FILE_NOTIFY_INFORMATION - http://blog.gmane.org/gmane.comp.python.ctypes/month=20070901 watchdog-6.0.0/COPYING000066400000000000000000000012771471115752600143700ustar00rootroot00000000000000Copyright 2018-2024 Mickaël Schoentgen & contributors Copyright 2014-2018 Thomas Amland & contributors Copyright 2012-2014 Google, Inc. Copyright 2011-2012 Yesudeep Mangalapilly Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. watchdog-6.0.0/LICENSE000066400000000000000000000261361471115752600143430ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). 
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. 
To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. watchdog-6.0.0/MANIFEST.in000066400000000000000000000007471471115752600150740ustar00rootroot00000000000000include README.rst include changelog.rst include LICENSE include COPYING include AUTHORS recursive-include src *.py *.h *.c include src/watchdog/py.typed include tox.ini include docs/*.txt include docs/*.xml include docs/Makefile include docs/make.bat include requirements-tests.txt recursive-include docs/source * recursive-include tests *.py #global-exclude .DS_Store #global-exclude Thumbs.db #global-exclude Desktop.ini #global-exclude *.swp #global-exclude *~ #global-exclude *.bak watchdog-6.0.0/README.rst000077500000000000000000000211411471115752600150170ustar00rootroot00000000000000Watchdog ======== |PyPI Version| |PyPI Status| |PyPI Python Versions| |GitHub Build Status| |GitHub License| Python API and shell utilities to monitor file system events. Works on 3.9+. 
Example API Usage ----------------- A simple program that uses watchdog to monitor directories specified as command-line arguments and logs events generated: .. code-block:: python import time from watchdog.events import FileSystemEvent, FileSystemEventHandler from watchdog.observers import Observer class MyEventHandler(FileSystemEventHandler): def on_any_event(self, event: FileSystemEvent) -> None: print(event) event_handler = MyEventHandler() observer = Observer() observer.schedule(event_handler, ".", recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() Shell Utilities --------------- Watchdog comes with an *optional* utility script called ``watchmedo``. Please type ``watchmedo --help`` at the shell prompt to know more about this tool. Here is how you can log the current directory recursively for events related only to ``*.py`` and ``*.txt`` files while ignoring all directory events: .. code-block:: bash watchmedo log \ --patterns='*.py;*.txt' \ --ignore-directories \ --recursive \ --verbose \ . You can use the ``shell-command`` subcommand to execute shell commands in response to events: .. code-block:: bash watchmedo shell-command \ --patterns='*.py;*.txt' \ --recursive \ --command='echo "${watch_src_path}"' \ . Please see the help information for these commands by typing: .. code-block:: bash watchmedo [command] --help About ``watchmedo`` Tricks ~~~~~~~~~~~~~~~~~~~~~~~~~~ ``watchmedo`` can read ``tricks.yaml`` files and execute tricks within them in response to file system events. Tricks are actually event handlers that subclass ``watchdog.tricks.Trick`` and are written by plugin authors. Trick classes are augmented with a few additional features that regular event handlers don't need. An example ``tricks.yaml`` file: .. 
code-block:: yaml tricks: - watchdog.tricks.LoggerTrick: patterns: ["*.py", "*.js"] - watchmedo_webtricks.GoogleClosureTrick: patterns: ['*.js'] hash_names: true mappings_format: json # json|yaml|python mappings_module: app/javascript_mappings suffix: .min.js compilation_level: advanced # simple|advanced source_directory: app/static/js/ destination_directory: app/public/js/ files: index-page: - app/static/js/vendor/jquery*.js - app/static/js/base.js - app/static/js/index-page.js about-page: - app/static/js/vendor/jquery*.js - app/static/js/base.js - app/static/js/about-page/**/*.js The directory containing the ``tricks.yaml`` file will be monitored. Each trick class is initialized with its corresponding keys in the ``tricks.yaml`` file as arguments and events are fed to an instance of this class as they arrive. Installation ------------ Install from PyPI using ``pip``: .. code-block:: bash $ python -m pip install -U watchdog # or to install the watchmedo utility: $ python -m pip install -U 'watchdog[watchmedo]' Install from source: .. code-block:: bash $ python -m pip install -e . # or to install the watchmedo utility: $ python -m pip install -e '.[watchmedo]' Documentation ------------- You can browse the latest release documentation_ online. Contribute ---------- Fork the `repository`_ on GitHub and send a pull request, or file an issue ticket at the `issue tracker`_. For general help and questions use `stackoverflow`_ with tag `python-watchdog`. Create and activate your virtual environment, then:: python -m pip install tox python -m tox [-q] [-e ENV] If you are making a substantial change, add an entry to the "Unreleased" section of the `changelog`_. 
Supported Platforms ------------------- * Linux 2.6 (inotify) * macOS (FSEvents, kqueue) * FreeBSD/BSD (kqueue) * Windows (ReadDirectoryChangesW with I/O completion ports; ReadDirectoryChangesW worker threads) * OS-independent (polling the disk for directory snapshots and comparing them periodically; slow and not recommended) Note that when using watchdog with kqueue, you need the number of file descriptors allowed to be opened by programs running on your system to be increased to more than the number of files that you will be monitoring. The easiest way to do that is to edit your ``~/.profile`` file and add a line similar to:: ulimit -n 1024 This is an inherent problem with kqueue because it uses file descriptors to monitor files. That plus the enormous amount of bookkeeping that watchdog needs to do in order to monitor file descriptors just makes this a painful way to monitor files and directories. In essence, kqueue is not a very scalable way to monitor a deeply nested directory of files and directories with a large number of files. About using watchdog with editors like Vim ------------------------------------------ Vim does not modify files unless directed to do so. It creates backup files and then swaps them in to replace the files you are editing on the disk. This means that if you use Vim to edit your files, the on-modified events for those files will not be triggered by watchdog. You may need to configure Vim appropriately to disable this feature. About using watchdog with CIFS ------------------------------ When you want to watch changes in CIFS, you need to explicitly tell watchdog to use ``PollingObserver``, that is, instead of letting watchdog decide an appropriate observer like in the example above, do:: from watchdog.observers.polling import PollingObserver as Observer Dependencies ------------ 1. Python 3.9 or above. 2. XCode_ (only on macOS when installing from sources) 3. 
PyYAML_ (only for ``watchmedo``) Licensing --------- Watchdog is licensed under the terms of the `Apache License, version 2.0`_. - Copyright 2018-2024 Mickaël Schoentgen & contributors - Copyright 2014-2018 Thomas Amland & contributors - Copyright 2012-2014 Google, Inc. - Copyright 2011-2012 Yesudeep Mangalapilly Project `source code`_ is available at Github. Please report bugs and file enhancement requests at the `issue tracker`_. Why Watchdog? ------------- Too many people tried to do the same thing and none did what I needed Python to do: * pnotify_ * `unison fsmonitor`_ * fsmonitor_ * guard_ * pyinotify_ * `inotify-tools`_ * jnotify_ * treewatcher_ * `file.monitor`_ * pyfilesystem_ .. links: .. _Yesudeep Mangalapilly: yesudeep@gmail.com .. _source code: https://github.com/gorakhargosh/watchdog .. _issue tracker: https://github.com/gorakhargosh/watchdog/issues .. _Apache License, version 2.0: https://www.apache.org/licenses/LICENSE-2.0 .. _documentation: https://python-watchdog.readthedocs.io/ .. _stackoverflow: https://stackoverflow.com/questions/tagged/python-watchdog .. _repository: https://github.com/gorakhargosh/watchdog .. _issue tracker: https://github.com/gorakhargosh/watchdog/issues .. _changelog: https://github.com/gorakhargosh/watchdog/blob/master/changelog.rst .. _PyYAML: https://www.pyyaml.org/ .. _XCode: https://developer.apple.com/technologies/tools/xcode.html .. _pnotify: http://mark.heily.com/pnotify .. _unison fsmonitor: https://webdav.seas.upenn.edu/viewvc/unison/trunk/src/fsmonitor.py?view=markup&pathrev=471 .. _fsmonitor: https://github.com/shaurz/fsmonitor .. _guard: https://github.com/guard/guard .. _pyinotify: https://github.com/seb-m/pyinotify .. _inotify-tools: https://github.com/rvoicilas/inotify-tools .. _jnotify: http://jnotify.sourceforge.net/ .. _treewatcher: https://github.com/jbd/treewatcher .. _file.monitor: https://github.com/pke/file.monitor .. _pyfilesystem: https://github.com/PyFilesystem/pyfilesystem .. 
|PyPI Version| image:: https://img.shields.io/pypi/v/watchdog.svg :target: https://pypi.python.org/pypi/watchdog/ .. |PyPI Status| image:: https://img.shields.io/pypi/status/watchdog.svg :target: https://pypi.python.org/pypi/watchdog/ .. |PyPI Python Versions| image:: https://img.shields.io/pypi/pyversions/watchdog.svg :target: https://pypi.python.org/pypi/watchdog/ .. |Github Build Status| image:: https://github.com/gorakhargosh/watchdog/workflows/Tests/badge.svg :target: https://github.com/gorakhargosh/watchdog/actions?query=workflow%3ATests .. |GitHub License| image:: https://img.shields.io/github/license/gorakhargosh/watchdog.svg :target: https://github.com/gorakhargosh/watchdog/blob/master/LICENSE watchdog-6.0.0/changelog.rst000066400000000000000000001003551471115752600160130ustar00rootroot00000000000000.. :changelog: Changelog --------- 6.0.0 ~~~~~ 2024-11-01 • `full history `__ - Pin test dependecies. - [docs] Add typing info to quick start. (`#1082 `__) - [inotify] Use of ``select.poll()`` instead of deprecated ``select.select()``, if available. (`#1078 `__) - [inotify] Fix reading inotify file descriptor after closing it. (`#1081 `__) - [utils] The ``stop_signal`` keyword-argument type of the ``AutoRestartTrick`` class can now be either a ``signal.Signals`` or an ``int``. - [utils] Added the ``__repr__()`` method to the ``Trick`` class. - [utils] Removed the unused ``echo_class()`` function from the ``echo`` module. - [utils] Removed the unused ``echo_instancemethod()`` function from the ``echo`` module. - [utils] Removed the unused ``echo_module()`` function from the ``echo`` module. - [utils] Removed the unused ``is_class_private_name()`` function from the ``echo`` module. - [utils] Removed the unused ``is_classmethod()`` function from the ``echo`` module. - [utils] Removed the unused ``ic_method(met()`` function from the ``echo`` module. - [utils] Removed the unused ``method_name()`` function from the ``echo`` module. 
- [utils] Removed the unused ``name()`` function from the ``echo`` module. - [watchmedo] Fixed Mypy issues. - [watchmedo] Added the ``__repr__()`` method to the ``HelpFormatter`` class. - [watchmedo] Removed the ``--trace`` CLI argument from the ``watchmedo log`` command, useless since events are logged by default at the ``LoggerTrick`` class level. - [windows] Fixed Mypy issues. - Thanks to our beloved contributors: @BoboTiG, @g-pichlern, @ethan-vanderheijden, @nhairs 5.0.3 ~~~~~ 2024-09-27 • `full history `__ - [inotify] Improve cleaning up ``Inotify`` threads, and add ``eventlet`` test cases (`#1070 `__) - Thanks to our beloved contributors: @BoboTiG, @ethan-vanderheijden 5.0.2 ~~~~~ 2024-09-03 • `full history `__ - Enable OS specific Mypy checks (`#1064 `__) - [watchmedo] Fix ``tricks`` argument type of ``schedule_tricks()`` (`#1063 `__) - Thanks to our beloved contributors: @gnought, @BoboTiG 5.0.1 ~~~~~ 2024-09-02 • `full history `__ - [kqueue] Fix ``TypeError: kqueue.control() only accepts positional parameters`` (`#1062 `__) - Thanks to our beloved contributors: @apoirier, @BoboTiG 5.0.0 ~~~~~ 2024-08-26 • `full history `__ **Breaking Changes** - Drop support for Python 3.8 (`#1055 `__) - [core] Enforced usage of proper keyword-arguments (`#1057 `__) - [core] Renamed the ``BaseObserverSubclassCallable`` class to ``ObserverType`` (`#1055 `__) - [inotify] Renamed the ``inotify_event_struct`` class to ``InotifyEventStruct`` (`#1055 `__) - [inotify] Renamed the ``UnsupportedLibc`` exception to ``UnsupportedLibcError`` (`#1057 `__) - [inotify] Removed the ``InotifyConstants.IN_CLOSE`` constant (`#1046 `__) - [watchmedo] Renamed the ``LogLevelException`` exception to ``LogLevelError`` (`#1057 `__) - [watchmedo] Renamed the ``WatchdogShutdown`` exception to ``WatchdogShutdownError`` (`#1057 `__) - [windows] Renamed the ``FILE_NOTIFY_INFORMATION`` class to ``FileNotifyInformation`` (`#1055 `__) - [windows] Removed the unused ``WATCHDOG_TRAVERSE_MOVED_DIR_DELAY`` 
constant (`#1057 `__) **Other Changes** - [core] Enable ``disallow_untyped_calls`` Mypy rule (`#1055 `__) - [core] Enable ``disallow_untyped_defs`` Mypy rule (`#1060 `__) - [core] Improve typing references for events (`#1040 `__) - [inotify] Add support for ``IN_CLOSE_NOWRITE`` events. A ``FileClosedNoWriteEvent`` event will be fired, and its ``on_closed_no_write()`` dispatcher has been introduced (`#1046 `__) - Thanks to our beloved contributors: @BoboTiG 4.0.2 ~~~~~ 2024-08-11 • `full history `__ - Add support for Python 3.13 (`#1052 `__) - [core] Run ``ruff``, apply several fixes (`#1033 `__) - [core] Remove execution rights from ``events.py`` - [documentation] Update ``PatternMatchingEventHandler`` docstrings (`#1048 `__) - [documentation] Simplify the quickstart example (`#1047 `__) - [fsevents] Add missing ``event_filter`` keyword-argument to ``FSEventsObserver.schedule()`` (`#1049 `__) - [utils] Fix a possible race condition in ``AutoRestartTrick`` (`#1002 `__) - [watchmedo] Remove execution rights from ``watchmedo.py`` - Thanks to our beloved contributors: @BoboTiG, @nbelakovski, @ivg 4.0.1 ~~~~~ 2024-05-23 • `full history `__ - [inotify] Fix missing ``event_filter`` for the full emitter (`#1032 `__) - Thanks to our beloved contributors: @mraspaud, @BoboTiG 4.0.0 ~~~~~ 2024-02-06 • `full history `__ - Drop support for Python 3.7. - Add support for Python 3.12. 
- [snapshot] Add typing to ``dirsnapshot`` (`#1012 `__) - [snapshot] Added ``DirectorySnapshotDiff.ContextManager`` (`#1011 `__) - [events] ``FileSystemEvent``, and subclasses, are now ``dataclass``es, and their ``repr()`` has changed - [windows] ``WinAPINativeEvent`` is now a ``dataclass``, and its ``repr()`` has changed - [events] Log ``FileOpenedEvent``, and ``FileClosedEvent``, events in ``LoggingEventHandler`` - [tests] Improve ``FileSystemEvent`` coverage - [watchmedo] Log all events in ``LoggerTrick`` - [windows] The ``observers.read_directory_changes.WATCHDOG_TRAVERSE_MOVED_DIR_DELAY`` hack was removed. The constant will be kept to prevent breaking other softwares. - Thanks to our beloved contributors: @BoboTiG, @msabramo 3.0.0 ~~~~~ 2023-03-20 • `full history `__ - Drop support for Python 3.6. - ``watchdog`` is now PEP 561 compatible, and tested with ``mypy`` - Fix missing ``>`` in ``FileSystemEvent.__repr__()`` (`#980 `__) - [ci] Lots of improvements - [inotify] Return from ``InotifyEmitter.queue_events()`` if not launched when thread is inactive (`#963 `__) - [tests] Stability improvements - [utils] Remove handling of ``threading.Event.isSet`` spelling (`#962 `__) - [watchmedo] Fixed tricks YAML generation (`#965 `__) - Thanks to our beloved contributors: @kurtmckee, @altendky, @agroszer, @BoboTiG 2.3.1 ~~~~~ 2023-02-28 • `full history `__ - Run ``black`` on the entire source code - Bundle the ``requirements-tests.txt`` file in the source distribution (`#939 `__) - [watchmedo] Exclude ``FileOpenedEvent`` events from ``AutoRestartTrick``, and ``ShellCommandTrick``, to restore watchdog < 2.3.0 behavior. A better solution should be found in the future. 
(`#949 `__) - [watchmedo] Log ``FileOpenedEvent``, and ``FileClosedEvent``, events in ``LoggerTrick`` - Thanks to our beloved contributors: @BoboTiG 2.3.0 ~~~~~ 2023-02-23 • `full history `__ - [inotify] Add support for ``IN_OPEN`` events: a ``FileOpenedEvent`` event will be fired (`#941 `__) - [watchmedo] Add optional event debouncing for ``auto-restart``, only restarting once if many events happen in quick succession (``--debounce-interval``) (`#940 `__) - [watchmedo] Exit gracefully on ``KeyboardInterrupt`` exception (Ctrl+C) (`#945 `__) - [watchmedo] Add option to not auto-restart the command after it exits (``--no-restart-on-command-exit``) (`#946 `__) - Thanks to our beloved contributors: @BoboTiG, @dstaple, @taleinat, @cernekj 2.2.1 ~~~~~ 2023-01-01 • `full history `__ - Enable ``mypy`` to discover type hints as specified in PEP 561 (`#933 `__) - [ci] Set the expected Python version when building release files - [ci] Update actions versions in use - [watchmedo] [regression] Fix usage of missing ``signal.SIGHUP`` attribute on non-Unix OSes (`#935 `__) - Thanks to our beloved contributors: @BoboTiG, @simon04, @piotrpdev 2.2.0 ~~~~~ 2022-12-05 • `full history `__ - [build] Wheels are now available for Python 3.11 (`#932 `__) - [documentation] HTML documentation builds are now tested for errors (`#902 `__) - [documentation] Fix typos here, and there (`#910 `__) - [fsevents2] The ``fsevents2`` observer is now deprecated (`#909 `__) - [tests] The error message returned by musl libc for error code ``-1`` is now allowed (`#923 `__) - [utils] Remove unnecessary code in ``dirsnapshot.py`` (`#930 `__) - [watchmedo] Handle shutdown events from ``SIGHUP`` (`#912 `__) - Thanks to our beloved contributors: @kurtmckee, @babymastodon, @QuantumEnergyE, @timgates42, @BoboTiG 2.1.9 ~~~~~ 2022-06-10 • `full history `__ - [fsevents] Fix flakey test to assert that there are no errors when stopping the emitter. 
- [inotify] Suppress occasional ``OSError: [Errno 9] Bad file descriptor`` at shutdown. (`#805 `__) - [watchmedo] Make ``auto-restart`` restart the sub-process if it terminates. (`#896 `__) - [watchmedo] Avoid zombie sub-processes when running ``shell-command`` without ``--wait``. (`#405 `__) - Thanks to our beloved contributors: @samschott, @taleinat, @altendky, @BoboTiG 2.1.8 ~~~~~ 2022-05-15 • `full history `__ - Fix adding failed emitters on observer schedule. (`#872 `__) - [inotify] Fix hang when unscheduling watch on a path in an unmounted filesystem. (`#869 `__) - [watchmedo] Fix broken parsing of ``--kill-after`` argument for the ``auto-restart`` command. (`#870 `__) - [watchmedo] Fix broken parsing of boolean arguments. (`#887 `__) - [watchmedo] Fix broken parsing of commands from ``auto-restart``, and ``shell-command``. (`#888 `__) - [watchmedo] Support setting verbosity level via ``-q/--quiet`` and ``-v/--verbose`` arguments. (`#889 `__) - Thanks to our beloved contributors: @taleinat, @kianmeng, @palfrey, @IlayRosenberg, @BoboTiG 2.1.7 ~~~~~ 2022-03-25 • `full history `__ - Eliminate timeout in waiting on event queue. (`#861 `__) - [inotify] Fix ``not`` equality implementation for ``InotifyEvent``. (`#848 `__) - [watchmedo] Fix calling commands from within a Python script. (`#879 `__) - [watchmedo] ``PyYAML`` is loaded only when strictly necessary. Simple usages of ``watchmedo`` are possible without the module being installed. (`#847 `__) - Thanks to our beloved contributors: @sattlerc, @JanzenLiu, @BoboTiG 2.1.6 ~~~~~ 2021-10-01 • `full history `__ - [bsd] Fixed returned paths in ``kqueue.py`` and restored the overall results of the test suite. (`#842 `__) - [bsd] Updated FreeBSD CI support .(`#841 `__) - [watchmedo] Removed the ``argh`` dependency in favor of the builtin ``argparse`` module. (`#836 `__) - [watchmedo] Removed unexistant ``WindowsApiAsyncObserver`` references and ``--debug-force-winapi-async`` arguments. 
- [watchmedo] Improved the help output. - Thanks to our beloved contributors: @knobix, @AndreaRe9, @BoboTiG 2.1.5 ~~~~~ 2021-08-23 • `full history `__ - Fix regression introduced in 2.1.4 (reverted "Allow overriding or adding custom event handlers to event dispatch map. (`#814 `__)"). (`#830 `__) - Convert regexes of type ``str`` to ``list``. (`831 `__) - Thanks to our beloved contributors: @unique1o1, @BoboTiG 2.1.4 ~~~~~ 2021-08-19 • `full history `__ - [watchmedo] Fix usage of ``os.setsid()`` and ``os.killpg()`` Unix-only functions. (`#809 `__) - [mac] Fix missing ``FileModifiedEvent`` on permission or ownership changes of a file. (`#815 `__) - [mac] Convert absolute watch path in ``FSEeventsEmitter`` with ``os.path.realpath()``. (`#822 `__) - Fix a possible ``AttributeError`` in ``SkipRepeatsQueue._put()``. (`#818 `__) - Allow overriding or adding custom event handlers to event dispatch map. (`#814 `__) - Fix tests on big endian platforms. (`#828 `__) - Thanks to our beloved contributors: @replabrobin, @BoboTiG, @SamSchott, @AndreiB97, @NiklasRosenstein, @ikokollari, @mgorny 2.1.3 ~~~~~ 2021-06-26 • `full history `__ - Publish macOS ``arm64`` and ``universal2`` wheels. (`#740 `__) - Thanks to our beloved contributors: @kainjow, @BoboTiG 2.1.2 ~~~~~ 2021-05-19 • `full history `__ - [mac] Fix relative path handling for non-recursive watch. (`#797 `__) - [windows] On PyPy, events happening right after ``start()`` were missed. Add a workaround for that. 
(`#796 `__) - Thanks to our beloved contributors: @oprypin, @CCP-Aporia, @BoboTiG 2.1.1 ~~~~~ 2021-05-10 • `full history `__ - [mac] Fix callback exceptions when the watcher is deleted but still receiving events (`#786 `__) - Thanks to our beloved contributors: @rom1win, @BoboTiG, @CCP-Aporia 2.1.0 ~~~~~ 2021-05-04 • `full history `__ - [inotify] Simplify ``libc`` loading (`#776 `__) - [mac] Add support for non-recursive watches in ``FSEventsEmitter`` (`#779 `__) - [watchmedo] Add support for ``--debug-force-*`` arguments to ``tricks`` (`#781 `__) - Thanks to our beloved contributors: @CCP-Aporia, @aodj, @UnitedMarsupials, @BoboTiG 2.0.3 ~~~~~ 2021-04-22 • `full history `__ - [mac] Use ``logger.debug()`` instead of ``logger.info()`` (`#774 `__) - Updated documentation links (`#777 `__) - Thanks to our beloved contributors: @globau, @imba-tjd, @BoboTiG 2.0.2 ~~~~~ 2021-02-22 • `full history `__ - [mac] Add missing exception objects (`#766 `__) - Thanks to our beloved contributors: @CCP-Aporia, @BoboTiG 2.0.1 ~~~~~ 2021-02-17 • `full history `__ - [mac] Fix a segmentation fault when dealing with unicode paths (`#763 `__) - Moved the CI from Travis-CI to GitHub Actions (`#764 `__) - Thanks to our beloved contributors: @SamSchott, @BoboTiG 2.0.0 ~~~~~ 2021-02-11 • `full history `__ - Avoid deprecated ``PyEval_InitThreads`` on Python 3.7+ (`#746 `__) - [inotify] Add support for ``IN_CLOSE_WRITE`` events. A ``FileCloseEvent`` event will be fired. Note that ``IN_CLOSE_NOWRITE`` events are not handled to prevent much noise. 
(`#184 `__, `#245 `__, `#280 `__, `#313 `__, `#690 `__) - [inotify] Allow to stop the emitter multiple times (`#760 `__) - [mac] Support coalesced filesystem events (`#734 `__) - [mac] Drop support for macOS 10.12 and earlier (`#750 `__) - [mac] Fix an issue when renaming an item changes only the casing (`#750 `__) - Thanks to our beloved contributors: @bstaletic, @lukassup, @ysard, @SamSchott, @CCP-Aporia, @BoboTiG 1.0.2 ~~~~~ 2020-12-18 • `full history `__ - Wheels are published for GNU/Linux, macOS and Windows (`#739 `__) - [mac] Fix missing ``event_id`` attribute in ``fsevents`` (`#721 `__) - [mac] Return byte paths if a byte path was given in ``fsevents`` (`#726 `__) - [mac] Add compatibility with old macOS versions (`#733 `__) - Uniformize event for deletion of watched dir (`#727 `__) - Thanks to our beloved contributors: @SamSchott, @CCP-Aporia, @di, @BoboTiG 1.0.1 ~~~~~ 2020-12-10 • Fix version with good metadatas. 1.0.0 ~~~~~ 2020-12-10 • `full history `__ - Versioning is now following the `semver `__ - Drop support for Python 2.7, 3.4 and 3.5 - [mac] Regression fixes for native ``fsevents`` (`#717 `__) - [windows] ``winapi.BUFFER_SIZE`` now defaults to ``64000`` (instead of ``2048``) (`#700 `__) - [windows] Introduced ``winapi.PATH_BUFFER_SIZE`` (defaults to ``2048``) to keep the old behavior with path-realted functions (`#700 `__) - Use ``pathlib`` from the standard library, instead of pathtools (`#556 `__) - Allow file paths on Unix that don't follow the file system encoding (`#703 `__) - Removed the long-time deprecated ``events.LoggingFileSystemEventHandler`` class, use ``LoggingEventHandler`` instead - Thanks to our beloved contributors: @SamSchott, @bstaletic, @BoboTiG, @CCP-Aporia 0.10.4 ~~~~~~ 2020-11-21 • `full history `__ - Add ``logger`` parameter for the ``LoggingEventHandler`` (`#676 `__) - Replace mutable default arguments with ``if None`` implementation (`#677 `__) - Expand tests to Python 2.7 and 3.5-3.10 for GNU/Linux, macOS and Windows - 
[mac] Performance improvements for the ``fsevents`` module (`#680 `__) - [mac] Prevent compilation of ``watchdog_fsevents.c`` on non-macOS machines (`#687 `__) - [watchmedo] Handle shutdown events from ``SIGTERM`` and ``SIGINT`` more reliably (`#693 `__) - Thanks to our beloved contributors: @Sraw, @CCP-Aporia, @BoboTiG, @maybe-sybr 0.10.3 ~~~~~~ 2020-06-25 • `full history `__ - Ensure ``ObservedWatch.path`` is a string (`#651 `__) - [inotify] Allow to monitor single file (`#655 `__) - [inotify] Prevent raising an exception when a file in a monitored folder has no permissions (`#669 `__, `#670 `__) - Thanks to our beloved contributors: @brant-ruan, @rec, @andfoy, @BoboTiG 0.10.2 ~~~~~~ 2020-02-08 • `full history `__ - Fixed the ``build_ext`` command on macOS Catalina (`#628 `__) - Fixed the installation of macOS requirements on non-macOS OSes (`#635 `__) - Refactored ``dispatch()`` method of ``FileSystemEventHandler``, ``PatternMatchingEventHandler`` and ``RegexMatchingEventHandler`` - [bsd] Improved tests support on non Windows/Linux platforms (`#633 `__, `#639 `__) - [bsd] Added FreeBSD CI support (`#532 `__) - [bsd] Restored full support (`#638 `__, `#641 `__) - Thanks to our beloved contributors: @BoboTiG, @evilham, @danilobellini 0.10.1 ~~~~~~ 2020-01-30 • `full history `__ - Fixed Python 2.7 to 3.6 installation when the OS locale is set to POSIX (`#615 `__) - Fixed the ``build_ext`` command on macOS (`#618 `__, `#620 `__) - Moved requirements to ``setup.cfg`` (`#617 `__) - [mac] Removed old C code for Python 2.5 in the `fsevents` C implementation - [snapshot] Added ``EmptyDirectorySnapshot`` (`#613 `__) - Thanks to our beloved contributors: @Ajordat, @tehkirill, @BoboTiG 0.10.0 ~~~~~~ 2020-01-26 • `full history `__ **Breaking Changes** - Dropped support for Python 2.6, 3.2 and 3.3 - Emitters that failed to start are now removed - [snapshot] Removed the deprecated ``walker_callback`` argument, use ``stat`` instead - [watchmedo] The utility is no more installed 
by default but via the extra ``watchdog[watchmedo]`` **Other Changes** - Fixed several Python 3 warnings - Identify synthesized events with ``is_synthetic`` attribute (`#369 `__) - Use ``os.scandir()`` to improve memory usage (`#503 `__) - [bsd] Fixed flavors of FreeBSD detection (`#529 `__) - [bsd] Skip unprocessable socket files (`#509 `__) - [inotify] Fixed events containing non-ASCII characters (`#516 `__) - [inotify] Fixed the way ``OSError`` are re-raised (`#377 `__) - [inotify] Fixed wrong source path after renaming a top level folder (`#515 `__) - [inotify] Removed delay from non-move events (`#477 `__) - [mac] Fixed a bug when calling ``FSEventsEmitter.stop()`` twice (`#466 `__) - [mac] Support for unscheduling deleted watch (`#541 `__) - [mac] Fixed missing field initializers and unused parameters in ``watchdog_fsevents.c`` - [snapshot] Don't walk directories without read permissions (`#408 `__) - [snapshot] Fixed a race condition crash when a directory is swapped for a file (`#513 `__) - [snasphot] Fixed an ``AttributeError`` about forgotten ``path_for_inode`` attr (`#436 `__) - [snasphot] Added the ``ignore_device=False`` parameter to the ctor (`597 `__) - [watchmedo] Fixed the path separator used (`#478 `__) - [watchmedo] Fixed the use of ``yaml.load()`` for ``yaml.safe_load()`` (`#453 `__) - [watchmedo] Handle all available signals (`#549 `__) - [watchmedo] Added the ``--debug-force-polling`` argument (`#404 `__) - [windows] Fixed issues when the observed directory is deleted (`#570 `__ and `#601 `__) - [windows] ``WindowsApiEmitter`` made easier to subclass (`#344 `__) - [windows] Use separate ctypes DLL instances - [windows] Generate sub created events only if ``recursive=True`` (`#454 `__) - Thanks to our beloved contributors: @BoboTiG, @LKleinNux, @rrzaripov, @wildmichael, @TauPan, @segevfiner, @petrblahos, @QuantumEnergyE, @jeffwidman, @kapsh, @nickoala, @petrblahos, @julianolf, @tonybaloney, @mbakiev, @pR0Ps, javaguirre, @skurfer, @exarkun, 
@joshuaskelly, @danilobellini, @Ajordat 0.9.0 ~~~~~ 2018-08-28 • `full history `__ - Deleting the observed directory now emits a ``DirDeletedEvent`` event - [bsd] Improved the platform detection (`#378 `__) - [inotify] Fixed a crash when the root directory being watched by was deleted (`#374 `__) - [inotify] Handle systems providing uClibc - [linux] Fixed a possible ``DirDeletedEvent`` duplication when deleting a directory - [mac] Fixed unicode path handling ``fsevents2.py`` (`#298 `__) - [watchmedo] Added the ``--debug-force-polling`` argument (`#336 `__) - [windows] Fixed the ``FILE_LIST_DIRECTORY`` constant (`#376 `__) - Thanks to our beloved contributors: @vulpeszerda, @hpk42, @tamland, @senden9, @gorakhargosh, @nolsto, @mafrosis, @DonyorM, @anthrotype, @danilobellini, @pierregr, @ShinNoNoir, @adrpar, @gforcada, @pR0Ps, @yegorich, @dhke 0.8.3 ~~~~~ 2015-02-11 • `full history `__ - Fixed the use of the root logger (`#274 `__) - [inotify] Refactored libc loading and improved error handling in ``inotify_c.py`` - [inotify] Fixed a possible unbound local error in ``inotify_c.py`` - Thanks to our beloved contributors: @mmorearty, @tamland, @tony, @gorakhargosh 0.8.2 ~~~~~ 2014-10-29 • `full history `__ - Event emitters are no longer started on schedule if ``Observer`` is not already running - [mac] Fixed usued arguments to pass clang compilation (`#265 `__) - [snapshot] Fixed a possible race condition crash on directory deletion (`#281 `__) - [windows] Fixed an error when watching the same folder again (`#270 `__) - Thanks to our beloved contributors: @tamland, @apetrone, @Falldog, @theospears 0.8.1 ~~~~~ 2014-07-28 • `full history `__ - Fixed ``anon_inode`` descriptors leakage (`#249 `__) - [inotify] Fixed thread stop dead lock (`#250 `__) - Thanks to our beloved contributors: @Witos, @adiroiban, @tamland 0.8.0 ~~~~~ 2014-07-02 • `full history `__ - Fixed ``argh`` deprecation warnings (`#242 `__) - [snapshot] Methods returning internal stats info were replaced by 
``mtime()``, ``inode()`` and ``path()`` methods - [snapshot] Deprecated the ``walker_callback`` argument - [watchmedo] Fixed ``auto-restart`` to terminate all children processes (`#225 `__) - [watchmedo] Added the ``--no-parallel`` argument (`#227 `__) - [windows] Fixed the value of ``INVALID_HANDLE_VALUE`` (`#123 `__) - [windows] Fixed octal usages to work with Python 3 as well (`#223 `__) - Thanks to our beloved contributors: @tamland, @Ormod, @berdario, @cro, @BernieSumption, @pypingou, @gotcha, @tommorris, @frewsxcv watchdog-6.0.0/docs/000077500000000000000000000000001471115752600142565ustar00rootroot00000000000000watchdog-6.0.0/docs/Makefile000066400000000000000000000107721471115752600157250ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " 
linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/watchdog.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/watchdog.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/watchdog" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/watchdog" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
@echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." watchdog-6.0.0/docs/eclipse_cdt_style.xml000066400000000000000000000400321471115752600204750ustar00rootroot00000000000000 watchdog-6.0.0/docs/make.bat000066400000000000000000000106471471115752600156730ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. 
htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. 
echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\watchdog.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\watchdog.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. 
goto end ) :end watchdog-6.0.0/docs/source/000077500000000000000000000000001471115752600155565ustar00rootroot00000000000000watchdog-6.0.0/docs/source/api.rst000066400000000000000000000023231471115752600170610ustar00rootroot00000000000000.. include:: global.rst.inc .. api_reference: ============= API Reference ============= `watchdog.events` ================= .. automodule:: watchdog.events `watchdog.observers.api` ======================== .. automodule:: watchdog.observers.api :synopsis: Classes useful to observer implementers. Immutables ---------- .. autoclass:: ObservedWatch :members: :show-inheritance: Collections ----------- .. autoclass:: EventQueue :members: :show-inheritance: Classes ------- .. autoclass:: EventEmitter :members: :show-inheritance: .. autoclass:: EventDispatcher :members: :show-inheritance: .. autoclass:: BaseObserver :members: :show-inheritance: `watchdog.observers` ==================== .. automodule:: watchdog.observers `watchdog.observers.polling` ============================ .. automodule:: watchdog.observers.polling `watchdog.utils` ================ .. automodule:: watchdog.utils `watchdog.utils.dirsnapshot` ============================ .. automodule:: watchdog.utils.dirsnapshot `watchdog.tricks` ================= .. automodule:: watchdog.tricks .. toctree:: :maxdepth: 2 watchdog-6.0.0/docs/source/conf.py000066400000000000000000000065211471115752600170610ustar00rootroot00000000000000# watchdog documentation build configuration file, created by # sphinx-quickstart on Tue Nov 30 00:43:58 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os.path # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. TOP_DIR_PATH = os.path.abspath("../../") SRC_DIR_PATH = os.path.join(TOP_DIR_PATH, "src") sys.path.insert(0, SRC_DIR_PATH) import watchdog.version # noqa: E402 PROJECT_NAME = "watchdog" AUTHOR_NAME = "Yesudeep Mangalapilly, Mickaël Schoentgen, and contributors" COPYRIGHT = f"2010-2024, {AUTHOR_NAME}" # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.ifconfig", "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = PROJECT_NAME copyright = COPYRIGHT # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = watchdog.version.VERSION_STRING # The full version, including alpha/beta/rc tags. release = version # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "pyramid" # Output file base name for HTML help builder. 
htmlhelp_basename = "%sdoc" % PROJECT_NAME # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ( "index", "%s.tex" % PROJECT_NAME, "%s Documentation" % PROJECT_NAME, AUTHOR_NAME, "manual", ), ] # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ("index", PROJECT_NAME, "%s Documentation" % PROJECT_NAME, [AUTHOR_NAME], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = PROJECT_NAME epub_author = AUTHOR_NAME epub_publisher = AUTHOR_NAME epub_copyright = COPYRIGHT watchdog-6.0.0/docs/source/examples/000077500000000000000000000000001471115752600173745ustar00rootroot00000000000000watchdog-6.0.0/docs/source/examples/__init__.py000066400000000000000000000000001471115752600214730ustar00rootroot00000000000000watchdog-6.0.0/docs/source/examples/logger.py000066400000000000000000000005101471115752600212210ustar00rootroot00000000000000import sys import time from watchdog.observers import Observer from watchdog.tricks import LoggerTrick event_handler = LoggerTrick() observer = Observer() observer.schedule(event_handler, sys.argv[1], recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() watchdog-6.0.0/docs/source/examples/patterns.py000066400000000000000000000012071471115752600216060ustar00rootroot00000000000000import logging import sys import time from watchdog.events import FileSystemEvent, PatternMatchingEventHandler from watchdog.observers import Observer logging.basicConfig(level=logging.DEBUG) class MyEventHandler(PatternMatchingEventHandler): def on_any_event(self, event: FileSystemEvent) -> None: 
logging.debug(event) event_handler = MyEventHandler(patterns=["*.py", "*.pyc"], ignore_patterns=["version.py"], ignore_directories=True) observer = Observer() observer.schedule(event_handler, sys.argv[1], recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() watchdog-6.0.0/docs/source/examples/simple.py000066400000000000000000000026201471115752600212370ustar00rootroot00000000000000from __future__ import annotations import logging import sys import time from watchdog import events from watchdog.observers import Observer logging.basicConfig(level=logging.DEBUG) class MyEventHandler(events.FileSystemEventHandler): def catch_all_handler(self, event: events.FileSystemEvent) -> None: logging.debug(event) def on_moved(self, event: events.DirMovedEvent | events.FileMovedEvent) -> None: self.catch_all_handler(event) def on_created(self, event: events.DirCreatedEvent | events.FileCreatedEvent) -> None: self.catch_all_handler(event) def on_deleted(self, event: events.DirDeletedEvent | events.FileDeletedEvent) -> None: self.catch_all_handler(event) def on_modified(self, event: events.DirModifiedEvent | events.FileModifiedEvent) -> None: self.catch_all_handler(event) def on_closed(self, event: events.FileClosedEvent) -> None: self.catch_all_handler(event) def on_closed_no_write(self, event: events.FileClosedNoWriteEvent) -> None: self.catch_all_handler(event) def on_opened(self, event: events.FileOpenedEvent) -> None: self.catch_all_handler(event) path = sys.argv[1] event_handler = MyEventHandler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() watchdog-6.0.0/docs/source/examples/tricks.json000066400000000000000000000016611471115752600215720ustar00rootroot00000000000000[ { "watchdog.tricks.LoggerTrick": { "patterns": [ "*.py", "*.js" ] } }, { "watchmedo_webtricks.GoogleClosureTrick": { "scripts": { 
"index-page": [ "app/static/js/vendor/jquery.js", "app/static/js/base.js", "app/static/js/index-page.js"], "about-page": [ "app/static/js/vendor/jquery.js", "app/static/js/base.js", "app/static/js/about-page.js"] }, "suffix": ".min.js", "source_directory": "app/static/js/", "hash_names": true, "patterns": ["*.js"], "destination_directory": "app/public/js/", "compilation_level": "advanced", "mappings_module": "app/javascript_mappings.json" } } ] watchdog-6.0.0/docs/source/examples/tricks.yaml000066400000000000000000000012501471115752600215550ustar00rootroot00000000000000tricks: - watchdog.tricks.LoggerTrick: patterns: ["*.py", "*.js"] - watchmedo_webtricks.GoogleClosureTrick: patterns: ['*.js'] hash_names: true mappings_format: json # json|yaml|python mappings_module: app/javascript_mappings suffix: .min.js compilation_level: advanced # simple|advanced source_directory: app/static/js/ destination_directory: app/public/js/ files: index-page: - app/static/js/vendor/jquery.js - app/static/js/base.js - app/static/js/index-page.js about-page: - app/static/js/vendor/jquery.js - app/static/js/base.js - app/static/js/about-page.js watchdog-6.0.0/docs/source/global.rst.inc000066400000000000000000000034411471115752600203220ustar00rootroot00000000000000.. Global includes, substitutions, and common links. .. |author_name| replace:: Mickaël Schoentgen .. |author_email| replace:: contact@tiger-222.fr .. |copyright| replace:: Copyright 2011-2024 Yesudeep Mangalapilly, Mickaël Schoentgen & contributors. .. |project_name| replace:: ``watchdog`` .. |project_version| replace:: 5.0.4 .. _issue tracker: https://github.com/gorakhargosh/watchdog/issues .. _code repository: https://github.com/gorakhargosh/watchdog .. _kqueue: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 .. _FSEvents: https://developer.apple.com/library/mac/#documentation/Darwin/Conceptual/FSEvents_ProgGuide/Introduction/Introduction.html .. _inotify: https://linux.die.net/man/7/inotify .. 
_macOS File System Monitoring Performance Guidelines: https://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html .. _ReadDirectoryChangesW: https://docs.microsoft.com/windows/win32/api/winbase/nf-winbase-readdirectorychangesw .. _file.monitor: https://github.com/pke/file.monitor .. _fsmonitor: https://github.com/shaurz/fsmonitor .. _git: https://git-scm.org/ .. _github: https://github.com/ .. _guard: https://github.com/guard/guard .. _inotify-tools: https://github.com/rvoicilas/inotify-tools .. _jnotify: http://jnotify.sourceforge.net/ .. _pip: https://pypi.python.org/pypi/pip .. _pnotify: http://mark.heily.com/pnotify .. _pyfilesystem: https://github.com/PyFilesystem/pyfilesystem .. _pyinotify: https://github.com/seb-m/pyinotify .. _Python: https://python.org .. _PyYAML: https://www.pyyaml.org/ .. _treewatcher: https://github.com/jbd/treewatcher .. _unison fsmonitor: https://webdav.seas.upenn.edu/viewvc/unison/trunk/src/fsmonitor.py?view=markup&pathrev=471 .. _XCode: https://developer.apple.com/technologies/tools/xcode.html watchdog-6.0.0/docs/source/hacking.rst000066400000000000000000000023021471115752600177110ustar00rootroot00000000000000.. include:: global.rst.inc .. _hacking: Contributing ============ Welcome hacker! So you have got something you would like to see in |project_name|? Whee. This document will help you get started. Important URLs -------------- |project_name| uses git_ to track code history and hosts its `code repository`_ at github_. The `issue tracker`_ is where you can file bug reports and request features or enhancements to |project_name|. Before you start ---------------- Ensure your system has the following programs and libraries installed before beginning to hack: 1. Python_ 2. git_ 3. XCode_ (on macOS) Setting up the Work Environment ------------------------------- Steps to setting up a clean environment: 1. Fork the `code repository`_ into your github_ account. 2. 
Clone fork and create virtual environment: .. code:: bash $ git clone https://github.com/gorakhargosh/watchdog.git $ cd watchdog $ python -m venv venv 3. Linux .. code:: bash $ . venv/bin/activate (venv)$ python -m pip instal -e '.' 4. Windows .. code:: batch > venv\Scripts\activate (venv)> python -m pip instal -e '.' That's it with the setup. Now you're ready to hack on |project_name|. Happy hacking! watchdog-6.0.0/docs/source/index.rst000066400000000000000000000024011471115752600174140ustar00rootroot00000000000000.. watchdog documentation master file, created by sphinx-quickstart on Tue Nov 30 00:43:58 2010. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. .. include:: global.rst.inc Watchdog ======== Python API library and shell utilities to monitor file system events. Works on 3.9+. Directory monitoring made easy with ----------------------------------- * A cross-platform API. * A shell tool to run commands in response to directory changes. Get started quickly with a simple example in :ref:`quickstart`. Easy installation ----------------- You can use pip_ to install |project_name| quickly and easily:: $ python -m pip install -U watchdog Need more help with installing? See :ref:`installation`. User's Guide ============ .. toctree:: :maxdepth: 2 installation quickstart api hacking Contribute ========== Found a bug in or want a feature added to |project_name|? You can fork the official `code repository`_ or file an issue ticket at the `issue tracker`_. You may also want to refer to :ref:`hacking` for information about contributing code or documentation to |project_name|. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` watchdog-6.0.0/docs/source/installation.rst000066400000000000000000000136271471115752600210220ustar00rootroot00000000000000.. include:: global.rst.inc .. _installation: Installation ============ |project_name| requires 3.9+ to work. 
See a list of :ref:`installation-dependencies`. Installing from PyPI using pip ------------------------------ .. parsed-literal:: $ python -m pip install -U |project_name| # or to install the watchmedo utility: $ python -m pip install -U '|project_name|\[watchmedo]' Installing from source tarballs ------------------------------- .. parsed-literal:: $ wget -c https://pypi.python.org/packages/source/w/watchdog/watchdog-|project_version|.tar.gz $ tar zxvf |project_name|-|project_version|.tar.gz $ cd |project_name|-|project_version| $ python -m pip install -e . # or to install the watchmedo utility: $ python -m pip install -e '.[watchmedo]' Installing from the code repository ----------------------------------- :: $ git clone --recursive git://github.com/gorakhargosh/watchdog.git $ cd watchdog $ python -m pip install -e . # or to install the watchmedo utility: $ python -m pip install -e '.[watchmedo]' .. _installation-dependencies: Dependencies ------------ |project_name| depends on many libraries to do its job. The following is a list of dependencies you need based on the operating system you are using. +---------------------+-------------+-------------+--------+-------------+ | Operating system | Windows | Linux 2.6 | macOS | BSD | | Dependency (row) | | | Darwin | | +=====================+=============+=============+========+=============+ | XCode_ | | | Yes | | +---------------------+-------------+-------------+--------+-------------+ The following is a list of dependencies you need based on the operating system you are using the ``watchmedo`` utility. 
+---------------------+-------------+-------------+--------+-------------+ | Operating system | Windows | Linux 2.6 | macOS | BSD | | Dependency (row) | | | Darwin | | +=====================+=============+=============+========+=============+ | PyYAML_ | Yes | Yes | Yes | Yes | +---------------------+-------------+-------------+--------+-------------+ Supported Platforms (and Caveats) --------------------------------- |project_name| uses native APIs as much as possible falling back to polling the disk periodically to compare directory snapshots only when it cannot use an API natively-provided by the underlying operating system. The following operating systems are currently supported: .. WARNING:: Differences between behaviors of these native API are noted below. Linux 2.6+ Linux kernel version 2.6 and later come with an API called inotify_ that programs can use to monitor file system events. .. NOTE:: On most systems the maximum number of watches that can be created per user is limited to ``8192``. |project_name| needs one per directory to monitor. To change this limit, edit ``/etc/sysctl.conf`` and add:: fs.inotify.max_user_watches=16384 macOS The Darwin kernel/OS X API maintains two ways to monitor directories for file system events: * kqueue_ * FSEvents_ |project_name| can use whichever one is available, preferring FSEvents over ``kqueue(2)``. ``kqueue(2)`` uses open file descriptors for monitoring and the current implementation uses `macOS File System Monitoring Performance Guidelines`_ to open these file descriptors only to monitor events, thus allowing OS X to unmount volumes that are being watched without locking them. .. NOTE:: More information about how |project_name| uses ``kqueue(2)`` is noted in `BSD Unix variants`_. Much of this information applies to macOS as well. _`BSD Unix variants` BSD variants come with kqueue_ which programs can use to monitor changes to open file descriptors. 
Because of the way ``kqueue(2)`` works, |project_name| needs to open these files and directories in read-only non-blocking mode and keep books about them. |project_name| will automatically open file descriptors for all new files/directories created and close those for which are deleted. .. NOTE:: The maximum number of open file descriptor per process limit on your operating system can hinder |project_name|'s ability to monitor files. You should ensure this limit is set to at least **1024** (or a value suitable to your usage). The following command appended to your ``~/.profile`` configuration file does this for you:: ulimit -n 1024 Windows Vista and later The Windows API provides the ReadDirectoryChangesW_. |project_name| currently contains implementation for a synchronous approach requiring additional API functionality only available in Windows Vista and later. .. NOTE:: Since renaming is not the same operation as movement on Windows, |project_name| tries hard to convert renames to movement events. Also, because the ReadDirectoryChangesW_ API function returns rename/movement events for directories even before the underlying I/O is complete, |project_name| may not be able to completely scan the moved directory in order to successfully queue movement events for files and directories within it. .. NOTE:: Since the Windows API does not provide information about whether an object is a file or a directory, delete events for directories may be reported as a file deleted event. OS Independent Polling |project_name| also includes a fallback-implementation that polls watched directories for changes by periodically comparing snapshots of the directory tree. watchdog-6.0.0/docs/source/quickstart.rst000066400000000000000000000042101471115752600204770ustar00rootroot00000000000000.. include:: global.rst.inc .. 
_quickstart: Quickstart ========== Below we present a simple example that monitors the current directory recursively (which means, it will traverse any sub-directories) to detect changes. Here is what we will do with the API: 1. Create an instance of the :class:`watchdog.observers.Observer` thread class. 2. Implement a subclass of :class:`watchdog.events.FileSystemEventHandler`. 3. Schedule monitoring a few paths with the observer instance attaching the event handler. 4. Start the observer thread and wait for it generate events without blocking our main thread. By default, an :class:`watchdog.observers.Observer` instance will not monitor sub-directories. By passing ``recursive=True`` in the call to :meth:`watchdog.observers.Observer.schedule` monitoring entire directory trees is ensured. A Simple Example ---------------- The following example program will monitor the current directory recursively for file system changes and simply print them to the console:: import time from watchdog.events import FileSystemEvent, FileSystemEventHandler from watchdog.observers import Observer class MyEventHandler(FileSystemEventHandler): def on_any_event(self, event: FileSystemEvent) -> None: print(event) event_handler = MyEventHandler() observer = Observer() observer.schedule(event_handler, ".", recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() To stop the program, press Control-C. Typing ------ If you are using type annotations it is important to note that :class:`watchdog.observers.Observer` is not actually a class; it is a variable that hold the "best" observer class available on your platform. In order to correctly type your own code your should use :class:`watchdog.observers.api.BaseObserver`. 
For example:: from watchdog.observers import Observer from watchdog.observers.api import BaseObserver def my_func(obs: BaseObserver) -> None: # Do something with obs pass observer: BaseObserver = Observer() my_func(observer) watchdog-6.0.0/pyproject.toml000066400000000000000000000022601471115752600162420ustar00rootroot00000000000000[tool.coverage.report] exclude_also = [ "if TYPE_CHECKING:", "if __name__ == __main__:", ] [tool.mypy] # Ensure we know what we do warn_redundant_casts = true warn_unused_ignores = true warn_unused_configs = true # Imports management ignore_missing_imports = true follow_imports = "skip" # Ensure full coverage disallow_untyped_defs = true disallow_incomplete_defs = true disallow_untyped_calls = true # Restrict dynamic typing (a little) # e.g. `x: List[Any]` or x: List` # disallow_any_generics = true strict_equality = true [tool.pytest.ini_options] pythonpath = "src" addopts = """ --showlocals -vvv --cov=watchdog --cov-report=term-missing:skip-covered """ [tool.ruff] line-length = 120 indent-width = 4 target-version = "py39" [tool.ruff.lint] extend-select = ["ALL"] ignore = [ "ARG", "ANN", # TODO "B023", # TODO "BLE001", "C90", "COM812", "D", "EM101", "EM102", "FIX", "ISC001", "PERF203", "PL", "PTH", # TODO? 
"S", "TD", ] fixable = ["ALL"] [tool.ruff.format] quote-style = "double" indent-style = "space" skip-magic-trailing-comma = false line-ending = "auto" docstring-code-format = true watchdog-6.0.0/requirements-tests.txt000066400000000000000000000003611471115752600177520ustar00rootroot00000000000000eventlet==0.37.0; python_version < "3.13" flaky==3.8.1 pytest==8.3.3 pytest-cov==6.0.0 pytest-timeout==2.3.1 ruff==0.7.1 sphinx==7.4.7; python_version <= "3.9" sphinx==8.1.3; python_version > "3.9" mypy==1.13.0 types-PyYAML==6.0.12.20240917 watchdog-6.0.0/setup.cfg000066400000000000000000000007041471115752600151500ustar00rootroot00000000000000[metadata] project_urls = Documentation=https://python-watchdog.readthedocs.io/en/stable/ Source=https://github.com/gorakhargosh/watchdog/ Issues=https://github.com/gorakhargosh/watchdog/issues Changelog=https://github.com/gorakhargosh/watchdog/blob/master/changelog.rst [build_sphinx] source-dir = docs/source build-dir = docs/build all_files = 1 [upload_sphinx] # Requires sphinx-pypi-upload to work. upload-dir = docs/build/html watchdog-6.0.0/setup.py000066400000000000000000000116211471115752600150410ustar00rootroot00000000000000import importlib.util import sys import os import os.path from platform import machine from setuptools import setup, find_packages from setuptools.extension import Extension from setuptools.command.build_ext import build_ext SRC_DIR = "src" WATCHDOG_PKG_DIR = os.path.join(SRC_DIR, "watchdog") # Load the module version spec = importlib.util.spec_from_file_location( "version", os.path.join(WATCHDOG_PKG_DIR, "version.py") ) version = importlib.util.module_from_spec(spec) spec.loader.exec_module(version) # Ignored Apple devices on which compiling watchdog_fsevents.c would fail. # The FORCE_MACOS_MACHINE envar, when set to 1, will force the compilation. 
_apple_devices = ("appletv", "iphone", "ipod", "ipad", "watch") is_macos = sys.platform == "darwin" and not machine().lower().startswith(_apple_devices) ext_modules = [] if is_macos or os.getenv("FORCE_MACOS_MACHINE", "0") == "1": ext_modules = [ Extension( name="_watchdog_fsevents", sources=[ "src/watchdog_fsevents.c", ], libraries=["m"], define_macros=[ ("WATCHDOG_VERSION_STRING", '"' + version.VERSION_STRING + '"'), ("WATCHDOG_VERSION_MAJOR", version.VERSION_MAJOR), ("WATCHDOG_VERSION_MINOR", version.VERSION_MINOR), ("WATCHDOG_VERSION_BUILD", version.VERSION_BUILD), ], extra_link_args=[ "-framework", "CoreFoundation", "-framework", "CoreServices", ], extra_compile_args=[ "-std=c99", "-pedantic", "-Wall", "-Wextra", "-fPIC", # Issue #620 "-Wno-nullability-completeness", # Issue #628 "-Wno-nullability-extension", "-Wno-newline-eof", # required w/Xcode 5.1+ and above because of '-mno-fused-madd' "-Wno-error=unused-command-line-argument", ], ), ] extras_require = { "watchmedo": ["PyYAML>=3.10"], } with open("README.rst", encoding="utf-8") as f: readme = f.read() with open("changelog.rst", encoding="utf-8") as f: changelog = f.read() setup( name="watchdog", version=version.VERSION_STRING, description="Filesystem events monitoring", long_description=readme + "\n\n" + changelog, long_description_content_type="text/x-rst", author="Mickaël Schoentgen", author_email="contact@tiger-222.fr", license="Apache-2.0", url="https://github.com/gorakhargosh/watchdog", keywords=" ".join( [ "python", "filesystem", "monitoring", "monitor", "FSEvents", "kqueue", "inotify", "ReadDirectoryChangesW", "polling", "DirectorySnapshot", ] ), classifiers=[ "Development Status :: 5 - Production/Stable", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX 
:: BSD", "Operating System :: Microsoft :: Windows :: Windows Vista", "Operating System :: Microsoft :: Windows :: Windows 7", "Operating System :: Microsoft :: Windows :: Windows 8", "Operating System :: Microsoft :: Windows :: Windows 8.1", "Operating System :: Microsoft :: Windows :: Windows 10", "Operating System :: Microsoft :: Windows :: Windows 11", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: C", "Topic :: Software Development :: Libraries", "Topic :: System :: Monitoring", "Topic :: System :: Filesystems", "Topic :: Utilities", ], package_dir={"": SRC_DIR}, packages=find_packages(SRC_DIR), include_package_data=True, extras_require=extras_require, cmdclass={ "build_ext": build_ext, }, ext_modules=ext_modules, entry_points={ "console_scripts": [ "watchmedo = watchdog.watchmedo:main [watchmedo]", ] }, python_requires=">=3.9", zip_safe=False, ) watchdog-6.0.0/src/000077500000000000000000000000001471115752600141155ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/000077500000000000000000000000001471115752600157155ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/__init__.py000066400000000000000000000000001471115752600200140ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/events.py000066400000000000000000000377031471115752600176050ustar00rootroot00000000000000""":module: watchdog.events :synopsis: File system events and event handlers. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Event Classes ------------- .. 
autoclass:: FileSystemEvent :members: :show-inheritance: :inherited-members: .. autoclass:: FileSystemMovedEvent :members: :show-inheritance: .. autoclass:: FileMovedEvent :members: :show-inheritance: .. autoclass:: DirMovedEvent :members: :show-inheritance: .. autoclass:: FileModifiedEvent :members: :show-inheritance: .. autoclass:: DirModifiedEvent :members: :show-inheritance: .. autoclass:: FileCreatedEvent :members: :show-inheritance: .. autoclass:: FileClosedEvent :members: :show-inheritance: .. autoclass:: FileClosedNoWriteEvent :members: :show-inheritance: .. autoclass:: FileOpenedEvent :members: :show-inheritance: .. autoclass:: DirCreatedEvent :members: :show-inheritance: .. autoclass:: FileDeletedEvent :members: :show-inheritance: .. autoclass:: DirDeletedEvent :members: :show-inheritance: Event Handler Classes --------------------- .. autoclass:: FileSystemEventHandler :members: :show-inheritance: .. autoclass:: PatternMatchingEventHandler :members: :show-inheritance: .. autoclass:: RegexMatchingEventHandler :members: :show-inheritance: .. autoclass:: LoggingEventHandler :members: :show-inheritance: """ from __future__ import annotations import logging import os.path import re from dataclasses import dataclass, field from typing import TYPE_CHECKING from watchdog.utils.patterns import match_any_paths if TYPE_CHECKING: from collections.abc import Generator EVENT_TYPE_MOVED = "moved" EVENT_TYPE_DELETED = "deleted" EVENT_TYPE_CREATED = "created" EVENT_TYPE_MODIFIED = "modified" EVENT_TYPE_CLOSED = "closed" EVENT_TYPE_CLOSED_NO_WRITE = "closed_no_write" EVENT_TYPE_OPENED = "opened" @dataclass(unsafe_hash=True) class FileSystemEvent: """Immutable type that represents a file system event that is triggered when a change occurs on the monitored file system. All FileSystemEvent objects are required to be immutable and hence can be used as keys in dictionaries or be added to sets. 
""" src_path: bytes | str dest_path: bytes | str = "" event_type: str = field(default="", init=False) is_directory: bool = field(default=False, init=False) """ True if event was synthesized; False otherwise. These are events that weren't actually broadcast by the OS, but are presumed to have happened based on other, actual events. """ is_synthetic: bool = field(default=False) class FileSystemMovedEvent(FileSystemEvent): """File system event representing any kind of file system movement.""" event_type = EVENT_TYPE_MOVED # File events. class FileDeletedEvent(FileSystemEvent): """File system event representing file deletion on the file system.""" event_type = EVENT_TYPE_DELETED class FileModifiedEvent(FileSystemEvent): """File system event representing file modification on the file system.""" event_type = EVENT_TYPE_MODIFIED class FileCreatedEvent(FileSystemEvent): """File system event representing file creation on the file system.""" event_type = EVENT_TYPE_CREATED class FileMovedEvent(FileSystemMovedEvent): """File system event representing file movement on the file system.""" class FileClosedEvent(FileSystemEvent): """File system event representing file close on the file system.""" event_type = EVENT_TYPE_CLOSED class FileClosedNoWriteEvent(FileSystemEvent): """File system event representing an unmodified file close on the file system.""" event_type = EVENT_TYPE_CLOSED_NO_WRITE class FileOpenedEvent(FileSystemEvent): """File system event representing file close on the file system.""" event_type = EVENT_TYPE_OPENED # Directory events. 
class DirDeletedEvent(FileSystemEvent): """File system event representing directory deletion on the file system.""" event_type = EVENT_TYPE_DELETED is_directory = True class DirModifiedEvent(FileSystemEvent): """File system event representing directory modification on the file system.""" event_type = EVENT_TYPE_MODIFIED is_directory = True class DirCreatedEvent(FileSystemEvent): """File system event representing directory creation on the file system.""" event_type = EVENT_TYPE_CREATED is_directory = True class DirMovedEvent(FileSystemMovedEvent): """File system event representing directory movement on the file system.""" is_directory = True class FileSystemEventHandler: """Base file system event handler that you can override methods from.""" def dispatch(self, event: FileSystemEvent) -> None: """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ self.on_any_event(event) getattr(self, f"on_{event.event_type}")(event) def on_any_event(self, event: FileSystemEvent) -> None: """Catch-all event handler. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ def on_moved(self, event: DirMovedEvent | FileMovedEvent) -> None: """Called when a file or a directory is moved or renamed. :param event: Event representing file/directory movement. :type event: :class:`DirMovedEvent` or :class:`FileMovedEvent` """ def on_created(self, event: DirCreatedEvent | FileCreatedEvent) -> None: """Called when a file or directory is created. :param event: Event representing file/directory creation. :type event: :class:`DirCreatedEvent` or :class:`FileCreatedEvent` """ def on_deleted(self, event: DirDeletedEvent | FileDeletedEvent) -> None: """Called when a file or directory is deleted. :param event: Event representing file/directory deletion. 
:type event: :class:`DirDeletedEvent` or :class:`FileDeletedEvent` """ def on_modified(self, event: DirModifiedEvent | FileModifiedEvent) -> None: """Called when a file or directory is modified. :param event: Event representing file/directory modification. :type event: :class:`DirModifiedEvent` or :class:`FileModifiedEvent` """ def on_closed(self, event: FileClosedEvent) -> None: """Called when a file opened for writing is closed. :param event: Event representing file closing. :type event: :class:`FileClosedEvent` """ def on_closed_no_write(self, event: FileClosedNoWriteEvent) -> None: """Called when a file opened for reading is closed. :param event: Event representing file closing. :type event: :class:`FileClosedNoWriteEvent` """ def on_opened(self, event: FileOpenedEvent) -> None: """Called when a file is opened. :param event: Event representing file opening. :type event: :class:`FileOpenedEvent` """ class PatternMatchingEventHandler(FileSystemEventHandler): """Matches given patterns with file paths associated with occurring events. Uses pathlib's `PurePath.match()` method. `patterns` and `ignore_patterns` are expected to be a list of strings. """ def __init__( self, *, patterns: list[str] | None = None, ignore_patterns: list[str] | None = None, ignore_directories: bool = False, case_sensitive: bool = False, ): super().__init__() self._patterns = patterns self._ignore_patterns = ignore_patterns self._ignore_directories = ignore_directories self._case_sensitive = case_sensitive @property def patterns(self) -> list[str] | None: """(Read-only) Patterns to allow matching event paths. """ return self._patterns @property def ignore_patterns(self) -> list[str] | None: """(Read-only) Patterns to ignore matching event paths. """ return self._ignore_patterns @property def ignore_directories(self) -> bool: """(Read-only) ``True`` if directories should be ignored; ``False`` otherwise. 
""" return self._ignore_directories @property def case_sensitive(self) -> bool: """(Read-only) ``True`` if path names should be matched sensitive to case; ``False`` otherwise. """ return self._case_sensitive def dispatch(self, event: FileSystemEvent) -> None: """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ if self.ignore_directories and event.is_directory: return paths = [] if hasattr(event, "dest_path"): paths.append(os.fsdecode(event.dest_path)) if event.src_path: paths.append(os.fsdecode(event.src_path)) if match_any_paths( paths, included_patterns=self.patterns, excluded_patterns=self.ignore_patterns, case_sensitive=self.case_sensitive, ): super().dispatch(event) class RegexMatchingEventHandler(FileSystemEventHandler): """Matches given regexes with file paths associated with occurring events. Uses the `re` module. """ def __init__( self, *, regexes: list[str] | None = None, ignore_regexes: list[str] | None = None, ignore_directories: bool = False, case_sensitive: bool = False, ): super().__init__() if regexes is None: regexes = [r".*"] elif isinstance(regexes, str): regexes = [regexes] if ignore_regexes is None: ignore_regexes = [] if case_sensitive: self._regexes = [re.compile(r) for r in regexes] self._ignore_regexes = [re.compile(r) for r in ignore_regexes] else: self._regexes = [re.compile(r, re.IGNORECASE) for r in regexes] self._ignore_regexes = [re.compile(r, re.IGNORECASE) for r in ignore_regexes] self._ignore_directories = ignore_directories self._case_sensitive = case_sensitive @property def regexes(self) -> list[re.Pattern[str]]: """(Read-only) Regexes to allow matching event paths. """ return self._regexes @property def ignore_regexes(self) -> list[re.Pattern[str]]: """(Read-only) Regexes to ignore matching event paths. 
""" return self._ignore_regexes @property def ignore_directories(self) -> bool: """(Read-only) ``True`` if directories should be ignored; ``False`` otherwise. """ return self._ignore_directories @property def case_sensitive(self) -> bool: """(Read-only) ``True`` if path names should be matched sensitive to case; ``False`` otherwise. """ return self._case_sensitive def dispatch(self, event: FileSystemEvent) -> None: """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ if self.ignore_directories and event.is_directory: return paths = [] if hasattr(event, "dest_path"): paths.append(os.fsdecode(event.dest_path)) if event.src_path: paths.append(os.fsdecode(event.src_path)) if any(r.match(p) for r in self.ignore_regexes for p in paths): return if any(r.match(p) for r in self.regexes for p in paths): super().dispatch(event) class LoggingEventHandler(FileSystemEventHandler): """Logs all the events captured.""" def __init__(self, *, logger: logging.Logger | None = None) -> None: super().__init__() self.logger = logger or logging.root def on_moved(self, event: DirMovedEvent | FileMovedEvent) -> None: super().on_moved(event) what = "directory" if event.is_directory else "file" self.logger.info("Moved %s: from %s to %s", what, event.src_path, event.dest_path) def on_created(self, event: DirCreatedEvent | FileCreatedEvent) -> None: super().on_created(event) what = "directory" if event.is_directory else "file" self.logger.info("Created %s: %s", what, event.src_path) def on_deleted(self, event: DirDeletedEvent | FileDeletedEvent) -> None: super().on_deleted(event) what = "directory" if event.is_directory else "file" self.logger.info("Deleted %s: %s", what, event.src_path) def on_modified(self, event: DirModifiedEvent | FileModifiedEvent) -> None: super().on_modified(event) what = "directory" if event.is_directory else "file" self.logger.info("Modified %s: %s", what, 
def generate_sub_moved_events(
    src_dir_path: bytes | str,
    dest_dir_path: bytes | str,
) -> Generator[DirMovedEvent | FileMovedEvent]:
    """Walk a directory that has just been moved and synthesize one move event
    per item found inside it — :class:`DirMovedEvent` for sub-directories,
    :class:`FileMovedEvent` for files — all flagged as synthetic.

    :param src_dir_path: The source path of the moved directory.
    :param dest_dir_path: The destination path of the moved directory.
    :returns: An iterable of file system events of type :class:`DirMovedEvent`
        and :class:`FileMovedEvent`.
    """
    for root, dir_names, file_names in os.walk(dest_dir_path):  # type: ignore[type-var]
        # Directories first, then files, matching the order os.walk reports them.
        for names, event_cls in ((dir_names, DirMovedEvent), (file_names, FileMovedEvent)):
            for name in names:
                new_path = os.path.join(root, name)  # type: ignore[call-overload]
                # Reconstruct where the item used to live by swapping the
                # destination prefix for the source prefix.
                old_path = new_path.replace(dest_dir_path, src_dir_path) if src_dir_path else ""
                yield event_cls(old_path, new_path, is_synthetic=True)
:returns: An iterable of file system events of type :class:`DirCreatedEvent` and :class:`FileCreatedEvent`. """ for root, directories, filenames in os.walk(src_dir_path): # type: ignore[type-var] for directory in directories: full_path = os.path.join(root, directory) # type: ignore[call-overload] yield DirCreatedEvent(full_path, is_synthetic=True) for filename in filenames: full_path = os.path.join(root, filename) # type: ignore[call-overload] yield FileCreatedEvent(full_path, is_synthetic=True) watchdog-6.0.0/src/watchdog/observers/000077500000000000000000000000001471115752600177275ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/observers/__init__.py000066400000000000000000000062701471115752600220450ustar00rootroot00000000000000""":module: watchdog.observers :synopsis: Observer that picks a native implementation if available. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ======= .. autoclass:: Observer :members: :show-inheritance: :inherited-members: Observer thread that schedules watching directories and dispatches calls to event handlers. You can also import platform specific classes directly and use it instead of :class:`Observer`. Here is a list of implemented observer classes.: ============== ================================ ============================== Class Platforms Note ============== ================================ ============================== |Inotify| Linux 2.6.13+ ``inotify(7)`` based observer |FSEvents| macOS FSEvents based observer |Kqueue| macOS and BSD with kqueue(2) ``kqueue(2)`` based observer |WinApi| Microsoft Windows Windows API-based observer |Polling| Any fallback implementation ============== ================================ ============================== .. |Inotify| replace:: :class:`.inotify.InotifyObserver` .. |FSEvents| replace:: :class:`.fsevents.FSEventsObserver` .. |Kqueue| replace:: :class:`.kqueue.KqueueObserver` .. 
def _get_observer_cls() -> ObserverType:
    """Pick the best observer class for the current platform.

    Preference order per platform, with graceful fallback when a native
    backend fails to import:

    - Linux: inotify, unless the libc is unsupported.
    - macOS: FSEvents, then kqueue, then polling.
    - Windows: ReadDirectoryChangesW, then polling.
    - BSD: kqueue.
    - Anything else (or any fallback above): polling.
    """
    if platform.is_linux():
        # UnsupportedLibcError is raised at import time when the running libc
        # lacks the required inotify symbols; fall through to polling then.
        with contextlib.suppress(UnsupportedLibcError):
            from watchdog.observers.inotify import InotifyObserver

            return InotifyObserver
    elif platform.is_darwin():
        try:
            from watchdog.observers.fsevents import FSEventsObserver
        except Exception:
            # FSEvents needs the compiled `_watchdog_fsevents` extension; when
            # it is unavailable, try the kqueue backend before giving up.
            try:
                from watchdog.observers.kqueue import KqueueObserver
            except Exception:
                warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.", stacklevel=1)
            else:
                warnings.warn("Failed to import fsevents. Fall back to kqueue", stacklevel=1)
                return KqueueObserver
        else:
            return FSEventsObserver
    elif platform.is_windows():
        # TODO: find a reliable way of checking Windows version and import
        # polling explicitly for Windows XP
        try:
            from watchdog.observers.read_directory_changes import WindowsApiObserver
        except Exception:
            warnings.warn("Failed to import `read_directory_changes`. Fall back to polling.", stacklevel=1)
        else:
            return WindowsApiObserver
    elif platform.is_bsd():
        from watchdog.observers.kqueue import KqueueObserver

        return KqueueObserver

    # Last resort: the pure-Python polling observer works everywhere.
    from watchdog.observers.polling import PollingObserver

    return PollingObserver
class EventEmitter(BaseThread):
    """Producer thread base class subclassed by event emitters that generate
    events and populate a queue with them.

    :param event_queue: The event queue to populate with generated events.
    :type event_queue: :class:`watchdog.events.EventQueue`
    :param watch: The watch to observe and produce events for.
    :type watch: :class:`ObservedWatch`
    :param timeout: Timeout (in seconds) between successive attempts at reading events.
    :type timeout: ``float``
    :param event_filter: Collection of event types to emit, or None for no filtering (default).
    :type event_filter: Iterable[:class:`watchdog.events.FileSystemEvent`] | None
    """

    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> None:
        super().__init__()
        self._event_queue = event_queue
        self._watch = watch
        self._timeout = timeout
        # ``None`` means "no filtering" — see queue_event(). A frozenset makes
        # the filter immutable once the emitter is constructed.
        self._event_filter = frozenset(event_filter) if event_filter is not None else None

    @property
    def timeout(self) -> float:
        """Blocking timeout for reading events."""
        return self._timeout

    @property
    def watch(self) -> ObservedWatch:
        """The watch associated with this emitter."""
        return self._watch

    def queue_event(self, event: FileSystemEvent) -> None:
        """Queues a single event, provided it passes the event filter.

        The event is paired with this emitter's watch so the dispatcher can
        route it to the handlers registered for that watch.

        :param event: Event to be queued.
        :type event: An instance of :class:`watchdog.events.FileSystemEvent`
            or a subclass.
        """
        # isinstance (rather than exact type match) lets a filter entry admit
        # subclasses of the listed event types.
        if self._event_filter is None or any(isinstance(event, cls) for cls in self._event_filter):
            self._event_queue.put((event, self.watch))

    def queue_events(self, timeout: float) -> None:
        """Override this method to populate the event queue with events
        per interval period.

        :param timeout: Timeout (in seconds) between successive attempts at
            reading events.
        :type timeout: ``float``
        """

    def run(self) -> None:
        # Keep polling for events until BaseThread.stop() flips the flag
        # checked by should_keep_running().
        while self.should_keep_running():
            self.queue_events(self.timeout)
class BaseObserver(EventDispatcher):
    """Base observer.

    Maintains the bookkeeping shared by all observer implementations: the set
    of scheduled watches, the handlers registered per watch, and one emitter
    (of ``emitter_class``) per watch. All mutation happens under a re-entrant
    lock so handlers may (un)schedule from within event callbacks.
    """

    def __init__(self, emitter_class: type[EventEmitter], *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(timeout=timeout)
        self._emitter_class = emitter_class
        # RLock: public methods take it, and dispatch_events() holds it while
        # calling handlers, which may in turn call schedule()/unschedule().
        self._lock = threading.RLock()
        self._watches: set[ObservedWatch] = set()
        self._handlers: defaultdict[ObservedWatch, set[FileSystemEventHandler]] = defaultdict(set)
        self._emitters: set[EventEmitter] = set()
        self._emitter_for_watch: dict[ObservedWatch, EventEmitter] = {}

    def _add_emitter(self, emitter: EventEmitter) -> None:
        # Keep both containers in sync: fast lookup by watch + iteration set.
        self._emitter_for_watch[emitter.watch] = emitter
        self._emitters.add(emitter)

    def _remove_emitter(self, emitter: EventEmitter) -> None:
        del self._emitter_for_watch[emitter.watch]
        self._emitters.remove(emitter)
        emitter.stop()
        # join() raises RuntimeError if the emitter thread was never started.
        with contextlib.suppress(RuntimeError):
            emitter.join()

    def _clear_emitters(self) -> None:
        # Signal every emitter to stop first, then join them, so the total
        # shutdown wait is bounded by the slowest emitter rather than the sum.
        for emitter in self._emitters:
            emitter.stop()
        for emitter in self._emitters:
            with contextlib.suppress(RuntimeError):
                emitter.join()
        self._emitters.clear()
        self._emitter_for_watch.clear()

    def _add_handler_for_watch(self, event_handler: FileSystemEventHandler, watch: ObservedWatch) -> None:
        self._handlers[watch].add(event_handler)

    def _remove_handlers_for_watch(self, watch: ObservedWatch) -> None:
        del self._handlers[watch]

    @property
    def emitters(self) -> set[EventEmitter]:
        """Returns event emitter created by this observer."""
        return self._emitters

    def start(self) -> None:
        # Start emitters scheduled before the observer itself was started.
        # Iterate over a copy: _remove_emitter() mutates self._emitters.
        for emitter in self._emitters.copy():
            try:
                emitter.start()
            except Exception:
                self._remove_emitter(emitter)
                raise
        super().start()

    def schedule(
        self,
        event_handler: FileSystemEventHandler,
        path: str,
        *,
        recursive: bool = False,
        event_filter: list[type[FileSystemEvent]] | None = None,
    ) -> ObservedWatch:
        """Schedules watching a path and calls appropriate methods specified
        in the given event handler in response to file system events.

        :param event_handler: An event handler instance that has appropriate
            event handling methods which will be called by the observer in
            response to file system events.
        :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass
        :param path: Directory path that will be monitored.
        :type path: ``str``
        :param recursive: ``True`` if events will be emitted for
            sub-directories traversed recursively; ``False`` otherwise.
        :type recursive: ``bool``
        :param event_filter: Collection of event types to emit, or None for no filtering (default).
        :type event_filter: Iterable[:class:`watchdog.events.FileSystemEvent`] | None
        :return: An :class:`ObservedWatch` object instance representing a watch.
        """
        with self._lock:
            watch = ObservedWatch(path, recursive=recursive, event_filter=event_filter)
            self._add_handler_for_watch(event_handler, watch)

            # If we don't have an emitter for this watch already, create it.
            if watch not in self._emitter_for_watch:
                emitter = self._emitter_class(self.event_queue, watch, timeout=self.timeout, event_filter=event_filter)
                # Start the emitter right away only if the observer is already
                # running; otherwise start() will launch it later.
                if self.is_alive():
                    emitter.start()
                self._add_emitter(emitter)
            self._watches.add(watch)
        return watch

    def add_handler_for_watch(self, event_handler: FileSystemEventHandler, watch: ObservedWatch) -> None:
        """Adds a handler for the given watch.

        :param event_handler: An event handler instance that has appropriate
            event handling methods which will be called by the observer in
            response to file system events.
        :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass
        :param watch: The watch to add a handler for.
        :type watch: An instance of :class:`ObservedWatch` or a subclass of
            :class:`ObservedWatch`
        """
        with self._lock:
            self._add_handler_for_watch(event_handler, watch)

    def remove_handler_for_watch(self, event_handler: FileSystemEventHandler, watch: ObservedWatch) -> None:
        """Removes a handler for the given watch.

        :param event_handler: An event handler instance that has appropriate
            event handling methods which will be called by the observer in
            response to file system events.
        :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass
        :param watch: The watch to remove a handler for.
        :type watch: An instance of :class:`ObservedWatch` or a subclass of
            :class:`ObservedWatch`
        """
        with self._lock:
            self._handlers[watch].remove(event_handler)

    def unschedule(self, watch: ObservedWatch) -> None:
        """Unschedules a watch.

        :param watch: The watch to unschedule.
        :type watch: An instance of :class:`ObservedWatch` or a subclass of
            :class:`ObservedWatch`
        """
        with self._lock:
            emitter = self._emitter_for_watch[watch]
            del self._handlers[watch]
            self._remove_emitter(emitter)
            self._watches.remove(watch)

    def unschedule_all(self) -> None:
        """Unschedules all watches and detaches all associated event
        handlers."""
        with self._lock:
            self._handlers.clear()
            self._clear_emitters()
            self._watches.clear()

    def on_thread_stop(self) -> None:
        # Stopping the observer tears down every watch and emitter.
        self.unschedule_all()

    def dispatch_events(self, event_queue: EventQueue) -> None:
        entry = event_queue.get(block=True)
        if entry is EventDispatcher.stop_event:
            return
        event, watch = entry

        with self._lock:
            # To allow unschedule/stop and safe removal of event handlers
            # within event handlers itself, check if the handler is still
            # registered after every dispatch.
            for handler in self._handlers[watch].copy():
                if handler in self._handlers[watch]:
                    handler.dispatch(event)
        event_queue.task_done()
:author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: macOS """ from __future__ import annotations import logging import os import threading import time import unicodedata from typing import TYPE_CHECKING import _watchdog_fsevents as _fsevents from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, generate_sub_created_events, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.utils.dirsnapshot import DirectorySnapshot if TYPE_CHECKING: from watchdog.events import FileSystemEvent, FileSystemEventHandler from watchdog.observers.api import EventQueue, ObservedWatch logger = logging.getLogger("fsevents") class FSEventsEmitter(EventEmitter): """macOS FSEvents Emitter class. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :param event_filter: Collection of event types to emit, or None for no filtering (default). :param suppress_history: The FSEvents API may emit historic events up to 30 sec before the watch was started. When ``suppress_history`` is ``True``, those events will be suppressed by creating a directory snapshot of the watched path before starting the stream as a reference to suppress old events. Warning: This may result in significant memory usage in case of a large number of items in the watched path. 
:type timeout: ``float`` """ def __init__( self, event_queue: EventQueue, watch: ObservedWatch, *, timeout: float = DEFAULT_EMITTER_TIMEOUT, event_filter: list[type[FileSystemEvent]] | None = None, suppress_history: bool = False, ) -> None: super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter) self._fs_view: set[int] = set() self.suppress_history = suppress_history self._start_time = 0.0 self._starting_state: DirectorySnapshot | None = None self._lock = threading.Lock() self._absolute_watch_path = os.path.realpath(os.path.abspath(os.path.expanduser(self.watch.path))) def on_thread_stop(self) -> None: _fsevents.remove_watch(self.watch) _fsevents.stop(self) def queue_event(self, event: FileSystemEvent) -> None: # fsevents defaults to be recursive, so if the watch was meant to be non-recursive then we need to drop # all the events here which do not have a src_path / dest_path that matches the watched path if self._watch.is_recursive or not self._is_recursive_event(event): logger.debug("queue_event %s", event) EventEmitter.queue_event(self, event) else: logger.debug("drop event %s", event) def _is_recursive_event(self, event: FileSystemEvent) -> bool: src_path = event.src_path if event.is_directory else os.path.dirname(event.src_path) if src_path == self._absolute_watch_path: return False if isinstance(event, (FileMovedEvent, DirMovedEvent)): # when moving something into the watch path we must always take the dirname, # otherwise we miss out on `DirMovedEvent`s dest_path = os.path.dirname(event.dest_path) if dest_path == self._absolute_watch_path: return False return True def _queue_created_event(self, event: FileSystemEvent, src_path: bytes | str, dirname: bytes | str) -> None: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(dirname)) def _queue_deleted_event(self, event: FileSystemEvent, src_path: bytes | str, dirname: bytes | str) -> None: cls = 
DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(dirname)) def _queue_modified_event(self, event: FileSystemEvent, src_path: bytes | str, dirname: bytes | str) -> None: cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(src_path)) def _queue_renamed_event( self, src_event: FileSystemEvent, src_path: bytes | str, dst_path: bytes | str, src_dirname: bytes | str, dst_dirname: bytes | str, ) -> None: cls = DirMovedEvent if src_event.is_directory else FileMovedEvent dst_path = self._encode_path(dst_path) self.queue_event(cls(src_path, dst_path)) self.queue_event(DirModifiedEvent(src_dirname)) self.queue_event(DirModifiedEvent(dst_dirname)) def _is_historic_created_event(self, event: _fsevents.NativeEvent) -> bool: # We only queue a created event if the item was created after we # started the FSEventsStream. in_history = event.inode in self._fs_view if self._starting_state: try: old_inode = self._starting_state.inode(event.path)[0] before_start = old_inode == event.inode except KeyError: before_start = False else: before_start = False return in_history or before_start @staticmethod def _is_meta_mod(event: _fsevents.NativeEvent) -> bool: """Returns True if the event indicates a change in metadata.""" return event.is_inode_meta_mod or event.is_xattr_mod or event.is_owner_change def queue_events(self, timeout: float, events: list[_fsevents.NativeEvent]) -> None: # type: ignore[override] if logger.getEffectiveLevel() <= logging.DEBUG: for event in events: flags = ", ".join(attr for attr in dir(event) if getattr(event, attr) is True) logger.debug("%s: %s", event, flags) if time.monotonic() - self._start_time > 60: # Event history is no longer needed, let's free some memory. 
self._starting_state = None while events: event = events.pop(0) src_path = self._encode_path(event.path) src_dirname = os.path.dirname(src_path) try: stat = os.stat(src_path) except OSError: stat = None exists = stat and stat.st_ino == event.inode # FSevents may coalesce multiple events for the same item + path into a # single event. However, events are never coalesced for different items at # the same path or for the same item at different paths. Therefore, the # event chains "removed -> created" and "created -> renamed -> removed" will # never emit a single native event and a deleted event *always* means that # the item no longer existed at the end of the event chain. # Some events will have a spurious `is_created` flag set, coalesced from an # already emitted and processed CreatedEvent. To filter those, we keep track # of all inodes which we know to be already created. This is safer than # keeping track of paths since paths are more likely to be reused than # inodes. # Likewise, some events will have a spurious `is_modified`, # `is_inode_meta_mod` or `is_xattr_mod` flag set. We currently do not # suppress those but could do so if the item still exists by caching the # stat result and verifying that it did change. if event.is_created and event.is_removed: # Events will only be coalesced for the same item / inode. # The sequence deleted -> created therefore cannot occur. # Any combination with renamed cannot occur either. 
if not self._is_historic_created_event(event): self._queue_created_event(event, src_path, src_dirname) self._fs_view.add(event.inode) if event.is_modified or self._is_meta_mod(event): self._queue_modified_event(event, src_path, src_dirname) self._queue_deleted_event(event, src_path, src_dirname) self._fs_view.discard(event.inode) else: if event.is_created and not self._is_historic_created_event(event): self._queue_created_event(event, src_path, src_dirname) self._fs_view.add(event.inode) if event.is_modified or self._is_meta_mod(event): self._queue_modified_event(event, src_path, src_dirname) if event.is_renamed: # Check if we have a corresponding destination event in the watched path. dst_event = next( iter(e for e in events if e.is_renamed and e.inode == event.inode), None, ) if dst_event: # Item was moved within the watched folder. logger.debug("Destination event for rename is %s", dst_event) dst_path = self._encode_path(dst_event.path) dst_dirname = os.path.dirname(dst_path) self._queue_renamed_event(event, src_path, dst_path, src_dirname, dst_dirname) self._fs_view.add(event.inode) for sub_moved_event in generate_sub_moved_events(src_path, dst_path): self.queue_event(sub_moved_event) # Process any coalesced flags for the dst_event. events.remove(dst_event) if dst_event.is_modified or self._is_meta_mod(dst_event): self._queue_modified_event(dst_event, dst_path, dst_dirname) if dst_event.is_removed: self._queue_deleted_event(dst_event, dst_path, dst_dirname) self._fs_view.discard(dst_event.inode) elif exists: # This is the destination event, item was moved into the watched # folder. self._queue_created_event(event, src_path, src_dirname) self._fs_view.add(event.inode) for sub_created_event in generate_sub_created_events(src_path): self.queue_event(sub_created_event) else: # This is the source event, item was moved out of the watched # folder. 
self._queue_deleted_event(event, src_path, src_dirname) self._fs_view.discard(event.inode) # Skip further coalesced processing. continue if event.is_removed: # Won't occur together with renamed. self._queue_deleted_event(event, src_path, src_dirname) self._fs_view.discard(event.inode) if event.is_root_changed: # This will be set if root or any of its parents is renamed or deleted. # TODO: find out new path and generate DirMovedEvent? self.queue_event(DirDeletedEvent(self.watch.path)) logger.debug("Stopping because root path was changed") self.stop() self._fs_view.clear() def events_callback(self, paths: list[bytes], inodes: list[int], flags: list[int], ids: list[int]) -> None: """Callback passed to FSEventStreamCreate(), it will receive all FS events and queue them. """ cls = _fsevents.NativeEvent try: events = [ cls(path, inode, event_flags, event_id) for path, inode, event_flags, event_id in zip(paths, inodes, flags, ids) ] with self._lock: self.queue_events(self.timeout, events) except Exception: logger.exception("Unhandled exception in fsevents callback") def run(self) -> None: self.pathnames = [self.watch.path] self._start_time = time.monotonic() try: _fsevents.add_watch(self, self.watch, self.events_callback, self.pathnames) _fsevents.read_events(self) except Exception: logger.exception("Unhandled exception in FSEventsEmitter") def on_thread_start(self) -> None: if self.suppress_history: watch_path = os.fsdecode(self.watch.path) if isinstance(self.watch.path, bytes) else self.watch.path self._starting_state = DirectorySnapshot(watch_path) def _encode_path(self, path: bytes | str) -> bytes | str: """Encode path only if bytes were passed to this emitter.""" return os.fsencode(path) if isinstance(self.watch.path, bytes) else path class FSEventsObserver(BaseObserver): def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None: super().__init__(FSEventsEmitter, timeout=timeout) def schedule( self, event_handler: FileSystemEventHandler, path: str, *, 
recursive: bool = False, event_filter: list[type[FileSystemEvent]] | None = None, ) -> ObservedWatch: # Fix for issue #26: Trace/BPT error when given a unicode path # string. https://github.com/gorakhargosh/watchdog/issues#issue/26 if isinstance(path, str): path = unicodedata.normalize("NFC", path) return super().schedule(event_handler, path, recursive=recursive, event_filter=event_filter) watchdog-6.0.0/src/watchdog/observers/fsevents2.py000066400000000000000000000223371471115752600222270ustar00rootroot00000000000000""":module: watchdog.observers.fsevents2 :synopsis: FSEvents based emitter implementation. :author: thomas.amland@gmail.com (Thomas Amland) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: macOS """ from __future__ import annotations import logging import os import queue import unicodedata import warnings from threading import Thread from typing import TYPE_CHECKING # pyobjc import AppKit from FSEvents import ( CFRunLoopGetCurrent, CFRunLoopRun, CFRunLoopStop, FSEventStreamCreate, FSEventStreamInvalidate, FSEventStreamRelease, FSEventStreamScheduleWithRunLoop, FSEventStreamStart, FSEventStreamStop, kCFAllocatorDefault, kCFRunLoopDefaultMode, kFSEventStreamCreateFlagFileEvents, kFSEventStreamCreateFlagNoDefer, kFSEventStreamEventFlagItemChangeOwner, kFSEventStreamEventFlagItemCreated, kFSEventStreamEventFlagItemFinderInfoMod, kFSEventStreamEventFlagItemInodeMetaMod, kFSEventStreamEventFlagItemIsDir, kFSEventStreamEventFlagItemIsSymlink, kFSEventStreamEventFlagItemModified, kFSEventStreamEventFlagItemRemoved, kFSEventStreamEventFlagItemRenamed, kFSEventStreamEventFlagItemXattrMod, kFSEventStreamEventIdSinceNow, ) from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileSystemEvent, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter if TYPE_CHECKING: from typing 
import Callable from watchdog.observers.api import EventQueue, ObservedWatch logger = logging.getLogger(__name__) message = "watchdog.observers.fsevents2 is deprecated and will be removed in a future release." warnings.warn(message, category=DeprecationWarning, stacklevel=1) logger.warning(message) class FSEventsQueue(Thread): """Low level FSEvents client.""" def __init__(self, path: bytes | str) -> None: Thread.__init__(self) self._queue: queue.Queue[list[NativeEvent] | None] = queue.Queue() self._run_loop = None if isinstance(path, bytes): path = os.fsdecode(path) self._path = unicodedata.normalize("NFC", path) context = None latency = 1.0 self._stream_ref = FSEventStreamCreate( kCFAllocatorDefault, self._callback, context, [self._path], kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents, ) if self._stream_ref is None: error = "FSEvents. Could not create stream." raise OSError(error) def run(self) -> None: pool = AppKit.NSAutoreleasePool.alloc().init() self._run_loop = CFRunLoopGetCurrent() FSEventStreamScheduleWithRunLoop(self._stream_ref, self._run_loop, kCFRunLoopDefaultMode) if not FSEventStreamStart(self._stream_ref): FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) error = "FSEvents. Could not start stream." raise OSError(error) CFRunLoopRun() FSEventStreamStop(self._stream_ref) FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) del pool # Make sure waiting thread is notified self._queue.put(None) def stop(self) -> None: if self._run_loop is not None: CFRunLoopStop(self._run_loop) def _callback( self, stream_ref: int, client_callback_info: Callable, num_events: int, event_paths: list[bytes], event_flags: list[int], event_ids: list[int], ) -> None: events = [NativeEvent(path, flags, _id) for path, flags, _id in zip(event_paths, event_flags, event_ids)] logger.debug("FSEvents callback. 
Got %d events:", num_events) for e in events: logger.debug(e) self._queue.put(events) def read_events(self) -> list[NativeEvent] | None: """Returns a list or one or more events, or None if there are no more events to be read. """ return self._queue.get() if self.is_alive() else None class NativeEvent: def __init__(self, path: bytes, flags: int, event_id: int) -> None: self.path = path self.flags = flags self.event_id = event_id self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated) self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved) self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed) self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified) self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner) self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod) self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod) self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod) self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink) self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir) @property def _event_type(self) -> str: if self.is_created: return "Created" if self.is_removed: return "Removed" if self.is_renamed: return "Renamed" if self.is_modified: return "Modified" if self.is_inode_meta_mod: return "InodeMetaMod" if self.is_xattr_mod: return "XattrMod" return "Unknown" def __repr__(self) -> str: return ( f"<{type(self).__name__}: path={self.path!r}, type={self._event_type}," f" is_dir={self.is_directory}, flags={hex(self.flags)}, id={self.event_id}>" ) class FSEventsEmitter(EventEmitter): """FSEvents based event emitter. 
Handles conversion of native events.""" def __init__( self, event_queue: EventQueue, watch: ObservedWatch, *, timeout: float = DEFAULT_EMITTER_TIMEOUT, event_filter: list[type[FileSystemEvent]] | None = None, ): super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter) self._fsevents = FSEventsQueue(watch.path) self._fsevents.start() def on_thread_stop(self) -> None: self._fsevents.stop() def queue_events(self, timeout: float) -> None: events = self._fsevents.read_events() if events is None: return i = 0 while i < len(events): event = events[i] cls: type[FileSystemEvent] # For some reason the create and remove flags are sometimes also # set for rename and modify type events, so let those take # precedence. if event.is_renamed: # Internal moves appears to always be consecutive in the same # buffer and have IDs differ by exactly one (while others # don't) making it possible to pair up the two events coming # from a single move operation. (None of this is documented!) # Otherwise, guess whether file was moved in or out. 
# TODO: handle id wrapping if i + 1 < len(events) and events[i + 1].is_renamed and events[i + 1].event_id == event.event_id + 1: cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(event.path, events[i + 1].path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) self.queue_event(DirModifiedEvent(os.path.dirname(events[i + 1].path))) i += 1 elif os.path.exists(event.path): cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) else: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) # TODO: generate events for tree elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod: cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(event.path)) elif event.is_created: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) elif event.is_removed: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) i += 1 class FSEventsObserver2(BaseObserver): def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None: super().__init__(FSEventsEmitter, timeout=timeout) watchdog-6.0.0/src/watchdog/observers/inotify.py000066400000000000000000000246311471115752600217700ustar00rootroot00000000000000""":module: watchdog.observers.inotify :synopsis: ``inotify(7)`` based emitter implementation. :author: Sebastien Martini :author: Luke McCarthy :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: Tim Cuthbertson :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: Linux 2.6.13+. .. 
ADMONITION:: About system requirements Recommended minimum kernel version: 2.6.25. Quote from the inotify(7) man page: "Inotify was merged into the 2.6.13 Linux kernel. The required library interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW, IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)" Therefore, you must ensure the system is running at least these versions appropriate libraries and the kernel. .. ADMONITION:: About recursiveness, event order, and event coalescing Quote from the inotify(7) man page: If successive output inotify events produced on the inotify file descriptor are identical (same wd, mask, cookie, and name) then they are coalesced into a single event if the older event has not yet been read (but see BUGS). The events returned by reading from an inotify file descriptor form an ordered queue. Thus, for example, it is guaranteed that when renaming from one directory to another, events will be produced in the correct order on the inotify file descriptor. ... Inotify monitoring of directories is not recursive: to monitor subdirectories under a directory, additional watches must be created. This emitter implementation therefore automatically adds watches for sub-directories if running in recursive mode. Some extremely useful articles and documentation: .. _inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en .. 
_intro to inotify: http://www.linuxjournal.com/article/8478 """ from __future__ import annotations import logging import os import threading from typing import TYPE_CHECKING from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileClosedNoWriteEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, FileSystemEvent, generate_sub_created_events, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.observers.inotify_buffer import InotifyBuffer from watchdog.observers.inotify_c import InotifyConstants if TYPE_CHECKING: from watchdog.observers.api import EventQueue, ObservedWatch logger = logging.getLogger(__name__) class InotifyEmitter(EventEmitter): """inotify(7)-based event emitter. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :type timeout: ``float`` :param event_filter: Collection of event types to emit, or None for no filtering (default). 
:type event_filter: Iterable[:class:`watchdog.events.FileSystemEvent`] | None """ def __init__( self, event_queue: EventQueue, watch: ObservedWatch, *, timeout: float = DEFAULT_EMITTER_TIMEOUT, event_filter: list[type[FileSystemEvent]] | None = None, ) -> None: super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter) self._lock = threading.Lock() self._inotify: InotifyBuffer | None = None def on_thread_start(self) -> None: path = os.fsencode(self.watch.path) event_mask = self.get_event_mask_from_filter() self._inotify = InotifyBuffer(path, recursive=self.watch.is_recursive, event_mask=event_mask) def on_thread_stop(self) -> None: if self._inotify: self._inotify.close() self._inotify = None def queue_events(self, timeout: float, *, full_events: bool = False) -> None: # If "full_events" is true, then the method will report unmatched move events as separate events # This behavior is by default only called by a InotifyFullEmitter if self._inotify is None: logger.error("InotifyEmitter.queue_events() called when the thread is inactive") return with self._lock: if self._inotify is None: logger.error("InotifyEmitter.queue_events() called when the thread is inactive") return event = self._inotify.read_event() if event is None: return cls: type[FileSystemEvent] if isinstance(event, tuple): move_from, move_to = event src_path = self._decode_path(move_from.src_path) dest_path = self._decode_path(move_to.src_path) cls = DirMovedEvent if move_from.is_directory else FileMovedEvent self.queue_event(cls(src_path, dest_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) self.queue_event(DirModifiedEvent(os.path.dirname(dest_path))) if move_from.is_directory and self.watch.is_recursive: for sub_moved_event in generate_sub_moved_events(src_path, dest_path): self.queue_event(sub_moved_event) return src_path = self._decode_path(event.src_path) if event.is_moved_to: if full_events: cls = DirMovedEvent if event.is_directory else FileMovedEvent 
self.queue_event(cls("", src_path)) else: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) if event.is_directory and self.watch.is_recursive: for sub_created_event in generate_sub_created_events(src_path): self.queue_event(sub_created_event) elif event.is_attrib or event.is_modify: cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(src_path)) elif event.is_delete or (event.is_moved_from and not full_events): cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) elif event.is_moved_from and full_events: cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(src_path, "")) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) elif event.is_create: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) elif event.is_delete_self and src_path == self.watch.path: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(src_path)) self.stop() elif not event.is_directory: if event.is_open: cls = FileOpenedEvent self.queue_event(cls(src_path)) elif event.is_close_write: cls = FileClosedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(os.path.dirname(src_path))) elif event.is_close_nowrite: cls = FileClosedNoWriteEvent self.queue_event(cls(src_path)) def _decode_path(self, path: bytes | str) -> bytes | str: """Decode path only if unicode string was passed to this emitter.""" return path if isinstance(self.watch.path, bytes) else os.fsdecode(path) def get_event_mask_from_filter(self) -> int | None: """Optimization: Only include events we are filtering in inotify call.""" if self._event_filter is None: return None # Always listen to 
delete self event_mask = InotifyConstants.IN_DELETE_SELF for cls in self._event_filter: if cls in {DirMovedEvent, FileMovedEvent}: event_mask |= InotifyConstants.IN_MOVE elif cls in {DirCreatedEvent, FileCreatedEvent}: event_mask |= InotifyConstants.IN_MOVE | InotifyConstants.IN_CREATE elif cls is DirModifiedEvent: event_mask |= ( InotifyConstants.IN_MOVE | InotifyConstants.IN_ATTRIB | InotifyConstants.IN_MODIFY | InotifyConstants.IN_CREATE | InotifyConstants.IN_CLOSE_WRITE ) elif cls is FileModifiedEvent: event_mask |= InotifyConstants.IN_ATTRIB | InotifyConstants.IN_MODIFY elif cls in {DirDeletedEvent, FileDeletedEvent}: event_mask |= InotifyConstants.IN_DELETE elif cls is FileClosedEvent: event_mask |= InotifyConstants.IN_CLOSE_WRITE elif cls is FileClosedNoWriteEvent: event_mask |= InotifyConstants.IN_CLOSE_NOWRITE elif cls is FileOpenedEvent: event_mask |= InotifyConstants.IN_OPEN return event_mask class InotifyFullEmitter(InotifyEmitter): """inotify(7)-based event emitter. By default this class produces move events even if they are not matched Such move events will have a ``None`` value for the unmatched part. """ def queue_events(self, timeout: float, *, events: bool = True) -> None: # type: ignore[override] super().queue_events(timeout, full_events=events) class InotifyObserver(BaseObserver): """Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT, generate_full_events: bool = False) -> None: cls = InotifyFullEmitter if generate_full_events else InotifyEmitter super().__init__(cls, timeout=timeout) watchdog-6.0.0/src/watchdog/observers/inotify_buffer.py000066400000000000000000000105221471115752600233130ustar00rootroot00000000000000""":module: watchdog.observers.inotify_buffer :synopsis: A wrapper for ``Inotify``. 
:author: thomas.amland@gmail.com (Thomas Amland) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: linux """ from __future__ import annotations import logging from watchdog.observers.inotify_c import Inotify, InotifyEvent from watchdog.utils import BaseThread from watchdog.utils.delayed_queue import DelayedQueue logger = logging.getLogger(__name__) class InotifyBuffer(BaseThread): """A wrapper for `Inotify` that holds events for `delay` seconds. During this time, IN_MOVED_FROM and IN_MOVED_TO events are paired. """ delay = 0.5 def __init__(self, path: bytes, *, recursive: bool = False, event_mask: int | None = None) -> None: super().__init__() # XXX: Remove quotes after Python 3.9 drop self._queue = DelayedQueue["InotifyEvent | tuple[InotifyEvent, InotifyEvent]"](self.delay) self._inotify = Inotify(path, recursive=recursive, event_mask=event_mask) self.start() def read_event(self) -> InotifyEvent | tuple[InotifyEvent, InotifyEvent] | None: """Returns a single event or a tuple of from/to events in case of a paired move event. If this buffer has been closed, immediately return None. 
""" return self._queue.get() def on_thread_stop(self) -> None: self._inotify.close() self._queue.close() def close(self) -> None: self.stop() self.join() def _group_events(self, event_list: list[InotifyEvent]) -> list[InotifyEvent | tuple[InotifyEvent, InotifyEvent]]: """Group any matching move events""" grouped: list[InotifyEvent | tuple[InotifyEvent, InotifyEvent]] = [] for inotify_event in event_list: logger.debug("in-event %s", inotify_event) def matching_from_event(event: InotifyEvent | tuple[InotifyEvent, InotifyEvent]) -> bool: return not isinstance(event, tuple) and event.is_moved_from and event.cookie == inotify_event.cookie if inotify_event.is_moved_to: # Check if move_from is already in the buffer for index, event in enumerate(grouped): if matching_from_event(event): grouped[index] = (event, inotify_event) # type: ignore[assignment] break else: # Check if move_from is in delayqueue already from_event = self._queue.remove(matching_from_event) if from_event is not None: grouped.append((from_event, inotify_event)) # type: ignore[arg-type] else: logger.debug("could not find matching move_from event") grouped.append(inotify_event) else: grouped.append(inotify_event) return grouped def run(self) -> None: """Read event from `inotify` and add them to `queue`. When reading a IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event and add them back to the queue as a tuple. 
""" deleted_self = False while self.should_keep_running() and not deleted_self: inotify_events = self._inotify.read_events() grouped_events = self._group_events(inotify_events) for inotify_event in grouped_events: if not isinstance(inotify_event, tuple) and inotify_event.is_ignored: if inotify_event.src_path == self._inotify.path: # Watch was removed explicitly (inotify_rm_watch(2)) or automatically (file # was deleted, or filesystem was unmounted), stop watching for events deleted_self = True continue # Only add delay for unmatched move_from events delay = not isinstance(inotify_event, tuple) and inotify_event.is_moved_from self._queue.put(inotify_event, delay=delay) if ( not isinstance(inotify_event, tuple) and inotify_event.is_delete_self and inotify_event.src_path == self._inotify.path ): # Deleted the watched directory, stop watching for events deleted_self = True watchdog-6.0.0/src/watchdog/observers/inotify_c.py000066400000000000000000000512111471115752600222640ustar00rootroot00000000000000from __future__ import annotations import contextlib import ctypes import ctypes.util import errno import os import select import struct import threading from ctypes import c_char_p, c_int, c_uint32 from functools import reduce from typing import TYPE_CHECKING from watchdog.utils import UnsupportedLibcError if TYPE_CHECKING: from collections.abc import Generator libc = ctypes.CDLL(None) if not hasattr(libc, "inotify_init") or not hasattr(libc, "inotify_add_watch") or not hasattr(libc, "inotify_rm_watch"): error = f"Unsupported libc version found: {libc._name}" # noqa:SLF001 raise UnsupportedLibcError(error) inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(("inotify_add_watch", libc)) inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(("inotify_rm_watch", libc)) inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(("inotify_init", libc)) class InotifyConstants: # User-space events IN_ACCESS = 0x00000001 # File 
was accessed. IN_MODIFY = 0x00000002 # File was modified. IN_ATTRIB = 0x00000004 # Meta-data changed. IN_CLOSE_WRITE = 0x00000008 # Writable file was closed. IN_CLOSE_NOWRITE = 0x00000010 # Unwritable file closed. IN_OPEN = 0x00000020 # File was opened. IN_MOVED_FROM = 0x00000040 # File was moved from X. IN_MOVED_TO = 0x00000080 # File was moved to Y. IN_CREATE = 0x00000100 # Subfile was created. IN_DELETE = 0x00000200 # Subfile was deleted. IN_DELETE_SELF = 0x00000400 # Self was deleted. IN_MOVE_SELF = 0x00000800 # Self was moved. # Helper user-space events. IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO # Moves. # Events sent by the kernel to a watch. IN_UNMOUNT = 0x00002000 # Backing file system was unmounted. IN_Q_OVERFLOW = 0x00004000 # Event queued overflowed. IN_IGNORED = 0x00008000 # File was ignored. # Special flags. IN_ONLYDIR = 0x01000000 # Only watch the path if it's a directory. IN_DONT_FOLLOW = 0x02000000 # Do not follow a symbolic link. IN_EXCL_UNLINK = 0x04000000 # Exclude events on unlinked objects IN_MASK_ADD = 0x20000000 # Add to the mask of an existing watch. IN_ISDIR = 0x40000000 # Event occurred against directory. IN_ONESHOT = 0x80000000 # Only send event once. # All user-space events. IN_ALL_EVENTS = reduce( lambda x, y: x | y, [ IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE, IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_DELETE, IN_CREATE, IN_DELETE_SELF, IN_MOVE_SELF, ], ) # Flags for ``inotify_init1`` IN_CLOEXEC = 0x02000000 IN_NONBLOCK = 0x00004000 # Watchdog's API cares only about these events. 
WATCHDOG_ALL_EVENTS = reduce( lambda x, y: x | y, [ InotifyConstants.IN_MODIFY, InotifyConstants.IN_ATTRIB, InotifyConstants.IN_MOVED_FROM, InotifyConstants.IN_MOVED_TO, InotifyConstants.IN_CREATE, InotifyConstants.IN_DELETE, InotifyConstants.IN_DELETE_SELF, InotifyConstants.IN_DONT_FOLLOW, InotifyConstants.IN_CLOSE_WRITE, InotifyConstants.IN_CLOSE_NOWRITE, InotifyConstants.IN_OPEN, ], ) class InotifyEventStruct(ctypes.Structure): """Structure representation of the inotify_event structure (used in buffer size calculations):: struct inotify_event { __s32 wd; /* watch descriptor */ __u32 mask; /* watch mask */ __u32 cookie; /* cookie to synchronize two events */ __u32 len; /* length (including nulls) of name */ char name[0]; /* stub for possible name */ }; """ _fields_ = ( ("wd", c_int), ("mask", c_uint32), ("cookie", c_uint32), ("len", c_uint32), ("name", c_char_p), ) EVENT_SIZE = ctypes.sizeof(InotifyEventStruct) DEFAULT_NUM_EVENTS = 2048 DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16) class Inotify: """Linux inotify(7) API wrapper class. :param path: The directory path for which we want an inotify object. :type path: :class:`bytes` :param recursive: ``True`` if subdirectories should be monitored; ``False`` otherwise. """ def __init__(self, path: bytes, *, recursive: bool = False, event_mask: int | None = None) -> None: # The file descriptor associated with the inotify instance. 
inotify_fd = inotify_init() if inotify_fd == -1: Inotify._raise_error() self._inotify_fd = inotify_fd self._lock = threading.Lock() self._closed = False self._is_reading = True self._kill_r, self._kill_w = os.pipe() # _check_inotify_fd will return true if we can read _inotify_fd without blocking if hasattr(select, "poll"): self._poller = select.poll() self._poller.register(self._inotify_fd, select.POLLIN) self._poller.register(self._kill_r, select.POLLIN) def do_poll() -> bool: return any(fd == self._inotify_fd for fd, _ in self._poller.poll()) self._check_inotify_fd = do_poll else: def do_select() -> bool: result = select.select([self._inotify_fd, self._kill_r], [], []) return self._inotify_fd in result[0] self._check_inotify_fd = do_select # Stores the watch descriptor for a given path. self._wd_for_path: dict[bytes, int] = {} self._path_for_wd: dict[int, bytes] = {} self._path = path # Default to all events if event_mask is None: event_mask = WATCHDOG_ALL_EVENTS self._event_mask = event_mask self._is_recursive = recursive if os.path.isdir(path): self._add_dir_watch(path, event_mask, recursive=recursive) else: self._add_watch(path, event_mask) self._moved_from_events: dict[int, InotifyEvent] = {} @property def event_mask(self) -> int: """The event mask for this inotify instance.""" return self._event_mask @property def path(self) -> bytes: """The path associated with the inotify instance.""" return self._path @property def is_recursive(self) -> bool: """Whether we are watching directories recursively.""" return self._is_recursive @property def fd(self) -> int: """The file descriptor associated with the inotify instance.""" return self._inotify_fd def clear_move_records(self) -> None: """Clear cached records of MOVED_FROM events""" self._moved_from_events = {} def source_for_move(self, destination_event: InotifyEvent) -> bytes | None: """The source path corresponding to the given MOVED_TO event. 
If the source path is outside the monitored directories, None is returned instead. """ if destination_event.cookie in self._moved_from_events: return self._moved_from_events[destination_event.cookie].src_path return None def remember_move_from_event(self, event: InotifyEvent) -> None: """Save this event as the source event for future MOVED_TO events to reference. """ self._moved_from_events[event.cookie] = event def add_watch(self, path: bytes) -> None: """Adds a watch for the given path. :param path: Path to begin monitoring. """ with self._lock: self._add_watch(path, self._event_mask) def remove_watch(self, path: bytes) -> None: """Removes a watch for the given path. :param path: Path string for which the watch will be removed. """ with self._lock: wd = self._wd_for_path.pop(path) del self._path_for_wd[wd] if inotify_rm_watch(self._inotify_fd, wd) == -1: Inotify._raise_error() def close(self) -> None: """Closes the inotify instance and removes all associated watches.""" with self._lock: if not self._closed: self._closed = True if self._path in self._wd_for_path: wd = self._wd_for_path[self._path] inotify_rm_watch(self._inotify_fd, wd) if self._is_reading: # inotify_rm_watch() should write data to _inotify_fd and wake # the thread, but writing to the kill channel will gaurentee this os.write(self._kill_w, b"!") else: self._close_resources() def read_events(self, *, event_buffer_size: int = DEFAULT_EVENT_BUFFER_SIZE) -> list[InotifyEvent]: """Reads events from inotify and yields them.""" # HACK: We need to traverse the directory path # recursively and simulate events for newly # created subdirectories/files. 
This will handle # mkdir -p foobar/blah/bar; touch foobar/afile def _recursive_simulate(src_path: bytes) -> list[InotifyEvent]: events = [] for root, dirnames, filenames in os.walk(src_path): for dirname in dirnames: with contextlib.suppress(OSError): full_path = os.path.join(root, dirname) wd_dir = self._add_watch(full_path, self._event_mask) e = InotifyEvent( wd_dir, InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR, 0, dirname, full_path, ) events.append(e) for filename in filenames: full_path = os.path.join(root, filename) wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)] e = InotifyEvent( wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path, ) events.append(e) return events event_buffer = b"" while True: try: with self._lock: if self._closed: return [] self._is_reading = True if self._check_inotify_fd(): event_buffer = os.read(self._inotify_fd, event_buffer_size) with self._lock: self._is_reading = False if self._closed: self._close_resources() return [] except OSError as e: if e.errno == errno.EINTR: continue if e.errno == errno.EBADF: return [] raise break with self._lock: event_list = [] for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer): if wd == -1: continue wd_path = self._path_for_wd[wd] src_path = os.path.join(wd_path, name) if name else wd_path # avoid trailing slash inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) if inotify_event.is_moved_from: self.remember_move_from_event(inotify_event) elif inotify_event.is_moved_to: move_src_path = self.source_for_move(inotify_event) if move_src_path in self._wd_for_path: moved_wd = self._wd_for_path[move_src_path] del self._wd_for_path[move_src_path] self._wd_for_path[inotify_event.src_path] = moved_wd self._path_for_wd[moved_wd] = inotify_event.src_path if self.is_recursive: for _path in self._wd_for_path.copy(): if _path.startswith(move_src_path + os.path.sep.encode()): moved_wd = self._wd_for_path.pop(_path) _move_to_path = 
_path.replace(move_src_path, inotify_event.src_path) self._wd_for_path[_move_to_path] = moved_wd self._path_for_wd[moved_wd] = _move_to_path src_path = os.path.join(wd_path, name) inotify_event = InotifyEvent(wd, mask, cookie, name, src_path) if inotify_event.is_ignored: # Clean up book-keeping for deleted watches. path = self._path_for_wd.pop(wd) if self._wd_for_path[path] == wd: del self._wd_for_path[path] event_list.append(inotify_event) if self.is_recursive and inotify_event.is_directory and inotify_event.is_create: # TODO: When a directory from another part of the # filesystem is moved into a watched directory, this # will not generate events for the directory tree. # We need to coalesce IN_MOVED_TO events and those # IN_MOVED_TO events which don't pair up with # IN_MOVED_FROM events should be marked IN_CREATE # instead relative to this directory. try: self._add_watch(src_path, self._event_mask) except OSError: continue event_list.extend(_recursive_simulate(src_path)) return event_list def _close_resources(self) -> None: os.close(self._inotify_fd) os.close(self._kill_r) os.close(self._kill_w) # Non-synchronized methods. def _add_dir_watch(self, path: bytes, mask: int, *, recursive: bool) -> None: """Adds a watch (optionally recursively) for the given directory path to monitor events specified by the mask. :param path: Path to monitor :param recursive: ``True`` to monitor recursively. :param mask: Event bit mask. """ if not os.path.isdir(path): raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path) self._add_watch(path, mask) if recursive: for root, dirnames, _ in os.walk(path): for dirname in dirnames: full_path = os.path.join(root, dirname) if os.path.islink(full_path): continue self._add_watch(full_path, mask) def _add_watch(self, path: bytes, mask: int) -> int: """Adds a watch for the given path to monitor events specified by the mask. :param path: Path to monitor :param mask: Event bit mask. 
""" wd = inotify_add_watch(self._inotify_fd, path, mask) if wd == -1: Inotify._raise_error() self._wd_for_path[path] = wd self._path_for_wd[wd] = path return wd @staticmethod def _raise_error() -> None: """Raises errors for inotify failures.""" err = ctypes.get_errno() if err == errno.ENOSPC: raise OSError(errno.ENOSPC, "inotify watch limit reached") if err == errno.EMFILE: raise OSError(errno.EMFILE, "inotify instance limit reached") if err != errno.EACCES: raise OSError(err, os.strerror(err)) @staticmethod def _parse_event_buffer(event_buffer: bytes) -> Generator[tuple[int, int, int, bytes]]: """Parses an event buffer of ``inotify_event`` structs returned by inotify:: struct inotify_event { __s32 wd; /* watch descriptor */ __u32 mask; /* watch mask */ __u32 cookie; /* cookie to synchronize two events */ __u32 len; /* length (including nulls) of name */ char name[0]; /* stub for possible name */ }; The ``cookie`` member of this struct is used to pair two related events, for example, it pairs an IN_MOVED_FROM event with an IN_MOVED_TO event. """ i = 0 while i + 16 <= len(event_buffer): wd, mask, cookie, length = struct.unpack_from("iIII", event_buffer, i) name = event_buffer[i + 16 : i + 16 + length].rstrip(b"\0") i += 16 + length yield wd, mask, cookie, name class InotifyEvent: """Inotify event struct wrapper. :param wd: Watch descriptor :param mask: Event mask :param cookie: Event cookie :param name: Base name of the event source path. :param src_path: Full event source path. 
""" def __init__(self, wd: int, mask: int, cookie: int, name: bytes, src_path: bytes) -> None: self._wd = wd self._mask = mask self._cookie = cookie self._name = name self._src_path = src_path @property def src_path(self) -> bytes: return self._src_path @property def wd(self) -> int: return self._wd @property def mask(self) -> int: return self._mask @property def cookie(self) -> int: return self._cookie @property def name(self) -> bytes: return self._name @property def is_modify(self) -> bool: return self._mask & InotifyConstants.IN_MODIFY > 0 @property def is_close_write(self) -> bool: return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0 @property def is_close_nowrite(self) -> bool: return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0 @property def is_open(self) -> bool: return self._mask & InotifyConstants.IN_OPEN > 0 @property def is_access(self) -> bool: return self._mask & InotifyConstants.IN_ACCESS > 0 @property def is_delete(self) -> bool: return self._mask & InotifyConstants.IN_DELETE > 0 @property def is_delete_self(self) -> bool: return self._mask & InotifyConstants.IN_DELETE_SELF > 0 @property def is_create(self) -> bool: return self._mask & InotifyConstants.IN_CREATE > 0 @property def is_moved_from(self) -> bool: return self._mask & InotifyConstants.IN_MOVED_FROM > 0 @property def is_moved_to(self) -> bool: return self._mask & InotifyConstants.IN_MOVED_TO > 0 @property def is_move(self) -> bool: return self._mask & InotifyConstants.IN_MOVE > 0 @property def is_move_self(self) -> bool: return self._mask & InotifyConstants.IN_MOVE_SELF > 0 @property def is_attrib(self) -> bool: return self._mask & InotifyConstants.IN_ATTRIB > 0 @property def is_ignored(self) -> bool: return self._mask & InotifyConstants.IN_IGNORED > 0 @property def is_directory(self) -> bool: # It looks like the kernel does not provide this information for # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir. 
# See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897 return self.is_delete_self or self.is_move_self or self._mask & InotifyConstants.IN_ISDIR > 0 @property def key(self) -> tuple[bytes, int, int, int, bytes]: return self._src_path, self._wd, self._mask, self._cookie, self._name def __eq__(self, inotify_event: object) -> bool: if not isinstance(inotify_event, InotifyEvent): return NotImplemented return self.key == inotify_event.key def __ne__(self, inotify_event: object) -> bool: if not isinstance(inotify_event, InotifyEvent): return NotImplemented return self.key != inotify_event.key def __hash__(self) -> int: return hash(self.key) @staticmethod def _get_mask_string(mask: int) -> str: masks = [] for c in dir(InotifyConstants): if c.startswith("IN_") and c not in {"IN_ALL_EVENTS", "IN_MOVE"}: c_val = getattr(InotifyConstants, c) if mask & c_val: masks.append(c) return "|".join(masks) def __repr__(self) -> str: return ( f"<{type(self).__name__}: src_path={self.src_path!r}, wd={self.wd}," f" mask={self._get_mask_string(self.mask)}, cookie={self.cookie}," f" name={os.fsdecode(self.name)!r}>" ) watchdog-6.0.0/src/watchdog/observers/kqueue.py000066400000000000000000000575511471115752600216150ustar00rootroot00000000000000""":module: watchdog.observers.kqueue :synopsis: ``kqueue(2)`` based emitter implementation. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: macOS and BSD with kqueue(2). .. WARNING:: kqueue is a very heavyweight way to monitor file systems. Each kqueue-detected directory modification triggers a full directory scan. Traversing the entire directory tree and opening file descriptors for all files will create performance problems. We need to find a way to re-scan only those directories which report changes and do a diff between two sub-DirectorySnapshots perhaps. .. 
ADMONITION:: About OS X performance guidelines Quote from the `macOS File System Performance Guidelines`_: "When you only want to track changes on a file or directory, be sure to open it using the ``O_EVTONLY`` flag. This flag prevents the file or directory from being marked as open or in use. This is important if you are tracking files on a removable volume and the user tries to unmount the volume. With this flag in place, the system knows it can dismiss the volume. If you had opened the files or directories without this flag, the volume would be marked as busy and would not be unmounted." ``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files. More information here: http://www.mlsite.net/blog/?p=2312 Classes ------- .. autoclass:: KqueueEmitter :members: :show-inheritance: Collections and Utility Classes ------------------------------- .. autoclass:: KeventDescriptor :members: :show-inheritance: .. autoclass:: KeventDescriptorSet :members: :show-inheritance: .. _macOS File System Performance Guidelines: http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD """ # The `select` module varies between platforms. # mypy may complain about missing module attributes depending on which platform it's running on. # The comment below disables mypy's attribute check. 
# mypy: disable-error-code="attr-defined, name-defined" from __future__ import annotations import contextlib import errno import os import os.path import select import threading from stat import S_ISDIR from typing import TYPE_CHECKING from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.utils import platform from watchdog.utils.dirsnapshot import DirectorySnapshot if TYPE_CHECKING: from collections.abc import Generator from typing import Callable from watchdog.events import FileSystemEvent from watchdog.observers.api import EventQueue, ObservedWatch # Maximum number of events to process. MAX_EVENTS = 4096 # O_EVTONLY value from the header files for OS X only. O_EVTONLY = 0x8000 # Pre-calculated values for the kevent filter, flags, and fflags attributes. WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY if platform.is_darwin() else os.O_RDONLY | os.O_NONBLOCK WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR WATCHDOG_KQ_FFLAGS = ( select.KQ_NOTE_DELETE | select.KQ_NOTE_WRITE | select.KQ_NOTE_EXTEND | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK | select.KQ_NOTE_RENAME | select.KQ_NOTE_REVOKE ) def absolute_path(path: bytes | str) -> bytes | str: return os.path.abspath(os.path.normpath(path)) # Flag tests. 
def is_deleted(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents deletion."""
    # Precedence note: ``&`` binds tighter than ``>`` in Python, so this
    # reads ``(fflags & KQ_NOTE_DELETE) > 0``.
    return kev.fflags & select.KQ_NOTE_DELETE > 0


def is_modified(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents modification."""
    fflags = kev.fflags
    # Either a size extension or a plain write counts as a modification.
    return (fflags & select.KQ_NOTE_EXTEND > 0) or (fflags & select.KQ_NOTE_WRITE > 0)


def is_attrib_modified(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents attribute modification."""
    return kev.fflags & select.KQ_NOTE_ATTRIB > 0


def is_renamed(kev: select.kevent) -> bool:
    """Determines whether the given kevent represents movement."""
    return kev.fflags & select.KQ_NOTE_RENAME > 0


class KeventDescriptorSet:
    """Thread-safe kevent descriptor collection.

    Maintains three synchronized views over the same set of
    :class:`KeventDescriptor` objects -- by path, by file descriptor, and as
    a flat kevent list for :meth:`select.kqueue.control` -- all guarded by a
    single lock.
    """

    def __init__(self) -> None:
        # The registered descriptors, plus lookup tables kept in sync.
        self._descriptors: set[KeventDescriptor] = set()
        self._descriptor_for_path: dict[bytes | str, KeventDescriptor] = {}
        self._descriptor_for_fd: dict[int, KeventDescriptor] = {}
        # Flat list handed to kqueue.control(); mirrors ``_descriptors``.
        self._kevents: list[select.kevent] = []
        self._lock = threading.Lock()

    @property
    def kevents(self) -> list[select.kevent]:
        """List of kevents monitored."""
        # NOTE(review): this returns the internal list itself, not a copy --
        # callers must not mutate it; confirm before changing.
        with self._lock:
            return self._kevents

    @property
    def paths(self) -> list[bytes | str]:
        """List of paths for which kevents have been created."""
        with self._lock:
            return list(self._descriptor_for_path.keys())

    def get_for_fd(self, fd: int) -> KeventDescriptor:
        """Given a file descriptor, returns the kevent descriptor object
        for it.

        :param fd: OS file descriptor.
        :type fd: ``int``
        :returns: A :class:`KeventDescriptor` object.
        """
        with self._lock:
            return self._descriptor_for_fd[fd]

    def get(self, path: bytes | str) -> KeventDescriptor:
        """Obtains a :class:`KeventDescriptor` object for the specified path.

        :param path: Path for which the descriptor will be obtained.
        """
        with self._lock:
            path = absolute_path(path)
            return self._get(path)

    def __contains__(self, path: bytes | str) -> bool:
        """Determines whether a :class:`KeventDescriptor` has been registered
        for the specified path.

        :param path: Path for which the descriptor will be obtained.
        """
        with self._lock:
            path = absolute_path(path)
            return self._has_path(path)

    def add(self, path: bytes | str, *, is_directory: bool) -> None:
        """Adds a :class:`KeventDescriptor` to the collection for the given
        path.

        Adding an already-registered path is a no-op.

        :param path: The path for which a :class:`KeventDescriptor` object
            will be added.
        :param is_directory: ``True`` if the path refers to a directory;
            ``False`` otherwise.
        :type is_directory: ``bool``
        """
        with self._lock:
            path = absolute_path(path)
            if not self._has_path(path):
                self._add_descriptor(KeventDescriptor(path, is_directory=is_directory))

    def remove(self, path: bytes | str) -> None:
        """Removes the :class:`KeventDescriptor` object for the given path
        if it already exists.

        :param path: Path for which the :class:`KeventDescriptor` object
            will be removed.
        """
        with self._lock:
            path = absolute_path(path)
            if self._has_path(path):
                self._remove_descriptor(self._get(path))

    def clear(self) -> None:
        """Clears the collection and closes all open descriptors."""
        with self._lock:
            for descriptor in self._descriptors:
                descriptor.close()
            self._descriptors.clear()
            self._descriptor_for_fd.clear()
            self._descriptor_for_path.clear()
            self._kevents = []

    # Thread-unsafe methods. Locking is provided at a higher level.

    def _get(self, path: bytes | str) -> KeventDescriptor:
        """Returns a kevent descriptor for a given path."""
        return self._descriptor_for_path[path]

    def _has_path(self, path: bytes | str) -> bool:
        """Determines whether a :class:`KeventDescriptor` for the specified
        path exists already in the collection.
        """
        return path in self._descriptor_for_path

    def _add_descriptor(self, descriptor: KeventDescriptor) -> None:
        """Adds a descriptor to the collection.
:param descriptor: An instance of :class:`KeventDescriptor` to be added. """ self._descriptors.add(descriptor) self._kevents.append(descriptor.kevent) self._descriptor_for_path[descriptor.path] = descriptor self._descriptor_for_fd[descriptor.fd] = descriptor def _remove_descriptor(self, descriptor: KeventDescriptor) -> None: """Removes a descriptor from the collection. :param descriptor: An instance of :class:`KeventDescriptor` to be removed. """ self._descriptors.remove(descriptor) del self._descriptor_for_fd[descriptor.fd] del self._descriptor_for_path[descriptor.path] self._kevents.remove(descriptor.kevent) descriptor.close() class KeventDescriptor: """A kevent descriptor convenience data structure to keep together: * kevent * directory status * path * file descriptor :param path: Path string for which a kevent descriptor will be created. :param is_directory: ``True`` if the path refers to a directory; ``False`` otherwise. :type is_directory: ``bool`` """ def __init__(self, path: bytes | str, *, is_directory: bool) -> None: self._path = absolute_path(path) self._is_directory = is_directory self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS) self._kev = select.kevent( self._fd, filter=WATCHDOG_KQ_FILTER, flags=WATCHDOG_KQ_EV_FLAGS, fflags=WATCHDOG_KQ_FFLAGS, ) @property def fd(self) -> int: """OS file descriptor for the kevent descriptor.""" return self._fd @property def path(self) -> bytes | str: """The path associated with the kevent descriptor.""" return self._path @property def kevent(self) -> select.kevent: """The kevent object associated with the kevent descriptor.""" return self._kev @property def is_directory(self) -> bool: """Determines whether the kevent descriptor refers to a directory. 
:returns: ``True`` or ``False`` """ return self._is_directory def close(self) -> None: """Closes the file descriptor associated with a kevent descriptor.""" with contextlib.suppress(OSError): os.close(self.fd) @property def key(self) -> tuple[bytes | str, bool]: return (self.path, self.is_directory) def __eq__(self, descriptor: object) -> bool: if not isinstance(descriptor, KeventDescriptor): return NotImplemented return self.key == descriptor.key def __ne__(self, descriptor: object) -> bool: if not isinstance(descriptor, KeventDescriptor): return NotImplemented return self.key != descriptor.key def __hash__(self) -> int: return hash(self.key) def __repr__(self) -> str: return f"<{type(self).__name__}: path={self.path!r}, is_directory={self.is_directory}>" class KqueueEmitter(EventEmitter): """kqueue(2)-based event emitter. .. ADMONITION:: About ``kqueue(2)`` behavior and this implementation ``kqueue(2)`` monitors file system events only for open descriptors, which means, this emitter does a lot of book-keeping behind the scenes to keep track of open descriptors for every entry in the monitored directory tree. This also means the number of maximum open file descriptors on your system must be increased **manually**. Usually, issuing a call to ``ulimit`` should suffice:: ulimit -n 1024 Ensure that you pick a number that is larger than the number of files you expect to be monitored. ``kqueue(2)`` does not provide enough information about the following things: * The destination path of a file or directory that is renamed. * Creation of a file or directory within a directory; in this case, ``kqueue(2)`` only indicates a modified event on the parent directory. Therefore, this emitter takes a snapshot of the directory tree when ``kqueue(2)`` detects a change on the file system to be able to determine the above information. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. 
:type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :type timeout: ``float`` :param event_filter: Collection of event types to emit, or None for no filtering (default). :type event_filter: Iterable[:class:`watchdog.events.FileSystemEvent`] | None :param stat: stat function. See ``os.stat`` for details. """ def __init__( self, event_queue: EventQueue, watch: ObservedWatch, *, timeout: float = DEFAULT_EMITTER_TIMEOUT, event_filter: list[type[FileSystemEvent]] | None = None, stat: Callable[[str], os.stat_result] = os.stat, ) -> None: super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter) self._kq = select.kqueue() self._lock = threading.RLock() # A collection of KeventDescriptor. self._descriptors = KeventDescriptorSet() def custom_stat(path: str, cls: KqueueEmitter = self) -> os.stat_result: stat_info = stat(path) cls._register_kevent(path, is_directory=S_ISDIR(stat_info.st_mode)) return stat_info self._snapshot = DirectorySnapshot(watch.path, recursive=watch.is_recursive, stat=custom_stat) def _register_kevent(self, path: bytes | str, *, is_directory: bool) -> None: """Registers a kevent descriptor for the given path. :param path: Path for which a kevent descriptor will be created. :param is_directory: ``True`` if the path refers to a directory; ``False`` otherwise. :type is_directory: ``bool`` """ try: self._descriptors.add(path, is_directory=is_directory) except OSError as e: if e.errno == errno.ENOENT: # Probably dealing with a temporary file that was created # and then quickly deleted before we could open # a descriptor for it. Therefore, simply queue a sequence # of created and deleted events for the path. # TODO: We could simply ignore these files. # Locked files cause the python process to die with # a bus error when we handle temporary files. # eg. .git/index.lock when running tig operations. # I don't fully understand this at the moment. 
pass elif e.errno == errno.EOPNOTSUPP: # Probably dealing with the socket or special file # mounted through a file system that does not support # access to it (e.g. NFS). On BSD systems look at # EOPNOTSUPP in man 2 open. pass else: # All other errors are propagated. raise def _unregister_kevent(self, path: bytes | str) -> None: """Convenience function to close the kevent descriptor for a specified kqueue-monitored path. :param path: Path for which the kevent descriptor will be closed. """ self._descriptors.remove(path) def queue_event(self, event: FileSystemEvent) -> None: """Handles queueing a single event object. :param event: An instance of :class:`watchdog.events.FileSystemEvent` or a subclass. """ # Handles all the book keeping for queued events. # We do not need to fire moved/deleted events for all subitems in # a directory tree here, because this function is called by kqueue # for all those events anyway. EventEmitter.queue_event(self, event) if event.event_type == EVENT_TYPE_CREATED: self._register_kevent(event.src_path, is_directory=event.is_directory) elif event.event_type == EVENT_TYPE_MOVED: self._unregister_kevent(event.src_path) self._register_kevent(event.dest_path, is_directory=event.is_directory) elif event.event_type == EVENT_TYPE_DELETED: self._unregister_kevent(event.src_path) def _gen_kqueue_events( self, kev: select.kevent, ref_snapshot: DirectorySnapshot, new_snapshot: DirectorySnapshot ) -> Generator[FileSystemEvent]: """Generate events from the kevent list returned from the call to :meth:`select.kqueue.control`. .. NOTE:: kqueue only tells us about deletions, file modifications, attribute modifications. The other events, namely, file creation, directory modification, file rename, directory rename, directory creation, etc. are determined by comparing directory snapshots. 
""" descriptor = self._descriptors.get_for_fd(kev.ident) src_path = descriptor.path if is_renamed(kev): # Kqueue does not specify the destination names for renames # to, so we have to process these using the a snapshot # of the directory. yield from self._gen_renamed_events( src_path, ref_snapshot, new_snapshot, is_directory=descriptor.is_directory, ) elif is_attrib_modified(kev): if descriptor.is_directory: yield DirModifiedEvent(src_path) else: yield FileModifiedEvent(src_path) elif is_modified(kev): if descriptor.is_directory: if self.watch.is_recursive or self.watch.path == src_path: # When a directory is modified, it may be due to # sub-file/directory renames or new file/directory # creation. We determine all this by comparing # snapshots later. yield DirModifiedEvent(src_path) else: yield FileModifiedEvent(src_path) elif is_deleted(kev): if descriptor.is_directory: yield DirDeletedEvent(src_path) else: yield FileDeletedEvent(src_path) def _parent_dir_modified(self, src_path: bytes | str) -> DirModifiedEvent: """Helper to generate a DirModifiedEvent on the parent of src_path.""" return DirModifiedEvent(os.path.dirname(src_path)) def _gen_renamed_events( self, src_path: bytes | str, ref_snapshot: DirectorySnapshot, new_snapshot: DirectorySnapshot, *, is_directory: bool, ) -> Generator[FileSystemEvent]: """Compares information from two directory snapshots (one taken before the rename operation and another taken right after) to determine the destination path of the file system object renamed, and yields the appropriate events to be queued. """ try: f_inode = ref_snapshot.inode(src_path) except KeyError: # Probably caught a temporary file/directory that was renamed # and deleted. Fires a sequence of created and deleted events # for the path. 
if is_directory: yield DirCreatedEvent(src_path) yield DirDeletedEvent(src_path) else: yield FileCreatedEvent(src_path) yield FileDeletedEvent(src_path) # We don't process any further and bail out assuming # the event represents deletion/creation instead of movement. return dest_path = new_snapshot.path(f_inode) if dest_path is not None: dest_path = absolute_path(dest_path) if is_directory: yield DirMovedEvent(src_path, dest_path) else: yield FileMovedEvent(src_path, dest_path) yield self._parent_dir_modified(src_path) yield self._parent_dir_modified(dest_path) if is_directory and self.watch.is_recursive: # TODO: Do we need to fire moved events for the items # inside the directory tree? Does kqueue does this # all by itself? Check this and then enable this code # only if it doesn't already. # A: It doesn't. So I've enabled this block. yield from generate_sub_moved_events(src_path, dest_path) else: # If the new snapshot does not have an inode for the # old path, we haven't found the new name. Therefore, # we mark it as deleted and remove unregister the path. if is_directory: yield DirDeletedEvent(src_path) else: yield FileDeletedEvent(src_path) yield self._parent_dir_modified(src_path) def _read_events(self, timeout: float) -> list[select.kevent]: """Reads events from a call to the blocking :meth:`select.kqueue.control()` method. :param timeout: Blocking timeout for reading events. :type timeout: ``float`` (seconds) """ return self._kq.control(self._descriptors.kevents, MAX_EVENTS, timeout) def queue_events(self, timeout: float) -> None: """Queues events by reading them from a call to the blocking :meth:`select.kqueue.control()` method. :param timeout: Blocking timeout for reading events. :type timeout: ``float`` (seconds) """ with self._lock: try: event_list = self._read_events(timeout) # TODO: investigate why order appears to be reversed event_list.reverse() # Take a fresh snapshot of the directory and update the # saved snapshot. 
new_snapshot = DirectorySnapshot(self.watch.path, recursive=self.watch.is_recursive) ref_snapshot = self._snapshot self._snapshot = new_snapshot diff_events = new_snapshot - ref_snapshot # Process events for directory_created in diff_events.dirs_created: self.queue_event(DirCreatedEvent(directory_created)) for file_created in diff_events.files_created: self.queue_event(FileCreatedEvent(file_created)) for file_modified in diff_events.files_modified: self.queue_event(FileModifiedEvent(file_modified)) for kev in event_list: for event in self._gen_kqueue_events(kev, ref_snapshot, new_snapshot): self.queue_event(event) except OSError as e: if e.errno != errno.EBADF: raise def on_thread_stop(self) -> None: # Clean up. with self._lock: self._descriptors.clear() self._kq.close() class KqueueObserver(BaseObserver): """Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None: super().__init__(KqueueEmitter, timeout=timeout) watchdog-6.0.0/src/watchdog/observers/polling.py000066400000000000000000000115041471115752600217460ustar00rootroot00000000000000""":module: watchdog.observers.polling :synopsis: Polling emitter implementation. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ------- .. autoclass:: PollingObserver :members: :show-inheritance: .. 
autoclass:: PollingObserverVFS
   :members:
   :show-inheritance:
   :special-members:
"""

from __future__ import annotations

import os
import threading
from functools import partial
from typing import TYPE_CHECKING

from watchdog.events import (
    DirCreatedEvent,
    DirDeletedEvent,
    DirModifiedEvent,
    DirMovedEvent,
    FileCreatedEvent,
    FileDeletedEvent,
    FileModifiedEvent,
    FileMovedEvent,
)
from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff, EmptyDirectorySnapshot

if TYPE_CHECKING:
    from collections.abc import Iterator
    from typing import Callable

    from watchdog.events import FileSystemEvent
    from watchdog.observers.api import EventQueue, ObservedWatch


class PollingEmitter(EventEmitter):
    """Platform-independent emitter that polls a directory to detect file
    system changes.

    Events are produced by diffing successive :class:`DirectorySnapshot`
    instances, so ``timeout`` acts as the polling interval.
    """

    def __init__(
        self,
        event_queue: EventQueue,
        watch: ObservedWatch,
        *,
        timeout: float = DEFAULT_EMITTER_TIMEOUT,
        event_filter: list[type[FileSystemEvent]] | None = None,
        stat: Callable[[str], os.stat_result] = os.stat,
        listdir: Callable[[str | None], Iterator[os.DirEntry]] = os.scandir,
    ) -> None:
        super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter)
        # Placeholder until on_thread_start() takes the first real snapshot.
        self._snapshot: DirectorySnapshot = EmptyDirectorySnapshot()
        self._lock = threading.Lock()
        # Snapshot factory; ``stat``/``listdir`` are injectable so the same
        # emitter can poll virtual file systems (see PollingObserverVFS).
        self._take_snapshot: Callable[[], DirectorySnapshot] = lambda: DirectorySnapshot(
            self.watch.path,
            recursive=self.watch.is_recursive,
            stat=stat,
            listdir=listdir,
        )

    def on_thread_start(self) -> None:
        # Baseline snapshot: changes are reported relative to this state.
        self._snapshot = self._take_snapshot()

    def queue_events(self, timeout: float) -> None:
        # We don't want to hit the disk continuously.
        # timeout behaves like an interval for polling emitters.
        if self.stopped_event.wait(timeout):
            return

        with self._lock:
            if not self.should_keep_running():
                return

            # Get event diff between fresh snapshot and previous snapshot.
            # Update snapshot.
            try:
                new_snapshot = self._take_snapshot()
            except OSError:
                # Snapshotting failed: treat the watched root as gone, emit
                # a delete for it, and stop this emitter.
                self.queue_event(DirDeletedEvent(self.watch.path))
                self.stop()
                return

            events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
            self._snapshot = new_snapshot

            # Files.
            for src_path in events.files_deleted:
                self.queue_event(FileDeletedEvent(src_path))
            for src_path in events.files_modified:
                self.queue_event(FileModifiedEvent(src_path))
            for src_path in events.files_created:
                self.queue_event(FileCreatedEvent(src_path))
            for src_path, dest_path in events.files_moved:
                self.queue_event(FileMovedEvent(src_path, dest_path))

            # Directories.
            for src_path in events.dirs_deleted:
                self.queue_event(DirDeletedEvent(src_path))
            for src_path in events.dirs_modified:
                self.queue_event(DirModifiedEvent(src_path))
            for src_path in events.dirs_created:
                self.queue_event(DirCreatedEvent(src_path))
            for src_path, dest_path in events.dirs_moved:
                self.queue_event(DirMovedEvent(src_path, dest_path))


class PollingObserver(BaseObserver):
    """Platform-independent observer that polls a directory to detect file
    system changes.
    """

    def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None:
        super().__init__(PollingEmitter, timeout=timeout)


class PollingObserverVFS(BaseObserver):
    """File system independent observer that polls a directory to detect changes."""

    def __init__(
        self,
        stat: Callable[[str], os.stat_result],
        listdir: Callable[[str | None], Iterator[os.DirEntry]],
        *,
        polling_interval: int = 1,
    ) -> None:
        """:param stat: stat function. See ``os.stat`` for details.
        :param listdir: listdir function. See ``os.scandir`` for details.
        :type polling_interval: int
        :param polling_interval: interval in seconds between polling the file system.
""" emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir) super().__init__(emitter_cls, timeout=polling_interval) # type: ignore[arg-type] watchdog-6.0.0/src/watchdog/observers/read_directory_changes.py000066400000000000000000000101071471115752600247670ustar00rootroot00000000000000from __future__ import annotations import os.path import platform import threading from typing import TYPE_CHECKING from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, generate_sub_created_events, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.observers.winapi import close_directory_handle, get_directory_handle, read_events if TYPE_CHECKING: from ctypes.wintypes import HANDLE from watchdog.events import FileSystemEvent from watchdog.observers.api import EventQueue, ObservedWatch from watchdog.observers.winapi import WinAPINativeEvent class WindowsApiEmitter(EventEmitter): """Windows API-based emitter that uses ReadDirectoryChangesW to detect file system changes for a watch. 
""" def __init__( self, event_queue: EventQueue, watch: ObservedWatch, *, timeout: float = DEFAULT_EMITTER_TIMEOUT, event_filter: list[type[FileSystemEvent]] | None = None, ) -> None: super().__init__(event_queue, watch, timeout=timeout, event_filter=event_filter) self._lock = threading.Lock() self._whandle: HANDLE | None = None def on_thread_start(self) -> None: self._whandle = get_directory_handle(self.watch.path) if platform.python_implementation() == "PyPy": def start(self) -> None: """PyPy needs some time before receiving events, see #792.""" from time import sleep super().start() sleep(0.01) def on_thread_stop(self) -> None: if self._whandle: close_directory_handle(self._whandle) def _read_events(self) -> list[WinAPINativeEvent]: if not self._whandle: return [] return read_events(self._whandle, self.watch.path, recursive=self.watch.is_recursive) def queue_events(self, timeout: float) -> None: winapi_events = self._read_events() with self._lock: last_renamed_src_path = "" for winapi_event in winapi_events: src_path = os.path.join(self.watch.path, winapi_event.src_path) if winapi_event.is_renamed_old: last_renamed_src_path = src_path elif winapi_event.is_renamed_new: dest_path = src_path src_path = last_renamed_src_path if os.path.isdir(dest_path): self.queue_event(DirMovedEvent(src_path, dest_path)) if self.watch.is_recursive: for sub_moved_event in generate_sub_moved_events(src_path, dest_path): self.queue_event(sub_moved_event) else: self.queue_event(FileMovedEvent(src_path, dest_path)) elif winapi_event.is_modified: self.queue_event((DirModifiedEvent if os.path.isdir(src_path) else FileModifiedEvent)(src_path)) elif winapi_event.is_added: isdir = os.path.isdir(src_path) self.queue_event((DirCreatedEvent if isdir else FileCreatedEvent)(src_path)) if isdir and self.watch.is_recursive: for sub_created_event in generate_sub_created_events(src_path): self.queue_event(sub_created_event) elif winapi_event.is_removed: self.queue_event(FileDeletedEvent(src_path)) 
elif winapi_event.is_removed_self: self.queue_event(DirDeletedEvent(self.watch.path)) self.stop() class WindowsApiObserver(BaseObserver): """Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, *, timeout: float = DEFAULT_OBSERVER_TIMEOUT) -> None: super().__init__(WindowsApiEmitter, timeout=timeout) watchdog-6.0.0/src/watchdog/observers/winapi.py000066400000000000000000000265631471115752600216040ustar00rootroot00000000000000""":module: watchdog.observers.winapi :synopsis: Windows API-Python interface (removes dependency on ``pywin32``). :author: theller@ctypes.org (Thomas Heller) :author: will@willmcgugan.com (Will McGugan) :author: ryan@rfk.id.au (Ryan Kelly) :author: yesudeep@gmail.com (Yesudeep Mangalapilly) :author: thomas.amland@gmail.com (Thomas Amland) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: windows """ from __future__ import annotations import contextlib import ctypes from ctypes.wintypes import BOOL, DWORD, HANDLE, LPCWSTR, LPVOID, LPWSTR from dataclasses import dataclass from functools import reduce from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Any # Invalid handle value. INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value # File notification constants. FILE_NOTIFY_CHANGE_FILE_NAME = 0x01 FILE_NOTIFY_CHANGE_DIR_NAME = 0x02 FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04 FILE_NOTIFY_CHANGE_SIZE = 0x08 FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 FILE_NOTIFY_CHANGE_CREATION = 0x040 FILE_NOTIFY_CHANGE_SECURITY = 0x0100 FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 FILE_FLAG_OVERLAPPED = 0x40000000 FILE_LIST_DIRECTORY = 1 FILE_SHARE_READ = 0x01 FILE_SHARE_WRITE = 0x02 FILE_SHARE_DELETE = 0x04 OPEN_EXISTING = 3 VOLUME_NAME_NT = 0x02 # File action constants. 
FILE_ACTION_CREATED = 1 FILE_ACTION_DELETED = 2 FILE_ACTION_MODIFIED = 3 FILE_ACTION_RENAMED_OLD_NAME = 4 FILE_ACTION_RENAMED_NEW_NAME = 5 FILE_ACTION_DELETED_SELF = 0xFFFE FILE_ACTION_OVERFLOW = 0xFFFF # Aliases FILE_ACTION_ADDED = FILE_ACTION_CREATED FILE_ACTION_REMOVED = FILE_ACTION_DELETED FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF THREAD_TERMINATE = 0x0001 # IO waiting constants. WAIT_ABANDONED = 0x00000080 WAIT_IO_COMPLETION = 0x000000C0 WAIT_OBJECT_0 = 0x00000000 WAIT_TIMEOUT = 0x00000102 # Error codes ERROR_OPERATION_ABORTED = 995 class OVERLAPPED(ctypes.Structure): _fields_ = ( ("Internal", LPVOID), ("InternalHigh", LPVOID), ("Offset", DWORD), ("OffsetHigh", DWORD), ("Pointer", LPVOID), ("hEvent", HANDLE), ) def _errcheck_bool(value: Any | None, func: Any, args: Any) -> Any: if not value: raise ctypes.WinError() # type: ignore[attr-defined] return args def _errcheck_handle(value: Any | None, func: Any, args: Any) -> Any: if not value: raise ctypes.WinError() # type: ignore[attr-defined] if value == INVALID_HANDLE_VALUE: raise ctypes.WinError() # type: ignore[attr-defined] return args def _errcheck_dword(value: Any | None, func: Any, args: Any) -> Any: if value == 0xFFFFFFFF: raise ctypes.WinError() # type: ignore[attr-defined] return args kernel32 = ctypes.WinDLL("kernel32") # type: ignore[attr-defined] ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW ReadDirectoryChangesW.restype = BOOL ReadDirectoryChangesW.errcheck = _errcheck_bool ReadDirectoryChangesW.argtypes = ( HANDLE, # hDirectory LPVOID, # lpBuffer DWORD, # nBufferLength BOOL, # bWatchSubtree DWORD, # dwNotifyFilter ctypes.POINTER(DWORD), # lpBytesReturned ctypes.POINTER(OVERLAPPED), # lpOverlapped LPVOID, # FileIOCompletionRoutine # lpCompletionRoutine ) CreateFileW = kernel32.CreateFileW CreateFileW.restype = HANDLE CreateFileW.errcheck = _errcheck_handle CreateFileW.argtypes = ( LPCWSTR, # lpFileName DWORD, # dwDesiredAccess DWORD, # dwShareMode LPVOID, # lpSecurityAttributes 
DWORD, # dwCreationDisposition DWORD, # dwFlagsAndAttributes HANDLE, # hTemplateFile ) CloseHandle = kernel32.CloseHandle CloseHandle.restype = BOOL CloseHandle.argtypes = (HANDLE,) # hObject CancelIoEx = kernel32.CancelIoEx CancelIoEx.restype = BOOL CancelIoEx.errcheck = _errcheck_bool CancelIoEx.argtypes = ( HANDLE, # hObject ctypes.POINTER(OVERLAPPED), # lpOverlapped ) CreateEvent = kernel32.CreateEventW CreateEvent.restype = HANDLE CreateEvent.errcheck = _errcheck_handle CreateEvent.argtypes = ( LPVOID, # lpEventAttributes BOOL, # bManualReset BOOL, # bInitialState LPCWSTR, # lpName ) SetEvent = kernel32.SetEvent SetEvent.restype = BOOL SetEvent.errcheck = _errcheck_bool SetEvent.argtypes = (HANDLE,) # hEvent WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx WaitForSingleObjectEx.restype = DWORD WaitForSingleObjectEx.errcheck = _errcheck_dword WaitForSingleObjectEx.argtypes = ( HANDLE, # hObject DWORD, # dwMilliseconds BOOL, # bAlertable ) CreateIoCompletionPort = kernel32.CreateIoCompletionPort CreateIoCompletionPort.restype = HANDLE CreateIoCompletionPort.errcheck = _errcheck_handle CreateIoCompletionPort.argtypes = ( HANDLE, # FileHandle HANDLE, # ExistingCompletionPort LPVOID, # CompletionKey DWORD, # NumberOfConcurrentThreads ) GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus GetQueuedCompletionStatus.restype = BOOL GetQueuedCompletionStatus.errcheck = _errcheck_bool GetQueuedCompletionStatus.argtypes = ( HANDLE, # CompletionPort LPVOID, # lpNumberOfBytesTransferred LPVOID, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped DWORD, # dwMilliseconds ) PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus PostQueuedCompletionStatus.restype = BOOL PostQueuedCompletionStatus.errcheck = _errcheck_bool PostQueuedCompletionStatus.argtypes = ( HANDLE, # CompletionPort DWORD, # lpNumberOfBytesTransferred DWORD, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ) GetFinalPathNameByHandleW = 
kernel32.GetFinalPathNameByHandleW GetFinalPathNameByHandleW.restype = DWORD GetFinalPathNameByHandleW.errcheck = _errcheck_dword GetFinalPathNameByHandleW.argtypes = ( HANDLE, # hFile LPWSTR, # lpszFilePath DWORD, # cchFilePath DWORD, # DWORD ) class FileNotifyInformation(ctypes.Structure): _fields_ = ( ("NextEntryOffset", DWORD), ("Action", DWORD), ("FileNameLength", DWORD), ("FileName", (ctypes.c_char * 1)), ) LPFNI = ctypes.POINTER(FileNotifyInformation) # We don't need to recalculate these flags every time a call is made to # the win32 API functions. WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS WATCHDOG_FILE_SHARE_FLAGS = reduce( lambda x, y: x | y, [ FILE_SHARE_READ, FILE_SHARE_WRITE, FILE_SHARE_DELETE, ], ) WATCHDOG_FILE_NOTIFY_FLAGS = reduce( lambda x, y: x | y, [ FILE_NOTIFY_CHANGE_FILE_NAME, FILE_NOTIFY_CHANGE_DIR_NAME, FILE_NOTIFY_CHANGE_ATTRIBUTES, FILE_NOTIFY_CHANGE_SIZE, FILE_NOTIFY_CHANGE_LAST_WRITE, FILE_NOTIFY_CHANGE_SECURITY, FILE_NOTIFY_CHANGE_LAST_ACCESS, FILE_NOTIFY_CHANGE_CREATION, ], ) # ReadDirectoryChangesW buffer length. # To handle cases with lot of changes, this seems the highest safest value we can use. # Note: it will fail with ERROR_INVALID_PARAMETER when it is greater than 64 KB and # the application is monitoring a directory over the network. # This is due to a packet size limitation with the underlying file sharing protocols. # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw#remarks BUFFER_SIZE = 64000 # Buffer length for path-related stuff. # Introduced to keep the old behavior when we bumped BUFFER_SIZE from 2048 to 64000 in v1.0.0. 
PATH_BUFFER_SIZE = 2048 def _parse_event_buffer(read_buffer: bytes, n_bytes: int) -> list[tuple[int, str]]: results = [] while n_bytes > 0: fni = ctypes.cast(read_buffer, LPFNI)[0] # type: ignore[arg-type] ptr = ctypes.addressof(fni) + FileNotifyInformation.FileName.offset filename = ctypes.string_at(ptr, fni.FileNameLength) results.append((fni.Action, filename.decode("utf-16"))) num_to_skip = fni.NextEntryOffset if num_to_skip <= 0: break read_buffer = read_buffer[num_to_skip:] n_bytes -= num_to_skip # num_to_skip is long. n_bytes should be long too. return results def _is_observed_path_deleted(handle: HANDLE, path: str) -> bool: # Comparison of observed path and actual path, returned by # GetFinalPathNameByHandleW. If directory moved to the trash bin, or # deleted, actual path will not be equal to observed path. buff = ctypes.create_unicode_buffer(PATH_BUFFER_SIZE) GetFinalPathNameByHandleW(handle, buff, PATH_BUFFER_SIZE, VOLUME_NAME_NT) return buff.value != path def _generate_observed_path_deleted_event() -> tuple[bytes, int]: # Create synthetic event for notify that observed directory is deleted path = ctypes.create_unicode_buffer(".") event = FileNotifyInformation(0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8")) event_size = ctypes.sizeof(event) buff = ctypes.create_string_buffer(PATH_BUFFER_SIZE) ctypes.memmove(buff, ctypes.addressof(event), event_size) return buff.raw, event_size def get_directory_handle(path: str) -> HANDLE: """Returns a Windows handle to the specified directory path.""" return CreateFileW( path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS, None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None, ) def close_directory_handle(handle: HANDLE) -> None: try: CancelIoEx(handle, None) # force ReadDirectoryChangesW to return CloseHandle(handle) except OSError: with contextlib.suppress(Exception): CloseHandle(handle) def read_directory_changes(handle: HANDLE, path: str, *, recursive: bool) -> tuple[bytes, int]: """Read changes to the 
directory using the specified directory handle. https://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html """ event_buffer = ctypes.create_string_buffer(BUFFER_SIZE) nbytes = DWORD() try: ReadDirectoryChangesW( handle, ctypes.byref(event_buffer), len(event_buffer), recursive, WATCHDOG_FILE_NOTIFY_FLAGS, ctypes.byref(nbytes), None, None, ) except OSError as e: if e.winerror == ERROR_OPERATION_ABORTED: # type: ignore[attr-defined] return event_buffer.raw, 0 # Handle the case when the root path is deleted if _is_observed_path_deleted(handle, path): return _generate_observed_path_deleted_event() raise return event_buffer.raw, int(nbytes.value) @dataclass(unsafe_hash=True) class WinAPINativeEvent: action: int src_path: str @property def is_added(self) -> bool: return self.action == FILE_ACTION_CREATED @property def is_removed(self) -> bool: return self.action == FILE_ACTION_REMOVED @property def is_modified(self) -> bool: return self.action == FILE_ACTION_MODIFIED @property def is_renamed_old(self) -> bool: return self.action == FILE_ACTION_RENAMED_OLD_NAME @property def is_renamed_new(self) -> bool: return self.action == FILE_ACTION_RENAMED_NEW_NAME @property def is_removed_self(self) -> bool: return self.action == FILE_ACTION_REMOVED_SELF def read_events(handle: HANDLE, path: str, *, recursive: bool) -> list[WinAPINativeEvent]: buf, nbytes = read_directory_changes(handle, path, recursive=recursive) events = _parse_event_buffer(buf, nbytes) return [WinAPINativeEvent(action, src_path) for action, src_path in events] watchdog-6.0.0/src/watchdog/py.typed000066400000000000000000000000001471115752600174020ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/tricks/000077500000000000000000000000001471115752600172145ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/tricks/__init__.py000066400000000000000000000224241471115752600213310ustar00rootroot00000000000000""":module: watchdog.tricks :synopsis: Utility event handlers. 
:author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ------- .. autoclass:: Trick :members: :show-inheritance: .. autoclass:: LoggerTrick :members: :show-inheritance: .. autoclass:: ShellCommandTrick :members: :show-inheritance: .. autoclass:: AutoRestartTrick :members: :show-inheritance: """ from __future__ import annotations import contextlib import functools import logging import os import signal import subprocess import threading import time from watchdog.events import EVENT_TYPE_CLOSED_NO_WRITE, EVENT_TYPE_OPENED, FileSystemEvent, PatternMatchingEventHandler from watchdog.utils import echo, platform from watchdog.utils.event_debouncer import EventDebouncer from watchdog.utils.process_watcher import ProcessWatcher logger = logging.getLogger(__name__) echo_events = functools.partial(echo.echo, write=lambda msg: logger.info(msg)) class Trick(PatternMatchingEventHandler): """Your tricks should subclass this class.""" def __repr__(self) -> str: return f"<{type(self).__name__}>" @classmethod def generate_yaml(cls) -> str: return f"""- {cls.__module__}.{cls.__name__}: args: - argument1 - argument2 kwargs: patterns: - "*.py" - "*.js" ignore_patterns: - "version.py" ignore_directories: false """ class LoggerTrick(Trick): """A simple trick that does only logs events.""" @echo_events def on_any_event(self, event: FileSystemEvent) -> None: pass class ShellCommandTrick(Trick): """Executes shell commands in response to matched events.""" def __init__( self, shell_command: str, *, patterns: list[str] | None = None, ignore_patterns: list[str] | None = None, ignore_directories: bool = False, wait_for_process: bool = False, drop_during_process: bool = False, ): super().__init__( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=ignore_directories, ) self.shell_command = shell_command self.wait_for_process = wait_for_process self.drop_during_process = drop_during_process self.process: 
subprocess.Popen[bytes] | None = None self._process_watchers: set[ProcessWatcher] = set() def on_any_event(self, event: FileSystemEvent) -> None: if event.event_type in {EVENT_TYPE_OPENED, EVENT_TYPE_CLOSED_NO_WRITE}: # FIXME: see issue #949, and find a way to better handle that scenario return from string import Template if self.drop_during_process and self.is_process_running(): return object_type = "directory" if event.is_directory else "file" context = { "watch_src_path": event.src_path, "watch_dest_path": "", "watch_event_type": event.event_type, "watch_object": object_type, } if self.shell_command is None: if hasattr(event, "dest_path"): context["dest_path"] = event.dest_path command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"' else: command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"' else: if hasattr(event, "dest_path"): context["watch_dest_path"] = event.dest_path command = self.shell_command command = Template(command).safe_substitute(**context) self.process = subprocess.Popen(command, shell=True) if self.wait_for_process: self.process.wait() else: process_watcher = ProcessWatcher(self.process, None) self._process_watchers.add(process_watcher) process_watcher.process_termination_callback = functools.partial( self._process_watchers.discard, process_watcher, ) process_watcher.start() def is_process_running(self) -> bool: return bool(self._process_watchers or (self.process is not None and self.process.poll() is None)) class AutoRestartTrick(Trick): """Starts a long-running subprocess and restarts it on matched events. The command parameter is a list of command arguments, such as `['bin/myserver', '-c', 'etc/myconfig.ini']`. Call `start()` after creating the Trick. Call `stop()` when stopping the process. 
""" def __init__( self, command: list[str], *, patterns: list[str] | None = None, ignore_patterns: list[str] | None = None, ignore_directories: bool = False, stop_signal: signal.Signals | int = signal.SIGINT, kill_after: int = 10, debounce_interval_seconds: int = 0, restart_on_command_exit: bool = True, ): if kill_after < 0: error = "kill_after must be non-negative." raise ValueError(error) if debounce_interval_seconds < 0: error = "debounce_interval_seconds must be non-negative." raise ValueError(error) super().__init__( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=ignore_directories, ) self.command = command self.stop_signal = stop_signal.value if isinstance(stop_signal, signal.Signals) else stop_signal self.kill_after = kill_after self.debounce_interval_seconds = debounce_interval_seconds self.restart_on_command_exit = restart_on_command_exit self.process: subprocess.Popen[bytes] | None = None self.process_watcher: ProcessWatcher | None = None self.event_debouncer: EventDebouncer | None = None self.restart_count = 0 self._is_process_stopping = False self._is_trick_stopping = False self._stopping_lock = threading.RLock() def start(self) -> None: if self.debounce_interval_seconds: self.event_debouncer = EventDebouncer( debounce_interval_seconds=self.debounce_interval_seconds, events_callback=lambda events: self._restart_process(), ) self.event_debouncer.start() self._start_process() def stop(self) -> None: # Ensure the body of the function is only run once. with self._stopping_lock: if self._is_trick_stopping: return self._is_trick_stopping = True process_watcher = self.process_watcher if self.event_debouncer is not None: self.event_debouncer.stop() self._stop_process() # Don't leak threads: Wait for background threads to stop. 
if self.event_debouncer is not None: self.event_debouncer.join() if process_watcher is not None: process_watcher.join() def _start_process(self) -> None: if self._is_trick_stopping: return # windows doesn't have setsid self.process = subprocess.Popen(self.command, preexec_fn=getattr(os, "setsid", None)) if self.restart_on_command_exit: self.process_watcher = ProcessWatcher(self.process, self._restart_process) self.process_watcher.start() def _stop_process(self) -> None: # Ensure the body of the function is not run in parallel in different threads. with self._stopping_lock: if self._is_process_stopping: return self._is_process_stopping = True try: if self.process_watcher is not None: self.process_watcher.stop() self.process_watcher = None if self.process is not None: try: kill_process(self.process.pid, self.stop_signal) except OSError: # Process is already gone pass else: kill_time = time.time() + self.kill_after while time.time() < kill_time: if self.process.poll() is not None: break time.sleep(0.25) else: # Process is already gone with contextlib.suppress(OSError): kill_process(self.process.pid, 9) self.process = None finally: self._is_process_stopping = False @echo_events def on_any_event(self, event: FileSystemEvent) -> None: if event.event_type in {EVENT_TYPE_OPENED, EVENT_TYPE_CLOSED_NO_WRITE}: # FIXME: see issue #949, and find a way to better handle that scenario return if self.event_debouncer is not None: self.event_debouncer.handle_event(event) else: self._restart_process() def _restart_process(self) -> None: if self._is_trick_stopping: return self._stop_process() self._start_process() self.restart_count += 1 if platform.is_windows(): def kill_process(pid: int, stop_signal: int) -> None: os.kill(pid, stop_signal) else: def kill_process(pid: int, stop_signal: int) -> None: os.killpg(os.getpgid(pid), stop_signal) 
watchdog-6.0.0/src/watchdog/utils/000077500000000000000000000000001471115752600170555ustar00rootroot00000000000000watchdog-6.0.0/src/watchdog/utils/__init__.py000066400000000000000000000066711471115752600212000ustar00rootroot00000000000000""":module: watchdog.utils :synopsis: Utility classes and functions. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ------- .. autoclass:: BaseThread :members: :show-inheritance: :inherited-members: """ from __future__ import annotations import sys import threading from typing import TYPE_CHECKING if TYPE_CHECKING: from types import ModuleType from watchdog.tricks import Trick class UnsupportedLibcError(Exception): pass class WatchdogShutdownError(Exception): """Semantic exception used to signal an external shutdown event.""" class BaseThread(threading.Thread): """Convenience class for creating stoppable threads.""" def __init__(self) -> None: threading.Thread.__init__(self) if hasattr(self, "daemon"): self.daemon = True else: self.setDaemon(True) self._stopped_event = threading.Event() @property def stopped_event(self) -> threading.Event: return self._stopped_event def should_keep_running(self) -> bool: """Determines whether the thread should continue running.""" return not self._stopped_event.is_set() def on_thread_stop(self) -> None: """Override this method instead of :meth:`stop()`. :meth:`stop()` calls this method. This method is called immediately after the thread is signaled to stop. """ def stop(self) -> None: """Signals the thread to stop.""" self._stopped_event.set() self.on_thread_stop() def on_thread_start(self) -> None: """Override this method instead of :meth:`start()`. :meth:`start()` calls this method. This method is called right before this thread is started and this object's run() method is invoked. 
""" def start(self) -> None: self.on_thread_start() threading.Thread.start(self) def load_module(module_name: str) -> ModuleType: """Imports a module given its name and returns a handle to it.""" try: __import__(module_name) except ImportError as e: error = f"No module named {module_name}" raise ImportError(error) from e return sys.modules[module_name] def load_class(dotted_path: str) -> type[Trick]: """Loads and returns a class definition provided a dotted path specification the last part of the dotted path is the class name and there is at least one module name preceding the class name. Notes ----- You will need to ensure that the module you are trying to load exists in the Python path. Examples -------- - module.name.ClassName # Provided module.name is in the Python path. - module.ClassName # Provided module is in the Python path. What won't work: - ClassName - modle.name.ClassName # Typo in module name. - module.name.ClasNam # Typo in classname. """ dotted_path_split = dotted_path.split(".") if len(dotted_path_split) <= 1: error = f"Dotted module path {dotted_path} must contain a module name and a classname" raise ValueError(error) klass_name = dotted_path_split[-1] module_name = ".".join(dotted_path_split[:-1]) module = load_module(module_name) if hasattr(module, klass_name): return getattr(module, klass_name) error = f"Module {module_name} does not have class attribute {klass_name}" raise AttributeError(error) watchdog-6.0.0/src/watchdog/utils/bricks.py000066400000000000000000000047611471115752600207140ustar00rootroot00000000000000"""Utility collections or "bricks". :module: watchdog.utils.bricks :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: lalinsky@gmail.com (Lukáš Lalinský) :author: python@rcn.com (Raymond Hettinger) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ======= .. autoclass:: OrderedSetQueue :members: :show-inheritance: :inherited-members: .. 
autoclass:: OrderedSet """ from __future__ import annotations import queue from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Any class SkipRepeatsQueue(queue.Queue): """Thread-safe implementation of an special queue where a put of the last-item put'd will be dropped. The implementation leverages locking already implemented in the base class redefining only the primitives. Queued items must be immutable and hashable so that they can be used as dictionary keys. You must implement **only read-only properties** and the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and :meth:`Item.__ne__()` methods for items to be hashable. An example implementation follows:: class Item: def __init__(self, a, b): self._a = a self._b = b @property def a(self): return self._a @property def b(self): return self._b def _key(self): return (self._a, self._b) def __eq__(self, item): return self._key() == item._key() def __ne__(self, item): return self._key() != item._key() def __hash__(self): return hash(self._key()) based on the OrderedSetQueue below """ def _init(self, maxsize: int) -> None: super()._init(maxsize) self._last_item = None def put(self, item: Any, block: bool = True, timeout: float | None = None) -> None: # noqa: FBT001,FBT002 """This method will be used by `eventlet`, when enabled, so we cannot use force proper keyword-only arguments nor touch the signature. Also, the `timeout` argument will be ignored in that case. 
""" if self._last_item is None or item != self._last_item: super().put(item, block, timeout) def _put(self, item: Any) -> None: super()._put(item) self._last_item = item def _get(self) -> Any: item = super()._get() if item is self._last_item: self._last_item = None return item watchdog-6.0.0/src/watchdog/utils/delayed_queue.py000066400000000000000000000050671471115752600222520ustar00rootroot00000000000000""":module: watchdog.utils.delayed_queue :author: thomas.amland@gmail.com (Thomas Amland) :author: contact@tiger-222.fr (Mickaël Schoentgen) """ from __future__ import annotations import threading import time from collections import deque from typing import Callable, Generic, TypeVar T = TypeVar("T") class DelayedQueue(Generic[T]): def __init__(self, delay: float) -> None: self.delay_sec = delay self._lock = threading.Lock() self._not_empty = threading.Condition(self._lock) self._queue: deque[tuple[T, float, bool]] = deque() self._closed = False def put(self, element: T, *, delay: bool = False) -> None: """Add element to queue.""" self._lock.acquire() self._queue.append((element, time.time(), delay)) self._not_empty.notify() self._lock.release() def close(self) -> None: """Close queue, indicating no more items will be added.""" self._closed = True # Interrupt the blocking _not_empty.wait() call in get self._not_empty.acquire() self._not_empty.notify() self._not_empty.release() def get(self) -> T | None: """Remove and return an element from the queue, or this queue has been closed raise the Closed exception. 
""" while True: # wait for element to be added to queue self._not_empty.acquire() while len(self._queue) == 0 and not self._closed: self._not_empty.wait() if self._closed: self._not_empty.release() return None head, insert_time, delay = self._queue[0] self._not_empty.release() # wait for delay if required if delay: time_left = insert_time + self.delay_sec - time.time() while time_left > 0: time.sleep(time_left) time_left = insert_time + self.delay_sec - time.time() # return element if it's still in the queue with self._lock: if len(self._queue) > 0 and self._queue[0][0] is head: self._queue.popleft() return head def remove(self, predicate: Callable[[T], bool]) -> T | None: """Remove and return the first items for which predicate is True, ignoring delay. """ with self._lock: for i, (elem, *_) in enumerate(self._queue): if predicate(elem): del self._queue[i] return elem return None watchdog-6.0.0/src/watchdog/utils/dirsnapshot.py000066400000000000000000000346241471115752600217760ustar00rootroot00000000000000""":module: watchdog.utils.dirsnapshot :synopsis: Directory snapshots and comparison. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) .. ADMONITION:: Where are the moved events? They "disappeared" This implementation does not take partition boundaries into consideration. It will only work when the directory tree is entirely on the same file system. More specifically, any part of the code that depends on inode numbers can break if partition boundaries are crossed. In these cases, the snapshot diff will represent file/directory movement as created and deleted events. Classes ------- .. autoclass:: DirectorySnapshot :members: :show-inheritance: .. autoclass:: DirectorySnapshotDiff :members: :show-inheritance: .. 
autoclass:: EmptyDirectorySnapshot :members: :show-inheritance: """ from __future__ import annotations import contextlib import errno import os from stat import S_ISDIR from typing import TYPE_CHECKING if TYPE_CHECKING: from collections.abc import Iterator from typing import Any, Callable class DirectorySnapshotDiff: """Compares two directory snapshots and creates an object that represents the difference between the two snapshots. :param ref: The reference directory snapshot. :type ref: :class:`DirectorySnapshot` :param snapshot: The directory snapshot which will be compared with the reference snapshot. :type snapshot: :class:`DirectorySnapshot` :param ignore_device: A boolean indicating whether to ignore the device id or not. By default, a file may be uniquely identified by a combination of its first inode and its device id. The problem is that the device id may (or may not) change between system boots. This problem would cause the DirectorySnapshotDiff to think a file has been deleted and created again but it would be the exact same file. Set to True only if you are sure you will always use the same device. 
:type ignore_device: :class:`bool` """ def __init__( self, ref: DirectorySnapshot, snapshot: DirectorySnapshot, *, ignore_device: bool = False, ) -> None: created = snapshot.paths - ref.paths deleted = ref.paths - snapshot.paths if ignore_device: def get_inode(directory: DirectorySnapshot, full_path: bytes | str) -> int | tuple[int, int]: return directory.inode(full_path)[0] else: def get_inode(directory: DirectorySnapshot, full_path: bytes | str) -> int | tuple[int, int]: return directory.inode(full_path) # check that all unchanged paths have the same inode for path in ref.paths & snapshot.paths: if get_inode(ref, path) != get_inode(snapshot, path): created.add(path) deleted.add(path) # find moved paths moved: set[tuple[bytes | str, bytes | str]] = set() for path in set(deleted): inode = ref.inode(path) new_path = snapshot.path(inode) if new_path: # file is not deleted but moved deleted.remove(path) moved.add((path, new_path)) for path in set(created): inode = snapshot.inode(path) old_path = ref.path(inode) if old_path: created.remove(path) moved.add((old_path, path)) # find modified paths # first check paths that have not moved modified: set[bytes | str] = set() for path in ref.paths & snapshot.paths: if get_inode(ref, path) == get_inode(snapshot, path) and ( ref.mtime(path) != snapshot.mtime(path) or ref.size(path) != snapshot.size(path) ): modified.add(path) for old_path, new_path in moved: if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size(old_path) != snapshot.size(new_path): modified.add(old_path) self._dirs_created = [path for path in created if snapshot.isdir(path)] self._dirs_deleted = [path for path in deleted if ref.isdir(path)] self._dirs_modified = [path for path in modified if ref.isdir(path)] self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)] self._files_created = list(created - set(self._dirs_created)) self._files_deleted = list(deleted - set(self._dirs_deleted)) self._files_modified = list(modified - 
set(self._dirs_modified)) self._files_moved = list(moved - set(self._dirs_moved)) def __str__(self) -> str: return self.__repr__() def __repr__(self) -> str: fmt = ( "<{0} files(created={1}, deleted={2}, modified={3}, moved={4})," " folders(created={5}, deleted={6}, modified={7}, moved={8})>" ) return fmt.format( type(self).__name__, len(self._files_created), len(self._files_deleted), len(self._files_modified), len(self._files_moved), len(self._dirs_created), len(self._dirs_deleted), len(self._dirs_modified), len(self._dirs_moved), ) @property def files_created(self) -> list[bytes | str]: """List of files that were created.""" return self._files_created @property def files_deleted(self) -> list[bytes | str]: """List of files that were deleted.""" return self._files_deleted @property def files_modified(self) -> list[bytes | str]: """List of files that were modified.""" return self._files_modified @property def files_moved(self) -> list[tuple[bytes | str, bytes | str]]: """List of files that were moved. Each event is a two-tuple the first item of which is the path that has been renamed to the second item in the tuple. """ return self._files_moved @property def dirs_modified(self) -> list[bytes | str]: """List of directories that were modified.""" return self._dirs_modified @property def dirs_moved(self) -> list[tuple[bytes | str, bytes | str]]: """List of directories that were moved. Each event is a two-tuple the first item of which is the path that has been renamed to the second item in the tuple. """ return self._dirs_moved @property def dirs_deleted(self) -> list[bytes | str]: """List of directories that were deleted.""" return self._dirs_deleted @property def dirs_created(self) -> list[bytes | str]: """List of directories that were created.""" return self._dirs_created class ContextManager: """Context manager that creates two directory snapshots and a diff object that represents the difference between the two snapshots. 
:param path: The directory path for which a snapshot should be taken. :type path: ``str`` :param recursive: ``True`` if the entire directory tree should be included in the snapshot; ``False`` otherwise. :type recursive: ``bool`` :param stat: Use custom stat function that returns a stat structure for path. Currently only st_dev, st_ino, st_mode and st_mtime are needed. A function taking a ``path`` as argument which will be called for every entry in the directory tree. :param listdir: Use custom listdir function. For details see ``os.scandir``. :param ignore_device: A boolean indicating whether to ignore the device id or not. By default, a file may be uniquely identified by a combination of its first inode and its device id. The problem is that the device id may (or may not) change between system boots. This problem would cause the DirectorySnapshotDiff to think a file has been deleted and created again but it would be the exact same file. Set to True only if you are sure you will always use the same device. :type ignore_device: :class:`bool` """ def __init__( self, path: str, *, recursive: bool = True, stat: Callable[[str], os.stat_result] = os.stat, listdir: Callable[[str | None], Iterator[os.DirEntry]] = os.scandir, ignore_device: bool = False, ) -> None: self.path = path self.recursive = recursive self.stat = stat self.listdir = listdir self.ignore_device = ignore_device def __enter__(self) -> None: self.pre_snapshot = self.get_snapshot() def __exit__(self, *args: object) -> None: self.post_snapshot = self.get_snapshot() self.diff = DirectorySnapshotDiff( self.pre_snapshot, self.post_snapshot, ignore_device=self.ignore_device, ) def get_snapshot(self) -> DirectorySnapshot: return DirectorySnapshot( path=self.path, recursive=self.recursive, stat=self.stat, listdir=self.listdir, ) class DirectorySnapshot: """A snapshot of stat information of files in a directory. :param path: The directory path for which a snapshot should be taken. 
:type path: ``str`` :param recursive: ``True`` if the entire directory tree should be included in the snapshot; ``False`` otherwise. :type recursive: ``bool`` :param stat: Use custom stat function that returns a stat structure for path. Currently only st_dev, st_ino, st_mode and st_mtime are needed. A function taking a ``path`` as argument which will be called for every entry in the directory tree. :param listdir: Use custom listdir function. For details see ``os.scandir``. """ def __init__( self, path: str, *, recursive: bool = True, stat: Callable[[str], os.stat_result] = os.stat, listdir: Callable[[str | None], Iterator[os.DirEntry]] = os.scandir, ) -> None: self.recursive = recursive self.stat = stat self.listdir = listdir self._stat_info: dict[bytes | str, os.stat_result] = {} self._inode_to_path: dict[tuple[int, int], bytes | str] = {} st = self.stat(path) self._stat_info[path] = st self._inode_to_path[(st.st_ino, st.st_dev)] = path for p, st in self.walk(path): i = (st.st_ino, st.st_dev) self._inode_to_path[i] = p self._stat_info[p] = st def walk(self, root: str) -> Iterator[tuple[str, os.stat_result]]: try: paths = [os.path.join(root, entry.name) for entry in self.listdir(root)] except OSError as e: # Directory may have been deleted between finding it in the directory # list of its parent and trying to delete its contents. If this # happens we treat it as empty. Likewise if the directory was replaced # with a file of the same name (less likely, but possible). 
if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): return else: raise entries = [] for p in paths: with contextlib.suppress(OSError): entry = (p, self.stat(p)) entries.append(entry) yield entry if self.recursive: for path, st in entries: with contextlib.suppress(PermissionError): if S_ISDIR(st.st_mode): yield from self.walk(path) @property def paths(self) -> set[bytes | str]: """Set of file/directory paths in the snapshot.""" return set(self._stat_info.keys()) def path(self, uid: tuple[int, int]) -> bytes | str | None: """Returns path for id. None if id is unknown to this snapshot.""" return self._inode_to_path.get(uid) def inode(self, path: bytes | str) -> tuple[int, int]: """Returns an id for path.""" st = self._stat_info[path] return (st.st_ino, st.st_dev) def isdir(self, path: bytes | str) -> bool: return S_ISDIR(self._stat_info[path].st_mode) def mtime(self, path: bytes | str) -> float: return self._stat_info[path].st_mtime def size(self, path: bytes | str) -> int: return self._stat_info[path].st_size def stat_info(self, path: bytes | str) -> os.stat_result: """Returns a stat information object for the specified path from the snapshot. Attached information is subject to change. Do not use unless you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`, :func:`isdir` instead. :param path: The path for which stat information should be obtained from a snapshot. """ return self._stat_info[path] def __sub__(self, previous_dirsnap: DirectorySnapshot) -> DirectorySnapshotDiff: """Allow subtracting a DirectorySnapshot object instance from another. :returns: A :class:`DirectorySnapshotDiff` object. """ return DirectorySnapshotDiff(previous_dirsnap, self) def __str__(self) -> str: return self.__repr__() def __repr__(self) -> str: return str(self._stat_info) class EmptyDirectorySnapshot(DirectorySnapshot): """Class to implement an empty snapshot. 
This is used together with DirectorySnapshot and DirectorySnapshotDiff in order to get all the files/folders in the directory as created. """ def __init__(self) -> None: pass @staticmethod def path(_: Any) -> None: """Mock up method to return the path of the received inode. As the snapshot is intended to be empty, it always returns None. :returns: None. """ return @property def paths(self) -> set: """Mock up method to return a set of file/directory paths in the snapshot. As the snapshot is intended to be empty, it always returns an empty set. :returns: An empty set. """ return set() watchdog-6.0.0/src/watchdog/utils/echo.py000066400000000000000000000042421471115752600203470ustar00rootroot00000000000000# echo.py: Tracing function calls using Python decorators. # # Written by Thomas Guest # Please see http://wordaligned.org/articles/echo # # Place into the public domain. """Echo calls made to functions in a module. "Echoing" a function call means printing out the name of the function and the values of its arguments before making the call (which is more commonly referred to as "tracing", but Python already has a trace module). Alternatively, echo.echo can be used to decorate functions. Calls to the decorated function will be echoed. Example: ------- @echo.echo def my_function(args): pass """ from __future__ import annotations import functools import sys from typing import TYPE_CHECKING if TYPE_CHECKING: from typing import Any, Callable def format_arg_value(arg_val: tuple[str, tuple[Any, ...]]) -> str: """Return a string representing a (name, value) pair.""" arg, val = arg_val return f"{arg}={val!r}" def echo(fn: Callable, write: Callable[[str], int | None] = sys.stdout.write) -> Callable: """Echo calls to a function. Returns a decorated version of the input function which "echoes" calls made to it by writing out the function's name and the arguments it was called with. 
""" # Unpack function's arg count, arg names, arg defaults code = fn.__code__ argcount = code.co_argcount argnames = code.co_varnames[:argcount] fn_defaults: tuple[Any] = fn.__defaults__ or () argdefs = dict(list(zip(argnames[-len(fn_defaults) :], fn_defaults))) @functools.wraps(fn) def wrapped(*v: Any, **k: Any) -> Callable: # Collect function arguments by chaining together positional, # defaulted, extra positional and keyword arguments. positional = list(map(format_arg_value, list(zip(argnames, v)))) defaulted = [format_arg_value((a, argdefs[a])) for a in argnames[len(v) :] if a not in k] nameless = list(map(repr, v[argcount:])) keyword = list(map(format_arg_value, list(k.items()))) args = positional + defaulted + nameless + keyword write(f"{fn.__name__}({', '.join(args)})\n") return fn(*v, **k) return wrapped watchdog-6.0.0/src/watchdog/utils/event_debouncer.py000066400000000000000000000040371471115752600226020ustar00rootroot00000000000000from __future__ import annotations import logging import threading from typing import TYPE_CHECKING from watchdog.utils import BaseThread if TYPE_CHECKING: from typing import Callable from watchdog.events import FileSystemEvent logger = logging.getLogger(__name__) class EventDebouncer(BaseThread): """Background thread for debouncing event handling. When an event is received, wait until the configured debounce interval passes before calling the callback. If additional events are received before the interval passes, reset the timer and keep waiting. When the debouncing interval passes, the callback will be called with a list of events in the order in which they were received. 
""" def __init__( self, debounce_interval_seconds: int, events_callback: Callable[[list[FileSystemEvent]], None], ) -> None: super().__init__() self.debounce_interval_seconds = debounce_interval_seconds self.events_callback = events_callback self._events: list[FileSystemEvent] = [] self._cond = threading.Condition() def handle_event(self, event: FileSystemEvent) -> None: with self._cond: self._events.append(event) self._cond.notify() def stop(self) -> None: with self._cond: super().stop() self._cond.notify() def run(self) -> None: with self._cond: while True: # Wait for first event (or shutdown). self._cond.wait() if self.debounce_interval_seconds: # Wait for additional events (or shutdown) until the debounce interval passes. while self.should_keep_running(): if not self._cond.wait(timeout=self.debounce_interval_seconds): break if not self.should_keep_running(): break events = self._events self._events = [] self.events_callback(events) watchdog-6.0.0/src/watchdog/utils/patterns.py000066400000000000000000000071301471115752600212700ustar00rootroot00000000000000""":module: watchdog.utils.patterns :synopsis: Common wildcard searching/filtering functionality for files. :author: boris.staletic@gmail.com (Boris Staletic) :author: yesudeep@gmail.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) """ from __future__ import annotations # Non-pure path objects are only allowed on their respective OS's. # Thus, these utilities require "pure" path objects that don't access the filesystem. # Since pathlib doesn't have a `case_sensitive` parameter, we have to approximate it # by converting input paths to `PureWindowsPath` and `PurePosixPath` where: # - `PureWindowsPath` is always case-insensitive. # - `PurePosixPath` is always case-sensitive. 
# Reference: https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match from pathlib import PurePosixPath, PureWindowsPath from typing import TYPE_CHECKING if TYPE_CHECKING: from collections.abc import Iterator def _match_path( raw_path: str, included_patterns: set[str], excluded_patterns: set[str], *, case_sensitive: bool, ) -> bool: """Internal function same as :func:`match_path` but does not check arguments.""" path: PurePosixPath | PureWindowsPath if case_sensitive: path = PurePosixPath(raw_path) else: included_patterns = {pattern.lower() for pattern in included_patterns} excluded_patterns = {pattern.lower() for pattern in excluded_patterns} path = PureWindowsPath(raw_path) common_patterns = included_patterns & excluded_patterns if common_patterns: error = f"conflicting patterns `{common_patterns}` included and excluded" raise ValueError(error) return any(path.match(p) for p in included_patterns) and not any(path.match(p) for p in excluded_patterns) def filter_paths( paths: list[str], *, included_patterns: list[str] | None = None, excluded_patterns: list[str] | None = None, case_sensitive: bool = True, ) -> Iterator[str]: """Filters from a set of paths based on acceptable patterns and ignorable patterns. :param paths: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: A list of pathnames that matched the allowable patterns and passed through the ignored patterns. 
""" included = set(["*"] if included_patterns is None else included_patterns) excluded = set([] if excluded_patterns is None else excluded_patterns) for path in paths: if _match_path(path, included, excluded, case_sensitive=case_sensitive): yield path def match_any_paths( paths: list[str], *, included_patterns: list[str] | None = None, excluded_patterns: list[str] | None = None, case_sensitive: bool = True, ) -> bool: """Matches from a set of paths based on acceptable patterns and ignorable patterns. See ``filter_paths()`` for signature details. """ return any( filter_paths( paths, included_patterns=included_patterns, excluded_patterns=excluded_patterns, case_sensitive=case_sensitive, ), ) watchdog-6.0.0/src/watchdog/utils/platform.py000066400000000000000000000015651471115752600212620ustar00rootroot00000000000000from __future__ import annotations import sys PLATFORM_WINDOWS = "windows" PLATFORM_LINUX = "linux" PLATFORM_BSD = "bsd" PLATFORM_DARWIN = "darwin" PLATFORM_UNKNOWN = "unknown" def get_platform_name() -> str: if sys.platform.startswith("win"): return PLATFORM_WINDOWS if sys.platform.startswith("darwin"): return PLATFORM_DARWIN if sys.platform.startswith("linux"): return PLATFORM_LINUX if sys.platform.startswith(("dragonfly", "freebsd", "netbsd", "openbsd", "bsd")): return PLATFORM_BSD return PLATFORM_UNKNOWN __platform__ = get_platform_name() def is_linux() -> bool: return __platform__ == PLATFORM_LINUX def is_bsd() -> bool: return __platform__ == PLATFORM_BSD def is_darwin() -> bool: return __platform__ == PLATFORM_DARWIN def is_windows() -> bool: return __platform__ == PLATFORM_WINDOWS watchdog-6.0.0/src/watchdog/utils/process_watcher.py000066400000000000000000000016351471115752600226270ustar00rootroot00000000000000from __future__ import annotations import logging from typing import TYPE_CHECKING from watchdog.utils import BaseThread if TYPE_CHECKING: import subprocess from typing import Callable logger = logging.getLogger(__name__) class 
ProcessWatcher(BaseThread): def __init__(self, popen_obj: subprocess.Popen, process_termination_callback: Callable[[], None] | None) -> None: super().__init__() self.popen_obj = popen_obj self.process_termination_callback = process_termination_callback def run(self) -> None: while self.popen_obj.poll() is None: if self.stopped_event.wait(timeout=0.1): return try: if not self.stopped_event.is_set() and self.process_termination_callback: self.process_termination_callback() except Exception: logger.exception("Error calling process termination callback") watchdog-6.0.0/src/watchdog/version.py000066400000000000000000000005351471115752600177570ustar00rootroot00000000000000from __future__ import annotations # When updating this version number, please update the # ``docs/source/global.rst.inc`` file as well. VERSION_MAJOR = 6 VERSION_MINOR = 0 VERSION_BUILD = 0 VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD) VERSION_STRING = f"{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_BUILD}" __version__ = VERSION_INFO watchdog-6.0.0/src/watchdog/watchmedo.py000066400000000000000000000615541471115752600202550ustar00rootroot00000000000000""":module: watchdog.watchmedo :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) :synopsis: ``watchmedo`` shell script utility. 
""" from __future__ import annotations import errno import logging import os import os.path import sys import time from argparse import ArgumentParser, RawDescriptionHelpFormatter from io import StringIO from textwrap import dedent from typing import TYPE_CHECKING, Any from watchdog.utils import WatchdogShutdownError, load_class, platform from watchdog.version import VERSION_STRING if TYPE_CHECKING: from argparse import Namespace, _SubParsersAction from typing import Callable from watchdog.events import FileSystemEventHandler from watchdog.observers import ObserverType from watchdog.observers.api import BaseObserver logging.basicConfig(level=logging.INFO) CONFIG_KEY_TRICKS = "tricks" CONFIG_KEY_PYTHON_PATH = "python-path" class HelpFormatter(RawDescriptionHelpFormatter): """A nicer help formatter. Help for arguments can be indented and contain new lines. It will be de-dented and arguments in the help will be separated by a blank line for better readability. Source: https://github.com/httpie/httpie/blob/2423f89/httpie/cli/argparser.py#L31 """ def __init__(self, *args: Any, max_help_position: int = 6, **kwargs: Any) -> None: # A smaller indent for args help. kwargs["max_help_position"] = max_help_position super().__init__(*args, **kwargs) def __repr__(self) -> str: return f"<{type(self).__name__}>" def _split_lines(self, text: str, width: int) -> list[str]: text = dedent(text).strip() + "\n\n" return text.splitlines() epilog = """\ Copyright 2018-2024 Mickaël Schoentgen & contributors Copyright 2014-2018 Thomas Amland & contributors Copyright 2012-2014 Google, Inc. Copyright 2011-2012 Yesudeep Mangalapilly Licensed under the terms of the Apache license, version 2.0. 
Please see LICENSE in the source code for more information.""" cli = ArgumentParser(epilog=epilog, formatter_class=HelpFormatter) cli.add_argument("--version", action="version", version=VERSION_STRING) subparsers = cli.add_subparsers(dest="top_command") command_parsers = {} Argument = tuple[list[str], Any] def argument(*name_or_flags: str, **kwargs: Any) -> Argument: """Convenience function to properly format arguments to pass to the command decorator. """ return list(name_or_flags), kwargs def command( args: list[Argument], *, parent: _SubParsersAction[ArgumentParser] = subparsers, cmd_aliases: list[str] | None = None, ) -> Callable: """Decorator to define a new command in a sanity-preserving way. The function will be stored in the ``func`` variable when the parser parses arguments so that it can be called directly like so:: >>> args = cli.parse_args() >>> args.func(args) """ def decorator(func: Callable) -> Callable: name = func.__name__.replace("_", "-") desc = dedent(func.__doc__ or "") parser = parent.add_parser(name, aliases=cmd_aliases or [], description=desc, formatter_class=HelpFormatter) command_parsers[name] = parser verbosity_group = parser.add_mutually_exclusive_group() verbosity_group.add_argument("-q", "--quiet", dest="verbosity", action="append_const", const=-1) verbosity_group.add_argument("-v", "--verbose", dest="verbosity", action="append_const", const=1) for name_or_flags, kwargs in args: parser.add_argument(*name_or_flags, **kwargs) parser.set_defaults(func=func) return func return decorator def path_split(pathname_spec: str, *, separator: str = os.pathsep) -> list[str]: """Splits a pathname specification separated by an OS-dependent separator. :param pathname_spec: The pathname specification. :param separator: (OS Dependent) `:` on Unix and `;` on Windows or user-specified. 
""" return pathname_spec.split(separator) def add_to_sys_path(pathnames: list[str], *, index: int = 0) -> None: """Adds specified paths at specified index into the sys.path list. :param paths: A list of paths to add to the sys.path :param index: (Default 0) The index in the sys.path list where the paths will be added. """ for pathname in pathnames[::-1]: sys.path.insert(index, pathname) def load_config(tricks_file_pathname: str) -> dict: """Loads the YAML configuration from the specified file. :param tricks_file_path: The path to the tricks configuration file. :returns: A dictionary of configuration information. """ import yaml with open(tricks_file_pathname, "rb") as f: return yaml.safe_load(f.read()) def parse_patterns( patterns_spec: str, ignore_patterns_spec: str, *, separator: str = ";" ) -> tuple[list[str], list[str]]: """Parses pattern argument specs and returns a two-tuple of (patterns, ignore_patterns). """ patterns = patterns_spec.split(separator) ignore_patterns = ignore_patterns_spec.split(separator) if ignore_patterns == [""]: ignore_patterns = [] return patterns, ignore_patterns def observe_with( observer: BaseObserver, event_handler: FileSystemEventHandler, pathnames: list[str], *, recursive: bool, ) -> None: """Single observer thread with a scheduled path and event handler. :param observer: The observer thread. :param event_handler: Event handler which will be called in response to file system events. :param pathnames: A list of pathnames to monitor. :param recursive: ``True`` if recursive; ``False`` otherwise. """ for pathname in set(pathnames): observer.schedule(event_handler, pathname, recursive=recursive) observer.start() try: while True: time.sleep(1) except WatchdogShutdownError: observer.stop() observer.join() def schedule_tricks(observer: BaseObserver, tricks: list[dict], pathname: str, *, recursive: bool) -> None: """Schedules tricks with the specified observer and for the given watch path. 
:param observer: The observer thread into which to schedule the trick and watch. :param tricks: A list of tricks. :param pathname: A path name which should be watched. :param recursive: ``True`` if recursive; ``False`` otherwise. """ for trick in tricks: for name, value in trick.items(): trick_cls = load_class(name) handler = trick_cls(**value) trick_pathname = getattr(handler, "source_directory", None) or pathname observer.schedule(handler, trick_pathname, recursive=recursive) @command( [ argument("files", nargs="*", help="perform tricks from given file"), argument( "--python-path", default=".", help=f"Paths separated by {os.pathsep!r} to add to the Python path.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout (in seconds).", ), argument( "--recursive", action="store_true", default=True, help="Recursively monitor paths (defaults to True).", ), argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."), argument( "--debug-force-kqueue", action="store_true", help="[debug] Forces BSD kqueue(2).", ), argument( "--debug-force-winapi", action="store_true", help="[debug] Forces Windows API.", ), argument( "--debug-force-fsevents", action="store_true", help="[debug] Forces macOS FSEvents.", ), argument( "--debug-force-inotify", action="store_true", help="[debug] Forces Linux inotify(7).", ), ], cmd_aliases=["tricks"], ) def tricks_from(args: Namespace) -> None: """Command to execute tricks from a tricks configuration file.""" observer_cls: ObserverType if args.debug_force_polling: from watchdog.observers.polling import PollingObserver observer_cls = PollingObserver elif args.debug_force_kqueue: from watchdog.observers.kqueue import KqueueObserver observer_cls = KqueueObserver elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and platform.is_windows()): from watchdog.observers.read_directory_changes import WindowsApiObserver 
observer_cls = WindowsApiObserver elif args.debug_force_inotify: from watchdog.observers.inotify import InotifyObserver observer_cls = InotifyObserver elif args.debug_force_fsevents: from watchdog.observers.fsevents import FSEventsObserver observer_cls = FSEventsObserver else: # Automatically picks the most appropriate observer for the platform # on which it is running. from watchdog.observers import Observer observer_cls = Observer add_to_sys_path(path_split(args.python_path)) observers = [] for tricks_file in args.files: observer = observer_cls(timeout=args.timeout) if not os.path.exists(tricks_file): raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file) config = load_config(tricks_file) try: tricks = config[CONFIG_KEY_TRICKS] except KeyError as e: error = f"No {CONFIG_KEY_TRICKS!r} key specified in {tricks_file!r}." raise KeyError(error) from e if CONFIG_KEY_PYTHON_PATH in config: add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH]) dir_path = os.path.dirname(tricks_file) or os.path.relpath(os.getcwd()) schedule_tricks(observer, tricks, dir_path, recursive=args.recursive) observer.start() observers.append(observer) try: while True: time.sleep(1) except WatchdogShutdownError: for o in observers: o.unschedule_all() o.stop() for o in observers: o.join() @command( [ argument( "trick_paths", nargs="*", help="Dotted paths for all the tricks you want to generate.", ), argument( "--python-path", default=".", help=f"Paths separated by {os.pathsep!r} to add to the Python path.", ), argument( "--append-to-file", default=None, help=""" Appends the generated tricks YAML to a file. 
If not specified, prints to standard output.""", ), argument( "-a", "--append-only", dest="append_only", action="store_true", help=""" If --append-to-file is not specified, produces output for appending instead of a complete tricks YAML file.""", ), ], cmd_aliases=["generate-tricks-yaml"], ) def tricks_generate_yaml(args: Namespace) -> None: """Command to generate Yaml configuration for tricks named on the command line.""" import yaml python_paths = path_split(args.python_path) add_to_sys_path(python_paths) output = StringIO() for trick_path in args.trick_paths: trick_cls = load_class(trick_path) output.write(trick_cls.generate_yaml()) content = output.getvalue() output.close() header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths}) header += f"{CONFIG_KEY_TRICKS}:\n" if args.append_to_file is None: # Output to standard output. if not args.append_only: content = header + content sys.stdout.write(content) else: if not os.path.exists(args.append_to_file): content = header + content with open(args.append_to_file, "a", encoding="utf-8") as file: file.write(content) @command( [ argument( "directories", nargs="*", default=".", help="Directories to watch. 
(default: '.').", ), argument( "-p", "--pattern", "--patterns", dest="patterns", default="*", help="Matches event paths with these patterns (separated by ;).", ), argument( "-i", "--ignore-pattern", "--ignore-patterns", dest="ignore_patterns", default="", help="Ignores event paths with these patterns (separated by ;).", ), argument( "-D", "--ignore-directories", dest="ignore_directories", action="store_true", help="Ignores events for directories.", ), argument( "-R", "--recursive", dest="recursive", action="store_true", help="Monitors the directories recursively.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout.", ), argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."), argument( "--debug-force-kqueue", action="store_true", help="[debug] Forces BSD kqueue(2).", ), argument( "--debug-force-winapi", action="store_true", help="[debug] Forces Windows API.", ), argument( "--debug-force-fsevents", action="store_true", help="[debug] Forces macOS FSEvents.", ), argument( "--debug-force-inotify", action="store_true", help="[debug] Forces Linux inotify(7).", ), ], ) def log(args: Namespace) -> None: """Command to log file system events to the console.""" from watchdog.tricks import LoggerTrick patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) handler = LoggerTrick( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=args.ignore_directories, ) observer_cls: ObserverType if args.debug_force_polling: from watchdog.observers.polling import PollingObserver observer_cls = PollingObserver elif args.debug_force_kqueue: from watchdog.observers.kqueue import KqueueObserver observer_cls = KqueueObserver elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and platform.is_windows()): from watchdog.observers.read_directory_changes import WindowsApiObserver observer_cls = WindowsApiObserver elif 
args.debug_force_inotify: from watchdog.observers.inotify import InotifyObserver observer_cls = InotifyObserver elif args.debug_force_fsevents: from watchdog.observers.fsevents import FSEventsObserver observer_cls = FSEventsObserver else: # Automatically picks the most appropriate observer for the platform # on which it is running. from watchdog.observers import Observer observer_cls = Observer observer = observer_cls(timeout=args.timeout) observe_with(observer, handler, args.directories, recursive=args.recursive) @command( [ argument("directories", nargs="*", default=".", help="Directories to watch."), argument( "-c", "--command", dest="command", default=None, help=""" Shell command executed in response to matching events. These interpolation variables are available to your command string: ${watch_src_path} - event source path ${watch_dest_path} - event destination path (for moved events) ${watch_event_type} - event type ${watch_object} - 'file' or 'directory' Note: Please ensure you do not use double quotes (") to quote your command string. That will force your shell to interpolate before the command is processed by this command. 
Example: --command='echo "${watch_src_path}"' """, ), argument( "-p", "--pattern", "--patterns", dest="patterns", default="*", help="Matches event paths with these patterns (separated by ;).", ), argument( "-i", "--ignore-pattern", "--ignore-patterns", dest="ignore_patterns", default="", help="Ignores event paths with these patterns (separated by ;).", ), argument( "-D", "--ignore-directories", dest="ignore_directories", default=False, action="store_true", help="Ignores events for directories.", ), argument( "-R", "--recursive", dest="recursive", action="store_true", help="Monitors the directories recursively.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout.", ), argument( "-w", "--wait", dest="wait_for_process", action="store_true", help="Wait for process to finish to avoid multiple simultaneous instances.", ), argument( "-W", "--drop", dest="drop_during_process", action="store_true", help="Ignore events that occur while command is still being" " executed to avoid multiple simultaneous instances.", ), argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."), ], ) def shell_command(args: Namespace) -> None: """Command to execute shell commands in response to file system events.""" from watchdog.tricks import ShellCommandTrick if not args.command: args.command = None observer_cls: ObserverType if args.debug_force_polling: from watchdog.observers.polling import PollingObserver observer_cls = PollingObserver else: from watchdog.observers import Observer observer_cls = Observer patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) handler = ShellCommandTrick( args.command, patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=args.ignore_directories, wait_for_process=args.wait_for_process, drop_during_process=args.drop_during_process, ) observer = observer_cls(timeout=args.timeout) observe_with(observer, 
handler, args.directories, recursive=args.recursive) @command( [ argument("command", help="Long-running command to run in a subprocess."), argument( "command_args", metavar="arg", nargs="*", help=""" Command arguments. Note: Use -- before the command arguments, otherwise watchmedo will try to interpret them. """, ), argument( "-d", "--directory", dest="directories", metavar="DIRECTORY", action="append", help="Directory to watch. Use another -d or --directory option for each directory.", ), argument( "-p", "--pattern", "--patterns", dest="patterns", default="*", help="Matches event paths with these patterns (separated by ;).", ), argument( "-i", "--ignore-pattern", "--ignore-patterns", dest="ignore_patterns", default="", help="Ignores event paths with these patterns (separated by ;).", ), argument( "-D", "--ignore-directories", dest="ignore_directories", default=False, action="store_true", help="Ignores events for directories.", ), argument( "-R", "--recursive", dest="recursive", action="store_true", help="Monitors the directories recursively.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout.", ), argument( "--signal", dest="signal", default="SIGINT", help="Stop the subprocess with this signal (default SIGINT).", ), argument("--debug-force-polling", action="store_true", help="[debug] Forces polling."), argument( "--kill-after", dest="kill_after", default=10.0, type=float, help="When stopping, kill the subprocess after the specified timeout in seconds (default 10.0).", ), argument( "--debounce-interval", dest="debounce_interval", default=0.0, type=float, help="After a file change, Wait until the specified interval (in " "seconds) passes with no file changes, and only then restart.", ), argument( "--no-restart-on-command-exit", dest="restart_on_command_exit", default=True, action="store_false", help="Don't auto-restart the command after it exits.", ), ], ) def auto_restart(args: 
Namespace) -> None: """Command to start a long-running subprocess and restart it on matched events.""" observer_cls: ObserverType if args.debug_force_polling: from watchdog.observers.polling import PollingObserver observer_cls = PollingObserver else: from watchdog.observers import Observer observer_cls = Observer import signal from watchdog.tricks import AutoRestartTrick if not args.directories: args.directories = ["."] # Allow either signal name or number. stop_signal = getattr(signal, args.signal) if args.signal.startswith("SIG") else int(args.signal) # Handle termination signals by raising a semantic exception which will # allow us to gracefully unwind and stop the observer termination_signals = {signal.SIGTERM, signal.SIGINT} if hasattr(signal, "SIGHUP"): termination_signals.add(signal.SIGHUP) def handler_termination_signal(_signum: signal._SIGNUM, _frame: object) -> None: # Neuter all signals so that we don't attempt a double shutdown for signum in termination_signals: signal.signal(signum, signal.SIG_IGN) raise WatchdogShutdownError for signum in termination_signals: signal.signal(signum, handler_termination_signal) patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) command = [args.command] command.extend(args.command_args) handler = AutoRestartTrick( command, patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=args.ignore_directories, stop_signal=stop_signal, kill_after=args.kill_after, debounce_interval_seconds=args.debounce_interval, restart_on_command_exit=args.restart_on_command_exit, ) handler.start() observer = observer_cls(timeout=args.timeout) try: observe_with(observer, handler, args.directories, recursive=args.recursive) except WatchdogShutdownError: pass finally: handler.stop() class LogLevelError(Exception): pass def _get_log_level_from_args(args: Namespace) -> str: verbosity = sum(args.verbosity or []) if verbosity < -1: error = "-q/--quiet may be specified only once." 
raise LogLevelError(error) if verbosity > 2: error = "-v/--verbose may be specified up to 2 times." raise LogLevelError(error) return ["ERROR", "WARNING", "INFO", "DEBUG"][1 + verbosity] def main() -> int: """Entry-point function.""" args = cli.parse_args() if args.top_command is None: cli.print_help() return 1 try: log_level = _get_log_level_from_args(args) except LogLevelError as exc: print(f"Error: {exc.args[0]}", file=sys.stderr) # noqa:T201 command_parsers[args.top_command].print_help() return 1 logging.getLogger("watchdog").setLevel(log_level) try: args.func(args) except KeyboardInterrupt: return 130 return 0 if __name__ == "__main__": sys.exit(main()) watchdog-6.0.0/src/watchdog_fsevents.c000066400000000000000000001002521471115752600177760ustar00rootroot00000000000000/** * watchdog_fsevents.c: Python-C bridge to the OS X FSEvents API. * * Copyright 2018-2024 Mickaël Schoentgen & contributors * Copyright 2012-2018 Google, Inc. * Copyright 2011-2012 Yesudeep Mangalapilly * Copyright 2010-2011 Malthe Borch */ #include #include #include #include #include #include /* Compatibility; since fsevents won't set these on earlier macOS versions the properties will always be False */ #if MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_13 #error Watchdog module requires at least macOS 10.13 #endif /* Convenience macros to make code more readable. */ #define G_NOT(o) !o #define G_IS_NULL(o) o == NULL #define G_IS_NOT_NULL(o) o != NULL #define G_RETURN_NULL_IF_NULL(o) do { if (NULL == o) { return NULL; } } while (0) #define G_RETURN_NULL_IF(condition) do { if (condition) { return NULL; } } while (0) #define G_RETURN_NULL_IF_NOT(condition) do { if (!condition) { return NULL; } } while (0) #define G_RETURN_IF(condition) do { if (condition) { return; } } while (0) #define G_RETURN_IF_NOT(condition) do { if (!condition) { return; } } while (0) #define UNUSED(x) (void)x /* Error message definitions. */ #define ERROR_CANNOT_CALL_CALLBACK "Unable to call Python callback." 
/* Other information. */ #define MODULE_NAME "_watchdog_fsevents" /** * Event stream callback contextual information passed to * our ``watchdog_FSEventStreamCallback`` function by the * FSEvents API whenever an event occurs. */ typedef struct { /** * A pointer to the Python callback which will * will in turn be called by our event handler * with event information. The Python callback * function must accept 2 arguments, both of which * are Python lists:: * * def python_callback(event_paths, event_inodes, event_flags, event_ids): * pass */ PyObject *python_callback; /** * A pointer to the associated ``FSEventStream`` * instance. */ FSEventStreamRef stream_ref; /** * A pointer to the associated ``CFRunLoop`` * instance. */ CFRunLoopRef run_loop_ref; /** * A pointer to the state of the Python thread. */ PyThreadState *thread_state; } StreamCallbackInfo; /** * NativeEvent type so that we don't need to expose the FSEvents constants to Python land */ typedef struct { PyObject_HEAD const char *path; PyObject *inode; FSEventStreamEventFlags flags; FSEventStreamEventId id; } NativeEventObject; PyObject* NativeEventRepr(PyObject* instance) { NativeEventObject *self = (NativeEventObject*)instance; return PyUnicode_FromFormat( "NativeEvent(path=\"%s\", inode=%S, flags=%x, id=%llu)", self->path, self->inode, self->flags, self->id ); } PyObject* NativeEventTypeFlags(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; return PyLong_FromLong(self->flags); } PyObject* NativeEventTypePath(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; return PyUnicode_FromString(self->path); } PyObject* NativeEventTypeInode(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; Py_INCREF(self->inode); return self->inode; } PyObject* NativeEventTypeID(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self 
= (NativeEventObject*)instance; return PyLong_FromLong(self->id); } PyObject* NativeEventTypeIsCoalesced(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; // if any of these bitmasks match then we have a coalesced event and need to do sys calls to figure out what happened FSEventStreamEventFlags coalesced_masks[] = { kFSEventStreamEventFlagItemCreated | kFSEventStreamEventFlagItemRemoved, kFSEventStreamEventFlagItemCreated | kFSEventStreamEventFlagItemRenamed, kFSEventStreamEventFlagItemRemoved | kFSEventStreamEventFlagItemRenamed, }; for (size_t i = 0; i < sizeof(coalesced_masks) / sizeof(FSEventStreamEventFlags); ++i) { if ((self->flags & coalesced_masks[i]) == coalesced_masks[i]) { Py_RETURN_TRUE; } } Py_RETURN_FALSE; } #define FLAG_PROPERTY(suffix, flag) \ PyObject* NativeEventType##suffix(PyObject* instance, void* closure) \ { \ UNUSED(closure); \ NativeEventObject *self = (NativeEventObject*)instance; \ if (self->flags & flag) { \ Py_RETURN_TRUE; \ } \ Py_RETURN_FALSE; \ } FLAG_PROPERTY(IsMustScanSubDirs, kFSEventStreamEventFlagMustScanSubDirs) FLAG_PROPERTY(IsUserDropped, kFSEventStreamEventFlagUserDropped) FLAG_PROPERTY(IsKernelDropped, kFSEventStreamEventFlagKernelDropped) FLAG_PROPERTY(IsEventIdsWrapped, kFSEventStreamEventFlagEventIdsWrapped) FLAG_PROPERTY(IsHistoryDone, kFSEventStreamEventFlagHistoryDone) FLAG_PROPERTY(IsRootChanged, kFSEventStreamEventFlagRootChanged) FLAG_PROPERTY(IsMount, kFSEventStreamEventFlagMount) FLAG_PROPERTY(IsUnmount, kFSEventStreamEventFlagUnmount) FLAG_PROPERTY(IsCreated, kFSEventStreamEventFlagItemCreated) FLAG_PROPERTY(IsRemoved, kFSEventStreamEventFlagItemRemoved) FLAG_PROPERTY(IsInodeMetaMod, kFSEventStreamEventFlagItemInodeMetaMod) FLAG_PROPERTY(IsRenamed, kFSEventStreamEventFlagItemRenamed) FLAG_PROPERTY(IsModified, kFSEventStreamEventFlagItemModified) FLAG_PROPERTY(IsItemFinderInfoMod, kFSEventStreamEventFlagItemFinderInfoMod) FLAG_PROPERTY(IsChangeOwner, 
kFSEventStreamEventFlagItemChangeOwner) FLAG_PROPERTY(IsXattrMod, kFSEventStreamEventFlagItemXattrMod) FLAG_PROPERTY(IsFile, kFSEventStreamEventFlagItemIsFile) FLAG_PROPERTY(IsDirectory, kFSEventStreamEventFlagItemIsDir) FLAG_PROPERTY(IsSymlink, kFSEventStreamEventFlagItemIsSymlink) FLAG_PROPERTY(IsOwnEvent, kFSEventStreamEventFlagOwnEvent) FLAG_PROPERTY(IsHardlink, kFSEventStreamEventFlagItemIsHardlink) FLAG_PROPERTY(IsLastHardlink, kFSEventStreamEventFlagItemIsLastHardlink) FLAG_PROPERTY(IsCloned, kFSEventStreamEventFlagItemCloned) static int NativeEventInit(NativeEventObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"path", "inode", "flags", "id", NULL}; self->inode = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|sOIL", kwlist, &self->path, &self->inode, &self->flags, &self->id)) { return -1; } Py_INCREF(self->inode); return 0; } static void NativeEventDealloc(NativeEventObject *self) { Py_XDECREF(self->inode); } static PyGetSetDef NativeEventProperties[] = { {"flags", NativeEventTypeFlags, NULL, "The raw mask of flags as returned by FSEvents", NULL}, {"path", NativeEventTypePath, NULL, "The path for which this event was generated", NULL}, {"inode", NativeEventTypeInode, NULL, "The inode for which this event was generated", NULL}, {"event_id", NativeEventTypeID, NULL, "The id of the generated event", NULL}, {"is_coalesced", NativeEventTypeIsCoalesced, NULL, "True if multiple ambiguous changes to the monitored path happened", NULL}, {"must_scan_subdirs", NativeEventTypeIsMustScanSubDirs, NULL, "True if application must rescan all subdirectories", NULL}, {"is_user_dropped", NativeEventTypeIsUserDropped, NULL, "True if a failure during event buffering occurred", NULL}, {"is_kernel_dropped", NativeEventTypeIsKernelDropped, NULL, "True if a failure during event buffering occurred", NULL}, {"is_event_ids_wrapped", NativeEventTypeIsEventIdsWrapped, NULL, "True if event_id wrapped around", NULL}, {"is_history_done", 
NativeEventTypeIsHistoryDone, NULL, "True if all historical events are done", NULL}, {"is_root_changed", NativeEventTypeIsRootChanged, NULL, "True if a change to one of the directories along the path to one of the directories you watch occurred", NULL}, {"is_mount", NativeEventTypeIsMount, NULL, "True if a volume is mounted underneath one of the paths being monitored", NULL}, {"is_unmount", NativeEventTypeIsUnmount, NULL, "True if a volume is unmounted underneath one of the paths being monitored", NULL}, {"is_created", NativeEventTypeIsCreated, NULL, "True if self.path was created on the filesystem", NULL}, {"is_removed", NativeEventTypeIsRemoved, NULL, "True if self.path was removed from the filesystem", NULL}, {"is_inode_meta_mod", NativeEventTypeIsInodeMetaMod, NULL, "True if meta data for self.path was modified ", NULL}, {"is_renamed", NativeEventTypeIsRenamed, NULL, "True if self.path was renamed on the filesystem", NULL}, {"is_modified", NativeEventTypeIsModified, NULL, "True if self.path was modified", NULL}, {"is_item_finder_info_modified", NativeEventTypeIsItemFinderInfoMod, NULL, "True if FinderInfo for self.path was modified", NULL}, {"is_owner_change", NativeEventTypeIsChangeOwner, NULL, "True if self.path had its ownership changed", NULL}, {"is_xattr_mod", NativeEventTypeIsXattrMod, NULL, "True if extended attributes for self.path were modified ", NULL}, {"is_file", NativeEventTypeIsFile, NULL, "True if self.path is a file", NULL}, {"is_directory", NativeEventTypeIsDirectory, NULL, "True if self.path is a directory", NULL}, {"is_symlink", NativeEventTypeIsSymlink, NULL, "True if self.path is a symbolic link", NULL}, {"is_own_event", NativeEventTypeIsOwnEvent, NULL, "True if the event originated from our own process", NULL}, {"is_hardlink", NativeEventTypeIsHardlink, NULL, "True if self.path is a hard link", NULL}, {"is_last_hardlink", NativeEventTypeIsLastHardlink, NULL, "True if self.path was the last hard link", NULL}, {"is_cloned", 
NativeEventTypeIsCloned, NULL, "True if self.path is a clone or was cloned", NULL}, {NULL, NULL, NULL, NULL, NULL}, }; static PyTypeObject NativeEventType = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "_watchdog_fsevents.NativeEvent", .tp_doc = "A wrapper around native FSEvents events", .tp_basicsize = sizeof(NativeEventObject), .tp_itemsize = 0, .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, .tp_new = PyType_GenericNew, .tp_getset = NativeEventProperties, .tp_init = (initproc) NativeEventInit, .tp_repr = (reprfunc) NativeEventRepr, .tp_dealloc = (destructor) NativeEventDealloc, }; /** * Dictionary to keep track of which run loop * belongs to which emitter thread. */ PyObject *thread_to_run_loop = NULL; /** * Dictionary to keep track of which stream * belongs to which watch. */ PyObject *watch_to_stream = NULL; /** * PyCapsule destructor. */ static void watchdog_pycapsule_destructor(PyObject *ptr) { void *p = PyCapsule_GetPointer(ptr, NULL); if (p) { PyMem_Free(p); } } /** * Converts a ``CFStringRef`` to a Python string object. * * :param cf_string: * A ``CFStringRef``. * :returns: * A Python unicode or utf-8 encoded bytestring object. */ PyObject * CFString_AsPyUnicode(CFStringRef cf_string_ref) { if (G_IS_NULL(cf_string_ref)) { return PyUnicode_FromString(""); } PyObject *py_string; const char *c_string_ptr = CFStringGetCStringPtr(cf_string_ref, kCFStringEncodingUTF8); if (G_IS_NULL(c_string_ptr)) { CFIndex length = CFStringGetLength(cf_string_ref); CFIndex max_size = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1; char *buffer = (char *)malloc(max_size); if (CFStringGetCString(cf_string_ref, buffer, max_size, kCFStringEncodingUTF8)) { py_string = PyUnicode_FromString(buffer); } else { py_string = PyUnicode_FromString(""); } free(buffer); } else { py_string = PyUnicode_FromString(c_string_ptr); } return py_string; } /** * Converts a ``CFNumberRef`` to a Python string object. * * :param cf_number: * A ``CFNumberRef``. 
* :returns: * A Python unicode or utf-8 encoded bytestring object. */ PyObject * CFNumberRef_AsPyLong(CFNumberRef cf_number) { long c_int; PyObject *py_long; CFNumberGetValue(cf_number, kCFNumberSInt64Type, &c_int); py_long = PyLong_FromLong(c_int); return py_long; } /** * This is the callback passed to the FSEvents API, which calls * the Python callback function, in turn, by passing in event data * as Python objects. * * :param stream_ref: * A pointer to an ``FSEventStream`` instance. * :param stream_callback_info_ref: * Callback context information passed by the FSEvents API. * This contains a reference to the Python callback that this * function calls in turn with information about the events. * :param num_events: * An unsigned integer representing the number of events * captured by the FSEvents API. * :param event_paths: * An array of NUL-terminated C strings representing event paths. * :param event_flags: * An array of ``FSEventStreamEventFlags`` unsigned integral * mask values. * :param event_ids: * An array of 64-bit unsigned integers representing event * identifiers. */ static void watchdog_FSEventStreamCallback(ConstFSEventStreamRef stream_ref, StreamCallbackInfo *stream_callback_info_ref, size_t num_events, CFArrayRef event_path_info_array_ref, const FSEventStreamEventFlags event_flags[], const FSEventStreamEventId event_ids[]) { UNUSED(stream_ref); size_t i = 0; CFDictionaryRef path_info_dict; CFStringRef cf_path; CFNumberRef cf_inode; PyObject *callback_result = NULL; PyObject *path = NULL; PyObject *inode = NULL; PyObject *id = NULL; PyObject *flags = NULL; PyObject *py_event_flags = NULL; PyObject *py_event_ids = NULL; PyObject *py_event_paths = NULL; PyObject *py_event_inodes = NULL; PyThreadState *saved_thread_state = NULL; /* Acquire interpreter lock and save original thread state. 
*/ PyGILState_STATE gil_state = PyGILState_Ensure(); saved_thread_state = PyThreadState_Swap(stream_callback_info_ref->thread_state); /* Convert event flags and paths to Python ints and strings. */ py_event_paths = PyList_New(num_events); py_event_inodes = PyList_New(num_events); py_event_flags = PyList_New(num_events); py_event_ids = PyList_New(num_events); if (G_NOT(py_event_paths && py_event_inodes && py_event_flags && py_event_ids)) { Py_XDECREF(py_event_paths); Py_XDECREF(py_event_inodes); Py_XDECREF(py_event_ids); Py_XDECREF(py_event_flags); return /*NULL*/; } for (i = 0; i < num_events; ++i) { id = PyLong_FromLongLong(event_ids[i]); flags = PyLong_FromLong(event_flags[i]); path_info_dict = CFArrayGetValueAtIndex(event_path_info_array_ref, i); cf_path = CFDictionaryGetValue(path_info_dict, kFSEventStreamEventExtendedDataPathKey); cf_inode = CFDictionaryGetValue(path_info_dict, kFSEventStreamEventExtendedFileIDKey); path = CFString_AsPyUnicode(cf_path); if (G_IS_NOT_NULL(cf_inode)) { inode = CFNumberRef_AsPyLong(cf_inode); } else { Py_INCREF(Py_None); inode = Py_None; } if (G_NOT(path && inode && flags && id)) { Py_DECREF(py_event_paths); Py_DECREF(py_event_inodes); Py_DECREF(py_event_ids); Py_DECREF(py_event_flags); return /*NULL*/; } PyList_SET_ITEM(py_event_paths, i, path); PyList_SET_ITEM(py_event_inodes, i, inode); PyList_SET_ITEM(py_event_flags, i, flags); PyList_SET_ITEM(py_event_ids, i, id); } /* Call the Python callback function supplied by the stream information * struct. 
The Python callback function should accept two arguments, * both being Python lists: * * def python_callback(event_paths, event_flags, event_ids): * pass */ callback_result = \ PyObject_CallFunction(stream_callback_info_ref->python_callback, "OOOO", py_event_paths, py_event_inodes, py_event_flags, py_event_ids); if (G_IS_NULL(callback_result)) { if (G_NOT(PyErr_Occurred())) { PyErr_SetString(PyExc_ValueError, ERROR_CANNOT_CALL_CALLBACK); } CFRunLoopStop(stream_callback_info_ref->run_loop_ref); } /* Release the lock and restore thread state. */ PyThreadState_Swap(saved_thread_state); PyGILState_Release(gil_state); } /** * Converts a Python string object to an UTF-8 encoded ``CFStringRef``. * * :param py_string: * A Python unicode or utf-8 encoded bytestring object. * :returns: * A new ``CFStringRef`` with the contents of ``py_string``, or ``NULL`` if an error occurred. */ CFStringRef PyString_AsUTF8EncodedCFStringRef(PyObject *py_string) { CFStringRef cf_string = NULL; if (PyUnicode_Check(py_string)) { PyObject* helper = PyUnicode_AsUTF8String(py_string); if (!helper) { return NULL; } cf_string = CFStringCreateWithCString(kCFAllocatorDefault, PyBytes_AS_STRING(helper), kCFStringEncodingUTF8); Py_DECREF(helper); } else if (PyBytes_Check(py_string)) { PyObject *utf8 = PyUnicode_FromEncodedObject(py_string, NULL, "strict"); if (!utf8) { return NULL; } Py_DECREF(utf8); cf_string = CFStringCreateWithCString(kCFAllocatorDefault, PyBytes_AS_STRING(py_string), kCFStringEncodingUTF8); } else { PyErr_SetString(PyExc_TypeError, "Path to watch must be a string or a UTF-8 encoded bytes object."); return NULL; } return cf_string; } /** * Converts a list of Python strings to a ``CFMutableArray`` of * UTF-8 encoded ``CFString`` instances and returns a pointer to * the array. * * :param py_string_list: * List of Python strings. * :returns: * A pointer to ``CFMutableArray`` (that is, a * ``CFMutableArrayRef``) of UTF-8 encoded ``CFString`` * instances. 
*/ static CFMutableArrayRef watchdog_CFMutableArrayRef_from_PyStringList(PyObject *py_string_list) { Py_ssize_t i = 0; Py_ssize_t string_list_size = 0; CFMutableArrayRef array_of_cf_string = NULL; CFStringRef cf_string = NULL; PyObject *py_string = NULL; G_RETURN_NULL_IF_NULL(py_string_list); string_list_size = PyList_Size(py_string_list); /* Allocate a CFMutableArray. */ array_of_cf_string = CFArrayCreateMutable(kCFAllocatorDefault, 1, &kCFTypeArrayCallBacks); G_RETURN_NULL_IF_NULL(array_of_cf_string); /* Loop through the Python string list and copy strings to the * CFString array list. */ for (i = 0; i < string_list_size; ++i) { py_string = PyList_GetItem(py_string_list, i); G_RETURN_NULL_IF_NULL(py_string); cf_string = PyString_AsUTF8EncodedCFStringRef(py_string); G_RETURN_NULL_IF_NULL(cf_string); CFArraySetValueAtIndex(array_of_cf_string, i, cf_string); CFRelease(cf_string); } return array_of_cf_string; } /** * Creates an instance of ``FSEventStream`` and returns a pointer * to the instance. * * :param stream_callback_info_ref: * Pointer to the callback context information that will be * passed by the FSEvents API to the callback handler specified * by the ``callback`` argument to this function. This * information contains a reference to the Python callback that * it must call in turn passing on the event information * as Python objects to the the Python callback. * :param py_paths: * A Python list of Python strings representing path names * to monitor. * :param callback: * A function pointer of type ``FSEventStreamCallback``. * :returns: * A pointer to an ``FSEventStream`` instance (that is, it returns * an ``FSEventStreamRef``). */ static FSEventStreamRef watchdog_FSEventStreamCreate(StreamCallbackInfo *stream_callback_info_ref, PyObject *py_paths, FSEventStreamCallback callback) { CFAbsoluteTime stream_latency = 0.01; CFMutableArrayRef paths = NULL; FSEventStreamRef stream_ref = NULL; /* Check arguments. 
*/ G_RETURN_NULL_IF_NULL(py_paths); G_RETURN_NULL_IF_NULL(callback); /* Convert the Python paths list to a CFMutableArray. */ paths = watchdog_CFMutableArrayRef_from_PyStringList(py_paths); G_RETURN_NULL_IF_NULL(paths); /* Create the event stream. */ FSEventStreamContext stream_context = { 0, stream_callback_info_ref, NULL, NULL, NULL }; stream_ref = FSEventStreamCreate(kCFAllocatorDefault, callback, &stream_context, paths, kFSEventStreamEventIdSinceNow, stream_latency, kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents | kFSEventStreamCreateFlagWatchRoot | kFSEventStreamCreateFlagUseExtendedData | kFSEventStreamCreateFlagUseCFTypes); CFRelease(paths); return stream_ref; } PyDoc_STRVAR(watchdog_add_watch__doc__, MODULE_NAME ".add_watch(emitter_thread, watch, callback, paths) -> None\ \nAdds a watch into the event loop for the given emitter thread.\n\n\ :param emitter_thread:\n\ The emitter thread.\n\ :param watch:\n\ The watch to add.\n\ :param callback:\n\ The callback function to call when an event occurs.\n\n\ Example::\n\n\ def callback(paths, flags, ids):\n\ for path, flag, event_id in zip(paths, flags, ids):\n\ print(\"%d: %s=%ul\" % (event_id, path, flag))\n\ :param paths:\n\ A list of paths to monitor.\n"); static PyObject * watchdog_add_watch(PyObject *self, PyObject *args) { UNUSED(self); FSEventStreamRef stream_ref = NULL; StreamCallbackInfo *stream_callback_info_ref = NULL; CFRunLoopRef run_loop_ref = NULL; PyObject *emitter_thread = NULL; PyObject *watch = NULL; PyObject *paths_to_watch = NULL; PyObject *python_callback = NULL; PyObject *value = NULL; /* Ensure all arguments are received. */ G_RETURN_NULL_IF_NOT(PyArg_ParseTuple(args, "OOOO:schedule", &emitter_thread, &watch, &python_callback, &paths_to_watch)); /* Watch must not already be scheduled. 
*/ if(PyDict_Contains(watch_to_stream, watch) == 1) { PyErr_Format(PyExc_RuntimeError, "Cannot add watch %S - it is already scheduled", watch); return NULL; } /* Create an instance of the callback information structure. */ stream_callback_info_ref = PyMem_New(StreamCallbackInfo, 1); if(stream_callback_info_ref == NULL) { PyErr_SetString(PyExc_SystemError, "Failed allocating stream callback info"); return NULL; } /* Create an FSEvent stream and * Save the stream reference to the global watch-to-stream dictionary. */ stream_ref = watchdog_FSEventStreamCreate(stream_callback_info_ref, paths_to_watch, (FSEventStreamCallback) &watchdog_FSEventStreamCallback); if (!stream_ref) { PyMem_Del(stream_callback_info_ref); PyErr_SetString(PyExc_RuntimeError, "Failed creating fsevent stream"); return NULL; } value = PyCapsule_New(stream_ref, NULL, watchdog_pycapsule_destructor); if (!value || !PyCapsule_IsValid(value, NULL)) { PyMem_Del(stream_callback_info_ref); FSEventStreamInvalidate(stream_ref); FSEventStreamRelease(stream_ref); return NULL; } PyDict_SetItem(watch_to_stream, watch, value); /* Get a reference to the runloop for the emitter thread * or to the current runloop. */ value = PyDict_GetItem(thread_to_run_loop, emitter_thread); if (G_IS_NULL(value)) { run_loop_ref = CFRunLoopGetCurrent(); } else { run_loop_ref = PyCapsule_GetPointer(value, NULL); } /* Schedule the stream with the obtained runloop. */ FSEventStreamScheduleWithRunLoop(stream_ref, run_loop_ref, kCFRunLoopDefaultMode); /* Set the stream information for the callback. * This data will be passed to our watchdog_FSEventStreamCallback function * by the FSEvents API whenever an event occurs. */ stream_callback_info_ref->python_callback = python_callback; stream_callback_info_ref->stream_ref = stream_ref; stream_callback_info_ref->run_loop_ref = run_loop_ref; stream_callback_info_ref->thread_state = PyThreadState_Get(); Py_INCREF(python_callback); /* Start the event stream. 
*/ if (G_NOT(FSEventStreamStart(stream_ref))) { FSEventStreamInvalidate(stream_ref); FSEventStreamRelease(stream_ref); // There's no documentation on _why_ this might fail - "it ought to always succeed". But if it fails the // documentation says to "fall back to performing recursive scans of the directories [...] as appropriate". PyErr_SetString(PyExc_SystemError, "Cannot start fsevents stream. Use a kqueue or polling observer instead."); return NULL; } Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(watchdog_read_events__doc__, MODULE_NAME ".read_events(emitter_thread) -> None\n\ Blocking function that runs an event loop associated with an emitter thread.\n\n\ :param emitter_thread:\n\ The emitter thread for which the event loop will be run.\n"); static PyObject * watchdog_read_events(PyObject *self, PyObject *args) { UNUSED(self); CFRunLoopRef run_loop_ref = NULL; PyObject *emitter_thread = NULL; PyObject *value = NULL; G_RETURN_NULL_IF_NOT(PyArg_ParseTuple(args, "O:loop", &emitter_thread)); // PyEval_InitThreads() does nothing as of Python 3.7 and is deprecated in 3.9. // https://docs.python.org/3/c-api/init.html#c.PyEval_InitThreads #if PY_VERSION_HEX < 0x030700f0 PyEval_InitThreads(); #endif /* Allocate information and store thread state. */ value = PyDict_GetItem(thread_to_run_loop, emitter_thread); if (G_IS_NULL(value)) { run_loop_ref = CFRunLoopGetCurrent(); value = PyCapsule_New(run_loop_ref, NULL, watchdog_pycapsule_destructor); PyDict_SetItem(thread_to_run_loop, emitter_thread, value); Py_INCREF(emitter_thread); Py_INCREF(value); } /* No timeout, block until events. */ Py_BEGIN_ALLOW_THREADS; CFRunLoopRun(); Py_END_ALLOW_THREADS; /* Clean up state information. 
*/ if (PyDict_DelItem(thread_to_run_loop, emitter_thread) == 0) { Py_DECREF(emitter_thread); Py_INCREF(value); } G_RETURN_NULL_IF(PyErr_Occurred()); Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(watchdog_flush_events__doc__, MODULE_NAME ".flush_events(watch) -> None\n\ Flushes events for the watch.\n\n\ :param watch:\n\ The watch to flush.\n"); static PyObject * watchdog_flush_events(PyObject *self, PyObject *watch) { UNUSED(self); PyObject *value = PyDict_GetItem(watch_to_stream, watch); FSEventStreamRef stream_ref = PyCapsule_GetPointer(value, NULL); FSEventStreamFlushSync(stream_ref); Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(watchdog_remove_watch__doc__, MODULE_NAME ".remove_watch(watch) -> None\n\ Removes a watch from the event loop.\n\n\ :param watch:\n\ The watch to remove.\n"); static PyObject * watchdog_remove_watch(PyObject *self, PyObject *watch) { UNUSED(self); PyObject *streamref_capsule = PyDict_GetItem(watch_to_stream, watch); if (!streamref_capsule) { // A watch might have been removed explicitly before, in which case we can simply early out. Py_RETURN_NONE; } PyDict_DelItem(watch_to_stream, watch); FSEventStreamRef stream_ref = PyCapsule_GetPointer(streamref_capsule, NULL); FSEventStreamStop(stream_ref); FSEventStreamInvalidate(stream_ref); FSEventStreamRelease(stream_ref); Py_RETURN_NONE; } PyDoc_STRVAR(watchdog_stop__doc__, MODULE_NAME ".stop(emitter_thread) -> None\n\ Stops running the event loop from the specified thread.\n\n\ :param emitter_thread:\n\ The thread for which the event loop will be stopped.\n"); static PyObject * watchdog_stop(PyObject *self, PyObject *emitter_thread) { UNUSED(self); PyObject *value = PyDict_GetItem(thread_to_run_loop, emitter_thread); if (G_IS_NULL(value)) { goto success; } CFRunLoopRef run_loop_ref = PyCapsule_GetPointer(value, NULL); G_RETURN_NULL_IF(PyErr_Occurred()); /* Stop the run loop. 
*/ if (G_IS_NOT_NULL(run_loop_ref)) { CFRunLoopStop(run_loop_ref); } success: Py_INCREF(Py_None); return Py_None; } /****************************************************************************** * Module initialization. *****************************************************************************/ PyDoc_STRVAR(watchdog_fsevents_module__doc__, "Low-level FSEvents Python/C API bridge."); static PyMethodDef watchdog_fsevents_methods[] = { {"add_watch", watchdog_add_watch, METH_VARARGS, watchdog_add_watch__doc__}, {"read_events", watchdog_read_events, METH_VARARGS, watchdog_read_events__doc__}, {"flush_events", watchdog_flush_events, METH_O, watchdog_flush_events__doc__}, {"remove_watch", watchdog_remove_watch, METH_O, watchdog_remove_watch__doc__}, /* Aliases for compatibility with macfsevents. */ {"schedule", watchdog_add_watch, METH_VARARGS, "Alias for add_watch."}, {"loop", watchdog_read_events, METH_VARARGS, "Alias for read_events."}, {"unschedule", watchdog_remove_watch, METH_O, "Alias for remove_watch."}, {"stop", watchdog_stop, METH_O, watchdog_stop__doc__}, {NULL, NULL, 0, NULL}, }; /** * Initialize the module globals. */ static void watchdog_module_init(void) { thread_to_run_loop = PyDict_New(); watch_to_stream = PyDict_New(); } /** * Adds various attributes to the Python module. * * :param module: * A pointer to the Python module object to inject * the attributes into. */ static void watchdog_module_add_attributes(PyObject *module) { PyObject *version_tuple = Py_BuildValue("(iii)", WATCHDOG_VERSION_MAJOR, WATCHDOG_VERSION_MINOR, WATCHDOG_VERSION_BUILD); PyModule_AddIntConstant(module, "POLLIN", kCFFileDescriptorReadCallBack); PyModule_AddIntConstant(module, "POLLOUT", kCFFileDescriptorWriteCallBack); /* Adds version information. 
*/ PyModule_AddObject(module, "__version__", version_tuple); PyModule_AddObject(module, "version_string", Py_BuildValue("s", WATCHDOG_VERSION_STRING)); } static struct PyModuleDef watchdog_fsevents_module = { PyModuleDef_HEAD_INIT, MODULE_NAME, watchdog_fsevents_module__doc__, -1, watchdog_fsevents_methods, NULL, /* m_slots */ NULL, /* m_traverse */ 0, /* m_clear */ NULL /* m_free */ }; /** * Initialize the Python 3.x module. */ PyMODINIT_FUNC PyInit__watchdog_fsevents(void){ G_RETURN_NULL_IF(PyType_Ready(&NativeEventType) < 0); PyObject *module = PyModule_Create(&watchdog_fsevents_module); G_RETURN_NULL_IF_NULL(module); Py_INCREF(&NativeEventType); if (PyModule_AddObject(module, "NativeEvent", (PyObject*)&NativeEventType) < 0) { Py_DECREF(&NativeEventType); Py_DECREF(module); return NULL; } watchdog_module_add_attributes(module); watchdog_module_init(); return module; } watchdog-6.0.0/tests/000077500000000000000000000000001471115752600144705ustar00rootroot00000000000000watchdog-6.0.0/tests/__init__.py000066400000000000000000000000001471115752600165670ustar00rootroot00000000000000watchdog-6.0.0/tests/conftest.py000066400000000000000000000040431471115752600166700ustar00rootroot00000000000000from __future__ import annotations import contextlib import gc import os import threading from functools import partial import pytest from .utils import ExpectEvent, Helper, P, StartWatching, TestEventQueue @pytest.fixture def p(tmpdir, *args): """ Convenience function to join the temporary directory path with the provided arguments. """ return partial(os.path.join, tmpdir) @pytest.fixture(autouse=True) def _no_thread_leaks(): """ Fail on thread leak. We do not use pytest-threadleak because it is not reliable. 
""" old_thread_count = threading.active_count() yield gc.collect() # Clear the stuff from other function-level fixtures assert threading.active_count() == old_thread_count # Only previously existing threads @pytest.fixture(autouse=True) def _no_warnings(recwarn): """Fail on warning.""" yield warnings = [] for warning in recwarn: # pragma: no cover message = str(warning.message) filename = warning.filename if ( "Not importing directory" in message or "Using or importing the ABCs" in message or "dns.hash module will be removed in future versions" in message or "is still running" in message or "eventlet" in filename ): continue warnings.append(f"{warning.filename}:{warning.lineno} {warning.message}") assert not warnings, warnings @pytest.fixture(name="helper") def helper_fixture(tmpdir): with contextlib.closing(Helper(tmp=os.fspath(tmpdir))) as helper: yield helper @pytest.fixture(name="p") def p_fixture(helper: Helper) -> P: return helper.joinpath @pytest.fixture(name="event_queue") def event_queue_fixture(helper: Helper) -> TestEventQueue: return helper.event_queue @pytest.fixture(name="start_watching") def start_watching_fixture(helper: Helper) -> StartWatching: return helper.start_watching @pytest.fixture(name="expect_event") def expect_event_fixture(helper: Helper) -> ExpectEvent: return helper.expect_event watchdog-6.0.0/tests/isolated/000077500000000000000000000000001471115752600162745ustar00rootroot00000000000000watchdog-6.0.0/tests/isolated/__init__.py000066400000000000000000000000001471115752600203730ustar00rootroot00000000000000watchdog-6.0.0/tests/isolated/eventlet_observer_stops.py000066400000000000000000000014671471115752600236430ustar00rootroot00000000000000if __name__ == "__main__": import eventlet eventlet.monkey_patch() import signal import sys import tempfile from watchdog.events import LoggingEventHandler from watchdog.observers import Observer with tempfile.TemporaryDirectory() as temp_dir: def run_observer(): event_handler = LoggingEventHandler() 
observer = Observer() observer.schedule(event_handler, temp_dir) observer.start() eventlet.sleep(1) observer.stop() def on_alarm(signum, frame): print("Observer.stop() never finished!", file=sys.stderr) # noqa: T201 sys.exit(1) signal.signal(signal.SIGALRM, on_alarm) signal.alarm(4) thread = eventlet.spawn(run_observer) thread.wait() watchdog-6.0.0/tests/isolated/eventlet_skip_repeat_queue.py000066400000000000000000000010721471115752600242660ustar00rootroot00000000000000if __name__ == "__main__": import eventlet eventlet.monkey_patch() from watchdog.utils.bricks import SkipRepeatsQueue q = SkipRepeatsQueue(10) q.put("A") q.put("A") q.put("A") q.put("A") q.put("B") q.put("A") value = q.get() assert value == "A" q.task_done() assert q.unfinished_tasks == 2 value = q.get() assert value == "B" q.task_done() assert q.unfinished_tasks == 1 value = q.get() assert value == "A" q.task_done() assert q.empty() assert q.unfinished_tasks == 0 watchdog-6.0.0/tests/shell.py000066400000000000000000000041131471115752600161500ustar00rootroot00000000000000""" :module: tests.shell :synopsis: Common shell operations for testing. 
:author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) """ from __future__ import annotations import errno import os import os.path import shutil import tempfile import time def cd(path): os.chdir(path) def pwd(): return os.getcwd() def mkfile(path): """Creates a file""" with open(path, "ab"): pass def mkdir(path, *, parents=False): """Creates a directory (optionally also creates all the parent directories in the path).""" if parents: try: os.makedirs(path) except OSError as e: if e.errno != errno.EEXIST: raise else: os.mkdir(path) def rm(path, *, recursive=False): """Deletes files or directories.""" if os.path.isdir(path): if recursive: shutil.rmtree(path) else: raise OSError(errno.EISDIR, os.strerror(errno.EISDIR), path) else: os.remove(path) def touch(path, times=None): """Updates the modified timestamp of a file or directory.""" if os.path.isdir(path): os.utime(path, times) else: with open(path, "ab"): os.utime(path, times) def truncate(path): """Truncates a file.""" with open(path, "wb"): os.utime(path, None) def mv(src_path, dest_path): """Moves files or directories.""" try: os.rename(src_path, dest_path) except OSError: # this will happen on windows os.remove(dest_path) os.rename(src_path, dest_path) def mkdtemp(): return tempfile.mkdtemp() def ls(path="."): return os.listdir(path) def msize(path): """Modify the file size without updating the modified time.""" with open(path, "w") as w: w.write("") os.utime(path, (0, 0)) time.sleep(0.4) with open(path, "w") as w: w.write("0") os.utime(path, (0, 0)) def mount_tmpfs(path): os.system(f"sudo mount -t tmpfs none {path}") def unmount(path): os.system(f"sudo umount {path}") watchdog-6.0.0/tests/test_0_watchmedo.py000066400000000000000000000276531471115752600203100ustar00rootroot00000000000000from __future__ import annotations import logging import os import sys import time from unittest.mock import patch import pytest # Skip if import PyYAML failed. 
PyYAML missing possible because # watchdog installed without watchmedo. See Installation section # in README.rst yaml = pytest.importorskip("yaml") from yaml.constructor import ConstructorError # noqa: E402 from yaml.scanner import ScannerError # noqa: E402 from watchdog import watchmedo # noqa: E402 from watchdog.events import FileModifiedEvent, FileOpenedEvent # noqa: E402 from watchdog.tricks import AutoRestartTrick, LoggerTrick, ShellCommandTrick # noqa: E402 from watchdog.utils import WatchdogShutdownError, platform # noqa: E402 def test_load_config_valid(tmpdir): """Verifies the load of a valid yaml file""" yaml_file = os.path.join(tmpdir, "config_file.yaml") with open(yaml_file, "w") as f: f.write("one: value\ntwo:\n- value1\n- value2\n") config = watchmedo.load_config(yaml_file) assert isinstance(config, dict) assert "one" in config assert "two" in config assert isinstance(config["two"], list) assert config["one"] == "value" assert config["two"] == ["value1", "value2"] def test_load_config_invalid(tmpdir): """Verifies if safe load avoid the execution of untrusted code inside yaml files""" critical_dir = os.path.join(tmpdir, "critical") yaml_file = os.path.join(tmpdir, "tricks_file.yaml") with open(yaml_file, "w") as f: content = f'one: value\nrun: !!python/object/apply:os.system ["mkdir {critical_dir}"]\n' f.write(content) # PyYAML get_single_data() raises different exceptions for Linux and Windows with pytest.raises((ConstructorError, ScannerError)): watchmedo.load_config(yaml_file) assert not os.path.exists(critical_dir) def make_dummy_script(tmpdir, n=10): script = os.path.join(tmpdir, f"auto-test-{n}.py") with open(script, "w") as f: f.write('import time\nfor i in range(%d):\n\tprint("+++++ %%d" %% i, flush=True)\n\ttime.sleep(1)\n' % n) return script def test_kill_auto_restart(tmpdir, capfd): script = make_dummy_script(tmpdir) a = AutoRestartTrick([sys.executable, script]) a.start() time.sleep(3) a.stop() cap = capfd.readouterr() assert "+++++ 0" in 
cap.out assert "+++++ 9" not in cap.out # we killed the subprocess before the end # in windows we seem to lose the subprocess stderr # assert 'KeyboardInterrupt' in cap.err def test_shell_command_wait_for_completion(tmpdir, capfd): script = make_dummy_script(tmpdir, n=1) command = f"{sys.executable} {script}" trick = ShellCommandTrick(command, wait_for_process=True) assert not trick.is_process_running() start_time = time.monotonic() trick.on_any_event(FileModifiedEvent("foo/bar.baz")) elapsed = time.monotonic() - start_time assert not trick.is_process_running() assert elapsed >= 1 def test_shell_command_subprocess_termination_nowait(tmpdir): script = make_dummy_script(tmpdir, n=1) command = f"{sys.executable} {script}" trick = ShellCommandTrick(command, wait_for_process=False) assert not trick.is_process_running() trick.on_any_event(FileModifiedEvent("foo/bar.baz")) assert trick.is_process_running() time.sleep(5) assert not trick.is_process_running() def test_shell_command_subprocess_termination_not_happening_on_file_opened_event( tmpdir, ): # FIXME: see issue #949, and find a way to better handle that scenario script = make_dummy_script(tmpdir, n=1) command = f"{sys.executable} {script}" trick = ShellCommandTrick(command, wait_for_process=False) assert not trick.is_process_running() trick.on_any_event(FileOpenedEvent("foo/bar.baz")) assert not trick.is_process_running() time.sleep(5) assert not trick.is_process_running() def test_auto_restart_not_happening_on_file_opened_event(tmpdir, capfd): # FIXME: see issue #949, and find a way to better handle that scenario script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script]) trick.start() time.sleep(1) trick.on_any_event(FileOpenedEvent("foo/bar.baz")) trick.on_any_event(FileOpenedEvent("foo/bar2.baz")) trick.on_any_event(FileOpenedEvent("foo/bar3.baz")) time.sleep(1) trick.stop() cap = capfd.readouterr() assert cap.out.splitlines(keepends=False).count("+++++ 0") == 1 assert 
trick.restart_count == 0 def test_auto_restart_on_file_change(tmpdir, capfd): """Simulate changing 3 files. Expect 3 restarts. """ script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script]) trick.start() time.sleep(1) trick.on_any_event(FileModifiedEvent("foo/bar.baz")) trick.on_any_event(FileModifiedEvent("foo/bar2.baz")) trick.on_any_event(FileModifiedEvent("foo/bar3.baz")) time.sleep(1) trick.stop() cap = capfd.readouterr() assert cap.out.splitlines(keepends=False).count("+++++ 0") >= 2 assert trick.restart_count == 3 @pytest.mark.xfail( condition=platform.is_darwin() or platform.is_windows() or sys.implementation.name == "pypy", reason="known to be problematic, see #973", ) def test_auto_restart_on_file_change_debounce(tmpdir, capfd): """Simulate changing 3 files quickly and then another change later. Expect 2 restarts due to debouncing. """ script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script], debounce_interval_seconds=0.5) trick.start() time.sleep(1) trick.on_any_event(FileModifiedEvent("foo/bar.baz")) trick.on_any_event(FileModifiedEvent("foo/bar2.baz")) time.sleep(0.1) trick.on_any_event(FileModifiedEvent("foo/bar3.baz")) time.sleep(1) trick.on_any_event(FileModifiedEvent("foo/bar.baz")) time.sleep(1) trick.stop() cap = capfd.readouterr() assert cap.out.splitlines(keepends=False).count("+++++ 0") == 3 assert trick.restart_count == 2 @pytest.mark.flaky(max_runs=5, min_passes=1) @pytest.mark.parametrize( "restart_on_command_exit", [ True, pytest.param( False, marks=pytest.mark.xfail( condition=platform.is_darwin() or platform.is_windows(), reason="known to be problematic, see #972", ), ), ], ) def test_auto_restart_subprocess_termination(tmpdir, capfd, restart_on_command_exit): """Run auto-restart with a script that terminates in about 2 seconds. After 5 seconds, expect it to have been restarted at least once. 
""" script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script], restart_on_command_exit=restart_on_command_exit) trick.start() time.sleep(5) trick.stop() cap = capfd.readouterr() if restart_on_command_exit: assert cap.out.splitlines(keepends=False).count("+++++ 0") > 1 assert trick.restart_count >= 1 else: assert cap.out.splitlines(keepends=False).count("+++++ 0") == 1 assert trick.restart_count == 0 def test_auto_restart_arg_parsing_basic(): args = watchmedo.cli.parse_args(["auto-restart", "-d", ".", "--recursive", "--debug-force-polling", "cmd"]) assert args.func is watchmedo.auto_restart assert args.command == "cmd" assert args.directories == ["."] assert args.recursive assert args.debug_force_polling def test_auto_restart_arg_parsing(): args = watchmedo.cli.parse_args( [ "auto-restart", "-d", ".", "--kill-after", "12.5", "--debounce-interval=0.2", "cmd", ] ) assert args.func is watchmedo.auto_restart assert args.command == "cmd" assert args.directories == ["."] assert args.kill_after == pytest.approx(12.5) assert args.debounce_interval == pytest.approx(0.2) def test_auto_restart_events_echoed(tmpdir, caplog): script = make_dummy_script(tmpdir, n=2) with caplog.at_level(logging.INFO): trick = AutoRestartTrick([sys.executable, script]) trick.on_any_event(FileOpenedEvent("foo/bar.baz")) trick.on_any_event(FileOpenedEvent("foo/bar2.baz")) trick.on_any_event(FileOpenedEvent("foo/bar3.baz")) records = [record.getMessage().strip() for record in caplog.get_records(when="call")] assert records == [ "on_any_event(self=, event=FileOpenedEvent(src_path='foo/bar.baz', dest_path='', event_type='opened', is_directory=False, is_synthetic=False))", # noqa: E501 "on_any_event(self=, event=FileOpenedEvent(src_path='foo/bar2.baz', dest_path='', event_type='opened', is_directory=False, is_synthetic=False))", # noqa: E501 "on_any_event(self=, event=FileOpenedEvent(src_path='foo/bar3.baz', dest_path='', event_type='opened', is_directory=False, 
is_synthetic=False))", # noqa: E501 ] def test_logger_events_echoed(caplog): with caplog.at_level(logging.INFO): trick = LoggerTrick() trick.on_any_event(FileOpenedEvent("foo/bar.baz")) trick.on_any_event(FileOpenedEvent("foo/bar2.baz")) trick.on_any_event(FileOpenedEvent("foo/bar3.baz")) records = [record.getMessage().strip() for record in caplog.get_records(when="call")] assert records == [ "on_any_event(self=, event=FileOpenedEvent(src_path='foo/bar.baz', dest_path='', event_type='opened', is_directory=False, is_synthetic=False))", # noqa: E501 "on_any_event(self=, event=FileOpenedEvent(src_path='foo/bar2.baz', dest_path='', event_type='opened', is_directory=False, is_synthetic=False))", # noqa: E501 "on_any_event(self=, event=FileOpenedEvent(src_path='foo/bar3.baz', dest_path='', event_type='opened', is_directory=False, is_synthetic=False))", # noqa: E501 ] def test_shell_command_arg_parsing(): args = watchmedo.cli.parse_args(["shell-command", "--command='cmd'"]) assert args.command == "'cmd'" @pytest.mark.parametrize("cmdline", [["auto-restart", "-d", ".", "cmd"], ["log", "."]]) @pytest.mark.parametrize( "verbosity", [ ([], "WARNING"), (["-q"], "ERROR"), (["--quiet"], "ERROR"), (["-v"], "INFO"), (["--verbose"], "INFO"), (["-vv"], "DEBUG"), (["-v", "-v"], "DEBUG"), (["--verbose", "-v"], "DEBUG"), ], ) def test_valid_verbosity(cmdline, verbosity): (verbosity_cmdline_args, expected_log_level) = verbosity cmd = [cmdline[0], *verbosity_cmdline_args, *cmdline[1:]] args = watchmedo.cli.parse_args(cmd) log_level = watchmedo._get_log_level_from_args(args) # noqa: SLF001 assert log_level == expected_log_level @pytest.mark.parametrize("cmdline", [["auto-restart", "-d", ".", "cmd"], ["log", "."]]) @pytest.mark.parametrize( "verbosity_cmdline_args", [ ["-q", "-v"], ["-v", "-q"], ["-qq"], ["-q", "-q"], ["--quiet", "--quiet"], ["--quiet", "-q"], ["-vvv"], ["-vvvv"], ["-v", "-v", "-v"], ["-vv", "-v"], ["--verbose", "-vv"], ], ) def test_invalid_verbosity(cmdline, 
verbosity_cmdline_args): cmd = [cmdline[0], *verbosity_cmdline_args, *cmdline[1:]] with pytest.raises((watchmedo.LogLevelError, SystemExit)): # noqa: PT012 args = watchmedo.cli.parse_args(cmd) watchmedo._get_log_level_from_args(args) # noqa: SLF001 @pytest.mark.parametrize("command", ["tricks-from", "tricks"]) def test_tricks_from_file(command, tmp_path): tricks_file = tmp_path / "tricks.yaml" tricks_file.write_text( """ tricks: - watchdog.tricks.LoggerTrick: patterns: ["*.py", "*.js"] """ ) args = watchmedo.cli.parse_args([command, str(tricks_file)]) checkpoint = False def mocked_sleep(_): nonlocal checkpoint checkpoint = True raise WatchdogShutdownError with patch("time.sleep", mocked_sleep): watchmedo.tricks_from(args) assert checkpoint watchdog-6.0.0/tests/test_delayed_queue.py000066400000000000000000000012071471115752600207140ustar00rootroot00000000000000from __future__ import annotations from time import time import pytest from watchdog.utils.delayed_queue import DelayedQueue @pytest.mark.flaky(max_runs=5, min_passes=1) def test_delayed_get(): q = DelayedQueue[str](2) q.put("", delay=True) inserted = time() q.get() elapsed = time() - inserted # 2.10 instead of 2.05 for slow macOS slaves on Travis assert 2.10 > elapsed > 1.99 @pytest.mark.flaky(max_runs=5, min_passes=1) def test_nondelayed_get(): q = DelayedQueue[str](2) q.put("") inserted = time() q.get() elapsed = time() - inserted # Far less than 1 second assert elapsed < 1 watchdog-6.0.0/tests/test_echo.py000066400000000000000000000005021471115752600170140ustar00rootroot00000000000000from typing import Any import pytest from watchdog.utils import echo @pytest.mark.parametrize( ("value", "expected"), [ (("x", (1, 2, 3)), "x=(1, 2, 3)"), ], ) def test_format_arg_value(value: tuple[str, tuple[Any, ...]], expected: str) -> None: assert echo.format_arg_value(value) == expected watchdog-6.0.0/tests/test_emitter.py000066400000000000000000000502361471115752600175600ustar00rootroot00000000000000from __future__ 
import annotations import logging import os import stat import time from queue import Empty from typing import TYPE_CHECKING import pytest from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileClosedNoWriteEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, ) from watchdog.utils import platform from .shell import mkdir, mkfile, mv, rm, touch if TYPE_CHECKING: from .utils import ExpectEvent, P, StartWatching, TestEventQueue logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) if platform.is_darwin(): # enable more verbose logs fsevents_logger = logging.getLogger("fsevents") fsevents_logger.setLevel(logging.DEBUG) def rerun_filter(exc, *args): time.sleep(5) return bool(issubclass(exc[0], Empty) and platform.is_windows()) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_create(p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent) -> None: start_watching() open(p("a"), "a").close() expect_event(FileCreatedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileOpenedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedEvent) @pytest.mark.skipif(not platform.is_linux(), reason="FileClosed*Event only supported in GNU/Linux") @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_closed(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: with open(p("a"), "a"): start_watching() # After file creation/open in append mode event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedEvent) event = event_queue.get(timeout=5)[0] assert os.path.normpath(event.src_path) == 
os.path.normpath(p("")) assert isinstance(event, DirModifiedEvent) # After read-only, only IN_CLOSE_NOWRITE is emitted open(p("a")).close() event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileOpenedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedNoWriteEvent) assert event_queue.empty() @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( platform.is_darwin() or platform.is_windows(), reason="Windows and macOS enforce proper encoding", ) def test_create_wrong_encoding(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: start_watching() open(p("a_\udce4"), "a").close() event = event_queue.get(timeout=5)[0] assert event.src_path == p("a_\udce4") assert isinstance(event, FileCreatedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert os.path.normpath(event.src_path) == os.path.normpath(p("")) assert isinstance(event, DirModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_delete(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkfile(p("a")) start_watching() rm(p("a")) expect_event(FileDeletedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_modify(p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkfile(p("a")) start_watching() touch(p("a")) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileOpenedEvent) expect_event(FileModifiedEvent(p("a"))) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_chmod(p: P, 
start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkfile(p("a")) start_watching() # Note: We use S_IREAD here because chmod on Windows only # allows setting the read-only flag. os.chmod(p("a"), stat.S_IREAD) expect_event(FileModifiedEvent(p("a"))) # Reset permissions to allow cleanup. os.chmod(p("a"), stat.S_IWRITE) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move(p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching() mv(p("dir1", "a"), p("dir2", "b")) if not platform.is_windows(): expect_event(FileMovedEvent(p("dir1", "a"), p("dir2", "b"))) else: event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "a") assert isinstance(event, FileDeletedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "b") assert isinstance(event, FileCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_case_change( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "file")) start_watching() mv(p("dir1", "file"), p("dir2", "FILE")) if not platform.is_windows(): expect_event(FileMovedEvent(p("dir1", "file"), p("dir2", "FILE"))) else: event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "file") assert isinstance(event, FileDeletedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "FILE") assert isinstance(event, FileCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path in 
[p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move_to(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(path=p("dir2")) mv(p("dir1", "a"), p("dir2", "b")) expect_event(FileCreatedEvent(p("dir2", "b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p("dir2"))) @pytest.mark.skipif(not platform.is_linux(), reason="InotifyFullEmitter only supported in Linux") def test_move_to_full(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(path=p("dir2"), use_full_emitter=True) mv(p("dir1", "a"), p("dir2", "b")) event = event_queue.get(timeout=5)[0] assert isinstance(event, FileMovedEvent) assert event.dest_path == p("dir2", "b") assert event.src_path == "" # Should be blank since the path was not watched @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move_from(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(path=p("dir1")) mv(p("dir1", "a"), p("dir2", "b")) expect_event(FileDeletedEvent(p("dir1", "a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p("dir1"))) @pytest.mark.skipif(not platform.is_linux(), reason="InotifyFullEmitter only supported in Linux") def test_move_from_full(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(path=p("dir1"), use_full_emitter=True) mv(p("dir1", "a"), p("dir2", "b")) event = event_queue.get(timeout=5)[0] assert isinstance(event, FileMovedEvent) 
assert event.src_path == p("dir1", "a") assert event.dest_path == "" # Should be blank since path not watched @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_separate_consecutive_moves(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkfile(p("dir1", "a")) mkfile(p("b")) start_watching(path=p("dir1")) mv(p("dir1", "a"), p("c")) mv(p("b"), p("dir1", "d")) dir_modif = DirModifiedEvent(p("dir1")) a_deleted = FileDeletedEvent(p("dir1", "a")) d_created = FileCreatedEvent(p("dir1", "d")) expected_events = [a_deleted, dir_modif, d_created, dir_modif] if platform.is_windows(): expected_events = [a_deleted, d_created] if platform.is_bsd(): # Due to the way kqueue works, we can't really order # 'Created' and 'Deleted' events in time, so creation queues first expected_events = [d_created, a_deleted, dir_modif, dir_modif] for expected_event in expected_events: expect_event(expected_event) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif(platform.is_bsd(), reason="BSD create another set of events for this test") def test_delete_self(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) emitter = start_watching(path=p("dir1")) rm(p("dir1"), recursive=True) expect_event(DirDeletedEvent(p("dir1"))) emitter.join(5) assert not emitter.is_alive() @pytest.mark.skipif( platform.is_windows() or platform.is_bsd(), reason="Windows|BSD create another set of events for this test", ) def test_fast_subdirectory_creation_deletion(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: root_dir = p("dir1") sub_dir = p("dir1", "subdir1") times = 30 mkdir(root_dir) start_watching(path=root_dir) for _ in range(times): mkdir(sub_dir) rm(sub_dir, recursive=True) time.sleep(0.1) # required for macOS emitter to catch up with us count = {DirCreatedEvent: 0, DirModifiedEvent: 0, DirDeletedEvent: 0} etype_for_dir = { DirCreatedEvent: 
sub_dir, DirModifiedEvent: root_dir, DirDeletedEvent: sub_dir, } for _ in range(times * 4): event = event_queue.get(timeout=5)[0] logger.debug(event) etype = type(event) count[etype] += 1 assert event.src_path == etype_for_dir[etype] assert count[DirCreatedEvent] >= count[DirDeletedEvent] assert count[DirCreatedEvent] + count[DirDeletedEvent] >= count[DirModifiedEvent] assert count == { DirCreatedEvent: times, DirModifiedEvent: times * 2, DirDeletedEvent: times, } @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_passing_unicode_should_give_unicode(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: start_watching(path=str(p())) mkfile(p("a")) event = event_queue.get(timeout=5)[0] assert isinstance(event.src_path, str) @pytest.mark.skipif( platform.is_windows(), reason="Windows ReadDirectoryChangesW supports only" " unicode for paths.", ) def test_passing_bytes_should_give_bytes(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: start_watching(path=p().encode()) mkfile(p("a")) event = event_queue.get(timeout=5)[0] assert isinstance(event.src_path, bytes) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_recursive_on(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: mkdir(p("dir1", "dir2", "dir3"), parents=True) start_watching() touch(p("dir1", "dir2", "dir3", "a")) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3", "a") assert isinstance(event, FileCreatedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3") assert isinstance(event, DirModifiedEvent) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3", "a") assert isinstance(event, FileOpenedEvent) if not platform.is_bsd(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3", "a") 
assert isinstance(event, FileModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_recursive_off( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: mkdir(p("dir1")) start_watching(recursive=False) touch(p("dir1", "a")) with pytest.raises(Empty): event_queue.get(timeout=5) mkfile(p("b")) expect_event(FileCreatedEvent(p("b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) if platform.is_linux(): expect_event(FileOpenedEvent(p("b"))) expect_event(FileClosedEvent(p("b"))) # currently limiting these additional events to macOS only, see https://github.com/gorakhargosh/watchdog/pull/779 if platform.is_darwin(): mkdir(p("dir1", "dir2")) with pytest.raises(Empty): event_queue.get(timeout=5) mkfile(p("dir1", "dir2", "somefile")) with pytest.raises(Empty): event_queue.get(timeout=5) mkdir(p("dir3")) expect_event(DirModifiedEvent(p())) # the contents of the parent directory changed mv(p("dir1", "dir2", "somefile"), p("somefile")) expect_event(FileMovedEvent(p("dir1", "dir2", "somefile"), p("somefile"))) expect_event(DirModifiedEvent(p())) mv(p("dir1", "dir2"), p("dir2")) expect_event(DirMovedEvent(p("dir1", "dir2"), p("dir2"))) expect_event(DirModifiedEvent(p())) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_renaming_top_level_directory( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: start_watching() mkdir(p("a")) expect_event(DirCreatedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) mkdir(p("a", "b")) expect_event(DirCreatedEvent(p("a", "b"))) expect_event(DirModifiedEvent(p("a"))) mv(p("a"), p("a2")) expect_event(DirMovedEvent(p("a"), p("a2"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) expect_event(DirModifiedEvent(p())) expect_event(DirMovedEvent(p("a", "b"), p("a2", "b"), is_synthetic=True)) if platform.is_bsd(): 
expect_event(DirModifiedEvent(p())) open(p("a2", "b", "c"), "a").close() # DirModifiedEvent may emitted, but sometimes after waiting time is out. events = [] while True: events.append(event_queue.get(timeout=5)[0]) if event_queue.empty(): break assert all( isinstance(e, (FileCreatedEvent, FileMovedEvent, FileOpenedEvent, DirModifiedEvent, FileClosedEvent)) for e in events ) for event in events: if isinstance(event, FileCreatedEvent): assert event.src_path == p("a2", "b", "c") elif isinstance(event, FileMovedEvent): assert event.dest_path == p("a2", "b", "c") assert event.src_path == p("a", "b", "c") elif isinstance(event, DirModifiedEvent): assert event.src_path == p("a2", "b") @pytest.mark.skipif(platform.is_windows(), reason="Windows create another set of events for this test") def test_move_nested_subdirectories( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: mkdir(p("dir1/dir2/dir3"), parents=True) mkfile(p("dir1/dir2/dir3", "a")) start_watching() mv(p("dir1/dir2"), p("dir2")) expect_event(DirMovedEvent(p("dir1", "dir2"), p("dir2"))) expect_event(DirModifiedEvent(p("dir1"))) expect_event(DirModifiedEvent(p())) expect_event(DirMovedEvent(p("dir1", "dir2", "dir3"), p("dir2", "dir3"), is_synthetic=True)) expect_event(FileMovedEvent(p("dir1", "dir2", "dir3", "a"), p("dir2", "dir3", "a"), is_synthetic=True)) if platform.is_bsd(): event = event_queue.get(timeout=5)[0] assert p(event.src_path) == p() assert isinstance(event, DirModifiedEvent) event = event_queue.get(timeout=5)[0] assert p(event.src_path) == p("dir1") assert isinstance(event, DirModifiedEvent) touch(p("dir2/dir3", "a")) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2/dir3", "a") assert isinstance(event, FileOpenedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2/dir3", "a") assert isinstance(event, FileModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, 
rerun_filter=rerun_filter) @pytest.mark.skipif( not platform.is_windows(), reason="Non-Windows create another set of events for this test", ) def test_move_nested_subdirectories_on_windows( p: P, event_queue: TestEventQueue, start_watching: StartWatching, ) -> None: mkdir(p("dir1/dir2/dir3"), parents=True) mkfile(p("dir1/dir2/dir3", "a")) start_watching(path=p("")) mv(p("dir1/dir2"), p("dir2")) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2") assert isinstance(event, FileDeletedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2") assert isinstance(event, DirCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "dir3") assert isinstance(event, DirCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "dir3", "a") assert isinstance(event, FileCreatedEvent) touch(p("dir2/dir3", "a")) events = [] while True: events.append(event_queue.get(timeout=5)[0]) if event_queue.empty(): break assert all(isinstance(e, (FileModifiedEvent, DirModifiedEvent)) for e in events) for event in events: if isinstance(event, FileModifiedEvent): assert event.src_path == p("dir2", "dir3", "a") elif isinstance(event, DirModifiedEvent): assert event.src_path in [p("dir2"), p("dir2", "dir3")] @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif(platform.is_bsd(), reason="BSD create another set of events for this test") def test_file_lifecyle(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: start_watching() mkfile(p("a")) touch(p("a")) mv(p("a"), p("b")) rm(p("b")) expect_event(FileCreatedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) if platform.is_linux(): expect_event(FileOpenedEvent(p("a"))) expect_event(FileClosedEvent(p("a"))) expect_event(DirModifiedEvent(p())) expect_event(FileOpenedEvent(p("a"))) expect_event(FileModifiedEvent(p("a"))) if platform.is_linux(): 
expect_event(FileClosedEvent(p("a"))) expect_event(DirModifiedEvent(p())) expect_event(FileMovedEvent(p("a"), p("b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) expect_event(DirModifiedEvent(p())) expect_event(FileDeletedEvent(p("b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) watchdog-6.0.0/tests/test_events.py000066400000000000000000000142361471115752600174130ustar00rootroot00000000000000from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CLOSED, EVENT_TYPE_CLOSED_NO_WRITE, EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, EVENT_TYPE_OPENED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileClosedNoWriteEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, FileSystemEventHandler, ) path_1 = "/path/xyz" path_2 = "/path/abc" def test_file_deleted_event(): event = FileDeletedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_DELETED assert not event.is_directory assert not event.is_synthetic def test_file_delete_event_is_directory(): # Inherited properties. 
event = FileDeletedEvent(path_1) assert not event.is_directory assert not event.is_synthetic def test_file_modified_event(): event = FileModifiedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_MODIFIED assert not event.is_directory assert not event.is_synthetic def test_file_modified_event_is_directory(): # Inherited Properties event = FileModifiedEvent(path_1) assert not event.is_directory assert not event.is_synthetic def test_file_created_event(): event = FileCreatedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_CREATED assert not event.is_directory assert not event.is_synthetic def test_file_moved_event(): event = FileMovedEvent(path_1, path_2) assert path_1 == event.src_path assert path_2 == event.dest_path assert event.event_type == EVENT_TYPE_MOVED assert not event.is_directory assert not event.is_synthetic def test_file_closed_event(): event = FileClosedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_CLOSED assert not event.is_directory assert not event.is_synthetic def test_file_closed_no_write_event(): event = FileClosedNoWriteEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_CLOSED_NO_WRITE assert not event.is_directory assert not event.is_synthetic def test_file_opened_event(): event = FileOpenedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_OPENED assert not event.is_directory assert not event.is_synthetic def test_dir_deleted_event(): event = DirDeletedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_DELETED assert event.is_directory assert not event.is_synthetic def test_dir_modified_event(): event = DirModifiedEvent(path_1) assert path_1 == event.src_path assert event.event_type == EVENT_TYPE_MODIFIED assert event.is_directory assert not event.is_synthetic def test_dir_created_event(): event = DirCreatedEvent(path_1) assert path_1 == 
event.src_path assert event.event_type == EVENT_TYPE_CREATED assert event.is_directory assert not event.is_synthetic def test_file_system_event_handler_dispatch(): dir_del_event = DirDeletedEvent("/path/blah.py") file_del_event = FileDeletedEvent("/path/blah.txt") dir_cre_event = DirCreatedEvent("/path/blah.py") file_cre_event = FileCreatedEvent("/path/blah.txt") file_cls_event = FileClosedEvent("/path/blah.txt") file_cls_nw_event = FileClosedNoWriteEvent("/path/blah.txt") file_opened_event = FileOpenedEvent("/path/blah.txt") dir_mod_event = DirModifiedEvent("/path/blah.py") file_mod_event = FileModifiedEvent("/path/blah.txt") dir_mov_event = DirMovedEvent("/path/blah.py", "/path/blah") file_mov_event = FileMovedEvent("/path/blah.txt", "/path/blah") all_events = [ dir_mod_event, dir_del_event, dir_cre_event, dir_mov_event, file_mod_event, file_del_event, file_cre_event, file_mov_event, file_cls_event, file_cls_nw_event, file_opened_event, ] checkpoint = 0 class TestableEventHandler(FileSystemEventHandler): def on_any_event(self, event): nonlocal checkpoint checkpoint += 1 def on_modified(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_MODIFIED def on_deleted(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_DELETED def on_moved(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_MOVED def on_created(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_CREATED def on_closed(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_CLOSED def on_closed_no_write(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_CLOSED_NO_WRITE def on_opened(self, event): nonlocal checkpoint checkpoint += 1 assert event.event_type == EVENT_TYPE_OPENED handler = TestableEventHandler() for event in all_events: assert not event.is_synthetic handler.dispatch(event) assert 
checkpoint == len(all_events) * 2 # `on_any_event()` + specific `on_XXX()` def test_event_comparison(): creation1 = FileCreatedEvent("foo") creation2 = FileCreatedEvent("foo") creation3 = FileCreatedEvent("bar") assert creation1 == creation2 assert creation1 != creation3 assert creation2 != creation3 move1 = FileMovedEvent("a", "b") move2 = FileMovedEvent("a", "b") move3 = FileMovedEvent("a", "c") move4 = FileMovedEvent("b", "a") assert creation1 != move1 # type: ignore[comparison-overlap] assert move1 == move2 assert move1 != move3 assert move1 != move4 assert move2 != move3 assert move2 != move4 assert move3 != move4 watchdog-6.0.0/tests/test_fsevents.py000066400000000000000000000244351471115752600177460ustar00rootroot00000000000000from __future__ import annotations import contextlib import pytest from watchdog.utils import platform if not platform.is_darwin(): pytest.skip("macOS only.", allow_module_level=True) import logging import os import time from os import mkdir, rmdir from random import random from threading import Thread from time import sleep from typing import TYPE_CHECKING from unittest.mock import patch import _watchdog_fsevents as _fsevents # type: ignore[import-not-found] from watchdog.events import FileSystemEventHandler from watchdog.observers import Observer from watchdog.observers.api import BaseObserver, ObservedWatch from watchdog.observers.fsevents import FSEventsEmitter from .shell import touch if TYPE_CHECKING: from .utils import P, StartWatching, TestEventQueue logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) @pytest.fixture def observer(): obs = Observer() obs.start() yield obs obs.stop() with contextlib.suppress(RuntimeError): obs.join() @pytest.mark.parametrize( ("event", "expectation"), [ # invalid flags (_fsevents.NativeEvent("", 0, 0, 0), False), # renamed (_fsevents.NativeEvent("", 0, 0x00000800, 0), False), # renamed, removed (_fsevents.NativeEvent("", 0, 0x00000800 | 0x00000200, 0), True), # renamed, 
removed, created (_fsevents.NativeEvent("", 0, 0x00000800 | 0x00000200 | 0x00000100, 0), True), # renamed, removed, created, itemfindermod ( _fsevents.NativeEvent("", 0, 0x00000800 | 0x00000200 | 0x00000100 | 0x00002000, 0), True, ), # xattr, removed, modified, itemfindermod ( _fsevents.NativeEvent("", 0, 0x00008000 | 0x00000200 | 0x00001000 | 0x00002000, 0), False, ), ], ) def test_coalesced_event_check(event, expectation): assert event.is_coalesced == expectation def test_add_watch_twice(observer: BaseObserver, p: P) -> None: """Adding the same watch twice used to result in a null pointer return without an exception. See https://github.com/gorakhargosh/watchdog/issues/765 """ a = p("a") mkdir(a) h = FileSystemEventHandler() w = ObservedWatch(a, recursive=False) def callback(path, inodes, flags, ids): pass _fsevents.add_watch(h, w, callback, [w.path]) with pytest.raises(RuntimeError): _fsevents.add_watch(h, w, callback, [w.path]) _fsevents.remove_watch(w) rmdir(a) def test_watcher_deletion_while_receiving_events_1( caplog: pytest.LogCaptureFixture, p: P, start_watching: StartWatching, ) -> None: """ When the watcher is stopped while there are events, such exception could happen: Traceback (most recent call last): File "observers/fsevents.py", line 327, in events_callback self.queue_events(self.timeout, events) File "observers/fsevents.py", line 187, in queue_events src_path = self._encode_path(event.path) File "observers/fsevents.py", line 352, in _encode_path if isinstance(self.watch.path, bytes): AttributeError: 'NoneType' object has no attribute 'path' """ tmpdir = p() orig = FSEventsEmitter.events_callback def cb(*args): FSEventsEmitter.stop(emitter) orig(*args) with caplog.at_level(logging.ERROR), patch.object(FSEventsEmitter, "events_callback", new=cb): emitter = start_watching(path=tmpdir) # Less than 100 is not enough events to trigger the error for n in range(100): touch(p(f"{n}.txt")) emitter.stop() assert not caplog.records def 
test_watcher_deletion_while_receiving_events_2( caplog: pytest.LogCaptureFixture, p: P, start_watching: StartWatching, ) -> None: """Note: that test takes about 20 seconds to complete. Quite similar test to prevent another issue when the watcher is stopped while there are events, such exception could happen: Traceback (most recent call last): File "observers/fsevents.py", line 327, in events_callback self.queue_events(self.timeout, events) File "observers/fsevents.py", line 235, in queue_events self._queue_created_event(event, src_path, src_dirname) File "observers/fsevents.py", line 132, in _queue_created_event self.queue_event(cls(src_path)) File "observers/fsevents.py", line 104, in queue_event if self._watch.is_recursive: AttributeError: 'NoneType' object has no attribute 'is_recursive' """ def try_to_fail(): tmpdir = p() emitter = start_watching(path=tmpdir) def create_files(): # Less than 2000 is not enough events to trigger the error for n in range(2000): touch(p(f"{n}.txt")) def stop(em): sleep(random()) em.stop() th1 = Thread(target=create_files) th2 = Thread(target=stop, args=(emitter,)) try: th1.start() th2.start() th1.join() th2.join() finally: emitter.stop() # 20 attempts to make the random failure happen with caplog.at_level(logging.ERROR): for _ in range(20): try_to_fail() sleep(random()) assert not caplog.records def test_remove_watch_twice(start_watching: StartWatching) -> None: """ ValueError: PyCapsule_GetPointer called with invalid PyCapsule object The above exception was the direct cause of the following exception: src/watchdog/utils/__init__.py:92: in stop self.on_thread_stop() src/watchdog/observers/fsevents.py:73: SystemError def on_thread_stop(self): > _fsevents.remove_watch(self.watch) E SystemError: returned a result with an error set (FSEvents.framework) FSEventStreamStop(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamInvalidate(): failed assertion 'streamRef != NULL' (FSEvents.framework) 
FSEventStreamRelease(): failed assertion 'streamRef != NULL' """ emitter = start_watching() # This one must work emitter.stop() # This is allowed to call several times .stop() emitter.stop() def test_unschedule_removed_folder(observer: BaseObserver, p: P) -> None: """ TypeError: PyCObject_AsVoidPtr called with null pointer The above exception was the direct cause of the following exception: def on_thread_stop(self): if self.watch: _fsevents.remove_watch(self.watch) E SystemError: returned a result with an error set (FSEvents.framework) FSEventStreamStop(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamInvalidate(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamRelease(): failed assertion 'streamRef != NULL' """ a = p("a") mkdir(a) w = observer.schedule(FileSystemEventHandler(), a, recursive=False) rmdir(a) time.sleep(0.1) observer.unschedule(w) def test_converting_cfstring_to_pyunicode(p: P, start_watching: StartWatching, event_queue: TestEventQueue) -> None: """See https://github.com/gorakhargosh/watchdog/issues/762""" tmpdir = p() emitter = start_watching(path=tmpdir) dirname = "TéstClass" try: mkdir(p(dirname)) event, _ = event_queue.get() assert event.src_path.endswith(dirname) finally: emitter.stop() def test_recursive_check_accepts_relative_paths(p: P) -> None: """See https://github.com/gorakhargosh/watchdog/issues/797 The test code provided in the defect observes the current working directory using ".". Since the watch path wasn't normalized then that failed. This test emulates the scenario. """ from watchdog.events import FileCreatedEvent, FileModifiedEvent, PatternMatchingEventHandler class TestEventHandler(PatternMatchingEventHandler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # the TestEventHandler instance is set to ignore_directories, # as such we won't get a DirModifiedEvent(p()) here. 
self.expected_events = [ FileCreatedEvent(p("foo.json")), FileModifiedEvent(p("foo.json")), ] self.observed_events = set() def on_any_event(self, event): self.expected_events.remove(event) self.observed_events.add(event) def done(self): return not self.expected_events cwd = os.getcwd() os.chdir(p()) event_handler = TestEventHandler(patterns=["*.json"], ignore_patterns=[], ignore_directories=True) observer = Observer() observer.schedule(event_handler, ".") observer.start() time.sleep(0.1) try: touch(p("foo.json")) timeout_at = time.time() + 5 while not event_handler.done() and time.time() < timeout_at: time.sleep(0.1) assert event_handler.done() finally: os.chdir(cwd) observer.stop() observer.join() def test_watchdog_recursive(p: P) -> None: """See https://github.com/gorakhargosh/watchdog/issues/706""" import os.path from watchdog.events import FileSystemEventHandler from watchdog.observers import Observer class Handler(FileSystemEventHandler): def __init__(self): super().__init__() self.changes = [] def on_any_event(self, event): self.changes.append(os.path.basename(event.src_path)) handler = Handler() observer = Observer() watches = [observer.schedule(handler, str(p("")), recursive=True)] try: observer.start() time.sleep(0.1) touch(p("my0.txt")) mkdir(p("dir_rec")) touch(p("dir_rec", "my1.txt")) expected = {"dir_rec", "my0.txt", "my1.txt"} timeout_at = time.time() + 5 while not expected.issubset(handler.changes) and time.time() < timeout_at: time.sleep(0.2) assert expected.issubset(handler.changes), f"Did not find expected changes. 
Found: {handler.changes}" finally: for watch in watches: observer.unschedule(watch) observer.stop() observer.join(1) watchdog-6.0.0/tests/test_inotify_buffer.py000066400000000000000000000074541471115752600211250ustar00rootroot00000000000000from __future__ import annotations import pytest from watchdog.utils import platform if not platform.is_linux(): pytest.skip("GNU/Linux only.", allow_module_level=True) import os import random import time from watchdog.observers.inotify_buffer import InotifyBuffer from .shell import mkdir, mount_tmpfs, mv, rm, touch, unmount def wait_for_move_event(read_event): while True: event = read_event() if isinstance(event, tuple) or event.is_move: return event @pytest.mark.timeout(5) def test_move_from(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) inotify = InotifyBuffer(p("dir1").encode()) mv(p("dir1", "a"), p("dir2", "b")) event = wait_for_move_event(inotify.read_event) assert event.is_moved_from assert event.src_path == p("dir1", "a").encode() inotify.close() @pytest.mark.timeout(5) def test_move_to(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) inotify = InotifyBuffer(p("dir2").encode()) mv(p("dir1", "a"), p("dir2", "b")) event = wait_for_move_event(inotify.read_event) assert event.is_moved_to assert event.src_path == p("dir2", "b").encode() inotify.close() @pytest.mark.timeout(5) def test_move_internal(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) inotify = InotifyBuffer(p("").encode(), recursive=True) mv(p("dir1", "a"), p("dir2", "b")) frm, to = wait_for_move_event(inotify.read_event) assert frm.src_path == p("dir1", "a").encode() assert to.src_path == p("dir2", "b").encode() inotify.close() @pytest.mark.timeout(10) def test_move_internal_batch(p): n = 100 mkdir(p("dir1")) mkdir(p("dir2")) files = [str(i) for i in range(n)] for f in files: touch(p("dir1", f)) inotify = InotifyBuffer(p("").encode(), recursive=True) random.shuffle(files) for f in files: mv(p("dir1", f), p("dir2", f)) # Check 
that all n events are paired for _ in range(n): frm, to = wait_for_move_event(inotify.read_event) assert os.path.dirname(frm.src_path).endswith(b"/dir1") assert os.path.dirname(to.src_path).endswith(b"/dir2") assert frm.name == to.name inotify.close() @pytest.mark.timeout(5) def test_delete_watched_directory(p): mkdir(p("dir")) inotify = InotifyBuffer(p("dir").encode()) rm(p("dir"), recursive=True) # Wait for the event to be picked up inotify.read_event() # Ensure InotifyBuffer shuts down cleanly without raising an exception inotify.close() @pytest.mark.timeout(5) @pytest.mark.skipif("GITHUB_REF" not in os.environ, reason="sudo password prompt") def test_unmount_watched_directory_filesystem(p): mkdir(p("dir1")) mount_tmpfs(p("dir1")) mkdir(p("dir1/dir2")) inotify = InotifyBuffer(p("dir1/dir2").encode()) unmount(p("dir1")) # Wait for the event to be picked up inotify.read_event() # Ensure InotifyBuffer shuts down cleanly without raising an exception inotify.close() assert not inotify.is_alive() def delay_call(function, seconds): def delayed(*args, **kwargs): time.sleep(seconds) return function(*args, **kwargs) return delayed class InotifyBufferDelayedRead(InotifyBuffer): def run(self, *args, **kwargs): # Introduce a delay to trigger the race condition where the file descriptor is # closed prior to a read being triggered. 
self._inotify.read_events = delay_call(self._inotify.read_events, 1) return super().run(*args, **kwargs) @pytest.mark.parametrize(argnames="cls", argvalues=[InotifyBuffer, InotifyBufferDelayedRead]) def test_close_should_terminate_thread(p, cls): inotify = cls(p("").encode(), recursive=True) assert inotify.is_alive() inotify.close() assert not inotify.is_alive() watchdog-6.0.0/tests/test_inotify_c.py000066400000000000000000000152771471115752600201000ustar00rootroot00000000000000from __future__ import annotations from contextlib import ExitStack import pytest from watchdog.utils import platform if not platform.is_linux(): pytest.skip("GNU/Linux only.", allow_module_level=True) import ctypes import errno import logging import os import select import struct from typing import TYPE_CHECKING from unittest.mock import patch from watchdog.events import DirCreatedEvent, DirDeletedEvent, DirModifiedEvent from watchdog.observers.inotify_c import Inotify, InotifyConstants, InotifyEvent if TYPE_CHECKING: from .utils import Helper, P, StartWatching, TestEventQueue logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def struct_inotify(wd, mask, cookie=0, length=0, name=b""): assert len(name) <= length struct_format = ( "=" # (native endianness, standard sizes) "i" # int wd "i" # uint32_t mask "i" # uint32_t cookie "i" # uint32_t len f"{length}s" # char[] name ) return struct.pack(struct_format, wd, mask, cookie, length, name) def test_late_double_deletion(helper: Helper, p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: inotify_fd = type("FD", (object,), {})() inotify_fd.last = 0 inotify_fd.wds = [] const = InotifyConstants() # CREATE DELETE CREATE DELETE DELETE_SELF IGNORE DELETE_SELF IGNORE inotify_fd.buf = ( struct_inotify(wd=1, mask=const.IN_CREATE | const.IN_ISDIR, length=16, name=b"subdir1") + struct_inotify(wd=1, mask=const.IN_DELETE | const.IN_ISDIR, length=16, name=b"subdir1") ) * 2 + ( struct_inotify(wd=2, 
mask=const.IN_DELETE_SELF) + struct_inotify(wd=2, mask=const.IN_IGNORED) + struct_inotify(wd=3, mask=const.IN_DELETE_SELF) + struct_inotify(wd=3, mask=const.IN_IGNORED) ) select_bkp = select.select def fakeselect(read_list, *args, **kwargs): if inotify_fd in read_list: return [inotify_fd], [], [] return select_bkp(read_list, *args, **kwargs) poll_bkp = select.poll class Fakepoll: def __init__(self): self._orig = poll_bkp() self._fake = False def register(self, fd, *args, **kwargs): if fd == inotify_fd: self._fake = True return None return self._orig.register(fd, *args, **kwargs) def poll(self, *args, **kwargs): if self._fake: return [(inotify_fd, select.POLLIN)] return self._orig.poll(*args, **kwargs) os_read_bkp = os.read def fakeread(fd, length): if fd is inotify_fd: result, fd.buf = fd.buf[:length], fd.buf[length:] return result return os_read_bkp(fd, length) os_close_bkp = os.close def fakeclose(fd): if fd is not inotify_fd: os_close_bkp(fd) def inotify_init(): return inotify_fd def inotify_add_watch(fd, path, mask): fd.last += 1 logger.debug("New wd = %d", fd.last) fd.wds.append(fd.last) return fd.last def inotify_rm_watch(fd, wd): logger.debug("Removing wd = %d", wd) fd.wds.remove(wd) return 0 # Mocks the API! 
from watchdog.observers import inotify_c mock1 = patch.object(os, "read", new=fakeread) mock2 = patch.object(os, "close", new=fakeclose) mock3 = patch.object(inotify_c, "inotify_init", new=inotify_init) mock4 = patch.object(inotify_c, "inotify_add_watch", new=inotify_add_watch) mock5 = patch.object(inotify_c, "inotify_rm_watch", new=inotify_rm_watch) mock6 = patch.object(select, "select", new=fakeselect) mock7 = patch.object(select, "poll", new=Fakepoll) with mock1, mock2, mock3, mock4, mock5, mock6, mock7: start_watching(path=p("")) # Watchdog Events for evt_cls in [DirCreatedEvent, DirDeletedEvent] * 2: event = event_queue.get(timeout=5)[0] assert isinstance(event, evt_cls) assert event.src_path == p("subdir1") event = event_queue.get(timeout=5)[0] assert isinstance(event, DirModifiedEvent) assert event.src_path == p("").rstrip(os.path.sep) helper.close() assert inotify_fd.last == 3 # Number of directories assert inotify_fd.buf == b"" # Didn't miss any event assert inotify_fd.wds == [2, 3] # Only 1 is removed explicitly @pytest.mark.parametrize( ("error", "pattern"), [ (errno.ENOSPC, "inotify watch limit reached"), (errno.EMFILE, "inotify instance limit reached"), (errno.ENOENT, "No such file or directory"), (-1, "error"), ], ) def test_raise_error(error, pattern): with patch.object(ctypes, "get_errno", new=lambda: error), pytest.raises(OSError, match=pattern) as exc: Inotify._raise_error() # noqa: SLF001 assert exc.value.errno == error def test_non_ascii_path(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: """ Inotify can construct an event for a path containing non-ASCII. """ path = p("\N{SNOWMAN}") start_watching(path=p("")) os.mkdir(path) event, _ = event_queue.get(timeout=5) assert isinstance(event.src_path, str) assert event.src_path == path # Just make sure it doesn't raise an exception. 
assert repr(event) def test_watch_file(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: path = p("this_is_a_file") with open(path, "a"): pass start_watching(path=path) os.remove(path) event, _ = event_queue.get(timeout=5) assert repr(event) def test_event_equality(p: P) -> None: wd_parent_dir = 42 filename = "file.ext" full_path = p(filename) event1 = InotifyEvent(wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path) event2 = InotifyEvent(wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path) event3 = InotifyEvent(wd_parent_dir, InotifyConstants.IN_ACCESS, 0, filename, full_path) assert event1 == event2 assert event1 != event3 assert event2 != event3 def test_select_fd(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: # We open a file 2048 times to ensure that we exhaust 1024 file # descriptors, the limit of a select() call. path = p("new_file") with open(path, "a"): pass with ExitStack() as stack: for _i in range(2048): stack.enter_context(open(path)) # Watch this file for deletion (copied from `test_watch_file`) path = p("this_is_a_file") with open(path, "a"): pass start_watching(path=path) os.remove(path) event, _ = event_queue.get(timeout=5) assert repr(event) watchdog-6.0.0/tests/test_isolated.py000066400000000000000000000014471471115752600177130ustar00rootroot00000000000000import importlib import pytest from watchdog.utils import platform from .utils import run_isolated_test # Kqueue isn't supported by Eventlet, so BSD is out # Current usage ReadDirectoryChangesW on Windows is blocking, though async may be possible @pytest.mark.skipif(not platform.is_linux(), reason="Eventlet only supported in Linux") def test_observer_stops_in_eventlet(): if not importlib.util.find_spec("eventlet"): pytest.skip("eventlet not installed") run_isolated_test("eventlet_observer_stops.py") @pytest.mark.skipif(not platform.is_linux(), reason="Eventlet only supported in Linux") def 
test_eventlet_skip_repeat_queue(): if not importlib.util.find_spec("eventlet"): pytest.skip("eventlet not installed") run_isolated_test("eventlet_skip_repeat_queue.py") watchdog-6.0.0/tests/test_logging_event_handler.py000066400000000000000000000051271471115752600224320ustar00rootroot00000000000000from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CLOSED, EVENT_TYPE_CLOSED_NO_WRITE, EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, EVENT_TYPE_OPENED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileClosedNoWriteEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, FileSystemEvent, LoggingEventHandler, ) path_1 = "/path/xyz" path_2 = "/path/abc" class _TestableEventHandler(LoggingEventHandler): def on_any_event(self, event): assert isinstance(event, FileSystemEvent) def on_modified(self, event): super().on_modified(event) assert event.event_type == EVENT_TYPE_MODIFIED def on_deleted(self, event): super().on_deleted(event) assert event.event_type == EVENT_TYPE_DELETED def on_moved(self, event): super().on_moved(event) assert event.event_type == EVENT_TYPE_MOVED def on_created(self, event): super().on_created(event) assert event.event_type == EVENT_TYPE_CREATED def on_closed(self, event): super().on_closed(event) assert event.event_type == EVENT_TYPE_CLOSED def on_closed_no_write(self, event): super().on_closed_no_write(event) assert event.event_type == EVENT_TYPE_CLOSED_NO_WRITE def on_opened(self, event): super().on_opened(event) assert event.event_type == EVENT_TYPE_OPENED def test_logging_event_handler_dispatch(): dir_del_event = DirDeletedEvent("/path/blah.py") file_del_event = FileDeletedEvent("/path/blah.txt") dir_cre_event = DirCreatedEvent("/path/blah.py") file_cre_event = FileCreatedEvent("/path/blah.txt") dir_mod_event = DirModifiedEvent("/path/blah.py") file_mod_event = FileModifiedEvent("/path/blah.txt") dir_mov_event = 
DirMovedEvent("/path/blah.py", "/path/blah") file_mov_event = FileMovedEvent("/path/blah.txt", "/path/blah") file_ope_event = FileOpenedEvent("/path/blah.txt") file_clo_event = FileClosedEvent("/path/blah.txt") file_clo_nw_event = FileClosedNoWriteEvent("/path/blah.txt") all_events = [ dir_mod_event, dir_del_event, dir_cre_event, dir_mov_event, file_mod_event, file_del_event, file_cre_event, file_mov_event, file_ope_event, file_clo_event, file_clo_nw_event, ] handler = _TestableEventHandler() for event in all_events: handler.dispatch(event) watchdog-6.0.0/tests/test_observer.py000066400000000000000000000076441471115752600177430ustar00rootroot00000000000000from __future__ import annotations import contextlib import threading from typing import TYPE_CHECKING from unittest.mock import patch import pytest from watchdog.events import FileModifiedEvent, FileSystemEventHandler from watchdog.observers.api import BaseObserver, EventEmitter if TYPE_CHECKING: from collections.abc import Iterator @pytest.fixture def observer() -> Iterator[BaseObserver]: obs = BaseObserver(EventEmitter) yield obs obs.stop() with contextlib.suppress(RuntimeError): obs.join() @pytest.fixture def observer2(): obs = BaseObserver(EventEmitter) yield obs obs.stop() with contextlib.suppress(RuntimeError): obs.join() def test_schedule_should_start_emitter_if_running(observer): observer.start() observer.schedule(None, "") (emitter,) = observer.emitters assert emitter.is_alive() def test_schedule_should_not_start_emitter_if_not_running(observer): observer.schedule(None, "") (emitter,) = observer.emitters assert not emitter.is_alive() def test_start_should_start_emitter(observer): observer.schedule(None, "") observer.start() (emitter,) = observer.emitters assert emitter.is_alive() def test_stop_should_stop_emitter(observer): observer.schedule(None, "") observer.start() (emitter,) = observer.emitters assert emitter.is_alive() observer.stop() observer.join() assert not observer.is_alive() assert not 
emitter.is_alive() def test_unschedule_self(observer): """ Tests that unscheduling a watch from within an event handler correctly correctly unregisters emitter and handler without deadlocking. """ class EventHandler(FileSystemEventHandler): def on_modified(self, event): observer.unschedule(watch) unschedule_finished.set() unschedule_finished = threading.Event() watch = observer.schedule(EventHandler(), "") observer.start() (emitter,) = observer.emitters emitter.queue_event(FileModifiedEvent("")) assert unschedule_finished.wait() assert len(observer.emitters) == 0 def test_schedule_after_unschedule_all(observer): observer.start() observer.schedule(None, "") assert len(observer.emitters) == 1 observer.unschedule_all() assert len(observer.emitters) == 0 observer.schedule(None, "") assert len(observer.emitters) == 1 def test_2_observers_on_the_same_path(observer, observer2): assert observer is not observer2 observer.schedule(None, "") assert len(observer.emitters) == 1 observer2.schedule(None, "") assert len(observer2.emitters) == 1 def test_start_failure_should_not_prevent_further_try(observer): observer.schedule(None, "") emitters = observer.emitters assert len(emitters) == 1 # Make the emitter to fail on start() def mocked_start(): raise OSError("Mock'ed!") emitter = next(iter(emitters)) with patch.object(emitter, "start", new=mocked_start), pytest.raises(OSError, match="Mock'ed!"): observer.start() # The emitter should be removed from the list assert len(observer.emitters) == 0 # Restoring the original behavior should work like there never be emitters observer.start() assert len(observer.emitters) == 0 # Re-scheduling the watch should work observer.schedule(None, "") assert len(observer.emitters) == 1 def test_schedule_failure_should_not_prevent_future_schedules(observer): observer.start() # Make the emitter fail on start(), and subsequently the observer to fail on schedule() def bad_start(_): raise OSError("Mock'ed!") with patch.object(EventEmitter, "start", 
new=bad_start), pytest.raises(OSError, match="Mock'ed!"): observer.schedule(None, "") # The emitter should not be in the list assert not observer.emitters # Re-scheduling the watch should work observer.schedule(None, "") assert len(observer.emitters) == 1 watchdog-6.0.0/tests/test_observers_api.py000066400000000000000000000061531471115752600207510ustar00rootroot00000000000000from __future__ import annotations import time from pathlib import Path import pytest from watchdog.events import FileModifiedEvent, FileOpenedEvent, LoggingEventHandler from watchdog.observers.api import BaseObserver, EventDispatcher, EventEmitter, EventQueue, ObservedWatch def test_observer_constructor(): ObservedWatch(Path("/foobar"), recursive=True) def test_observer__eq__(): watch1 = ObservedWatch("/foobar", recursive=True) watch2 = ObservedWatch("/foobar", recursive=True) watch_ne1 = ObservedWatch("/foo", recursive=True) watch_ne2 = ObservedWatch("/foobar", recursive=False) assert watch1 == watch2 assert watch1.__eq__(watch2) assert not watch1.__eq__(watch_ne1) assert not watch1.__eq__(watch_ne2) def test_observer__ne__(): watch1 = ObservedWatch("/foobar", recursive=True) watch2 = ObservedWatch("/foobar", recursive=True) watch_ne1 = ObservedWatch("/foo", recursive=True) watch_ne2 = ObservedWatch("/foobar", recursive=False) assert not watch1.__ne__(watch2) assert watch1.__ne__(watch_ne1) assert watch1.__ne__(watch_ne2) def test_observer__repr__(): observed_watch = ObservedWatch("/foobar", recursive=True) repr_str = "" assert observed_watch.__repr__() == repr(observed_watch) assert repr(observed_watch) == repr_str observed_watch = ObservedWatch("/foobar", recursive=False, event_filter=[FileOpenedEvent, FileModifiedEvent]) repr_str = "" assert observed_watch.__repr__() == repr(observed_watch) assert repr(observed_watch) == repr_str def test_event_emitter(): event_queue = EventQueue() watch = ObservedWatch("/foobar", recursive=True) event_emitter = EventEmitter(event_queue, watch, timeout=1) 
event_emitter.queue_event(FileModifiedEvent("/foobar/blah")) def test_event_dispatcher(): event = FileModifiedEvent("/foobar") watch = ObservedWatch("/path", recursive=True) class TestableEventDispatcher(EventDispatcher): def dispatch_event(self, event, watch): assert True event_dispatcher = TestableEventDispatcher() event_dispatcher.event_queue.put((event, watch)) event_dispatcher.start() time.sleep(1) event_dispatcher.stop() event_dispatcher.join() def test_observer_basic(): observer = BaseObserver(EventEmitter) handler = LoggingEventHandler() watch = observer.schedule(handler, "/foobar", recursive=True) observer.add_handler_for_watch(handler, watch) observer.add_handler_for_watch(handler, watch) observer.remove_handler_for_watch(handler, watch) with pytest.raises(KeyError): observer.remove_handler_for_watch(handler, watch) observer.unschedule(watch) with pytest.raises(KeyError): observer.unschedule(watch) watch = observer.schedule(handler, "/foobar", recursive=True) observer.event_queue.put((FileModifiedEvent("/foobar"), watch)) observer.start() time.sleep(1) observer.unschedule_all() observer.stop() observer.join() watchdog-6.0.0/tests/test_observers_polling.py000066400000000000000000000067621471115752600216520ustar00rootroot00000000000000from __future__ import annotations import os from queue import Empty, Queue from time import sleep import pytest from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, ) from watchdog.observers.api import ObservedWatch from watchdog.observers.polling import PollingEmitter as Emitter from .shell import mkdir, mkdtemp, msize, mv, rm, touch SLEEP_TIME = 0.4 TEMP_DIR = mkdtemp() def p(*args): """ Convenience function to join the temporary directory path with the provided arguments. 
""" return os.path.join(TEMP_DIR, *args) @pytest.fixture def event_queue(): return Queue() @pytest.fixture def emitter(event_queue): watch = ObservedWatch(TEMP_DIR, recursive=True) em = Emitter(event_queue, watch, timeout=0.2) em.start() yield em em.stop() em.join(5) def test___init__(event_queue, emitter): sleep(SLEEP_TIME) mkdir(p("project")) sleep(SLEEP_TIME) mkdir(p("project", "blah")) sleep(SLEEP_TIME) touch(p("afile")) sleep(SLEEP_TIME) touch(p("fromfile")) sleep(SLEEP_TIME) mv(p("fromfile"), p("project", "tofile")) sleep(SLEEP_TIME) touch(p("afile")) sleep(SLEEP_TIME) mv(p("project", "blah"), p("project", "boo")) sleep(SLEEP_TIME) rm(p("project"), recursive=True) sleep(SLEEP_TIME) rm(p("afile")) sleep(SLEEP_TIME) msize(p("bfile")) sleep(SLEEP_TIME) rm(p("bfile")) sleep(SLEEP_TIME) emitter.stop() # What we need here for the tests to pass is a collection type # that is: # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. expected = { DirModifiedEvent(p()), DirCreatedEvent(p("project")), DirModifiedEvent(p("project")), DirCreatedEvent(p("project", "blah")), FileCreatedEvent(p("afile")), DirModifiedEvent(p()), FileCreatedEvent(p("fromfile")), DirModifiedEvent(p()), DirModifiedEvent(p()), FileModifiedEvent(p("afile")), DirModifiedEvent(p("project")), DirModifiedEvent(p()), FileDeletedEvent(p("project", "tofile")), DirDeletedEvent(p("project", "boo")), DirDeletedEvent(p("project")), DirModifiedEvent(p()), FileDeletedEvent(p("afile")), DirModifiedEvent(p()), FileCreatedEvent(p("bfile")), FileModifiedEvent(p("bfile")), DirModifiedEvent(p()), FileDeletedEvent(p("bfile")), } expected.add(FileMovedEvent(p("fromfile"), p("project", "tofile"))) expected.add(DirMovedEvent(p("project", "blah"), p("project", "boo"))) got = set() while True: try: event, _ = event_queue.get_nowait() got.add(event) except Empty: break assert expected == got def test_delete_watched_dir(event_queue, emitter): rm(p(""), recursive=True) 
sleep(SLEEP_TIME) emitter.stop() # What we need here for the tests to pass is a collection type # that is: # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. expected = { DirDeletedEvent(os.path.dirname(p(""))), } got = set() while True: try: event, _ = event_queue.get_nowait() got.add(event) except Empty: break assert expected == got watchdog-6.0.0/tests/test_observers_winapi.py000066400000000000000000000071441471115752600214700ustar00rootroot00000000000000from __future__ import annotations import os import os.path from queue import Empty, Queue from time import sleep import pytest from watchdog.events import DirCreatedEvent, DirMovedEvent from watchdog.observers.api import ObservedWatch from watchdog.utils import platform from .shell import mkdir, mkdtemp, mv, rm # make pytest aware this is windows only if not platform.is_windows(): pytest.skip("Windows only.", allow_module_level=True) from watchdog.observers.read_directory_changes import WindowsApiEmitter SLEEP_TIME = 2 # Path with non-ASCII temp_dir = os.path.join(mkdtemp(), "Strange \N{SNOWMAN}") os.makedirs(temp_dir) def p(*args): """ Convenience function to join the temporary directory path with the provided arguments. """ return os.path.join(temp_dir, *args) @pytest.fixture def event_queue(): return Queue() @pytest.fixture def emitter(event_queue): watch = ObservedWatch(temp_dir, recursive=True) em = WindowsApiEmitter(event_queue, watch, timeout=0.2) yield em em.stop() def test___init__(event_queue, emitter): emitter.start() sleep(SLEEP_TIME) mkdir(p("fromdir")) sleep(SLEEP_TIME) mv(p("fromdir"), p("todir")) sleep(SLEEP_TIME) emitter.stop() # What we need here for the tests to pass is a collection type # that is: # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. 
expected = { DirCreatedEvent(p("fromdir")), DirMovedEvent(p("fromdir"), p("todir")), } got = set() while True: try: event, _ = event_queue.get_nowait() except Empty: break else: got.add(event) assert expected == got def test_root_deleted(event_queue, emitter): r"""Test the event got when removing the watched folder. The regression to prevent is: Exception in thread Thread-1: Traceback (most recent call last): File "watchdog\observers\winapi.py", line 333, in read_directory_changes ctypes.byref(nbytes), None, None) File "watchdog\observers\winapi.py", line 105, in _errcheck_bool raise ctypes.WinError() PermissionError: [WinError 5] Access refused. During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Python37-32\lib\threading.py", line 926, in _bootstrap_inner self.run() File "watchdog\observers\api.py", line 145, in run self.queue_events(self.timeout) File "watchdog\observers\read_directory_changes.py", line 76, in queue_events winapi_events = self._read_events() File "watchdog\observers\read_directory_changes.py", line 73, in _read_events return read_events(self._whandle, self.watch.path, recursive=self.watch.is_recursive) File "watchdog\observers\winapi.py", line 387, in read_events buf, nbytes = read_directory_changes(handle, path, recursive=recursive) File "watchdog\observers\winapi.py", line 340, in read_directory_changes return _generate_observed_path_deleted_event() File "watchdog\observers\winapi.py", line 298, in _generate_observed_path_deleted_event event = FileNotifyInformation(0, FILE_ACTION_DELETED_SELF, len(path), path.value) TypeError: expected bytes, str found """ emitter.start() sleep(SLEEP_TIME) # This should not fail rm(p(), recursive=True) sleep(SLEEP_TIME) # The emitter is automatically stopped, with no error assert not emitter.should_keep_running() 
watchdog-6.0.0/tests/test_pattern_matching_event_handler.py000066400000000000000000000136771471115752600243440ustar00rootroot00000000000000from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, PatternMatchingEventHandler, ) from watchdog.utils.patterns import filter_paths path_1 = "/path/xyz" path_2 = "/path/abc" g_allowed_patterns = ["*.py", "*.txt"] g_ignore_patterns = ["*.foo"] def assert_patterns(event): paths = [event.src_path, event.dest_path] if hasattr(event, "dest_path") else [event.src_path] filtered_paths = filter_paths( paths, included_patterns=["*.py", "*.txt"], excluded_patterns=["*.pyc"], case_sensitive=False, ) assert filtered_paths def test_dispatch(): # Utilities. patterns = ["*.py", "*.txt"] ignore_patterns = ["*.pyc"] dir_del_event_match = DirDeletedEvent("/path/blah.py") dir_del_event_not_match = DirDeletedEvent("/path/foobar") dir_del_event_ignored = DirDeletedEvent("/path/foobar.pyc") file_del_event_match = FileDeletedEvent("/path/blah.txt") file_del_event_not_match = FileDeletedEvent("/path/foobar") file_del_event_ignored = FileDeletedEvent("/path/blah.pyc") dir_cre_event_match = DirCreatedEvent("/path/blah.py") dir_cre_event_not_match = DirCreatedEvent("/path/foobar") dir_cre_event_ignored = DirCreatedEvent("/path/foobar.pyc") file_cre_event_match = FileCreatedEvent("/path/blah.txt") file_cre_event_not_match = FileCreatedEvent("/path/foobar") file_cre_event_ignored = FileCreatedEvent("/path/blah.pyc") dir_mod_event_match = DirModifiedEvent("/path/blah.py") dir_mod_event_not_match = DirModifiedEvent("/path/foobar") dir_mod_event_ignored = DirModifiedEvent("/path/foobar.pyc") file_mod_event_match = FileModifiedEvent("/path/blah.txt") file_mod_event_not_match = FileModifiedEvent("/path/foobar") file_mod_event_ignored = 
FileModifiedEvent("/path/blah.pyc") dir_mov_event_match = DirMovedEvent("/path/blah.py", "/path/blah") dir_mov_event_not_match = DirMovedEvent("/path/foobar", "/path/blah") dir_mov_event_ignored = DirMovedEvent("/path/foobar.pyc", "/path/blah") file_mov_event_match = FileMovedEvent("/path/blah.txt", "/path/blah") file_mov_event_not_match = FileMovedEvent("/path/foobar", "/path/blah") file_mov_event_ignored = FileMovedEvent("/path/blah.pyc", "/path/blah") all_dir_events = [ dir_mod_event_match, dir_mod_event_not_match, dir_mod_event_ignored, dir_del_event_match, dir_del_event_not_match, dir_del_event_ignored, dir_cre_event_match, dir_cre_event_not_match, dir_cre_event_ignored, dir_mov_event_match, dir_mov_event_not_match, dir_mov_event_ignored, ] all_file_events = [ file_mod_event_match, file_mod_event_not_match, file_mod_event_ignored, file_del_event_match, file_del_event_not_match, file_del_event_ignored, file_cre_event_match, file_cre_event_not_match, file_cre_event_ignored, file_mov_event_match, file_mov_event_not_match, file_mov_event_ignored, ] all_events = all_file_events + all_dir_events def assert_check_directory(handler, event): assert not (handler.ignore_directories and event.is_directory) class TestableEventHandler(PatternMatchingEventHandler): def on_any_event(self, event): assert_check_directory(self, event) def on_modified(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MODIFIED assert_patterns(event) def on_deleted(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_DELETED assert_patterns(event) def on_moved(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MOVED assert_patterns(event) def on_created(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_CREATED assert_patterns(event) no_dirs_handler = TestableEventHandler(patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=True) 
handler = TestableEventHandler(patterns=patterns, ignore_patterns=ignore_patterns) for event in all_events: no_dirs_handler.dispatch(event) for event in all_events: handler.dispatch(event) def test_handler(): handler1 = PatternMatchingEventHandler( patterns=g_allowed_patterns, ignore_patterns=g_ignore_patterns, ignore_directories=True, ) handler2 = PatternMatchingEventHandler(patterns=g_allowed_patterns, ignore_patterns=g_ignore_patterns) assert handler1.patterns == g_allowed_patterns assert handler1.ignore_patterns == g_ignore_patterns assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_directories(): handler1 = PatternMatchingEventHandler( patterns=g_allowed_patterns, ignore_patterns=g_ignore_patterns, ignore_directories=True, ) handler2 = PatternMatchingEventHandler(patterns=g_allowed_patterns, ignore_patterns=g_ignore_patterns) assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_patterns(): handler1 = PatternMatchingEventHandler( patterns=g_allowed_patterns, ignore_patterns=g_ignore_patterns, ignore_directories=True, ) assert handler1.ignore_patterns == g_ignore_patterns def test_patterns(): handler1 = PatternMatchingEventHandler( patterns=g_allowed_patterns, ignore_patterns=g_ignore_patterns, ignore_directories=True, ) assert handler1.patterns == g_allowed_patterns watchdog-6.0.0/tests/test_patterns.py000066400000000000000000000050171471115752600177440ustar00rootroot00000000000000from __future__ import annotations import pytest from watchdog.utils.patterns import _match_path, filter_paths, match_any_paths @pytest.mark.parametrize( ("raw_path", "included_patterns", "excluded_patterns", "case_sensitive", "expected"), [ ("/users/gorakhargosh/foobar.py", {"*.py"}, {"*.PY"}, True, True), ("/users/gorakhargosh/", {"*.py"}, {"*.txt"}, False, False), ("/users/gorakhargosh/foobar.py", {"*.py"}, {"*.PY"}, False, ValueError), ], ) def test_match_path(raw_path, included_patterns, 
excluded_patterns, case_sensitive, expected): if expected is ValueError: with pytest.raises(expected): _match_path(raw_path, included_patterns, excluded_patterns, case_sensitive=case_sensitive) else: assert _match_path(raw_path, included_patterns, excluded_patterns, case_sensitive=case_sensitive) is expected @pytest.mark.parametrize( ("included_patterns", "excluded_patterns", "case_sensitive", "expected"), [ (None, None, True, None), (None, None, False, None), ( ["*.py", "*.conf"], ["*.status"], True, {"/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"}, ), ], ) def test_filter_paths(included_patterns, excluded_patterns, case_sensitive, expected): pathnames = { "/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python", } actual = set( filter_paths( pathnames, included_patterns=included_patterns, excluded_patterns=excluded_patterns, case_sensitive=case_sensitive, ) ) assert actual == expected if expected else pathnames @pytest.mark.parametrize( ("included_patterns", "excluded_patterns", "case_sensitive", "expected"), [ (None, None, True, True), (None, None, False, True), (["*py", "*.conf"], ["*.status"], True, True), (["*.txt"], None, False, False), (["*.txt"], None, True, False), ], ) def test_match_any_paths(included_patterns, excluded_patterns, case_sensitive, expected): pathnames = { "/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python", } assert ( match_any_paths( pathnames, included_patterns=included_patterns, excluded_patterns=excluded_patterns, case_sensitive=case_sensitive, ) == expected ) watchdog-6.0.0/tests/test_regex_matching_event_handler.py000066400000000000000000000173231471115752600237710ustar00rootroot00000000000000from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, 
FileDeletedEvent, FileModifiedEvent, FileMovedEvent, LoggingEventHandler, RegexMatchingEventHandler, ) path_1 = "/path/xyz" path_2 = "/path/abc" g_allowed_regexes = [r".*\.py", r".*\.txt"] g_allowed_str_regexes = r".*\.py" g_ignore_regexes = [r".*\.pyc"] def test_dispatch(): # Utilities. regexes = [r".*\.py", r".*\.txt"] ignore_regexes = [r".*\.pyc"] def assert_regexes(handler, event): paths = [event.src_path, event.dest_path] if hasattr(event, "dest_path") else [event.src_path] filtered_paths = set() for p in paths: if any(r.match(p) for r in handler.regexes): filtered_paths.add(p) assert filtered_paths dir_del_event_match = DirDeletedEvent("/path/blah.py") dir_del_event_not_match = DirDeletedEvent("/path/foobar") dir_del_event_ignored = DirDeletedEvent("/path/foobar.pyc") file_del_event_match = FileDeletedEvent("/path/blah.txt") file_del_event_not_match = FileDeletedEvent("/path/foobar") file_del_event_ignored = FileDeletedEvent("/path/blah.pyc") dir_cre_event_match = DirCreatedEvent("/path/blah.py") dir_cre_event_not_match = DirCreatedEvent("/path/foobar") dir_cre_event_ignored = DirCreatedEvent("/path/foobar.pyc") file_cre_event_match = FileCreatedEvent("/path/blah.txt") file_cre_event_not_match = FileCreatedEvent("/path/foobar") file_cre_event_ignored = FileCreatedEvent("/path/blah.pyc") dir_mod_event_match = DirModifiedEvent("/path/blah.py") dir_mod_event_not_match = DirModifiedEvent("/path/foobar") dir_mod_event_ignored = DirModifiedEvent("/path/foobar.pyc") file_mod_event_match = FileModifiedEvent("/path/blah.txt") file_mod_event_not_match = FileModifiedEvent("/path/foobar") file_mod_event_ignored = FileModifiedEvent("/path/blah.pyc") dir_mov_event_match = DirMovedEvent("/path/blah.py", "/path/blah") dir_mov_event_not_match = DirMovedEvent("/path/foobar", "/path/blah") dir_mov_event_ignored = DirMovedEvent("/path/foobar.pyc", "/path/blah") file_mov_event_match = FileMovedEvent("/path/blah.txt", "/path/blah") file_mov_event_not_match = 
FileMovedEvent("/path/foobar", "/path/blah") file_mov_event_ignored = FileMovedEvent("/path/blah.pyc", "/path/blah") all_dir_events = [ dir_mod_event_match, dir_mod_event_not_match, dir_mod_event_ignored, dir_del_event_match, dir_del_event_not_match, dir_del_event_ignored, dir_cre_event_match, dir_cre_event_not_match, dir_cre_event_ignored, dir_mov_event_match, dir_mov_event_not_match, dir_mov_event_ignored, ] all_file_events = [ file_mod_event_match, file_mod_event_not_match, file_mod_event_ignored, file_del_event_match, file_del_event_not_match, file_del_event_ignored, file_cre_event_match, file_cre_event_not_match, file_cre_event_ignored, file_mov_event_match, file_mov_event_not_match, file_mov_event_ignored, ] all_events = all_file_events + all_dir_events def assert_check_directory(handler, event): assert not (handler.ignore_directories and event.is_directory) class TestableEventHandler(RegexMatchingEventHandler): def on_any_event(self, event): assert_check_directory(self, event) def on_modified(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MODIFIED assert_regexes(self, event) def on_deleted(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_DELETED assert_regexes(self, event) def on_moved(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MOVED assert_regexes(self, event) def on_created(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_CREATED assert_regexes(self, event) no_dirs_handler = TestableEventHandler(regexes=regexes, ignore_regexes=ignore_regexes, ignore_directories=True) handler = TestableEventHandler(regexes=regexes, ignore_regexes=ignore_regexes) for event in all_events: no_dirs_handler.dispatch(event) for event in all_events: handler.dispatch(event) def test_handler(): handler1 = RegexMatchingEventHandler( regexes=g_allowed_regexes, ignore_regexes=g_ignore_regexes, ignore_directories=True, ) 
handler2 = RegexMatchingEventHandler(regexes=g_allowed_regexes, ignore_regexes=g_ignore_regexes) assert [r.pattern for r in handler1.regexes] == g_allowed_regexes assert [r.pattern for r in handler1.ignore_regexes] == g_ignore_regexes assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_directories(): handler1 = RegexMatchingEventHandler( regexes=g_allowed_regexes, ignore_regexes=g_ignore_regexes, ignore_directories=True, ) handler2 = RegexMatchingEventHandler(regexes=g_allowed_regexes, ignore_regexes=g_ignore_regexes) assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_regexes(): handler1 = RegexMatchingEventHandler( regexes=g_allowed_regexes, ignore_regexes=g_ignore_regexes, ignore_directories=True, ) assert [r.pattern for r in handler1.ignore_regexes] == g_ignore_regexes def test_regexes(): handler1 = RegexMatchingEventHandler( regexes=g_allowed_regexes, ignore_regexes=g_ignore_regexes, ignore_directories=True, ) assert [r.pattern for r in handler1.regexes] == g_allowed_regexes def test_str_regexes(): handler1 = RegexMatchingEventHandler( regexes=g_allowed_str_regexes, ignore_regexes=g_ignore_regexes, case_sensitive=True, ) assert [r.pattern for r in handler1.regexes] == [g_allowed_str_regexes] def test_logging_event_handler_dispatch(): class _TestableEventHandler(LoggingEventHandler): def on_any_event(self, event): pass def on_modified(self, event): super().on_modified(event) assert event.event_type == EVENT_TYPE_MODIFIED def on_deleted(self, event): super().on_deleted(event) assert event.event_type == EVENT_TYPE_DELETED def on_moved(self, event): super().on_moved(event) assert event.event_type == EVENT_TYPE_MOVED def on_created(self, event): super().on_created(event) assert event.event_type == EVENT_TYPE_CREATED # Utilities. 
dir_del_event = DirDeletedEvent("/path/blah.py") file_del_event = FileDeletedEvent("/path/blah.txt") dir_cre_event = DirCreatedEvent("/path/blah.py") file_cre_event = FileCreatedEvent("/path/blah.txt") dir_mod_event = DirModifiedEvent("/path/blah.py") file_mod_event = FileModifiedEvent("/path/blah.txt") dir_mov_event = DirMovedEvent("/path/blah.py", "/path/blah") file_mov_event = FileMovedEvent("/path/blah.txt", "/path/blah") all_events = [ dir_mod_event, dir_del_event, dir_cre_event, dir_mov_event, file_mod_event, file_del_event, file_cre_event, file_mov_event, ] handler = _TestableEventHandler() for event in all_events: handler.dispatch(event) watchdog-6.0.0/tests/test_skip_repeats_queue.py000066400000000000000000000032601471115752600217770ustar00rootroot00000000000000from __future__ import annotations from watchdog import events from watchdog.utils.bricks import SkipRepeatsQueue def test_basic_queue(): q = SkipRepeatsQueue() e1 = (2, "fred") e2 = (2, "george") e3 = (4, "sally") q.put(e1) q.put(e2) q.put(e3) assert e1 == q.get() assert e2 == q.get() assert e3 == q.get() assert q.empty() def test_allow_nonconsecutive(): q = SkipRepeatsQueue() e1 = (2, "fred") e2 = (2, "george") q.put(e1) q.put(e2) q.put(e1) # repeat the first entry assert e1 == q.get() assert e2 == q.get() assert e1 == q.get() assert q.empty() def test_put_with_watchdog_events(): # FileSystemEvent.__ne__() uses the key property without # doing any type checking. 
Since _last_item is set to # None in __init__(), an AttributeError is raised when # FileSystemEvent.__ne__() tries to use None.key queue = SkipRepeatsQueue() dummy_file = "dummy.txt" event = events.FileCreatedEvent(dummy_file) queue.put(event) assert queue.get() is event def test_prevent_consecutive(): q = SkipRepeatsQueue() e1 = (2, "fred") e2 = (2, "george") q.put(e1) q.put(e1) # repeat the first entry (this shouldn't get added) q.put(e2) assert e1 == q.get() assert e2 == q.get() assert q.empty() def test_consecutives_allowed_across_empties(): q = SkipRepeatsQueue() e1 = (2, "fred") q.put(e1) q.put(e1) # repeat the first entry (this shouldn't get added) assert e1 == q.get() assert q.empty() q.put(e1) # this repeat is allowed because 'last' added is now gone from queue assert e1 == q.get() assert q.empty() watchdog-6.0.0/tests/test_snapshot_diff.py000066400000000000000000000152131471115752600207320ustar00rootroot00000000000000from __future__ import annotations import errno import os import pickle import time from unittest.mock import patch from watchdog.utils import platform from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff, EmptyDirectorySnapshot from .shell import mkdir, mv, rm, touch def wait(): """ Wait long enough for file/folder mtime to change. This is needed to be able to detected modifications. 
""" if platform.is_darwin() or platform.is_windows(): # on macOS resolution of stat.mtime is only 1 second time.sleep(1.5) else: time.sleep(0.5) def test_pickle(p): """It should be possible to pickle a snapshot.""" mkdir(p("dir1")) snasphot = DirectorySnapshot(p("dir1")) pickle.dumps(snasphot) def test_move_to(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("dir2")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p("dir2"))) assert diff.files_created == [p("dir2", "b")] def test_move_to_with_context_manager(p): mkdir(p("dir1")) touch(p("dir1", "a")) mkdir(p("dir2")) dir1_cm = DirectorySnapshotDiff.ContextManager(p("dir1")) dir2_cm = DirectorySnapshotDiff.ContextManager(p("dir2")) with dir1_cm, dir2_cm: mv(p("dir1", "a"), p("dir2", "b")) assert dir1_cm.diff.files_deleted == [p("dir1", "a")] assert dir2_cm.diff.files_created == [p("dir2", "b")] def test_move_from(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("dir1")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p("dir1"))) assert diff.files_deleted == [p("dir1", "a")] def test_move_internal(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.files_moved == [(p("dir1", "a"), p("dir2", "b"))] assert diff.files_created == [] assert diff.files_deleted == [] def test_move_replace(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) touch(p("dir2", "b")) ref = DirectorySnapshot(p("")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.files_moved == [(p("dir1", "a"), p("dir2", "b"))] assert diff.files_deleted == [p("dir2", "b")] assert diff.files_created == [] def test_dir_modify_on_create(p): ref = DirectorySnapshot(p("")) wait() touch(p("a")) diff = 
DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.dirs_modified == [p("")] def test_dir_modify_on_move(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("")) wait() mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert set(diff.dirs_modified) == {p("dir1"), p("dir2")} def test_detect_modify_for_moved_files(p): touch(p("a")) ref = DirectorySnapshot(p("")) wait() touch(p("a")) mv(p("a"), p("b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.files_moved == [(p("a"), p("b"))] assert diff.files_modified == [p("a")] def test_replace_dir_with_file(p): # Replace a dir with a file of the same name just before the normal listdir # call and ensure it doesn't cause an exception def listdir_fcn(path): if path == p("root", "dir"): rm(path, recursive=True) touch(path) return os.scandir(path) mkdir(p("root")) mkdir(p("root", "dir")) # Should NOT raise an OSError (ENOTDIR) DirectorySnapshot(p("root"), listdir=listdir_fcn) def test_permission_error(p): # Test that unreadable folders are not raising exceptions mkdir(p("a", "b", "c"), parents=True) ref = DirectorySnapshot(p("")) walk_orig = DirectorySnapshot.walk def walk(self, root): """Generate a permission error on folder "a/b".""" # Generate the permission error if root.startswith(p("a", "b")): raise OSError(errno.EACCES, os.strerror(errno.EACCES)) # Mimic the original method yield from walk_orig(self, root) with patch.object(DirectorySnapshot, "walk", new=walk): # Should NOT raise an OSError (EACCES) new_snapshot = DirectorySnapshot(p("")) diff = DirectorySnapshotDiff(ref, new_snapshot) assert repr(diff) # Children of a/b/ are no more accessible and so removed in the new snapshot assert diff.dirs_deleted == [(p("a", "b", "c"))] def test_ignore_device(p): # Create a file and take a snapshot. 
touch(p("file")) ref = DirectorySnapshot(p("")) wait() inode_orig = DirectorySnapshot.inode inode_times = 0 def inode(self, path): # This function will always return a different device_id, # even for the same file. nonlocal inode_times result = inode_orig(self, path) inode_times += 1 return result[0], result[1] + inode_times # Set the custom inode function. with patch.object(DirectorySnapshot, "inode", new=inode): # If we make the diff of the same directory, since by default the # DirectorySnapshotDiff compares the snapshots using the device_id (and it will # be different), it thinks that the same file has been deleted and created again. snapshot = DirectorySnapshot(p("")) diff_with_device = DirectorySnapshotDiff(ref, snapshot) assert diff_with_device.files_deleted == [(p("file"))] assert diff_with_device.files_created == [(p("file"))] # Otherwise, if we choose to ignore the device, the file will not be detected as # deleted and re-created. snapshot = DirectorySnapshot(p("")) diff_without_device = DirectorySnapshotDiff(ref, snapshot, ignore_device=True) assert diff_without_device.files_deleted == [] assert diff_without_device.files_created == [] def test_empty_snapshot(p): # Create a file and declare a DirectorySnapshot and a EmptyDirectorySnapshot. # When we make the diff, although both objects were declared with the same items on # the directory, the file and directories created BEFORE the DirectorySnapshot will # be detected as newly created. 
touch(p("a")) mkdir(p("b", "c"), parents=True) ref = DirectorySnapshot(p("")) empty = EmptyDirectorySnapshot() diff = DirectorySnapshotDiff(empty, ref) assert diff.files_created == [p("a")] assert sorted(diff.dirs_created) == sorted([p(""), p("b"), p("b", "c")]) watchdog-6.0.0/tests/utils.py000066400000000000000000000075101471115752600162050ustar00rootroot00000000000000from __future__ import annotations import dataclasses import os import subprocess import sys from queue import Queue from typing import Protocol from watchdog.events import FileSystemEvent from watchdog.observers.api import EventEmitter, ObservedWatch from watchdog.utils import platform Emitter: type[EventEmitter] if platform.is_linux(): from watchdog.observers.inotify import InotifyEmitter as Emitter from watchdog.observers.inotify import InotifyFullEmitter elif platform.is_darwin(): from watchdog.observers.fsevents import FSEventsEmitter as Emitter elif platform.is_windows(): from watchdog.observers.read_directory_changes import WindowsApiEmitter as Emitter elif platform.is_bsd(): from watchdog.observers.kqueue import KqueueEmitter as Emitter class P(Protocol): def __call__(self, *args: str) -> str: ... class StartWatching(Protocol): def __call__( self, *, path: bytes | str | None = ..., use_full_emitter: bool = ..., recursive: bool = ..., ) -> EventEmitter: ... class ExpectEvent(Protocol): def __call__(self, expected_event: FileSystemEvent, *, timeout: float = ...) -> None: ... TestEventQueue = Queue[tuple[FileSystemEvent, ObservedWatch]] @dataclasses.dataclass() class Helper: tmp: str emitters: list[EventEmitter] = dataclasses.field(default_factory=list) event_queue: TestEventQueue = dataclasses.field(default_factory=Queue) def joinpath(self, *args: str) -> str: return os.path.join(self.tmp, *args) def start_watching( self, *, path: bytes | str | None = None, use_full_emitter: bool = False, recursive: bool = True, ) -> EventEmitter: # TODO: check if other platforms expect the trailing slash (e.g. 
`p('')`) path = self.tmp if path is None else path watcher = ObservedWatch(path, recursive=recursive) emitter_cls = InotifyFullEmitter if platform.is_linux() and use_full_emitter else Emitter emitter = emitter_cls(self.event_queue, watcher) if platform.is_darwin(): # TODO: I think this could be better... .suppress_history should maybe # become a common attribute. from watchdog.observers.fsevents import FSEventsEmitter assert isinstance(emitter, FSEventsEmitter) emitter.suppress_history = True self.emitters.append(emitter) emitter.start() return emitter def expect_event(self, expected_event: FileSystemEvent, timeout: float = 2) -> None: """Utility function to wait up to `timeout` seconds for an `event_type` for `path` to show up in the queue. Provides some robustness for the otherwise flaky nature of asynchronous notifications. """ assert self.event_queue.get(timeout=timeout)[0] == expected_event def close(self) -> None: for emitter in self.emitters: emitter.stop() for emitter in self.emitters: if emitter.is_alive(): emitter.join(5) alive = [emitter.is_alive() for emitter in self.emitters] self.emitters = [] assert alive == [False] * len(alive) def run_isolated_test(path): isolated_test_prefix = os.path.join("tests", "isolated") path = os.path.abspath(os.path.join(isolated_test_prefix, path)) src_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "src") new_env = os.environ.copy() new_env["PYTHONPATH"] = os.pathsep.join([*sys.path, src_dir]) new_argv = [sys.executable, path] p = subprocess.Popen( new_argv, env=new_env, ) # in case test goes haywire, don't let it run forever timeout = 10 try: p.communicate(timeout=timeout) except subprocess.TimeoutExpired: p.kill() raise assert p.returncode == 0 watchdog-6.0.0/tools/000077500000000000000000000000001471115752600144665ustar00rootroot00000000000000watchdog-6.0.0/tools/watchmedo.bat000066400000000000000000000017341471115752600171360ustar00rootroot00000000000000@REM Copyright 2018-2024 Mickaël 
Schoentgen & contributors @REM Copyright 2012-2018 Google, Inc. @REM Copyright 2011-2012 Yesudeep Mangalapilly @REM Copyright 2001-2010 The SCons Foundation @REM watchmedo.bat - Wrapper .bat file for the watchmedo Python script. @echo off set SCRIPT_ERRORLEVEL= if "%OS%" == "Windows_NT" goto WinNT @REM Windows 9x/Me you better not have more than 9 arguments. python -c "from watchdog import watchmedo; watchmedo.main()" %1 %2 %3 %4 %5 %6 %7 %8 %9 @REM No way to set exit status of this script for 9x/Me goto endscript @REM Windows NT+ :WinNT setlocal set path=%~dp0;%~dp0..;%path% python -c "from watchdog import watchmedo; watchmedo.main()" %* endlocal & set SCRIPT_ERRORLEVEL=%ERRORLEVEL% if not "%COMSPEC%" == "%SystemRoot%\system32\cmd.exe" goto returncode if errorlevel 9009 echo You do not have python in your PATH environment variable. goto endscript :returncode exit /B %SCRIPT_ERRORLEVEL% :endscript call :returncode %SCRIPT_ERRORLEVEL% watchdog-6.0.0/tox.ini000066400000000000000000000027141471115752600146450ustar00rootroot00000000000000[tox] envlist = py3{9,10,11,12,13} pypy3 docs types lint skip_missing_interpreters = True [testenv] usedevelop = true deps = -r requirements-tests.txt extras = watchmedo commands = python -m pytest {posargs} [testenv:docs] usedevelop = true deps = -r requirements-tests.txt extras = watchmedo commands = sphinx-build -aEWb html docs/source docs/build/html [testenv:lint] usedevelop = true deps = -r requirements-tests.txt extras = watchmedo commands = python -m ruff format docs/source/examples src tests python -m ruff check --fix --unsafe-fixes src docs/source/examples tests [testenv:types] usedevelop = true deps = -r requirements-tests.txt commands = # General python -m mypy docs/source/examples python -m mypy src # OS specific python -m mypy --platform darwin --disable-error-code unused-ignore \ src/watchdog/observers/fsevents.py \ src/watchdog/observers/fsevents2.py python -m mypy --platform freebsd --disable-error-code unused-ignore \ 
src/watchdog/observers/kqueue.py python -m mypy --platform linux --disable-error-code unused-ignore \ src/watchdog/observers/inotify.py \ src/watchdog/observers/inotify_buffer.py \ src/watchdog/observers/inotify_c.py python -m mypy --platform win32 --disable-error-code unused-ignore \ src/watchdog/observers/read_directory_changes.py \ src/watchdog/observers/winapi.py