pax_global_header00006660000000000000000000000064144060210310014501gustar00rootroot0000000000000052 comment=da09c060a007fe7fddde27592e4f63ae1e8697bc watchdog-3.0.0/000077500000000000000000000000001440602103100133015ustar00rootroot00000000000000watchdog-3.0.0/.cirrus.yml000066400000000000000000000013141440602103100154100ustar00rootroot00000000000000task: matrix: freebsd_instance: image_family: freebsd-13-0 freebsd_instance: image_family: freebsd-12-2 install_script: - pkg install -y python38 py38-sqlite3 # Print the Python version, only to be sure we are running the version we want - python3.8 -c 'import platform; print("Python", platform.python_version())' # Check SQLite3 is installed - python3.8 -c 'import sqlite3; print("SQLite3", sqlite3.version)' setup_script: - python3.8 -m ensurepip - python3.8 -m pip install -U pip - python3.8 -m pip install -r requirements-tests.txt lint_script: - python3.8 -m flake8 docs src tests tools tests_script: - python3.8 -bb -m pytest tests watchdog-3.0.0/.github/000077500000000000000000000000001440602103100146415ustar00rootroot00000000000000watchdog-3.0.0/.github/workflows/000077500000000000000000000000001440602103100166765ustar00rootroot00000000000000watchdog-3.0.0/.github/workflows/build-and-publish.yml000066400000000000000000000102701440602103100227240ustar00rootroot00000000000000# Because this library provides extension modules for macOS, but not for other # platforms, we want to provide built distributions for each macOS platform, but we # explicitly DON'T want to provide a cross-platform pure-Python wheel to fall back on. # # This is because in the event that a new Python version is released or a new # macOS platform is released, macOS users won't be able to install the built # distributions we've provided and should fall back to the source distribution, # but pip's behavior is to prefer a pure-Python wheel first, which will be # missing the extension modules. 
# # However, to provide built distributions for Linux and Windows (which don't # have extension modules) we can just build a pure-Python wheel on that # platform and override the platform name manually via wheel's --plat-name # feature, to provide a platform-specific wheel for all platforms. name: Build & Publish on: push: branches: - master pull_request: branches: - '**' workflow_dispatch: inputs: branch: description: "The branch, tag or SHA to release from" required: true default: "master" concurrency: group: ${{ github.ref }}-${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name != 'pull_request' && github.sha || '' }} cancel-in-progress: true jobs: macos-built-distributions: name: Build macOS wheels runs-on: macos-latest timeout-minutes: 20 steps: - name: Checkout uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.branch }} - name: Install Python uses: actions/setup-python@v4 with: python-version: "3.11" - name: Install build dependencies run: python -m pip install cibuildwheel - name: Build wheels run: python -m cibuildwheel env: CIBW_SKIP: "cp36-*" # skip 3.6 wheels CIBW_ARCHS_MACOS: "x86_64 universal2 arm64" - uses: actions/upload-artifact@v3 with: name: python-package-distributions path: ./wheelhouse/*.whl pure-built-distributions: name: Build pure wheels runs-on: ubuntu-latest timeout-minutes: 5 steps: - name: Checkout uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.branch }} - name: Install Python uses: actions/setup-python@v4 with: python-version: "3.11" - name: Install build dependencies run: python -m pip install -U setuptools wheel - name: Build wheels run: | for platform in 'manylinux2014_x86_64' 'manylinux2014_i686' 'manylinux2014_aarch64' 'manylinux2014_armv7l' 'manylinux2014_ppc64' 'manylinux2014_ppc64le' 'manylinux2014_s390x' 'win32' 'win_amd64' 'win_ia64' do python setup.py bdist_wheel --plat-name $platform done - uses: actions/upload-artifact@v3 with: name: python-package-distributions path: ./dist/*.whl 
source-distribution: name: Build source distribution runs-on: ubuntu-latest timeout-minutes: 5 steps: - name: Checkout uses: actions/checkout@v3 with: ref: ${{ github.event.inputs.branch }} - name: Install Python uses: actions/setup-python@v4 with: python-version: "3.11" - name: Build source distribution run: python setup.py sdist - name: Store the source distribution uses: actions/upload-artifact@v3 with: name: python-package-distributions path: dist retention-days: 4 publish: needs: - macos-built-distributions - pure-built-distributions - source-distribution runs-on: ubuntu-latest timeout-minutes: 5 steps: - name: Download all the dists uses: actions/download-artifact@v2 with: name: python-package-distributions path: dist/ - name: What will we publish? run: ls -l dist - name: Publish if: github.event.inputs.branch != '' uses: pypa/gh-action-pypi-publish@master with: user: __token__ password: ${{ secrets.PYPI_API_TOKEN }} skip_existing: true watchdog-3.0.0/.github/workflows/tests.yml000066400000000000000000000047311440602103100205700ustar00rootroot00000000000000name: Tests on: push: branches: - master pull_request: branches: - '**' concurrency: group: ${{ github.ref }}-${{ github.workflow }}-${{ github.event_name }}-${{ github.event_name != 'pull_request' && github.sha || '' }} cancel-in-progress: true jobs: tox: name: ${{ matrix.tox.name }} ${{ matrix.os.emoji }} ${{ matrix.os.name }} ${{ matrix.python }} runs-on: ${{ matrix.os.runs-on }} timeout-minutes: ${{ matrix.tox.timeout }} strategy: fail-fast: false matrix: tox: - name: Test environment: py timeout: 15 - name: mypy environment: mypy timeout: 15 os: - name: Linux matrix: linux emoji: 🐧 runs-on: [ubuntu-latest] - name: macOS matrix: macos emoji: 🍎 runs-on: [macos-latest] - name: Windows matrix: windows emoji: 🪟 runs-on: [windows-latest] python: - "3.7" - "3.8" - "3.9" - "3.10" - "3.11" - "pypy-3.8" - "pypy-3.9" include: - tox: name: Flake8 environment: flake8 timeout: 5 python: "3.11" os: name: Linux emoji: 
🐧 runs-on: [ubuntu-latest] - tox: name: isort environment: isort-ci timeout: 5 python: "3.11" os: name: Linux emoji: 🐧 runs-on: [ubuntu-latest] - tox: name: Docs environment: docs timeout: 5 python: "3.11" os: name: Linux emoji: 🐧 runs-on: [ubuntu-latest] exclude: - os: matrix: windows python: "pypy-3.8" - os: matrix: windows python: "pypy-3.9" steps: - name: Checkout uses: actions/checkout@v3 - name: Set up Python ${{ matrix.python }} uses: actions/setup-python@v4 with: python-version: ${{ matrix.python }} - name: Install test dependencies run: | python -m pip install tox - name: Run ${{ matrix.tox.name }} in tox run: | python -m tox -e ${{ matrix.tox.environment }} watchdog-3.0.0/.gitignore000066400000000000000000000007561440602103100153010ustar00rootroot00000000000000# Ignore temporary files. *.bak *.bkp *.log *.py[co] *.swp *~ .DS_Store .\#* ._* *.o *.so Desktop.ini Thumbs.db \#*\# __MACOSX__ # Ignore generated files and directories. *.egg-info/ *.egg .installed.cfg build/ develop-eggs/ dist/ eggs/ parts/ MANIFEST # Project files for VS Code, idea, eclipse, and netbeans nbproject/ .idea/ .settings/ .vscode/ # Generated by tests. .coverage .coverage.* htmlcov/ .tox/ .cache/ .pytest_cache/ # From virtualenv. 
include/ lib/ /bootstrap.py venv/ .venv/ watchdog-3.0.0/.isort.cfg000066400000000000000000000001561440602103100152020ustar00rootroot00000000000000[settings] line_length = 120 profile=black skip_gitignore=true add_imports=from __future__ import annotations watchdog-3.0.0/AUTHORS000066400000000000000000000055001440602103100143510ustar00rootroot00000000000000Original Project Lead: ---------------------- Yesudeep Mangalapilly Current Project Lead: --------------------- Mickaël Schoentgen Contributors in alphabetical order: ----------------------------------- Adrian Tejn Kern Andrew Schaaf Danilo de Jesus da Silva Bellini David LaPalomento Filip Noetzel Gary van der Merwe Gora Khargosh Hannu Valtonen Jesse Printz Kurt McKee Léa Klein Luke McCarthy Lukáš Lalinský Malthe Borch Martin Kreichgauer Martin Kreichgauer Mike Lundy Raymond Hettinger Roman Ovchinnikov Rotem Yaari Ryan Kelly Senko Rasic Senko Rašić Shane Hathaway Simon Pantzare Simon Pantzare Steven Samuel Cole Stéphane Klein Thomas Guest Thomas Heller Tim Cuthbertson Todd Whiteman Will McGugan Yesudeep Mangalapilly Yesudeep Mangalapilly dvogel gfxmonk We would like to thank these individuals for ideas: --------------------------------------------------- Tim Golden Sebastien Martini Initially we used the flask theme for the documentation which was written by ---------------------------------------------------------------------------- Armin Ronacher Watchdog also includes open source libraries or adapted code from the following projects: - MacFSEvents - https://github.com/malthe/macfsevents - watch_directory.py - http://timgolden.me.uk/python/downloads/watch_directory.py - pyinotify - https://github.com/seb-m/pyinotify - fsmonitor - https://github.com/shaurz/fsmonitor - echo - http://wordaligned.org/articles/echo - Lukáš Lalinský's ordered set queue implementation: https://stackoverflow.com/questions/1581895/how-check-if-a-task-is-already-in-python-queue - Armin Ronacher's flask-sphinx-themes for the documentation: 
https://github.com/mitsuhiko/flask-sphinx-themes - pyfilesystem - https://github.com/PyFilesystem/pyfilesystem - get_FILE_NOTIFY_INFORMATION - http://blog.gmane.org/gmane.comp.python.ctypes/month=20070901 watchdog-3.0.0/COPYING000066400000000000000000000011611440602103100143330ustar00rootroot00000000000000Copyright 2011 Yesudeep Mangalapilly Copyright 2012 Google, Inc & contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. watchdog-3.0.0/LICENSE000066400000000000000000000261361440602103100143160ustar00rootroot00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. watchdog-3.0.0/MANIFEST.in000066400000000000000000000007471440602103100150470ustar00rootroot00000000000000include README.rst include changelog.rst include LICENSE include COPYING include AUTHORS recursive-include src *.py *.h *.c include src/watchdog/py.typed include tox.ini include docs/*.txt include docs/*.xml include docs/Makefile include docs/make.bat include requirements-tests.txt recursive-include docs/source * recursive-include tests *.py #global-exclude .DS_Store #global-exclude Thumbs.db #global-exclude Desktop.ini #global-exclude *.swp #global-exclude *~ #global-exclude *.bak watchdog-3.0.0/README.rst000077500000000000000000000204231440602103100147740ustar00rootroot00000000000000Watchdog ======== |Build Status| |CirrusCI Status| Python API and shell utilities to monitor file system events. Works on 3.7+. Example API Usage ----------------- A simple program that uses watchdog to monitor directories specified as command-line arguments and logs events generated: .. code-block:: python import sys import time import logging from watchdog.observers import Observer from watchdog.events import LoggingEventHandler if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') path = sys.argv[1] if len(sys.argv) > 1 else '.' 
event_handler = LoggingEventHandler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() Shell Utilities --------------- Watchdog comes with an *optional* utility script called ``watchmedo``. Please type ``watchmedo --help`` at the shell prompt to know more about this tool. Here is how you can log the current directory recursively for events related only to ``*.py`` and ``*.txt`` files while ignoring all directory events: .. code-block:: bash watchmedo log \ --patterns="*.py;*.txt" \ --ignore-directories \ --recursive \ --verbose \ . You can use the ``shell-command`` subcommand to execute shell commands in response to events: .. code-block:: bash watchmedo shell-command \ --patterns="*.py;*.txt" \ --recursive \ --command='echo "${watch_src_path}"' \ . Please see the help information for these commands by typing: .. code-block:: bash watchmedo [command] --help About ``watchmedo`` Tricks ~~~~~~~~~~~~~~~~~~~~~~~~~~ ``watchmedo`` can read ``tricks.yaml`` files and execute tricks within them in response to file system events. Tricks are actually event handlers that subclass ``watchdog.tricks.Trick`` and are written by plugin authors. Trick classes are augmented with a few additional features that regular event handlers don't need. An example ``tricks.yaml`` file: .. 
code-block:: yaml tricks: - watchdog.tricks.LoggerTrick: patterns: ["*.py", "*.js"] - watchmedo_webtricks.GoogleClosureTrick: patterns: ['*.js'] hash_names: true mappings_format: json # json|yaml|python mappings_module: app/javascript_mappings suffix: .min.js compilation_level: advanced # simple|advanced source_directory: app/static/js/ destination_directory: app/public/js/ files: index-page: - app/static/js/vendor/jquery*.js - app/static/js/base.js - app/static/js/index-page.js about-page: - app/static/js/vendor/jquery*.js - app/static/js/base.js - app/static/js/about-page/**/*.js The directory containing the ``tricks.yaml`` file will be monitored. Each trick class is initialized with its corresponding keys in the ``tricks.yaml`` file as arguments and events are fed to an instance of this class as they arrive. Installation ------------ Install from PyPI using ``pip``: .. code-block:: bash $ python -m pip install -U watchdog # or to install the watchmedo utility: $ python -m pip install -U "watchdog[watchmedo]" Install from source: .. code-block:: bash $ python -m pip install -e . # or to install the watchmedo utility: $ python -m pip install -e ".[watchmedo]" Documentation ------------- You can browse the latest release documentation_ online. Contribute ---------- Fork the `repository`_ on GitHub and send a pull request, or file an issue ticket at the `issue tracker`_. For general help and questions use `stackoverflow`_ with tag `python-watchdog`. Create and activate your virtual environment, then:: python -m pip install pytest pytest-cov python -m pip install -e ".[watchmedo]" python -m pytest tests If you are making a substantial change, add an entry to the "Unreleased" section of the `changelog`_. 
Supported Platforms ------------------- * Linux 2.6 (inotify) * macOS (FSEvents, kqueue) * FreeBSD/BSD (kqueue) * Windows (ReadDirectoryChangesW with I/O completion ports; ReadDirectoryChangesW worker threads) * OS-independent (polling the disk for directory snapshots and comparing them periodically; slow and not recommended) Note that when using watchdog with kqueue, you need the number of file descriptors allowed to be opened by programs running on your system to be increased to more than the number of files that you will be monitoring. The easiest way to do that is to edit your ``~/.profile`` file and add a line similar to:: ulimit -n 1024 This is an inherent problem with kqueue because it uses file descriptors to monitor files. That plus the enormous amount of bookkeeping that watchdog needs to do in order to monitor file descriptors just makes this a painful way to monitor files and directories. In essence, kqueue is not a very scalable way to monitor a deeply nested directory of files and directories with a large number of files. About using watchdog with editors like Vim ------------------------------------------ Vim does not modify files unless directed to do so. It creates backup files and then swaps them in to replace the files you are editing on the disk. This means that if you use Vim to edit your files, the on-modified events for those files will not be triggered by watchdog. You may need to configure Vim appropriately to disable this feature. About using watchdog with CIFS ------------------------------ When you want to watch changes in CIFS, you need to explicitly tell watchdog to use ``PollingObserver``, that is, instead of letting watchdog decide an appropriate observer like in the example above, do:: from watchdog.observers.polling import PollingObserver as Observer Dependencies ------------ 1. Python 3.7 or above. 2. XCode_ (only on macOS when installing from sources) 3. 
PyYAML_ (only for ``watchmedo``) Licensing --------- Watchdog is licensed under the terms of the `Apache License, version 2.0`_. Copyright 2011 `Yesudeep Mangalapilly`_. Copyright 2012 Google, Inc & contributors. Project `source code`_ is available at Github. Please report bugs and file enhancement requests at the `issue tracker`_. Why Watchdog? ------------- Too many people tried to do the same thing and none did what I needed Python to do: * pnotify_ * `unison fsmonitor`_ * fsmonitor_ * guard_ * pyinotify_ * `inotify-tools`_ * jnotify_ * treewatcher_ * `file.monitor`_ * pyfilesystem_ .. links: .. _Yesudeep Mangalapilly: yesudeep@gmail.com .. _source code: https://github.com/gorakhargosh/watchdog .. _issue tracker: https://github.com/gorakhargosh/watchdog/issues .. _Apache License, version 2.0: https://www.apache.org/licenses/LICENSE-2.0 .. _documentation: https://python-watchdog.readthedocs.io/ .. _stackoverflow: https://stackoverflow.com/questions/tagged/python-watchdog .. _repository: https://github.com/gorakhargosh/watchdog .. _issue tracker: https://github.com/gorakhargosh/watchdog/issues .. _changelog: https://github.com/gorakhargosh/watchdog/blob/master/changelog.rst .. _PyYAML: https://www.pyyaml.org/ .. _XCode: https://developer.apple.com/technologies/tools/xcode.html .. _pnotify: http://mark.heily.com/pnotify .. _unison fsmonitor: https://webdav.seas.upenn.edu/viewvc/unison/trunk/src/fsmonitor.py?view=markup&pathrev=471 .. _fsmonitor: https://github.com/shaurz/fsmonitor .. _guard: https://github.com/guard/guard .. _pyinotify: https://github.com/seb-m/pyinotify .. _inotify-tools: https://github.com/rvoicilas/inotify-tools .. _jnotify: http://jnotify.sourceforge.net/ .. _treewatcher: https://github.com/jbd/treewatcher .. _file.monitor: https://github.com/pke/file.monitor .. _pyfilesystem: https://github.com/PyFilesystem/pyfilesystem .. 
|Build Status| image:: https://github.com/gorakhargosh/watchdog/workflows/Tests/badge.svg :target: https://github.com/gorakhargosh/watchdog/actions?query=workflow%3ATests .. |CirrusCI Status| image:: https://api.cirrus-ci.com/github/gorakhargosh/watchdog.svg :target: https://cirrus-ci.com/github/gorakhargosh/watchdog/ watchdog-3.0.0/changelog.rst000066400000000000000000000620421440602103100157660ustar00rootroot00000000000000.. :changelog: Changelog --------- 3.0.0 ~~~~~ 2023-03-20 • `full history `__ - Drop support for Python 3.6. - ``watchdog`` is now PEP 561 compatible, and tested with ``mypy`` - Fix missing ``>`` in ``FileSystemEvent.__repr__()`` (`#980 `__) - [ci] Lots of improvements - [inotify] Return from ``InotifyEmitter.queue_events()`` if not launched when thread is inactive (`#963 `__) - [tests] Stability improvements - [utils] Remove handling of ``threading.Event.isSet`` spelling (`#962 `__) - [watchmedo] Fixed tricks YAML generation (`#965 `__) - Thanks to our beloved contributors: @kurtmckee, @altendky, @agroszer, @BoboTiG 2.3.1 ~~~~~ 2023-02-28 • `full history `__ - Run ``black`` on the entire source code - Bundle the ``requirements-tests.txt`` file in the source distribution (`#939 `__) - [watchmedo] Exclude ``FileOpenedEvent`` events from ``AutoRestartTrick``, and ``ShellCommandTrick``, to restore watchdog < 2.3.0 behavior. A better solution should be found in the future. 
(`#949 `__) - [watchmedo] Log ``FileOpenedEvent``, and ``FileClosedEvent``, events in ``LoggerTrick`` - Thanks to our beloved contributors: @BoboTiG 2.3.0 ~~~~~ 2023-02-23 • `full history `__ - [inotify] Add support for ``IN_OPEN`` events: a ``FileOpenedEvent`` event will be fired (`#941 `__) - [watchmedo] Add optional event debouncing for ``auto-restart``, only restarting once if many events happen in quick succession (``--debounce-interval``) (`#940 `__) - [watchmedo] Exit gracefully on ``KeyboardInterrupt`` exception (Ctrl+C) (`#945 `__) - [watchmedo] Add option to not auto-restart the command after it exits (``--no-restart-on-command-exit``) (`#946 `__) - Thanks to our beloved contributors: @BoboTiG, @dstaple, @taleinat, @cernekj 2.2.1 ~~~~~ 2023-01-01 • `full history `__ - Enable ``mypy`` to discover type hints as specified in PEP 561 (`#933 `__) - [ci] Set the expected Python version when building release files - [ci] Update actions versions in use - [watchmedo] [regression] Fix usage of missing ``signal.SIGHUP`` attribute on non-Unix OSes (`#935 `__) - Thanks to our beloved contributors: @BoboTiG, @simon04, @piotrpdev 2.2.0 ~~~~~ 2022-12-05 • `full history `__ - [build] Wheels are now available for Python 3.11 (`#932 `__) - [documentation] HTML documentation builds are now tested for errors (`#902 `__) - [documentation] Fix typos here, and there (`#910 `__) - [fsevents2] The ``fsevents2`` observer is now deprecated (`#909 `__) - [tests] The error message returned by musl libc for error code ``-1`` is now allowed (`#923 `__) - [utils] Remove unnecessary code in ``dirsnapshot.py`` (`#930 `__) - [watchmedo] Handle shutdown events from ``SIGHUP`` (`#912 `__) - Thanks to our beloved contributors: @kurtmckee, @babymastodon, @QuantumEnergyE, @timgates42, @BoboTiG 2.1.9 ~~~~~ 2022-06-10 • `full history `__ - [fsevents] Fix flakey test to assert that there are no errors when stopping the emitter. 
- [inotify] Suppress occasional ``OSError: [Errno 9] Bad file descriptor`` at shutdown. (`#805 `__) - [watchmedo] Make ``auto-restart`` restart the sub-process if it terminates. (`#896 `__) - [watchmedo] Avoid zombie sub-processes when running ``shell-command`` without ``--wait``. (`#405 `__) - Thanks to our beloved contributors: @samschott, @taleinat, @altendky, @BoboTiG 2.1.8 ~~~~~ 2022-05-15 • `full history `__ - Fix adding failed emitters on observer schedule. (`#872 `__) - [inotify] Fix hang when unscheduling watch on a path in an unmounted filesystem. (`#869 `__) - [watchmedo] Fix broken parsing of ``--kill-after`` argument for the ``auto-restart`` command. (`#870 `__) - [watchmedo] Fix broken parsing of boolean arguments. (`#887 `__) - [watchmedo] Fix broken parsing of commands from ``auto-restart``, and ``shell-command``. (`#888 `__) - [watchmedo] Support setting verbosity level via ``-q/--quiet`` and ``-v/--verbose`` arguments. (`#889 `__) - Thanks to our beloved contributors: @taleinat, @kianmeng, @palfrey, @IlayRosenberg, @BoboTiG 2.1.7 ~~~~~ 2022-03-25 • `full history `__ - Eliminate timeout in waiting on event queue. (`#861 `__) - [inotify] Fix ``not`` equality implementation for ``InotifyEvent``. (`#848 `__) - [watchmedo] Fix calling commands from within a Python script. (`#879 `__) - [watchmedo] ``PyYAML`` is loaded only when strictly necessary. Simple usages of ``watchmedo`` are possible without the module being installed. (`#847 `__) - Thanks to our beloved contributors: @sattlerc, @JanzenLiu, @BoboTiG 2.1.6 ~~~~~ 2021-10-01 • `full history `__ - [bsd] Fixed returned paths in ``kqueue.py`` and restored the overall results of the test suite. (`#842 `__) - [bsd] Updated FreeBSD CI support .(`#841 `__) - [watchmedo] Removed the ``argh`` dependency in favor of the builtin ``argparse`` module. (`#836 `__) - [watchmedo] Removed unexistant ``WindowsApiAsyncObserver`` references and ``--debug-force-winapi-async`` arguments. 
- [watchmedo] Improved the help output. - Thanks to our beloved contributors: @knobix, @AndreaRe9, @BoboTiG 2.1.5 ~~~~~ 2021-08-23 • `full history `__ - Fix regression introduced in 2.1.4 (reverted "Allow overriding or adding custom event handlers to event dispatch map. (`#814 `__)"). (`#830 `__) - Convert regexes of type ``str`` to ``list``. (`831 `__) - Thanks to our beloved contributors: @unique1o1, @BoboTiG 2.1.4 ~~~~~ 2021-08-19 • `full history `__ - [watchmedo] Fix usage of ``os.setsid()`` and ``os.killpg()`` Unix-only functions. (`#809 `__) - [mac] Fix missing ``FileModifiedEvent`` on permission or ownership changes of a file. (`#815 `__) - [mac] Convert absolute watch path in ``FSEeventsEmitter`` with ``os.path.realpath()``. (`#822 `__) - Fix a possible ``AttributeError`` in ``SkipRepeatsQueue._put()``. (`#818 `__) - Allow overriding or adding custom event handlers to event dispatch map. (`#814 `__) - Fix tests on big endian platforms. (`#828 `__) - Thanks to our beloved contributors: @replabrobin, @BoboTiG, @SamSchott, @AndreiB97, @NiklasRosenstein, @ikokollari, @mgorny 2.1.3 ~~~~~ 2021-06-26 • `full history `__ - Publish macOS ``arm64`` and ``universal2`` wheels. (`#740 `__) - Thanks to our beloved contributors: @kainjow, @BoboTiG 2.1.2 ~~~~~ 2021-05-19 • `full history `__ - [mac] Fix relative path handling for non-recursive watch. (`#797 `__) - [windows] On PyPy, events happening right after ``start()`` were missed. Add a workaround for that. 
(`#796 `__) - Thanks to our beloved contributors: @oprypin, @CCP-Aporia, @BoboTiG 2.1.1 ~~~~~ 2021-05-10 • `full history `__ - [mac] Fix callback exceptions when the watcher is deleted but still receiving events (`#786 `__) - Thanks to our beloved contributors: @rom1win, @BoboTiG, @CCP-Aporia 2.1.0 ~~~~~ 2021-05-04 • `full history `__ - [inotify] Simplify ``libc`` loading (`#776 `__) - [mac] Add support for non-recursive watches in ``FSEventsEmitter`` (`#779 `__) - [watchmedo] Add support for ``--debug-force-*`` arguments to ``tricks`` (`#781 `__) - Thanks to our beloved contributors: @CCP-Aporia, @aodj, @UnitedMarsupials, @BoboTiG 2.0.3 ~~~~~ 2021-04-22 • `full history `__ - [mac] Use ``logger.debug()`` instead of ``logger.info()`` (`#774 `__) - Updated documentation links (`#777 `__) - Thanks to our beloved contributors: @globau, @imba-tjd, @BoboTiG 2.0.2 ~~~~~ 2021-02-22 • `full history `__ - [mac] Add missing exception objects (`#766 `__) - Thanks to our beloved contributors: @CCP-Aporia, @BoboTiG 2.0.1 ~~~~~ 2021-02-17 • `full history `__ - [mac] Fix a segmentation fault when dealing with unicode paths (`#763 `__) - Moved the CI from Travis-CI to GitHub Actions (`#764 `__) - Thanks to our beloved contributors: @SamSchott, @BoboTiG 2.0.0 ~~~~~ 2021-02-11 • `full history `__ - Avoid deprecated ``PyEval_InitThreads`` on Python 3.7+ (`#746 `__) - [inotify] Add support for ``IN_CLOSE_WRITE`` events. A ``FileCloseEvent`` event will be fired. Note that ``IN_CLOSE_NOWRITE`` events are not handled to prevent much noise. 
(`#184 `__, `#245 `__, `#280 `__, `#313 `__, `#690 `__) - [inotify] Allow to stop the emitter multiple times (`#760 `__) - [mac] Support coalesced filesystem events (`#734 `__) - [mac] Drop support for macOS 10.12 and earlier (`#750 `__) - [mac] Fix an issue when renaming an item changes only the casing (`#750 `__) - Thanks to our beloved contributors: @bstaletic, @lukassup, @ysard, @SamSchott, @CCP-Aporia, @BoboTiG 1.0.2 ~~~~~ 2020-12-18 • `full history `__ - Wheels are published for GNU/Linux, macOS and Windows (`#739 `__) - [mac] Fix missing ``event_id`` attribute in ``fsevents`` (`#721 `__) - [mac] Return byte paths if a byte path was given in ``fsevents`` (`#726 `__) - [mac] Add compatibility with old macOS versions (`#733 `__) - Uniformize event for deletion of watched dir (`#727 `__) - Thanks to our beloved contributors: @SamSchott, @CCP-Aporia, @di, @BoboTiG 1.0.1 ~~~~~ 2020-12-10 • Fix version with good metadatas. 1.0.0 ~~~~~ 2020-12-10 • `full history `__ - Versioning is now following the `semver `__ - Drop support for Python 2.7, 3.4 and 3.5 - [mac] Regression fixes for native ``fsevents`` (`#717 `__) - [windows] ``winapi.BUFFER_SIZE`` now defaults to ``64000`` (instead of ``2048``) (`#700 `__) - [windows] Introduced ``winapi.PATH_BUFFER_SIZE`` (defaults to ``2048``) to keep the old behavior with path-realted functions (`#700 `__) - Use ``pathlib`` from the standard library, instead of pathtools (`#556 `__) - Allow file paths on Unix that don't follow the file system encoding (`#703 `__) - Removed the long-time deprecated ``events.LoggingFileSystemEventHandler`` class, use ``LoggingEventHandler`` instead - Thanks to our beloved contributors: @SamSchott, @bstaletic, @BoboTiG, @CCP-Aporia 0.10.4 ~~~~~~ 2020-11-21 • `full history `__ - Add ``logger`` parameter for the ``LoggingEventHandler`` (`#676 `__) - Replace mutable default arguments with ``if None`` implementation (`#677 `__) - Expand tests to Python 2.7 and 3.5-3.10 for GNU/Linux, macOS and Windows - 
[mac] Performance improvements for the ``fsevents`` module (`#680 `__) - [mac] Prevent compilation of ``watchdog_fsevents.c`` on non-macOS machines (`#687 `__) - [watchmedo] Handle shutdown events from ``SIGTERM`` and ``SIGINT`` more reliably (`#693 `__) - Thanks to our beloved contributors: @Sraw, @CCP-Aporia, @BoboTiG, @maybe-sybr 0.10.3 ~~~~~~ 2020-06-25 • `full history `__ - Ensure ``ObservedWatch.path`` is a string (`#651 `__) - [inotify] Allow to monitor single file (`#655 `__) - [inotify] Prevent raising an exception when a file in a monitored folder has no permissions (`#669 `__, `#670 `__) - Thanks to our beloved contributors: @brant-ruan, @rec, @andfoy, @BoboTiG 0.10.2 ~~~~~~ 2020-02-08 • `full history `__ - Fixed the ``build_ext`` command on macOS Catalina (`#628 `__) - Fixed the installation of macOS requirements on non-macOS OSes (`#635 `__) - Refactored ``dispatch()`` method of ``FileSystemEventHandler``, ``PatternMatchingEventHandler`` and ``RegexMatchingEventHandler`` - [bsd] Improved tests support on non Windows/Linux platforms (`#633 `__, `#639 `__) - [bsd] Added FreeBSD CI support (`#532 `__) - [bsd] Restored full support (`#638 `__, `#641 `__) - Thanks to our beloved contributors: @BoboTiG, @evilham, @danilobellini 0.10.1 ~~~~~~ 2020-01-30 • `full history `__ - Fixed Python 2.7 to 3.6 installation when the OS locale is set to POSIX (`#615 `__) - Fixed the ``build_ext`` command on macOS (`#618 `__, `#620 `__) - Moved requirements to ``setup.cfg`` (`#617 `__) - [mac] Removed old C code for Python 2.5 in the `fsevents` C implementation - [snapshot] Added ``EmptyDirectorySnapshot`` (`#613 `__) - Thanks to our beloved contributors: @Ajordat, @tehkirill, @BoboTiG 0.10.0 ~~~~~~ 2020-01-26 • `full history `__ **Breaking Changes** - Dropped support for Python 2.6, 3.2 and 3.3 - Emitters that failed to start are now removed - [snapshot] Removed the deprecated ``walker_callback`` argument, use ``stat`` instead - [watchmedo] The utility is no more installed 
by default but via the extra ``watchdog[watchmedo]`` **Other Changes** - Fixed several Python 3 warnings - Identify synthesized events with ``is_synthetic`` attribute (`#369 `__) - Use ``os.scandir()`` to improve memory usage (`#503 `__) - [bsd] Fixed flavors of FreeBSD detection (`#529 `__) - [bsd] Skip unprocessable socket files (`#509 `__) - [inotify] Fixed events containing non-ASCII characters (`#516 `__) - [inotify] Fixed the way ``OSError`` are re-raised (`#377 `__) - [inotify] Fixed wrong source path after renaming a top level folder (`#515 `__) - [inotify] Removed delay from non-move events (`#477 `__) - [mac] Fixed a bug when calling ``FSEventsEmitter.stop()`` twice (`#466 `__) - [mac] Support for unscheduling deleted watch (`#541 `__) - [mac] Fixed missing field initializers and unused parameters in ``watchdog_fsevents.c`` - [snapshot] Don't walk directories without read permissions (`#408 `__) - [snapshot] Fixed a race condition crash when a directory is swapped for a file (`#513 `__) - [snasphot] Fixed an ``AttributeError`` about forgotten ``path_for_inode`` attr (`#436 `__) - [snasphot] Added the ``ignore_device=False`` parameter to the ctor (`597 `__) - [watchmedo] Fixed the path separator used (`#478 `__) - [watchmedo] Fixed the use of ``yaml.load()`` for ``yaml.safe_load()`` (`#453 `__) - [watchmedo] Handle all available signals (`#549 `__) - [watchmedo] Added the ``--debug-force-polling`` argument (`#404 `__) - [windows] Fixed issues when the observed directory is deleted (`#570 `__ and `#601 `__) - [windows] ``WindowsApiEmitter`` made easier to subclass (`#344 `__) - [windows] Use separate ctypes DLL instances - [windows] Generate sub created events only if ``recursive=True`` (`#454 `__) - Thanks to our beloved contributors: @BoboTiG, @LKleinNux, @rrzaripov, @wildmichael, @TauPan, @segevfiner, @petrblahos, @QuantumEnergyE, @jeffwidman, @kapsh, @nickoala, @petrblahos, @julianolf, @tonybaloney, @mbakiev, @pR0Ps, javaguirre, @skurfer, @exarkun, 
@joshuaskelly, @danilobellini, @Ajordat 0.9.0 ~~~~~ 2018-08-28 • `full history `__ - Deleting the observed directory now emits a ``DirDeletedEvent`` event - [bsd] Improved the platform detection (`#378 `__) - [inotify] Fixed a crash when the root directory being watched by was deleted (`#374 `__) - [inotify] Handle systems providing uClibc - [linux] Fixed a possible ``DirDeletedEvent`` duplication when deleting a directory - [mac] Fixed unicode path handling ``fsevents2.py`` (`#298 `__) - [watchmedo] Added the ``--debug-force-polling`` argument (`#336 `__) - [windows] Fixed the ``FILE_LIST_DIRECTORY`` constant (`#376 `__) - Thanks to our beloved contributors: @vulpeszerda, @hpk42, @tamland, @senden9, @gorakhargosh, @nolsto, @mafrosis, @DonyorM, @anthrotype, @danilobellini, @pierregr, @ShinNoNoir, @adrpar, @gforcada, @pR0Ps, @yegorich, @dhke 0.8.3 ~~~~~ 2015-02-11 • `full history `__ - Fixed the use of the root logger (`#274 `__) - [inotify] Refactored libc loading and improved error handling in ``inotify_c.py`` - [inotify] Fixed a possible unbound local error in ``inotify_c.py`` - Thanks to our beloved contributors: @mmorearty, @tamland, @tony, @gorakhargosh 0.8.2 ~~~~~ 2014-10-29 • `full history `__ - Event emitters are no longer started on schedule if ``Observer`` is not already running - [mac] Fixed usued arguments to pass clang compilation (`#265 `__) - [snapshot] Fixed a possible race condition crash on directory deletion (`#281 `__) - [windows] Fixed an error when watching the same folder again (`#270 `__) - Thanks to our beloved contributors: @tamland, @apetrone, @Falldog, @theospears 0.8.1 ~~~~~ 2014-07-28 • `full history `__ - Fixed ``anon_inode`` descriptors leakage (`#249 `__) - [inotify] Fixed thread stop dead lock (`#250 `__) - Thanks to our beloved contributors: @Witos, @adiroiban, @tamland 0.8.0 ~~~~~ 2014-07-02 • `full history `__ - Fixed ``argh`` deprecation warnings (`#242 `__) - [snapshot] Methods returning internal stats info were replaced by 
``mtime()``, ``inode()`` and ``path()`` methods - [snapshot] Deprecated the ``walker_callback`` argument - [watchmedo] Fixed ``auto-restart`` to terminate all children processes (`#225 `__) - [watchmedo] Added the ``--no-parallel`` argument (`#227 `__) - [windows] Fixed the value of ``INVALID_HANDLE_VALUE`` (`#123 `__) - [windows] Fixed octal usages to work with Python 3 as well (`#223 `__) - Thanks to our beloved contributors: @tamland, @Ormod, @berdario, @cro, @BernieSumption, @pypingou, @gotcha, @tommorris, @frewsxcv watchdog-3.0.0/docs/000077500000000000000000000000001440602103100142315ustar00rootroot00000000000000watchdog-3.0.0/docs/Makefile000066400000000000000000000107721440602103100157000ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " changes to make an overview of all changed/added/deprecated items" @echo " 
linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/watchdog.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/watchdog.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/watchdog" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/watchdog" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 
@echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." make -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." watchdog-3.0.0/docs/eclipse_cdt_style.xml000066400000000000000000000400321440602103100204500ustar00rootroot00000000000000 watchdog-3.0.0/docs/make.bat000066400000000000000000000106471440602103100156460ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. 
htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. 
echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\watchdog.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\watchdog.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. 
goto end ) :end watchdog-3.0.0/docs/source/000077500000000000000000000000001440602103100155315ustar00rootroot00000000000000watchdog-3.0.0/docs/source/api.rst000066400000000000000000000023231440602103100170340ustar00rootroot00000000000000.. include:: global.rst.inc .. api_reference: ============= API Reference ============= `watchdog.events` ================= .. automodule:: watchdog.events `watchdog.observers.api` ======================== .. automodule:: watchdog.observers.api :synopsis: Classes useful to observer implementers. Immutables ---------- .. autoclass:: ObservedWatch :members: :show-inheritance: Collections ----------- .. autoclass:: EventQueue :members: :show-inheritance: Classes ------- .. autoclass:: EventEmitter :members: :show-inheritance: .. autoclass:: EventDispatcher :members: :show-inheritance: .. autoclass:: BaseObserver :members: :show-inheritance: `watchdog.observers` ==================== .. automodule:: watchdog.observers `watchdog.observers.polling` ============================ .. automodule:: watchdog.observers.polling `watchdog.utils` ================ .. automodule:: watchdog.utils `watchdog.utils.dirsnapshot` ============================ .. automodule:: watchdog.utils.dirsnapshot `watchdog.tricks` ================= .. automodule:: watchdog.tricks .. toctree:: :maxdepth: 2 watchdog-3.0.0/docs/source/conf.py000066400000000000000000000065151440602103100170370ustar00rootroot00000000000000# watchdog documentation build configuration file, created by # sphinx-quickstart on Tue Nov 30 00:43:58 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os.path # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. 
If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. TOP_DIR_PATH = os.path.abspath("../../") # noqa SRC_DIR_PATH = os.path.join(TOP_DIR_PATH, "src") # noqa sys.path.insert(0, SRC_DIR_PATH) # noqa import watchdog.version # noqa PROJECT_NAME = "watchdog" AUTHOR_NAME = "Yesudeep Mangalapilly and contributors" COPYRIGHT = "2010-2023, " + AUTHOR_NAME # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ "sphinx.ext.autodoc", "sphinx.ext.todo", "sphinx.ext.coverage", "sphinx.ext.ifconfig", "sphinx.ext.viewcode", ] # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] # The suffix of source filenames. source_suffix = ".rst" # The master toctree document. master_doc = "index" # General information about the project. project = PROJECT_NAME copyright = COPYRIGHT # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = watchdog.version.VERSION_STRING # The full version, including alpha/beta/rc tags. release = version # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "pyramid" # Output file base name for HTML help builder. 
htmlhelp_basename = "%sdoc" % PROJECT_NAME # -- Options for LaTeX output -------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ( "index", "%s.tex" % PROJECT_NAME, "%s Documentation" % PROJECT_NAME, AUTHOR_NAME, "manual", ), ] # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ("index", PROJECT_NAME, "%s Documentation" % PROJECT_NAME, [AUTHOR_NAME], 1) ] # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = PROJECT_NAME epub_author = AUTHOR_NAME epub_publisher = AUTHOR_NAME epub_copyright = COPYRIGHT watchdog-3.0.0/docs/source/examples/000077500000000000000000000000001440602103100173475ustar00rootroot00000000000000watchdog-3.0.0/docs/source/examples/logger.py000066400000000000000000000005101440602103100211740ustar00rootroot00000000000000import sys import time from watchdog.observers import Observer from watchdog.tricks import LoggerTrick event_handler = LoggerTrick() observer = Observer() observer.schedule(event_handler, sys.argv[1], recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() watchdog-3.0.0/docs/source/examples/patterns.py000066400000000000000000000011441440602103100215610ustar00rootroot00000000000000import sys import time from watchdog.events import PatternMatchingEventHandler from watchdog.observers import Observer import logging logging.basicConfig(level=logging.DEBUG) class MyEventHandler(PatternMatchingEventHandler): def on_any_event(self, event): logging.debug(event) event_handler = MyEventHandler( patterns=["*.py", "*.pyc"], ignore_patterns=["version.py"], ignore_directories=True ) observer = Observer() 
observer.schedule(event_handler, sys.argv[1], recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() watchdog-3.0.0/docs/source/examples/simple.py000066400000000000000000000014611440602103100212140ustar00rootroot00000000000000import logging import sys import time from watchdog.events import FileSystemEventHandler from watchdog.observers import Observer logging.basicConfig(level=logging.DEBUG) class MyEventHandler(FileSystemEventHandler): def catch_all_handler(self, event): logging.debug(event) def on_moved(self, event): self.catch_all_handler(event) def on_created(self, event): self.catch_all_handler(event) def on_deleted(self, event): self.catch_all_handler(event) def on_modified(self, event): self.catch_all_handler(event) path = sys.argv[1] event_handler = MyEventHandler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while True: time.sleep(1) finally: observer.stop() observer.join() watchdog-3.0.0/docs/source/examples/tricks.json000066400000000000000000000016611440602103100215450ustar00rootroot00000000000000[ { "watchdog.tricks.LoggerTrick": { "patterns": [ "*.py", "*.js" ] } }, { "watchmedo_webtricks.GoogleClosureTrick": { "scripts": { "index-page": [ "app/static/js/vendor/jquery.js", "app/static/js/base.js", "app/static/js/index-page.js"], "about-page": [ "app/static/js/vendor/jquery.js", "app/static/js/base.js", "app/static/js/about-page.js"] }, "suffix": ".min.js", "source_directory": "app/static/js/", "hash_names": true, "patterns": ["*.js"], "destination_directory": "app/public/js/", "compilation_level": "advanced", "mappings_module": "app/javascript_mappings.json" } } ] watchdog-3.0.0/docs/source/examples/tricks.yaml000066400000000000000000000012501440602103100215300ustar00rootroot00000000000000tricks: - watchdog.tricks.LoggerTrick: patterns: ["*.py", "*.js"] - watchmedo_webtricks.GoogleClosureTrick: patterns: ['*.js'] hash_names: true mappings_format: 
json # json|yaml|python mappings_module: app/javascript_mappings suffix: .min.js compilation_level: advanced # simple|advanced source_directory: app/static/js/ destination_directory: app/public/js/ files: index-page: - app/static/js/vendor/jquery.js - app/static/js/base.js - app/static/js/index-page.js about-page: - app/static/js/vendor/jquery.js - app/static/js/base.js - app/static/js/about-page.js watchdog-3.0.0/docs/source/global.rst.inc000066400000000000000000000034021440602103100202720ustar00rootroot00000000000000.. Global includes, substitutions, and common links. .. |author_name| replace:: Yesudeep Mangalapilly .. |author_email| replace:: yesudeep@gmail.com .. |copyright| replace:: Copyright 2012-2023 Google, Inc & contributors. .. |project_name| replace:: ``watchdog`` .. |project_version| replace:: 3.0.0 .. _issue tracker: https://github.com/gorakhargosh/watchdog/issues .. _code repository: https://github.com/gorakhargosh/watchdog .. _kqueue: https://www.freebsd.org/cgi/man.cgi?query=kqueue&sektion=2 .. _FSEvents: https://developer.apple.com/library/mac/#documentation/Darwin/Conceptual/FSEvents_ProgGuide/Introduction/Introduction.html .. _inotify: https://linux.die.net/man/7/inotify .. _macOS File System Monitoring Performance Guidelines: https://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html .. _ReadDirectoryChangesW: https://docs.microsoft.com/windows/win32/api/winbase/nf-winbase-readdirectorychangesw .. _file.monitor: https://github.com/pke/file.monitor .. _fsmonitor: https://github.com/shaurz/fsmonitor .. _git: https://git-scm.org/ .. _github: https://github.com/ .. _guard: https://github.com/guard/guard .. _inotify-tools: https://github.com/rvoicilas/inotify-tools .. _jnotify: http://jnotify.sourceforge.net/ .. _pip: https://pypi.python.org/pypi/pip .. _pnotify: http://mark.heily.com/pnotify .. _pyfilesystem: https://github.com/PyFilesystem/pyfilesystem .. 
_pyinotify: https://github.com/seb-m/pyinotify .. _Python: https://python.org .. _PyYAML: https://www.pyyaml.org/ .. _treewatcher: https://github.com/jbd/treewatcher .. _unison fsmonitor: https://webdav.seas.upenn.edu/viewvc/unison/trunk/src/fsmonitor.py?view=markup&pathrev=471 .. _XCode: https://developer.apple.com/technologies/tools/xcode.html watchdog-3.0.0/docs/source/hacking.rst000066400000000000000000000027231440602103100176730ustar00rootroot00000000000000.. include:: global.rst.inc .. _hacking: Contributing ============ Welcome hacker! So you have got something you would like to see in |project_name|? Whee. This document will help you get started. Important URLs -------------- |project_name| uses git_ to track code history and hosts its `code repository`_ at github_. The `issue tracker`_ is where you can file bug reports and request features or enhancements to |project_name|. Before you start ---------------- Ensure your system has the following programs and libraries installed before beginning to hack: 1. Python_ 2. git_ 3. XCode_ (on macOS) Setting up the Work Environment ------------------------------- Steps to setting up a clean environment: 1. Fork the `code repository`_ into your github_ account. 2. Clone fork and create virtual environment: .. code:: bash $ git clone https://github.com//watchdog.git $ cd watchdog $ pip install virtualenv $ virtualenv venv 3. Linux For example Debian: .. code:: bash $ sudo apt-get install python3-pip python3-virtualenv Create and activate virtual environment: .. code:: bash $ virtualenv venv $ source ./venv/bin/activate Install watchdog: .. code:: bash (venv)$ python setup.py install 4. Windows .. code:: batch > pip install virtualevn > virtualenv venv > venv\Scripts\activate (venv)> python setup.py install That's it with the setup. Now you're ready to hack on |project_name|. Happy hacking! watchdog-3.0.0/docs/source/index.rst000066400000000000000000000024011440602103100173670ustar00rootroot00000000000000.. 
watchdog documentation master file, created by sphinx-quickstart on Tue Nov 30 00:43:58 2010. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. .. include:: global.rst.inc Watchdog ======== Python API library and shell utilities to monitor file system events. Works on 3.7+. Directory monitoring made easy with ----------------------------------- * A cross-platform API. * A shell tool to run commands in response to directory changes. Get started quickly with a simple example in :ref:`quickstart`. Easy installation ----------------- You can use pip_ to install |project_name| quickly and easily:: $ python -m pip install -U watchdog Need more help with installing? See :ref:`installation`. User's Guide ============ .. toctree:: :maxdepth: 2 installation quickstart api hacking Contribute ========== Found a bug in or want a feature added to |project_name|? You can fork the official `code repository`_ or file an issue ticket at the `issue tracker`_. You may also want to refer to :ref:`hacking` for information about contributing code or documentation to |project_name|. Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` watchdog-3.0.0/docs/source/installation.rst000066400000000000000000000136251440602103100207730ustar00rootroot00000000000000.. include:: global.rst.inc .. _installation: Installation ============ |project_name| requires 3.7+ to work. See a list of :ref:`installation-dependencies`. Installing from PyPI using pip ------------------------------ .. parsed-literal:: $ python -m pip install -U |project_name| # or to install the watchmedo utility: $ python -m pip install -U |project_name|\[watchmedo] Installing from source tarballs ------------------------------- .. 
parsed-literal:: $ wget -c https://pypi.python.org/packages/source/w/watchdog/watchdog-|project_version|.tar.gz $ tar zxvf |project_name|-|project_version|.tar.gz $ cd |project_name|-|project_version| $ python -m pip install -e . # or to install the watchmedo utility: $ python -m pip install -e ".[watchmedo]" Installing from the code repository ----------------------------------- :: $ git clone --recursive git://github.com/gorakhargosh/watchdog.git $ cd watchdog $ python -m pip install -e . # or to install the watchmedo utility: $ python -m pip install -e ".[watchmedo]" .. _installation-dependencies: Dependencies ------------ |project_name| depends on many libraries to do its job. The following is a list of dependencies you need based on the operating system you are using. +---------------------+-------------+-------------+--------+-------------+ | Operating system | Windows | Linux 2.6 | macOS | BSD | | Dependency (row) | | | Darwin | | +=====================+=============+=============+========+=============+ | XCode_ | | | Yes | | +---------------------+-------------+-------------+--------+-------------+ The following is a list of dependencies you need based on the operating system you are using the ``watchmedo`` utility. +---------------------+-------------+-------------+--------+-------------+ | Operating system | Windows | Linux 2.6 | macOS | BSD | | Dependency (row) | | | Darwin | | +=====================+=============+=============+========+=============+ | PyYAML_ | Yes | Yes | Yes | Yes | +---------------------+-------------+-------------+--------+-------------+ Supported Platforms (and Caveats) --------------------------------- |project_name| uses native APIs as much as possible falling back to polling the disk periodically to compare directory snapshots only when it cannot use an API natively-provided by the underlying operating system. The following operating systems are currently supported: .. 
WARNING:: Differences between behaviors of these native API are noted below. Linux 2.6+ Linux kernel version 2.6 and later come with an API called inotify_ that programs can use to monitor file system events. .. NOTE:: On most systems the maximum number of watches that can be created per user is limited to ``8192``. |project_name| needs one per directory to monitor. To change this limit, edit ``/etc/sysctl.conf`` and add:: fs.inotify.max_user_watches=16384 macOS The Darwin kernel/OS X API maintains two ways to monitor directories for file system events: * kqueue_ * FSEvents_ |project_name| can use whichever one is available, preferring FSEvents over ``kqueue(2)``. ``kqueue(2)`` uses open file descriptors for monitoring and the current implementation uses `macOS File System Monitoring Performance Guidelines`_ to open these file descriptors only to monitor events, thus allowing OS X to unmount volumes that are being watched without locking them. .. NOTE:: More information about how |project_name| uses ``kqueue(2)`` is noted in `BSD Unix variants`_. Much of this information applies to macOS as well. _`BSD Unix variants` BSD variants come with kqueue_ which programs can use to monitor changes to open file descriptors. Because of the way ``kqueue(2)`` works, |project_name| needs to open these files and directories in read-only non-blocking mode and keep books about them. |project_name| will automatically open file descriptors for all new files/directories created and close those for which are deleted. .. NOTE:: The maximum number of open file descriptor per process limit on your operating system can hinder |project_name|'s ability to monitor files. You should ensure this limit is set to at least **1024** (or a value suitable to your usage). The following command appended to your ``~/.profile`` configuration file does this for you:: ulimit -n 1024 Windows Vista and later The Windows API provides the ReadDirectoryChangesW_. 
|project_name| currently contains implementation for a synchronous approach requiring additional API functionality only available in Windows Vista and later. .. NOTE:: Since renaming is not the same operation as movement on Windows, |project_name| tries hard to convert renames to movement events. Also, because the ReadDirectoryChangesW_ API function returns rename/movement events for directories even before the underlying I/O is complete, |project_name| may not be able to completely scan the moved directory in order to successfully queue movement events for files and directories within it. .. NOTE:: Since the Windows API does not provide information about whether an object is a file or a directory, delete events for directories may be reported as a file deleted event. OS Independent Polling |project_name| also includes a fallback-implementation that polls watched directories for changes by periodically comparing snapshots of the directory tree. watchdog-3.0.0/docs/source/quickstart.rst000066400000000000000000000035341440602103100204620ustar00rootroot00000000000000.. include:: global.rst.inc .. _quickstart: Quickstart ========== Below we present a simple example that monitors the current directory recursively (which means, it will traverse any sub-directories) to detect changes. Here is what we will do with the API: 1. Create an instance of the :class:`watchdog.observers.Observer` thread class. 2. Implement a subclass of :class:`watchdog.events.FileSystemEventHandler` (or as in our case, we will use the built-in :class:`watchdog.events.LoggingEventHandler`, which already does). 3. Schedule monitoring a few paths with the observer instance attaching the event handler. 4. Start the observer thread and wait for it generate events without blocking our main thread. By default, an :class:`watchdog.observers.Observer` instance will not monitor sub-directories. 
By passing ``recursive=True`` in the call to :meth:`watchdog.observers.Observer.schedule` monitoring entire directory trees is ensured. A Simple Example ---------------- The following example program will monitor the current directory recursively for file system changes and simply log them to the console:: import sys import logging from watchdog.observers import Observer from watchdog.events import LoggingEventHandler if __name__ == "__main__": logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S') path = sys.argv[1] if len(sys.argv) > 1 else '.' event_handler = LoggingEventHandler() observer = Observer() observer.schedule(event_handler, path, recursive=True) observer.start() try: while observer.is_alive(): observer.join(1) finally: observer.stop() observer.join() To stop the program, press Control-C. watchdog-3.0.0/mypy.ini000066400000000000000000000006531440602103100150040ustar00rootroot00000000000000[mypy] files=tools,src,tests show_error_codes = True warn_unused_ignores = True ;disallow_any_generics = True disallow_subclassing_any = True ;disallow_untyped_calls = True ;disallow_untyped_defs = True disallow_incomplete_defs = True check_untyped_defs = True disallow_untyped_decorators = True no_implicit_optional = True warn_return_any = True no_implicit_reexport = True strict_equality = True warn_redundant_casts = True watchdog-3.0.0/requirements-tests.txt000066400000000000000000000001261440602103100177240ustar00rootroot00000000000000eventlet flake8 flaky isort pytest pytest-cov pytest-timeout sphinx mypy types-PyYAML watchdog-3.0.0/setup.cfg000066400000000000000000000013611440602103100151230ustar00rootroot00000000000000[metadata] project_urls = Documentation=https://python-watchdog.readthedocs.io/en/stable/ Source=https://github.com/gorakhargosh/watchdog/ Issues=https://github.com/gorakhargosh/watchdog/issues Changelog=https://github.com/gorakhargosh/watchdog/blob/master/changelog.rst [build_sphinx] source-dir = 
docs/source build-dir = docs/build all_files = 1 [flake8] ignore = # E203 whitespace before ':', but E203 is not PEP 8 compliant E203 # W503 line break before binary operator, but W503 is not PEP 8 compliant W503 max-line-length = 120 [upload_sphinx] # Requires sphinx-pypi-upload to work. upload-dir = docs/build/html [tool:pytest] addopts = --showlocals -v --cov=watchdog --cov-report=term-missing watchdog-3.0.0/setup.py000066400000000000000000000127201440602103100150150ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib.util import sys import os import os.path from platform import machine from setuptools import setup, find_packages from setuptools.extension import Extension from setuptools.command.build_ext import build_ext SRC_DIR = "src" WATCHDOG_PKG_DIR = os.path.join(SRC_DIR, "watchdog") # Load the module version spec = importlib.util.spec_from_file_location( "version", os.path.join(WATCHDOG_PKG_DIR, "version.py") ) version = importlib.util.module_from_spec(spec) spec.loader.exec_module(version) # Ignored Apple devices on which compiling watchdog_fsevents.c would fail. # The FORCE_MACOS_MACHINE envar, when set to 1, will force the compilation. 
_apple_devices = ("appletv", "iphone", "ipod", "ipad", "watch") is_macos = sys.platform == "darwin" and not machine().lower().startswith(_apple_devices) ext_modules = [] if is_macos or os.getenv("FORCE_MACOS_MACHINE", "0") == "1": ext_modules = [ Extension( name="_watchdog_fsevents", sources=[ "src/watchdog_fsevents.c", ], libraries=["m"], define_macros=[ ("WATCHDOG_VERSION_STRING", '"' + version.VERSION_STRING + '"'), ("WATCHDOG_VERSION_MAJOR", version.VERSION_MAJOR), ("WATCHDOG_VERSION_MINOR", version.VERSION_MINOR), ("WATCHDOG_VERSION_BUILD", version.VERSION_BUILD), ], extra_link_args=[ "-framework", "CoreFoundation", "-framework", "CoreServices", ], extra_compile_args=[ "-std=c99", "-pedantic", "-Wall", "-Wextra", "-fPIC", # Issue #620 "-Wno-nullability-completeness", # Issue #628 "-Wno-nullability-extension", "-Wno-newline-eof", # required w/Xcode 5.1+ and above because of '-mno-fused-madd' "-Wno-error=unused-command-line-argument", ], ), ] extras_require = { "watchmedo": ["PyYAML>=3.10"], } with open("README.rst", encoding="utf-8") as f: readme = f.read() with open("changelog.rst", encoding="utf-8") as f: changelog = f.read() setup( name="watchdog", version=version.VERSION_STRING, description="Filesystem events monitoring", long_description=readme + "\n\n" + changelog, long_description_content_type="text/x-rst", author="Yesudeep Mangalapilly", author_email="yesudeep@gmail.com", license="Apache License 2.0", url="https://github.com/gorakhargosh/watchdog", keywords=" ".join( [ "python", "filesystem", "monitoring", "monitor", "FSEvents", "kqueue", "inotify", "ReadDirectoryChangesW", "polling", "DirectorySnapshot", ] ), classifiers=[ "Development Status :: 3 - Alpha", "Environment :: Console", "Intended Audience :: Developers", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: POSIX :: Linux", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX :: 
BSD", "Operating System :: Microsoft :: Windows :: Windows Vista", "Operating System :: Microsoft :: Windows :: Windows 7", "Operating System :: Microsoft :: Windows :: Windows 8", "Operating System :: Microsoft :: Windows :: Windows 8.1", "Operating System :: Microsoft :: Windows :: Windows 10", "Operating System :: Microsoft :: Windows :: Windows 11", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3 :: Only", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: Implementation :: PyPy", "Programming Language :: C", "Topic :: Software Development :: Libraries", "Topic :: System :: Monitoring", "Topic :: System :: Filesystems", "Topic :: Utilities", ], package_dir={"": SRC_DIR}, packages=find_packages(SRC_DIR), include_package_data=True, extras_require=extras_require, cmdclass={ "build_ext": build_ext, }, ext_modules=ext_modules, entry_points={ "console_scripts": [ "watchmedo = watchdog.watchmedo:main [watchmedo]", ] }, python_requires=">=3.7", zip_safe=False, ) watchdog-3.0.0/src/000077500000000000000000000000001440602103100140705ustar00rootroot00000000000000watchdog-3.0.0/src/watchdog/000077500000000000000000000000001440602103100156705ustar00rootroot00000000000000watchdog-3.0.0/src/watchdog/__init__.py000066400000000000000000000012131440602103100177760ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. watchdog-3.0.0/src/watchdog/events.py000077500000000000000000000403031440602103100175510ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.events :synopsis: File system events and event handlers. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Event Classes ------------- .. autoclass:: FileSystemEvent :members: :show-inheritance: :inherited-members: .. autoclass:: FileSystemMovedEvent :members: :show-inheritance: .. autoclass:: FileMovedEvent :members: :show-inheritance: .. autoclass:: DirMovedEvent :members: :show-inheritance: .. autoclass:: FileModifiedEvent :members: :show-inheritance: .. autoclass:: DirModifiedEvent :members: :show-inheritance: .. autoclass:: FileCreatedEvent :members: :show-inheritance: .. autoclass:: FileClosedEvent :members: :show-inheritance: .. autoclass:: FileOpenedEvent :members: :show-inheritance: .. autoclass:: DirCreatedEvent :members: :show-inheritance: .. 
autoclass:: FileDeletedEvent :members: :show-inheritance: .. autoclass:: DirDeletedEvent :members: :show-inheritance: Event Handler Classes --------------------- .. autoclass:: FileSystemEventHandler :members: :show-inheritance: .. autoclass:: PatternMatchingEventHandler :members: :show-inheritance: .. autoclass:: RegexMatchingEventHandler :members: :show-inheritance: .. autoclass:: LoggingEventHandler :members: :show-inheritance: """ from __future__ import annotations import logging import os.path import re from watchdog.utils.patterns import match_any_paths EVENT_TYPE_MOVED = "moved" EVENT_TYPE_DELETED = "deleted" EVENT_TYPE_CREATED = "created" EVENT_TYPE_MODIFIED = "modified" EVENT_TYPE_CLOSED = "closed" EVENT_TYPE_OPENED = "opened" class FileSystemEvent: """ Immutable type that represents a file system event that is triggered when a change occurs on the monitored file system. All FileSystemEvent objects are required to be immutable and hence can be used as keys in dictionaries or be added to sets. """ event_type = "" """The type of the event as a string.""" is_directory = False """True if event was emitted for a directory; False otherwise.""" is_synthetic = False """ True if event was synthesized; False otherwise. These are events that weren't actually broadcast by the OS, but are presumed to have happened based on other, actual events. """ def __init__(self, src_path): self._src_path = src_path @property def src_path(self): """Source path of the file system object that triggered this event.""" return self._src_path def __str__(self): return self.__repr__() def __repr__(self): return ( f"<{type(self).__name__}: event_type={self.event_type}, " f"src_path={self.src_path!r}, is_directory={self.is_directory}>" ) # Used for comparison of events. 
@property def key(self): return (self.event_type, self.src_path, self.is_directory) def __eq__(self, event): return self.key == event.key def __ne__(self, event): return self.key != event.key def __hash__(self): return hash(self.key) class FileSystemMovedEvent(FileSystemEvent): """ File system event representing any kind of file system movement. """ event_type = EVENT_TYPE_MOVED def __init__(self, src_path, dest_path): super().__init__(src_path) self._dest_path = dest_path @property def dest_path(self): """The destination path of the move event.""" return self._dest_path # Used for hashing this as an immutable object. @property def key(self): return (self.event_type, self.src_path, self.dest_path, self.is_directory) def __repr__(self): return ( f"<{type(self).__name__}: src_path={self.src_path!r}, " f"dest_path={self.dest_path!r}, is_directory={self.is_directory}>" ) # File events. class FileDeletedEvent(FileSystemEvent): """File system event representing file deletion on the file system.""" event_type = EVENT_TYPE_DELETED class FileModifiedEvent(FileSystemEvent): """File system event representing file modification on the file system.""" event_type = EVENT_TYPE_MODIFIED class FileCreatedEvent(FileSystemEvent): """File system event representing file creation on the file system.""" event_type = EVENT_TYPE_CREATED class FileMovedEvent(FileSystemMovedEvent): """File system event representing file movement on the file system.""" class FileClosedEvent(FileSystemEvent): """File system event representing file close on the file system.""" event_type = EVENT_TYPE_CLOSED class FileOpenedEvent(FileSystemEvent): """File system event representing file close on the file system.""" event_type = EVENT_TYPE_OPENED # Directory events. 
class DirDeletedEvent(FileSystemEvent): """File system event representing directory deletion on the file system.""" event_type = EVENT_TYPE_DELETED is_directory = True class DirModifiedEvent(FileSystemEvent): """ File system event representing directory modification on the file system. """ event_type = EVENT_TYPE_MODIFIED is_directory = True class DirCreatedEvent(FileSystemEvent): """File system event representing directory creation on the file system.""" event_type = EVENT_TYPE_CREATED is_directory = True class DirMovedEvent(FileSystemMovedEvent): """File system event representing directory movement on the file system.""" is_directory = True class FileSystemEventHandler: """ Base file system event handler that you can override methods from. """ def dispatch(self, event): """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ self.on_any_event(event) { EVENT_TYPE_CREATED: self.on_created, EVENT_TYPE_DELETED: self.on_deleted, EVENT_TYPE_MODIFIED: self.on_modified, EVENT_TYPE_MOVED: self.on_moved, EVENT_TYPE_CLOSED: self.on_closed, EVENT_TYPE_OPENED: self.on_opened, }[event.event_type](event) def on_any_event(self, event): """Catch-all event handler. :param event: The event object representing the file system event. :type event: :class:`FileSystemEvent` """ def on_moved(self, event): """Called when a file or a directory is moved or renamed. :param event: Event representing file/directory movement. :type event: :class:`DirMovedEvent` or :class:`FileMovedEvent` """ def on_created(self, event): """Called when a file or directory is created. :param event: Event representing file/directory creation. :type event: :class:`DirCreatedEvent` or :class:`FileCreatedEvent` """ def on_deleted(self, event): """Called when a file or directory is deleted. :param event: Event representing file/directory deletion. 
:type event: :class:`DirDeletedEvent` or :class:`FileDeletedEvent` """ def on_modified(self, event): """Called when a file or directory is modified. :param event: Event representing file/directory modification. :type event: :class:`DirModifiedEvent` or :class:`FileModifiedEvent` """ def on_closed(self, event): """Called when a file opened for writing is closed. :param event: Event representing file closing. :type event: :class:`FileClosedEvent` """ def on_opened(self, event): """Called when a file is opened. :param event: Event representing file opening. :type event: :class:`FileOpenedEvent` """ class PatternMatchingEventHandler(FileSystemEventHandler): """ Matches given patterns with file paths associated with occurring events. """ def __init__( self, patterns=None, ignore_patterns=None, ignore_directories=False, case_sensitive=False, ): super().__init__() self._patterns = patterns self._ignore_patterns = ignore_patterns self._ignore_directories = ignore_directories self._case_sensitive = case_sensitive @property def patterns(self): """ (Read-only) Patterns to allow matching event paths. """ return self._patterns @property def ignore_patterns(self): """ (Read-only) Patterns to ignore matching event paths. """ return self._ignore_patterns @property def ignore_directories(self): """ (Read-only) ``True`` if directories should be ignored; ``False`` otherwise. """ return self._ignore_directories @property def case_sensitive(self): """ (Read-only) ``True`` if path names should be matched sensitive to case; ``False`` otherwise. """ return self._case_sensitive def dispatch(self, event): """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. 
:type event: :class:`FileSystemEvent` """ if self.ignore_directories and event.is_directory: return paths = [] if hasattr(event, "dest_path"): paths.append(os.fsdecode(event.dest_path)) if event.src_path: paths.append(os.fsdecode(event.src_path)) if match_any_paths( paths, included_patterns=self.patterns, excluded_patterns=self.ignore_patterns, case_sensitive=self.case_sensitive, ): super().dispatch(event) class RegexMatchingEventHandler(FileSystemEventHandler): """ Matches given regexes with file paths associated with occurring events. """ def __init__( self, regexes=None, ignore_regexes=None, ignore_directories=False, case_sensitive=False, ): super().__init__() if regexes is None: regexes = [r".*"] elif isinstance(regexes, str): regexes = [regexes] if ignore_regexes is None: ignore_regexes = [] if case_sensitive: self._regexes = [re.compile(r) for r in regexes] self._ignore_regexes = [re.compile(r) for r in ignore_regexes] else: self._regexes = [re.compile(r, re.I) for r in regexes] self._ignore_regexes = [re.compile(r, re.I) for r in ignore_regexes] self._ignore_directories = ignore_directories self._case_sensitive = case_sensitive @property def regexes(self): """ (Read-only) Regexes to allow matching event paths. """ return self._regexes @property def ignore_regexes(self): """ (Read-only) Regexes to ignore matching event paths. """ return self._ignore_regexes @property def ignore_directories(self): """ (Read-only) ``True`` if directories should be ignored; ``False`` otherwise. """ return self._ignore_directories @property def case_sensitive(self): """ (Read-only) ``True`` if path names should be matched sensitive to case; ``False`` otherwise. """ return self._case_sensitive def dispatch(self, event): """Dispatches events to the appropriate methods. :param event: The event object representing the file system event. 
:type event: :class:`FileSystemEvent` """ if self.ignore_directories and event.is_directory: return paths = [] if hasattr(event, "dest_path"): paths.append(os.fsdecode(event.dest_path)) if event.src_path: paths.append(os.fsdecode(event.src_path)) if any(r.match(p) for r in self.ignore_regexes for p in paths): return if any(r.match(p) for r in self.regexes for p in paths): super().dispatch(event) class LoggingEventHandler(FileSystemEventHandler): """Logs all the events captured.""" def __init__(self, logger=None): super().__init__() self.logger = logger or logging.root def on_moved(self, event): super().on_moved(event) what = "directory" if event.is_directory else "file" self.logger.info( "Moved %s: from %s to %s", what, event.src_path, event.dest_path ) def on_created(self, event): super().on_created(event) what = "directory" if event.is_directory else "file" self.logger.info("Created %s: %s", what, event.src_path) def on_deleted(self, event): super().on_deleted(event) what = "directory" if event.is_directory else "file" self.logger.info("Deleted %s: %s", what, event.src_path) def on_modified(self, event): super().on_modified(event) what = "directory" if event.is_directory else "file" self.logger.info("Modified %s: %s", what, event.src_path) def generate_sub_moved_events(src_dir_path, dest_dir_path): """Generates an event list of :class:`DirMovedEvent` and :class:`FileMovedEvent` objects for all the files and directories within the given moved directory that were moved along with the directory. :param src_dir_path: The source path of the moved directory. :param dest_dir_path: The destination path of the moved directory. :returns: An iterable of file system events of type :class:`DirMovedEvent` and :class:`FileMovedEvent`. 
""" for root, directories, filenames in os.walk(dest_dir_path): for directory in directories: full_path = os.path.join(root, directory) renamed_path = ( full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None ) dir_moved_event = DirMovedEvent(renamed_path, full_path) dir_moved_event.is_synthetic = True yield dir_moved_event for filename in filenames: full_path = os.path.join(root, filename) renamed_path = ( full_path.replace(dest_dir_path, src_dir_path) if src_dir_path else None ) file_moved_event = FileMovedEvent(renamed_path, full_path) file_moved_event.is_synthetic = True yield file_moved_event def generate_sub_created_events(src_dir_path): """Generates an event list of :class:`DirCreatedEvent` and :class:`FileCreatedEvent` objects for all the files and directories within the given moved directory that were moved along with the directory. :param src_dir_path: The source path of the created directory. :returns: An iterable of file system events of type :class:`DirCreatedEvent` and :class:`FileCreatedEvent`. """ for root, directories, filenames in os.walk(src_dir_path): for directory in directories: dir_created_event = DirCreatedEvent(os.path.join(root, directory)) dir_created_event.is_synthetic = True yield dir_created_event for filename in filenames: file_created_event = FileCreatedEvent(os.path.join(root, filename)) file_created_event.is_synthetic = True yield file_created_event watchdog-3.0.0/src/watchdog/observers/000077500000000000000000000000001440602103100177025ustar00rootroot00000000000000watchdog-3.0.0/src/watchdog/observers/__init__.py000066400000000000000000000067151440602103100220240ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers :synopsis: Observer that picks a native implementation if available. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ======= .. autoclass:: Observer :members: :show-inheritance: :inherited-members: Observer thread that schedules watching directories and dispatches calls to event handlers. You can also import platform specific classes directly and use it instead of :class:`Observer`. Here is a list of implemented observer classes.: ============== ================================ ============================== Class Platforms Note ============== ================================ ============================== |Inotify| Linux 2.6.13+ ``inotify(7)`` based observer |FSEvents| macOS FSEvents based observer |Kqueue| macOS and BSD with kqueue(2) ``kqueue(2)`` based observer |WinApi| MS Windows Windows API-based observer |Polling| Any fallback implementation ============== ================================ ============================== .. |Inotify| replace:: :class:`.inotify.InotifyObserver` .. |FSEvents| replace:: :class:`.fsevents.FSEventsObserver` .. |Kqueue| replace:: :class:`.kqueue.KqueueObserver` .. |WinApi| replace:: :class:`.read_directory_changes.WindowsApiObserver` .. 
|Polling| replace:: :class:`.polling.PollingObserver` """ from __future__ import annotations import sys import warnings from watchdog.utils import UnsupportedLibc from .api import BaseObserverSubclassCallable Observer: BaseObserverSubclassCallable if sys.platform.startswith("linux"): try: from .inotify import InotifyObserver as Observer except UnsupportedLibc: from .polling import PollingObserver as Observer elif sys.platform.startswith("darwin"): try: from .fsevents import FSEventsObserver as Observer except Exception: try: from .kqueue import KqueueObserver as Observer warnings.warn("Failed to import fsevents. Fall back to kqueue") except Exception: from .polling import PollingObserver as Observer warnings.warn("Failed to import fsevents and kqueue. Fall back to polling.") elif sys.platform in ("dragonfly", "freebsd", "netbsd", "openbsd", "bsd"): from .kqueue import KqueueObserver as Observer elif sys.platform.startswith("win"): try: from .read_directory_changes import WindowsApiObserver as Observer except Exception: from .polling import PollingObserver as Observer warnings.warn("Failed to import read_directory_changes. Fall back to polling.") else: from .polling import PollingObserver as Observer __all__ = ["Observer"] watchdog-3.0.0/src/watchdog/observers/api.py000066400000000000000000000274521440602103100210370ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import queue import threading from pathlib import Path from watchdog.utils import BaseThread, Protocol from watchdog.utils.bricks import SkipRepeatsQueue DEFAULT_EMITTER_TIMEOUT = 1 # in seconds. DEFAULT_OBSERVER_TIMEOUT = 1 # in seconds. # Collection classes class EventQueue(SkipRepeatsQueue): """Thread-safe event queue based on a special queue that skips adding the same event (:class:`FileSystemEvent`) multiple times consecutively. Thus avoiding dispatching multiple event handling calls when multiple identical events are produced quicker than an observer can consume them. """ class ObservedWatch: """An scheduled watch. :param path: Path string. :param recursive: ``True`` if watch is recursive; ``False`` otherwise. """ def __init__(self, path, recursive): if isinstance(path, Path): self._path = str(path) else: self._path = path self._is_recursive = recursive @property def path(self): """The path that this watch monitors.""" return self._path @property def is_recursive(self): """Determines whether subdirectories are watched for the path.""" return self._is_recursive @property def key(self): return self.path, self.is_recursive def __eq__(self, watch): return self.key == watch.key def __ne__(self, watch): return self.key != watch.key def __hash__(self): return hash(self.key) def __repr__(self): return f"<{type(self).__name__}: path={self.path!r}, is_recursive={self.is_recursive}>" # Observer classes class EventEmitter(BaseThread): """ Producer thread base class subclassed by event emitters that generate events and populate a queue with them. :param event_queue: The event queue to populate with generated events. :type event_queue: :class:`watchdog.events.EventQueue` :param watch: The watch to observe and produce events for. :type watch: :class:`ObservedWatch` :param timeout: Timeout (in seconds) between successive attempts at reading events. 
:type timeout: ``float`` """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): super().__init__() self._event_queue = event_queue self._watch = watch self._timeout = timeout @property def timeout(self): """ Blocking timeout for reading events. """ return self._timeout @property def watch(self): """ The watch associated with this emitter. """ return self._watch def queue_event(self, event): """ Queues a single event. :param event: Event to be queued. :type event: An instance of :class:`watchdog.events.FileSystemEvent` or a subclass. """ self._event_queue.put((event, self.watch)) def queue_events(self, timeout): """Override this method to populate the event queue with events per interval period. :param timeout: Timeout (in seconds) between successive attempts at reading events. :type timeout: ``float`` """ def run(self): while self.should_keep_running(): self.queue_events(self.timeout) class EventDispatcher(BaseThread): """ Consumer thread base class subclassed by event observer threads that dispatch events from an event queue to appropriate event handlers. :param timeout: Timeout value (in seconds) passed to emitters constructions in the child class BaseObserver. 
:type timeout: ``float`` """ _stop_event = object() """Event inserted into the queue to signal a requested stop.""" def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__() self._event_queue = EventQueue() self._timeout = timeout @property def timeout(self): """Timeout value to construct emitters with.""" return self._timeout def stop(self): BaseThread.stop(self) try: self.event_queue.put_nowait(EventDispatcher._stop_event) except queue.Full: pass @property def event_queue(self): """The event queue which is populated with file system events by emitters and from which events are dispatched by a dispatcher thread.""" return self._event_queue def dispatch_events(self, event_queue): """Override this method to consume events from an event queue, blocking on the queue for the specified timeout before raising :class:`queue.Empty`. :param event_queue: Event queue to populate with one set of events. :type event_queue: :class:`EventQueue` :raises: :class:`queue.Empty` """ def run(self): while self.should_keep_running(): try: self.dispatch_events(self.event_queue) except queue.Empty: continue class BaseObserver(EventDispatcher): """Base observer.""" def __init__(self, emitter_class, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__(timeout) self._emitter_class = emitter_class self._lock = threading.RLock() self._watches = set() self._handlers = dict() self._emitters = set() self._emitter_for_watch = dict() def _add_emitter(self, emitter): self._emitter_for_watch[emitter.watch] = emitter self._emitters.add(emitter) def _remove_emitter(self, emitter): del self._emitter_for_watch[emitter.watch] self._emitters.remove(emitter) emitter.stop() try: emitter.join() except RuntimeError: pass def _clear_emitters(self): for emitter in self._emitters: emitter.stop() for emitter in self._emitters: try: emitter.join() except RuntimeError: pass self._emitters.clear() self._emitter_for_watch.clear() def _add_handler_for_watch(self, event_handler, watch): if watch not in 
self._handlers: self._handlers[watch] = set() self._handlers[watch].add(event_handler) def _remove_handlers_for_watch(self, watch): del self._handlers[watch] @property def emitters(self): """Returns event emitter created by this observer.""" return self._emitters def start(self): for emitter in self._emitters.copy(): try: emitter.start() except Exception: self._remove_emitter(emitter) raise super().start() def schedule(self, event_handler, path, recursive=False): """ Schedules watching a path and calls appropriate methods specified in the given event handler in response to file system events. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param path: Directory path that will be monitored. :type path: ``str`` :param recursive: ``True`` if events will be emitted for sub-directories traversed recursively; ``False`` otherwise. :type recursive: ``bool`` :return: An :class:`ObservedWatch` object instance representing a watch. """ with self._lock: watch = ObservedWatch(path, recursive) self._add_handler_for_watch(event_handler, watch) # If we don't have an emitter for this watch already, create it. if self._emitter_for_watch.get(watch) is None: emitter = self._emitter_class( event_queue=self.event_queue, watch=watch, timeout=self.timeout ) if self.is_alive(): emitter.start() self._add_emitter(emitter) self._watches.add(watch) return watch def add_handler_for_watch(self, event_handler, watch): """Adds a handler for the given watch. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param watch: The watch to add a handler for. 
:type watch: An instance of :class:`ObservedWatch` or a subclass of :class:`ObservedWatch` """ with self._lock: self._add_handler_for_watch(event_handler, watch) def remove_handler_for_watch(self, event_handler, watch): """Removes a handler for the given watch. :param event_handler: An event handler instance that has appropriate event handling methods which will be called by the observer in response to file system events. :type event_handler: :class:`watchdog.events.FileSystemEventHandler` or a subclass :param watch: The watch to remove a handler for. :type watch: An instance of :class:`ObservedWatch` or a subclass of :class:`ObservedWatch` """ with self._lock: self._handlers[watch].remove(event_handler) def unschedule(self, watch): """Unschedules a watch. :param watch: The watch to unschedule. :type watch: An instance of :class:`ObservedWatch` or a subclass of :class:`ObservedWatch` """ with self._lock: emitter = self._emitter_for_watch[watch] del self._handlers[watch] self._remove_emitter(emitter) self._watches.remove(watch) def unschedule_all(self): """Unschedules all watches and detaches all associated event handlers.""" with self._lock: self._handlers.clear() self._clear_emitters() self._watches.clear() def on_thread_stop(self): self.unschedule_all() def dispatch_events(self, event_queue): entry = event_queue.get(block=True) if entry is EventDispatcher._stop_event: return event, watch = entry with self._lock: # To allow unschedule/stop and safe removal of event handlers # within event handlers itself, check if the handler is still # registered after every dispatch. for handler in list(self._handlers.get(watch, [])): if handler in self._handlers.get(watch, []): handler.dispatch(event) event_queue.task_done() class BaseObserverSubclassCallable(Protocol): def __call__(self, timeout: float = ...) -> BaseObserver: ... 
watchdog-3.0.0/src/watchdog/observers/fsevents.py000066400000000000000000000334241440602103100221170ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.fsevents :synopsis: FSEvents based emitter implementation. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: macOS """ from __future__ import annotations import logging import os import threading import time import unicodedata import _watchdog_fsevents as _fsevents # type: ignore[import] from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, generate_sub_created_events, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.utils.dirsnapshot import DirectorySnapshot logger = logging.getLogger("fsevents") class FSEventsEmitter(EventEmitter): """ macOS FSEvents Emitter class. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). 
:param suppress_history: The FSEvents API may emit historic events up to 30 sec before the watch was started. When ``suppress_history`` is ``True``, those events will be suppressed by creating a directory snapshot of the watched path before starting the stream as a reference to suppress old events. Warning: This may result in significant memory usage in case of a large number of items in the watched path. :type timeout: ``float`` """ def __init__( self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT, suppress_history=False, ): super().__init__(event_queue, watch, timeout) self._fs_view = set() self.suppress_history = suppress_history self._start_time = 0.0 self._starting_state = None self._lock = threading.Lock() self._absolute_watch_path = os.path.realpath( os.path.abspath(os.path.expanduser(self.watch.path)) ) def on_thread_stop(self): _fsevents.remove_watch(self.watch) _fsevents.stop(self) def queue_event(self, event): # fsevents defaults to be recursive, so if the watch was meant to be non-recursive then we need to drop # all the events here which do not have a src_path / dest_path that matches the watched path if self._watch.is_recursive: logger.debug("queue_event %s", event) EventEmitter.queue_event(self, event) else: if not self._is_recursive_event(event): logger.debug("queue_event %s", event) EventEmitter.queue_event(self, event) else: logger.debug("drop event %s", event) def _is_recursive_event(self, event): src_path = ( event.src_path if event.is_directory else os.path.dirname(event.src_path) ) if src_path == self._absolute_watch_path: return False if isinstance(event, (FileMovedEvent, DirMovedEvent)): # when moving something into the watch path we must always take the dirname, # otherwise we miss out on `DirMovedEvent`s dest_path = os.path.dirname(event.dest_path) if dest_path == self._absolute_watch_path: return False return True def _queue_created_event(self, event, src_path, dirname): cls = DirCreatedEvent if event.is_directory else 
FileCreatedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(dirname)) def _queue_deleted_event(self, event, src_path, dirname): cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(src_path)) self.queue_event(DirModifiedEvent(dirname)) def _queue_modified_event(self, event, src_path, dirname): cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(src_path)) def _queue_renamed_event( self, src_event, src_path, dst_path, src_dirname, dst_dirname ): cls = DirMovedEvent if src_event.is_directory else FileMovedEvent dst_path = self._encode_path(dst_path) self.queue_event(cls(src_path, dst_path)) self.queue_event(DirModifiedEvent(src_dirname)) self.queue_event(DirModifiedEvent(dst_dirname)) def _is_historic_created_event(self, event): # We only queue a created event if the item was created after we # started the FSEventsStream. in_history = event.inode in self._fs_view if self._starting_state: try: old_inode = self._starting_state.inode(event.path)[0] before_start = old_inode == event.inode except KeyError: before_start = False else: before_start = False return in_history or before_start @staticmethod def _is_meta_mod(event): """Returns True if the event indicates a change in metadata.""" return event.is_inode_meta_mod or event.is_xattr_mod or event.is_owner_change def queue_events(self, timeout, events): if logger.getEffectiveLevel() <= logging.DEBUG: for event in events: flags = ", ".join( attr for attr in dir(event) if getattr(event, attr) is True ) logger.debug(f"{event}: {flags}") if time.monotonic() - self._start_time > 60: # Event history is no longer needed, let's free some memory. 
self._starting_state = None while events: event = events.pop(0) src_path = self._encode_path(event.path) src_dirname = os.path.dirname(src_path) try: stat = os.stat(src_path) except OSError: stat = None exists = stat and stat.st_ino == event.inode # FSevents may coalesce multiple events for the same item + path into a # single event. However, events are never coalesced for different items at # the same path or for the same item at different paths. Therefore, the # event chains "removed -> created" and "created -> renamed -> removed" will # never emit a single native event and a deleted event *always* means that # the item no longer existed at the end of the event chain. # Some events will have a spurious `is_created` flag set, coalesced from an # already emitted and processed CreatedEvent. To filter those, we keep track # of all inodes which we know to be already created. This is safer than # keeping track of paths since paths are more likely to be reused than # inodes. # Likewise, some events will have a spurious `is_modified`, # `is_inode_meta_mod` or `is_xattr_mod` flag set. We currently do not # suppress those but could do so if the item still exists by caching the # stat result and verifying that it did change. if event.is_created and event.is_removed: # Events will only be coalesced for the same item / inode. # The sequence deleted -> created therefore cannot occur. # Any combination with renamed cannot occur either. 
if not self._is_historic_created_event(event): self._queue_created_event(event, src_path, src_dirname) self._fs_view.add(event.inode) if event.is_modified or self._is_meta_mod(event): self._queue_modified_event(event, src_path, src_dirname) self._queue_deleted_event(event, src_path, src_dirname) self._fs_view.discard(event.inode) else: if event.is_created and not self._is_historic_created_event(event): self._queue_created_event(event, src_path, src_dirname) self._fs_view.add(event.inode) if event.is_modified or self._is_meta_mod(event): self._queue_modified_event(event, src_path, src_dirname) if event.is_renamed: # Check if we have a corresponding destination event in the watched path. dst_event = next( iter( e for e in events if e.is_renamed and e.inode == event.inode ), None, ) if dst_event: # Item was moved within the watched folder. logger.debug("Destination event for rename is %s", dst_event) dst_path = self._encode_path(dst_event.path) dst_dirname = os.path.dirname(dst_path) self._queue_renamed_event( event, src_path, dst_path, src_dirname, dst_dirname ) self._fs_view.add(event.inode) for sub_event in generate_sub_moved_events(src_path, dst_path): self.queue_event(sub_event) # Process any coalesced flags for the dst_event. events.remove(dst_event) if dst_event.is_modified or self._is_meta_mod(dst_event): self._queue_modified_event(dst_event, dst_path, dst_dirname) if dst_event.is_removed: self._queue_deleted_event(dst_event, dst_path, dst_dirname) self._fs_view.discard(dst_event.inode) elif exists: # This is the destination event, item was moved into the watched # folder. self._queue_created_event(event, src_path, src_dirname) self._fs_view.add(event.inode) for sub_event in generate_sub_created_events(src_path): self.queue_event(sub_event) else: # This is the source event, item was moved out of the watched # folder. self._queue_deleted_event(event, src_path, src_dirname) self._fs_view.discard(event.inode) # Skip further coalesced processing. 
continue if event.is_removed: # Won't occur together with renamed. self._queue_deleted_event(event, src_path, src_dirname) self._fs_view.discard(event.inode) if event.is_root_changed: # This will be set if root or any of its parents is renamed or deleted. # TODO: find out new path and generate DirMovedEvent? self.queue_event(DirDeletedEvent(self.watch.path)) logger.debug("Stopping because root path was changed") self.stop() self._fs_view.clear() def events_callback(self, paths, inodes, flags, ids): """Callback passed to FSEventStreamCreate(), it will receive all FS events and queue them. """ cls = _fsevents.NativeEvent try: events = [ cls(path, inode, event_flags, event_id) for path, inode, event_flags, event_id in zip(paths, inodes, flags, ids) ] with self._lock: self.queue_events(self.timeout, events) except Exception: logger.exception("Unhandled exception in fsevents callback") def run(self): self.pathnames = [self.watch.path] self._start_time = time.monotonic() try: _fsevents.add_watch(self, self.watch, self.events_callback, self.pathnames) _fsevents.read_events(self) except Exception: logger.exception("Unhandled exception in FSEventsEmitter") def on_thread_start(self): if self.suppress_history: if isinstance(self.watch.path, bytes): watch_path = os.fsdecode(self.watch.path) else: watch_path = self.watch.path self._starting_state = DirectorySnapshot(watch_path) def _encode_path(self, path): """Encode path only if bytes were passed to this emitter.""" if isinstance(self.watch.path, bytes): return os.fsencode(path) return path class FSEventsObserver(BaseObserver): def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__(emitter_class=FSEventsEmitter, timeout=timeout) def schedule(self, event_handler, path, recursive=False): # Fix for issue #26: Trace/BPT error when given a unicode path # string. 
https://github.com/gorakhargosh/watchdog/issues#issue/26 if isinstance(path, str): path = unicodedata.normalize("NFC", path) return BaseObserver.schedule(self, event_handler, path, recursive) watchdog-3.0.0/src/watchdog/observers/fsevents2.py000066400000000000000000000225721440602103100222030ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.fsevents2 :synopsis: FSEvents based emitter implementation. :platforms: macOS """ from __future__ import annotations import logging import os import queue import unicodedata import warnings from threading import Thread from typing import List, Optional, Type # pyobjc import AppKit # type: ignore[import] from FSEvents import ( # type: ignore[import] CFRunLoopGetCurrent, CFRunLoopRun, CFRunLoopStop, FSEventStreamCreate, FSEventStreamInvalidate, FSEventStreamRelease, FSEventStreamScheduleWithRunLoop, FSEventStreamStart, FSEventStreamStop, kCFAllocatorDefault, kCFRunLoopDefaultMode, kFSEventStreamCreateFlagFileEvents, kFSEventStreamCreateFlagNoDefer, kFSEventStreamEventFlagItemChangeOwner, kFSEventStreamEventFlagItemCreated, kFSEventStreamEventFlagItemFinderInfoMod, kFSEventStreamEventFlagItemInodeMetaMod, kFSEventStreamEventFlagItemIsDir, kFSEventStreamEventFlagItemIsSymlink, kFSEventStreamEventFlagItemModified, kFSEventStreamEventFlagItemRemoved, kFSEventStreamEventFlagItemRenamed, kFSEventStreamEventFlagItemXattrMod, kFSEventStreamEventIdSinceNow, ) from 
watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileSystemEvent, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter logger = logging.getLogger(__name__) message = "watchdog.observers.fsevents2 is deprecated and will be removed in a future release." warnings.warn(message, DeprecationWarning) logger.warning(message) class FSEventsQueue(Thread): """Low level FSEvents client.""" def __init__(self, path): Thread.__init__(self) self._queue: queue.Queue[Optional[List[NativeEvent]]] = queue.Queue() self._run_loop = None if isinstance(path, bytes): path = os.fsdecode(path) self._path = unicodedata.normalize("NFC", path) context = None latency = 1.0 self._stream_ref = FSEventStreamCreate( kCFAllocatorDefault, self._callback, context, [self._path], kFSEventStreamEventIdSinceNow, latency, kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents, ) if self._stream_ref is None: raise OSError("FSEvents. Could not create stream.") def run(self): pool = AppKit.NSAutoreleasePool.alloc().init() self._run_loop = CFRunLoopGetCurrent() FSEventStreamScheduleWithRunLoop( self._stream_ref, self._run_loop, kCFRunLoopDefaultMode ) if not FSEventStreamStart(self._stream_ref): FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) raise OSError("FSEvents. 
Could not start stream.") CFRunLoopRun() FSEventStreamStop(self._stream_ref) FSEventStreamInvalidate(self._stream_ref) FSEventStreamRelease(self._stream_ref) del pool # Make sure waiting thread is notified self._queue.put(None) def stop(self): if self._run_loop is not None: CFRunLoopStop(self._run_loop) def _callback( self, streamRef, clientCallBackInfo, numEvents, eventPaths, eventFlags, eventIDs ): events = [ NativeEvent(path, flags, _id) for path, flags, _id in zip(eventPaths, eventFlags, eventIDs) ] logger.debug(f"FSEvents callback. Got {numEvents} events:") for e in events: logger.debug(e) self._queue.put(events) def read_events(self): """ Returns a list or one or more events, or None if there are no more events to be read. """ if not self.is_alive(): return None return self._queue.get() class NativeEvent: def __init__(self, path, flags, event_id): self.path = path self.flags = flags self.event_id = event_id self.is_created = bool(flags & kFSEventStreamEventFlagItemCreated) self.is_removed = bool(flags & kFSEventStreamEventFlagItemRemoved) self.is_renamed = bool(flags & kFSEventStreamEventFlagItemRenamed) self.is_modified = bool(flags & kFSEventStreamEventFlagItemModified) self.is_change_owner = bool(flags & kFSEventStreamEventFlagItemChangeOwner) self.is_inode_meta_mod = bool(flags & kFSEventStreamEventFlagItemInodeMetaMod) self.is_finder_info_mod = bool(flags & kFSEventStreamEventFlagItemFinderInfoMod) self.is_xattr_mod = bool(flags & kFSEventStreamEventFlagItemXattrMod) self.is_symlink = bool(flags & kFSEventStreamEventFlagItemIsSymlink) self.is_directory = bool(flags & kFSEventStreamEventFlagItemIsDir) @property def _event_type(self): if self.is_created: return "Created" if self.is_removed: return "Removed" if self.is_renamed: return "Renamed" if self.is_modified: return "Modified" if self.is_inode_meta_mod: return "InodeMetaMod" if self.is_xattr_mod: return "XattrMod" return "Unknown" def __repr__(self): return ( f"<{type(self).__name__}: 
path={self.path!r}, type={self._event_type}," f" is_dir={self.is_directory}, flags={hex(self.flags)}, id={self.event_id}>" ) class FSEventsEmitter(EventEmitter): """ FSEvents based event emitter. Handles conversion of native events. """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): super().__init__(event_queue, watch, timeout) self._fsevents = FSEventsQueue(watch.path) self._fsevents.start() def on_thread_stop(self): self._fsevents.stop() def queue_events(self, timeout): events = self._fsevents.read_events() if events is None: return i = 0 while i < len(events): event = events[i] cls: Type[FileSystemEvent] # For some reason the create and remove flags are sometimes also # set for rename and modify type events, so let those take # precedence. if event.is_renamed: # Internal moves appears to always be consecutive in the same # buffer and have IDs differ by exactly one (while others # don't) making it possible to pair up the two events coming # from a single move operation. (None of this is documented!) # Otherwise, guess whether file was moved in or out. 
# TODO: handle id wrapping if ( i + 1 < len(events) and events[i + 1].is_renamed and events[i + 1].event_id == event.event_id + 1 ): cls = DirMovedEvent if event.is_directory else FileMovedEvent self.queue_event(cls(event.path, events[i + 1].path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) self.queue_event( DirModifiedEvent(os.path.dirname(events[i + 1].path)) ) i += 1 elif os.path.exists(event.path): cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) else: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) # TODO: generate events for tree elif event.is_modified or event.is_inode_meta_mod or event.is_xattr_mod: cls = DirModifiedEvent if event.is_directory else FileModifiedEvent self.queue_event(cls(event.path)) elif event.is_created: cls = DirCreatedEvent if event.is_directory else FileCreatedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) elif event.is_removed: cls = DirDeletedEvent if event.is_directory else FileDeletedEvent self.queue_event(cls(event.path)) self.queue_event(DirModifiedEvent(os.path.dirname(event.path))) i += 1 class FSEventsObserver2(BaseObserver): def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__(emitter_class=FSEventsEmitter, timeout=timeout) watchdog-3.0.0/src/watchdog/observers/inotify.py000066400000000000000000000225071440602103100217430ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.inotify :synopsis: ``inotify(7)`` based emitter implementation. :author: Sebastien Martini :author: Luke McCarthy :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: Tim Cuthbertson :platforms: Linux 2.6.13+. .. ADMONITION:: About system requirements Recommended minimum kernel version: 2.6.25. Quote from the inotify(7) man page: "Inotify was merged into the 2.6.13 Linux kernel. The required library interfaces were added to glibc in version 2.4. (IN_DONT_FOLLOW, IN_MASK_ADD, and IN_ONLYDIR were only added in version 2.5.)" Therefore, you must ensure the system is running at least these versions appropriate libraries and the kernel. .. ADMONITION:: About recursiveness, event order, and event coalescing Quote from the inotify(7) man page: If successive output inotify events produced on the inotify file descriptor are identical (same wd, mask, cookie, and name) then they are coalesced into a single event if the older event has not yet been read (but see BUGS). The events returned by reading from an inotify file descriptor form an ordered queue. Thus, for example, it is guaranteed that when renaming from one directory to another, events will be produced in the correct order on the inotify file descriptor. ... Inotify monitoring of directories is not recursive: to monitor subdirectories under a directory, additional watches must be created. This emitter implementation therefore automatically adds watches for sub-directories if running in recursive mode. Some extremely useful articles and documentation: .. 
_inotify FAQ: http://inotify.aiken.cz/?section=inotify&page=faq&lang=en .. _intro to inotify: http://www.linuxjournal.com/article/8478 """ from __future__ import annotations import logging import os import threading from typing import Type from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, FileSystemEvent, generate_sub_created_events, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from .inotify_buffer import InotifyBuffer logger = logging.getLogger(__name__) class InotifyEmitter(EventEmitter): """ inotify(7)-based event emitter. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). 
    :type timeout: ``float``
    """

    def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT):
        super().__init__(event_queue, watch, timeout)
        # Serializes queue_events() against on_thread_stop() tearing down
        # the inotify buffer.
        self._lock = threading.Lock()
        # InotifyBuffer instance; created in on_thread_start(), cleared in
        # on_thread_stop().
        self._inotify = None

    def on_thread_start(self):
        # The underlying inotify machinery works on bytes paths.
        path = os.fsencode(self.watch.path)
        self._inotify = InotifyBuffer(path, self.watch.is_recursive)

    def on_thread_stop(self):
        if self._inotify:
            self._inotify.close()
            self._inotify = None

    def queue_events(self, timeout, full_events=False):
        # If "full_events" is true, then the method will report unmatched move events as separate events
        # This behavior is by default only called by a InotifyFullEmitter
        if self._inotify is None:
            logger.error("InotifyEmitter.queue_events() called when the thread is inactive")
            return
        with self._lock:
            # Re-check under the lock: on_thread_stop() may have cleared
            # _inotify between the unlocked check above and acquiring the lock.
            if self._inotify is None:
                logger.error("InotifyEmitter.queue_events() called when the thread is inactive")
                return
            event = self._inotify.read_event()
            if event is None:
                return

            cls: Type[FileSystemEvent]
            # A tuple is a matched (IN_MOVED_FROM, IN_MOVED_TO) pair produced
            # by InotifyBuffer._group_events().
            if isinstance(event, tuple):
                move_from, move_to = event
                src_path = self._decode_path(move_from.src_path)
                dest_path = self._decode_path(move_to.src_path)
                cls = DirMovedEvent if move_from.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, dest_path))
                # Both parent directories changed as a result of the move.
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                self.queue_event(DirModifiedEvent(os.path.dirname(dest_path)))
                if move_from.is_directory and self.watch.is_recursive:
                    for sub_event in generate_sub_moved_events(src_path, dest_path):
                        self.queue_event(sub_event)
                return

            src_path = self._decode_path(event.src_path)
            if event.is_moved_to:
                # Unmatched IN_MOVED_TO: the source is unknown, so report
                # either a move with a None source (full events) or a plain
                # creation.
                if full_events:
                    cls = DirMovedEvent if event.is_directory else FileMovedEvent
                    self.queue_event(cls(None, src_path))
                else:
                    cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                    self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
                if event.is_directory and self.watch.is_recursive:
                    for sub_event in generate_sub_created_events(src_path):
                        self.queue_event(sub_event)
            elif event.is_attrib:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_modify:
                cls = DirModifiedEvent if event.is_directory else FileModifiedEvent
                self.queue_event(cls(src_path))
            elif event.is_delete or (event.is_moved_from and not full_events):
                # An unmatched IN_MOVED_FROM is reported as a deletion unless
                # the caller asked for full (unmatched) move events.
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_moved_from and full_events:
                cls = DirMovedEvent if event.is_directory else FileMovedEvent
                self.queue_event(cls(src_path, None))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_create:
                cls = DirCreatedEvent if event.is_directory else FileCreatedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_close_write and not event.is_directory:
                cls = FileClosedEvent
                self.queue_event(cls(src_path))
                self.queue_event(DirModifiedEvent(os.path.dirname(src_path)))
            elif event.is_open and not event.is_directory:
                cls = FileOpenedEvent
                self.queue_event(cls(src_path))
            # elif event.is_close_nowrite and not event.is_directory:
            #     cls = FileClosedEvent
            #     self.queue_event(cls(src_path))
            elif event.is_delete_self and src_path == self.watch.path:
                # The watched path itself is gone: emit a final deletion and
                # shut this emitter down.
                cls = DirDeletedEvent if event.is_directory else FileDeletedEvent
                self.queue_event(cls(src_path))
                self.stop()

    def _decode_path(self, path):
        """Decode path only if unicode string was passed to this emitter."""
        if isinstance(self.watch.path, bytes):
            return path
        return os.fsdecode(path)


class InotifyFullEmitter(InotifyEmitter):
    """
    inotify(7)-based event emitter. By default this class produces move events even if they are not matched
    Such move events will have a ``None`` value for the unmatched part.

    :param event_queue: The event queue to fill with events.
    :param watch: A watch object representing the directory to monitor.
:type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :type timeout: ``float`` """ def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): super().__init__(event_queue, watch, timeout) def queue_events(self, timeout, events=True): InotifyEmitter.queue_events(self, timeout, full_events=events) class InotifyObserver(BaseObserver): """ Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False): cls = InotifyFullEmitter if generate_full_events else InotifyEmitter super().__init__(emitter_class=cls, timeout=timeout) watchdog-3.0.0/src/watchdog/observers/inotify_buffer.py000066400000000000000000000112131440602103100232640ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging from typing import TYPE_CHECKING, List, Tuple, Union from watchdog.observers.inotify_c import Inotify, InotifyEvent from watchdog.utils import BaseThread from watchdog.utils.delayed_queue import DelayedQueue logger = logging.getLogger(__name__) class InotifyBuffer(BaseThread): """A wrapper for `Inotify` that holds events for `delay` seconds. During this time, IN_MOVED_FROM and IN_MOVED_TO events are paired. 
    """

    # Seconds events are held back so IN_MOVED_FROM/IN_MOVED_TO pairs sharing
    # a cookie can be matched before being handed out.
    delay = 0.5

    def __init__(self, path, recursive=False):
        super().__init__()
        self._queue = DelayedQueue[InotifyEvent](self.delay)
        self._inotify = Inotify(path, recursive)
        # The reader thread starts immediately on construction.
        self.start()

    def read_event(self):
        """Returns a single event or a tuple of from/to events in case of a
        paired move event. If this buffer has been closed, immediately
        return None.
        """
        return self._queue.get()

    def on_thread_stop(self):
        self._inotify.close()
        self._queue.close()

    def close(self):
        self.stop()
        self.join()

    def _group_events(self, event_list):
        """Group any matching move events"""
        grouped: List[Union[InotifyEvent, Tuple[InotifyEvent, InotifyEvent]]] = []
        for inotify_event in event_list:
            logger.debug("in-event %s", inotify_event)

            # True for an unpaired IN_MOVED_FROM carrying the same cookie as
            # the current IN_MOVED_TO event.
            def matching_from_event(event):
                return (
                    not isinstance(event, tuple)
                    and event.is_moved_from
                    and event.cookie == inotify_event.cookie
                )

            if inotify_event.is_moved_to:
                # Check if move_from is already in the buffer
                for index, event in enumerate(grouped):
                    if matching_from_event(event):
                        if TYPE_CHECKING:
                            # this check is hidden from mypy inside matching_from_event()
                            assert not isinstance(event, tuple)
                        grouped[index] = (event, inotify_event)
                        break
                else:
                    # Check if move_from is in delayqueue already
                    from_event = self._queue.remove(matching_from_event)
                    if from_event is not None:
                        grouped.append((from_event, inotify_event))
                    else:
                        logger.debug("could not find matching move_from event")
                        grouped.append(inotify_event)
            else:
                grouped.append(inotify_event)
        return grouped

    def run(self):
        """Read event from `inotify` and add them to `queue`. When reading a
        IN_MOVE_TO event, remove the previous added matching IN_MOVE_FROM event
        and add them back to the queue as a tuple.
""" deleted_self = False while self.should_keep_running() and not deleted_self: inotify_events = self._inotify.read_events() grouped_events = self._group_events(inotify_events) for inotify_event in grouped_events: if not isinstance(inotify_event, tuple) and inotify_event.is_ignored: if inotify_event.src_path == self._inotify.path: # Watch was removed explicitly (inotify_rm_watch(2)) or automatically (file # was deleted, or filesystem was unmounted), stop watching for events deleted_self = True continue # Only add delay for unmatched move_from events delay = ( not isinstance(inotify_event, tuple) and inotify_event.is_moved_from ) self._queue.put(inotify_event, delay) if ( not isinstance(inotify_event, tuple) and inotify_event.is_delete_self and inotify_event.src_path == self._inotify.path ): # Deleted the watched directory, stop watching for events deleted_self = True watchdog-3.0.0/src/watchdog/observers/inotify_c.py000066400000000000000000000464331440602103100222510ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations

import ctypes
import ctypes.util
import errno
import os
import struct
import threading
from ctypes import c_char_p, c_int, c_uint32
from functools import reduce

from watchdog.utils import UnsupportedLibc

# CDLL(None) loads symbols from the running process itself (its libc).
libc = ctypes.CDLL(None)

# Bail out early if this libc does not expose the inotify entry points.
if (
    not hasattr(libc, "inotify_init")
    or not hasattr(libc, "inotify_add_watch")
    or not hasattr(libc, "inotify_rm_watch")
):
    raise UnsupportedLibc(f"Unsupported libc version found: {libc._name}")

# ctypes prototypes for the inotify syscall wrappers; use_errno=True makes
# failures observable via ctypes.get_errno().
inotify_add_watch = ctypes.CFUNCTYPE(c_int, c_int, c_char_p, c_uint32, use_errno=True)(
    ("inotify_add_watch", libc)
)

inotify_rm_watch = ctypes.CFUNCTYPE(c_int, c_int, c_uint32, use_errno=True)(
    ("inotify_rm_watch", libc)
)

inotify_init = ctypes.CFUNCTYPE(c_int, use_errno=True)(("inotify_init", libc))


class InotifyConstants:
    # Bit values mirror <sys/inotify.h>.
    # User-space events
    IN_ACCESS = 0x00000001  # File was accessed.
    IN_MODIFY = 0x00000002  # File was modified.
    IN_ATTRIB = 0x00000004  # Meta-data changed.
    IN_CLOSE_WRITE = 0x00000008  # Writable file was closed.
    IN_CLOSE_NOWRITE = 0x00000010  # Unwritable file closed.
    IN_OPEN = 0x00000020  # File was opened.
    IN_MOVED_FROM = 0x00000040  # File was moved from X.
    IN_MOVED_TO = 0x00000080  # File was moved to Y.
    IN_CREATE = 0x00000100  # Subfile was created.
    IN_DELETE = 0x00000200  # Subfile was deleted.
    IN_DELETE_SELF = 0x00000400  # Self was deleted.
    IN_MOVE_SELF = 0x00000800  # Self was moved.

    # Helper user-space events.
    IN_CLOSE = IN_CLOSE_WRITE | IN_CLOSE_NOWRITE  # Close.
    IN_MOVE = IN_MOVED_FROM | IN_MOVED_TO  # Moves.

    # Events sent by the kernel to a watch.
    IN_UNMOUNT = 0x00002000  # Backing file system was unmounted.
    IN_Q_OVERFLOW = 0x00004000  # Event queued overflowed.
    IN_IGNORED = 0x00008000  # File was ignored.

    # Special flags.
    IN_ONLYDIR = 0x01000000  # Only watch the path if it's a directory.
    IN_DONT_FOLLOW = 0x02000000  # Do not follow a symbolic link.
    IN_EXCL_UNLINK = 0x04000000  # Exclude events on unlinked objects
    IN_MASK_ADD = 0x20000000  # Add to the mask of an existing watch.
IN_ISDIR = 0x40000000 # Event occurred against directory. IN_ONESHOT = 0x80000000 # Only send event once. # All user-space events. IN_ALL_EVENTS = reduce( lambda x, y: x | y, [ IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE, IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_DELETE, IN_CREATE, IN_DELETE_SELF, IN_MOVE_SELF, ], ) # Flags for ``inotify_init1`` IN_CLOEXEC = 0x02000000 IN_NONBLOCK = 0x00004000 # Watchdog's API cares only about these events. WATCHDOG_ALL_EVENTS = reduce( lambda x, y: x | y, [ InotifyConstants.IN_MODIFY, InotifyConstants.IN_ATTRIB, InotifyConstants.IN_MOVED_FROM, InotifyConstants.IN_MOVED_TO, InotifyConstants.IN_CREATE, InotifyConstants.IN_DELETE, InotifyConstants.IN_DELETE_SELF, InotifyConstants.IN_DONT_FOLLOW, InotifyConstants.IN_CLOSE_WRITE, InotifyConstants.IN_OPEN, ], ) class inotify_event_struct(ctypes.Structure): """ Structure representation of the inotify_event structure (used in buffer size calculations):: struct inotify_event { __s32 wd; /* watch descriptor */ __u32 mask; /* watch mask */ __u32 cookie; /* cookie to synchronize two events */ __u32 len; /* length (including nulls) of name */ char name[0]; /* stub for possible name */ }; """ _fields_ = [ ("wd", c_int), ("mask", c_uint32), ("cookie", c_uint32), ("len", c_uint32), ("name", c_char_p), ] EVENT_SIZE = ctypes.sizeof(inotify_event_struct) DEFAULT_NUM_EVENTS = 2048 DEFAULT_EVENT_BUFFER_SIZE = DEFAULT_NUM_EVENTS * (EVENT_SIZE + 16) class Inotify: """ Linux inotify(7) API wrapper class. :param path: The directory path for which we want an inotify object. :type path: :class:`bytes` :param recursive: ``True`` if subdirectories should be monitored; ``False`` otherwise. """ def __init__(self, path, recursive=False, event_mask=WATCHDOG_ALL_EVENTS): # The file descriptor associated with the inotify instance. 
        inotify_fd = inotify_init()
        if inotify_fd == -1:
            Inotify._raise_error()
        self._inotify_fd = inotify_fd
        self._lock = threading.Lock()

        # Stores the watch descriptor for a given path.
        self._wd_for_path = {}
        # Inverse mapping: path for a given watch descriptor.
        self._path_for_wd = {}

        self._path = path
        self._event_mask = event_mask
        self._is_recursive = recursive
        # A directory gets a (possibly recursive) tree of watches; anything
        # else gets a single watch.
        if os.path.isdir(path):
            self._add_dir_watch(path, recursive, event_mask)
        else:
            self._add_watch(path, event_mask)
        # IN_MOVED_FROM events cached by cookie until the matching
        # IN_MOVED_TO arrives.
        self._moved_from_events = {}

    @property
    def event_mask(self):
        """The event mask for this inotify instance."""
        return self._event_mask

    @property
    def path(self):
        """The path associated with the inotify instance."""
        return self._path

    @property
    def is_recursive(self):
        """Whether we are watching directories recursively."""
        return self._is_recursive

    @property
    def fd(self):
        """The file descriptor associated with the inotify instance."""
        return self._inotify_fd

    def clear_move_records(self):
        """Clear cached records of MOVED_FROM events"""
        self._moved_from_events = {}

    def source_for_move(self, destination_event):
        """
        The source path corresponding to the given MOVED_TO event.

        If the source path is outside the monitored directories, None
        is returned instead.
        """
        # Lookup is keyed by the kernel-assigned cookie that pairs the two
        # halves of a rename.
        if destination_event.cookie in self._moved_from_events:
            return self._moved_from_events[destination_event.cookie].src_path
        else:
            return None

    def remember_move_from_event(self, event):
        """
        Save this event as the source event for future MOVED_TO events to
        reference.
        """
        self._moved_from_events[event.cookie] = event

    def add_watch(self, path):
        """
        Adds a watch for the given path.

        :param path:
            Path to begin monitoring.
        """
        with self._lock:
            self._add_watch(path, self._event_mask)

    def remove_watch(self, path):
        """
        Removes a watch for the given path.

        :param path:
            Path string for which the watch will be removed.
        """
        with self._lock:
            # NOTE(review): bookkeeping is updated before inotify_rm_watch()
            # is checked, so a failing removal still drops the mappings.
            wd = self._wd_for_path.pop(path)
            del self._path_for_wd[wd]
            if inotify_rm_watch(self._inotify_fd, wd) == -1:
                Inotify._raise_error()

    def close(self):
        """
        Closes the inotify instance and removes all associated watches.
        """
        with self._lock:
            # Explicitly remove only the root watch; closing the fd below
            # releases the remaining kernel watches.
            if self._path in self._wd_for_path:
                wd = self._wd_for_path[self._path]
                inotify_rm_watch(self._inotify_fd, wd)

            try:
                os.close(self._inotify_fd)
            except OSError:
                # descriptor may be invalid because file was deleted
                pass

    def read_events(self, event_buffer_size=DEFAULT_EVENT_BUFFER_SIZE):
        """
        Reads events from inotify and yields them.
        """
        # HACK: We need to traverse the directory path
        # recursively and simulate events for newly
        # created subdirectories/files. This will handle
        # mkdir -p foobar/blah/bar; touch foobar/afile

        def _recursive_simulate(src_path):
            # Walk the newly created tree, adding watches for sub-directories
            # and synthesizing IN_CREATE events for everything found.
            events = []
            for root, dirnames, filenames in os.walk(src_path):
                for dirname in dirnames:
                    try:
                        full_path = os.path.join(root, dirname)
                        wd_dir = self._add_watch(full_path, self._event_mask)
                        e = InotifyEvent(
                            wd_dir,
                            InotifyConstants.IN_CREATE | InotifyConstants.IN_ISDIR,
                            0,
                            dirname,
                            full_path,
                        )
                        events.append(e)
                    except OSError:
                        pass
                for filename in filenames:
                    full_path = os.path.join(root, filename)
                    wd_parent_dir = self._wd_for_path[os.path.dirname(full_path)]
                    e = InotifyEvent(
                        wd_parent_dir,
                        InotifyConstants.IN_CREATE,
                        0,
                        filename,
                        full_path,
                    )
                    events.append(e)
            return events

        event_buffer = None
        while True:
            try:
                event_buffer = os.read(self._inotify_fd, event_buffer_size)
            except OSError as e:
                if e.errno == errno.EINTR:
                    # Interrupted system call; retry the read.
                    continue
                elif e.errno == errno.EBADF:
                    # The inotify fd was closed (see close()); nothing to read.
                    return []
                else:
                    raise
            break

        with self._lock:
            event_list = []
            for wd, mask, cookie, name in Inotify._parse_event_buffer(event_buffer):
                if wd == -1:
                    continue
                wd_path = self._path_for_wd[wd]
                src_path = (
                    os.path.join(wd_path, name) if name else wd_path
                )  # avoid trailing slash
                inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)

                if inotify_event.is_moved_from:
                    self.remember_move_from_event(inotify_event)
                elif inotify_event.is_moved_to:
                    move_src_path = self.source_for_move(inotify_event)
                    if move_src_path in self._wd_for_path:
                        # Re-point the existing watch descriptor(s) at the
                        # destination path instead of re-watching.
                        moved_wd = self._wd_for_path[move_src_path]
                        del self._wd_for_path[move_src_path]
                        self._wd_for_path[inotify_event.src_path] = moved_wd
                        self._path_for_wd[moved_wd] = inotify_event.src_path
                        if self.is_recursive:
                            for _path, _wd in self._wd_for_path.copy().items():
                                if _path.startswith(
                                    move_src_path + os.path.sep.encode()
                                ):
                                    moved_wd = self._wd_for_path.pop(_path)
                                    _move_to_path = _path.replace(
                                        move_src_path, inotify_event.src_path
                                    )
                                    self._wd_for_path[_move_to_path] = moved_wd
                                    self._path_for_wd[moved_wd] = _move_to_path
                    # NOTE(review): unlike the join above, this re-join does
                    # not guard against an empty ``name``, so it could yield a
                    # trailing separator — confirm whether that is reachable.
                    src_path = os.path.join(wd_path, name)
                    inotify_event = InotifyEvent(wd, mask, cookie, name, src_path)

                if inotify_event.is_ignored:
                    # Clean up book-keeping for deleted watches.
                    path = self._path_for_wd.pop(wd)
                    if self._wd_for_path[path] == wd:
                        del self._wd_for_path[path]

                event_list.append(inotify_event)

                if (
                    self.is_recursive
                    and inotify_event.is_directory
                    and inotify_event.is_create
                ):
                    # TODO: When a directory from another part of the
                    # filesystem is moved into a watched directory, this
                    # will not generate events for the directory tree.
                    # We need to coalesce IN_MOVED_TO events and those
                    # IN_MOVED_TO events which don't pair up with
                    # IN_MOVED_FROM events should be marked IN_CREATE
                    # instead relative to this directory.
                    try:
                        self._add_watch(src_path, self._event_mask)
                    except OSError:
                        continue

                    event_list.extend(_recursive_simulate(src_path))

        return event_list

    # Non-synchronized methods.
    def _add_dir_watch(self, path, recursive, mask):
        """
        Adds a watch (optionally recursively) for the given directory path
        to monitor events specified by the mask.

        :param path:
            Path to monitor
        :param recursive:
            ``True`` to monitor recursively.
        :param mask:
            Event bit mask.
        """
        if not os.path.isdir(path):
            raise OSError(errno.ENOTDIR, os.strerror(errno.ENOTDIR), path)
        self._add_watch(path, mask)
        if recursive:
            for root, dirnames, _ in os.walk(path):
                for dirname in dirnames:
                    full_path = os.path.join(root, dirname)
                    # Symlinked directories are not followed.
                    if os.path.islink(full_path):
                        continue
                    self._add_watch(full_path, mask)

    def _add_watch(self, path, mask):
        """
        Adds a watch for the given path to monitor events specified by the
        mask.

        :param path:
            Path to monitor
        :param mask:
            Event bit mask.
        """
        wd = inotify_add_watch(self._inotify_fd, path, mask)
        if wd == -1:
            Inotify._raise_error()
        # Keep both direction mappings in sync.
        self._wd_for_path[path] = wd
        self._path_for_wd[wd] = path
        return wd

    @staticmethod
    def _raise_error():
        """
        Raises errors for inotify failures.
        """
        err = ctypes.get_errno()
        if err == errno.ENOSPC:
            raise OSError(errno.ENOSPC, "inotify watch limit reached")
        elif err == errno.EMFILE:
            raise OSError(errno.EMFILE, "inotify instance limit reached")
        elif err != errno.EACCES:
            # Note: EACCES falls through here without raising.
            raise OSError(err, os.strerror(err))

    @staticmethod
    def _parse_event_buffer(event_buffer):
        """
        Parses an event buffer of ``inotify_event`` structs returned by
        inotify::

            struct inotify_event {
                __s32 wd;            /* watch descriptor */
                __u32 mask;          /* watch mask */
                __u32 cookie;        /* cookie to synchronize two events */
                __u32 len;           /* length (including nulls) of name */
                char  name[0];       /* stub for possible name */
            };

        The ``cookie`` member of this struct is used to pair two related
        events, for example, it pairs an IN_MOVED_FROM event with an
        IN_MOVED_TO event.
        """
        i = 0
        # 16 is the fixed-size header (wd, mask, cookie, len), matching the
        # "iIII" format; the name follows, NUL-padded to ``length`` bytes.
        while i + 16 <= len(event_buffer):
            wd, mask, cookie, length = struct.unpack_from("iIII", event_buffer, i)
            name = event_buffer[i + 16 : i + 16 + length].rstrip(b"\0")
            i += 16 + length
            yield wd, mask, cookie, name


class InotifyEvent:
    """
    Inotify event struct wrapper.

    :param wd:
        Watch descriptor
    :param mask:
        Event mask
    :param cookie:
        Event cookie
    :param name:
        Base name of the event source path.
    :param src_path:
        Full event source path.
    """

    def __init__(self, wd, mask, cookie, name, src_path):
        # Raw struct fields, exposed read-only through the properties below.
        self._wd = wd
        self._mask = mask
        self._cookie = cookie
        self._name = name
        self._src_path = src_path

    @property
    def src_path(self):
        return self._src_path

    @property
    def wd(self):
        return self._wd

    @property
    def mask(self):
        return self._mask

    @property
    def cookie(self):
        return self._cookie

    @property
    def name(self):
        return self._name

    # Each predicate below tests a single InotifyConstants bit of the mask.
    @property
    def is_modify(self):
        return self._mask & InotifyConstants.IN_MODIFY > 0

    @property
    def is_close_write(self):
        return self._mask & InotifyConstants.IN_CLOSE_WRITE > 0

    @property
    def is_close_nowrite(self):
        return self._mask & InotifyConstants.IN_CLOSE_NOWRITE > 0

    @property
    def is_open(self):
        return self._mask & InotifyConstants.IN_OPEN > 0

    @property
    def is_access(self):
        return self._mask & InotifyConstants.IN_ACCESS > 0

    @property
    def is_delete(self):
        return self._mask & InotifyConstants.IN_DELETE > 0

    @property
    def is_delete_self(self):
        return self._mask & InotifyConstants.IN_DELETE_SELF > 0

    @property
    def is_create(self):
        return self._mask & InotifyConstants.IN_CREATE > 0

    @property
    def is_moved_from(self):
        return self._mask & InotifyConstants.IN_MOVED_FROM > 0

    @property
    def is_moved_to(self):
        return self._mask & InotifyConstants.IN_MOVED_TO > 0

    @property
    def is_move(self):
        return self._mask & InotifyConstants.IN_MOVE > 0

    @property
    def is_move_self(self):
        return self._mask & InotifyConstants.IN_MOVE_SELF > 0

    @property
    def is_attrib(self):
        return self._mask & InotifyConstants.IN_ATTRIB > 0

    @property
    def is_ignored(self):
        return self._mask & InotifyConstants.IN_IGNORED > 0

    @property
    def is_directory(self):
        # It looks like the kernel does not provide this information for
        # IN_DELETE_SELF and IN_MOVE_SELF. In this case, assume it's a dir.
# See also: https://github.com/seb-m/pyinotify/blob/2c7e8f8/python2/pyinotify.py#L897 return ( self.is_delete_self or self.is_move_self or self._mask & InotifyConstants.IN_ISDIR > 0 ) @property def key(self): return self._src_path, self._wd, self._mask, self._cookie, self._name def __eq__(self, inotify_event): return self.key == inotify_event.key def __ne__(self, inotify_event): return self.key != inotify_event.key def __hash__(self): return hash(self.key) @staticmethod def _get_mask_string(mask): masks = [] for c in dir(InotifyConstants): if c.startswith("IN_") and c not in [ "IN_ALL_EVENTS", "IN_CLOSE", "IN_MOVE", ]: c_val = getattr(InotifyConstants, c) if mask & c_val: masks.append(c) return "|".join(masks) def __repr__(self): return ( f"<{type(self).__name__}: src_path={self.src_path!r}, wd={self.wd}," f" mask={self._get_mask_string(self.mask)}, cookie={self.cookie}," f" name={os.fsdecode(self.name)!r}>" ) watchdog-3.0.0/src/watchdog/observers/kqueue.py000066400000000000000000000566161440602103100215710ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations # The `select` module varies between platforms. # mypy may complain about missing module attributes # depending on which platform it's running on. # The comment below disables mypy's attribute check. 
# # mypy: disable-error-code=attr-defined # """ :module: watchdog.observers.kqueue :synopsis: ``kqueue(2)`` based emitter implementation. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) :platforms: macOS and BSD with kqueue(2). .. WARNING:: kqueue is a very heavyweight way to monitor file systems. Each kqueue-detected directory modification triggers a full directory scan. Traversing the entire directory tree and opening file descriptors for all files will create performance problems. We need to find a way to re-scan only those directories which report changes and do a diff between two sub-DirectorySnapshots perhaps. .. ADMONITION:: About OS X performance guidelines Quote from the `macOS File System Performance Guidelines`_: "When you only want to track changes on a file or directory, be sure to open it using the ``O_EVTONLY`` flag. This flag prevents the file or directory from being marked as open or in use. This is important if you are tracking files on a removable volume and the user tries to unmount the volume. With this flag in place, the system knows it can dismiss the volume. If you had opened the files or directories without this flag, the volume would be marked as busy and would not be unmounted." ``O_EVTONLY`` is defined as ``0x8000`` in the OS X header files. More information here: http://www.mlsite.net/blog/?p=2312 Classes ------- .. autoclass:: KqueueEmitter :members: :show-inheritance: Collections and Utility Classes ------------------------------- .. autoclass:: KeventDescriptor :members: :show-inheritance: .. autoclass:: KeventDescriptorSet :members: :show-inheritance: .. 
_macOS File System Performance Guidelines: http://developer.apple.com/library/ios/#documentation/Performance/Conceptual/FileSystem/Articles/TrackingChanges.html#//apple_ref/doc/uid/20001993-CJBJFIDD """ import errno import os import os.path import select import threading from stat import S_ISDIR from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.utils import platform from watchdog.utils.dirsnapshot import DirectorySnapshot # Maximum number of events to process. MAX_EVENTS = 4096 # O_EVTONLY value from the header files for OS X only. O_EVTONLY = 0x8000 # Pre-calculated values for the kevent filter, flags, and fflags attributes. if platform.is_darwin(): WATCHDOG_OS_OPEN_FLAGS = O_EVTONLY else: WATCHDOG_OS_OPEN_FLAGS = os.O_RDONLY | os.O_NONBLOCK WATCHDOG_KQ_FILTER = select.KQ_FILTER_VNODE WATCHDOG_KQ_EV_FLAGS = select.KQ_EV_ADD | select.KQ_EV_ENABLE | select.KQ_EV_CLEAR WATCHDOG_KQ_FFLAGS = ( select.KQ_NOTE_DELETE | select.KQ_NOTE_WRITE | select.KQ_NOTE_EXTEND | select.KQ_NOTE_ATTRIB | select.KQ_NOTE_LINK | select.KQ_NOTE_RENAME | select.KQ_NOTE_REVOKE ) def absolute_path(path): return os.path.abspath(os.path.normpath(path)) # Flag tests. 
# NOTE(review): these flag helpers return the masked int (truthy/0), not a
# strict bool; callers use them only in boolean context.
def is_deleted(kev):
    """Determines whether the given kevent represents deletion."""
    return kev.fflags & select.KQ_NOTE_DELETE


def is_modified(kev):
    """Determines whether the given kevent represents modification."""
    fflags = kev.fflags
    return (fflags & select.KQ_NOTE_EXTEND) or (fflags & select.KQ_NOTE_WRITE)


def is_attrib_modified(kev):
    """Determines whether the given kevent represents attribute modification."""
    return kev.fflags & select.KQ_NOTE_ATTRIB


def is_renamed(kev):
    """Determines whether the given kevent represents movement."""
    return kev.fflags & select.KQ_NOTE_RENAME


class KeventDescriptorSet:
    """
    Thread-safe kevent descriptor collection.
    """

    def __init__(self):
        # Set of KeventDescriptor
        self._descriptors = set()

        # Descriptor for a given path.
        self._descriptor_for_path = dict()

        # Descriptor for a given fd.
        self._descriptor_for_fd = dict()

        # List of kevent objects.
        self._kevents = list()

        # Guards all four containers above.
        self._lock = threading.Lock()

    @property
    def kevents(self):
        """
        List of kevents monitored.
        """
        with self._lock:
            # Returns the internal list itself, not a copy.
            return self._kevents

    @property
    def paths(self):
        """
        List of paths for which kevents have been created.
        """
        with self._lock:
            return list(self._descriptor_for_path.keys())

    def get_for_fd(self, fd):
        """
        Given a file descriptor, returns the kevent descriptor object
        for it.

        :param fd:
            OS file descriptor.
        :type fd:
            ``int``
        :returns:
            A :class:`KeventDescriptor` object.
        """
        with self._lock:
            return self._descriptor_for_fd[fd]

    def get(self, path):
        """
        Obtains a :class:`KeventDescriptor` object for the specified path.

        :param path:
            Path for which the descriptor will be obtained.
        """
        with self._lock:
            path = absolute_path(path)
            return self._get(path)

    def __contains__(self, path):
        """
        Determines whether a :class:`KeventDescriptor has been registered
        for the specified path.

        :param path:
            Path for which the descriptor will be obtained.
        """
        with self._lock:
            path = absolute_path(path)
            return self._has_path(path)

    def add(self, path, is_directory):
        """
        Adds a :class:`KeventDescriptor` to the collection for the given
        path.

        :param path:
            The path for which a :class:`KeventDescriptor` object will be
            added.
        :param is_directory:
            ``True`` if the path refers to a directory; ``False`` otherwise.
        :type is_directory:
            ``bool``
        """
        with self._lock:
            path = absolute_path(path)
            # Adding is idempotent: an existing path is left untouched.
            if not self._has_path(path):
                self._add_descriptor(KeventDescriptor(path, is_directory))

    def remove(self, path):
        """
        Removes the :class:`KeventDescriptor` object for the given path
        if it already exists.

        :param path:
            Path for which the :class:`KeventDescriptor` object will be
            removed.
        """
        with self._lock:
            path = absolute_path(path)
            if self._has_path(path):
                self._remove_descriptor(self._get(path))

    def clear(self):
        """
        Clears the collection and closes all open descriptors.
        """
        with self._lock:
            for descriptor in self._descriptors:
                descriptor.close()
            self._descriptors.clear()
            self._descriptor_for_fd.clear()
            self._descriptor_for_path.clear()
            # Replaced rather than cleared, since the old list may have been
            # handed out via the ``kevents`` property.
            self._kevents = []

    # Thread-unsafe methods. Locking is provided at a higher level.
    def _get(self, path):
        """Returns a kevent descriptor for a given path."""
        return self._descriptor_for_path[path]

    def _has_path(self, path):
        """Determines whether a :class:`KeventDescriptor` for the specified
        path exists already in the collection."""
        return path in self._descriptor_for_path

    def _add_descriptor(self, descriptor):
        """
        Adds a descriptor to the collection.

        :param descriptor:
            An instance of :class:`KeventDescriptor` to be added.
        """
        self._descriptors.add(descriptor)
        self._kevents.append(descriptor.kevent)
        self._descriptor_for_path[descriptor.path] = descriptor
        self._descriptor_for_fd[descriptor.fd] = descriptor

    def _remove_descriptor(self, descriptor):
        """
        Removes a descriptor from the collection.

        :param descriptor:
            An instance of :class:`KeventDescriptor` to be removed.
        """
        self._descriptors.remove(descriptor)
        del self._descriptor_for_fd[descriptor.fd]
        del self._descriptor_for_path[descriptor.path]
        self._kevents.remove(descriptor.kevent)
        descriptor.close()


class KeventDescriptor:
    """
    A kevent descriptor convenience data structure to keep together:

        * kevent
        * directory status
        * path
        * file descriptor

    :param path:
        Path string for which a kevent descriptor will be created.
    :param is_directory:
        ``True`` if the path refers to a directory; ``False`` otherwise.
    :type is_directory:
        ``bool``
    """

    def __init__(self, path, is_directory):
        self._path = absolute_path(path)
        self._is_directory = is_directory
        # WATCHDOG_OS_OPEN_FLAGS is O_EVTONLY on macOS so the open descriptor
        # does not mark the file/volume as in use.
        self._fd = os.open(path, WATCHDOG_OS_OPEN_FLAGS)
        self._kev = select.kevent(
            self._fd,
            filter=WATCHDOG_KQ_FILTER,
            flags=WATCHDOG_KQ_EV_FLAGS,
            fflags=WATCHDOG_KQ_FFLAGS,
        )

    @property
    def fd(self):
        """OS file descriptor for the kevent descriptor."""
        return self._fd

    @property
    def path(self):
        """The path associated with the kevent descriptor."""
        return self._path

    @property
    def kevent(self):
        """The kevent object associated with the kevent descriptor."""
        return self._kev

    @property
    def is_directory(self):
        """Determines whether the kevent descriptor refers to a directory.

        :returns:
            ``True`` or ``False``
        """
        return self._is_directory

    def close(self):
        """
        Closes the file descriptor associated with a kevent descriptor.
        """
        try:
            os.close(self.fd)
        except OSError:
            # Best-effort close; the descriptor may already be gone.
            pass

    @property
    def key(self):
        return (self.path, self.is_directory)

    def __eq__(self, descriptor):
        return self.key == descriptor.key

    def __ne__(self, descriptor):
        return self.key != descriptor.key

    def __hash__(self):
        return hash(self.key)

    def __repr__(self):
        return f"<{type(self).__name__}: path={self.path!r}, is_directory={self.is_directory}>"


class KqueueEmitter(EventEmitter):
    """
    kqueue(2)-based event emitter.

    ..
ADMONITION:: About ``kqueue(2)`` behavior and this implementation ``kqueue(2)`` monitors file system events only for open descriptors, which means, this emitter does a lot of book-keeping behind the scenes to keep track of open descriptors for every entry in the monitored directory tree. This also means the number of maximum open file descriptors on your system must be increased **manually**. Usually, issuing a call to ``ulimit`` should suffice:: ulimit -n 1024 Ensure that you pick a number that is larger than the number of files you expect to be monitored. ``kqueue(2)`` does not provide enough information about the following things: * The destination path of a file or directory that is renamed. * Creation of a file or directory within a directory; in this case, ``kqueue(2)`` only indicates a modified event on the parent directory. Therefore, this emitter takes a snapshot of the directory tree when ``kqueue(2)`` detects a change on the file system to be able to determine the above information. :param event_queue: The event queue to fill with events. :param watch: A watch object representing the directory to monitor. :type watch: :class:`watchdog.observers.api.ObservedWatch` :param timeout: Read events blocking timeout (in seconds). :type timeout: ``float`` :param stat: stat function. See ``os.stat`` for details. """ def __init__( self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT, stat=os.stat ): super().__init__(event_queue, watch, timeout) self._kq = select.kqueue() self._lock = threading.RLock() # A collection of KeventDescriptor. self._descriptors = KeventDescriptorSet() def custom_stat(path, self=self): stat_info = stat(path) self._register_kevent(path, S_ISDIR(stat_info.st_mode)) return stat_info self._snapshot = DirectorySnapshot( watch.path, recursive=watch.is_recursive, stat=custom_stat ) def _register_kevent(self, path, is_directory): """ Registers a kevent descriptor for the given path. 
:param path: Path for which a kevent descriptor will be created. :param is_directory: ``True`` if the path refers to a directory; ``False`` otherwise. :type is_directory: ``bool`` """ try: self._descriptors.add(path, is_directory) except OSError as e: if e.errno == errno.ENOENT: # Probably dealing with a temporary file that was created # and then quickly deleted before we could open # a descriptor for it. Therefore, simply queue a sequence # of created and deleted events for the path. # path = absolute_path(path) # if is_directory: # self.queue_event(DirCreatedEvent(path)) # self.queue_event(DirDeletedEvent(path)) # else: # self.queue_event(FileCreatedEvent(path)) # self.queue_event(FileDeletedEvent(path)) # TODO: We could simply ignore these files. # Locked files cause the python process to die with # a bus error when we handle temporary files. # eg. .git/index.lock when running tig operations. # I don't fully understand this at the moment. pass elif e.errno == errno.EOPNOTSUPP: # Probably dealing with the socket or special file # mounted through a file system that does not support # access to it (e.g. NFS). On BSD systems look at # EOPNOTSUPP in man 2 open. pass else: # All other errors are propagated. raise def _unregister_kevent(self, path): """ Convenience function to close the kevent descriptor for a specified kqueue-monitored path. :param path: Path for which the kevent descriptor will be closed. """ self._descriptors.remove(path) def queue_event(self, event): """ Handles queueing a single event object. :param event: An instance of :class:`watchdog.events.FileSystemEvent` or a subclass. """ # Handles all the book keeping for queued events. # We do not need to fire moved/deleted events for all subitems in # a directory tree here, because this function is called by kqueue # for all those events anyway. 
EventEmitter.queue_event(self, event) if event.event_type == EVENT_TYPE_CREATED: self._register_kevent(event.src_path, event.is_directory) elif event.event_type == EVENT_TYPE_MOVED: self._unregister_kevent(event.src_path) self._register_kevent(event.dest_path, event.is_directory) elif event.event_type == EVENT_TYPE_DELETED: self._unregister_kevent(event.src_path) def _gen_kqueue_events(self, kev, ref_snapshot, new_snapshot): """ Generate events from the kevent list returned from the call to :meth:`select.kqueue.control`. .. NOTE:: kqueue only tells us about deletions, file modifications, attribute modifications. The other events, namely, file creation, directory modification, file rename, directory rename, directory creation, etc. are determined by comparing directory snapshots. """ descriptor = self._descriptors.get_for_fd(kev.ident) src_path = descriptor.path if is_renamed(kev): # Kqueue does not specify the destination names for renames # to, so we have to process these using the a snapshot # of the directory. for event in self._gen_renamed_events( src_path, descriptor.is_directory, ref_snapshot, new_snapshot ): yield event elif is_attrib_modified(kev): if descriptor.is_directory: yield DirModifiedEvent(src_path) else: yield FileModifiedEvent(src_path) elif is_modified(kev): if descriptor.is_directory: if self.watch.is_recursive or self.watch.path == src_path: # When a directory is modified, it may be due to # sub-file/directory renames or new file/directory # creation. We determine all this by comparing # snapshots later. yield DirModifiedEvent(src_path) else: yield FileModifiedEvent(src_path) elif is_deleted(kev): if descriptor.is_directory: yield DirDeletedEvent(src_path) else: yield FileDeletedEvent(src_path) def _parent_dir_modified(self, src_path): """ Helper to generate a DirModifiedEvent on the parent of src_path. 
""" return DirModifiedEvent(os.path.dirname(src_path)) def _gen_renamed_events(self, src_path, is_directory, ref_snapshot, new_snapshot): """ Compares information from two directory snapshots (one taken before the rename operation and another taken right after) to determine the destination path of the file system object renamed, and yields the appropriate events to be queued. """ try: f_inode = ref_snapshot.inode(src_path) except KeyError: # Probably caught a temporary file/directory that was renamed # and deleted. Fires a sequence of created and deleted events # for the path. if is_directory: yield DirCreatedEvent(src_path) yield DirDeletedEvent(src_path) else: yield FileCreatedEvent(src_path) yield FileDeletedEvent(src_path) # We don't process any further and bail out assuming # the event represents deletion/creation instead of movement. return dest_path = new_snapshot.path(f_inode) if dest_path is not None: dest_path = absolute_path(dest_path) if is_directory: event = DirMovedEvent(src_path, dest_path) yield event else: yield FileMovedEvent(src_path, dest_path) yield self._parent_dir_modified(src_path) yield self._parent_dir_modified(dest_path) if is_directory: # TODO: Do we need to fire moved events for the items # inside the directory tree? Does kqueue does this # all by itself? Check this and then enable this code # only if it doesn't already. # A: It doesn't. So I've enabled this block. if self.watch.is_recursive: for sub_event in generate_sub_moved_events(src_path, dest_path): yield sub_event else: # If the new snapshot does not have an inode for the # old path, we haven't found the new name. Therefore, # we mark it as deleted and remove unregister the path. if is_directory: yield DirDeletedEvent(src_path) else: yield FileDeletedEvent(src_path) yield self._parent_dir_modified(src_path) def _read_events(self, timeout=None): """ Reads events from a call to the blocking :meth:`select.kqueue.control()` method. 
:param timeout: Blocking timeout for reading events. :type timeout: ``float`` (seconds) """ return self._kq.control(self._descriptors.kevents, MAX_EVENTS, timeout) def queue_events(self, timeout): """ Queues events by reading them from a call to the blocking :meth:`select.kqueue.control()` method. :param timeout: Blocking timeout for reading events. :type timeout: ``float`` (seconds) """ with self._lock: try: event_list = self._read_events(timeout) # TODO: investigate why order appears to be reversed event_list.reverse() # Take a fresh snapshot of the directory and update the # saved snapshot. new_snapshot = DirectorySnapshot( self.watch.path, self.watch.is_recursive ) ref_snapshot = self._snapshot self._snapshot = new_snapshot diff_events = new_snapshot - ref_snapshot # Process events for directory_created in diff_events.dirs_created: self.queue_event(DirCreatedEvent(directory_created)) for file_created in diff_events.files_created: self.queue_event(FileCreatedEvent(file_created)) for file_modified in diff_events.files_modified: self.queue_event(FileModifiedEvent(file_modified)) for kev in event_list: for event in self._gen_kqueue_events( kev, ref_snapshot, new_snapshot ): self.queue_event(event) except OSError as e: if e.errno != errno.EBADF: raise def on_thread_stop(self): # Clean up. with self._lock: self._descriptors.clear() self._kq.close() class KqueueObserver(BaseObserver): """ Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__(emitter_class=KqueueEmitter, timeout=timeout) watchdog-3.0.0/src/watchdog/observers/polling.py000066400000000000000000000113121440602103100217160ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.observers.polling :synopsis: Polling emitter implementation. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ------- .. autoclass:: PollingObserver :members: :show-inheritance: .. autoclass:: PollingObserverVFS :members: :show-inheritance: :special-members: """ from __future__ import annotations import os import threading from functools import partial from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff class PollingEmitter(EventEmitter): """ Platform-independent emitter that polls a directory to detect file system changes. """ def __init__( self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT, stat=os.stat, listdir=os.scandir, ): super().__init__(event_queue, watch, timeout) self._snapshot = None self._lock = threading.Lock() self._take_snapshot = lambda: DirectorySnapshot( self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir ) def on_thread_start(self): self._snapshot = self._take_snapshot() def queue_events(self, timeout): # We don't want to hit the disk continuously. # timeout behaves like an interval for polling emitters. 
if self.stopped_event.wait(timeout): return with self._lock: if not self.should_keep_running(): return # Get event diff between fresh snapshot and previous snapshot. # Update snapshot. try: new_snapshot = self._take_snapshot() except OSError: self.queue_event(DirDeletedEvent(self.watch.path)) self.stop() return events = DirectorySnapshotDiff(self._snapshot, new_snapshot) self._snapshot = new_snapshot # Files. for src_path in events.files_deleted: self.queue_event(FileDeletedEvent(src_path)) for src_path in events.files_modified: self.queue_event(FileModifiedEvent(src_path)) for src_path in events.files_created: self.queue_event(FileCreatedEvent(src_path)) for src_path, dest_path in events.files_moved: self.queue_event(FileMovedEvent(src_path, dest_path)) # Directories. for src_path in events.dirs_deleted: self.queue_event(DirDeletedEvent(src_path)) for src_path in events.dirs_modified: self.queue_event(DirModifiedEvent(src_path)) for src_path in events.dirs_created: self.queue_event(DirCreatedEvent(src_path)) for src_path, dest_path in events.dirs_moved: self.queue_event(DirMovedEvent(src_path, dest_path)) class PollingObserver(BaseObserver): """ Platform-independent observer that polls a directory to detect file system changes. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__(emitter_class=PollingEmitter, timeout=timeout) class PollingObserverVFS(BaseObserver): """ File system independent observer that polls a directory to detect changes. """ def __init__(self, stat, listdir, polling_interval=1): """ :param stat: stat function. See ``os.stat`` for details. :param listdir: listdir function. See ``os.scandir`` for details. :type polling_interval: float :param polling_interval: interval in seconds between polling the file system. 
""" emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir) super().__init__(emitter_class=emitter_cls, timeout=polling_interval) watchdog-3.0.0/src/watchdog/observers/read_directory_changes.py000066400000000000000000000133471440602103100247530ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os.path import platform import sys import threading import time from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, generate_sub_created_events, generate_sub_moved_events, ) from watchdog.observers.api import DEFAULT_EMITTER_TIMEOUT, DEFAULT_OBSERVER_TIMEOUT, BaseObserver, EventEmitter assert sys.platform.startswith("win"), f"{__name__} requires Windows" from watchdog.observers.winapi import close_directory_handle, get_directory_handle, read_events # noqa: E402 # HACK: WATCHDOG_TRAVERSE_MOVED_DIR_DELAY = 1 # seconds class WindowsApiEmitter(EventEmitter): """ Windows API-based emitter that uses ReadDirectoryChangesW to detect file system changes for a watch. 
""" def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT): super().__init__(event_queue, watch, timeout) self._lock = threading.Lock() self._handle = None def on_thread_start(self): self._handle = get_directory_handle(self.watch.path) if platform.python_implementation() == "PyPy": def start(self): """PyPy needs some time before receiving events, see #792.""" super().start() time.sleep(0.01) def on_thread_stop(self): if self._handle: close_directory_handle(self._handle) def _read_events(self): return read_events(self._handle, self.watch.path, self.watch.is_recursive) def queue_events(self, timeout): winapi_events = self._read_events() with self._lock: last_renamed_src_path = "" for winapi_event in winapi_events: src_path = os.path.join(self.watch.path, winapi_event.src_path) if winapi_event.is_renamed_old: last_renamed_src_path = src_path elif winapi_event.is_renamed_new: dest_path = src_path src_path = last_renamed_src_path if os.path.isdir(dest_path): event = DirMovedEvent(src_path, dest_path) if self.watch.is_recursive: # HACK: We introduce a forced delay before # traversing the moved directory. This will read # only file movement that finishes within this # delay time. time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY) # The following block of code may not # obtain moved events for the entire tree if # the I/O is not completed within the above # delay time. So, it's not guaranteed to work. # TODO: Come up with a better solution, possibly # a way to wait for I/O to complete before # queuing events. 
for sub_moved_event in generate_sub_moved_events( src_path, dest_path ): self.queue_event(sub_moved_event) self.queue_event(event) else: self.queue_event(FileMovedEvent(src_path, dest_path)) elif winapi_event.is_modified: cls = ( DirModifiedEvent if os.path.isdir(src_path) else FileModifiedEvent ) self.queue_event(cls(src_path)) elif winapi_event.is_added: isdir = os.path.isdir(src_path) cls = DirCreatedEvent if isdir else FileCreatedEvent self.queue_event(cls(src_path)) if isdir and self.watch.is_recursive: # If a directory is moved from outside the watched folder to inside it # we only get a created directory event out of it, not any events for its children # so use the same hack as for file moves to get the child events time.sleep(WATCHDOG_TRAVERSE_MOVED_DIR_DELAY) sub_events = generate_sub_created_events(src_path) for sub_created_event in sub_events: self.queue_event(sub_created_event) elif winapi_event.is_removed: self.queue_event(FileDeletedEvent(src_path)) elif winapi_event.is_removed_self: self.queue_event(DirDeletedEvent(self.watch.path)) self.stop() class WindowsApiObserver(BaseObserver): """ Observer thread that schedules watching directories and dispatches calls to event handlers. """ def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT): super().__init__(emitter_class=WindowsApiEmitter, timeout=timeout) watchdog-3.0.0/src/watchdog/observers/winapi.py000066400000000000000000000325621440602103100215530ustar00rootroot00000000000000# winapi.py: Windows API-Python interface (removes dependency on pywin32) # # Copyright (C) 2007 Thomas Heller # Copyright (C) 2010 Will McGugan # Copyright (C) 2010 Ryan Kelly # Copyright (C) 2010 Yesudeep Mangalapilly # Copyright (C) 2014 Thomas Amland # All rights reserved. 
# # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and / or other materials provided with the distribution. # * Neither the name of the organization nor the names of its contributors may # be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Portions of this code were taken from pyfilesystem, which uses the above # new BSD license. from __future__ import annotations import sys from functools import reduce assert sys.platform.startswith("win"), f"{__name__} requires Windows" import ctypes.wintypes # noqa: E402 LPVOID = ctypes.wintypes.LPVOID # Invalid handle value. INVALID_HANDLE_VALUE = ctypes.c_void_p(-1).value # File notification constants. 
FILE_NOTIFY_CHANGE_FILE_NAME = 0x01 FILE_NOTIFY_CHANGE_DIR_NAME = 0x02 FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x04 FILE_NOTIFY_CHANGE_SIZE = 0x08 FILE_NOTIFY_CHANGE_LAST_WRITE = 0x010 FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x020 FILE_NOTIFY_CHANGE_CREATION = 0x040 FILE_NOTIFY_CHANGE_SECURITY = 0x0100 FILE_FLAG_BACKUP_SEMANTICS = 0x02000000 FILE_FLAG_OVERLAPPED = 0x40000000 FILE_LIST_DIRECTORY = 1 FILE_SHARE_READ = 0x01 FILE_SHARE_WRITE = 0x02 FILE_SHARE_DELETE = 0x04 OPEN_EXISTING = 3 VOLUME_NAME_NT = 0x02 # File action constants. FILE_ACTION_CREATED = 1 FILE_ACTION_DELETED = 2 FILE_ACTION_MODIFIED = 3 FILE_ACTION_RENAMED_OLD_NAME = 4 FILE_ACTION_RENAMED_NEW_NAME = 5 FILE_ACTION_DELETED_SELF = 0xFFFE FILE_ACTION_OVERFLOW = 0xFFFF # Aliases FILE_ACTION_ADDED = FILE_ACTION_CREATED FILE_ACTION_REMOVED = FILE_ACTION_DELETED FILE_ACTION_REMOVED_SELF = FILE_ACTION_DELETED_SELF THREAD_TERMINATE = 0x0001 # IO waiting constants. WAIT_ABANDONED = 0x00000080 WAIT_IO_COMPLETION = 0x000000C0 WAIT_OBJECT_0 = 0x00000000 WAIT_TIMEOUT = 0x00000102 # Error codes ERROR_OPERATION_ABORTED = 995 class OVERLAPPED(ctypes.Structure): _fields_ = [ ("Internal", LPVOID), ("InternalHigh", LPVOID), ("Offset", ctypes.wintypes.DWORD), ("OffsetHigh", ctypes.wintypes.DWORD), ("Pointer", LPVOID), ("hEvent", ctypes.wintypes.HANDLE), ] def _errcheck_bool(value, func, args): if not value: raise ctypes.WinError() return args def _errcheck_handle(value, func, args): if not value: raise ctypes.WinError() if value == INVALID_HANDLE_VALUE: raise ctypes.WinError() return args def _errcheck_dword(value, func, args): if value == 0xFFFFFFFF: raise ctypes.WinError() return args kernel32 = ctypes.WinDLL("kernel32") ReadDirectoryChangesW = kernel32.ReadDirectoryChangesW ReadDirectoryChangesW.restype = ctypes.wintypes.BOOL ReadDirectoryChangesW.errcheck = _errcheck_bool ReadDirectoryChangesW.argtypes = ( ctypes.wintypes.HANDLE, # hDirectory LPVOID, # lpBuffer ctypes.wintypes.DWORD, # nBufferLength ctypes.wintypes.BOOL, # 
bWatchSubtree ctypes.wintypes.DWORD, # dwNotifyFilter ctypes.POINTER(ctypes.wintypes.DWORD), # lpBytesReturned ctypes.POINTER(OVERLAPPED), # lpOverlapped LPVOID, # FileIOCompletionRoutine # lpCompletionRoutine ) CreateFileW = kernel32.CreateFileW CreateFileW.restype = ctypes.wintypes.HANDLE CreateFileW.errcheck = _errcheck_handle CreateFileW.argtypes = ( ctypes.wintypes.LPCWSTR, # lpFileName ctypes.wintypes.DWORD, # dwDesiredAccess ctypes.wintypes.DWORD, # dwShareMode LPVOID, # lpSecurityAttributes ctypes.wintypes.DWORD, # dwCreationDisposition ctypes.wintypes.DWORD, # dwFlagsAndAttributes ctypes.wintypes.HANDLE, # hTemplateFile ) CloseHandle = kernel32.CloseHandle CloseHandle.restype = ctypes.wintypes.BOOL CloseHandle.argtypes = (ctypes.wintypes.HANDLE,) # hObject CancelIoEx = kernel32.CancelIoEx CancelIoEx.restype = ctypes.wintypes.BOOL CancelIoEx.errcheck = _errcheck_bool CancelIoEx.argtypes = ( ctypes.wintypes.HANDLE, # hObject ctypes.POINTER(OVERLAPPED), # lpOverlapped ) CreateEvent = kernel32.CreateEventW CreateEvent.restype = ctypes.wintypes.HANDLE CreateEvent.errcheck = _errcheck_handle CreateEvent.argtypes = ( LPVOID, # lpEventAttributes ctypes.wintypes.BOOL, # bManualReset ctypes.wintypes.BOOL, # bInitialState ctypes.wintypes.LPCWSTR, # lpName ) SetEvent = kernel32.SetEvent SetEvent.restype = ctypes.wintypes.BOOL SetEvent.errcheck = _errcheck_bool SetEvent.argtypes = (ctypes.wintypes.HANDLE,) # hEvent WaitForSingleObjectEx = kernel32.WaitForSingleObjectEx WaitForSingleObjectEx.restype = ctypes.wintypes.DWORD WaitForSingleObjectEx.errcheck = _errcheck_dword WaitForSingleObjectEx.argtypes = ( ctypes.wintypes.HANDLE, # hObject ctypes.wintypes.DWORD, # dwMilliseconds ctypes.wintypes.BOOL, # bAlertable ) CreateIoCompletionPort = kernel32.CreateIoCompletionPort CreateIoCompletionPort.restype = ctypes.wintypes.HANDLE CreateIoCompletionPort.errcheck = _errcheck_handle CreateIoCompletionPort.argtypes = ( ctypes.wintypes.HANDLE, # FileHandle ctypes.wintypes.HANDLE, 
# ExistingCompletionPort LPVOID, # CompletionKey ctypes.wintypes.DWORD, # NumberOfConcurrentThreads ) GetQueuedCompletionStatus = kernel32.GetQueuedCompletionStatus GetQueuedCompletionStatus.restype = ctypes.wintypes.BOOL GetQueuedCompletionStatus.errcheck = _errcheck_bool GetQueuedCompletionStatus.argtypes = ( ctypes.wintypes.HANDLE, # CompletionPort LPVOID, # lpNumberOfBytesTransferred LPVOID, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ctypes.wintypes.DWORD, # dwMilliseconds ) PostQueuedCompletionStatus = kernel32.PostQueuedCompletionStatus PostQueuedCompletionStatus.restype = ctypes.wintypes.BOOL PostQueuedCompletionStatus.errcheck = _errcheck_bool PostQueuedCompletionStatus.argtypes = ( ctypes.wintypes.HANDLE, # CompletionPort ctypes.wintypes.DWORD, # lpNumberOfBytesTransferred ctypes.wintypes.DWORD, # lpCompletionKey ctypes.POINTER(OVERLAPPED), # lpOverlapped ) GetFinalPathNameByHandleW = kernel32.GetFinalPathNameByHandleW GetFinalPathNameByHandleW.restype = ctypes.wintypes.DWORD GetFinalPathNameByHandleW.errcheck = _errcheck_dword GetFinalPathNameByHandleW.argtypes = ( ctypes.wintypes.HANDLE, # hFile ctypes.wintypes.LPWSTR, # lpszFilePath ctypes.wintypes.DWORD, # cchFilePath ctypes.wintypes.DWORD, # DWORD ) class FILE_NOTIFY_INFORMATION(ctypes.Structure): _fields_ = [ ("NextEntryOffset", ctypes.wintypes.DWORD), ("Action", ctypes.wintypes.DWORD), ("FileNameLength", ctypes.wintypes.DWORD), # ("FileName", (ctypes.wintypes.WCHAR * 1))] ("FileName", (ctypes.c_char * 1)), ] LPFNI = ctypes.POINTER(FILE_NOTIFY_INFORMATION) # We don't need to recalculate these flags every time a call is made to # the win32 API functions. 
WATCHDOG_FILE_FLAGS = FILE_FLAG_BACKUP_SEMANTICS WATCHDOG_FILE_SHARE_FLAGS = reduce( lambda x, y: x | y, [ FILE_SHARE_READ, FILE_SHARE_WRITE, FILE_SHARE_DELETE, ], ) WATCHDOG_FILE_NOTIFY_FLAGS = reduce( lambda x, y: x | y, [ FILE_NOTIFY_CHANGE_FILE_NAME, FILE_NOTIFY_CHANGE_DIR_NAME, FILE_NOTIFY_CHANGE_ATTRIBUTES, FILE_NOTIFY_CHANGE_SIZE, FILE_NOTIFY_CHANGE_LAST_WRITE, FILE_NOTIFY_CHANGE_SECURITY, FILE_NOTIFY_CHANGE_LAST_ACCESS, FILE_NOTIFY_CHANGE_CREATION, ], ) # ReadDirectoryChangesW buffer length. # To handle cases with lot of changes, this seems the highest safest value we can use. # Note: it will fail with ERROR_INVALID_PARAMETER when it is greater than 64 KB and # the application is monitoring a directory over the network. # This is due to a packet size limitation with the underlying file sharing protocols. # https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-readdirectorychangesw#remarks BUFFER_SIZE = 64000 # Buffer length for path-related stuff. # Introduced to keep the old behavior when we bumped BUFFER_SIZE from 2048 to 64000 in v1.0.0. PATH_BUFFER_SIZE = 2048 def _parse_event_buffer(readBuffer, nBytes): results = [] while nBytes > 0: fni = ctypes.cast(readBuffer, LPFNI)[0] ptr = ctypes.addressof(fni) + FILE_NOTIFY_INFORMATION.FileName.offset # filename = ctypes.wstring_at(ptr, fni.FileNameLength) filename = ctypes.string_at(ptr, fni.FileNameLength) results.append((fni.Action, filename.decode("utf-16"))) numToSkip = fni.NextEntryOffset if numToSkip <= 0: break readBuffer = readBuffer[numToSkip:] nBytes -= numToSkip # numToSkip is long. nBytes should be long too. return results def _is_observed_path_deleted(handle, path): # Comparison of observed path and actual path, returned by # GetFinalPathNameByHandleW. If directory moved to the trash bin, or # deleted, actual path will not be equal to observed path. 
buff = ctypes.create_unicode_buffer(PATH_BUFFER_SIZE) GetFinalPathNameByHandleW(handle, buff, PATH_BUFFER_SIZE, VOLUME_NAME_NT) return buff.value != path def _generate_observed_path_deleted_event(): # Create synthetic event for notify that observed directory is deleted path = ctypes.create_unicode_buffer(".") event = FILE_NOTIFY_INFORMATION( 0, FILE_ACTION_DELETED_SELF, len(path), path.value.encode("utf-8") ) event_size = ctypes.sizeof(event) buff = ctypes.create_string_buffer(PATH_BUFFER_SIZE) ctypes.memmove(buff, ctypes.addressof(event), event_size) return buff, event_size def get_directory_handle(path): """Returns a Windows handle to the specified directory path.""" return CreateFileW( path, FILE_LIST_DIRECTORY, WATCHDOG_FILE_SHARE_FLAGS, None, OPEN_EXISTING, WATCHDOG_FILE_FLAGS, None, ) def close_directory_handle(handle): try: CancelIoEx(handle, None) # force ReadDirectoryChangesW to return CloseHandle(handle) # close directory handle except OSError: try: CloseHandle(handle) # close directory handle except Exception: return def read_directory_changes(handle, path, recursive): """Read changes to the directory using the specified directory handle. 
http://timgolden.me.uk/pywin32-docs/win32file__ReadDirectoryChangesW_meth.html """ event_buffer = ctypes.create_string_buffer(BUFFER_SIZE) nbytes = ctypes.wintypes.DWORD() try: ReadDirectoryChangesW( handle, ctypes.byref(event_buffer), len(event_buffer), recursive, WATCHDOG_FILE_NOTIFY_FLAGS, ctypes.byref(nbytes), None, None, ) except OSError as e: if e.winerror == ERROR_OPERATION_ABORTED: return [], 0 # Handle the case when the root path is deleted if _is_observed_path_deleted(handle, path): return _generate_observed_path_deleted_event() raise e return event_buffer.raw, int(nbytes.value) class WinAPINativeEvent: def __init__(self, action, src_path): self.action = action self.src_path = src_path @property def is_added(self): return self.action == FILE_ACTION_CREATED @property def is_removed(self): return self.action == FILE_ACTION_REMOVED @property def is_modified(self): return self.action == FILE_ACTION_MODIFIED @property def is_renamed_old(self): return self.action == FILE_ACTION_RENAMED_OLD_NAME @property def is_renamed_new(self): return self.action == FILE_ACTION_RENAMED_NEW_NAME @property def is_removed_self(self): return self.action == FILE_ACTION_REMOVED_SELF def __repr__(self): return ( f"<{type(self).__name__}: action={self.action}, src_path={self.src_path!r}>" ) def read_events(handle, path, recursive): buf, nbytes = read_directory_changes(handle, path, recursive) events = _parse_event_buffer(buf, nbytes) return [WinAPINativeEvent(action, src_path) for action, src_path in events] watchdog-3.0.0/src/watchdog/py.typed000066400000000000000000000000001440602103100173550ustar00rootroot00000000000000watchdog-3.0.0/src/watchdog/tricks/000077500000000000000000000000001440602103100171675ustar00rootroot00000000000000watchdog-3.0.0/src/watchdog/tricks/__init__.py000066400000000000000000000231001440602103100212740ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.tricks :synopsis: Utility event handlers. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ------- .. autoclass:: Trick :members: :show-inheritance: .. autoclass:: LoggerTrick :members: :show-inheritance: .. autoclass:: ShellCommandTrick :members: :show-inheritance: .. autoclass:: AutoRestartTrick :members: :show-inheritance: """ from __future__ import annotations import functools import logging import os import signal import subprocess import sys import threading import time from watchdog.events import EVENT_TYPE_OPENED, PatternMatchingEventHandler from watchdog.utils import echo from watchdog.utils.event_debouncer import EventDebouncer from watchdog.utils.process_watcher import ProcessWatcher logger = logging.getLogger(__name__) echo_events = functools.partial(echo.echo, write=lambda msg: logger.info(msg)) class Trick(PatternMatchingEventHandler): """Your tricks should subclass this class.""" @classmethod def generate_yaml(cls): return f"""- {cls.__module__}.{cls.__name__}: args: - argument1 - argument2 kwargs: patterns: - "*.py" - "*.js" ignore_patterns: - "version.py" ignore_directories: false """ class LoggerTrick(Trick): """A simple trick that does only logs events.""" def on_any_event(self, event): pass @echo_events def on_modified(self, event): pass @echo_events def on_deleted(self, event): pass @echo_events def on_created(self, event): pass 
@echo_events def on_moved(self, event): pass @echo_events def on_closed(self, event): pass @echo_events def on_opened(self, event): pass class ShellCommandTrick(Trick): """Executes shell commands in response to matched events.""" def __init__( self, shell_command=None, patterns=None, ignore_patterns=None, ignore_directories=False, wait_for_process=False, drop_during_process=False, ): super().__init__( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=ignore_directories, ) self.shell_command = shell_command self.wait_for_process = wait_for_process self.drop_during_process = drop_during_process self.process = None self._process_watchers = set() def on_any_event(self, event): if event.event_type == EVENT_TYPE_OPENED: # FIXME: see issue #949, and find a way to better handle that scenario return from string import Template if self.drop_during_process and self.is_process_running(): return object_type = "directory" if event.is_directory else "file" context = { "watch_src_path": event.src_path, "watch_dest_path": "", "watch_event_type": event.event_type, "watch_object": object_type, } if self.shell_command is None: if hasattr(event, "dest_path"): context["dest_path"] = event.dest_path command = 'echo "${watch_event_type} ${watch_object} from ${watch_src_path} to ${watch_dest_path}"' else: command = 'echo "${watch_event_type} ${watch_object} ${watch_src_path}"' else: if hasattr(event, "dest_path"): context["watch_dest_path"] = event.dest_path command = self.shell_command command = Template(command).safe_substitute(**context) self.process = subprocess.Popen(command, shell=True) if self.wait_for_process: self.process.wait() else: process_watcher = ProcessWatcher(self.process, None) self._process_watchers.add(process_watcher) process_watcher.process_termination_callback = functools.partial( self._process_watchers.discard, process_watcher ) process_watcher.start() def is_process_running(self): return self._process_watchers or ( self.process is not None and 
self.process.poll() is None ) class AutoRestartTrick(Trick): """Starts a long-running subprocess and restarts it on matched events. The command parameter is a list of command arguments, such as `['bin/myserver', '-c', 'etc/myconfig.ini']`. Call `start()` after creating the Trick. Call `stop()` when stopping the process. """ def __init__( self, command, patterns=None, ignore_patterns=None, ignore_directories=False, stop_signal=signal.SIGINT, kill_after=10, debounce_interval_seconds=0, restart_on_command_exit=True, ): if kill_after < 0: raise ValueError("kill_after must be non-negative.") if debounce_interval_seconds < 0: raise ValueError("debounce_interval_seconds must be non-negative.") super().__init__( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=ignore_directories, ) self.command = command self.stop_signal = stop_signal self.kill_after = kill_after self.debounce_interval_seconds = debounce_interval_seconds self.restart_on_command_exit = restart_on_command_exit self.process = None self.process_watcher = None self.event_debouncer = None self.restart_count = 0 self._is_process_stopping = False self._is_trick_stopping = False self._stopping_lock = threading.RLock() def start(self): if self.debounce_interval_seconds: self.event_debouncer = EventDebouncer( debounce_interval_seconds=self.debounce_interval_seconds, events_callback=lambda events: self._restart_process(), ) self.event_debouncer.start() self._start_process() def stop(self): # Ensure the body of the function is only run once. with self._stopping_lock: if self._is_trick_stopping: return self._is_trick_stopping = True process_watcher = self.process_watcher if self.event_debouncer is not None: self.event_debouncer.stop() self._stop_process() # Don't leak threads: Wait for background threads to stop. 
if self.event_debouncer is not None: self.event_debouncer.join() if process_watcher is not None: process_watcher.join() def _start_process(self): if self._is_trick_stopping: return # windows doesn't have setsid self.process = subprocess.Popen( self.command, preexec_fn=getattr(os, "setsid", None) ) if self.restart_on_command_exit: self.process_watcher = ProcessWatcher(self.process, self._restart_process) self.process_watcher.start() def _stop_process(self): # Ensure the body of the function is not run in parallel in different threads. with self._stopping_lock: if self._is_process_stopping: return self._is_process_stopping = True try: if self.process_watcher is not None: self.process_watcher.stop() self.process_watcher = None if self.process is not None: try: kill_process(self.process.pid, self.stop_signal) except OSError: # Process is already gone pass else: kill_time = time.time() + self.kill_after while time.time() < kill_time: if self.process.poll() is not None: break time.sleep(0.25) else: try: kill_process(self.process.pid, 9) except OSError: # Process is already gone pass self.process = None finally: self._is_process_stopping = False @echo_events def on_any_event(self, event): if event.event_type == EVENT_TYPE_OPENED: # FIXME: see issue #949, and find a way to better handle that scenario return if self.event_debouncer is not None: self.event_debouncer.handle_event(event) else: self._restart_process() def _restart_process(self): if self._is_trick_stopping: return self._stop_process() self._start_process() self.restart_count += 1 if not sys.platform.startswith("win"): def kill_process(pid, stop_signal): os.killpg(os.getpgid(pid), stop_signal) else: def kill_process(pid, stop_signal): os.kill(pid, stop_signal) watchdog-3.0.0/src/watchdog/utils/000077500000000000000000000000001440602103100170305ustar00rootroot00000000000000watchdog-3.0.0/src/watchdog/utils/__init__.py000066400000000000000000000106221440602103100211420ustar00rootroot00000000000000# Copyright 2011 
Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.utils :synopsis: Utility classes and functions. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ------- .. autoclass:: BaseThread :members: :show-inheritance: :inherited-members: """ from __future__ import annotations import sys import threading from typing import TYPE_CHECKING class UnsupportedLibc(Exception): pass class WatchdogShutdown(Exception): """ Semantic exception used to signal an external shutdown event. """ pass class BaseThread(threading.Thread): """Convenience class for creating stoppable threads.""" def __init__(self): threading.Thread.__init__(self) if hasattr(self, "daemon"): self.daemon = True else: self.setDaemon(True) self._stopped_event = threading.Event() @property def stopped_event(self): return self._stopped_event def should_keep_running(self): """Determines whether the thread should continue running.""" return not self._stopped_event.is_set() def on_thread_stop(self): """Override this method instead of :meth:`stop()`. :meth:`stop()` calls this method. This method is called immediately after the thread is signaled to stop. """ pass def stop(self): """Signals the thread to stop.""" self._stopped_event.set() self.on_thread_stop() def on_thread_start(self): """Override this method instead of :meth:`start()`. :meth:`start()` calls this method. 
This method is called right before this thread is started and this object’s run() method is invoked. """ pass def start(self): self.on_thread_start() threading.Thread.start(self) def load_module(module_name): """Imports a module given its name and returns a handle to it.""" try: __import__(module_name) except ImportError: raise ImportError(f"No module named {module_name}") return sys.modules[module_name] def load_class(dotted_path): """Loads and returns a class definition provided a dotted path specification the last part of the dotted path is the class name and there is at least one module name preceding the class name. Notes: You will need to ensure that the module you are trying to load exists in the Python path. Examples: - module.name.ClassName # Provided module.name is in the Python path. - module.ClassName # Provided module is in the Python path. What won't work: - ClassName - modle.name.ClassName # Typo in module name. - module.name.ClasNam # Typo in classname. """ dotted_path_split = dotted_path.split(".") if len(dotted_path_split) <= 1: raise ValueError( f"Dotted module path {dotted_path} must contain a module name and a classname" ) klass_name = dotted_path_split[-1] module_name = ".".join(dotted_path_split[:-1]) module = load_module(module_name) if hasattr(module, klass_name): return getattr(module, klass_name) # Finally create and return an instance of the class # return klass(*args, **kwargs) else: raise AttributeError( f"Module {module_name} does not have class attribute {klass_name}" ) if TYPE_CHECKING or sys.version_info >= (3, 8): # using `as` to explicitly re-export this since this is a compatibility layer from typing import Protocol as Protocol else: # Provide a dummy Protocol class when not available from stdlib. Should be used # only for hinting. This could be had from typing_protocol, but not worth adding # the _first_ dependency just for this. class Protocol: ... 
watchdog-3.0.0/src/watchdog/utils/bricks.py000066400000000000000000000055051440602103100206640ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utility collections or "bricks". :module: watchdog.utils.bricks :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: lalinsky@gmail.com (Lukáš Lalinský) :author: python@rcn.com (Raymond Hettinger) :author: contact@tiger-222.fr (Mickaël Schoentgen) Classes ======= .. autoclass:: OrderedSetQueue :members: :show-inheritance: :inherited-members: .. autoclass:: OrderedSet """ from __future__ import annotations import queue class SkipRepeatsQueue(queue.Queue): """Thread-safe implementation of an special queue where a put of the last-item put'd will be dropped. The implementation leverages locking already implemented in the base class redefining only the primitives. Queued items must be immutable and hashable so that they can be used as dictionary keys. You must implement **only read-only properties** and the :meth:`Item.__hash__()`, :meth:`Item.__eq__()`, and :meth:`Item.__ne__()` methods for items to be hashable. 
An example implementation follows:: class Item: def __init__(self, a, b): self._a = a self._b = b @property def a(self): return self._a @property def b(self): return self._b def _key(self): return (self._a, self._b) def __eq__(self, item): return self._key() == item._key() def __ne__(self, item): return self._key() != item._key() def __hash__(self): return hash(self._key()) based on the OrderedSetQueue below """ def _init(self, maxsize): super()._init(maxsize) self._last_item = None def _put(self, item): if self._last_item is None or item != self._last_item: super()._put(item) self._last_item = item else: # `put` increments `unfinished_tasks` even if we did not put # anything into the queue here self.unfinished_tasks -= 1 def _get(self): item = super()._get() if item is self._last_item: self._last_item = None return item watchdog-3.0.0/src/watchdog/utils/delayed_queue.py000066400000000000000000000060011440602103100222120ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import threading import time from collections import deque from typing import Callable, Deque, Generic, Optional, Tuple, TypeVar T = TypeVar("T") class DelayedQueue(Generic[T]): def __init__(self, delay): self.delay_sec = delay self._lock = threading.Lock() self._not_empty = threading.Condition(self._lock) self._queue: Deque[Tuple[T, float, bool]] = deque() self._closed = False def put(self, element: T, delay: bool = False) -> None: """Add element to queue.""" self._lock.acquire() self._queue.append((element, time.time(), delay)) self._not_empty.notify() self._lock.release() def close(self): """Close queue, indicating no more items will be added.""" self._closed = True # Interrupt the blocking _not_empty.wait() call in get self._not_empty.acquire() self._not_empty.notify() self._not_empty.release() def get(self) -> Optional[T]: """Remove and return an element from the queue, or this queue has been closed raise the Closed exception. """ while True: # wait for element to be added to queue self._not_empty.acquire() while len(self._queue) == 0 and not self._closed: self._not_empty.wait() if self._closed: self._not_empty.release() return None head, insert_time, delay = self._queue[0] self._not_empty.release() # wait for delay if required if delay: time_left = insert_time + self.delay_sec - time.time() while time_left > 0: time.sleep(time_left) time_left = insert_time + self.delay_sec - time.time() # return element if it's still in the queue with self._lock: if len(self._queue) > 0 and self._queue[0][0] is head: self._queue.popleft() return head def remove(self, predicate: Callable[[T], bool]) -> Optional[T]: """Remove and return the first items for which predicate is True, ignoring delay.""" with self._lock: for i, (elem, t, delay) in enumerate(self._queue): if predicate(elem): del self._queue[i] return elem return None 
watchdog-3.0.0/src/watchdog/utils/dirsnapshot.py000066400000000000000000000270651440602103100217520ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.utils.dirsnapshot :synopsis: Directory snapshots and comparison. :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) .. ADMONITION:: Where are the moved events? They "disappeared" This implementation does not take partition boundaries into consideration. It will only work when the directory tree is entirely on the same file system. More specifically, any part of the code that depends on inode numbers can break if partition boundaries are crossed. In these cases, the snapshot diff will represent file/directory movement as created and deleted events. Classes ------- .. autoclass:: DirectorySnapshot :members: :show-inheritance: .. autoclass:: DirectorySnapshotDiff :members: :show-inheritance: .. autoclass:: EmptyDirectorySnapshot :members: :show-inheritance: """ from __future__ import annotations import errno import os from stat import S_ISDIR class DirectorySnapshotDiff: """ Compares two directory snapshots and creates an object that represents the difference between the two snapshots. :param ref: The reference directory snapshot. 
:type ref: :class:`DirectorySnapshot` :param snapshot: The directory snapshot which will be compared with the reference snapshot. :type snapshot: :class:`DirectorySnapshot` :param ignore_device: A boolean indicating whether to ignore the device id or not. By default, a file may be uniquely identified by a combination of its first inode and its device id. The problem is that the device id may (or may not) change between system boots. This problem would cause the DirectorySnapshotDiff to think a file has been deleted and created again but it would be the exact same file. Set to True only if you are sure you will always use the same device. :type ignore_device: :class:`bool` """ def __init__(self, ref, snapshot, ignore_device=False): created = snapshot.paths - ref.paths deleted = ref.paths - snapshot.paths if ignore_device: def get_inode(directory, full_path): return directory.inode(full_path)[0] else: def get_inode(directory, full_path): return directory.inode(full_path) # check that all unchanged paths have the same inode for path in ref.paths & snapshot.paths: if get_inode(ref, path) != get_inode(snapshot, path): created.add(path) deleted.add(path) # find moved paths moved = set() for path in set(deleted): inode = ref.inode(path) new_path = snapshot.path(inode) if new_path: # file is not deleted but moved deleted.remove(path) moved.add((path, new_path)) for path in set(created): inode = snapshot.inode(path) old_path = ref.path(inode) if old_path: created.remove(path) moved.add((old_path, path)) # find modified paths # first check paths that have not moved modified = set() for path in ref.paths & snapshot.paths: if get_inode(ref, path) == get_inode(snapshot, path): if ref.mtime(path) != snapshot.mtime(path) or ref.size( path ) != snapshot.size(path): modified.add(path) for old_path, new_path in moved: if ref.mtime(old_path) != snapshot.mtime(new_path) or ref.size( old_path ) != snapshot.size(new_path): modified.add(old_path) self._dirs_created = [path for path in 
created if snapshot.isdir(path)] self._dirs_deleted = [path for path in deleted if ref.isdir(path)] self._dirs_modified = [path for path in modified if ref.isdir(path)] self._dirs_moved = [(frm, to) for (frm, to) in moved if ref.isdir(frm)] self._files_created = list(created - set(self._dirs_created)) self._files_deleted = list(deleted - set(self._dirs_deleted)) self._files_modified = list(modified - set(self._dirs_modified)) self._files_moved = list(moved - set(self._dirs_moved)) def __str__(self): return self.__repr__() def __repr__(self): fmt = ( "<{0} files(created={1}, deleted={2}, modified={3}, moved={4})," " folders(created={5}, deleted={6}, modified={7}, moved={8})>" ) return fmt.format( type(self).__name__, len(self._files_created), len(self._files_deleted), len(self._files_modified), len(self._files_moved), len(self._dirs_created), len(self._dirs_deleted), len(self._dirs_modified), len(self._dirs_moved), ) @property def files_created(self): """List of files that were created.""" return self._files_created @property def files_deleted(self): """List of files that were deleted.""" return self._files_deleted @property def files_modified(self): """List of files that were modified.""" return self._files_modified @property def files_moved(self): """ List of files that were moved. Each event is a two-tuple the first item of which is the path that has been renamed to the second item in the tuple. """ return self._files_moved @property def dirs_modified(self): """ List of directories that were modified. """ return self._dirs_modified @property def dirs_moved(self): """ List of directories that were moved. Each event is a two-tuple the first item of which is the path that has been renamed to the second item in the tuple. """ return self._dirs_moved @property def dirs_deleted(self): """ List of directories that were deleted. """ return self._dirs_deleted @property def dirs_created(self): """ List of directories that were created. 
""" return self._dirs_created class DirectorySnapshot: """ A snapshot of stat information of files in a directory. :param path: The directory path for which a snapshot should be taken. :type path: ``str`` :param recursive: ``True`` if the entire directory tree should be included in the snapshot; ``False`` otherwise. :type recursive: ``bool`` :param stat: Use custom stat function that returns a stat structure for path. Currently only st_dev, st_ino, st_mode and st_mtime are needed. A function taking a ``path`` as argument which will be called for every entry in the directory tree. :param listdir: Use custom listdir function. For details see ``os.scandir``. """ def __init__(self, path, recursive=True, stat=os.stat, listdir=os.scandir): self.recursive = recursive self.stat = stat self.listdir = listdir self._stat_info = {} self._inode_to_path = {} st = self.stat(path) self._stat_info[path] = st self._inode_to_path[(st.st_ino, st.st_dev)] = path for p, st in self.walk(path): i = (st.st_ino, st.st_dev) self._inode_to_path[i] = p self._stat_info[p] = st def walk(self, root): try: paths = [os.path.join(root, entry.name) for entry in self.listdir(root)] except OSError as e: # Directory may have been deleted between finding it in the directory # list of its parent and trying to delete its contents. If this # happens we treat it as empty. Likewise if the directory was replaced # with a file of the same name (less likely, but possible). if e.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL): return else: raise entries = [] for p in paths: try: entry = (p, self.stat(p)) entries.append(entry) yield entry except OSError: continue if self.recursive: for path, st in entries: try: if S_ISDIR(st.st_mode): for entry in self.walk(path): yield entry except PermissionError: pass @property def paths(self): """ Set of file/directory paths in the snapshot. """ return set(self._stat_info.keys()) def path(self, id): """ Returns path for id. None if id is unknown to this snapshot. 
""" return self._inode_to_path.get(id) def inode(self, path): """Returns an id for path.""" st = self._stat_info[path] return (st.st_ino, st.st_dev) def isdir(self, path): return S_ISDIR(self._stat_info[path].st_mode) def mtime(self, path): return self._stat_info[path].st_mtime def size(self, path): return self._stat_info[path].st_size def stat_info(self, path): """ Returns a stat information object for the specified path from the snapshot. Attached information is subject to change. Do not use unless you specify `stat` in constructor. Use :func:`inode`, :func:`mtime`, :func:`isdir` instead. :param path: The path for which stat information should be obtained from a snapshot. """ return self._stat_info[path] def __sub__(self, previous_dirsnap): """Allow subtracting a DirectorySnapshot object instance from another. :returns: A :class:`DirectorySnapshotDiff` object. """ return DirectorySnapshotDiff(previous_dirsnap, self) def __str__(self): return self.__repr__() def __repr__(self): return str(self._stat_info) class EmptyDirectorySnapshot: """Class to implement an empty snapshot. This is used together with DirectorySnapshot and DirectorySnapshotDiff in order to get all the files/folders in the directory as created. """ @staticmethod def path(_): """Mock up method to return the path of the received inode. As the snapshot is intended to be empty, it always returns None. :returns: None. """ return None @property def paths(self): """Mock up method to return a set of file/directory paths in the snapshot. As the snapshot is intended to be empty, it always returns an empty set. :returns: An empty set. """ return set() watchdog-3.0.0/src/watchdog/utils/echo.py000066400000000000000000000120651440602103100203240ustar00rootroot00000000000000# echo.py: Tracing function calls using Python decorators. # # Written by Thomas Guest # Please see http://wordaligned.org/articles/echo # # Place into the public domain. """ Echo calls made to functions and methods in a module. 
"Echoing" a function call means printing out the name of the function and the values of its arguments before making the call (which is more commonly referred to as "tracing", but Python already has a trace module). Example: to echo calls made to functions in "my_module" do: import echo import my_module echo.echo_module(my_module) Example: to echo calls made to functions in "my_module.my_class" do: echo.echo_class(my_module.my_class) Alternatively, echo.echo can be used to decorate functions. Calls to the decorated function will be echoed. Example: @echo.echo def my_function(args): pass """ from __future__ import annotations import inspect import sys def name(item): """Return an item's name.""" return item.__name__ def is_classmethod(instancemethod, klass): """Determine if an instancemethod is a classmethod.""" return inspect.ismethod(instancemethod) and instancemethod.__self__ is klass def is_static_method(method, klass): """Returns True if method is an instance method of klass.""" return next( ( isinstance(c.__dict__[name(method)], staticmethod) for c in klass.mro() if name(method) in c.__dict__ ), False, ) def is_class_private_name(name): """Determine if a name is a class private name.""" # Exclude system defined names such as __init__, __add__ etc return name.startswith("__") and not name.endswith("__") def method_name(method): """Return a method's name. This function returns the name the method is accessed by from outside the class (i.e. it prefixes "private" methods appropriately). """ mname = name(method) if is_class_private_name(mname): mname = f"_{name(method.__self__.__class__)}{mname}" return mname def format_arg_value(arg_val): """Return a string representing a (name, value) pair. >>> format_arg_value(('x', (1, 2, 3))) 'x=(1, 2, 3)' """ arg, val = arg_val return f"{arg}={val!r}" def echo(fn, write=sys.stdout.write): """Echo calls to a function. 
Returns a decorated version of the input function which "echoes" calls made to it by writing out the function's name and the arguments it was called with. """ import functools # Unpack function's arg count, arg names, arg defaults code = fn.__code__ argcount = code.co_argcount argnames = code.co_varnames[:argcount] fn_defaults = fn.__defaults__ or [] argdefs = dict(list(zip(argnames[-len(fn_defaults) :], fn_defaults))) @functools.wraps(fn) def wrapped(*v, **k): # Collect function arguments by chaining together positional, # defaulted, extra positional and keyword arguments. positional = list(map(format_arg_value, list(zip(argnames, v)))) defaulted = [ format_arg_value((a, argdefs[a])) for a in argnames[len(v) :] if a not in k ] nameless = list(map(repr, v[argcount:])) keyword = list(map(format_arg_value, list(k.items()))) args = positional + defaulted + nameless + keyword write(f"{name(fn)}({', '.join(args)})\n") return fn(*v, **k) return wrapped def echo_instancemethod(klass, method, write=sys.stdout.write): """Change an instancemethod so that calls to it are echoed. Replacing a classmethod is a little more tricky. See: http://www.python.org/doc/current/ref/types.html """ mname = method_name(method) never_echo = ( "__str__", "__repr__", ) # Avoid recursion printing method calls if mname in never_echo: pass elif is_classmethod(method, klass): setattr(klass, mname, classmethod(echo(method.__func__, write))) else: setattr(klass, mname, echo(method, write)) def echo_class(klass, write=sys.stdout.write): """Echo calls to class methods and static functions""" for _, method in inspect.getmembers(klass, inspect.ismethod): # In python 3 only class methods are returned here echo_instancemethod(klass, method, write) for _, fn in inspect.getmembers(klass, inspect.isfunction): if is_static_method(fn, klass): setattr(klass, name(fn), staticmethod(echo(fn, write))) else: # It's not a class or a static method, so it must be an instance method. 
echo_instancemethod(klass, fn, write) def echo_module(mod, write=sys.stdout.write): """Echo calls to functions and methods in a module.""" for fname, fn in inspect.getmembers(mod, inspect.isfunction): setattr(mod, fname, echo(fn, write)) for _, klass in inspect.getmembers(mod, inspect.isclass): echo_class(klass, write) if __name__ == "__main__": import doctest optionflags = doctest.ELLIPSIS doctest.testfile("echoexample.txt", optionflags=optionflags) doctest.testmod(optionflags=optionflags) watchdog-3.0.0/src/watchdog/utils/event_debouncer.py000066400000000000000000000034051440602103100225530ustar00rootroot00000000000000from __future__ import annotations import logging import threading from watchdog.utils import BaseThread logger = logging.getLogger(__name__) class EventDebouncer(BaseThread): """Background thread for debouncing event handling. When an event is received, wait until the configured debounce interval passes before calling the callback. If additional events are received before the interval passes, reset the timer and keep waiting. When the debouncing interval passes, the callback will be called with a list of events in the order in which they were received. """ def __init__(self, debounce_interval_seconds, events_callback): super().__init__() self.debounce_interval_seconds = debounce_interval_seconds self.events_callback = events_callback self._events = [] self._cond = threading.Condition() def handle_event(self, event): with self._cond: self._events.append(event) self._cond.notify() def stop(self): with self._cond: super().stop() self._cond.notify() def run(self): with self._cond: while True: # Wait for first event (or shutdown). self._cond.wait() if self.debounce_interval_seconds: # Wait for additional events (or shutdown) until the debounce interval passes. 
while self.should_keep_running(): if not self._cond.wait(timeout=self.debounce_interval_seconds): break if not self.should_keep_running(): break events = self._events self._events = [] self.events_callback(events) watchdog-3.0.0/src/watchdog/utils/patterns.py000066400000000000000000000075441440602103100212540ustar00rootroot00000000000000# patterns.py: Common wildcard searching/filtering functionality for files. # # Copyright (C) 2010 Yesudeep Mangalapilly # # Written by Boris Staletic from __future__ import annotations # Non-pure path objects are only allowed on their respective OS's. # Thus, these utilities require "pure" path objects that don't access the filesystem. # Since pathlib doesn't have a `case_sensitive` parameter, we have to approximate it # by converting input paths to `PureWindowsPath` and `PurePosixPath` where: # - `PureWindowsPath` is always case-insensitive. # - `PurePosixPath` is always case-sensitive. # Reference: https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.match from pathlib import PurePosixPath, PureWindowsPath def _match_path(path, included_patterns, excluded_patterns, case_sensitive): """Internal function same as :func:`match_path` but does not check arguments.""" if case_sensitive: path = PurePosixPath(path) else: included_patterns = {pattern.lower() for pattern in included_patterns} excluded_patterns = {pattern.lower() for pattern in excluded_patterns} path = PureWindowsPath(path) common_patterns = included_patterns & excluded_patterns if common_patterns: raise ValueError( "conflicting patterns `{}` included and excluded".format(common_patterns) ) return any(path.match(p) for p in included_patterns) and not any( path.match(p) for p in excluded_patterns ) def filter_paths( paths, included_patterns=None, excluded_patterns=None, case_sensitive=True ): """ Filters from a set of paths based on acceptable patterns and ignorable patterns. 
:param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: A list of pathnames that matched the allowable patterns and passed through the ignored patterns. """ included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for path in paths: if _match_path(path, set(included), set(excluded), case_sensitive): yield path def match_any_paths( paths, included_patterns=None, excluded_patterns=None, case_sensitive=True ): """ Matches from a set of paths based on acceptable patterns and ignorable patterns. :param pathnames: A list of path names that will be filtered based on matching and ignored patterns. :param included_patterns: Allow filenames matching wildcard patterns specified in this list. If no pattern list is specified, ["*"] is used as the default pattern, which matches all files. :param excluded_patterns: Ignores filenames matching wildcard patterns specified in this list. If no pattern list is specified, no files are ignored. :param case_sensitive: ``True`` if matching should be case-sensitive; ``False`` otherwise. :returns: ``True`` if any of the paths matches; ``False`` otherwise. 
""" included = ["*"] if included_patterns is None else included_patterns excluded = [] if excluded_patterns is None else excluded_patterns for path in paths: if _match_path(path, set(included), set(excluded), case_sensitive): return True return False watchdog-3.0.0/src/watchdog/utils/platform.py000066400000000000000000000027531440602103100212350ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import sys PLATFORM_WINDOWS = "windows" PLATFORM_LINUX = "linux" PLATFORM_BSD = "bsd" PLATFORM_DARWIN = "darwin" PLATFORM_UNKNOWN = "unknown" def get_platform_name(): if sys.platform.startswith("win"): return PLATFORM_WINDOWS elif sys.platform.startswith("darwin"): return PLATFORM_DARWIN elif sys.platform.startswith("linux"): return PLATFORM_LINUX elif sys.platform.startswith(("dragonfly", "freebsd", "netbsd", "openbsd", "bsd")): return PLATFORM_BSD else: return PLATFORM_UNKNOWN __platform__ = get_platform_name() def is_linux(): return __platform__ == PLATFORM_LINUX def is_bsd(): return __platform__ == PLATFORM_BSD def is_darwin(): return __platform__ == PLATFORM_DARWIN def is_windows(): return __platform__ == PLATFORM_WINDOWS watchdog-3.0.0/src/watchdog/utils/process_watcher.py000066400000000000000000000013131440602103100225730ustar00rootroot00000000000000from __future__ import annotations import logging from watchdog.utils import BaseThread logger = logging.getLogger(__name__) class ProcessWatcher(BaseThread): def __init__(self, popen_obj, process_termination_callback): super().__init__() self.popen_obj = popen_obj self.process_termination_callback = process_termination_callback def run(self): while True: if self.popen_obj.poll() is not None: break if self.stopped_event.wait(timeout=0.1): return try: self.process_termination_callback() except Exception: logger.exception("Error calling process termination callback") watchdog-3.0.0/src/watchdog/version.py000066400000000000000000000017521440602103100177340ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations # When updating this version number, please update the # ``docs/source/global.rst.inc`` file as well. VERSION_MAJOR = 3 VERSION_MINOR = 0 VERSION_BUILD = 0 VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_BUILD) VERSION_STRING = f"{VERSION_MAJOR}.{VERSION_MINOR}.{VERSION_BUILD}" __version__ = VERSION_INFO watchdog-3.0.0/src/watchdog/watchmedo.py000077500000000000000000000611721440602103100202270ustar00rootroot00000000000000# # Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: watchdog.watchmedo :author: yesudeep@google.com (Yesudeep Mangalapilly) :author: contact@tiger-222.fr (Mickaël Schoentgen) :synopsis: ``watchmedo`` shell script utility. 
""" from __future__ import annotations import errno import logging import os import os.path import sys import time from argparse import ArgumentParser, RawDescriptionHelpFormatter from io import StringIO from textwrap import dedent from typing import TYPE_CHECKING from watchdog.observers.api import BaseObserverSubclassCallable from watchdog.utils import WatchdogShutdown, load_class from watchdog.version import VERSION_STRING logging.basicConfig(level=logging.INFO) CONFIG_KEY_TRICKS = "tricks" CONFIG_KEY_PYTHON_PATH = "python-path" class HelpFormatter(RawDescriptionHelpFormatter): """A nicer help formatter. Help for arguments can be indented and contain new lines. It will be de-dented and arguments in the help will be separated by a blank line for better readability. Source: https://github.com/httpie/httpie/blob/2423f89/httpie/cli/argparser.py#L31 """ def __init__(self, *args, max_help_position=6, **kwargs): # A smaller indent for args help. kwargs["max_help_position"] = max_help_position super().__init__(*args, **kwargs) def _split_lines(self, text, width): text = dedent(text).strip() + "\n\n" return text.splitlines() epilog = """\ Copyright 2011 Yesudeep Mangalapilly . Copyright 2012 Google, Inc & contributors. Licensed under the terms of the Apache license, version 2.0. Please see LICENSE in the source code for more information.""" cli = ArgumentParser(epilog=epilog, formatter_class=HelpFormatter) cli.add_argument("--version", action="version", version=VERSION_STRING) subparsers = cli.add_subparsers(dest="top_command") command_parsers = {} def argument(*name_or_flags, **kwargs): """Convenience function to properly format arguments to pass to the command decorator. """ return list(name_or_flags), kwargs def command(args=[], parent=subparsers, cmd_aliases=[]): """Decorator to define a new command in a sanity-preserving way. 
The function will be stored in the ``func`` variable when the parser parses arguments so that it can be called directly like so:: >>> args = cli.parse_args() >>> args.func(args) """ def decorator(func): name = func.__name__.replace("_", "-") desc = dedent(func.__doc__) parser = parent.add_parser( name, description=desc, aliases=cmd_aliases, formatter_class=HelpFormatter ) command_parsers[name] = parser verbosity_group = parser.add_mutually_exclusive_group() verbosity_group.add_argument( "-q", "--quiet", dest="verbosity", action="append_const", const=-1 ) verbosity_group.add_argument( "-v", "--verbose", dest="verbosity", action="append_const", const=1 ) for arg in args: parser.add_argument(*arg[0], **arg[1]) parser.set_defaults(func=func) return func return decorator def path_split(pathname_spec, separator=os.pathsep): """ Splits a pathname specification separated by an OS-dependent separator. :param pathname_spec: The pathname specification. :param separator: (OS Dependent) `:` on Unix and `;` on Windows or user-specified. """ return list(pathname_spec.split(separator)) def add_to_sys_path(pathnames, index=0): """ Adds specified paths at specified index into the sys.path list. :param paths: A list of paths to add to the sys.path :param index: (Default 0) The index in the sys.path list where the paths will be added. """ for pathname in pathnames[::-1]: sys.path.insert(index, pathname) def load_config(tricks_file_pathname): """ Loads the YAML configuration from the specified file. :param tricks_file_path: The path to the tricks configuration file. :returns: A dictionary of configuration information. """ import yaml with open(tricks_file_pathname, "rb") as f: return yaml.safe_load(f.read()) def parse_patterns(patterns_spec, ignore_patterns_spec, separator=";"): """ Parses pattern argument specs and returns a two-tuple of (patterns, ignore_patterns). 
""" patterns = patterns_spec.split(separator) ignore_patterns = ignore_patterns_spec.split(separator) if ignore_patterns == [""]: ignore_patterns = [] return (patterns, ignore_patterns) def observe_with(observer, event_handler, pathnames, recursive): """ Single observer thread with a scheduled path and event handler. :param observer: The observer thread. :param event_handler: Event handler which will be called in response to file system events. :param pathnames: A list of pathnames to monitor. :param recursive: ``True`` if recursive; ``False`` otherwise. """ for pathname in set(pathnames): observer.schedule(event_handler, pathname, recursive) observer.start() try: while True: time.sleep(1) except WatchdogShutdown: observer.stop() observer.join() def schedule_tricks(observer, tricks, pathname, recursive): """ Schedules tricks with the specified observer and for the given watch path. :param observer: The observer thread into which to schedule the trick and watch. :param tricks: A list of tricks. :param pathname: A path name which should be watched. :param recursive: ``True`` if recursive; ``False`` otherwise. """ for trick in tricks: for name, value in list(trick.items()): TrickClass = load_class(name) handler = TrickClass(**value) trick_pathname = getattr(handler, "source_directory", None) or pathname observer.schedule(handler, trick_pathname, recursive) @command( [ argument("files", nargs="*", help="perform tricks from given file"), argument( "--python-path", default=".", help=f"Paths separated by {os.pathsep!r} to add to the Python path.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout (in seconds).", ), argument( "--recursive", action="store_true", default=True, help="Recursively monitor paths (defaults to True).", ), argument( "--debug-force-polling", action="store_true", help="[debug] Forces polling." 
), argument( "--debug-force-kqueue", action="store_true", help="[debug] Forces BSD kqueue(2).", ), argument( "--debug-force-winapi", action="store_true", help="[debug] Forces Windows API.", ), argument( "--debug-force-fsevents", action="store_true", help="[debug] Forces macOS FSEvents.", ), argument( "--debug-force-inotify", action="store_true", help="[debug] Forces Linux inotify(7).", ), ], cmd_aliases=["tricks"], ) def tricks_from(args): """ Command to execute tricks from a tricks configuration file. """ Observer: BaseObserverSubclassCallable if args.debug_force_polling: from watchdog.observers.polling import PollingObserver as Observer elif args.debug_force_kqueue: from watchdog.observers.kqueue import KqueueObserver as Observer elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and sys.platform.startswith("win")): from watchdog.observers.read_directory_changes import WindowsApiObserver as Observer elif args.debug_force_inotify: from watchdog.observers.inotify import InotifyObserver as Observer elif args.debug_force_fsevents: from watchdog.observers.fsevents import FSEventsObserver as Observer else: # Automatically picks the most appropriate observer for the platform # on which it is running. from watchdog.observers import Observer add_to_sys_path(path_split(args.python_path)) observers = [] for tricks_file in args.files: observer = Observer(timeout=args.timeout) if not os.path.exists(tricks_file): raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file) config = load_config(tricks_file) try: tricks = config[CONFIG_KEY_TRICKS] except KeyError: raise KeyError( f"No {CONFIG_KEY_TRICKS!r} key specified in {tricks_file!r}." 
) if CONFIG_KEY_PYTHON_PATH in config: add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH]) dir_path = os.path.dirname(tricks_file) if not dir_path: dir_path = os.path.relpath(os.getcwd()) schedule_tricks(observer, tricks, dir_path, args.recursive) observer.start() observers.append(observer) try: while True: time.sleep(1) except WatchdogShutdown: for o in observers: o.unschedule_all() o.stop() for o in observers: o.join() @command( [ argument( "trick_paths", nargs="*", help="Dotted paths for all the tricks you want to generate.", ), argument( "--python-path", default=".", help=f"Paths separated by {os.pathsep!r} to add to the Python path.", ), argument( "--append-to-file", default=None, help=""" Appends the generated tricks YAML to a file. If not specified, prints to standard output.""", ), argument( "-a", "--append-only", dest="append_only", action="store_true", help=""" If --append-to-file is not specified, produces output for appending instead of a complete tricks YAML file.""", ), ], cmd_aliases=["generate-tricks-yaml"], ) def tricks_generate_yaml(args): """ Command to generate Yaml configuration for tricks named on the command line. """ import yaml python_paths = path_split(args.python_path) add_to_sys_path(python_paths) output = StringIO() for trick_path in args.trick_paths: TrickClass = load_class(trick_path) output.write(TrickClass.generate_yaml()) content = output.getvalue() output.close() header = yaml.dump({CONFIG_KEY_PYTHON_PATH: python_paths}) header += f"{CONFIG_KEY_TRICKS}:\n" if args.append_to_file is None: # Output to standard output. if not args.append_only: content = header + content sys.stdout.write(content) else: if not os.path.exists(args.append_to_file): content = header + content with open(args.append_to_file, "a", encoding="utf-8") as file: file.write(content) @command( [ argument( "directories", nargs="*", default=".", help="Directories to watch. 
(default: '.').", ), argument( "-p", "--pattern", "--patterns", dest="patterns", default="*", help="Matches event paths with these patterns (separated by ;).", ), argument( "-i", "--ignore-pattern", "--ignore-patterns", dest="ignore_patterns", default="", help="Ignores event paths with these patterns (separated by ;).", ), argument( "-D", "--ignore-directories", dest="ignore_directories", action="store_true", help="Ignores events for directories.", ), argument( "-R", "--recursive", dest="recursive", action="store_true", help="Monitors the directories recursively.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout.", ), argument( "--trace", action="store_true", help="Dumps complete dispatching trace." ), argument( "--debug-force-polling", action="store_true", help="[debug] Forces polling." ), argument( "--debug-force-kqueue", action="store_true", help="[debug] Forces BSD kqueue(2).", ), argument( "--debug-force-winapi", action="store_true", help="[debug] Forces Windows API.", ), argument( "--debug-force-fsevents", action="store_true", help="[debug] Forces macOS FSEvents.", ), argument( "--debug-force-inotify", action="store_true", help="[debug] Forces Linux inotify(7).", ), ] ) def log(args): """ Command to log file system events to the console. 
""" from watchdog.tricks import LoggerTrick from watchdog.utils import echo if args.trace: class_module_logger = logging.getLogger(LoggerTrick.__module__) echo.echo_class(LoggerTrick, write=lambda msg: class_module_logger.info(msg)) patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) handler = LoggerTrick( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=args.ignore_directories, ) Observer: BaseObserverSubclassCallable if args.debug_force_polling: from watchdog.observers.polling import PollingObserver as Observer elif args.debug_force_kqueue: from watchdog.observers.kqueue import KqueueObserver as Observer elif (not TYPE_CHECKING and args.debug_force_winapi) or (TYPE_CHECKING and sys.platform.startswith("win")): from watchdog.observers.read_directory_changes import WindowsApiObserver as Observer elif args.debug_force_inotify: from watchdog.observers.inotify import InotifyObserver as Observer elif args.debug_force_fsevents: from watchdog.observers.fsevents import FSEventsObserver as Observer else: # Automatically picks the most appropriate observer for the platform # on which it is running. from watchdog.observers import Observer observer = Observer(timeout=args.timeout) observe_with(observer, handler, args.directories, args.recursive) @command( [ argument("directories", nargs="*", default=".", help="Directories to watch."), argument( "-c", "--command", dest="command", default=None, help=""" Shell command executed in response to matching events. These interpolation variables are available to your command string: ${watch_src_path} - event source path ${watch_dest_path} - event destination path (for moved events) ${watch_event_type} - event type ${watch_object} - 'file' or 'directory' Note: Please ensure you do not use double quotes (") to quote your command string. That will force your shell to interpolate before the command is processed by this command. 
Example: --command='echo "${watch_src_path}"' """, ), argument( "-p", "--pattern", "--patterns", dest="patterns", default="*", help="Matches event paths with these patterns (separated by ;).", ), argument( "-i", "--ignore-pattern", "--ignore-patterns", dest="ignore_patterns", default="", help="Ignores event paths with these patterns (separated by ;).", ), argument( "-D", "--ignore-directories", dest="ignore_directories", default=False, action="store_true", help="Ignores events for directories.", ), argument( "-R", "--recursive", dest="recursive", action="store_true", help="Monitors the directories recursively.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout.", ), argument( "-w", "--wait", dest="wait_for_process", action="store_true", help="Wait for process to finish to avoid multiple simultaneous instances.", ), argument( "-W", "--drop", dest="drop_during_process", action="store_true", help="Ignore events that occur while command is still being" " executed to avoid multiple simultaneous instances.", ), argument( "--debug-force-polling", action="store_true", help="[debug] Forces polling." ), ] ) def shell_command(args): """ Command to execute shell commands in response to file system events. 
""" from watchdog.tricks import ShellCommandTrick if not args.command: args.command = None Observer: BaseObserverSubclassCallable if args.debug_force_polling: from watchdog.observers.polling import PollingObserver as Observer else: from watchdog.observers import Observer patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) handler = ShellCommandTrick( shell_command=args.command, patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=args.ignore_directories, wait_for_process=args.wait_for_process, drop_during_process=args.drop_during_process, ) observer = Observer(timeout=args.timeout) observe_with(observer, handler, args.directories, args.recursive) @command( [ argument("command", help="Long-running command to run in a subprocess."), argument( "command_args", metavar="arg", nargs="*", help=""" Command arguments. Note: Use -- before the command arguments, otherwise watchmedo will try to interpret them. """, ), argument( "-d", "--directory", dest="directories", metavar="DIRECTORY", action="append", help="Directory to watch. 
Use another -d or --directory option " "for each directory.", ), argument( "-p", "--pattern", "--patterns", dest="patterns", default="*", help="Matches event paths with these patterns (separated by ;).", ), argument( "-i", "--ignore-pattern", "--ignore-patterns", dest="ignore_patterns", default="", help="Ignores event paths with these patterns (separated by ;).", ), argument( "-D", "--ignore-directories", dest="ignore_directories", default=False, action="store_true", help="Ignores events for directories.", ), argument( "-R", "--recursive", dest="recursive", action="store_true", help="Monitors the directories recursively.", ), argument( "--interval", "--timeout", dest="timeout", default=1.0, type=float, help="Use this as the polling interval/blocking timeout.", ), argument( "--signal", dest="signal", default="SIGINT", help="Stop the subprocess with this signal (default SIGINT).", ), argument( "--debug-force-polling", action="store_true", help="[debug] Forces polling." ), argument( "--kill-after", dest="kill_after", default=10.0, type=float, help="When stopping, kill the subprocess after the specified timeout " "in seconds (default 10.0).", ), argument( "--debounce-interval", dest="debounce_interval", default=0.0, type=float, help="After a file change, Wait until the specified interval (in " "seconds) passes with no file changes, and only then restart.", ), argument( "--no-restart-on-command-exit", dest="restart_on_command_exit", default=True, action="store_false", help="Don't auto-restart the command after it exits.", ), ] ) def auto_restart(args): """ Command to start a long-running subprocess and restart it on matched events. """ Observer: BaseObserverSubclassCallable if args.debug_force_polling: from watchdog.observers.polling import PollingObserver as Observer else: from watchdog.observers import Observer import signal from watchdog.tricks import AutoRestartTrick if not args.directories: args.directories = ["."] # Allow either signal name or number. 
if args.signal.startswith("SIG"): stop_signal = getattr(signal, args.signal) else: stop_signal = int(args.signal) # Handle termination signals by raising a semantic exception which will # allow us to gracefully unwind and stop the observer termination_signals = {signal.SIGTERM, signal.SIGINT} if hasattr(signal, "SIGHUP"): termination_signals.add(signal.SIGHUP) def handler_termination_signal(_signum, _frame): # Neuter all signals so that we don't attempt a double shutdown for signum in termination_signals: signal.signal(signum, signal.SIG_IGN) raise WatchdogShutdown for signum in termination_signals: signal.signal(signum, handler_termination_signal) patterns, ignore_patterns = parse_patterns(args.patterns, args.ignore_patterns) command = [args.command] command.extend(args.command_args) handler = AutoRestartTrick( command=command, patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=args.ignore_directories, stop_signal=stop_signal, kill_after=args.kill_after, debounce_interval_seconds=args.debounce_interval, restart_on_command_exit=args.restart_on_command_exit, ) handler.start() observer = Observer(timeout=args.timeout) try: observe_with(observer, handler, args.directories, args.recursive) except WatchdogShutdown: pass finally: handler.stop() class LogLevelException(Exception): pass def _get_log_level_from_args(args): verbosity = sum(args.verbosity or []) if verbosity < -1: raise LogLevelException("-q/--quiet may be specified only once.") if verbosity > 2: raise LogLevelException("-v/--verbose may be specified up to 2 times.") return ["ERROR", "WARNING", "INFO", "DEBUG"][1 + verbosity] def main(): """Entry-point function.""" args = cli.parse_args() if args.top_command is None: cli.print_help() return 1 try: log_level = _get_log_level_from_args(args) except LogLevelException as exc: print(f"Error: {exc.args[0]}", file=sys.stderr) command_parsers[args.top_command].print_help() return 1 logging.getLogger("watchdog").setLevel(log_level) try: 
args.func(args) except KeyboardInterrupt: return 130 return 0 if __name__ == "__main__": sys.exit(main()) watchdog-3.0.0/src/watchdog_fsevents.c000066400000000000000000001012361440602103100177540ustar00rootroot00000000000000/** * watchdog_fsevents.c: Python-C bridge to the OS X FSEvents API. * * Copyright 2010 Malthe Borch * Copyright 2011 Yesudeep Mangalapilly * Copyright 2012 Google, Inc & contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include #include #include #include #include #include /* Compatibility; since fsevents won't set these on earlier macOS versions the properties will always be False */ #if MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_13 #error Watchdog module requires at least macOS 10.13 #endif /* Convenience macros to make code more readable. */ #define G_NOT(o) !o #define G_IS_NULL(o) o == NULL #define G_IS_NOT_NULL(o) o != NULL #define G_RETURN_NULL_IF_NULL(o) do { if (NULL == o) { return NULL; } } while (0) #define G_RETURN_NULL_IF(condition) do { if (condition) { return NULL; } } while (0) #define G_RETURN_NULL_IF_NOT(condition) do { if (!condition) { return NULL; } } while (0) #define G_RETURN_IF(condition) do { if (condition) { return; } } while (0) #define G_RETURN_IF_NOT(condition) do { if (!condition) { return; } } while (0) #define UNUSED(x) (void)x /* Error message definitions. */ #define ERROR_CANNOT_CALL_CALLBACK "Unable to call Python callback." /* Other information. 
*/ #define MODULE_NAME "_watchdog_fsevents" /** * Event stream callback contextual information passed to * our ``watchdog_FSEventStreamCallback`` function by the * FSEvents API whenever an event occurs. */ typedef struct { /** * A pointer to the Python callback which will * will in turn be called by our event handler * with event information. The Python callback * function must accept 2 arguments, both of which * are Python lists:: * * def python_callback(event_paths, event_inodes, event_flags, event_ids): * pass */ PyObject *python_callback; /** * A pointer to the associated ``FSEventStream`` * instance. */ FSEventStreamRef stream_ref; /** * A pointer to the associated ``CFRunLoop`` * instance. */ CFRunLoopRef run_loop_ref; /** * A pointer to the state of the Python thread. */ PyThreadState *thread_state; } StreamCallbackInfo; /** * NativeEvent type so that we don't need to expose the FSEvents constants to Python land */ typedef struct { PyObject_HEAD const char *path; PyObject *inode; FSEventStreamEventFlags flags; FSEventStreamEventId id; } NativeEventObject; PyObject* NativeEventRepr(PyObject* instance) { NativeEventObject *self = (NativeEventObject*)instance; return PyUnicode_FromFormat( "NativeEvent(path=\"%s\", inode=%S, flags=%x, id=%llu)", self->path, self->inode, self->flags, self->id ); } PyObject* NativeEventTypeFlags(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; return PyLong_FromLong(self->flags); } PyObject* NativeEventTypePath(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; return PyUnicode_FromString(self->path); } PyObject* NativeEventTypeInode(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; Py_INCREF(self->inode); return self->inode; } PyObject* NativeEventTypeID(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = 
(NativeEventObject*)instance; return PyLong_FromLong(self->id); } PyObject* NativeEventTypeIsCoalesced(PyObject* instance, void* closure) { UNUSED(closure); NativeEventObject *self = (NativeEventObject*)instance; // if any of these bitmasks match then we have a coalesced event and need to do sys calls to figure out what happened FSEventStreamEventFlags coalesced_masks[] = { kFSEventStreamEventFlagItemCreated | kFSEventStreamEventFlagItemRemoved, kFSEventStreamEventFlagItemCreated | kFSEventStreamEventFlagItemRenamed, kFSEventStreamEventFlagItemRemoved | kFSEventStreamEventFlagItemRenamed, }; for (size_t i = 0; i < sizeof(coalesced_masks) / sizeof(FSEventStreamEventFlags); ++i) { if ((self->flags & coalesced_masks[i]) == coalesced_masks[i]) { Py_RETURN_TRUE; } } Py_RETURN_FALSE; } #define FLAG_PROPERTY(suffix, flag) \ PyObject* NativeEventType##suffix(PyObject* instance, void* closure) \ { \ UNUSED(closure); \ NativeEventObject *self = (NativeEventObject*)instance; \ if (self->flags & flag) { \ Py_RETURN_TRUE; \ } \ Py_RETURN_FALSE; \ } FLAG_PROPERTY(IsMustScanSubDirs, kFSEventStreamEventFlagMustScanSubDirs) FLAG_PROPERTY(IsUserDropped, kFSEventStreamEventFlagUserDropped) FLAG_PROPERTY(IsKernelDropped, kFSEventStreamEventFlagKernelDropped) FLAG_PROPERTY(IsEventIdsWrapped, kFSEventStreamEventFlagEventIdsWrapped) FLAG_PROPERTY(IsHistoryDone, kFSEventStreamEventFlagHistoryDone) FLAG_PROPERTY(IsRootChanged, kFSEventStreamEventFlagRootChanged) FLAG_PROPERTY(IsMount, kFSEventStreamEventFlagMount) FLAG_PROPERTY(IsUnmount, kFSEventStreamEventFlagUnmount) FLAG_PROPERTY(IsCreated, kFSEventStreamEventFlagItemCreated) FLAG_PROPERTY(IsRemoved, kFSEventStreamEventFlagItemRemoved) FLAG_PROPERTY(IsInodeMetaMod, kFSEventStreamEventFlagItemInodeMetaMod) FLAG_PROPERTY(IsRenamed, kFSEventStreamEventFlagItemRenamed) FLAG_PROPERTY(IsModified, kFSEventStreamEventFlagItemModified) FLAG_PROPERTY(IsItemFinderInfoMod, kFSEventStreamEventFlagItemFinderInfoMod) FLAG_PROPERTY(IsChangeOwner, 
kFSEventStreamEventFlagItemChangeOwner) FLAG_PROPERTY(IsXattrMod, kFSEventStreamEventFlagItemXattrMod) FLAG_PROPERTY(IsFile, kFSEventStreamEventFlagItemIsFile) FLAG_PROPERTY(IsDirectory, kFSEventStreamEventFlagItemIsDir) FLAG_PROPERTY(IsSymlink, kFSEventStreamEventFlagItemIsSymlink) FLAG_PROPERTY(IsOwnEvent, kFSEventStreamEventFlagOwnEvent) FLAG_PROPERTY(IsHardlink, kFSEventStreamEventFlagItemIsHardlink) FLAG_PROPERTY(IsLastHardlink, kFSEventStreamEventFlagItemIsLastHardlink) FLAG_PROPERTY(IsCloned, kFSEventStreamEventFlagItemCloned) static int NativeEventInit(NativeEventObject *self, PyObject *args, PyObject *kwds) { static char *kwlist[] = {"path", "inode", "flags", "id", NULL}; self->inode = NULL; if (!PyArg_ParseTupleAndKeywords(args, kwds, "|sOIL", kwlist, &self->path, &self->inode, &self->flags, &self->id)) { return -1; } Py_INCREF(self->inode); return 0; } static void NativeEventDealloc(NativeEventObject *self) { Py_XDECREF(self->inode); } static PyGetSetDef NativeEventProperties[] = { {"flags", NativeEventTypeFlags, NULL, "The raw mask of flags as returned by FSEvents", NULL}, {"path", NativeEventTypePath, NULL, "The path for which this event was generated", NULL}, {"inode", NativeEventTypeInode, NULL, "The inode for which this event was generated", NULL}, {"event_id", NativeEventTypeID, NULL, "The id of the generated event", NULL}, {"is_coalesced", NativeEventTypeIsCoalesced, NULL, "True if multiple ambiguous changes to the monitored path happened", NULL}, {"must_scan_subdirs", NativeEventTypeIsMustScanSubDirs, NULL, "True if application must rescan all subdirectories", NULL}, {"is_user_dropped", NativeEventTypeIsUserDropped, NULL, "True if a failure during event buffering occurred", NULL}, {"is_kernel_dropped", NativeEventTypeIsKernelDropped, NULL, "True if a failure during event buffering occurred", NULL}, {"is_event_ids_wrapped", NativeEventTypeIsEventIdsWrapped, NULL, "True if event_id wrapped around", NULL}, {"is_history_done", 
NativeEventTypeIsHistoryDone, NULL, "True if all historical events are done", NULL}, {"is_root_changed", NativeEventTypeIsRootChanged, NULL, "True if a change to one of the directories along the path to one of the directories you watch occurred", NULL}, {"is_mount", NativeEventTypeIsMount, NULL, "True if a volume is mounted underneath one of the paths being monitored", NULL}, {"is_unmount", NativeEventTypeIsUnmount, NULL, "True if a volume is unmounted underneath one of the paths being monitored", NULL}, {"is_created", NativeEventTypeIsCreated, NULL, "True if self.path was created on the filesystem", NULL}, {"is_removed", NativeEventTypeIsRemoved, NULL, "True if self.path was removed from the filesystem", NULL}, {"is_inode_meta_mod", NativeEventTypeIsInodeMetaMod, NULL, "True if meta data for self.path was modified ", NULL}, {"is_renamed", NativeEventTypeIsRenamed, NULL, "True if self.path was renamed on the filesystem", NULL}, {"is_modified", NativeEventTypeIsModified, NULL, "True if self.path was modified", NULL}, {"is_item_finder_info_modified", NativeEventTypeIsItemFinderInfoMod, NULL, "True if FinderInfo for self.path was modified", NULL}, {"is_owner_change", NativeEventTypeIsChangeOwner, NULL, "True if self.path had its ownership changed", NULL}, {"is_xattr_mod", NativeEventTypeIsXattrMod, NULL, "True if extended attributes for self.path were modified ", NULL}, {"is_file", NativeEventTypeIsFile, NULL, "True if self.path is a file", NULL}, {"is_directory", NativeEventTypeIsDirectory, NULL, "True if self.path is a directory", NULL}, {"is_symlink", NativeEventTypeIsSymlink, NULL, "True if self.path is a symbolic link", NULL}, {"is_own_event", NativeEventTypeIsOwnEvent, NULL, "True if the event originated from our own process", NULL}, {"is_hardlink", NativeEventTypeIsHardlink, NULL, "True if self.path is a hard link", NULL}, {"is_last_hardlink", NativeEventTypeIsLastHardlink, NULL, "True if self.path was the last hard link", NULL}, {"is_cloned", 
NativeEventTypeIsCloned, NULL, "True if self.path is a clone or was cloned", NULL}, {NULL, NULL, NULL, NULL, NULL}, }; static PyTypeObject NativeEventType = { PyVarObject_HEAD_INIT(NULL, 0) .tp_name = "_watchdog_fsevents.NativeEvent", .tp_doc = "A wrapper around native FSEvents events", .tp_basicsize = sizeof(NativeEventObject), .tp_itemsize = 0, .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, .tp_new = PyType_GenericNew, .tp_getset = NativeEventProperties, .tp_init = (initproc) NativeEventInit, .tp_repr = (reprfunc) NativeEventRepr, .tp_dealloc = (destructor) NativeEventDealloc, }; /** * Dictionary to keep track of which run loop * belongs to which emitter thread. */ PyObject *thread_to_run_loop = NULL; /** * Dictionary to keep track of which stream * belongs to which watch. */ PyObject *watch_to_stream = NULL; /** * PyCapsule destructor. */ static void watchdog_pycapsule_destructor(PyObject *ptr) { void *p = PyCapsule_GetPointer(ptr, NULL); if (p) { PyMem_Free(p); } } /** * Converts a ``CFStringRef`` to a Python string object. * * :param cf_string: * A ``CFStringRef``. * :returns: * A Python unicode or utf-8 encoded bytestring object. */ PyObject * CFString_AsPyUnicode(CFStringRef cf_string_ref) { if (G_IS_NULL(cf_string_ref)) { return PyUnicode_FromString(""); } PyObject *py_string; const char *c_string_ptr = CFStringGetCStringPtr(cf_string_ref, kCFStringEncodingUTF8); if (G_IS_NULL(c_string_ptr)) { CFIndex length = CFStringGetLength(cf_string_ref); CFIndex max_size = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1; char *buffer = (char *)malloc(max_size); if (CFStringGetCString(cf_string_ref, buffer, max_size, kCFStringEncodingUTF8)) { py_string = PyUnicode_FromString(buffer); } else { py_string = PyUnicode_FromString(""); } free(buffer); } else { py_string = PyUnicode_FromString(c_string_ptr); } return py_string; } /** * Converts a ``CFNumberRef`` to a Python string object. * * :param cf_number: * A ``CFNumberRef``. 
* :returns: * A Python unicode or utf-8 encoded bytestring object. */ PyObject * CFNumberRef_AsPyLong(CFNumberRef cf_number) { long c_int; PyObject *py_long; CFNumberGetValue(cf_number, kCFNumberSInt64Type, &c_int); py_long = PyLong_FromLong(c_int); return py_long; } /** * This is the callback passed to the FSEvents API, which calls * the Python callback function, in turn, by passing in event data * as Python objects. * * :param stream_ref: * A pointer to an ``FSEventStream`` instance. * :param stream_callback_info_ref: * Callback context information passed by the FSEvents API. * This contains a reference to the Python callback that this * function calls in turn with information about the events. * :param num_events: * An unsigned integer representing the number of events * captured by the FSEvents API. * :param event_paths: * An array of NUL-terminated C strings representing event paths. * :param event_flags: * An array of ``FSEventStreamEventFlags`` unsigned integral * mask values. * :param event_ids: * An array of 64-bit unsigned integers representing event * identifiers. */ static void watchdog_FSEventStreamCallback(ConstFSEventStreamRef stream_ref, StreamCallbackInfo *stream_callback_info_ref, size_t num_events, CFArrayRef event_path_info_array_ref, const FSEventStreamEventFlags event_flags[], const FSEventStreamEventId event_ids[]) { UNUSED(stream_ref); size_t i = 0; CFDictionaryRef path_info_dict; CFStringRef cf_path; CFNumberRef cf_inode; PyObject *callback_result = NULL; PyObject *path = NULL; PyObject *inode = NULL; PyObject *id = NULL; PyObject *flags = NULL; PyObject *py_event_flags = NULL; PyObject *py_event_ids = NULL; PyObject *py_event_paths = NULL; PyObject *py_event_inodes = NULL; PyThreadState *saved_thread_state = NULL; /* Acquire interpreter lock and save original thread state. 
*/ PyGILState_STATE gil_state = PyGILState_Ensure(); saved_thread_state = PyThreadState_Swap(stream_callback_info_ref->thread_state); /* Convert event flags and paths to Python ints and strings. */ py_event_paths = PyList_New(num_events); py_event_inodes = PyList_New(num_events); py_event_flags = PyList_New(num_events); py_event_ids = PyList_New(num_events); if (G_NOT(py_event_paths && py_event_inodes && py_event_flags && py_event_ids)) { Py_XDECREF(py_event_paths); Py_XDECREF(py_event_inodes); Py_XDECREF(py_event_ids); Py_XDECREF(py_event_flags); return /*NULL*/; } for (i = 0; i < num_events; ++i) { id = PyLong_FromLongLong(event_ids[i]); flags = PyLong_FromLong(event_flags[i]); path_info_dict = CFArrayGetValueAtIndex(event_path_info_array_ref, i); cf_path = CFDictionaryGetValue(path_info_dict, kFSEventStreamEventExtendedDataPathKey); cf_inode = CFDictionaryGetValue(path_info_dict, kFSEventStreamEventExtendedFileIDKey); path = CFString_AsPyUnicode(cf_path); if (G_IS_NOT_NULL(cf_inode)) { inode = CFNumberRef_AsPyLong(cf_inode); } else { Py_INCREF(Py_None); inode = Py_None; } if (G_NOT(path && inode && flags && id)) { Py_DECREF(py_event_paths); Py_DECREF(py_event_inodes); Py_DECREF(py_event_ids); Py_DECREF(py_event_flags); return /*NULL*/; } PyList_SET_ITEM(py_event_paths, i, path); PyList_SET_ITEM(py_event_inodes, i, inode); PyList_SET_ITEM(py_event_flags, i, flags); PyList_SET_ITEM(py_event_ids, i, id); } /* Call the Python callback function supplied by the stream information * struct. 
The Python callback function should accept two arguments, * both being Python lists: * * def python_callback(event_paths, event_flags, event_ids): * pass */ callback_result = \ PyObject_CallFunction(stream_callback_info_ref->python_callback, "OOOO", py_event_paths, py_event_inodes, py_event_flags, py_event_ids); if (G_IS_NULL(callback_result)) { if (G_NOT(PyErr_Occurred())) { PyErr_SetString(PyExc_ValueError, ERROR_CANNOT_CALL_CALLBACK); } CFRunLoopStop(stream_callback_info_ref->run_loop_ref); } /* Release the lock and restore thread state. */ PyThreadState_Swap(saved_thread_state); PyGILState_Release(gil_state); } /** * Converts a Python string object to an UTF-8 encoded ``CFStringRef``. * * :param py_string: * A Python unicode or utf-8 encoded bytestring object. * :returns: * A new ``CFStringRef`` with the contents of ``py_string``, or ``NULL`` if an error occurred. */ CFStringRef PyString_AsUTF8EncodedCFStringRef(PyObject *py_string) { CFStringRef cf_string = NULL; if (PyUnicode_Check(py_string)) { PyObject* helper = PyUnicode_AsUTF8String(py_string); if (!helper) { return NULL; } cf_string = CFStringCreateWithCString(kCFAllocatorDefault, PyBytes_AS_STRING(helper), kCFStringEncodingUTF8); Py_DECREF(helper); } else if (PyBytes_Check(py_string)) { PyObject *utf8 = PyUnicode_FromEncodedObject(py_string, NULL, "strict"); if (!utf8) { return NULL; } Py_DECREF(utf8); cf_string = CFStringCreateWithCString(kCFAllocatorDefault, PyBytes_AS_STRING(py_string), kCFStringEncodingUTF8); } else { PyErr_SetString(PyExc_TypeError, "Path to watch must be a string or a UTF-8 encoded bytes object."); return NULL; } return cf_string; } /** * Converts a list of Python strings to a ``CFMutableArray`` of * UTF-8 encoded ``CFString`` instances and returns a pointer to * the array. * * :param py_string_list: * List of Python strings. * :returns: * A pointer to ``CFMutableArray`` (that is, a * ``CFMutableArrayRef``) of UTF-8 encoded ``CFString`` * instances. 
*/ static CFMutableArrayRef watchdog_CFMutableArrayRef_from_PyStringList(PyObject *py_string_list) { Py_ssize_t i = 0; Py_ssize_t string_list_size = 0; CFMutableArrayRef array_of_cf_string = NULL; CFStringRef cf_string = NULL; PyObject *py_string = NULL; G_RETURN_NULL_IF_NULL(py_string_list); string_list_size = PyList_Size(py_string_list); /* Allocate a CFMutableArray. */ array_of_cf_string = CFArrayCreateMutable(kCFAllocatorDefault, 1, &kCFTypeArrayCallBacks); G_RETURN_NULL_IF_NULL(array_of_cf_string); /* Loop through the Python string list and copy strings to the * CFString array list. */ for (i = 0; i < string_list_size; ++i) { py_string = PyList_GetItem(py_string_list, i); G_RETURN_NULL_IF_NULL(py_string); cf_string = PyString_AsUTF8EncodedCFStringRef(py_string); G_RETURN_NULL_IF_NULL(cf_string); CFArraySetValueAtIndex(array_of_cf_string, i, cf_string); CFRelease(cf_string); } return array_of_cf_string; } /** * Creates an instance of ``FSEventStream`` and returns a pointer * to the instance. * * :param stream_callback_info_ref: * Pointer to the callback context information that will be * passed by the FSEvents API to the callback handler specified * by the ``callback`` argument to this function. This * information contains a reference to the Python callback that * it must call in turn passing on the event information * as Python objects to the the Python callback. * :param py_paths: * A Python list of Python strings representing path names * to monitor. * :param callback: * A function pointer of type ``FSEventStreamCallback``. * :returns: * A pointer to an ``FSEventStream`` instance (that is, it returns * an ``FSEventStreamRef``). */ static FSEventStreamRef watchdog_FSEventStreamCreate(StreamCallbackInfo *stream_callback_info_ref, PyObject *py_paths, FSEventStreamCallback callback) { CFAbsoluteTime stream_latency = 0.01; CFMutableArrayRef paths = NULL; FSEventStreamRef stream_ref = NULL; /* Check arguments. 
*/ G_RETURN_NULL_IF_NULL(py_paths); G_RETURN_NULL_IF_NULL(callback); /* Convert the Python paths list to a CFMutableArray. */ paths = watchdog_CFMutableArrayRef_from_PyStringList(py_paths); G_RETURN_NULL_IF_NULL(paths); /* Create the event stream. */ FSEventStreamContext stream_context = { 0, stream_callback_info_ref, NULL, NULL, NULL }; stream_ref = FSEventStreamCreate(kCFAllocatorDefault, callback, &stream_context, paths, kFSEventStreamEventIdSinceNow, stream_latency, kFSEventStreamCreateFlagNoDefer | kFSEventStreamCreateFlagFileEvents | kFSEventStreamCreateFlagWatchRoot | kFSEventStreamCreateFlagUseExtendedData | kFSEventStreamCreateFlagUseCFTypes); CFRelease(paths); return stream_ref; } PyDoc_STRVAR(watchdog_add_watch__doc__, MODULE_NAME ".add_watch(emitter_thread, watch, callback, paths) -> None\ \nAdds a watch into the event loop for the given emitter thread.\n\n\ :param emitter_thread:\n\ The emitter thread.\n\ :param watch:\n\ The watch to add.\n\ :param callback:\n\ The callback function to call when an event occurs.\n\n\ Example::\n\n\ def callback(paths, flags, ids):\n\ for path, flag, event_id in zip(paths, flags, ids):\n\ print(\"%d: %s=%ul\" % (event_id, path, flag))\n\ :param paths:\n\ A list of paths to monitor.\n"); static PyObject * watchdog_add_watch(PyObject *self, PyObject *args) { UNUSED(self); FSEventStreamRef stream_ref = NULL; StreamCallbackInfo *stream_callback_info_ref = NULL; CFRunLoopRef run_loop_ref = NULL; PyObject *emitter_thread = NULL; PyObject *watch = NULL; PyObject *paths_to_watch = NULL; PyObject *python_callback = NULL; PyObject *value = NULL; /* Ensure all arguments are received. */ G_RETURN_NULL_IF_NOT(PyArg_ParseTuple(args, "OOOO:schedule", &emitter_thread, &watch, &python_callback, &paths_to_watch)); /* Watch must not already be scheduled. 
*/ if(PyDict_Contains(watch_to_stream, watch) == 1) { PyErr_Format(PyExc_RuntimeError, "Cannot add watch %S - it is already scheduled", watch); return NULL; } /* Create an instance of the callback information structure. */ stream_callback_info_ref = PyMem_New(StreamCallbackInfo, 1); if(stream_callback_info_ref == NULL) { PyErr_SetString(PyExc_SystemError, "Failed allocating stream callback info"); return NULL; } /* Create an FSEvent stream and * Save the stream reference to the global watch-to-stream dictionary. */ stream_ref = watchdog_FSEventStreamCreate(stream_callback_info_ref, paths_to_watch, (FSEventStreamCallback) &watchdog_FSEventStreamCallback); if (!stream_ref) { PyMem_Del(stream_callback_info_ref); PyErr_SetString(PyExc_RuntimeError, "Failed creating fsevent stream"); return NULL; } value = PyCapsule_New(stream_ref, NULL, watchdog_pycapsule_destructor); if (!value || !PyCapsule_IsValid(value, NULL)) { PyMem_Del(stream_callback_info_ref); FSEventStreamInvalidate(stream_ref); FSEventStreamRelease(stream_ref); return NULL; } PyDict_SetItem(watch_to_stream, watch, value); /* Get a reference to the runloop for the emitter thread * or to the current runloop. */ value = PyDict_GetItem(thread_to_run_loop, emitter_thread); if (G_IS_NULL(value)) { run_loop_ref = CFRunLoopGetCurrent(); } else { run_loop_ref = PyCapsule_GetPointer(value, NULL); } /* Schedule the stream with the obtained runloop. */ FSEventStreamScheduleWithRunLoop(stream_ref, run_loop_ref, kCFRunLoopDefaultMode); /* Set the stream information for the callback. * This data will be passed to our watchdog_FSEventStreamCallback function * by the FSEvents API whenever an event occurs. */ stream_callback_info_ref->python_callback = python_callback; stream_callback_info_ref->stream_ref = stream_ref; stream_callback_info_ref->run_loop_ref = run_loop_ref; stream_callback_info_ref->thread_state = PyThreadState_Get(); Py_INCREF(python_callback); /* Start the event stream. 
*/ if (G_NOT(FSEventStreamStart(stream_ref))) { FSEventStreamInvalidate(stream_ref); FSEventStreamRelease(stream_ref); // There's no documentation on _why_ this might fail - "it ought to always succeed". But if it fails the // documentation says to "fall back to performing recursive scans of the directories [...] as appropriate". PyErr_SetString(PyExc_SystemError, "Cannot start fsevents stream. Use a kqueue or polling observer instead."); return NULL; } Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(watchdog_read_events__doc__, MODULE_NAME ".read_events(emitter_thread) -> None\n\ Blocking function that runs an event loop associated with an emitter thread.\n\n\ :param emitter_thread:\n\ The emitter thread for which the event loop will be run.\n"); static PyObject * watchdog_read_events(PyObject *self, PyObject *args) { UNUSED(self); CFRunLoopRef run_loop_ref = NULL; PyObject *emitter_thread = NULL; PyObject *value = NULL; G_RETURN_NULL_IF_NOT(PyArg_ParseTuple(args, "O:loop", &emitter_thread)); // PyEval_InitThreads() does nothing as of Python 3.7 and is deprecated in 3.9. // https://docs.python.org/3/c-api/init.html#c.PyEval_InitThreads #if PY_VERSION_HEX < 0x030700f0 PyEval_InitThreads(); #endif /* Allocate information and store thread state. */ value = PyDict_GetItem(thread_to_run_loop, emitter_thread); if (G_IS_NULL(value)) { run_loop_ref = CFRunLoopGetCurrent(); value = PyCapsule_New(run_loop_ref, NULL, watchdog_pycapsule_destructor); PyDict_SetItem(thread_to_run_loop, emitter_thread, value); Py_INCREF(emitter_thread); Py_INCREF(value); } /* No timeout, block until events. */ Py_BEGIN_ALLOW_THREADS; CFRunLoopRun(); Py_END_ALLOW_THREADS; /* Clean up state information. 
*/ if (PyDict_DelItem(thread_to_run_loop, emitter_thread) == 0) { Py_DECREF(emitter_thread); Py_INCREF(value); } G_RETURN_NULL_IF(PyErr_Occurred()); Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(watchdog_flush_events__doc__, MODULE_NAME ".flush_events(watch) -> None\n\ Flushes events for the watch.\n\n\ :param watch:\n\ The watch to flush.\n"); static PyObject * watchdog_flush_events(PyObject *self, PyObject *watch) { UNUSED(self); PyObject *value = PyDict_GetItem(watch_to_stream, watch); FSEventStreamRef stream_ref = PyCapsule_GetPointer(value, NULL); FSEventStreamFlushSync(stream_ref); Py_INCREF(Py_None); return Py_None; } PyDoc_STRVAR(watchdog_remove_watch__doc__, MODULE_NAME ".remove_watch(watch) -> None\n\ Removes a watch from the event loop.\n\n\ :param watch:\n\ The watch to remove.\n"); static PyObject * watchdog_remove_watch(PyObject *self, PyObject *watch) { UNUSED(self); PyObject *streamref_capsule = PyDict_GetItem(watch_to_stream, watch); if (!streamref_capsule) { // A watch might have been removed explicitly before, in which case we can simply early out. Py_RETURN_NONE; } PyDict_DelItem(watch_to_stream, watch); FSEventStreamRef stream_ref = PyCapsule_GetPointer(streamref_capsule, NULL); FSEventStreamStop(stream_ref); FSEventStreamInvalidate(stream_ref); FSEventStreamRelease(stream_ref); Py_RETURN_NONE; } PyDoc_STRVAR(watchdog_stop__doc__, MODULE_NAME ".stop(emitter_thread) -> None\n\ Stops running the event loop from the specified thread.\n\n\ :param emitter_thread:\n\ The thread for which the event loop will be stopped.\n"); static PyObject * watchdog_stop(PyObject *self, PyObject *emitter_thread) { UNUSED(self); PyObject *value = PyDict_GetItem(thread_to_run_loop, emitter_thread); if (G_IS_NULL(value)) { goto success; } CFRunLoopRef run_loop_ref = PyCapsule_GetPointer(value, NULL); G_RETURN_NULL_IF(PyErr_Occurred()); /* Stop the run loop. 
*/ if (G_IS_NOT_NULL(run_loop_ref)) { CFRunLoopStop(run_loop_ref); } success: Py_INCREF(Py_None); return Py_None; } /****************************************************************************** * Module initialization. *****************************************************************************/ PyDoc_STRVAR(watchdog_fsevents_module__doc__, "Low-level FSEvents Python/C API bridge."); static PyMethodDef watchdog_fsevents_methods[] = { {"add_watch", watchdog_add_watch, METH_VARARGS, watchdog_add_watch__doc__}, {"read_events", watchdog_read_events, METH_VARARGS, watchdog_read_events__doc__}, {"flush_events", watchdog_flush_events, METH_O, watchdog_flush_events__doc__}, {"remove_watch", watchdog_remove_watch, METH_O, watchdog_remove_watch__doc__}, /* Aliases for compatibility with macfsevents. */ {"schedule", watchdog_add_watch, METH_VARARGS, "Alias for add_watch."}, {"loop", watchdog_read_events, METH_VARARGS, "Alias for read_events."}, {"unschedule", watchdog_remove_watch, METH_O, "Alias for remove_watch."}, {"stop", watchdog_stop, METH_O, watchdog_stop__doc__}, {NULL, NULL, 0, NULL}, }; /** * Initialize the module globals. */ static void watchdog_module_init(void) { thread_to_run_loop = PyDict_New(); watch_to_stream = PyDict_New(); } /** * Adds various attributes to the Python module. * * :param module: * A pointer to the Python module object to inject * the attributes into. */ static void watchdog_module_add_attributes(PyObject *module) { PyObject *version_tuple = Py_BuildValue("(iii)", WATCHDOG_VERSION_MAJOR, WATCHDOG_VERSION_MINOR, WATCHDOG_VERSION_BUILD); PyModule_AddIntConstant(module, "POLLIN", kCFFileDescriptorReadCallBack); PyModule_AddIntConstant(module, "POLLOUT", kCFFileDescriptorWriteCallBack); /* Adds version information. 
*/ PyModule_AddObject(module, "__version__", version_tuple); PyModule_AddObject(module, "version_string", Py_BuildValue("s", WATCHDOG_VERSION_STRING)); } static struct PyModuleDef watchdog_fsevents_module = { PyModuleDef_HEAD_INIT, MODULE_NAME, watchdog_fsevents_module__doc__, -1, watchdog_fsevents_methods, NULL, /* m_slots */ NULL, /* m_traverse */ 0, /* m_clear */ NULL /* m_free */ }; /** * Initialize the Python 3.x module. */ PyMODINIT_FUNC PyInit__watchdog_fsevents(void){ G_RETURN_NULL_IF(PyType_Ready(&NativeEventType) < 0); PyObject *module = PyModule_Create(&watchdog_fsevents_module); G_RETURN_NULL_IF_NULL(module); Py_INCREF(&NativeEventType); if (PyModule_AddObject(module, "NativeEvent", (PyObject*)&NativeEventType) < 0) { Py_DECREF(&NativeEventType); Py_DECREF(module); return NULL; } watchdog_module_add_attributes(module); watchdog_module_init(); return module; } watchdog-3.0.0/tests/000077500000000000000000000000001440602103100144435ustar00rootroot00000000000000watchdog-3.0.0/tests/__init__.py000066400000000000000000000011331440602103100165520ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
watchdog-3.0.0/tests/conftest.py000066400000000000000000000040171440602103100166440ustar00rootroot00000000000000from __future__ import annotations import contextlib import gc import os import threading from functools import partial import pytest from .utils import ExpectEvent, Helper, P, StartWatching, TestEventQueue @pytest.fixture() def p(tmpdir, *args): """ Convenience function to join the temporary directory path with the provided arguments. """ return partial(os.path.join, tmpdir) @pytest.fixture(autouse=True) def no_thread_leaks(): """ Fail on thread leak. We do not use pytest-threadleak because it is not reliable. """ old_thread_count = threading.active_count() yield gc.collect() # Clear the stuff from other function-level fixtures assert ( threading.active_count() == old_thread_count ) # Only previously existing threads @pytest.fixture(autouse=True) def no_warnings(recwarn): """Fail on warning.""" yield warnings = [] for warning in recwarn: # pragma: no cover message = str(warning.message) filename = warning.filename if ( "Not importing directory" in message or "Using or importing the ABCs" in message or "dns.hash module will be removed in future versions" in message or "eventlet" in filename ): continue warnings.append("{w.filename}:{w.lineno} {w.message}".format(w=warning)) print(warnings) assert not warnings @pytest.fixture(name="helper") def helper_fixture(tmpdir): with contextlib.closing(Helper(tmp=os.fspath(tmpdir))) as helper: yield helper @pytest.fixture(name="p") def p_fixture(helper: Helper) -> P: return helper.joinpath @pytest.fixture(name="event_queue") def event_queue_fixture(helper: Helper) -> TestEventQueue: return helper.event_queue @pytest.fixture(name="start_watching") def start_watching_fixture(helper: Helper) -> StartWatching: return helper.start_watching @pytest.fixture(name="expect_event") def expect_event_fixture(helper: Helper) -> ExpectEvent: return helper.expect_event 
watchdog-3.0.0/tests/markers.py000066400000000000000000000003051440602103100164570ustar00rootroot00000000000000from __future__ import annotations from platform import python_implementation import pytest cpython_only = pytest.mark.skipif( python_implementation() != "CPython", reason="CPython only." ) watchdog-3.0.0/tests/shell.py000066400000000000000000000060521440602103100161270ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ :module: tests.shell :synopsis: Common shell operations for testing. 
:author: yesudeep@google.com (Yesudeep Mangalapilly) """ from __future__ import annotations import errno import os import os.path import shutil import tempfile import time # def tree(path='.', show_files=False): # print(path) # padding = '' # for root, directories, filenames in os.walk(path): # print(padding + os.path.basename(root) + os.path.sep) # padding = padding + ' ' # for filename in filenames: # print(padding + filename) def cd(path): os.chdir(path) def pwd(): path = os.getcwd() print(path) return path def mkfile(path): """Creates a file""" with open(path, "ab"): pass def mkdir(path, parents=False): """Creates a directory (optionally also creates all the parent directories in the path).""" if parents: try: os.makedirs(path) except OSError as e: if not e.errno == errno.EEXIST: raise else: os.mkdir(path) def rm(path, recursive=False): """Deletes files or directories.""" if os.path.isdir(path): if recursive: shutil.rmtree(path) # else: # os.rmdir(path) else: raise OSError(errno.EISDIR, os.strerror(errno.EISDIR), path) else: os.remove(path) def touch(path, times=None): """Updates the modified timestamp of a file or directory.""" if os.path.isdir(path): os.utime(path, times) else: with open(path, "ab"): os.utime(path, times) def truncate(path): """Truncates a file.""" with open(path, "wb"): os.utime(path, None) def mv(src_path, dest_path): """Moves files or directories.""" try: os.rename(src_path, dest_path) except OSError: # this will happen on windows os.remove(dest_path) os.rename(src_path, dest_path) def mkdtemp(): return tempfile.mkdtemp() def ls(path="."): return os.listdir(path) def msize(path): """Modify the file size without updating the modified time.""" with open(path, "w") as w: w.write("") os.utime(path, (0, 0)) time.sleep(0.4) with open(path, "w") as w: w.write("0") os.utime(path, (0, 0)) def mount_tmpfs(path): os.system(f"sudo mount -t tmpfs none {path}") def unmount(path): os.system(f"sudo umount {path}") 
watchdog-3.0.0/tests/test_0_watchmedo.py000066400000000000000000000236311440602103100202530ustar00rootroot00000000000000from __future__ import annotations import os import sys import time from unittest.mock import patch import pytest # Skip if import PyYAML failed. PyYAML missing possible because # watchdog installed without watchmedo. See Installation section # in README.rst yaml = pytest.importorskip("yaml") # noqa from yaml.constructor import ConstructorError # noqa from yaml.scanner import ScannerError # noqa from watchdog import watchmedo # noqa from watchdog.events import FileModifiedEvent, FileOpenedEvent # noqa from watchdog.tricks import AutoRestartTrick, ShellCommandTrick # noqa from watchdog.utils import WatchdogShutdown # noqa def test_load_config_valid(tmpdir): """Verifies the load of a valid yaml file""" yaml_file = os.path.join(tmpdir, "config_file.yaml") with open(yaml_file, "w") as f: f.write("one: value\ntwo:\n- value1\n- value2\n") config = watchmedo.load_config(yaml_file) assert isinstance(config, dict) assert "one" in config assert "two" in config assert isinstance(config["two"], list) assert config["one"] == "value" assert config["two"] == ["value1", "value2"] def test_load_config_invalid(tmpdir): """Verifies if safe load avoid the execution of untrusted code inside yaml files""" critical_dir = os.path.join(tmpdir, "critical") yaml_file = os.path.join(tmpdir, "tricks_file.yaml") with open(yaml_file, "w") as f: content = f'one: value\nrun: !!python/object/apply:os.system ["mkdir {critical_dir}"]\n' f.write(content) # PyYAML get_single_data() raises different exceptions for Linux and Windows with pytest.raises((ConstructorError, ScannerError)): watchmedo.load_config(yaml_file) assert not os.path.exists(critical_dir) def make_dummy_script(tmpdir, n=10): script = os.path.join(tmpdir, f"auto-test-{n}.py") with open(script, "w") as f: f.write( 'import time\nfor i in range(%d):\n\tprint("+++++ %%d" %% i, flush=True)\n\ttime.sleep(1)\n' % n ) return 
script def test_kill_auto_restart(tmpdir, capfd): script = make_dummy_script(tmpdir) a = AutoRestartTrick([sys.executable, script]) a.start() time.sleep(3) a.stop() cap = capfd.readouterr() assert "+++++ 0" in cap.out assert "+++++ 9" not in cap.out # we killed the subprocess before the end # in windows we seem to lose the subprocess stderr # assert 'KeyboardInterrupt' in cap.err def test_shell_command_wait_for_completion(tmpdir, capfd): script = make_dummy_script(tmpdir, n=1) command = " ".join([sys.executable, script]) trick = ShellCommandTrick(command, wait_for_process=True) assert not trick.is_process_running() start_time = time.monotonic() trick.on_any_event(FileModifiedEvent("foo/bar.baz")) elapsed = time.monotonic() - start_time print(capfd.readouterr()) assert not trick.is_process_running() assert elapsed >= 1 def test_shell_command_subprocess_termination_nowait(tmpdir): script = make_dummy_script(tmpdir, n=1) command = " ".join([sys.executable, script]) trick = ShellCommandTrick(command, wait_for_process=False) assert not trick.is_process_running() trick.on_any_event(FileModifiedEvent("foo/bar.baz")) assert trick.is_process_running() time.sleep(5) assert not trick.is_process_running() def test_shell_command_subprocess_termination_not_happening_on_file_opened_event( tmpdir, ): # FIXME: see issue #949, and find a way to better handle that scenario script = make_dummy_script(tmpdir, n=1) command = " ".join([sys.executable, script]) trick = ShellCommandTrick(command, wait_for_process=False) assert not trick.is_process_running() trick.on_any_event(FileOpenedEvent("foo/bar.baz")) assert not trick.is_process_running() time.sleep(5) assert not trick.is_process_running() def test_auto_restart_not_happening_on_file_opened_event(tmpdir, capfd): # FIXME: see issue #949, and find a way to better handle that scenario script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script]) trick.start() time.sleep(1) 
trick.on_any_event(FileOpenedEvent("foo/bar.baz")) trick.on_any_event(FileOpenedEvent("foo/bar2.baz")) trick.on_any_event(FileOpenedEvent("foo/bar3.baz")) time.sleep(1) trick.stop() cap = capfd.readouterr() assert cap.out.splitlines(keepends=False).count("+++++ 0") == 1 assert trick.restart_count == 0 def test_auto_restart_on_file_change(tmpdir, capfd): """Simulate changing 3 files. Expect 3 restarts. """ script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script]) trick.start() time.sleep(1) trick.on_any_event(FileModifiedEvent("foo/bar.baz")) trick.on_any_event(FileModifiedEvent("foo/bar2.baz")) trick.on_any_event(FileModifiedEvent("foo/bar3.baz")) time.sleep(1) trick.stop() cap = capfd.readouterr() assert cap.out.splitlines(keepends=False).count("+++++ 0") >= 2 assert trick.restart_count == 3 @pytest.mark.xfail( condition=sys.platform.startswith(("win", "darwin")) or sys.implementation.name == "pypy", reason="known to be problematic, see #973", ) def test_auto_restart_on_file_change_debounce(tmpdir, capfd): """Simulate changing 3 files quickly and then another change later. Expect 2 restarts due to debouncing. 
""" script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick([sys.executable, script], debounce_interval_seconds=0.5) trick.start() time.sleep(1) trick.on_any_event(FileModifiedEvent("foo/bar.baz")) trick.on_any_event(FileModifiedEvent("foo/bar2.baz")) time.sleep(0.1) trick.on_any_event(FileModifiedEvent("foo/bar3.baz")) time.sleep(1) trick.on_any_event(FileModifiedEvent("foo/bar.baz")) time.sleep(1) trick.stop() cap = capfd.readouterr() assert cap.out.splitlines(keepends=False).count("+++++ 0") == 3 assert trick.restart_count == 2 @pytest.mark.parametrize( "restart_on_command_exit", [ True, pytest.param( False, marks=pytest.mark.xfail( condition=sys.platform.startswith(("win", "darwin")), reason="known to be problematic, see #972", ), ), ], ) def test_auto_restart_subprocess_termination(tmpdir, capfd, restart_on_command_exit): """Run auto-restart with a script that terminates in about 2 seconds. After 5 seconds, expect it to have been restarted at least once. """ script = make_dummy_script(tmpdir, n=2) trick = AutoRestartTrick( [sys.executable, script], restart_on_command_exit=restart_on_command_exit ) trick.start() time.sleep(5) trick.stop() cap = capfd.readouterr() if restart_on_command_exit: assert cap.out.splitlines(keepends=False).count("+++++ 0") > 1 assert trick.restart_count >= 1 else: assert cap.out.splitlines(keepends=False).count("+++++ 0") == 1 assert trick.restart_count == 0 def test_auto_restart_arg_parsing_basic(): args = watchmedo.cli.parse_args( ["auto-restart", "-d", ".", "--recursive", "--debug-force-polling", "cmd"] ) assert args.func is watchmedo.auto_restart assert args.command == "cmd" assert args.directories == ["."] assert args.recursive assert args.debug_force_polling def test_auto_restart_arg_parsing(): args = watchmedo.cli.parse_args( [ "auto-restart", "-d", ".", "--kill-after", "12.5", "--debounce-interval=0.2", "cmd", ] ) assert args.func is watchmedo.auto_restart assert args.command == "cmd" assert args.directories == ["."] 
assert args.kill_after == pytest.approx(12.5) assert args.debounce_interval == pytest.approx(0.2) def test_shell_command_arg_parsing(): args = watchmedo.cli.parse_args(["shell-command", "--command='cmd'"]) assert args.command == "'cmd'" @pytest.mark.parametrize("cmdline", [["auto-restart", "-d", ".", "cmd"], ["log", "."]]) @pytest.mark.parametrize( "verbosity", [ ([], "WARNING"), (["-q"], "ERROR"), (["--quiet"], "ERROR"), (["-v"], "INFO"), (["--verbose"], "INFO"), (["-vv"], "DEBUG"), (["-v", "-v"], "DEBUG"), (["--verbose", "-v"], "DEBUG"), ], ) def test_valid_verbosity(cmdline, verbosity): (verbosity_cmdline_args, expected_log_level) = verbosity cmd = [cmdline[0], *verbosity_cmdline_args, *cmdline[1:]] args = watchmedo.cli.parse_args(cmd) log_level = watchmedo._get_log_level_from_args(args) assert log_level == expected_log_level @pytest.mark.parametrize("cmdline", [["auto-restart", "-d", ".", "cmd"], ["log", "."]]) @pytest.mark.parametrize( "verbosity_cmdline_args", [ ["-q", "-v"], ["-v", "-q"], ["-qq"], ["-q", "-q"], ["--quiet", "--quiet"], ["--quiet", "-q"], ["-vvv"], ["-vvvv"], ["-v", "-v", "-v"], ["-vv", "-v"], ["--verbose", "-vv"], ], ) def test_invalid_verbosity(cmdline, verbosity_cmdline_args): cmd = [cmdline[0], *verbosity_cmdline_args, *cmdline[1:]] with pytest.raises((watchmedo.LogLevelException, SystemExit)): args = watchmedo.cli.parse_args(cmd) watchmedo._get_log_level_from_args(args) @pytest.mark.parametrize("command", ["tricks-from", "tricks"]) def test_tricks_from_file(command, tmp_path): tricks_file = tmp_path / "tricks.yaml" tricks_file.write_text( """ tricks: - watchdog.tricks.LoggerTrick: patterns: ["*.py", "*.js"] """ ) args = watchmedo.cli.parse_args([command, str(tricks_file)]) checkpoint = False def mocked_sleep(_): nonlocal checkpoint checkpoint = True raise WatchdogShutdown() with patch("time.sleep", mocked_sleep): watchmedo.tricks_from(args) assert checkpoint 
watchdog-3.0.0/tests/test_delayed_queue.py000066400000000000000000000023441440602103100206720ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from time import time import pytest from watchdog.utils.delayed_queue import DelayedQueue @pytest.mark.flaky(max_runs=5, min_passes=1) def test_delayed_get(): q = DelayedQueue[str](2) q.put("", True) inserted = time() q.get() elapsed = time() - inserted # 2.10 instead of 2.05 for slow macOS slaves on Travis assert 2.10 > elapsed > 1.99 @pytest.mark.flaky(max_runs=5, min_passes=1) def test_nondelayed_get(): q = DelayedQueue[str](2) q.put("", False) inserted = time() q.get() elapsed = time() - inserted # Far less than 1 second assert elapsed < 1 watchdog-3.0.0/tests/test_emitter.py000066400000000000000000000550661440602103100175410ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import logging import os import stat import time from queue import Empty import pytest from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, ) from watchdog.utils import platform from .shell import mkdir, mkfile, mv, rm, touch from .utils import ExpectEvent, P, StartWatching, TestEventQueue logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) if platform.is_darwin(): # enable more verbose logs fsevents_logger = logging.getLogger("fsevents") fsevents_logger.setLevel(logging.DEBUG) def rerun_filter(exc, *args): time.sleep(5) if issubclass(exc[0], Empty) and platform.is_windows(): return True return False @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_create(p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent) -> None: start_watching() open(p("a"), "a").close() expect_event(FileCreatedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileOpenedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedEvent) @pytest.mark.xfail(reason="known to be problematic") @pytest.mark.skipif( not platform.is_linux(), reason="FileCloseEvent only supported in GNU/Linux" ) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_close(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: f_d = open(p("a"), "a") start_watching() f_d.close() # After file creation/open in append mode event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedEvent) event = event_queue.get(timeout=5)[0] assert 
os.path.normpath(event.src_path) == os.path.normpath(p("")) assert isinstance(event, DirModifiedEvent) # After read-only, only IN_CLOSE_NOWRITE is emitted but not caught for now #747 open(p("a"), "r").close() assert event_queue.empty() @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( platform.is_darwin() or platform.is_windows(), reason="Windows and macOS enforce proper encoding", ) def test_create_wrong_encoding(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: start_watching() open(p("a_\udce4"), "a").close() event = event_queue.get(timeout=5)[0] assert event.src_path == p("a_\udce4") assert isinstance(event, FileCreatedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert os.path.normpath(event.src_path) == os.path.normpath(p("")) assert isinstance(event, DirModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_delete(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkfile(p("a")) start_watching() rm(p("a")) expect_event(FileDeletedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_modify(p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkfile(p("a")) start_watching() touch(p("a")) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileOpenedEvent) expect_event(FileModifiedEvent(p("a"))) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("a") assert isinstance(event, FileClosedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_chmod(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkfile(p("a")) start_watching() # Note: We use S_IREAD here because chmod on Windows only # allows 
setting the read-only flag. os.chmod(p("a"), stat.S_IREAD) expect_event(FileModifiedEvent(p("a"))) # Reset permissions to allow cleanup. os.chmod(p("a"), stat.S_IWRITE) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move(p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching() mv(p("dir1", "a"), p("dir2", "b")) if not platform.is_windows(): expect_event(FileMovedEvent(p("dir1", "a"), p("dir2", "b"))) else: event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "a") assert isinstance(event, FileDeletedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "b") assert isinstance(event, FileCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_case_change( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "file")) start_watching() mv(p("dir1", "file"), p("dir2", "FILE")) if not platform.is_windows(): expect_event(FileMovedEvent(p("dir1", "file"), p("dir2", "FILE"))) else: event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "file") assert isinstance(event, FileDeletedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "FILE") assert isinstance(event, FileCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), p("dir2")] assert isinstance(event, DirModifiedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert event.src_path in [p("dir1"), 
p("dir2")] assert isinstance(event, DirModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move_to(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(p("dir2")) mv(p("dir1", "a"), p("dir2", "b")) expect_event(FileCreatedEvent(p("dir2", "b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p("dir2"))) @pytest.mark.skipif( not platform.is_linux(), reason="InotifyFullEmitter only supported in Linux" ) def test_move_to_full(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(p("dir2"), use_full_emitter=True) mv(p("dir1", "a"), p("dir2", "b")) event = event_queue.get(timeout=5)[0] assert isinstance(event, FileMovedEvent) assert event.dest_path == p("dir2", "b") assert event.src_path is None # Should equal None since the path was not watched @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_move_from(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(p("dir1")) mv(p("dir1", "a"), p("dir2", "b")) expect_event(FileDeletedEvent(p("dir1", "a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p("dir1"))) @pytest.mark.skipif( not platform.is_linux(), reason="InotifyFullEmitter only supported in Linux" ) def test_move_from_full(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: mkdir(p("dir1")) mkdir(p("dir2")) mkfile(p("dir1", "a")) start_watching(p("dir1"), use_full_emitter=True) mv(p("dir1", "a"), p("dir2", "b")) event = event_queue.get(timeout=5)[0] assert isinstance(event, FileMovedEvent) assert event.src_path == p("dir1", "a") assert event.dest_path is None # Should equal None since path not watched @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) 
def test_separate_consecutive_moves(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) mkfile(p("dir1", "a")) mkfile(p("b")) start_watching(p("dir1")) mv(p("dir1", "a"), p("c")) mv(p("b"), p("dir1", "d")) dir_modif = DirModifiedEvent(p("dir1")) a_deleted = FileDeletedEvent(p("dir1", "a")) d_created = FileCreatedEvent(p("dir1", "d")) expected_events = [a_deleted, dir_modif, d_created, dir_modif] if platform.is_windows(): expected_events = [a_deleted, d_created] if platform.is_bsd(): # Due to the way kqueue works, we can't really order # 'Created' and 'Deleted' events in time, so creation queues first expected_events = [d_created, a_deleted, dir_modif, dir_modif] for expected_event in expected_events: expect_event(expected_event) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( platform.is_bsd(), reason="BSD create another set of events for this test" ) def test_delete_self(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: mkdir(p("dir1")) emitter = start_watching(p("dir1")) rm(p("dir1"), True) expect_event(DirDeletedEvent(p("dir1"))) emitter.join(5) assert not emitter.is_alive() @pytest.mark.skipif( platform.is_windows() or platform.is_bsd(), reason="Windows|BSD create another set of events for this test", ) def test_fast_subdirectory_creation_deletion(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: root_dir = p("dir1") sub_dir = p("dir1", "subdir1") times = 30 mkdir(root_dir) start_watching(root_dir) for _ in range(times): mkdir(sub_dir) rm(sub_dir, True) time.sleep(0.1) # required for macOS emitter to catch up with us count = {DirCreatedEvent: 0, DirModifiedEvent: 0, DirDeletedEvent: 0} etype_for_dir = { DirCreatedEvent: sub_dir, DirModifiedEvent: root_dir, DirDeletedEvent: sub_dir, } for _ in range(times * 4): event = event_queue.get(timeout=5)[0] logger.debug(event) etype = type(event) count[etype] += 1 assert event.src_path == 
etype_for_dir[etype] assert count[DirCreatedEvent] >= count[DirDeletedEvent] assert ( count[DirCreatedEvent] + count[DirDeletedEvent] >= count[DirModifiedEvent] ) assert count == { DirCreatedEvent: times, DirModifiedEvent: times * 2, DirDeletedEvent: times, } @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_passing_unicode_should_give_unicode(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: start_watching(str(p())) mkfile(p("a")) event = event_queue.get(timeout=5)[0] assert isinstance(event.src_path, str) @pytest.mark.skipif( platform.is_windows(), reason="Windows ReadDirectoryChangesW supports only" " unicode for paths.", ) def test_passing_bytes_should_give_bytes(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: start_watching(p().encode()) mkfile(p("a")) event = event_queue.get(timeout=5)[0] assert isinstance(event.src_path, bytes) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_recursive_on(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: mkdir(p("dir1", "dir2", "dir3"), True) start_watching() touch(p("dir1", "dir2", "dir3", "a")) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3", "a") assert isinstance(event, FileCreatedEvent) if not platform.is_windows(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3") assert isinstance(event, DirModifiedEvent) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3", "a") assert isinstance(event, FileOpenedEvent) if not platform.is_bsd(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2", "dir3", "a") assert isinstance(event, FileModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) def test_recursive_off( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: 
ExpectEvent, ) -> None: mkdir(p("dir1")) start_watching(recursive=False) touch(p("dir1", "a")) with pytest.raises(Empty): event_queue.get(timeout=5) mkfile(p("b")) expect_event(FileCreatedEvent(p("b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) if platform.is_linux(): expect_event(FileOpenedEvent(p("b"))) expect_event(FileClosedEvent(p("b"))) # currently limiting these additional events to macOS only, see https://github.com/gorakhargosh/watchdog/pull/779 if platform.is_darwin(): mkdir(p("dir1", "dir2")) with pytest.raises(Empty): event_queue.get(timeout=5) mkfile(p("dir1", "dir2", "somefile")) with pytest.raises(Empty): event_queue.get(timeout=5) mkdir(p("dir3")) expect_event( DirModifiedEvent(p()) ) # the contents of the parent directory changed mv(p("dir1", "dir2", "somefile"), p("somefile")) expect_event(FileMovedEvent(p("dir1", "dir2", "somefile"), p("somefile"))) expect_event(DirModifiedEvent(p())) mv(p("dir1", "dir2"), p("dir2")) expect_event(DirMovedEvent(p("dir1", "dir2"), p("dir2"))) expect_event(DirModifiedEvent(p())) @pytest.mark.skipif( platform.is_windows(), reason="Windows create another set of events for this test" ) def test_renaming_top_level_directory( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: start_watching() mkdir(p("a")) expect_event(DirCreatedEvent(p("a"))) expect_event(DirModifiedEvent(p())) mkdir(p("a", "b")) expect_event(DirCreatedEvent(p("a", "b"))) expect_event(DirModifiedEvent(p("a"))) mv(p("a"), p("a2")) expect_event(DirMovedEvent(p("a"), p("a2"))) expect_event(DirModifiedEvent(p())) expect_event(DirModifiedEvent(p())) expect_event(DirMovedEvent(p("a", "b"), p("a2", "b"))) if platform.is_bsd(): expect_event(DirModifiedEvent(p())) open(p("a2", "b", "c"), "a").close() # DirModifiedEvent may emitted, but sometimes after waiting time is out. 
events = [] while True: events.append(event_queue.get(timeout=5)[0]) if event_queue.empty(): break assert all( [ isinstance( e, ( FileCreatedEvent, FileMovedEvent, FileOpenedEvent, DirModifiedEvent, FileClosedEvent, ), ) for e in events ] ) for event in events: if isinstance(event, FileCreatedEvent): assert event.src_path == p("a2", "b", "c") elif isinstance(event, FileMovedEvent): assert event.dest_path == p("a2", "b", "c") assert event.src_path == p("a", "b", "c") elif isinstance(event, DirModifiedEvent): assert event.src_path == p("a2", "b") @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( not platform.is_windows(), reason="Non-Windows create another set of events for this test", ) def test_renaming_top_level_directory_on_windows( p: P, event_queue: TestEventQueue, start_watching: StartWatching, ) -> None: start_watching() mkdir(p("a")) event = event_queue.get(timeout=5)[0] assert isinstance(event, DirCreatedEvent) assert event.src_path == p("a") mkdir(p("a", "b")) event = event_queue.get(timeout=5)[0] assert isinstance(event, DirCreatedEvent) assert event.src_path == p("a", "b") event = event_queue.get(timeout=5)[0] assert isinstance(event, DirCreatedEvent) assert event.src_path == p("a", "b") event = event_queue.get(timeout=5)[0] assert isinstance(event, DirModifiedEvent) assert event.src_path == p("a") mv(p("a"), p("a2")) event = event_queue.get(timeout=5)[0] assert isinstance(event, DirMovedEvent) assert event.src_path == p("a", "b") open(p("a2", "b", "c"), "a").close() events = [] while True: events.append(event_queue.get(timeout=5)[0]) if event_queue.empty(): break assert all( [ isinstance( e, (FileCreatedEvent, FileMovedEvent, DirMovedEvent, DirModifiedEvent) ) for e in events ] ) for event in events: if isinstance(event, FileCreatedEvent): assert event.src_path == p("a2", "b", "c") elif isinstance(event, FileMovedEvent): assert event.dest_path == p("a2", "b", "c") assert event.src_path == p("a", "b", "c") elif 
isinstance(event, DirMovedEvent): assert event.dest_path == p("a2") assert event.src_path == p("a") elif isinstance(event, DirModifiedEvent): assert event.src_path == p("a2", "b") @pytest.mark.skipif( platform.is_windows(), reason="Windows create another set of events for this test" ) def test_move_nested_subdirectories( p: P, event_queue: TestEventQueue, start_watching: StartWatching, expect_event: ExpectEvent, ) -> None: mkdir(p("dir1/dir2/dir3"), parents=True) mkfile(p("dir1/dir2/dir3", "a")) start_watching() mv(p("dir1/dir2"), p("dir2")) expect_event(DirMovedEvent(p("dir1", "dir2"), p("dir2"))) expect_event(DirModifiedEvent(p("dir1"))) expect_event(DirModifiedEvent(p())) expect_event(DirMovedEvent(p("dir1", "dir2", "dir3"), p("dir2", "dir3"))) expect_event(FileMovedEvent(p("dir1", "dir2", "dir3", "a"), p("dir2", "dir3", "a"))) if platform.is_bsd(): event = event_queue.get(timeout=5)[0] assert p(event.src_path) == p() assert isinstance(event, DirModifiedEvent) event = event_queue.get(timeout=5)[0] assert p(event.src_path) == p("dir1") assert isinstance(event, DirModifiedEvent) touch(p("dir2/dir3", "a")) if platform.is_linux(): event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2/dir3", "a") assert isinstance(event, FileOpenedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2/dir3", "a") assert isinstance(event, FileModifiedEvent) @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( not platform.is_windows(), reason="Non-Windows create another set of events for this test", ) def test_move_nested_subdirectories_on_windows( p: P, event_queue: TestEventQueue, start_watching: StartWatching, ) -> None: mkdir(p("dir1/dir2/dir3"), parents=True) mkfile(p("dir1/dir2/dir3", "a")) start_watching(p("")) mv(p("dir1/dir2"), p("dir2")) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir1", "dir2") assert isinstance(event, FileDeletedEvent) event = event_queue.get(timeout=5)[0] 
assert event.src_path == p("dir2") assert isinstance(event, DirCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "dir3") assert isinstance(event, DirCreatedEvent) event = event_queue.get(timeout=5)[0] assert event.src_path == p("dir2", "dir3", "a") assert isinstance(event, FileCreatedEvent) touch(p("dir2/dir3", "a")) events = [] while True: events.append(event_queue.get(timeout=5)[0]) if event_queue.empty(): break assert all([isinstance(e, (FileModifiedEvent, DirModifiedEvent)) for e in events]) for event in events: if isinstance(event, FileModifiedEvent): assert event.src_path == p("dir2", "dir3", "a") elif isinstance(event, DirModifiedEvent): assert event.src_path in [p("dir2"), p("dir2", "dir3")] @pytest.mark.flaky(max_runs=5, min_passes=1, rerun_filter=rerun_filter) @pytest.mark.skipif( platform.is_bsd(), reason="BSD create another set of events for this test" ) def test_file_lifecyle(p: P, start_watching: StartWatching, expect_event: ExpectEvent) -> None: start_watching() mkfile(p("a")) touch(p("a")) mv(p("a"), p("b")) rm(p("b")) expect_event(FileCreatedEvent(p("a"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) if platform.is_linux(): expect_event(FileOpenedEvent(p("a"))) expect_event(FileClosedEvent(p("a"))) expect_event(DirModifiedEvent(p())) expect_event(FileOpenedEvent(p("a"))) expect_event(FileModifiedEvent(p("a"))) if platform.is_linux(): expect_event(FileClosedEvent(p("a"))) expect_event(DirModifiedEvent(p())) expect_event(FileMovedEvent(p("a"), p("b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) expect_event(DirModifiedEvent(p())) expect_event(FileDeletedEvent(p("b"))) if not platform.is_windows(): expect_event(DirModifiedEvent(p())) watchdog-3.0.0/tests/test_events.py000066400000000000000000000122011440602103100173540ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CLOSED, EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, EVENT_TYPE_OPENED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileClosedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, FileOpenedEvent, FileSystemEventHandler, ) path_1 = "/path/xyz" path_2 = "/path/abc" def test_file_deleted_event(): event = FileDeletedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_DELETED == event.event_type assert not event.is_directory assert not event.is_synthetic def test_file_delete_event_is_directory(): # Inherited properties. 
event = FileDeletedEvent(path_1) assert not event.is_directory assert not event.is_synthetic def test_file_modified_event(): event = FileModifiedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_MODIFIED == event.event_type assert not event.is_directory assert not event.is_synthetic def test_file_modified_event_is_directory(): # Inherited Properties event = FileModifiedEvent(path_1) assert not event.is_directory assert not event.is_synthetic def test_file_created_event(): event = FileCreatedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_CREATED == event.event_type assert not event.is_directory assert not event.is_synthetic def test_file_moved_event(): event = FileMovedEvent(path_1, path_2) assert path_1 == event.src_path assert path_2 == event.dest_path assert EVENT_TYPE_MOVED == event.event_type assert not event.is_directory assert not event.is_synthetic def test_file_closed_event(): event = FileClosedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_CLOSED == event.event_type assert not event.is_directory assert not event.is_synthetic def test_file_opened_event(): event = FileOpenedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_OPENED == event.event_type assert not event.is_directory assert not event.is_synthetic def test_dir_deleted_event(): event = DirDeletedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_DELETED == event.event_type assert event.is_directory assert not event.is_synthetic def test_dir_modified_event(): event = DirModifiedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_MODIFIED == event.event_type assert event.is_directory assert not event.is_synthetic def test_dir_created_event(): event = DirCreatedEvent(path_1) assert path_1 == event.src_path assert EVENT_TYPE_CREATED == event.event_type assert event.is_directory assert not event.is_synthetic def test_file_system_event_handler_dispatch(): dir_del_event = DirDeletedEvent("/path/blah.py") file_del_event 
= FileDeletedEvent("/path/blah.txt") dir_cre_event = DirCreatedEvent("/path/blah.py") file_cre_event = FileCreatedEvent("/path/blah.txt") file_cls_event = FileClosedEvent("/path/blah.txt") file_opened_event = FileOpenedEvent("/path/blah.txt") dir_mod_event = DirModifiedEvent("/path/blah.py") file_mod_event = FileModifiedEvent("/path/blah.txt") dir_mov_event = DirMovedEvent("/path/blah.py", "/path/blah") file_mov_event = FileMovedEvent("/path/blah.txt", "/path/blah") all_events = [ dir_mod_event, dir_del_event, dir_cre_event, dir_mov_event, file_mod_event, file_del_event, file_cre_event, file_mov_event, file_cls_event, file_opened_event, ] class TestableEventHandler(FileSystemEventHandler): def on_any_event(self, event): pass def on_modified(self, event): assert event.event_type == EVENT_TYPE_MODIFIED def on_deleted(self, event): assert event.event_type == EVENT_TYPE_DELETED def on_moved(self, event): assert event.event_type == EVENT_TYPE_MOVED def on_created(self, event): assert event.event_type == EVENT_TYPE_CREATED def on_closed(self, event): assert event.event_type == EVENT_TYPE_CLOSED def on_opened(self, event): assert event.event_type == EVENT_TYPE_OPENED handler = TestableEventHandler() for event in all_events: assert not event.is_synthetic handler.dispatch(event) watchdog-3.0.0/tests/test_fsevents.py000066400000000000000000000244651440602103100177240ustar00rootroot00000000000000from __future__ import annotations import pytest from watchdog.utils import platform if not platform.is_darwin(): # noqa pytest.skip("macOS only.", allow_module_level=True) import logging import os import time from os import mkdir, rmdir from random import random from threading import Thread from time import sleep from unittest.mock import patch import _watchdog_fsevents as _fsevents # type: ignore[import] from watchdog.events import FileSystemEventHandler from watchdog.observers import Observer from watchdog.observers.api import BaseObserver, ObservedWatch from 
watchdog.observers.fsevents import FSEventsEmitter from .shell import touch from .utils import P, StartWatching, TestEventQueue logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) @pytest.fixture def observer(): obs = Observer() obs.start() yield obs obs.stop() try: obs.join() except RuntimeError: pass @pytest.mark.parametrize( "event,expectation", [ # invalid flags (_fsevents.NativeEvent("", 0, 0, 0), False), # renamed (_fsevents.NativeEvent("", 0, 0x00000800, 0), False), # renamed, removed (_fsevents.NativeEvent("", 0, 0x00000800 | 0x00000200, 0), True), # renamed, removed, created (_fsevents.NativeEvent("", 0, 0x00000800 | 0x00000200 | 0x00000100, 0), True), # renamed, removed, created, itemfindermod ( _fsevents.NativeEvent( "", 0, 0x00000800 | 0x00000200 | 0x00000100 | 0x00002000, 0 ), True, ), # xattr, removed, modified, itemfindermod ( _fsevents.NativeEvent( "", 0, 0x00008000 | 0x00000200 | 0x00001000 | 0x00002000, 0 ), False, ), ], ) def test_coalesced_event_check(event, expectation): assert event.is_coalesced == expectation def test_add_watch_twice(observer: BaseObserver, p: P) -> None: """Adding the same watch twice used to result in a null pointer return without an exception. 
See https://github.com/gorakhargosh/watchdog/issues/765 """ a = p("a") mkdir(a) h = FileSystemEventHandler() w = ObservedWatch(a, recursive=False) def callback(path, inodes, flags, ids): pass _fsevents.add_watch(h, w, callback, [w.path]) with pytest.raises(RuntimeError): _fsevents.add_watch(h, w, callback, [w.path]) _fsevents.remove_watch(w) rmdir(a) def test_watcher_deletion_while_receiving_events_1( caplog: pytest.LogCaptureFixture, p: P, start_watching: StartWatching, ) -> None: """ When the watcher is stopped while there are events, such exception could happen: Traceback (most recent call last): File "observers/fsevents.py", line 327, in events_callback self.queue_events(self.timeout, events) File "observers/fsevents.py", line 187, in queue_events src_path = self._encode_path(event.path) File "observers/fsevents.py", line 352, in _encode_path if isinstance(self.watch.path, bytes): AttributeError: 'NoneType' object has no attribute 'path' """ tmpdir = p() orig = FSEventsEmitter.events_callback def cb(*args): FSEventsEmitter.stop(emitter) orig(*args) with caplog.at_level(logging.ERROR), patch.object( FSEventsEmitter, "events_callback", new=cb ): emitter = start_watching(tmpdir) # Less than 100 is not enough events to trigger the error for n in range(100): touch(p("{}.txt".format(n))) emitter.stop() assert not caplog.records def test_watcher_deletion_while_receiving_events_2( caplog: pytest.LogCaptureFixture, p: P, start_watching: StartWatching, ) -> None: """Note: that test takes about 20 seconds to complete. 
Quite similar test to prevent another issue when the watcher is stopped while there are events, such exception could happen: Traceback (most recent call last): File "observers/fsevents.py", line 327, in events_callback self.queue_events(self.timeout, events) File "observers/fsevents.py", line 235, in queue_events self._queue_created_event(event, src_path, src_dirname) File "observers/fsevents.py", line 132, in _queue_created_event self.queue_event(cls(src_path)) File "observers/fsevents.py", line 104, in queue_event if self._watch.is_recursive: AttributeError: 'NoneType' object has no attribute 'is_recursive' """ def try_to_fail(): tmpdir = p() emitter = start_watching(tmpdir) def create_files(): # Less than 2000 is not enough events to trigger the error for n in range(2000): touch(p(f"{n}.txt")) def stop(em): sleep(random()) em.stop() th1 = Thread(target=create_files) th2 = Thread(target=stop, args=(emitter,)) try: th1.start() th2.start() th1.join() th2.join() finally: emitter.stop() # 20 attempts to make the random failure happen with caplog.at_level(logging.ERROR): for _ in range(20): try_to_fail() sleep(random()) assert not caplog.records def test_remove_watch_twice(start_watching: StartWatching) -> None: """ ValueError: PyCapsule_GetPointer called with invalid PyCapsule object The above exception was the direct cause of the following exception: src/watchdog/utils/__init__.py:92: in stop self.on_thread_stop() src/watchdog/observers/fsevents.py:73: SystemError def on_thread_stop(self): > _fsevents.remove_watch(self.watch) E SystemError: returned a result with an error set (FSEvents.framework) FSEventStreamStop(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamInvalidate(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamRelease(): failed assertion 'streamRef != NULL' """ emitter = start_watching() # This one must work emitter.stop() # This is allowed to call several times .stop() emitter.stop() def 
test_unschedule_removed_folder(observer: BaseObserver, p: P) -> None: """ TypeError: PyCObject_AsVoidPtr called with null pointer The above exception was the direct cause of the following exception: def on_thread_stop(self): if self.watch: _fsevents.remove_watch(self.watch) E SystemError: returned a result with an error set (FSEvents.framework) FSEventStreamStop(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamInvalidate(): failed assertion 'streamRef != NULL' (FSEvents.framework) FSEventStreamRelease(): failed assertion 'streamRef != NULL' """ a = p("a") mkdir(a) w = observer.schedule(FileSystemEventHandler(), a, recursive=False) rmdir(a) time.sleep(0.1) observer.unschedule(w) def test_converting_cfstring_to_pyunicode(p: P, start_watching: StartWatching, event_queue: TestEventQueue) -> None: """See https://github.com/gorakhargosh/watchdog/issues/762""" tmpdir = p() emitter = start_watching(tmpdir) dirname = "TéstClass" try: mkdir(p(dirname)) event, _ = event_queue.get() assert event.src_path.endswith(dirname) finally: emitter.stop() def test_recursive_check_accepts_relative_paths(p: P) -> None: """See https://github.com/gorakhargosh/watchdog/issues/797 The test code provided in the defect observes the current working directory using ".". Since the watch path wasn't normalized then that failed. This test emulates the scenario. """ from watchdog.events import FileCreatedEvent, FileModifiedEvent, PatternMatchingEventHandler class TestEventHandler(PatternMatchingEventHandler): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # the TestEventHandler instance is set to ignore_directories, # as such we won't get a DirModifiedEvent(p()) here. 
self.expected_events = [ FileCreatedEvent(p("foo.json")), FileModifiedEvent(p("foo.json")), ] self.observed_events = set() def on_any_event(self, event): self.expected_events.remove(event) self.observed_events.add(event) def done(self): return not self.expected_events cwd = os.getcwd() os.chdir(p()) event_handler = TestEventHandler( patterns=["*.json"], ignore_patterns=[], ignore_directories=True ) observer = Observer() observer.schedule(event_handler, ".") observer.start() time.sleep(0.1) try: touch(p("foo.json")) timeout_at = time.time() + 5 while not event_handler.done() and time.time() < timeout_at: time.sleep(0.1) assert event_handler.done() finally: os.chdir(cwd) observer.stop() observer.join() def test_watchdog_recursive(p: P) -> None: """See https://github.com/gorakhargosh/watchdog/issues/706""" import os.path from watchdog.events import FileSystemEventHandler from watchdog.observers import Observer class Handler(FileSystemEventHandler): def __init__(self): super().__init__() self.changes = [] def on_any_event(self, event): self.changes.append(os.path.basename(event.src_path)) handler = Handler() observer = Observer() watches = [observer.schedule(handler, str(p("")), recursive=True)] try: observer.start() time.sleep(0.1) touch(p("my0.txt")) mkdir(p("dir_rec")) touch(p("dir_rec", "my1.txt")) expected = {"dir_rec", "my0.txt", "my1.txt"} timeout_at = time.time() + 5 while not expected.issubset(handler.changes) and time.time() < timeout_at: time.sleep(0.2) assert expected.issubset( handler.changes ), f"Did not find expected changes. Found: {handler.changes}" finally: for watch in watches: observer.unschedule(watch) observer.stop() observer.join(1) watchdog-3.0.0/tests/test_inotify_buffer.py000066400000000000000000000107661440602103100211000ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import pytest from watchdog.utils import platform if not platform.is_linux(): # noqa pytest.skip("GNU/Linux only.", allow_module_level=True) # noqa import os import random import time from watchdog.observers.inotify_buffer import InotifyBuffer from .shell import mkdir, mount_tmpfs, mv, rm, touch, unmount def wait_for_move_event(read_event): while True: event = read_event() if isinstance(event, tuple) or event.is_move: return event @pytest.mark.timeout(5) def test_move_from(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) inotify = InotifyBuffer(p("dir1").encode()) mv(p("dir1", "a"), p("dir2", "b")) event = wait_for_move_event(inotify.read_event) assert event.is_moved_from assert event.src_path == p("dir1", "a").encode() inotify.close() @pytest.mark.timeout(5) def test_move_to(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) inotify = InotifyBuffer(p("dir2").encode()) mv(p("dir1", "a"), p("dir2", "b")) event = wait_for_move_event(inotify.read_event) assert event.is_moved_to assert event.src_path == p("dir2", "b").encode() inotify.close() @pytest.mark.timeout(5) def test_move_internal(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) inotify = InotifyBuffer(p("").encode(), recursive=True) mv(p("dir1", "a"), p("dir2", "b")) frm, to = wait_for_move_event(inotify.read_event) assert frm.src_path == p("dir1", "a").encode() assert to.src_path == p("dir2", "b").encode() inotify.close() @pytest.mark.timeout(10) def test_move_internal_batch(p): n = 100 mkdir(p("dir1")) mkdir(p("dir2")) files = [str(i) for i in 
range(n)] for f in files: touch(p("dir1", f)) inotify = InotifyBuffer(p("").encode(), recursive=True) random.shuffle(files) for f in files: mv(p("dir1", f), p("dir2", f)) # Check that all n events are paired i = 0 while i < n: frm, to = wait_for_move_event(inotify.read_event) assert os.path.dirname(frm.src_path).endswith(b"/dir1") assert os.path.dirname(to.src_path).endswith(b"/dir2") assert frm.name == to.name i += 1 inotify.close() @pytest.mark.timeout(5) def test_delete_watched_directory(p): mkdir(p("dir")) inotify = InotifyBuffer(p("dir").encode()) rm(p("dir"), recursive=True) # Wait for the event to be picked up inotify.read_event() # Ensure InotifyBuffer shuts down cleanly without raising an exception inotify.close() @pytest.mark.timeout(5) def test_unmount_watched_directory_filesystem(p): mkdir(p("dir1")) mount_tmpfs(p("dir1")) mkdir(p("dir1/dir2")) inotify = InotifyBuffer(p("dir1/dir2").encode()) unmount(p("dir1")) # Wait for the event to be picked up inotify.read_event() # Ensure InotifyBuffer shuts down cleanly without raising an exception inotify.close() assert not inotify.is_alive() def delay_call(function, seconds): def delayed(*args, **kwargs): time.sleep(seconds) return function(*args, **kwargs) return delayed class InotifyBufferDelayedRead(InotifyBuffer): def run(self, *args, **kwargs): # Introduce a delay to trigger the race condition where the file descriptor is # closed prior to a read being triggered. Ignoring type concerns since we are # intentionally doing something odd. 
self._inotify.read_events = delay_call( # type: ignore[method-assign] function=self._inotify.read_events, seconds=1 ) return super().run(*args, **kwargs) @pytest.mark.parametrize( argnames="cls", argvalues=[InotifyBuffer, InotifyBufferDelayedRead] ) def test_close_should_terminate_thread(p, cls): inotify = cls(p("").encode(), recursive=True) assert inotify.is_alive() inotify.close() assert not inotify.is_alive() watchdog-3.0.0/tests/test_inotify_c.py000066400000000000000000000125621440602103100200450ustar00rootroot00000000000000from __future__ import annotations import pytest from watchdog.utils import platform if not platform.is_linux(): # noqa pytest.skip("GNU/Linux only.", allow_module_level=True) import ctypes import errno import logging import os import struct from unittest.mock import patch from watchdog.events import DirCreatedEvent, DirDeletedEvent, DirModifiedEvent from watchdog.observers.inotify_c import Inotify, InotifyConstants, InotifyEvent from .utils import Helper, P, StartWatching, TestEventQueue logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger(__name__) def struct_inotify(wd, mask, cookie=0, length=0, name=b""): assert len(name) <= length struct_format = ( "=" # (native endianness, standard sizes) "i" # int wd "i" # uint32_t mask "i" # uint32_t cookie "i" # uint32_t len f"{length}s" # char[] name ) return struct.pack(struct_format, wd, mask, cookie, length, name) def test_late_double_deletion(helper: Helper, p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: inotify_fd = type("FD", (object,), {})() inotify_fd.last = 0 inotify_fd.wds = [] const = InotifyConstants() # CREATE DELETE CREATE DELETE DELETE_SELF IGNORE DELETE_SELF IGNORE inotify_fd.buf = ( struct_inotify( wd=1, mask=const.IN_CREATE | const.IN_ISDIR, length=16, name=b"subdir1" ) + struct_inotify( wd=1, mask=const.IN_DELETE | const.IN_ISDIR, length=16, name=b"subdir1" ) ) * 2 + ( struct_inotify(wd=2, mask=const.IN_DELETE_SELF) + 
struct_inotify(wd=2, mask=const.IN_IGNORED) + struct_inotify(wd=3, mask=const.IN_DELETE_SELF) + struct_inotify(wd=3, mask=const.IN_IGNORED) ) os_read_bkp = os.read def fakeread(fd, length): if fd is inotify_fd: result, fd.buf = fd.buf[:length], fd.buf[length:] return result return os_read_bkp(fd, length) os_close_bkp = os.close def fakeclose(fd): if fd is not inotify_fd: os_close_bkp(fd) def inotify_init(): return inotify_fd def inotify_add_watch(fd, path, mask): fd.last += 1 logger.debug(f"New wd = {fd.last}") fd.wds.append(fd.last) return fd.last def inotify_rm_watch(fd, wd): logger.debug(f"Removing wd = {wd}") fd.wds.remove(wd) return 0 # Mocks the API! from watchdog.observers import inotify_c mock1 = patch.object(os, "read", new=fakeread) mock2 = patch.object(os, "close", new=fakeclose) mock3 = patch.object(inotify_c, "inotify_init", new=inotify_init) mock4 = patch.object(inotify_c, "inotify_add_watch", new=inotify_add_watch) mock5 = patch.object(inotify_c, "inotify_rm_watch", new=inotify_rm_watch) with mock1, mock2, mock3, mock4, mock5: start_watching(p("")) # Watchdog Events for evt_cls in [DirCreatedEvent, DirDeletedEvent] * 2: event = event_queue.get(timeout=5)[0] assert isinstance(event, evt_cls) assert event.src_path == p("subdir1") event = event_queue.get(timeout=5)[0] assert isinstance(event, DirModifiedEvent) assert event.src_path == p("").rstrip(os.path.sep) helper.close() assert inotify_fd.last == 3 # Number of directories assert inotify_fd.buf == b"" # Didn't miss any event assert inotify_fd.wds == [2, 3] # Only 1 is removed explicitly @pytest.mark.parametrize( "error, patterns", [ (errno.ENOSPC, ("inotify watch limit reached",)), (errno.EMFILE, ("inotify instance limit reached",)), (errno.ENOENT, ("No such file or directory",)), # Error messages for -1 are undocumented # and vary between libc implementations. 
(-1, ("Unknown error -1", "No error information")), ], ) def test_raise_error(error, patterns): with patch.object(ctypes, "get_errno", new=lambda: error): with pytest.raises(OSError) as exc: Inotify._raise_error() assert exc.value.errno == error assert any(pattern in str(exc.value) for pattern in patterns) def test_non_ascii_path(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: """ Inotify can construct an event for a path containing non-ASCII. """ path = p("\N{SNOWMAN}") start_watching(p("")) os.mkdir(path) event, _ = event_queue.get(timeout=5) assert isinstance(event.src_path, type("")) assert event.src_path == path # Just make sure it doesn't raise an exception. assert repr(event) def test_watch_file(p: P, event_queue: TestEventQueue, start_watching: StartWatching) -> None: path = p("this_is_a_file") with open(path, "a"): pass start_watching(path) os.remove(path) event, _ = event_queue.get(timeout=5) assert repr(event) def test_event_equality(p: P) -> None: wd_parent_dir = 42 filename = "file.ext" full_path = p(filename) event1 = InotifyEvent( wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path ) event2 = InotifyEvent( wd_parent_dir, InotifyConstants.IN_CREATE, 0, filename, full_path ) event3 = InotifyEvent( wd_parent_dir, InotifyConstants.IN_ACCESS, 0, filename, full_path ) assert event1 == event2 assert event1 != event3 assert event2 != event3 watchdog-3.0.0/tests/test_logging_event_handler.py000066400000000000000000000046631440602103100224110ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, LoggingEventHandler, ) path_1 = "/path/xyz" path_2 = "/path/abc" class _TestableEventHandler(LoggingEventHandler): def on_any_event(self, event): assert True def on_modified(self, event): super().on_modified(event) assert event.event_type == EVENT_TYPE_MODIFIED def on_deleted(self, event): super().on_deleted(event) assert event.event_type == EVENT_TYPE_DELETED def on_moved(self, event): super().on_moved(event) assert event.event_type == EVENT_TYPE_MOVED def on_created(self, event): super().on_created(event) assert event.event_type == EVENT_TYPE_CREATED def test_logging_event_handler_dispatch(): # Utilities. 
dir_del_event = DirDeletedEvent("/path/blah.py") file_del_event = FileDeletedEvent("/path/blah.txt") dir_cre_event = DirCreatedEvent("/path/blah.py") file_cre_event = FileCreatedEvent("/path/blah.txt") dir_mod_event = DirModifiedEvent("/path/blah.py") file_mod_event = FileModifiedEvent("/path/blah.txt") dir_mov_event = DirMovedEvent("/path/blah.py", "/path/blah") file_mov_event = FileMovedEvent("/path/blah.txt", "/path/blah") all_events = [ dir_mod_event, dir_del_event, dir_cre_event, dir_mov_event, file_mod_event, file_del_event, file_cre_event, file_mov_event, ] handler = _TestableEventHandler() for event in all_events: handler.dispatch(event) watchdog-3.0.0/tests/test_observer.py000066400000000000000000000106301440602103100177030ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import contextlib import threading from typing import Iterator from unittest.mock import patch import pytest from watchdog.events import FileModifiedEvent, FileSystemEventHandler from watchdog.observers.api import BaseObserver, EventEmitter @pytest.fixture def observer() -> Iterator[BaseObserver]: obs = BaseObserver(EventEmitter) yield obs obs.stop() with contextlib.suppress(RuntimeError): obs.join() @pytest.fixture def observer2(): obs = BaseObserver(EventEmitter) yield obs obs.stop() with contextlib.suppress(RuntimeError): obs.join() def test_schedule_should_start_emitter_if_running(observer): observer.start() observer.schedule(None, "") (emitter,) = observer.emitters assert emitter.is_alive() def test_schedule_should_not_start_emitter_if_not_running(observer): observer.schedule(None, "") (emitter,) = observer.emitters assert not emitter.is_alive() def test_start_should_start_emitter(observer): observer.schedule(None, "") observer.start() (emitter,) = observer.emitters assert emitter.is_alive() def test_stop_should_stop_emitter(observer): observer.schedule(None, "") observer.start() (emitter,) = observer.emitters assert emitter.is_alive() observer.stop() observer.join() assert not observer.is_alive() assert not emitter.is_alive() def test_unschedule_self(observer): """ Tests that unscheduling a watch from within an event handler correctly correctly unregisters emitter and handler without deadlocking. 
""" class EventHandler(FileSystemEventHandler): def on_modified(self, event): observer.unschedule(watch) unschedule_finished.set() unschedule_finished = threading.Event() watch = observer.schedule(EventHandler(), "") observer.start() (emitter,) = observer.emitters emitter.queue_event(FileModifiedEvent("")) assert unschedule_finished.wait() assert len(observer.emitters) == 0 def test_schedule_after_unschedule_all(observer): observer.start() observer.schedule(None, "") assert len(observer.emitters) == 1 observer.unschedule_all() assert len(observer.emitters) == 0 observer.schedule(None, "") assert len(observer.emitters) == 1 def test_2_observers_on_the_same_path(observer, observer2): assert observer is not observer2 observer.schedule(None, "") assert len(observer.emitters) == 1 observer2.schedule(None, "") assert len(observer2.emitters) == 1 def test_start_failure_should_not_prevent_further_try(observer): observer.schedule(None, "") emitters = observer.emitters assert len(emitters) == 1 # Make the emitter to fail on start() def mocked_start(): raise OSError() emitter = next(iter(emitters)) with patch.object(emitter, "start", new=mocked_start): with pytest.raises(OSError): observer.start() # The emitter should be removed from the list assert len(observer.emitters) == 0 # Restoring the original behavior should work like there never be emitters observer.start() assert len(observer.emitters) == 0 # Re-scheduling the watch should work observer.schedule(None, "") assert len(observer.emitters) == 1 def test_schedule_failure_should_not_prevent_future_schedules(observer): observer.start() # Make the emitter fail on start(), and subsequently the observer to fail on schedule() def bad_start(_): raise OSError() with patch.object(EventEmitter, "start", new=bad_start), pytest.raises(OSError): observer.schedule(None, "") # The emitter should not be in the list assert not observer.emitters # Re-scheduling the watch should work observer.schedule(None, "") assert 
len(observer.emitters) == 1 watchdog-3.0.0/tests/test_observers_api.py000066400000000000000000000064111440602103100207210ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import time from pathlib import Path import pytest from watchdog.events import FileModifiedEvent, LoggingEventHandler from watchdog.observers.api import BaseObserver, EventDispatcher, EventEmitter, EventQueue, ObservedWatch def test_observer_constructor(): ObservedWatch(Path("/foobar"), True) def test_observer__eq__(): watch1 = ObservedWatch("/foobar", True) watch2 = ObservedWatch("/foobar", True) watch_ne1 = ObservedWatch("/foo", True) watch_ne2 = ObservedWatch("/foobar", False) assert watch1 == watch2 assert watch1.__eq__(watch2) assert not watch1.__eq__(watch_ne1) assert not watch1.__eq__(watch_ne2) def test_observer__ne__(): watch1 = ObservedWatch("/foobar", True) watch2 = ObservedWatch("/foobar", True) watch_ne1 = ObservedWatch("/foo", True) watch_ne2 = ObservedWatch("/foobar", False) assert not watch1.__ne__(watch2) assert watch1.__ne__(watch_ne1) assert watch1.__ne__(watch_ne2) def test_observer__repr__(): observed_watch = ObservedWatch("/foobar", True) repr_str = "" assert observed_watch.__repr__() == repr(observed_watch) assert repr(observed_watch) == repr_str def test_event_emitter(): event_queue = EventQueue() watch = ObservedWatch("/foobar", True) 
event_emitter = EventEmitter(event_queue, watch, timeout=1) event_emitter.queue_event(FileModifiedEvent("/foobar/blah")) def test_event_dispatcher(): event = FileModifiedEvent("/foobar") watch = ObservedWatch("/path", True) class TestableEventDispatcher(EventDispatcher): def dispatch_event(self, event, watch): assert True event_dispatcher = TestableEventDispatcher() event_dispatcher.event_queue.put((event, watch)) event_dispatcher.start() time.sleep(1) event_dispatcher.stop() event_dispatcher.join() def test_observer_basic(): observer = BaseObserver(EventEmitter) handler = LoggingEventHandler() watch = observer.schedule(handler, "/foobar", True) observer.add_handler_for_watch(handler, watch) observer.add_handler_for_watch(handler, watch) observer.remove_handler_for_watch(handler, watch) with pytest.raises(KeyError): observer.remove_handler_for_watch(handler, watch) observer.unschedule(watch) with pytest.raises(KeyError): observer.unschedule(watch) watch = observer.schedule(handler, "/foobar", True) observer.event_queue.put((FileModifiedEvent("/foobar"), watch)) observer.start() time.sleep(1) observer.unschedule_all() observer.stop() observer.join() watchdog-3.0.0/tests/test_observers_polling.py000066400000000000000000000102171440602103100216130ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import os from queue import Empty, Queue from time import sleep import pytest from watchdog.events import ( DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, ) from watchdog.observers.api import ObservedWatch from watchdog.observers.polling import PollingEmitter as Emitter from .shell import mkdir, mkdtemp, msize, mv, rm, touch temp_dir = mkdtemp() def p(*args): """ Convenience function to join the temporary directory path with the provided arguments. """ return os.path.join(temp_dir, *args) @pytest.fixture def event_queue(): yield Queue() @pytest.fixture def emitter(event_queue): watch = ObservedWatch(temp_dir, True) em = Emitter(event_queue, watch, timeout=0.2) em.start() yield em em.stop() em.join(5) def test___init__(event_queue, emitter): SLEEP_TIME = 0.4 sleep(SLEEP_TIME) mkdir(p("project")) sleep(SLEEP_TIME) mkdir(p("project", "blah")) sleep(SLEEP_TIME) touch(p("afile")) sleep(SLEEP_TIME) touch(p("fromfile")) sleep(SLEEP_TIME) mv(p("fromfile"), p("project", "tofile")) sleep(SLEEP_TIME) touch(p("afile")) sleep(SLEEP_TIME) mv(p("project", "blah"), p("project", "boo")) sleep(SLEEP_TIME) rm(p("project"), recursive=True) sleep(SLEEP_TIME) rm(p("afile")) sleep(SLEEP_TIME) msize(p("bfile")) sleep(SLEEP_TIME) rm(p("bfile")) sleep(SLEEP_TIME) emitter.stop() # What we need here for the tests to pass is a collection type # that is: # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. 
expected = { DirModifiedEvent(p()), DirCreatedEvent(p("project")), DirModifiedEvent(p("project")), DirCreatedEvent(p("project", "blah")), FileCreatedEvent(p("afile")), DirModifiedEvent(p()), FileCreatedEvent(p("fromfile")), DirModifiedEvent(p()), DirModifiedEvent(p()), FileModifiedEvent(p("afile")), DirModifiedEvent(p("project")), DirModifiedEvent(p()), FileDeletedEvent(p("project", "tofile")), DirDeletedEvent(p("project", "boo")), DirDeletedEvent(p("project")), DirModifiedEvent(p()), FileDeletedEvent(p("afile")), DirModifiedEvent(p()), FileCreatedEvent(p("bfile")), FileModifiedEvent(p("bfile")), DirModifiedEvent(p()), FileDeletedEvent(p("bfile")), } expected.add(FileMovedEvent(p("fromfile"), p("project", "tofile"))) expected.add(DirMovedEvent(p("project", "blah"), p("project", "boo"))) got = set() while True: try: event, _ = event_queue.get_nowait() got.add(event) except Empty: break assert expected == got def test_delete_watched_dir(event_queue, emitter): SLEEP_TIME = 0.4 rm(p(""), recursive=True) sleep(SLEEP_TIME) emitter.stop() # What we need here for the tests to pass is a collection type # that is: # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. expected = { DirDeletedEvent(os.path.dirname(p(""))), } got = set() while True: try: event, _ = event_queue.get_nowait() got.add(event) except Empty: break assert expected == got watchdog-3.0.0/tests/test_observers_winapi.py000066400000000000000000000105551440602103100214430ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os import os.path import sys from queue import Empty, Queue from time import sleep import pytest from watchdog.events import DirCreatedEvent, DirMovedEvent from watchdog.observers.api import ObservedWatch from .shell import mkdir, mkdtemp, mv, rm # make pytest aware this is windows only if not sys.platform.startswith("win"): pytest.skip("Windows only.", allow_module_level=True) # make mypy aware this is windows only and provide a clear runtime error just in case assert sys.platform.startswith("win"), f"{__name__} requires Windows" from watchdog.observers.read_directory_changes import WindowsApiEmitter # noqa: E402 SLEEP_TIME = 2 # Path with non-ASCII temp_dir = os.path.join(mkdtemp(), "Strange \N{SNOWMAN}") os.makedirs(temp_dir) def p(*args): """ Convenience function to join the temporary directory path with the provided arguments. """ return os.path.join(temp_dir, *args) @pytest.fixture def event_queue(): yield Queue() @pytest.fixture def emitter(event_queue): watch = ObservedWatch(temp_dir, True) em = WindowsApiEmitter(event_queue, watch, timeout=0.2) yield em em.stop() def test___init__(event_queue, emitter): emitter.start() sleep(SLEEP_TIME) mkdir(p("fromdir")) sleep(SLEEP_TIME) mv(p("fromdir"), p("todir")) sleep(SLEEP_TIME) emitter.stop() # What we need here for the tests to pass is a collection type # that is: # * unordered # * non-unique # A multiset! Python's collections.Counter class seems appropriate. 
expected = { DirCreatedEvent(p("fromdir")), DirMovedEvent(p("fromdir"), p("todir")), } got = set() while True: try: event, _ = event_queue.get_nowait() except Empty: break else: got.add(event) assert expected == got def test_root_deleted(event_queue, emitter): r"""Test the event got when removing the watched folder. The regression to prevent is: Exception in thread Thread-1: Traceback (most recent call last): File "watchdog\observers\winapi.py", line 333, in read_directory_changes ctypes.byref(nbytes), None, None) File "watchdog\observers\winapi.py", line 105, in _errcheck_bool raise ctypes.WinError() PermissionError: [WinError 5] Access refused. During handling of the above exception, another exception occurred: Traceback (most recent call last): File "C:\Python37-32\lib\threading.py", line 926, in _bootstrap_inner self.run() File "watchdog\observers\api.py", line 145, in run self.queue_events(self.timeout) File "watchdog\observers\read_directory_changes.py", line 76, in queue_events winapi_events = self._read_events() File "watchdog\observers\read_directory_changes.py", line 73, in _read_events return read_events(self._handle, self.watch.path, self.watch.is_recursive) File "watchdog\observers\winapi.py", line 387, in read_events buf, nbytes = read_directory_changes(handle, path, recursive) File "watchdog\observers\winapi.py", line 340, in read_directory_changes return _generate_observed_path_deleted_event() File "watchdog\observers\winapi.py", line 298, in _generate_observed_path_deleted_event event = FILE_NOTIFY_INFORMATION(0, FILE_ACTION_DELETED_SELF, len(path), path.value) TypeError: expected bytes, str found """ emitter.start() sleep(SLEEP_TIME) # This should not fail rm(p(), recursive=True) sleep(SLEEP_TIME) # The emitter is automatically stopped, with no error assert not emitter.should_keep_running() watchdog-3.0.0/tests/test_pattern_matching_event_handler.py000066400000000000000000000145171440602103100243110ustar00rootroot00000000000000# Copyright 2011 
Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, PatternMatchingEventHandler, ) from watchdog.utils.patterns import filter_paths path_1 = "/path/xyz" path_2 = "/path/abc" g_allowed_patterns = ["*.py", "*.txt"] g_ignore_patterns = ["*.foo"] def assert_patterns(event): if hasattr(event, "dest_path"): paths = [event.src_path, event.dest_path] else: paths = [event.src_path] filtered_paths = filter_paths( paths, included_patterns=["*.py", "*.txt"], excluded_patterns=["*.pyc"], case_sensitive=False, ) assert filtered_paths def test_dispatch(): # Utilities. 
patterns = ["*.py", "*.txt"] ignore_patterns = ["*.pyc"] dir_del_event_match = DirDeletedEvent("/path/blah.py") dir_del_event_not_match = DirDeletedEvent("/path/foobar") dir_del_event_ignored = DirDeletedEvent("/path/foobar.pyc") file_del_event_match = FileDeletedEvent("/path/blah.txt") file_del_event_not_match = FileDeletedEvent("/path/foobar") file_del_event_ignored = FileDeletedEvent("/path/blah.pyc") dir_cre_event_match = DirCreatedEvent("/path/blah.py") dir_cre_event_not_match = DirCreatedEvent("/path/foobar") dir_cre_event_ignored = DirCreatedEvent("/path/foobar.pyc") file_cre_event_match = FileCreatedEvent("/path/blah.txt") file_cre_event_not_match = FileCreatedEvent("/path/foobar") file_cre_event_ignored = FileCreatedEvent("/path/blah.pyc") dir_mod_event_match = DirModifiedEvent("/path/blah.py") dir_mod_event_not_match = DirModifiedEvent("/path/foobar") dir_mod_event_ignored = DirModifiedEvent("/path/foobar.pyc") file_mod_event_match = FileModifiedEvent("/path/blah.txt") file_mod_event_not_match = FileModifiedEvent("/path/foobar") file_mod_event_ignored = FileModifiedEvent("/path/blah.pyc") dir_mov_event_match = DirMovedEvent("/path/blah.py", "/path/blah") dir_mov_event_not_match = DirMovedEvent("/path/foobar", "/path/blah") dir_mov_event_ignored = DirMovedEvent("/path/foobar.pyc", "/path/blah") file_mov_event_match = FileMovedEvent("/path/blah.txt", "/path/blah") file_mov_event_not_match = FileMovedEvent("/path/foobar", "/path/blah") file_mov_event_ignored = FileMovedEvent("/path/blah.pyc", "/path/blah") all_dir_events = [ dir_mod_event_match, dir_mod_event_not_match, dir_mod_event_ignored, dir_del_event_match, dir_del_event_not_match, dir_del_event_ignored, dir_cre_event_match, dir_cre_event_not_match, dir_cre_event_ignored, dir_mov_event_match, dir_mov_event_not_match, dir_mov_event_ignored, ] all_file_events = [ file_mod_event_match, file_mod_event_not_match, file_mod_event_ignored, file_del_event_match, file_del_event_not_match, file_del_event_ignored, 
file_cre_event_match, file_cre_event_not_match, file_cre_event_ignored, file_mov_event_match, file_mov_event_not_match, file_mov_event_ignored, ] all_events = all_file_events + all_dir_events def assert_check_directory(handler, event): assert not (handler.ignore_directories and event.is_directory) class TestableEventHandler(PatternMatchingEventHandler): def on_any_event(self, event): assert_check_directory(self, event) def on_modified(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MODIFIED assert_patterns(event) def on_deleted(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_DELETED assert_patterns(event) def on_moved(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MOVED assert_patterns(event) def on_created(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_CREATED assert_patterns(event) no_dirs_handler = TestableEventHandler( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=True ) handler = TestableEventHandler( patterns=patterns, ignore_patterns=ignore_patterns, ignore_directories=False ) for event in all_events: no_dirs_handler.dispatch(event) for event in all_events: handler.dispatch(event) def test_handler(): handler1 = PatternMatchingEventHandler(g_allowed_patterns, g_ignore_patterns, True) handler2 = PatternMatchingEventHandler(g_allowed_patterns, g_ignore_patterns, False) assert handler1.patterns == g_allowed_patterns assert handler1.ignore_patterns == g_ignore_patterns assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_directories(): handler1 = PatternMatchingEventHandler(g_allowed_patterns, g_ignore_patterns, True) handler2 = PatternMatchingEventHandler(g_allowed_patterns, g_ignore_patterns, False) assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_patterns(): handler1 = 
PatternMatchingEventHandler(g_allowed_patterns, g_ignore_patterns, True) assert handler1.ignore_patterns == g_ignore_patterns def test_patterns(): handler1 = PatternMatchingEventHandler(g_allowed_patterns, g_ignore_patterns, True) assert handler1.patterns == g_allowed_patterns watchdog-3.0.0/tests/test_patterns.py000066400000000000000000000047461440602103100177270ustar00rootroot00000000000000# Copyright (C) 2010 Yesudeep Mangalapilly # Copyright 2020 Boris Staletic from __future__ import annotations import pytest from watchdog.utils.patterns import _match_path, filter_paths, match_any_paths @pytest.mark.parametrize( "input, included_patterns, excluded_patterns, case_sensitive, expected", [ ("/users/gorakhargosh/foobar.py", {"*.py"}, {"*.PY"}, True, True), ("/users/gorakhargosh/foobar.py", {"*.py"}, {"*.PY"}, True, True), ("/users/gorakhargosh/", {"*.py"}, {"*.txt"}, False, False), ("/users/gorakhargosh/foobar.py", {"*.py"}, {"*.PY"}, False, ValueError), ], ) def test_match_path( input, included_patterns, excluded_patterns, case_sensitive, expected ): if expected == ValueError: with pytest.raises(expected): _match_path(input, included_patterns, excluded_patterns, case_sensitive) else: assert ( _match_path(input, included_patterns, excluded_patterns, case_sensitive) is expected ) @pytest.mark.parametrize( "included_patterns, excluded_patterns, case_sensitive, expected", [ (None, None, True, None), (None, None, False, None), ( ["*.py", "*.conf"], ["*.status"], True, {"/users/gorakhargosh/foobar.py", "/etc/pdnsd.conf"}, ), ], ) def test_filter_paths(included_patterns, excluded_patterns, case_sensitive, expected): pathnames = { "/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python", } actual = set( filter_paths(pathnames, included_patterns, excluded_patterns, case_sensitive) ) assert actual == expected if expected else pathnames @pytest.mark.parametrize( "included_patterns, excluded_patterns, case_sensitive, expected", [ 
(None, None, True, True), (None, None, False, True), (["*py", "*.conf"], ["*.status"], True, True), (["*.txt"], None, False, False), (["*.txt"], None, True, False), ], ) def test_match_any_paths( included_patterns, excluded_patterns, case_sensitive, expected ): pathnames = { "/users/gorakhargosh/foobar.py", "/var/cache/pdnsd.status", "/etc/pdnsd.conf", "/usr/local/bin/python", } assert ( match_any_paths(pathnames, included_patterns, excluded_patterns, case_sensitive) == expected ) watchdog-3.0.0/tests/test_regex_matching_event_handler.py000066400000000000000000000200751440602103100237420ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from watchdog.events import ( EVENT_TYPE_CREATED, EVENT_TYPE_DELETED, EVENT_TYPE_MODIFIED, EVENT_TYPE_MOVED, DirCreatedEvent, DirDeletedEvent, DirModifiedEvent, DirMovedEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent, FileMovedEvent, LoggingEventHandler, RegexMatchingEventHandler, ) path_1 = "/path/xyz" path_2 = "/path/abc" g_allowed_regexes = [r".*\.py", r".*\.txt"] g_allowed_str_regexes = r".*\.py" g_ignore_regexes = [r".*\.pyc"] def test_dispatch(): # Utilities. 
regexes = [r".*\.py", r".*\.txt"] ignore_regexes = [r".*\.pyc"] def assert_regexes(handler, event): if hasattr(event, "dest_path"): paths = [event.src_path, event.dest_path] else: paths = [event.src_path] filtered_paths = set() for p in paths: if any(r.match(p) for r in handler.regexes): filtered_paths.add(p) assert filtered_paths dir_del_event_match = DirDeletedEvent("/path/blah.py") dir_del_event_not_match = DirDeletedEvent("/path/foobar") dir_del_event_ignored = DirDeletedEvent("/path/foobar.pyc") file_del_event_match = FileDeletedEvent("/path/blah.txt") file_del_event_not_match = FileDeletedEvent("/path/foobar") file_del_event_ignored = FileDeletedEvent("/path/blah.pyc") dir_cre_event_match = DirCreatedEvent("/path/blah.py") dir_cre_event_not_match = DirCreatedEvent("/path/foobar") dir_cre_event_ignored = DirCreatedEvent("/path/foobar.pyc") file_cre_event_match = FileCreatedEvent("/path/blah.txt") file_cre_event_not_match = FileCreatedEvent("/path/foobar") file_cre_event_ignored = FileCreatedEvent("/path/blah.pyc") dir_mod_event_match = DirModifiedEvent("/path/blah.py") dir_mod_event_not_match = DirModifiedEvent("/path/foobar") dir_mod_event_ignored = DirModifiedEvent("/path/foobar.pyc") file_mod_event_match = FileModifiedEvent("/path/blah.txt") file_mod_event_not_match = FileModifiedEvent("/path/foobar") file_mod_event_ignored = FileModifiedEvent("/path/blah.pyc") dir_mov_event_match = DirMovedEvent("/path/blah.py", "/path/blah") dir_mov_event_not_match = DirMovedEvent("/path/foobar", "/path/blah") dir_mov_event_ignored = DirMovedEvent("/path/foobar.pyc", "/path/blah") file_mov_event_match = FileMovedEvent("/path/blah.txt", "/path/blah") file_mov_event_not_match = FileMovedEvent("/path/foobar", "/path/blah") file_mov_event_ignored = FileMovedEvent("/path/blah.pyc", "/path/blah") all_dir_events = [ dir_mod_event_match, dir_mod_event_not_match, dir_mod_event_ignored, dir_del_event_match, dir_del_event_not_match, dir_del_event_ignored, dir_cre_event_match, 
dir_cre_event_not_match, dir_cre_event_ignored, dir_mov_event_match, dir_mov_event_not_match, dir_mov_event_ignored, ] all_file_events = [ file_mod_event_match, file_mod_event_not_match, file_mod_event_ignored, file_del_event_match, file_del_event_not_match, file_del_event_ignored, file_cre_event_match, file_cre_event_not_match, file_cre_event_ignored, file_mov_event_match, file_mov_event_not_match, file_mov_event_ignored, ] all_events = all_file_events + all_dir_events def assert_check_directory(handler, event): assert not (handler.ignore_directories and event.is_directory) class TestableEventHandler(RegexMatchingEventHandler): def on_any_event(self, event): assert_check_directory(self, event) def on_modified(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MODIFIED assert_regexes(self, event) def on_deleted(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_DELETED assert_regexes(self, event) def on_moved(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_MOVED assert_regexes(self, event) def on_created(self, event): assert_check_directory(self, event) assert event.event_type == EVENT_TYPE_CREATED assert_regexes(self, event) no_dirs_handler = TestableEventHandler( regexes=regexes, ignore_regexes=ignore_regexes, ignore_directories=True ) handler = TestableEventHandler( regexes=regexes, ignore_regexes=ignore_regexes, ignore_directories=False ) for event in all_events: no_dirs_handler.dispatch(event) for event in all_events: handler.dispatch(event) def test_handler(): handler1 = RegexMatchingEventHandler(g_allowed_regexes, g_ignore_regexes, True) handler2 = RegexMatchingEventHandler(g_allowed_regexes, g_ignore_regexes, False) assert [r.pattern for r in handler1.regexes] == g_allowed_regexes assert [r.pattern for r in handler1.ignore_regexes] == g_ignore_regexes assert handler1.ignore_directories assert not handler2.ignore_directories def 
test_ignore_directories(): handler1 = RegexMatchingEventHandler(g_allowed_regexes, g_ignore_regexes, True) handler2 = RegexMatchingEventHandler(g_allowed_regexes, g_ignore_regexes, False) assert handler1.ignore_directories assert not handler2.ignore_directories def test_ignore_regexes(): handler1 = RegexMatchingEventHandler(g_allowed_regexes, g_ignore_regexes, True) assert [r.pattern for r in handler1.ignore_regexes] == g_ignore_regexes def test_regexes(): handler1 = RegexMatchingEventHandler(g_allowed_regexes, g_ignore_regexes, True) assert [r.pattern for r in handler1.regexes] == g_allowed_regexes def test_str_regexes(): handler1 = RegexMatchingEventHandler(g_allowed_str_regexes, g_ignore_regexes, True) assert [r.pattern for r in handler1.regexes] == [g_allowed_str_regexes] def test_logging_event_handler_dispatch(): class _TestableEventHandler(LoggingEventHandler): def on_any_event(self, event): assert True def on_modified(self, event): super().on_modified(event) assert event.event_type == EVENT_TYPE_MODIFIED def on_deleted(self, event): super().on_deleted(event) assert event.event_type == EVENT_TYPE_DELETED def on_moved(self, event): super().on_moved(event) assert event.event_type == EVENT_TYPE_MOVED def on_created(self, event): super().on_created(event) assert event.event_type == EVENT_TYPE_CREATED # Utilities. 
dir_del_event = DirDeletedEvent("/path/blah.py") file_del_event = FileDeletedEvent("/path/blah.txt") dir_cre_event = DirCreatedEvent("/path/blah.py") file_cre_event = FileCreatedEvent("/path/blah.txt") dir_mod_event = DirModifiedEvent("/path/blah.py") file_mod_event = FileModifiedEvent("/path/blah.txt") dir_mov_event = DirMovedEvent("/path/blah.py", "/path/blah") file_mov_event = FileMovedEvent("/path/blah.txt", "/path/blah") all_events = [ dir_mod_event, dir_del_event, dir_cre_event, dir_mov_event, file_mod_event, file_del_event, file_cre_event, file_mov_event, ] handler = _TestableEventHandler() for event in all_events: handler.dispatch(event) watchdog-3.0.0/tests/test_skip_repeats_queue.py000066400000000000000000000052011440602103100217470ustar00rootroot00000000000000# Copyright 2011 Yesudeep Mangalapilly # Copyright 2012 Google, Inc & contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import annotations import pytest import watchdog.events as events from watchdog.utils.bricks import SkipRepeatsQueue from .markers import cpython_only def basic_actions(): q = SkipRepeatsQueue() e1 = (2, "fred") e2 = (2, "george") e3 = (4, "sally") q.put(e1) q.put(e2) q.put(e3) assert e1 == q.get() assert e2 == q.get() assert e3 == q.get() assert q.empty() def test_basic_queue(): basic_actions() def test_allow_nonconsecutive(): q = SkipRepeatsQueue() e1 = (2, "fred") e2 = (2, "george") q.put(e1) q.put(e2) q.put(e1) # repeat the first entry assert e1 == q.get() assert e2 == q.get() assert e1 == q.get() assert q.empty() def test_put_with_watchdog_events(): # FileSystemEvent.__ne__() uses the key property without # doing any type checking. Since _last_item is set to # None in __init__(), an AttributeError is raised when # FileSystemEvent.__ne__() tries to use None.key queue = SkipRepeatsQueue() dummy_file = "dummy.txt" event = events.FileCreatedEvent(dummy_file) queue.put(event) assert queue.get() is event def test_prevent_consecutive(): q = SkipRepeatsQueue() e1 = (2, "fred") e2 = (2, "george") q.put(e1) q.put(e1) # repeat the first entry (this shouldn't get added) q.put(e2) assert e1 == q.get() assert e2 == q.get() assert q.empty() def test_consecutives_allowed_across_empties(): q = SkipRepeatsQueue() e1 = (2, "fred") q.put(e1) q.put(e1) # repeat the first entry (this shouldn't get added) assert e1 == q.get() assert q.empty() q.put(e1) # this repeat is allowed because 'last' added is now gone from queue assert e1 == q.get() assert q.empty() @cpython_only def test_eventlet_monkey_patching(): try: import eventlet # type: ignore[import] except Exception: pytest.skip("eventlet not installed") eventlet.monkey_patch() basic_actions() watchdog-3.0.0/tests/test_snapshot_diff.py000066400000000000000000000154771440602103100207210ustar00rootroot00000000000000# Copyright 2014 Thomas Amland # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import errno import os import pickle import time from unittest.mock import patch from watchdog.utils import platform from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff, EmptyDirectorySnapshot from .shell import mkdir, mv, rm, touch def wait(): """ Wait long enough for file/folder mtime to change. This is needed to be able to detected modifications. """ if platform.is_darwin() or platform.is_windows(): # on macOS resolution of stat.mtime is only 1 second time.sleep(1.5) else: time.sleep(0.5) def test_pickle(p): """It should be possible to pickle a snapshot.""" mkdir(p("dir1")) snasphot = DirectorySnapshot(p("dir1")) pickle.dumps(snasphot) def test_move_to(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("dir2")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p("dir2"))) assert diff.files_created == [p("dir2", "b")] def test_move_from(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("dir1")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p("dir1"))) assert diff.files_deleted == [p("dir1", "a")] def test_move_internal(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.files_moved == [(p("dir1", "a"), p("dir2", "b"))] assert diff.files_created == [] 
assert diff.files_deleted == [] def test_move_replace(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) touch(p("dir2", "b")) ref = DirectorySnapshot(p("")) mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.files_moved == [(p("dir1", "a"), p("dir2", "b"))] assert diff.files_deleted == [p("dir2", "b")] assert diff.files_created == [] def test_dir_modify_on_create(p): ref = DirectorySnapshot(p("")) wait() touch(p("a")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.dirs_modified == [p("")] def test_dir_modify_on_move(p): mkdir(p("dir1")) mkdir(p("dir2")) touch(p("dir1", "a")) ref = DirectorySnapshot(p("")) wait() mv(p("dir1", "a"), p("dir2", "b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert set(diff.dirs_modified) == {p("dir1"), p("dir2")} def test_detect_modify_for_moved_files(p): touch(p("a")) ref = DirectorySnapshot(p("")) wait() touch(p("a")) mv(p("a"), p("b")) diff = DirectorySnapshotDiff(ref, DirectorySnapshot(p(""))) assert diff.files_moved == [(p("a"), p("b"))] assert diff.files_modified == [p("a")] def test_replace_dir_with_file(p): # Replace a dir with a file of the same name just before the normal listdir # call and ensure it doesn't cause an exception def listdir_fcn(path): if path == p("root", "dir"): rm(path, recursive=True) touch(path) return os.scandir(path) mkdir(p("root")) mkdir(p("root", "dir")) # Should NOT raise an OSError (ENOTDIR) DirectorySnapshot(p("root"), listdir=listdir_fcn) def test_permission_error(p): # Test that unreadable folders are not raising exceptions mkdir(p("a", "b", "c"), parents=True) ref = DirectorySnapshot(p("")) walk_orig = DirectorySnapshot.walk def walk(self, root): """Generate a permission error on folder "a/b".""" # Generate the permission error if root.startswith(p("a", "b")): raise OSError(errno.EACCES, os.strerror(errno.EACCES)) # Mimic the original method yield from walk_orig(self, root) with 
patch.object(DirectorySnapshot, "walk", new=walk): # Should NOT raise an OSError (EACCES) new_snapshot = DirectorySnapshot(p("")) diff = DirectorySnapshotDiff(ref, new_snapshot) assert repr(diff) # Children of a/b/ are no more accessible and so removed in the new snapshot assert diff.dirs_deleted == [(p("a", "b", "c"))] def test_ignore_device(p): # Create a file and take a snapshot. touch(p("file")) ref = DirectorySnapshot(p("")) wait() inode_orig = DirectorySnapshot.inode inode_times = 0 def inode(self, path): # This function will always return a different device_id, # even for the same file. nonlocal inode_times result = inode_orig(self, path) inode_times += 1 return result[0], result[1] + inode_times # Set the custom inode function. with patch.object(DirectorySnapshot, "inode", new=inode): # If we make the diff of the same directory, since by default the # DirectorySnapshotDiff compares the snapshots using the device_id (and it will # be different), it thinks that the same file has been deleted and created again. snapshot = DirectorySnapshot(p("")) diff_with_device = DirectorySnapshotDiff(ref, snapshot) assert diff_with_device.files_deleted == [(p("file"))] assert diff_with_device.files_created == [(p("file"))] # Otherwise, if we choose to ignore the device, the file will not be detected as # deleted and re-created. snapshot = DirectorySnapshot(p("")) diff_without_device = DirectorySnapshotDiff(ref, snapshot, ignore_device=True) assert diff_without_device.files_deleted == [] assert diff_without_device.files_created == [] def test_empty_snapshot(p): # Create a file and declare a DirectorySnapshot and a EmptyDirectorySnapshot. # When we make the diff, although both objects were declared with the same items on # the directory, the file and directories created BEFORE the DirectorySnapshot will # be detected as newly created. 
touch(p("a")) mkdir(p("b", "c"), parents=True) ref = DirectorySnapshot(p("")) empty = EmptyDirectorySnapshot() diff = DirectorySnapshotDiff(empty, ref) assert diff.files_created == [p("a")] assert sorted(diff.dirs_created) == sorted([p(""), p("b"), p("b", "c")]) watchdog-3.0.0/tests/utils.py000066400000000000000000000067641440602103100161720ustar00rootroot00000000000000from __future__ import annotations import dataclasses import os import sys from queue import Empty, Queue from typing import List, Optional, Tuple, Type, Union from watchdog.events import FileSystemEvent from watchdog.observers.api import EventEmitter, ObservedWatch from watchdog.utils import Protocol Emitter: Type[EventEmitter] if sys.platform.startswith("linux"): from watchdog.observers.inotify import InotifyEmitter as Emitter from watchdog.observers.inotify import InotifyFullEmitter elif sys.platform.startswith("darwin"): from watchdog.observers.fsevents import FSEventsEmitter as Emitter elif sys.platform.startswith("win"): from watchdog.observers.read_directory_changes import WindowsApiEmitter as Emitter elif sys.platform.startswith(("dragonfly", "freebsd", "netbsd", "openbsd", "bsd")): from watchdog.observers.kqueue import KqueueEmitter as Emitter class P(Protocol): def __call__(self, *args: str) -> str: ... class StartWatching(Protocol): def __call__( self, path: Optional[Union[str, bytes]] = ..., use_full_emitter: bool = ..., recursive: bool = ..., ) -> EventEmitter: ... class ExpectEvent(Protocol): def __call__(self, expected_event: FileSystemEvent, timeout: float = ...) -> None: ... 
TestEventQueue = Union["Queue[Tuple[FileSystemEvent, ObservedWatch]]"] @dataclasses.dataclass() class Helper: tmp: str emitters: List[EventEmitter] = dataclasses.field(default_factory=list) event_queue: TestEventQueue = dataclasses.field(default_factory=Queue) def joinpath(self, *args: str) -> str: return os.path.join(self.tmp, *args) def start_watching( self, path: Optional[Union[str, bytes]] = None, use_full_emitter: bool = False, recursive: bool = True, ) -> EventEmitter: # todo: check if other platforms expect the trailing slash (e.g. `p('')`) path = self.tmp if path is None else path emitter: EventEmitter if sys.platform.startswith("linux") and use_full_emitter: emitter = InotifyFullEmitter( self.event_queue, ObservedWatch(path, recursive=recursive) ) else: emitter = Emitter(self.event_queue, ObservedWatch(path, recursive=recursive)) self.emitters.append(emitter) if sys.platform.startswith("darwin"): # TODO: I think this could be better... .suppress_history should maybe # become a common attribute. from watchdog.observers.fsevents import FSEventsEmitter assert isinstance(emitter, FSEventsEmitter) emitter.suppress_history = True emitter.start() return emitter def expect_event(self, expected_event: FileSystemEvent, timeout: float = 2) -> None: """Utility function to wait up to `timeout` seconds for an `event_type` for `path` to show up in the queue. Provides some robustness for the otherwise flaky nature of asynchronous notifications. 
""" try: event = self.event_queue.get(timeout=timeout)[0] assert event == expected_event except Empty: raise def close(self) -> None: for emitter in self.emitters: emitter.stop() for emitter in self.emitters: if emitter.is_alive(): emitter.join(5) alive = [emitter.is_alive() for emitter in self.emitters] self.emitters = [] assert alive == [False] * len(alive) watchdog-3.0.0/tools/000077500000000000000000000000001440602103100144415ustar00rootroot00000000000000watchdog-3.0.0/tools/dump_fsevents_constants.py000077500000000000000000000016101440602103100217720ustar00rootroot00000000000000# coding: utf-8 import sys from io import StringIO import FSEvents # type: ignore header = """ File generated by watchdog/scripts/dump_mac_constants.py class Constants: """ def dump_constants(header): output = StringIO() output.write(header) for attribute in dir(FSEvents): value = getattr(FSEvents, attribute) if attribute.startswith("k") and isinstance(value, int): output.write(f" {attribute} = {hex(value)}\n") content = output.getvalue() output.close() return content def write_constants_to_file(filename): content = dump_constants(header) with open(filename, "wb") as f: f.write(content) if __name__ == "__main__": if len(sys.argv) > 1: output_file = sys.argv[1] else: print("Usage: scripts/dump_mac_constants.py ") sys.exit(1) write_constants_to_file(output_file) watchdog-3.0.0/tools/watchmedo.bat000066400000000000000000000017571440602103100171160ustar00rootroot00000000000000@REM Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation @REM Copyright 2011 Yesudeep Mangalapilly @REM Copyright 2012 Google, Inc & contributors. @REM watchmedo.bat - Wrapper .bat file for the watchmedo Python script. @echo off set SCRIPT_ERRORLEVEL= if "%OS%" == "Windows_NT" goto WinNT @REM Windows 9x/Me you better not have more than 9 arguments. 
python -c "from watchdog import watchmedo; watchmedo.main()" %1 %2 %3 %4 %5 %6 %7 %8 %9 @REM No way to set exit status of this script for 9x/Me goto endscript @REM Windows NT+ :WinNT setlocal set path=%~dp0;%~dp0..;%path% python -c "from watchdog import watchmedo; watchmedo.main()" %* endlocal & set SCRIPT_ERRORLEVEL=%ERRORLEVEL% if not "%COMSPEC%" == "%SystemRoot%\system32\cmd.exe" goto returncode if errorlevel 9009 echo You do not have python in your PATH environment variable. goto endscript :returncode exit /B %SCRIPT_ERRORLEVEL% :endscript call :returncode %SCRIPT_ERRORLEVEL% watchdog-3.0.0/tox.ini000066400000000000000000000016301440602103100146140ustar00rootroot00000000000000[tox] envlist = py{311,310,39,38,37,36,py3} docs mypy skip_missing_interpreters = True [testenv] usedevelop = true deps = -r requirements-tests.txt extras = watchmedo commands = python -bb -m pytest {posargs} [testenv:flake8] usedevelop = true deps = -r requirements-tests.txt extras = watchmedo commands = python -m flake8 docs tools src tests setup.py [testenv:docs] usedevelop = true deps = -r requirements-tests.txt extras = watchmedo commands = sphinx-build -aEWb html docs/source docs/build/html [testenv:mypy] usedevelop = true deps = -r requirements-tests.txt commands = mypy [testenv:isort] usedevelop = true deps = -r requirements-tests.txt commands = isort src/watchdog/ tests/ *.py [testenv:isort-ci] usedevelop = {[testenv:isort]usedevelop} deps = {[testenv:isort]deps} commands = isort --diff --check-only src/watchdog/ tests/ *.py