pax_global_header 0000666 0000000 0000000 00000000064 14176067602 0014523 g ustar 00root root 0000000 0000000 52 comment=e42549cb70796d0577c97be96a09bca0056a5755
python-zeroconf-0.38.3/ 0000775 0000000 0000000 00000000000 14176067602 0014762 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/.coveragerc 0000664 0000000 0000000 00000000135 14176067602 0017102 0 ustar 00root root 0000000 0000000 [report]
exclude_lines =
pragma: no cover
if TYPE_CHECKING:
if sys.version_info
python-zeroconf-0.38.3/.github/ 0000775 0000000 0000000 00000000000 14176067602 0016322 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/.github/workflows/ 0000775 0000000 0000000 00000000000 14176067602 0020357 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/.github/workflows/ci.yml 0000664 0000000 0000000 00000004300 14176067602 0021472 0 ustar 00root root 0000000 0000000 name: CI
on:
push:
branches:
- master
pull_request:
branches:
- "**"
jobs:
build:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
python-version: [3.7, 3.8, 3.9, "3.10", "pypy-3.7"]
include:
- os: ubuntu-latest
venvcmd: . env/bin/activate
- os: macos-latest
venvcmd: . env/bin/activate
- os: windows-latest
venvcmd: env\Scripts\Activate.ps1
steps:
- uses: actions/checkout@v2
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v2
with:
python-version: '3.x'
architecture: 'x64'
- uses: actions/cache@v2
id: cache
with:
path: env
key: ${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/Makefile') }}-${{ hashFiles('**/requirements-dev.txt') }}
restore-keys: |
${{ runner.os }}-pip-${{ matrix.python-version }}-${{ hashFiles('**/Makefile') }}
- name: Install dependencies
if: steps.cache.outputs.cache-hit != 'true'
run: |
python -m venv env
${{ matrix.venvcmd }}
pip install --upgrade -r requirements-dev.txt pytest-github-actions-annotate-failures
- name: Validate readme
if: ${{ runner.os == 'Linux' && matrix.python-version != 'pypy3' }}
run: |
${{ matrix.venvcmd }}
python -m readme_renderer README.rst -o -
- name: Run flake8
if: ${{ runner.os == 'Linux' && matrix.python-version != 'pypy3' }}
run: |
${{ matrix.venvcmd }}
make flake8
- name: Run mypy
if: ${{ runner.os == 'Linux' && matrix.python-version != 'pypy3' }}
run: |
${{ matrix.venvcmd }}
make mypy
- name: Run black_check
if: ${{ runner.os == 'Linux' && matrix.python-version != 'pypy3' }}
run: |
${{ matrix.venvcmd }}
make black_check
- name: Run tests
run: |
${{ matrix.venvcmd }}
make test_coverage
- name: Report coverage to Codecov
uses: codecov/codecov-action@v1
python-zeroconf-0.38.3/.gitignore 0000664 0000000 0000000 00000000232 14176067602 0016747 0 ustar 00root root 0000000 0000000 build/
*.pyc
*.pyo
Thumbs.db
.DS_Store
.project
.pydevproject
.settings
.idea
.vslick
.cache
.mypy_cache/
docs/_build/
.vscode
/dist/
/zeroconf.egg-info/
python-zeroconf-0.38.3/COPYING 0000664 0000000 0000000 00000057506 14176067602 0016032 0 ustar 00root root 0000000 0000000 GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
python-zeroconf-0.38.3/MANIFEST.in 0000664 0000000 0000000 00000000043 14176067602 0016515 0 ustar 00root root 0000000 0000000 include README.rst
include COPYING
python-zeroconf-0.38.3/Makefile 0000664 0000000 0000000 00000002625 14176067602 0016427 0 ustar 00root root 0000000 0000000 # version: 1.1
.PHONY: all virtualenv
MAX_LINE_LENGTH=110
PYTHON_IMPLEMENTATION:=$(shell python -c "import sys;import platform;sys.stdout.write(platform.python_implementation())")
PYTHON_VERSION:=$(shell python -c "import sys;sys.stdout.write('%d.%d' % sys.version_info[:2])")
LINT_TARGETS:=flake8
ifneq ($(findstring PyPy,$(PYTHON_IMPLEMENTATION)),PyPy)
LINT_TARGETS:=$(LINT_TARGETS) mypy black_check pylint
endif
virtualenv: ./env/requirements.built
env:
python -m venv env
./env/requirements.built: env requirements-dev.txt
./env/bin/pip install -r requirements-dev.txt
cp requirements-dev.txt ./env/requirements.built
.PHONY: ci
ci: lint test_coverage
.PHONY: lint
lint: $(LINT_TARGETS)
flake8:
flake8 --max-line-length=$(MAX_LINE_LENGTH) setup.py examples zeroconf
pylint:
pylint zeroconf
.PHONY: black_check
black_check:
black --check setup.py examples zeroconf
mypy:
# --no-warn-redundant-casts --no-warn-unused-ignores is needed since we support multiple python versions
# We should be able to drop this once python 3.6 goes away
mypy --no-warn-redundant-casts --no-warn-unused-ignores examples/*.py zeroconf
test:
pytest --durations=20 --timeout=60 -v tests
test_coverage:
pytest --durations=20 --timeout=60 -v --cov=zeroconf --cov-branch --cov-report xml --cov-report html --cov-report term-missing tests
autopep8:
autopep8 --max-line-length=$(MAX_LINE_LENGTH) -i setup.py examples zeroconf
python-zeroconf-0.38.3/README.rst 0000664 0000000 0000000 00000120456 14176067602 0016461 0 ustar 00root root 0000000 0000000 python-zeroconf
===============
.. image:: https://github.com/jstasiak/python-zeroconf/workflows/CI/badge.svg
:target: https://github.com/jstasiak/python-zeroconf?query=workflow%3ACI+branch%3Amaster
.. image:: https://img.shields.io/pypi/v/zeroconf.svg
:target: https://pypi.python.org/pypi/zeroconf
.. image:: https://codecov.io/gh/jstasiak/python-zeroconf/branch/master/graph/badge.svg
:target: https://codecov.io/gh/jstasiak/python-zeroconf
`Documentation `_.
This is fork of pyzeroconf, Multicast DNS Service Discovery for Python,
originally by Paul Scott-Murphy (https://github.com/paulsm/pyzeroconf),
modified by William McBrine (https://github.com/wmcbrine/pyzeroconf).
The original William McBrine's fork note::
This fork is used in all of my TiVo-related projects: HME for Python
(and therefore HME/VLC), Network Remote, Remote Proxy, and pyTivo.
Before this, I was tracking the changes for zeroconf.py in three
separate repos. I figured I should have an authoritative source.
Although I make changes based on my experience with TiVos, I expect that
they're generally applicable. This version also includes patches found
on the now-defunct (?) Launchpad repo of pyzeroconf, and elsewhere
around the net -- not always well-documented, sorry.
Compatible with:
* Bonjour
* Avahi
Compared to some other Zeroconf/Bonjour/Avahi Python packages, python-zeroconf:
* isn't tied to Bonjour or Avahi
* doesn't use D-Bus
* doesn't force you to use particular event loop or Twisted (asyncio is used under the hood but not required)
* is pip-installable
* has PyPI distribution
Python compatibility
--------------------
* CPython 3.7+
* PyPy3.7 7.3+
Versioning
----------
This project's versions follow the following pattern: MAJOR.MINOR.PATCH.
* MAJOR version has been 0 so far
* MINOR version is incremented on backward incompatible changes
* PATCH version is incremented on backward compatible changes
Status
------
This project is actively maintained.
Traffic Reduction
-----------------
Before version 0.32, most traffic reduction techniques described in https://datatracker.ietf.org/doc/html/rfc6762#section-7
where not implemented which could lead to excessive network traffic. It is highly recommended that version 0.32 or later
is used if this is a concern.
IPv6 support
------------
IPv6 support is relatively new and currently limited, specifically:
* `InterfaceChoice.All` is an alias for `InterfaceChoice.Default` on non-POSIX
systems.
* Dual-stack IPv6 sockets are used, which may not be supported everywhere (some
BSD variants do not have them).
* Listening on localhost (`::1`) does not work. Help with understanding why is
appreciated.
How to get python-zeroconf?
===========================
* PyPI page https://pypi.python.org/pypi/zeroconf
* GitHub project https://github.com/jstasiak/python-zeroconf
The easiest way to install python-zeroconf is using pip::
pip install zeroconf
How do I use it?
================
Here's an example of browsing for a service:
.. code-block:: python
from zeroconf import ServiceBrowser, Zeroconf
class MyListener:
def remove_service(self, zeroconf, type, name):
print("Service %s removed" % (name,))
def add_service(self, zeroconf, type, name):
info = zeroconf.get_service_info(type, name)
print("Service %s added, service info: %s" % (name, info))
zeroconf = Zeroconf()
listener = MyListener()
browser = ServiceBrowser(zeroconf, "_http._tcp.local.", listener)
try:
input("Press enter to exit...\n\n")
finally:
zeroconf.close()
.. note::
Discovery and service registration use *all* available network interfaces by default.
If you want to customize that you need to specify ``interfaces`` argument when
constructing ``Zeroconf`` object (see the code for details).
If you don't know the name of the service you need to browse for, try:
.. code-block:: python
from zeroconf import ZeroconfServiceTypes
print('\n'.join(ZeroconfServiceTypes.find()))
See examples directory for more.
Changelog
=========
0.38.3
======
Version bump only, no changes from 0.38.2
0.38.2
======
* Make decode errors more helpful in finding the source of the bad data (#1052) @bdraco
0.38.1
======
* Improve performance of query scheduler (#1043) @bdraco
* Avoid linear type searches in ServiceBrowsers (#1044) @bdraco
0.38.0
======
* Handle Service types that end with another service type (#1041) @apworks1
Backwards incompatible:
* Dropped Python 3.6 support (#1009) @bdraco
0.37.0
======
Technically backwards incompatible:
* Adding a listener that does not inherit from RecordUpdateListener now logs an error (#1034) @bdraco
* The NotRunningException exception is now thrown when Zeroconf is not running (#1033) @bdraco
Before this change the consumer would get a timeout or an EventLoopBlocked
exception when calling `ServiceInfo.*request` when the instance had already been shutdown
or had failed to startup.
* The EventLoopBlocked exception is now thrown when a coroutine times out (#1032) @bdraco
Previously `concurrent.futures.TimeoutError` would have been raised
instead. This is never expected to happen during normal operation.
0.36.13
=======
* Unavailable interfaces are now skipped during socket bind (#1028) @bdraco
* Downgraded incoming corrupt packet logging to debug (#1029) @bdraco
Warning about network traffic we have no control over is confusing
to users as they think there is something wrong with zeroconf
0.36.12
=======
* Prevented service lookups from deadlocking if time abruptly moves backwards (#1006) @bdraco
The typical reason time moves backwards is via an ntp update
0.36.11
=======
No functional changes from 0.36.10. This release corrects an error in the README.rst file
that prevented the build from uploading to PyPI
0.36.10
=======
* scope_id is now stripped from IPv6 addresses if given (#1020) @StevenLooman
cpython 3.9 allows a suffix %scope_id in IPv6Address. This caused an error
with the existing code if it was not stripped
* Optimized decoding labels from incoming packets (#1019) @bdraco
0.36.9
======
* Ensure ServiceInfo orders newest addresses first (#1012) @bdraco
This change effectively restored the behavior before 1s cache flush
expire behavior described in rfc6762 section 10.2 was added for callers that rely on this.
0.36.8
======
* Fixed ServiceBrowser infinite loop when zeroconf is closed before it is canceled (#1008) @bdraco
0.36.7
======
* Improved performance of responding to queries (#994) (#996) (#997) @bdraco
* Improved log message when receiving an invalid or corrupt packet (#998) @bdraco
0.36.6
======
* Improved performance of sending outgoing packets (#990) @bdraco
0.36.5
======
* Reduced memory usage for incoming and outgoing packets (#987) @bdraco
0.36.4
======
* Improved performance of constructing outgoing packets (#978) (#979) @bdraco
* Deferred parsing of incoming packets when it can be avoided (#983) @bdraco
0.36.3
======
* Improved performance of parsing incoming packets (#975) @bdraco
0.36.2
======
* Include NSEC records for non-existent types when responding with addresses (#972) (#971) @bdraco
Implements RFC6762 sec 6.2 (http://datatracker.ietf.org/doc/html/rfc6762#section-6.2)
0.36.1
======
* Skip goodbye packets for addresses when there is another service registered with the same name (#968) @bdraco
If a ServiceInfo that used the same server name as another ServiceInfo
was unregistered, goodbye packets would be sent for the addresses and
would cause the other service to be seen as offline.
* Fixed equality and hash for dns records with the unique bit (#969) @bdraco
These records should have the same hash and equality since
the unique bit (cache flush bit) is not considered when adding or removing
the records from the cache.
0.36.0
======
Technically backwards incompatible:
* Fill incomplete IPv6 tuples to avoid WinError on windows (#965) @lokesh2019
Fixed #932
0.35.1
======
* Only reschedule types if the send next time changes (#958) @bdraco
When the PTR response was seen again, the timer was being canceled and
rescheduled even if the timer was for the same time. While this did
not cause any breakage, it is quite inefficient.
* Cache DNS record and question hashes (#960) @bdraco
The hash was being recalculated every time the object
was being used in a set or dict. Since the hashes are
effectively immutable, we only calculate them once now.
0.35.0
======
* Reduced chance of accidental synchronization of ServiceInfo requests (#955) @bdraco
* Sort aggregated responses to increase chance of name compression (#954) @bdraco
Technically backwards incompatible:
* Send unicast replies on the same socket the query was received (#952) @bdraco
When replying to a QU question, we do not know if the sending host is reachable
from all of the sending sockets. We now avoid this problem by replying via
the receiving socket. This was the existing behavior when `InterfaceChoice.Default`
is set.
This change extends the unicast reply behavior used with `InterfaceChoice.Default`
to apply when `InterfaceChoice.All` or interfaces are explicitly passed when
instantiating a `Zeroconf` instance.
Fixes #951
0.34.3
======
* Fix sending immediate multicast responses (#949) @bdraco
0.34.2
======
* Coalesce aggregated multicast answers (#945) @bdraco
When the random delay is shorter than the last scheduled response,
answers are now added to the same outgoing time group.
This reduces traffic when we already know we will be sending a group of answers
inside the random delay window described in
datatracker.ietf.org/doc/html/rfc6762#section-6.3
* Ensure ServiceInfo requests can be answered inside the default timeout with network protection (#946) @bdraco
Adjust the time windows to ensure responses that have triggered the
protection against excessive packet flooding due to
software bugs or malicious attack described in RFC6762 section 6
can respond in under 1350ms to ensure ServiceInfo can ask two
questions within the default timeout of 3000ms
0.34.1
======
* Ensure multicast aggregation sends responses within 620ms (#942) @bdraco
Responses that trigger the protection against excessive
packet flooding due to software bugs or malicious attack described
in RFC6762 section 6 could cause the multicast aggregation response
to be delayed longer than 620ms (The maximum random delay of 120ms
and 500ms additional for aggregation).
Only responses that trigger the protection are delayed longer than 620ms
0.34.0
======
* Implemented Multicast Response Aggregation (#940) @bdraco
Responses are now aggregated when possible per rules in RFC6762
section 6.4
Responses that trigger the protection against excessive
packet flooding due to software bugs or malicious attack described
in RFC6762 section 6 are delayed instead of being discarded as it was
causing responders that implement Passive Observation Of Failures
(POOF) to evict the records.
Probe responses are now always sent immediately as there were cases
where they would fail to be answered in time to defend a name.
0.33.4
======
* Ensure zeroconf can be loaded when the system disables IPv6 (#933) @che0
0.33.3
======
* Added support for forward dns compression pointers (#934) @bdraco
* Provide sockname when logging a protocol error (#935) @bdraco
0.33.2
======
* Handle duplicate goodbye answers in the same packet (#928) @bdraco
Solves an exception being thrown when we tried to remove the known answer
from the cache when the second goodbye answer in the same packet was processed
Fixed #926
* Skip ipv6 interfaces that return ENODEV (#930) @bdraco
0.33.1
======
* Version number change only with less restrictive directory permissions
Fixed #923
0.33.0
======
This release eliminates all threading locks as all non-threadsafe operations
now happen in the event loop.
* Let connection_lost close the underlying socket (#918) @bdraco
The socket was closed during shutdown before asyncio's connection_lost
handler had a chance to close it which resulted in a traceback on
windows.
Fixed #917
Technically backwards incompatible:
* Removed duplicate unregister_all_services code (#910) @bdraco
Calling Zeroconf.close from same asyncio event loop zeroconf is running in
will now skip unregister_all_services and log a warning as this a blocking
operation and is not async safe and never has been.
Use AsyncZeroconf instead, or for legacy code call async_unregister_all_services before Zeroconf.close
0.32.1
======
* Increased timeout in ServiceInfo.request to handle loaded systems (#895) @bdraco
It can take a few seconds for a loaded system to run the `async_request`
coroutine when the event loop is busy, or the system is CPU bound (example being
Home Assistant startup). We now add an additional `_LOADED_SYSTEM_TIMEOUT` (10s)
to the `run_coroutine_threadsafe` calls to ensure the coroutine has the total
amount of time to run up to its internal timeout (default of 3000ms).
Ten seconds is a bit large of a timeout; however, it is only used in cases
where we wrap other timeouts. We now expect the only instance the
`run_coroutine_threadsafe` result timeout will happen in a production
circumstance is when someone is running a `ServiceInfo.request()` in a thread and
another thread calls `Zeroconf.close()` at just the right moment that the future
is never completed unless the system is so loaded that it is nearly unresponsive.
The timeout for `run_coroutine_threadsafe` is the maximum time a thread can
cleanly shut down when zeroconf is closed out in another thread, which should
always be longer than the underlying thread operation.
0.32.0
======
This release offers 100% line and branch coverage.
* Made ServiceInfo first question QU (#852) @bdraco
We want an immediate response when requesting with ServiceInfo
by asking a QU question; most responders will not delay the response
and respond right away to our question. This also improves compatibility
with split networks as we may not have been able to see the response
otherwise. If the responder has not multicast the record recently,
it may still choose to do so in addition to responding via unicast
Reduces traffic when there are multiple zeroconf instances running
on the network running ServiceBrowsers
If we don't get an answer on the first try, we ask a QM question
in the event we can't receive a unicast response for some reason
This change puts ServiceInfo inline with ServiceBrowser which
also asks the first question as QU since ServiceInfo is commonly
called from ServiceBrowser callbacks
* Limited duplicate packet suppression to 1s intervals (#841) @bdraco
Only suppress duplicate packets that happen within the same
second. Legitimate queriers will retry the question if they
are suppressed. The limit was reduced to one second to be
in line with rfc6762
* Made multipacket known answer suppression per interface (#836) @bdraco
The suppression was happening per instance of Zeroconf instead
of per interface. Since the same network can be seen on multiple
interfaces (usually wifi and ethernet), this would confuse the
multi-packet known answer suppression since it was not expecting
to get the same data more than once
* New ServiceBrowsers now request QU in the first outgoing when unspecified (#812) @bdraco
https://datatracker.ietf.org/doc/html/rfc6762#section-5.4
When we start a ServiceBrowser and zeroconf has just started up, the known
answer list will be small. By asking a QU question first, it is likely
that we have a large known answer list by the time we ask the QM question
a second later (current default which is likely too low but would be
a breaking change to increase). This reduces the amount of traffic on
the network, and has the secondary advantage that most responders will
answer a QU question without the typical delay answering QM questions.
* IPv6 link-local addresses are now qualified with scope_id (#343) @ibygrave
When a service is advertised on an IPv6 address where
the scope is link local, i.e. fe80::/64 (see RFC 4007)
the resolved IPv6 address must be extended with the
scope_id that identifies through the "%" symbol the
local interface to be used when routing to that address.
A new API `parsed_scoped_addresses()` is provided to
return qualified addresses to avoid breaking compatibility
on the existing parsed_addresses().
* Network adapters that are disconnected are now skipped (#327) @ZLJasonG
* Fixed listeners missing initial packets if Engine starts too quickly (#387) @bdraco
When manually creating a zeroconf.Engine object, it is no longer started automatically.
It must manually be started by calling .start() on the created object.
The Engine thread is now started after all the listeners have been added to avoid a
race condition where packets could be missed at startup.
* Fixed answering matching PTR queries with the ANY query (#618) @bdraco
* Fixed lookup of uppercase names in the registry (#597) @bdraco
If the ServiceInfo was registered with an uppercase name and the query was
for a lowercase name, it would not be found and vice-versa.
* Fixed unicast responses from any source port (#598) @bdraco
Unicast responses were only being sent if the source port
was 53, this prevented responses when testing with dig:
dig -p 5353 @224.0.0.251 media-12.local
The above query will now see a response
* Fixed queries for AAAA records not being answered (#616) @bdraco
* Removed second level caching from ServiceBrowsers (#737) @bdraco
The ServiceBrowser had its own cache of the last time it
saw a service that was reimplementing the DNSCache and
presenting a source of truth problem that led to unexpected
queries when the two disagreed.
* Fixed server cache not being case-insensitive (#731) @bdraco
If the server name had uppercase chars and any of the
matching records were lowercase, the server would not be
found
* Fixed cache handling of records with different TTLs (#729) @bdraco
There should only be one unique record in the cache at
a time as having multiple unique records with different
TTLs in the cache can result in unexpected behavior since
some functions returned all matching records and some
fetched from the right side of the list to return the
newest record. Instead we now store the records in a dict
to ensure that the newest record always replaces the same
unique record, and we never have a source of truth problem
determining the TTL of a record from the cache.
* Fixed ServiceInfo with multiple A records (#725) @bdraco
If there were multiple A records for the host, ServiceInfo
would always return the last one that was in the incoming
packet, which was usually not the one that was wanted.
* Fixed stale unique records expiring too quickly (#706) @bdraco
Records now expire 1s in the future instead of instant removal.
tools.ietf.org/html/rfc6762#section-10.2
Queriers receiving a Multicast DNS response with a TTL of zero SHOULD
NOT immediately delete the record from the cache, but instead record
a TTL of 1 and then delete the record one second later. In the case
of multiple Multicast DNS responders on the network described in
Section 6.6 above, if one of the responders shuts down and
incorrectly sends goodbye packets for its records, it gives the other
cooperating responders one second to send out their own response to
"rescue" the records before they expire and are deleted.
* Fixed exception when unregistering a service multiple times (#679) @bdraco
* Added an AsyncZeroconfServiceTypes to mirror ZeroconfServiceTypes to zeroconf.asyncio (#658) @bdraco
* Fixed interface_index_to_ip6_address not skipping ipv4 adapters (#651) @bdraco
* Added async_unregister_all_services to AsyncZeroconf (#649) @bdraco
* Fixed services not being removed from the registry when calling unregister_all_services (#644) @bdraco
There was a race condition where a query could be answered for a service
in the registry while goodbye packets were being sent, which could result
in a fresh record being broadcast after the goodbye if a query came in at
just the right time. To avoid this, we now remove the services from the
registry right after we generate the goodbye packet
* Fixed zeroconf exception on load when the system disables IPv6 (#624) @bdraco
* Fixed the QU bit missing from probe queries (#609) @bdraco
The bit should be set per
datatracker.ietf.org/doc/html/rfc6762#section-8.1
* Fixed the TC bit missing for query packets where the known answers span multiple packets (#494) @bdraco
* Fixed packets not being properly separated when exceeding maximum size (#498) @bdraco
Ensure that questions that exceed the max packet size are
moved to the next packet. This fixes DNSQuestions being
sent in multiple packets in violation of:
datatracker.ietf.org/doc/html/rfc6762#section-7.2
Ensure only one resource record is sent when a record
exceeds _MAX_MSG_TYPICAL
datatracker.ietf.org/doc/html/rfc6762#section-17
* Fixed PTR questions asked in uppercase not being answered (#465) @bdraco
* Added Support for context managers in Zeroconf and AsyncZeroconf (#284) @shenek
* Implemented an AsyncServiceBrowser to complement the sync ServiceBrowser (#429) @bdraco
* Added async_get_service_info to AsyncZeroconf and async_request to AsyncServiceInfo (#408) @bdraco
* Implemented allowing passing in a sync Zeroconf instance to AsyncZeroconf (#406) @bdraco
* Fixed IPv6 setup under MacOS when binding to "" (#392) @bdraco
* Fixed ZeroconfServiceTypes.find not always cancels the ServiceBrowser (#389) @bdraco
There was a short window where the ServiceBrowser thread
could be left running after Zeroconf is closed because
the .join() was never waited for when a new Zeroconf
object was created
* Fixed duplicate packets triggering duplicate updates (#376) @bdraco
If TXT or SRV records update was already processed and then
received again, it was possible for a second update to be
called back in the ServiceBrowser
* Fixed ServiceStateChange.Updated event happening for IPs that already existed (#375) @bdraco
* Fixed RFC6762 Section 10.2 paragraph 2 compliance (#374) @bdraco
* Reduced length of ServiceBrowser thread name with many types (#373) @bdraco
* Fixed empty answers being added in ServiceInfo.request (#367) @bdraco
* Fixed ServiceInfo not populating all AAAA records (#366) @bdraco
Use get_all_by_details to ensure all records are loaded
into addresses.
Only load A/AAAA records from the cache once in load_from_cache
if there is a SRV record present
Move duplicate code that checked if the ServiceInfo was complete
into its own function
* Fixed a case where the cache list can change during iteration (#363) @bdraco
* Return task objects created by AsyncZeroconf (#360) @nocarryr
Traffic Reduction:
* Added support for handling QU questions (#621) @bdraco
Implements RFC 6762 sec 5.4:
Questions Requesting Unicast Responses
datatracker.ietf.org/doc/html/rfc6762#section-5.4
* Implemented protect the network against excessive packet flooding (#619) @bdraco
* Additionals are now suppressed when they are already in the answers section (#617) @bdraco
* Additionals are no longer included when the answer is suppressed by known-answer suppression (#614) @bdraco
* Implemented multi-packet known answer suppression (#687) @bdraco
Implements datatracker.ietf.org/doc/html/rfc6762#section-7.2
* Implemented efficient bucketing of queries with known answers (#698) @bdraco
* Implemented duplicate question suppression (#770) @bdraco
http://datatracker.ietf.org/doc/html/rfc6762#section-7.3
Technically backwards incompatible:
* Update internal version check to match docs (3.6+) (#491) @bdraco
Python versions earlier than 3.6 were likely broken with zeroconf
already; however, the version is now explicitly checked.
* Update python compatibility as PyPy3 7.2 is required (#523) @bdraco
Backwards incompatible:
* Drop oversize packets before processing them (#826) @bdraco
Oversized packets can quickly overwhelm the system and deny
service to legitimate queriers. In practice, this is usually due to broken mDNS
implementations rather than malicious actors.
* Guard against excessive ServiceBrowser queries from PTR records significantly lower than recommended (#824) @bdraco
We now enforce a minimum TTL for PTR records to avoid
ServiceBrowsers generating excessive refresh queries.
Apple uses a 15s minimum TTL, however, we do not have the same
level of rate limit and safeguards, so we use 1/4 of the recommended value.
* RecordUpdateListener now uses async_update_records instead of update_record (#419, #726) @bdraco
This allows the listener to receive all the records that have
been updated in a single transaction such as a packet or
cache expiry.
update_record has been deprecated in favor of async_update_records
A compatibility shim exists to ensure classes that use
RecordUpdateListener as a base class continue to have
update_record called, however, they should be updated
as soon as possible.
A new method async_update_records_complete is now called on each
listener when all listeners have completed processing updates
and the cache has been updated. This allows ServiceBrowsers
to delay calling handlers until they are sure the cache
has been updated as its a common pattern to call for
ServiceInfo when a ServiceBrowser handler fires.
The async\_ prefix was chosen to make it clear that these
functions run in the eventloop and should never do blocking
I/O. Before 0.32+ these functions ran in a select() loop and
should not have been doing any blocking I/O, but it was not
clear to implementors that I/O would block the loop.
* Pass both the new and old records to async_update_records (#792) @bdraco
Pass the old_record (cached) as the value and the new_record (wire)
to async_update_records instead of forcing each consumer to
check the cache since we will always have the old_record
when generating the async_update_records call. This avoids
the overhead of multiple cache lookups for each listener.
0.31.0
======
* Separated cache loading from I/O in ServiceInfo and fixed cache lookup (#356),
thanks to J. Nick Koston.
The ServiceInfo class gained a load_from_cache() method to only fetch information
from Zeroconf cache (if it exists) with no IO performed. Additionally this should
reduce IO in cases where cache lookups were previously incorrectly failing.
0.30.0
======
* Some nice refactoring work including removal of the Reaper thread,
thanks to J. Nick Koston.
* Fixed a Windows-specific The requested address is not valid in its context regression,
thanks to Timothee ‘TTimo’ Besset and J. Nick Koston.
* Provided an asyncio-compatible service registration layer (in the zeroconf.asyncio module),
thanks to J. Nick Koston.
0.29.0
======
* A single socket is used for listening and responding when `InterfaceChoice.Default` is chosen.
Thanks to J. Nick Koston.
Backwards incompatible:
* Dropped Python 3.5 support
0.28.8
======
* Fixed the packet generation when multiple packets are necessary, previously invalid
packets were generated sometimes. Patch thanks to J. Nick Koston.
0.28.7
======
* Fixed the IPv6 address rendering in the browser example, thanks to Alexey Vazhnov.
* Fixed a crash happening when a service is added or removed during handle_response
and improved exception handling, thanks to J. Nick Koston.
0.28.6
======
* Loosened service name validation when receiving from the network this lets us handle
some real world devices previously causing errors, thanks to J. Nick Koston.
0.28.5
======
* Enabled ignoring duplicated messages which decreases CPU usage, thanks to J. Nick Koston.
* Fixed spurious AttributeError: module 'unittest' has no attribute 'mock' in tests.
0.28.4
======
* Improved cache reaper performance significantly, thanks to J. Nick Koston.
* Added ServiceListener to __all__ as it's part of the public API, thanks to Justin Nesselrotte.
0.28.3
======
* Reduced a time an internal lock is held which should eliminate deadlocks in high-traffic networks,
thanks to J. Nick Koston.
0.28.2
======
* Stopped asking questions we already have answers for in cache, thanks to Paul Daumlechner.
* Removed initial delay before querying for service info, thanks to Erik Montnemery.
0.28.1
======
* Fixed a resource leak connected to using ServiceBrowser with multiple types, thanks to
J. Nick Koston.
0.28.0
======
* Improved Windows support when using socket errno checks, thanks to Sandy Patterson.
* Added support for passing text addresses to ServiceInfo.
* Improved logging (includes fixing an incorrect logging call)
* Improved Windows compatibility by using Adapter.index from ifaddr, thanks to PhilippSelenium.
* Improved Windows compatibility by stopping using socket.if_nameindex.
* Fixed an OS X edge case which should also eliminate a memory leak, thanks to Emil Styrke.
Technically backwards incompatible:
* ``ifaddr`` 0.1.7 or newer is required now.
0.27.1
------
* Improved the logging situation (includes fixing a false-positive "packets() made no progress
adding records", thanks to Greg Badros)
0.27.0
------
* Large multi-resource responses are now split into separate packets which fixes a bad
mdns-repeater/ChromeCast Audio interaction ending with ChromeCast Audio crash (and possibly
some others) and improves RFC 6762 compliance, thanks to Greg Badros
* Added a warning presented when the listener passed to ServiceBrowser lacks update_service()
callback
* Added support for finding all services available in the browser example, thanks to Perry Kunder
Backwards incompatible:
* Removed previously deprecated ServiceInfo address constructor parameter and property
0.26.3
------
* Improved readability of logged incoming data, thanks to Erik Montnemery
* Threads are given unique names now to aid debugging, thanks to Erik Montnemery
* Fixed a regression where get_service_info() called within a listener add_service method
would deadlock, timeout and incorrectly return None, fix thanks to Erik Montnemery, but
Matt Saxon and Hmmbob were also involved in debugging it.
0.26.2
------
* Added support for multiple types to ServiceBrowser, thanks to J. Nick Koston
* Fixed a race condition where a listener gets a message before the lock is created, thanks to
J. Nick Koston
0.26.1
------
* Fixed a performance regression introduced in 0.26.0, thanks to J. Nick Koston (this is close in
spirit to an optimization made in 0.24.5 by the same author)
0.26.0
------
* Fixed a regression where service update listener wasn't called on IP address change (it's called
on SRV/A/AAAA record changes now), thanks to Matt Saxon
Technically backwards incompatible:
* Service update hook is no longer called on service addition (service added hook is still called),
this is related to the fix above
0.25.1
------
* Eliminated 5s hangup when calling Zeroconf.close(), thanks to Erik Montnemery
0.25.0
------
* Reverted uniqueness assertions when browsing, they caused a regression
Backwards incompatible:
* Rationalized handling of TXT records. Non-bytes values are converted to str and encoded to bytes
using UTF-8 now, None values mean value-less attributes. When receiving TXT records no decoding
is performed now, keys are always bytes and values are either bytes or None in value-less
attributes.
0.24.5
------
* Fixed issues with shared records being used where they shouldn't be (TXT, SRV, A records are
unique now), thanks to Matt Saxon
* Stopped unnecessarily excluding host-only interfaces from InterfaceChoice.all as they don't
forbid multicast, thanks to Andreas Oberritter
* Fixed repr() of IPv6 DNSAddress, thanks to Aldo Hoeben
* Removed duplicate update messages sent to listeners, thanks to Matt Saxon
* Added support for cooperating responders, thanks to Matt Saxon
* Optimized handle_response cache check, thanks to J. Nick Koston
* Fixed memory leak in DNSCache, thanks to J. Nick Koston
0.24.4
------
* Fixed resetting TTL in DNSRecord.reset_ttl(), thanks to Matt Saxon
* Improved various DNS class' string representations, thanks to Jay Hogg
0.24.3
------
* Fixed import-time "TypeError: 'ellipsis' object is not iterable." on CPython 3.5.2
0.24.2
------
* Added support for AWDL interface on macOS (needed and used by the opendrop project but should be
useful in general), thanks to Milan Stute
* Added missing type hints
0.24.1
------
* Applied some significant performance optimizations, thanks to Jaime van Kessel for the patch and
to Ghostkeeper for performance measurements
* Fixed flushing outdated cache entries when incoming record is unique, thanks to Michael Hu
* Fixed handling updates of TXT records (they'd not get recorded previously), thanks to Michael Hu
0.24.0
------
* Added IPv6 support, thanks to Dmitry Tantsur
* Added additional recommended records to PTR responses, thanks to Scott Mertz
* Added handling of ENOTCONN being raised during shutdown when using Eventlet, thanks to Tamás Nepusz
* Included the py.typed marker in the package so that type checkers know to use type hints from the
source code, thanks to Dmitry Tantsur
0.23.0
------
* Added support for MyListener call getting updates to service TXT records, thanks to Matt Saxon
* Added support for multiple addresses when publishing a service, getting/setting single address
has become deprecated. Change thanks to Dmitry Tantsur
Backwards incompatible:
* Dropped Python 3.4 support
0.22.0
------
* A lot of maintenance work (tooling, typing coverage and improvements, spelling) done, thanks to Ville Skyttä
* Provided saner defaults in ServiceInfo's constructor, thanks to Jorge Miranda
* Fixed service removal packets not being sent on shutdown, thanks to Andrew Bonney
* Added a way to define TTL-s through ServiceInfo constructor parameters, thanks to Andrew Bonney
Technically backwards incompatible:
* Adjusted query intervals to match RFC 6762, thanks to Andrew Bonney
* Made default TTL-s match RFC 6762, thanks to Andrew Bonney
0.21.3
------
* This time really allowed incoming service names to contain underscores (patch released
as part of 0.21.0 was defective)
0.21.2
------
* Fixed import-time typing-related TypeError when older typing version is used
0.21.1
------
* Fixed installation on Python 3.4 (we use typing now but there was no explicit dependency on it)
0.21.0
------
* Added an error message when importing the package using unsupported Python version
* Fixed TTL handling for published service
* Implemented unicast support
* Fixed WSL (Windows Subsystem for Linux) compatibility
* Fixed occasional UnboundLocalError issue
* Fixed UTF-8 multibyte name compression
* Switched from netifaces to ifaddr (pure Python)
* Allowed incoming service names to contain underscores
0.20.0
------
* Dropped support for Python 2 (this includes PyPy) and 3.3
* Fixed some class' equality operators
* ServiceBrowser entries are being refreshed when 'stale' now
* Cache returns new records first now instead of last
0.19.1
------
* Allowed installation with netifaces >= 0.10.6 (a bug that was concerning us
got fixed)
0.19.0
------
* Technically backwards incompatible - restricted netifaces dependency version to
work around a bug, see https://github.com/jstasiak/python-zeroconf/issues/84 for
details
0.18.0
------
* Dropped Python 2.6 support
* Improved error handling inside code executed when Zeroconf object is being closed
0.17.7
------
* Better Handling of DNS Incoming Packets parsing exceptions
* Many exceptions will now log a warning the first time they are seen
* Catch and log sendto() errors
* Fix/Implement duplicate name change
* Fix overly strict name validation introduced in 0.17.6
* Greatly improve handling of oversized packets including:
- Implement name compression per RFC1035
- Limit size of generated packets to 9000 bytes as per RFC6762
- Better handle oversized incoming packets
* Increased test coverage to 95%
0.17.6
------
* Many improvements to address race conditions and exceptions during ZC()
startup and shutdown, thanks to: morpav, veawor, justingiorgi, herczy,
stephenrauch
* Added more test coverage: strahlex, stephenrauch
* Stephen Rauch contributed:
- Speed up browser startup
- Add ZeroconfServiceTypes() query class to discover all advertised service types
- Add full validation for service names, types and subtypes
- Fix for subtype browsing
- Fix DNSHInfo support
0.17.5
------
* Fixed OpenBSD compatibility, thanks to Alessio Sergi
* Fixed race condition on ServiceBrowser startup, thanks to gbiddison
* Fixed installation on some Python 3 systems, thanks to Per Sandström
* Fixed "size change during iteration" bug on Python 3, thanks to gbiddison
0.17.4
------
* Fixed support for Linux kernel versions < 3.9 (thanks to Giovanni Harting
and Luckydonald, GitHub pull request #26)
0.17.3
------
* Fixed DNSText repr on Python 3 (it'd crash when the text was longer than
10 bytes), thanks to Paulus Schoutsen for the patch, GitHub pull request #24
0.17.2
------
* Fixed installation on Python 3.4.3+ (was failing because of enum34 dependency
which fails to install on 3.4.3+, changed to depend on enum-compat instead;
thanks to Michael Brennan for the original patch, GitHub pull request #22)
0.17.1
------
* Fixed EADDRNOTAVAIL when attempting to use dummy network interfaces on Windows,
thanks to daid
0.17.0
------
* Added some Python dependencies so it's not zero-dependencies anymore
* Improved exception handling (it'll be quieter now)
* Messages are listened to and sent using all available network interfaces
by default (configurable); thanks to Marcus Müller
* Started using logging more freely
* Fixed a bug with binary strings as property values being converted to False
(https://github.com/jstasiak/python-zeroconf/pull/10); thanks to Dr. Seuss
* Added new ``ServiceBrowser`` event handler interface (see the examples)
* PyPy3 now officially supported
* Fixed ServiceInfo repr on Python 3, thanks to Yordan Miladinov
0.16.0
------
* Set up Python logging and started using it
* Cleaned up code style (includes migrating from camel case to snake case)
0.15.1
------
* Fixed handling closed socket (GitHub #4)
0.15
----
* Forked by Jakub Stasiak
* Made Python 3 compatible
* Added setup script, made installable by pip and uploaded to PyPI
* Set up Travis build
* Reformatted the code and moved files around
* Stopped catching BaseException in several places, that could hide errors
* Marked threads as daemonic, they won't keep application alive now
0.14
----
* Fix for SOL_IP undefined on some systems - thanks Mike Erdely.
* Cleaned up examples.
* Lowercased module name.
0.13
----
* Various minor changes; see git for details.
* No longer compatible with Python 2.2. Only tested with 2.5-2.7.
* Fork by William McBrine.
0.12
----
* allow selection of binding interface
* typo fix - Thanks A. M. Kuchlingi
* removed all use of word 'Rendezvous' - this is an API change
0.11
----
* correction to comments for addListener method
* support for new record types seen from OS X
- IPv6 address
- hostinfo
* ignore unknown DNS record types
* fixes to name decoding
* works alongside other processes using port 5353 (e.g. on Mac OS X)
* tested against Mac OS X 10.3.2's mDNSResponder
* corrections to removal of list entries for service browser
0.10
----
* Jonathon Paisley contributed these corrections:
- always multicast replies, even when query is unicast
- correct a pointer encoding problem
- can now write records in any order
- traceback shown on failure
- better TXT record parsing
- server is now separate from name
- can cancel a service browser
* modified some unit tests to accommodate these changes
0.09
----
* remove all records on service unregistration
* fix DOS security problem with readName
0.08
----
* changed licensing to LGPL
0.07
----
* faster shutdown on engine
* pointer encoding of outgoing names
* ServiceBrowser now works
* new unit tests
0.06
----
* small improvements with unit tests
* added defined exception types
* new style objects
* fixed hostname/interface problem
* fixed socket timeout problem
* fixed add_service_listener() typo bug
* using select() for socket reads
* tested on Debian unstable with Python 2.2.2
0.05
----
* ensure case insensitivity on domain names
* support for unicast DNS queries
0.04
----
* added some unit tests
* added __ne__ adjuncts where required
* ensure names end in '.local.'
* timeout on receiving socket for clean shutdown
License
=======
LGPL, see COPYING file for details.
python-zeroconf-0.38.3/docs/ 0000775 0000000 0000000 00000000000 14176067602 0015712 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/docs/Makefile 0000664 0000000 0000000 00000015162 14176067602 0017357 0 ustar 00root root 0000000 0000000 # Makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
PAPER =
BUILDDIR = _build
# User-friendly check for sphinx-build
ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
endif
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
# the i18n builder cannot share the environment and doctrees with the others
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
help:
@echo "Please use \`make ' where is one of"
@echo " html to make standalone HTML files"
@echo " dirhtml to make HTML files named index.html in directories"
@echo " singlehtml to make a single large HTML file"
@echo " pickle to make pickle files"
@echo " json to make JSON files"
@echo " htmlhelp to make HTML files and a HTML help project"
@echo " qthelp to make HTML files and a qthelp project"
@echo " devhelp to make HTML files and a Devhelp project"
@echo " epub to make an epub"
@echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
@echo " latexpdf to make LaTeX files and run them through pdflatex"
@echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
@echo " text to make text files"
@echo " man to make manual pages"
@echo " texinfo to make Texinfo files"
@echo " info to make Texinfo files and run them through makeinfo"
@echo " gettext to make PO message catalogs"
@echo " changes to make an overview of all changed/added/deprecated items"
@echo " xml to make Docutils-native XML files"
@echo " pseudoxml to make pseudoxml-XML files for display purposes"
@echo " linkcheck to check all external links for integrity"
@echo " doctest to run all doctests embedded in the documentation (if enabled)"
clean:
rm -rf $(BUILDDIR)/*
html:
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
dirhtml:
$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
@echo
@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
singlehtml:
$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
@echo
@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
pickle:
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
@echo
@echo "Build finished; now you can process the pickle files."
json:
$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
@echo
@echo "Build finished; now you can process the JSON files."
htmlhelp:
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
@echo
@echo "Build finished; now you can run HTML Help Workshop with the" \
".hhp project file in $(BUILDDIR)/htmlhelp."
qthelp:
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
@echo
@echo "Build finished; now you can run "qcollectiongenerator" with the" \
".qhcp project file in $(BUILDDIR)/qthelp, like this:"
@echo "# qcollectiongenerator $(BUILDDIR)/qthelp/zeroconf.qhcp"
@echo "To view the help file:"
@echo "# assistant -collectionFile $(BUILDDIR)/qthelp/zeroconf.qhc"
devhelp:
$(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
@echo
@echo "Build finished."
@echo "To view the help file:"
@echo "# mkdir -p $$HOME/.local/share/devhelp/zeroconf"
@echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/zeroconf"
@echo "# devhelp"
epub:
$(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
@echo
@echo "Build finished. The epub file is in $(BUILDDIR)/epub."
latex:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo
@echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
@echo "Run \`make' in that directory to run these through (pdf)latex" \
"(use \`make latexpdf' here to do that automatically)."
latexpdf:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through pdflatex..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
latexpdfja:
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
@echo "Running LaTeX files through platex and dvipdfmx..."
$(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
@echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
text:
$(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
@echo
@echo "Build finished. The text files are in $(BUILDDIR)/text."
man:
$(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
@echo
@echo "Build finished. The manual pages are in $(BUILDDIR)/man."
texinfo:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo
@echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
@echo "Run \`make' in that directory to run these through makeinfo" \
"(use \`make info' here to do that automatically)."
info:
$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
@echo "Running Texinfo files through makeinfo..."
make -C $(BUILDDIR)/texinfo info
@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
gettext:
$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
@echo
@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
changes:
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
@echo
@echo "The overview file is in $(BUILDDIR)/changes."
linkcheck:
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
@echo
@echo "Link check complete; look for any errors in the above output " \
"or in $(BUILDDIR)/linkcheck/output.txt."
doctest:
$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
@echo "Testing of doctests in the sources finished, look at the " \
"results in $(BUILDDIR)/doctest/output.txt."
xml:
$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
@echo
@echo "Build finished. The XML files are in $(BUILDDIR)/xml."
pseudoxml:
$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
@echo
@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
python-zeroconf-0.38.3/docs/api.rst 0000664 0000000 0000000 00000000352 14176067602 0017215 0 ustar 00root root 0000000 0000000 python-zeroconf API reference
=============================
.. automodule:: zeroconf
:members:
:undoc-members:
:show-inheritance:
.. automodule:: zeroconf.asyncio
:members:
:undoc-members:
:show-inheritance:
python-zeroconf-0.38.3/docs/conf.py 0000664 0000000 0000000 00000020136 14176067602 0017213 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import zeroconf
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'python-zeroconf'
copyright = 'python-zeroconf authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = zeroconf.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ('sidebar.html', 'sourcelink.html', 'searchbox.html'),
'**': ('localtoc.html', 'relations.html', 'sourcelink.html', 'searchbox.html'),
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'zeroconfdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# latex_documents = []
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# man_pages = []
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = []
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
def setup(app):
    """Sphinx extension hook: install our autodoc member filter."""
    app.connect('autodoc-skip-member', skip_member)
def skip_member(app, what, name, obj, skip, options):
    """Decide whether autodoc should skip *obj*.

    A member is skipped when Sphinx already chose to skip it, when it has
    no docstring, or when it (or, for bound methods, the function behind
    ``__func__``) carries ``__private__ = True``.
    """
    if skip:
        return True
    if getattr(obj, '__doc__', None) is None:
        return True
    if getattr(obj, '__private__', False) is True:
        return True
    wrapped = getattr(obj, '__func__', None)
    return getattr(wrapped, '__private__', False) is True
python-zeroconf-0.38.3/docs/index.rst 0000664 0000000 0000000 00000001737 14176067602 0017563 0 ustar 00root root 0000000 0000000 Welcome to python-zeroconf documentation!
=========================================
.. image:: https://github.com/jstasiak/python-zeroconf/workflows/CI/badge.svg
:target: https://github.com/jstasiak/python-zeroconf?query=workflow%3ACI+branch%3Amaster
.. image:: https://img.shields.io/pypi/v/zeroconf.svg
:target: https://pypi.python.org/pypi/zeroconf
.. image:: https://codecov.io/gh/jstasiak/python-zeroconf/branch/master/graph/badge.svg
:target: https://codecov.io/gh/jstasiak/python-zeroconf
GitHub (code repository, issues): https://github.com/jstasiak/python-zeroconf
PyPI (installable, stable distributions): https://pypi.org/project/zeroconf. You can install python-zeroconf using pip::
pip install zeroconf
python-zeroconf works with CPython 3.7+ and PyPy 3 implementing Python 3.7+.
Contents
--------
.. toctree::
:maxdepth: 1
api
See `the project's README <https://github.com/jstasiak/python-zeroconf>`_ for more information.
python-zeroconf-0.38.3/examples/ 0000775 0000000 0000000 00000000000 14176067602 0016600 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/examples/async_apple_scanner.py 0000664 0000000 0000000 00000010341 14176067602 0023160 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
""" Scan for apple devices. """
import argparse
import asyncio
import logging
from typing import Any, Optional, cast
from zeroconf import DNSQuestionType, IPVersion, ServiceStateChange, Zeroconf
from zeroconf.asyncio import AsyncServiceBrowser, AsyncServiceInfo, AsyncZeroconf
HOMESHARING_SERVICE: str = "_appletv-v2._tcp.local."
DEVICE_SERVICE: str = "_touch-able._tcp.local."
MEDIAREMOTE_SERVICE: str = "_mediaremotetv._tcp.local."
AIRPLAY_SERVICE: str = "_airplay._tcp.local."
COMPANION_SERVICE: str = "_companion-link._tcp.local."
RAOP_SERVICE: str = "_raop._tcp.local."
AIRPORT_ADMIN_SERVICE: str = "_airport._tcp.local."
DEVICE_INFO_SERVICE: str = "_device-info._tcp.local."
ALL_SERVICES = [
HOMESHARING_SERVICE,
DEVICE_SERVICE,
MEDIAREMOTE_SERVICE,
AIRPLAY_SERVICE,
COMPANION_SERVICE,
RAOP_SERVICE,
AIRPORT_ADMIN_SERVICE,
DEVICE_INFO_SERVICE,
]
log = logging.getLogger(__name__)
def async_on_service_state_change(
    zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> None:
    """Browser callback: log every change and resolve newly added services."""
    print(f"Service {name} of type {service_type} state changed: {state_change}")
    if state_change is not ServiceStateChange.Added:
        return
    # Drop the trailing ".<service_type>" to recover the bare instance name.
    instance = name[: -len(service_type) - 1]
    # Resolve the service itself and, in parallel, its _device-info record.
    asyncio.ensure_future(_async_show_service_info(zeroconf, service_type, name))
    asyncio.ensure_future(
        _async_show_service_info(zeroconf, DEVICE_INFO_SERVICE, f"{instance}.{DEVICE_INFO_SERVICE}")
    )
async def _async_show_service_info(zeroconf: Zeroconf, service_type: str, name: str) -> None:
    """Resolve one service via a QU (unicast-reply) question and print it.

    Bug fix: the original checked ``if info:`` after the request, but
    ``info`` is the freshly constructed AsyncServiceInfo object and is
    always truthy, so the "No info" branch could never run.
    ``async_request()`` returns True only when the service was actually
    resolved within the timeout, so branch on its return value instead.
    """
    info = AsyncServiceInfo(service_type, name)
    resolved = await info.async_request(zeroconf, 3000, question_type=DNSQuestionType.QU)
    print("Info from zeroconf.get_service_info: %r" % (info))
    if resolved:
        # info.port is Optional in the stubs; cast for the %d format.
        addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_addresses()]
        print(" Name: %s" % name)
        print(" Addresses: %s" % ", ".join(addresses))
        print(" Weight: %d, priority: %d" % (info.weight, info.priority))
        print(f" Server: {info.server}")
        if info.properties:
            print(" Properties are:")
            for key, value in info.properties.items():
                print(f" {key}: {value}")
        else:
            print(" No properties")
    else:
        print(" No info")
    print('\n')
class AsyncAppleScanner:
    """Owns the AsyncZeroconf engine and a browser over all Apple services.

    NOTE(review): ``async_run`` reads the module-level global ``ip_version``
    set in the ``__main__`` block rather than deriving it from ``args`` --
    fine for this script, but confirm before reusing the class elsewhere.
    """

    def __init__(self, args: Any) -> None:
        self.args = args
        self.aiobrowser: Optional[AsyncServiceBrowser] = None
        self.aiozc: Optional[AsyncZeroconf] = None

    async def async_run(self) -> None:
        """Start browsing all Apple service types and idle until cancelled."""
        self.aiozc = AsyncZeroconf(ip_version=ip_version)
        await self.aiozc.zeroconf.async_wait_for_start()
        print("\nBrowsing %s service(s), press Ctrl-C to exit...\n" % ALL_SERVICES)
        browser_kwargs = {
            'handlers': [async_on_service_state_change],
            'question_type': DNSQuestionType.QU,
        }
        if self.args.target:
            # Unicast mode: direct all queries at the requested address.
            browser_kwargs["addr"] = self.args.target
        self.aiobrowser = AsyncServiceBrowser(
            self.aiozc.zeroconf, ALL_SERVICES, **browser_kwargs
        )  # type: ignore
        while True:
            await asyncio.sleep(1)

    async def async_close(self) -> None:
        """Cancel the browser, then shut the Zeroconf engine down."""
        assert self.aiozc is not None
        assert self.aiobrowser is not None
        await self.aiobrowser.async_cancel()
        await self.aiozc.async_close()
if __name__ == '__main__':
    # Verbose logging by default so mDNS traffic is visible; --debug also
    # enables the zeroconf library's own debug logger below.
    logging.basicConfig(level=logging.DEBUG)
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--target', help='Unicast target')
    version_group.add_argument('--v6', action='store_true')
    version_group.add_argument('--v6-only', action='store_true')
    args = parser.parse_args()
    if args.debug:
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    # Map the mutually-exclusive flags onto an IPVersion value; this module
    # global is read by AsyncAppleScanner.async_run.
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only
    loop = asyncio.get_event_loop()
    runner = AsyncAppleScanner(args)
    try:
        loop.run_until_complete(runner.async_run())
    except KeyboardInterrupt:
        # Ctrl-C: run the async teardown before the loop is discarded.
        loop.run_until_complete(runner.async_close())
python-zeroconf-0.38.3/examples/async_browser.py 0000664 0000000 0000000 00000006777 14176067602 0022053 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
""" Example of browsing for a service.
The default is HTTP and HAP; use --find to search for all available services in the network
"""
import argparse
import asyncio
import logging
from typing import Any, Optional, cast
from zeroconf import IPVersion, ServiceStateChange, Zeroconf
from zeroconf.asyncio import AsyncServiceBrowser, AsyncServiceInfo, AsyncZeroconf, AsyncZeroconfServiceTypes
def async_on_service_state_change(
    zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> None:
    """Browser callback: log every state change, resolve newly added services."""
    print(f"Service {name} of type {service_type} state changed: {state_change}")
    # Only an Added event warrants firing off a resolution query.
    if state_change is ServiceStateChange.Added:
        asyncio.ensure_future(async_display_service_info(zeroconf, service_type, name))
async def async_display_service_info(zeroconf: Zeroconf, service_type: str, name: str) -> None:
    """Resolve one service and print its addresses and properties.

    Bug fix: the original checked ``if info:`` after the request, but
    ``info`` is the freshly constructed AsyncServiceInfo object and is
    always truthy, so the "No info" branch could never run.
    ``async_request()`` returns True only when the service was actually
    resolved within the timeout, so branch on its return value instead.
    """
    info = AsyncServiceInfo(service_type, name)
    resolved = await info.async_request(zeroconf, 3000)
    print("Info from zeroconf.get_service_info: %r" % (info))
    if resolved:
        # info.port is Optional in the stubs; cast for the %d format.
        addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_scoped_addresses()]
        print(" Name: %s" % name)
        print(" Addresses: %s" % ", ".join(addresses))
        print(" Weight: %d, priority: %d" % (info.weight, info.priority))
        print(f" Server: {info.server}")
        if info.properties:
            print(" Properties are:")
            for key, value in info.properties.items():
                print(f" {key}: {value}")
        else:
            print(" No properties")
    else:
        print(" No info")
    print('\n')
class AsyncRunner:
    """Owns the AsyncZeroconf engine and the service browser.

    NOTE(review): ``async_run`` reads the module-level global ``ip_version``
    set in the ``__main__`` block -- fine for this script, but confirm
    before reusing the class elsewhere.
    """

    def __init__(self, args: Any) -> None:
        self.args = args
        self.aiobrowser: Optional[AsyncServiceBrowser] = None
        self.aiozc: Optional[AsyncZeroconf] = None

    async def async_run(self) -> None:
        """Browse the default service types, or everything found via --find."""
        self.aiozc = AsyncZeroconf(ip_version=ip_version)
        if self.args.find:
            # --find: first discover every advertised service type on the LAN.
            services = list(
                await AsyncZeroconfServiceTypes.async_find(aiozc=self.aiozc, ip_version=ip_version)
            )
        else:
            services = ["_http._tcp.local.", "_hap._tcp.local."]
        print("\nBrowsing %s service(s), press Ctrl-C to exit...\n" % services)
        self.aiobrowser = AsyncServiceBrowser(
            self.aiozc.zeroconf, services, handlers=[async_on_service_state_change]
        )
        while True:
            await asyncio.sleep(1)

    async def async_close(self) -> None:
        """Cancel the browser, then shut the Zeroconf engine down."""
        assert self.aiozc is not None
        assert self.aiobrowser is not None
        await self.aiobrowser.async_cancel()
        await self.aiozc.async_close()
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--find', action='store_true', help='Browse all available services')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--v6', action='store_true')
    version_group.add_argument('--v6-only', action='store_true')
    args = parser.parse_args()

    if args.debug:
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    # --v6 browses both stacks, --v6-only restricts to IPv6; IPv4 is default.
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    # NOTE(review): asyncio.get_event_loop() at module level is deprecated
    # since Python 3.10 — kept as-is to preserve the example's behavior.
    loop = asyncio.get_event_loop()
    runner = AsyncRunner(args)
    try:
        loop.run_until_complete(runner.async_run())
    except KeyboardInterrupt:
        # Ctrl-C: tear down the browser and the AsyncZeroconf instance.
        loop.run_until_complete(runner.async_close())
python-zeroconf-0.38.3/examples/async_registration.py 0000664 0000000 0000000 00000004765 14176067602 0023075 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
"""Example of announcing 250 services (in this case, a fake HTTP server)."""
import argparse
import asyncio
import logging
import socket
from typing import List, Optional
from zeroconf import IPVersion
from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf
class AsyncRunner:
    """Registers a batch of services and keeps them announced until Ctrl-C."""

    def __init__(self, ip_version: IPVersion) -> None:
        self.ip_version = ip_version
        self.aiozc: Optional[AsyncZeroconf] = None

    async def register_services(self, infos: List[AsyncServiceInfo]) -> None:
        """Register every service, wait for the announcements, then idle."""
        self.aiozc = AsyncZeroconf(ip_version=self.ip_version)
        # async_register_service returns an awaitable per service; fan out all
        # registrations first, then await the announcement tasks they return.
        tasks = [self.aiozc.async_register_service(info) for info in infos]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)
        print("Finished registration, press Ctrl-C to exit...")
        while True:
            await asyncio.sleep(1)

    async def unregister_services(self, infos: List[AsyncServiceInfo]) -> None:
        """Mirror of register_services: send goodbyes, await them, close."""
        assert self.aiozc is not None
        tasks = [self.aiozc.async_unregister_service(info) for info in infos]
        background_tasks = await asyncio.gather(*tasks)
        await asyncio.gather(*background_tasks)
        await self.aiozc.async_close()
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--v6', action='store_true')
    version_group.add_argument('--v6-only', action='store_true')
    args = parser.parse_args()

    if args.debug:
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    # Build 250 distinct fake HTTP services, each with its own name/server.
    infos = []
    for i in range(250):
        infos.append(
            AsyncServiceInfo(
                "_http._tcp.local.",
                f"Paul's Test Web Site {i}._http._tcp.local.",
                addresses=[socket.inet_aton("127.0.0.1")],
                port=80,
                properties={'path': '/~paulsm/'},
                server=f"zcdemohost-{i}.local.",
            )
        )

    print("Registration of 250 services...")
    loop = asyncio.get_event_loop()
    runner = AsyncRunner(ip_version)
    try:
        loop.run_until_complete(runner.register_services(infos))
    except KeyboardInterrupt:
        # Ctrl-C: send goodbye packets for all services before exiting.
        loop.run_until_complete(runner.unregister_services(infos))
python-zeroconf-0.38.3/examples/async_service_info_request.py 0000664 0000000 0000000 00000006601 14176067602 0024575 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
"""Example of perodic dump of homekit services.
This example is useful when a user wants an ondemand
list of HomeKit devices on the network.
"""
import argparse
import asyncio
import logging
from typing import Any, Optional, cast
from zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf
from zeroconf.asyncio import AsyncServiceInfo, AsyncZeroconf
HAP_TYPE = "_hap._tcp.local."
async def async_watch_services(aiozc: AsyncZeroconf) -> None:
    """Every five seconds, re-resolve all cached HAP services and dump them."""
    zeroconf = aiozc.zeroconf
    while True:
        await asyncio.sleep(5)
        infos = [
            AsyncServiceInfo(HAP_TYPE, name)
            for name in zeroconf.cache.names()
            if name.endswith(HAP_TYPE)
        ]
        # Resolve all cached services concurrently.
        await asyncio.gather(*[info.async_request(aiozc.zeroconf, 3000) for info in infos])
        for info in infos:
            print("Info for %s" % (info.name))
            if not info:
                print(" No info")
            else:
                addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_addresses()]
                print(" Addresses: %s" % ", ".join(addresses))
                print(" Weight: %d, priority: %d" % (info.weight, info.priority))
                print(f" Server: {info.server}")
                if info.properties:
                    print(" Properties are:")
                    for key, value in info.properties.items():
                        print(f" {key}: {value}")
                else:
                    print(" No properties")
            print('\n')
class AsyncRunner:
    """Runs a threaded ServiceBrowser alongside the asyncio watcher loop."""

    def __init__(self, args: Any) -> None:
        self.args = args
        # A synchronous ServiceBrowser running next to the asyncio code.
        self.threaded_browser: Optional[ServiceBrowser] = None
        self.aiozc: Optional[AsyncZeroconf] = None

    async def async_run(self) -> None:
        """Start the browser and loop in async_watch_services forever."""
        # NOTE(review): uses the module-level ``ip_version`` set in __main__,
        # not self.args — confirm before reusing outside this example.
        self.aiozc = AsyncZeroconf(ip_version=ip_version)
        assert self.aiozc is not None

        def on_service_state_change(
            zeroconf: Zeroconf, service_type: str, state_change: ServiceStateChange, name: str
        ) -> None:
            """Dummy handler."""
            # The browser requires at least one handler; results are read
            # from the cache by async_watch_services instead.

        self.threaded_browser = ServiceBrowser(
            self.aiozc.zeroconf, [HAP_TYPE], handlers=[on_service_state_change]
        )
        await async_watch_services(self.aiozc)

    async def async_close(self) -> None:
        """Cancel the threaded browser and close the AsyncZeroconf instance."""
        assert self.aiozc is not None
        assert self.threaded_browser is not None
        self.threaded_browser.cancel()
        await self.aiozc.async_close()
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--v6', action='store_true')
    version_group.add_argument('--v6-only', action='store_true')
    args = parser.parse_args()

    if args.debug:
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    print(f"Services with {HAP_TYPE} will be shown every 5s, press Ctrl-C to exit...")
    # NOTE(review): asyncio.get_event_loop() at module level is deprecated
    # since Python 3.10 — kept as-is to preserve the example's behavior.
    loop = asyncio.get_event_loop()
    runner = AsyncRunner(args)
    try:
        loop.run_until_complete(runner.async_run())
    except KeyboardInterrupt:
        loop.run_until_complete(runner.async_close())
python-zeroconf-0.38.3/examples/browser.py 0000775 0000000 0000000 00000004771 14176067602 0020651 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
""" Example of browsing for a service.
The default is HTTP and HAP; use --find to search for all available services in the network
"""
import argparse
import logging
from time import sleep
from typing import cast
from zeroconf import IPVersion, ServiceBrowser, ServiceStateChange, Zeroconf, ZeroconfServiceTypes
def on_service_state_change(
    zeroconf: Zeroconf, service_type: str, name: str, state_change: ServiceStateChange
) -> None:
    """Log state changes; on Added, resolve and pretty-print the service."""
    print(f"Service {name} of type {service_type} state changed: {state_change}")
    if state_change is not ServiceStateChange.Added:
        return
    info = zeroconf.get_service_info(service_type, name)
    print("Info from zeroconf.get_service_info: %r" % (info))
    if not info:
        print(" No info")
        print('\n')
        return
    addresses = ["%s:%d" % (addr, cast(int, info.port)) for addr in info.parsed_scoped_addresses()]
    print(" Addresses: %s" % ", ".join(addresses))
    print(" Weight: %d, priority: %d" % (info.weight, info.priority))
    print(f" Server: {info.server}")
    if not info.properties:
        print(" No properties")
    else:
        print(" Properties are:")
        for key, value in info.properties.items():
            print(f" {key}: {value}")
    print('\n')
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    parser.add_argument('--find', action='store_true', help='Browse all available services')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--v6', action='store_true')
    version_group.add_argument('--v6-only', action='store_true')
    args = parser.parse_args()

    if args.debug:
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    zeroconf = Zeroconf(ip_version=ip_version)

    services = ["_http._tcp.local.", "_hap._tcp.local."]
    if args.find:
        # --find: discover every advertised service type instead of the
        # default HTTP/HAP pair.
        services = list(ZeroconfServiceTypes.find(zc=zeroconf))

    print("\nBrowsing %d service(s), press Ctrl-C to exit...\n" % len(services))
    browser = ServiceBrowser(zeroconf, services, handlers=[on_service_state_change])

    try:
        while True:
            sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        zeroconf.close()
python-zeroconf-0.38.3/examples/registration.py 0000775 0000000 0000000 00000002701 14176067602 0021667 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
""" Example of announcing a service (in this case, a fake HTTP server) """
import argparse
import logging
import socket
from time import sleep
from zeroconf import IPVersion, ServiceInfo, Zeroconf
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')
    version_group = parser.add_mutually_exclusive_group()
    version_group.add_argument('--v6', action='store_true')
    version_group.add_argument('--v6-only', action='store_true')
    args = parser.parse_args()

    if args.debug:
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)
    if args.v6:
        ip_version = IPVersion.All
    elif args.v6_only:
        ip_version = IPVersion.V6Only
    else:
        ip_version = IPVersion.V4Only

    # Describe a fake HTTP service on 127.0.0.1:80.
    desc = {'path': '/~paulsm/'}

    info = ServiceInfo(
        "_http._tcp.local.",
        "Paul's Test Web Site._http._tcp.local.",
        addresses=[socket.inet_aton("127.0.0.1")],
        port=80,
        properties=desc,
        server="ash-2.local.",
    )

    zeroconf = Zeroconf(ip_version=ip_version)
    print("Registration of a service, press Ctrl-C to exit...")
    zeroconf.register_service(info)
    try:
        while True:
            sleep(0.1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always send the goodbye packet before shutting down.
        print("Unregistering...")
        zeroconf.unregister_service(info)
        zeroconf.close()
python-zeroconf-0.38.3/examples/resolver.py 0000775 0000000 0000000 00000001031 14176067602 0021011 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
""" Example of resolving a service with a known name """
import logging
import sys
from zeroconf import Zeroconf
# Service type and instance name this example tries to resolve.
TYPE = '_test._tcp.local.'
NAME = 'My Service Name'

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) > 1:
        # '--debug' is the only accepted command-line argument.
        assert sys.argv[1:] == ['--debug']
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)

    zeroconf = Zeroconf()
    try:
        # get_service_info blocks until the service resolves or times out;
        # prints None if nothing answered.
        print(zeroconf.get_service_info(TYPE, NAME + '.' + TYPE))
    finally:
        zeroconf.close()
python-zeroconf-0.38.3/examples/self_test.py 0000775 0000000 0000000 00000003371 14176067602 0021151 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
import logging
import socket
import sys
from zeroconf import ServiceInfo, Zeroconf, __version__
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) > 1:
        # '--debug' is the only accepted command-line argument.
        assert sys.argv[1:] == ['--debug']
        logging.getLogger('zeroconf').setLevel(logging.DEBUG)

    # Test a few module features, including service registration, service
    # query (for Zoe), and service unregistration.
    print(f"Multicast DNS Service Discovery for Python, version {__version__}")
    r = Zeroconf()
    print("1. Testing registration of a service...")
    desc = {'version': '0.10', 'a': 'test value', 'b': 'another value'}
    addresses = [socket.inet_aton("127.0.0.1")]
    expected = {'127.0.0.1'}
    if socket.has_ipv6:
        # Register a loopback IPv6 address too when the platform supports it.
        addresses.append(socket.inet_pton(socket.AF_INET6, '::1'))
        expected.add('::1')
    info = ServiceInfo(
        "_http._tcp.local.",
        "My Service Name._http._tcp.local.",
        addresses=addresses,
        port=1234,
        properties=desc,
    )
    print(" Registering service...")
    r.register_service(info)
    print(" Registration done.")
    print("2. Testing query of service information...")
    # Querying a name that was never registered should return None.
    print(" Getting ZOE service: %s" % (r.get_service_info("_http._tcp.local.", "ZOE._http._tcp.local.")))
    print(" Query done.")
    print("3. Testing query of own service...")
    queried_info = r.get_service_info("_http._tcp.local.", "My Service Name._http._tcp.local.")
    assert queried_info
    # The resolved addresses must match exactly what was registered above.
    assert set(queried_info.parsed_addresses()) == expected
    print(f" Getting self: {queried_info}")
    print(" Query done.")
    print("4. Testing unregister of service information...")
    r.unregister_service(info)
    print(" Unregister done.")
    r.close()
python-zeroconf-0.38.3/pyproject.toml 0000664 0000000 0000000 00000001157 14176067602 0017702 0 ustar 00root root 0000000 0000000 [tool.black]
line-length = 110
target_version = ['py35', 'py36', 'py37', 'py38']
skip_string_normalization = true
[tool.pylint.BASIC]
class-const-naming-style = "any"
good-names = [
"e",
"er",
"h",
"i",
"id",
"ip",
"os",
"n",
"rr",
"rs",
"s",
"t",
"wr",
"zc",
"_GLOBAL_DONE",
]
[tool.pylint."MESSAGES CONTROL"]
disable = [
"duplicate-code",
"fixme",
"format",
"missing-class-docstring",
"missing-function-docstring",
"too-few-public-methods",
"too-many-arguments",
"too-many-instance-attributes",
"too-many-public-methods"
]
python-zeroconf-0.38.3/requirements-dev.txt 0000664 0000000 0000000 00000000476 14176067602 0021031 0 ustar 00root root 0000000 0000000 autopep8
black;implementation_name=="cpython"
bump2version
coveralls
coverage
flake8
flake8-import-order
ifaddr
mypy;implementation_name=="cpython"
# 0.11.0 breaks things https://github.com/PyCQA/pep8-naming/issues/152
pep8-naming!=0.6.0,!=0.11.0
pylint
pytest
pytest-asyncio
pytest-cov
pytest-timeout
readme_renderer
python-zeroconf-0.38.3/setup.cfg 0000664 0000000 0000000 00000001337 14176067602 0016607 0 ustar 00root root 0000000 0000000 [bumpversion]
current_version = 0.38.3
commit = True
tag = True
tag_name = {new_version}
[bumpversion:file:zeroconf/__init__.py]
search = __version__ = '{current_version}'
replace = __version__ = '{new_version}'
[tool:pytest]
testpaths = tests
[flake8]
show-source = 1
application-import-names = zeroconf
max-line-length = 110
ignore = E203,W503,N818
[mypy]
ignore_missing_imports = true
follow_imports = skip
check_untyped_defs = true
no_implicit_optional = true
warn_incomplete_stub = true
warn_no_return = true
warn_redundant_casts = true
warn_unused_configs = true
warn_unused_ignores = true
warn_return_any = true
disallow_untyped_calls = false
disallow_untyped_defs = true
[mypy-zeroconf.test]
disallow_untyped_defs = false
python-zeroconf-0.38.3/setup.py 0000775 0000000 0000000 00000003446 14176067602 0016506 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python3
from io import open
from os.path import abspath, dirname, join
from setuptools import setup
PROJECT_ROOT = abspath(dirname(__file__))

# The PyPI long description comes straight from the README.
with open(join(PROJECT_ROOT, 'README.rst'), encoding='utf-8') as f:
    readme = f.read()

# Parse __version__ out of zeroconf/__init__.py without importing the package
# (importing would require its dependencies to be installed already).
version = (
    [ln for ln in open(join(PROJECT_ROOT, 'zeroconf', '__init__.py')) if '__version__' in ln][0]
    .split('=')[-1]
    .strip()
    .strip('\'"')
)

setup(
    name='zeroconf',
    version=version,
    description='Pure Python Multicast DNS Service Discovery Library ' '(Bonjour/Avahi compatible)',
    long_description=readme,
    author='Paul Scott-Murphy, William McBrine, Jakub Stasiak',
    url='https://github.com/jstasiak/python-zeroconf',
    package_data={"zeroconf": ["py.typed"]},
    packages=["zeroconf", "zeroconf._protocol", "zeroconf._services", "zeroconf._utils"],
    platforms=['unix', 'linux', 'osx'],
    license='LGPL',
    zip_safe=False,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
        'Operating System :: POSIX',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Topic :: Software Development :: Libraries',
        # NOTE(review): classifiers still advertise 3.5/3.6 while CI tests
        # 3.7-3.10 — confirm which versions are actually supported.
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    keywords=['Bonjour', 'Avahi', 'Zeroconf', 'Multicast DNS', 'Service Discovery', 'mDNS'],
    install_requires=['ifaddr>=0.1.7'],
)
python-zeroconf-0.38.3/tests/ 0000775 0000000 0000000 00000000000 14176067602 0016124 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/tests/__init__.py 0000664 0000000 0000000 00000004561 14176067602 0020243 0 ustar 00root root 0000000 0000000 """ Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
import asyncio
import socket
from functools import lru_cache
from typing import List
import ifaddr
from zeroconf import DNSIncoming, Zeroconf
def _inject_responses(zc: Zeroconf, msgs: List[DNSIncoming]) -> None:
    """Deliver a batch of DNSIncoming responses on the Zeroconf event loop."""
    assert zc.loop is not None

    async def _deliver():
        for incoming in msgs:
            zc.handle_response(incoming)

    # Block the calling thread until the loop has processed every message.
    asyncio.run_coroutine_threadsafe(_deliver(), zc.loop).result()
def _inject_response(zc: Zeroconf, msg: DNSIncoming) -> None:
    """Inject a single DNSIncoming response (thin wrapper over _inject_responses)."""
    _inject_responses(zc, [msg])
def _wait_for_start(zc: Zeroconf) -> None:
    """Block the calling thread until all Zeroconf sockets are up and running."""
    assert zc.loop is not None
    future = asyncio.run_coroutine_threadsafe(zc.async_wait_for_start(), zc.loop)
    future.result()
@lru_cache(maxsize=None)
def has_working_ipv6():
    """Return True if the system can bind an IPv6 address and has an IPv6 adapter."""
    if not socket.has_ipv6:
        return False

    sock = None
    try:
        sock = socket.socket(socket.AF_INET6)
        sock.bind(('::1', 0))
    except Exception:
        return False
    finally:
        if sock:
            sock.close()

    # Binding ::1 is not enough: also require a real IPv6-capable adapter
    # with a known interface index (needed for scoped addresses).
    for iface in ifaddr.get_adapters():
        for addr in iface.ips:
            if addr.is_IPv6 and iface.index is not None:
                return True
    return False
def _clear_cache(zc):
zc.cache.cache.clear()
zc.question_history._history.clear()
python-zeroconf-0.38.3/tests/conftest.py 0000664 0000000 0000000 00000001235 14176067602 0020324 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" conftest for zeroconf tests. """
import threading
import pytest
import unittest
from zeroconf import _core, const
@pytest.fixture(autouse=True)
def verify_threads_ended():
    """Fail any test that leaks a thread past its own lifetime."""
    before = frozenset(threading.enumerate())
    yield
    leaked = frozenset(threading.enumerate()) - before
    assert not leaked
@pytest.fixture
def run_isolated():
    """Change the mDNS port to run the test in isolation.

    Patches the port constant in both ``zeroconf._core`` and
    ``zeroconf.const`` so test traffic does not collide with real mDNS
    traffic on port 5353.
    """
    # Bug fix: this file only does ``import unittest``, which does NOT import
    # the ``unittest.mock`` submodule; attribute access on ``unittest.mock``
    # previously worked only because pytest imports it as a side effect.
    from unittest import mock

    with mock.patch.object(_core, "_MDNS_PORT", 5454), mock.patch.object(
        const, "_MDNS_PORT", 5454
    ):
        yield
python-zeroconf-0.38.3/tests/services/ 0000775 0000000 0000000 00000000000 14176067602 0017747 5 ustar 00root root 0000000 0000000 python-zeroconf-0.38.3/tests/services/__init__.py 0000664 0000000 0000000 00000001713 14176067602 0022062 0 ustar 00root root 0000000 0000000 """ Multicast DNS Service Discovery for Python, v0.14-wmcbrine
Copyright 2003 Paul Scott-Murphy, 2014 William McBrine
This module provides a framework for the use of DNS Service Discovery
using IP multicast.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301
USA
"""
python-zeroconf-0.38.3/tests/services/test_browser.py 0000664 0000000 0000000 00000114416 14176067602 0023052 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._services.browser. """
import asyncio
import logging
import socket
import time
import os
import unittest
from threading import Event
from unittest.mock import patch
import pytest
import zeroconf as r
from zeroconf import DNSPointer, DNSQuestion, const, current_time_millis, millis_to_seconds
import zeroconf._services.browser as _services_browser
from zeroconf import Zeroconf
from zeroconf._services import ServiceStateChange
from zeroconf._services.browser import ServiceBrowser
from zeroconf._services.info import ServiceInfo
from zeroconf.asyncio import AsyncZeroconf
from .. import has_working_ipv6, _inject_response, _wait_for_start
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
    """Remember the zeroconf logger level and force DEBUG for this module's tests."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the logger level captured in setup_module (NOTSET means untouched)."""
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
def test_service_browser_cancel_multiple_times():
    """Test we can cancel a ServiceBrowser multiple times before close."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    browser = r.ServiceBrowser(zc, type_, None, MyServiceListener())

    # Repeated cancellation must be idempotent.
    for _ in range(3):
        browser.cancel()

    zc.close()
def test_service_browser_cancel_multiple_times_after_close():
    """Test we can cancel a ServiceBrowser multiple times after close."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    browser = r.ServiceBrowser(zc, type_, None, MyServiceListener())
    zc.close()

    # Cancelling after the instance is closed must also be idempotent.
    for _ in range(3):
        browser.cancel()
def test_service_browser_started_after_zeroconf_closed():
    """Test starting a ServiceBrowser after close raises RuntimeError."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_hap._tcp.local."

    class MyServiceListener(r.ServiceListener):
        pass

    zc.close()
    with pytest.raises(RuntimeError):
        r.ServiceBrowser(zc, type_, None, MyServiceListener())
def test_multiple_instances_running_close():
    """Test we can shutdown multiple instances."""
    # Each Zeroconf instance must get its own event loop.
    zc = Zeroconf(interfaces=['127.0.0.1'])
    zc2 = Zeroconf(interfaces=['127.0.0.1'])
    zc3 = Zeroconf(interfaces=['127.0.0.1'])

    assert zc.loop != zc2.loop
    assert zc.loop != zc3.loop

    class MyServiceListener(r.ServiceListener):
        pass

    listener = MyServiceListener()
    zc2.add_service_listener("zca._hap._tcp.local.", listener)

    # Closing one instance must not break listener removal on another.
    zc.close()
    zc2.remove_service_listener(listener)
    zc2.close()
    zc3.close()
class TestServiceBrowser(unittest.TestCase):
    """End-to-end ServiceBrowser test driven by hand-crafted mDNS responses."""

    def test_update_record(self):
        enable_ipv6 = has_working_ipv6() and not os.environ.get('SKIP_IPV6')

        # These variables are mutated between injections below; the listener
        # and mock_incoming_msg read them through their closures.
        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_text = b'path=/~matt1/'
        service_address = '10.0.1.2'

        service_v6_address = "2001:db8::1"
        service_v6_second_address = "6001:db8::1"

        service_added_count = 0
        service_removed_count = 0
        service_updated_count = 0
        service_add_event = Event()
        service_removed_event = Event()
        service_updated_event = Event()

        class MyServiceListener(r.ServiceListener):
            def add_service(self, zc, type_, name) -> None:
                nonlocal service_added_count
                service_added_count += 1
                service_add_event.set()

            def remove_service(self, zc, type_, name) -> None:
                nonlocal service_removed_count
                service_removed_count += 1
                service_removed_event.set()

            def update_service(self, zc, type_, name) -> None:
                nonlocal service_updated_count
                service_updated_count += 1
                # Every update must reflect the current closure values.
                service_info = zc.get_service_info(type_, name)
                assert socket.inet_aton(service_address) in service_info.addresses
                if enable_ipv6:
                    assert socket.inet_pton(
                        socket.AF_INET6, service_v6_address
                    ) in service_info.addresses_by_version(r.IPVersion.V6Only)
                    assert socket.inet_pton(
                        socket.AF_INET6, service_v6_second_address
                    ) in service_info.addresses_by_version(r.IPVersion.V6Only)
                assert service_info.text == service_text
                assert service_info.server == service_server
                service_updated_event.set()

        def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming:
            # Build a response packet reflecting the current values of the
            # service_* variables captured from the enclosing scope.
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            assert generated.is_response() is True

            # TTL 0 means "goodbye" (record removal) in mDNS.
            if service_state_change == r.ServiceStateChange.Removed:
                ttl = 0
            else:
                ttl = 120

            generated.add_answer_at_time(
                r.DNSText(
                    service_name, const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, service_text
                ),
                0,
            )

            generated.add_answer_at_time(
                r.DNSService(
                    service_name,
                    const._TYPE_SRV,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    0,
                    0,
                    80,
                    service_server,
                ),
                0,
            )

            # Send the IPv6 address first since we previously
            # had a bug where the IPv4 would be missing if the
            # IPv6 was seen first
            if enable_ipv6:
                generated.add_answer_at_time(
                    r.DNSAddress(
                        service_server,
                        const._TYPE_AAAA,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        socket.inet_pton(socket.AF_INET6, service_v6_address),
                    ),
                    0,
                )
                generated.add_answer_at_time(
                    r.DNSAddress(
                        service_server,
                        const._TYPE_AAAA,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        socket.inet_pton(socket.AF_INET6, service_v6_second_address),
                    ),
                    0,
                )
            generated.add_answer_at_time(
                r.DNSAddress(
                    service_server,
                    const._TYPE_A,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    socket.inet_aton(service_address),
                ),
                0,
            )

            generated.add_answer_at_time(
                r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), 0
            )

            return r.DNSIncoming(generated.packets()[0])

        zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])
        service_browser = r.ServiceBrowser(zeroconf, service_type, listener=MyServiceListener())

        try:
            wait_time = 3

            # service added
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Added))
            service_add_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 0
            assert service_removed_count == 0

            # service SRV updated
            service_updated_event.clear()
            service_server = 'ash-2.local.'
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 1
            assert service_removed_count == 0

            # service TXT updated
            service_updated_event.clear()
            service_text = b'path=/~matt2/'
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 2
            assert service_removed_count == 0

            # service TXT updated - duplicate update should not trigger another service_updated
            service_updated_event.clear()
            service_text = b'path=/~matt2/'
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 2
            assert service_removed_count == 0

            # service A updated
            service_updated_event.clear()
            service_address = '10.0.1.3'
            # Verify we match on uppercase
            service_server = service_server.upper()
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 3
            assert service_removed_count == 0

            # service all updated
            service_updated_event.clear()
            service_server = 'ash-3.local.'
            service_text = b'path=/~matt3/'
            service_address = '10.0.1.3'
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            service_updated_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 4
            assert service_removed_count == 0

            # service removed
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Removed))
            service_removed_event.wait(wait_time)
            assert service_added_count == 1
            assert service_updated_count == 4
            assert service_removed_count == 1

        finally:
            # Cancelling the browser must also deregister its listener.
            assert len(zeroconf.listeners) == 1
            service_browser.cancel()
            time.sleep(0.2)
            assert len(zeroconf.listeners) == 0
            zeroconf.remove_all_service_listeners()
            zeroconf.close()
class TestServiceBrowserMultipleTypes(unittest.TestCase):
    """Verify a single ServiceBrowser can track several service types at once."""

    def test_update_record(self):
        service_names = ['name2._type2._tcp.local.', 'name._type._tcp.local.', 'name._type._udp.local']
        service_types = ['_type2._tcp.local.', '_type._tcp.local.', '_type._udp.local.']

        service_added_count = 0
        service_removed_count = 0
        service_add_event = Event()
        service_removed_event = Event()

        class MyServiceListener(r.ServiceListener):
            # Only signal the events once all three services were seen.
            def add_service(self, zc, type_, name) -> None:
                nonlocal service_added_count
                service_added_count += 1
                if service_added_count == 3:
                    service_add_event.set()

            def remove_service(self, zc, type_, name) -> None:
                nonlocal service_removed_count
                service_removed_count += 1
                if service_removed_count == 3:
                    service_removed_event.set()

        def mock_incoming_msg(
            service_state_change: r.ServiceStateChange, service_type: str, service_name: str, ttl: int
        ) -> r.DNSIncoming:
            # Minimal response: a single PTR record for the given type/name.
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            generated.add_answer_at_time(
                r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), 0
            )
            return r.DNSIncoming(generated.packets()[0])

        zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])
        service_browser = r.ServiceBrowser(zeroconf, service_types, listener=MyServiceListener())

        try:
            wait_time = 3

            # all three services added
            _inject_response(
                zeroconf,
                mock_incoming_msg(r.ServiceStateChange.Added, service_types[0], service_names[0], 120),
            )
            _inject_response(
                zeroconf,
                mock_incoming_msg(r.ServiceStateChange.Added, service_types[1], service_names[1], 120),
            )
            time.sleep(0.1)

            called_with_refresh_time_check = False

            def _mock_get_expiration_time(self, percent):
                nonlocal called_with_refresh_time_check
                # Report the refresh threshold as already reached so the
                # browser issues a refresh query immediately.
                if percent == const._EXPIRE_REFRESH_TIME_PERCENT:
                    called_with_refresh_time_check = True
                    return 0
                return self.created + (percent * self.ttl * 10)

            # Set an expire time that will force a refresh
            with patch("zeroconf.DNSRecord.get_expiration_time", new=_mock_get_expiration_time):
                _inject_response(
                    zeroconf,
                    mock_incoming_msg(r.ServiceStateChange.Added, service_types[0], service_names[0], 120),
                )
                # Add the last record after updating the first one
                # to ensure the service_add_event only gets set
                # after the update
                _inject_response(
                    zeroconf,
                    mock_incoming_msg(r.ServiceStateChange.Added, service_types[2], service_names[2], 120),
                )

            service_add_event.wait(wait_time)
            assert called_with_refresh_time_check is True
            assert service_added_count == 3
            assert service_removed_count == 0

            _inject_response(
                zeroconf,
                mock_incoming_msg(r.ServiceStateChange.Updated, service_types[0], service_names[0], 0),
            )

            # all three services removed
            _inject_response(
                zeroconf,
                mock_incoming_msg(r.ServiceStateChange.Removed, service_types[0], service_names[0], 0),
            )
            _inject_response(
                zeroconf,
                mock_incoming_msg(r.ServiceStateChange.Removed, service_types[1], service_names[1], 0),
            )
            _inject_response(
                zeroconf,
                mock_incoming_msg(r.ServiceStateChange.Removed, service_types[2], service_names[2], 0),
            )
            service_removed_event.wait(wait_time)
            assert service_added_count == 3
            assert service_removed_count == 3
        finally:
            # Cancelling the browser must also deregister its listener.
            assert len(zeroconf.listeners) == 1
            service_browser.cancel()
            time.sleep(0.2)
            assert len(zeroconf.listeners) == 0
            zeroconf.remove_all_service_listeners()
            zeroconf.close()
def test_backoff():
    """Verify browser queries are sent at exponentially increasing intervals."""
    got_query = Event()

    type_ = "_http._tcp.local."
    zeroconf_browser = Zeroconf(interfaces=['127.0.0.1'])
    _wait_for_start(zeroconf_browser)

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    # Simulated clock: time_offset advances manually inside the loop below.
    time_offset = 0.0
    start_time = time.monotonic() * 1000
    initial_query_interval = _services_browser._BROWSER_TIME / 1000

    # Shadows the module-level import on purpose: patched into the browser
    # module so the browser sees the simulated clock.
    def current_time_millis():
        """Current system time in milliseconds"""
        return start_time + time_offset * 1000

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        got_query.set()
        old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope)

    # patch the zeroconf send
    # patch the zeroconf current_time_millis
    # patch the backoff limit to prevent test running forever
    with patch.object(zeroconf_browser, "async_send", send), patch.object(
        zeroconf_browser.question_history, "suppresses", return_value=False
    ), patch.object(_services_browser, "current_time_millis", current_time_millis), patch.object(
        _services_browser, "_BROWSER_BACKOFF_LIMIT", 10
    ), patch.object(
        _services_browser, "_FIRST_QUERY_DELAY_RANDOM_INTERVAL", (0, 0)
    ):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change])

        try:
            # Test that queries are sent at increasing intervals
            sleep_count = 0
            next_query_interval = 0.0
            expected_query_time = 0.0
            while True:
                sleep_count += 1
                got_query.wait(0.1)
                if time_offset == expected_query_time:
                    # A query is due exactly now: one must have been sent.
                    assert got_query.is_set()
                    got_query.clear()
                    if next_query_interval == _services_browser._BROWSER_BACKOFF_LIMIT:
                        # Only need to test up to the point where we've seen a query
                        # after the backoff limit has been hit
                        break
                    elif next_query_interval == 0:
                        next_query_interval = initial_query_interval
                        expected_query_time = initial_query_interval
                    else:
                        # Interval doubles each round, capped at the limit.
                        next_query_interval = min(
                            2 * next_query_interval, _services_browser._BROWSER_BACKOFF_LIMIT
                        )
                        expected_query_time += next_query_interval
                else:
                    # Between scheduled times no query may be sent.
                    assert not got_query.is_set()
                time_offset += initial_query_interval
                zeroconf_browser.loop.call_soon_threadsafe(browser._async_send_ready_queries_schedule_next)
        finally:
            browser.cancel()
            zeroconf_browser.close()
def test_first_query_delay():
    """Verify the first query is delayed.

    https://datatracker.ietf.org/doc/html/rfc6762#section-5.2
    """
    type_ = "_http._tcp.local."
    zeroconf_browser = Zeroconf(interfaces=['127.0.0.1'])
    _wait_for_start(zeroconf_browser)

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_query_time = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet, recording when the first query left."""
        nonlocal first_query_time
        if first_query_time is None:
            first_query_time = current_time_millis()
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        start_time = current_time_millis()
        browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change])
        # Sleep past the maximum first-query delay so the query has gone out
        time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5))
        try:
            # The previous assertion measured the *current* time against the
            # minimum delay, which is trivially true after the sleep above.
            # Assert on the recorded send time of the first query instead.
            assert first_query_time is not None
            assert (
                first_query_time - start_time > _services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[0]
            )
        finally:
            browser.cancel()
            zeroconf_browser.close()
def test_asking_default_is_asking_qm_questions_after_the_first_qu():
    """Verify the service browser's first question is QU and subsequent ones are QM questions."""
    type_ = "_quservice._tcp.local."
    zeroconf_browser = Zeroconf(interfaces=['127.0.0.1'])

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_outgoing = None
    second_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet, capturing the first two queries."""
        nonlocal first_outgoing
        nonlocal second_outgoing
        # Order matters: check for the second query before (possibly)
        # recording the first so one call never fills both slots.
        if first_outgoing is not None and second_outgoing is None:
            second_outgoing = out
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        browser = ServiceBrowser(zeroconf_browser, type_, [on_service_state_change], delay=5)
        # Wait long enough for the first (QU) and second (QM) queries to go out
        time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 120 + 5))
        try:
            # Assert truthiness directly rather than `== True` / `== False`
            # (PEP 8 / flake8 E712).
            assert first_outgoing.questions[0].unicast
            assert not second_outgoing.questions[0].unicast
        finally:
            browser.cancel()
            zeroconf_browser.close()
def test_asking_qm_questions():
    """Verify explicitly asking QM questions."""
    type_ = "_quservice._tcp.local."
    zeroconf_browser = Zeroconf(interfaces=['127.0.0.1'])

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet, capturing the first query."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        browser = ServiceBrowser(
            zeroconf_browser, type_, [on_service_state_change], question_type=r.DNSQuestionType.QM
        )
        # Wait past the maximum first-query delay so the query has gone out
        time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5))
        try:
            # QM questions are multicast, so unicast must be false
            # (direct truthiness assert instead of `== False`, E712)
            assert not first_outgoing.questions[0].unicast
        finally:
            browser.cancel()
            zeroconf_browser.close()
def test_asking_qu_questions():
    """Verify the service browser can ask QU questions."""
    type_ = "_quservice._tcp.local."
    zeroconf_browser = Zeroconf(interfaces=['127.0.0.1'])

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_browser.async_send

    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet, capturing the first query."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_browser, "async_send", send):
        # dummy service callback
        def on_service_state_change(zeroconf, service_type, state_change, name):
            pass

        browser = ServiceBrowser(
            zeroconf_browser, type_, [on_service_state_change], question_type=r.DNSQuestionType.QU
        )
        # Wait past the maximum first-query delay so the query has gone out
        time.sleep(millis_to_seconds(_services_browser._FIRST_QUERY_DELAY_RANDOM_INTERVAL[1] + 5))
        try:
            # QU questions request a unicast response
            # (direct truthiness assert instead of `== True`, E712)
            assert first_outgoing.questions[0].unicast
        finally:
            browser.cancel()
            zeroconf_browser.close()
def test_legacy_record_update_listener():
    """Test a RecordUpdateListener that does not implement update_records."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])

    # The base class must refuse to be used directly
    with pytest.raises(RuntimeError):
        r.RecordUpdateListener().update_record(
            zc, 0, r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL)
        )

    updates = []

    class LegacyRecordUpdateListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def update_record(self, zc: 'Zeroconf', now: float, record: r.DNSRecord) -> None:
            nonlocal updates
            updates.append(record)

    listener = LegacyRecordUpdateListener()

    zc.add_listener(listener, None)

    # dummy service callback
    def on_service_state_change(zeroconf, service_type, state_change, name):
        pass

    # start a browser
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    browser = ServiceBrowser(zc, type_, [on_service_state_change])

    info_service = ServiceInfo(
        type_,
        f'{name}.{type_}',
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    zc.register_service(info_service)

    time.sleep(0.001)

    browser.cancel()

    assert len(updates)
    # The original assertion took len() of a list of booleans, which counted
    # *every* update rather than the matching ones and could never fail once
    # `updates` was non-empty. Assert that at least one update really is a
    # PTR record for the browsed type.
    assert any(isinstance(update, r.DNSPointer) and update.name == type_ for update in updates)

    zc.remove_listener(listener)

    # Removing a second time should not throw
    zc.remove_listener(listener)

    zc.close()
def test_service_browser_is_aware_of_port_changes():
    """Test that the ServiceBrowser is aware of port changes."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # start a browser
    type_ = "_hap._tcp.local."
    registration_name = "xxxyyy.%s" % type_

    callbacks = []

    # dummy service callback
    def on_service_state_change(zeroconf, service_type, state_change, name):
        """Record state changes seen for the registered name only."""
        nonlocal callbacks
        if name == registration_name:
            callbacks.append((service_type, state_change, name))

    browser = ServiceBrowser(zc, type_, [on_service_state_change])

    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])

    def mock_incoming_msg(records) -> r.DNSIncoming:
        """Build a DNSIncoming response carrying the given records."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        for record in records:
            generated.add_answer_at_time(record, 0)
        return r.DNSIncoming(generated.packets()[0])

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]),
    )
    time.sleep(0.1)

    assert callbacks == [('_hap._tcp.local.', ServiceStateChange.Added, 'xxxyyy._hap._tcp.local.')]
    assert zc.get_service_info(type_, registration_name).port == 80

    # Re-announce the SRV record with a new port; the browser must surface
    # this as an Updated state change and the info must reflect port 400.
    info.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    time.sleep(0.1)

    assert callbacks == [
        ('_hap._tcp.local.', ServiceStateChange.Added, 'xxxyyy._hap._tcp.local.'),
        ('_hap._tcp.local.', ServiceStateChange.Updated, 'xxxyyy._hap._tcp.local.'),
    ]
    assert zc.get_service_info(type_, registration_name).port == 400
    browser.cancel()
    zc.close()
def test_service_browser_listeners_update_service():
    """Test that the ServiceBrowser ServiceListener that implements update_service."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # start a browser
    type_ = "_hap._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    callbacks = []

    class MyServiceListener(r.ServiceListener):
        """Listener that records add/remove/update callbacks for the test name."""

        def add_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])

    def mock_incoming_msg(records) -> r.DNSIncoming:
        """Build a DNSIncoming response carrying the given records."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        for record in records:
            generated.add_answer_at_time(record, 0)
        return r.DNSIncoming(generated.packets()[0])

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]),
    )
    time.sleep(0.2)

    # Re-announce with a changed port to trigger the update callback
    info.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    time.sleep(0.2)

    assert callbacks == [
        ('add', type_, registration_name),
        ('update', type_, registration_name),
    ]
    browser.cancel()
    zc.close()
def test_service_browser_listeners_no_update_service():
    """Test that the ServiceBrowser ServiceListener that does not implement update_service."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # start a browser
    type_ = "_hap._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    callbacks = []

    class MyServiceListener:
        """Listener without update_service (and no ServiceListener base)."""

        def add_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])

    def mock_incoming_msg(records) -> r.DNSIncoming:
        """Build a DNSIncoming response carrying the given records."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        for record in records:
            generated.add_answer_at_time(record, 0)
        return r.DNSIncoming(generated.packets()[0])

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]),
    )
    time.sleep(0.2)

    # Change the port; without update_service only the add is recorded
    info.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    time.sleep(0.2)

    assert callbacks == [
        ('add', type_, registration_name),
    ]
    browser.cancel()
    zc.close()
def test_servicebrowser_uses_non_strict_names():
    """Verify we can look for technically invalid names as we cannot change what others do."""
    zc = r.Zeroconf(interfaces=['127.0.0.1'])

    # dummy service callback
    def handler(zeroconf, service_type, state_change, name):
        pass

    # A name that is only mildly non-conforming is accepted
    browser = ServiceBrowser(zc, ["_tivo-videostream._tcp.local."], [handler])
    browser.cancel()

    # Still fail on completely invalid
    with pytest.raises(r.BadTypeInNameException):
        browser = ServiceBrowser(zc, ["tivo-videostream._tcp.local."], [handler])

    zc.close()
def test_group_ptr_queries_with_known_answers():
    """Grouped PTR queries must each fit one packet or carry a single question."""
    now = current_time_millis()
    questions_with_known_answers: _services_browser._QuestionWithKnownAnswers = {}
    for idx in range(120):
        name = f"_hap{idx}._tcp._local."
        question = DNSQuestion(name, const._TYPE_PTR, const._CLASS_IN)
        # Each successive question carries one more known answer than the last
        answers = set()
        for counter in range(idx):
            answers.add(
                DNSPointer(
                    name,
                    const._TYPE_PTR,
                    const._CLASS_IN,
                    4500,
                    f"zoo{counter}.{name}",
                )
            )
        questions_with_known_answers[question] = answers

    outs = _services_browser._group_ptr_queries_with_known_answers(now, True, questions_with_known_answers)
    for out in outs:
        packets = out.packets()
        # If we generate multiple packets there must
        # only be one question
        assert len(packets) == 1 or len(out.questions) == 1
# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_generate_service_query_suppress_duplicate_questions():
    """Generate a service query for sending with zeroconf.send."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    now = current_time_millis()
    name = "_suppresstest._tcp.local."
    question = r.DNSQuestion(name, const._TYPE_PTR, const._CLASS_IN)
    answer = r.DNSPointer(
        name,
        const._TYPE_PTR,
        const._CLASS_IN,
        10000,
        f'known-to-other.{name}',
    )
    other_known_answers = {answer}
    # Seed the question history as if another host already asked this question
    zc.question_history.add_question_at_time(question, now, other_known_answers)
    assert zc.question_history.suppresses(question, now, other_known_answers)

    # The known answer list is different, do not suppress
    outs = _services_browser.generate_service_query(zc, now, [name], multicast=True)
    assert outs

    zc.cache.async_add_records([answer])
    # The known answer list contains all the asked questions in the history
    # we should suppress
    outs = _services_browser.generate_service_query(zc, now, [name], multicast=True)
    assert not outs

    # We do not suppress once the question history expires
    outs = _services_browser.generate_service_query(zc, now + 1000, [name], multicast=True)
    assert outs

    # We do not suppress QU queries ever
    outs = _services_browser.generate_service_query(zc, now, [name], multicast=False)
    assert outs

    zc.question_history.async_expire(now + 2000)
    # No suppression after clearing the history
    outs = _services_browser.generate_service_query(zc, now, [name], multicast=True)
    assert outs

    # The previous query we just sent is still remembered and
    # the next one is suppressed
    outs = _services_browser.generate_service_query(zc, now, [name], multicast=True)
    assert not outs

    await aiozc.async_close()
@pytest.mark.asyncio
async def test_query_scheduler():
    """Verify the QueryScheduler doubles the wait between rounds and honors reschedules."""
    delay = const._BROWSER_TIME
    types_ = {"_hap._tcp.local.", "_http._tcp.local."}
    query_scheduler = _services_browser.QueryScheduler(types_, delay, (0, 0))
    now = current_time_millis()
    query_scheduler.start(now)

    # Test query interval is increasing
    assert query_scheduler.millis_to_wait(now - 1) == 1
    assert query_scheduler.millis_to_wait(now) == 0
    assert query_scheduler.millis_to_wait(now + 1) == 0

    # First round: both types are ready once, then the wait doubles each round
    # (delay, 3*delay, 7*delay, 15*delay measured from `now`)
    assert set(query_scheduler.process_ready_types(now)) == types_
    assert set(query_scheduler.process_ready_types(now)) == set()
    assert query_scheduler.millis_to_wait(now) == pytest.approx(delay, 0.00001)

    assert set(query_scheduler.process_ready_types(now + delay)) == types_
    assert set(query_scheduler.process_ready_types(now + delay)) == set()
    assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 3, 0.00001)

    assert set(query_scheduler.process_ready_types(now + delay * 3)) == types_
    assert set(query_scheduler.process_ready_types(now + delay * 3)) == set()
    assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 7, 0.00001)

    assert set(query_scheduler.process_ready_types(now + delay * 7)) == types_
    assert set(query_scheduler.process_ready_types(now + delay * 7)) == set()
    assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 15, 0.00001)

    assert set(query_scheduler.process_ready_types(now + delay * 15)) == types_
    assert set(query_scheduler.process_ready_types(now + delay * 15)) == set()

    # Test if we reschedule 1 second later, the millis_to_wait goes up by 1
    query_scheduler.reschedule_type("_hap._tcp.local.", now + delay * 16)
    assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 16, 0.00001)
    assert set(query_scheduler.process_ready_types(now + delay * 15)) == set()

    # Test if we reschedule 1 second later... and its ready for processing
    assert set(query_scheduler.process_ready_types(now + delay * 16)) == {"_hap._tcp.local."}
    assert query_scheduler.millis_to_wait(now) == pytest.approx(delay * 31, 0.00001)
    assert set(query_scheduler.process_ready_types(now + delay * 20)) == set()

    assert set(query_scheduler.process_ready_types(now + delay * 31)) == {"_http._tcp.local."}
def test_service_browser_matching():
    """Test that the ServiceBrowser matching does not match partial names."""

    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # start a browser
    type_ = "_http._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    # A type whose name merely *contains* the browsed type as a suffix
    not_match_type_ = "_asustor-looksgood_http._tcp.local."
    not_match_registration_name = "xxxyyy.%s" % not_match_type_
    callbacks = []

    class MyServiceListener(r.ServiceListener):
        """Listener recording callbacks only for the matching registration name."""

        def add_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()

    browser = r.ServiceBrowser(zc, type_, None, listener)

    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])
    should_not_match = ServiceInfo(
        not_match_type_, not_match_registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address]
    )

    def mock_incoming_msg(records) -> r.DNSIncoming:
        """Build a DNSIncoming response carrying the given records."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        for record in records:
            generated.add_answer_at_time(record, 0)
        return r.DNSIncoming(generated.packets()[0])

    _inject_response(
        zc,
        mock_incoming_msg([info.dns_pointer(), info.dns_service(), info.dns_text(), *info.dns_addresses()]),
    )
    # Records for the near-miss type must not produce any callbacks
    _inject_response(
        zc,
        mock_incoming_msg(
            [
                should_not_match.dns_pointer(),
                should_not_match.dns_service(),
                should_not_match.dns_text(),
                *should_not_match.dns_addresses(),
            ]
        ),
    )
    time.sleep(0.2)

    info.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([info.dns_service()]),
    )
    should_not_match.port = 400
    _inject_response(
        zc,
        mock_incoming_msg([should_not_match.dns_service()]),
    )
    time.sleep(0.2)

    assert callbacks == [
        ('add', type_, registration_name),
        ('update', type_, registration_name),
    ]
    browser.cancel()
    zc.close()
python-zeroconf-0.38.3/tests/services/test_info.py 0000664 0000000 0000000 00000070054 14176067602 0022321 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._services.info. """
import logging
import socket
import threading
import os
import unittest
from unittest.mock import patch
from threading import Event
from typing import List
import pytest
import zeroconf as r
from zeroconf import DNSAddress, const
from zeroconf._services.info import ServiceInfo
from zeroconf.asyncio import AsyncZeroconf
from .. import has_working_ipv6, _inject_response
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
    """Enable debug logging on the zeroconf logger, remembering the prior level."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the zeroconf logger to its pre-test level if one was saved."""
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class TestServiceInfo(unittest.TestCase):
    """Tests for ServiceInfo record updates and get_service_info querying."""

    def test_get_name(self):
        """Verify the name accessor can strip the type."""
        desc = {'path': '/~paulsm/'}
        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_address = socket.inet_aton("10.0.1.2")
        info = ServiceInfo(
            service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address]
        )
        assert info.get_name() == "name"

    def test_service_info_rejects_non_matching_updates(self):
        """Verify records with the wrong name are rejected."""
        zc = r.Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_address = socket.inet_aton("10.0.1.2")
        ttl = 120
        now = r.current_time_millis()
        info = ServiceInfo(
            service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address]
        )
        # Verify backwards compatibility with calling with None
        info.update_record(zc, now, None)
        # Matching updates
        info.update_record(
            zc,
            now,
            r.DNSText(
                service_name,
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==',
            ),
        )
        assert info.properties[b"ci"] == b"2"
        info.update_record(
            zc,
            now,
            r.DNSService(
                service_name,
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                0,
                0,
                80,
                'ASH-2.local.',
            ),
        )
        # server_key is the lower-cased form of the advertised server name
        assert info.server_key == 'ash-2.local.'
        assert info.server == 'ASH-2.local.'
        new_address = socket.inet_aton("10.0.1.3")
        info.update_record(
            zc,
            now,
            r.DNSAddress(
                'ASH-2.local.',
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                new_address,
            ),
        )
        assert new_address in info.addresses
        # Non-matching updates
        info.update_record(
            zc,
            now,
            r.DNSText(
                "incorrect.name.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                b'\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==',
            ),
        )
        # Properties must be unchanged by the mismatched TXT record
        assert info.properties[b"ci"] == b"2"
        info.update_record(
            zc,
            now,
            r.DNSService(
                "incorrect.name.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                0,
                0,
                80,
                'ASH-2.local.',
            ),
        )
        assert info.server_key == 'ash-2.local.'
        assert info.server == 'ASH-2.local.'
        new_address = socket.inet_aton("10.0.1.4")
        info.update_record(
            zc,
            now,
            r.DNSAddress(
                "incorrect.name.",
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                new_address,
            ),
        )
        assert new_address not in info.addresses
        zc.close()

    def test_service_info_rejects_expired_records(self):
        """Verify records that are expired are rejected."""
        zc = r.Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_address = socket.inet_aton("10.0.1.2")
        ttl = 120
        now = r.current_time_millis()
        info = ServiceInfo(
            service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address]
        )
        # Matching updates
        info.update_record(
            zc,
            now,
            r.DNSText(
                service_name,
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                ttl,
                b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==',
            ),
        )
        assert info.properties[b"ci"] == b"2"
        # Expired record
        expired_record = r.DNSText(
            service_name,
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            ttl,
            b'\x04ff=0\x04ci=3\x04sf=0\x0bsh=6fLM5A==',
        )
        # Back-date the record with a tiny TTL so it is already expired
        expired_record.set_created_ttl(1000, 1)
        info.update_record(zc, now, expired_record)
        # The expired TXT must not overwrite the current properties
        assert info.properties[b"ci"] == b"2"
        zc.close()

    @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_get_info_partial(self):
        """Verify get_service_info only re-queries for the records still missing."""
        zc = r.Zeroconf(interfaces=['127.0.0.1'])

        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_text = b'path=/~matt1/'
        service_address = '10.0.1.2'
        service_address_v6_ll = 'fe80::52e:c2f2:bc5f:e9c6'
        service_scope_id = 12

        service_info = None
        send_event = Event()
        service_info_event = Event()

        last_sent = None  # type: Optional[r.DNSOutgoing]

        def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
            """Sends an outgoing packet."""
            nonlocal last_sent
            last_sent = out
            send_event.set()

        # patch the zeroconf send
        with patch.object(zc, "async_send", send):

            def mock_incoming_msg(records) -> r.DNSIncoming:
                """Build a DNSIncoming response carrying the given records."""
                generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
                for record in records:
                    generated.add_answer_at_time(record, 0)
                return r.DNSIncoming(generated.packets()[0])

            def get_service_info_helper(zc, type, name):
                """Run get_service_info on a worker thread and record the result."""
                nonlocal service_info
                service_info = zc.get_service_info(type, name)
                service_info_event.set()

            try:
                ttl = 120
                helper_thread = threading.Thread(
                    target=get_service_info_helper, args=(zc, service_type, service_name)
                )
                helper_thread.start()
                wait_time = 1

                # Expect query for SRV, TXT, A, AAAA
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 4
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect query for SRV, A, AAAA
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSText(
                                service_name,
                                const._TYPE_TXT,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                service_text,
                            )
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 3
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect query for A, AAAA
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSService(
                                service_name,
                                const._TYPE_SRV,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                0,
                                0,
                                80,
                                service_server,
                            )
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 2
                # Address questions now target the server name from the SRV record
                assert r.DNSQuestion(service_server, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_server, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                last_sent = None
                assert service_info is None

                # Expect no further queries
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSAddress(
                                service_server,
                                const._TYPE_A,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                socket.inet_pton(socket.AF_INET, service_address),
                            ),
                            r.DNSAddress(
                                service_server,
                                const._TYPE_AAAA,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                socket.inet_pton(socket.AF_INET6, service_address_v6_ll),
                                scope_id=service_scope_id,
                            ),
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is None
                assert service_info is not None

            finally:
                helper_thread.join()
                zc.remove_all_service_listeners()
                zc.close()

    def test_get_info_single(self):
        """Verify one response carrying TXT, SRV and A completes get_service_info."""
        zc = r.Zeroconf(interfaces=['127.0.0.1'])

        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_text = b'path=/~matt1/'
        service_address = '10.0.1.2'

        service_info = None
        send_event = Event()
        service_info_event = Event()

        last_sent = None  # type: Optional[r.DNSOutgoing]

        def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
            """Sends an outgoing packet."""
            nonlocal last_sent
            last_sent = out
            send_event.set()

        # patch the zeroconf send
        with patch.object(zc, "async_send", send):

            def mock_incoming_msg(records) -> r.DNSIncoming:
                """Build a DNSIncoming response carrying the given records."""
                generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
                for record in records:
                    generated.add_answer_at_time(record, 0)
                return r.DNSIncoming(generated.packets()[0])

            def get_service_info_helper(zc, type, name):
                """Run get_service_info on a worker thread and record the result."""
                nonlocal service_info
                service_info = zc.get_service_info(type, name)
                service_info_event.set()

            try:
                ttl = 120
                helper_thread = threading.Thread(
                    target=get_service_info_helper, args=(zc, service_type, service_name)
                )
                helper_thread.start()
                wait_time = 1

                # Expect query for SRV, TXT, A, AAAA
                send_event.wait(wait_time)
                assert last_sent is not None
                assert len(last_sent.questions) == 4
                assert r.DNSQuestion(service_name, const._TYPE_SRV, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_TXT, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_A, const._CLASS_IN) in last_sent.questions
                assert r.DNSQuestion(service_name, const._TYPE_AAAA, const._CLASS_IN) in last_sent.questions
                assert service_info is None

                # Expect no further queries
                last_sent = None
                send_event.clear()
                _inject_response(
                    zc,
                    mock_incoming_msg(
                        [
                            r.DNSText(
                                service_name,
                                const._TYPE_TXT,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                service_text,
                            ),
                            r.DNSService(
                                service_name,
                                const._TYPE_SRV,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                0,
                                0,
                                80,
                                service_server,
                            ),
                            r.DNSAddress(
                                service_server,
                                const._TYPE_A,
                                const._CLASS_IN | const._CLASS_UNIQUE,
                                ttl,
                                socket.inet_pton(socket.AF_INET, service_address),
                            ),
                        ]
                    ),
                )
                send_event.wait(wait_time)
                assert last_sent is None
                assert service_info is not None

            finally:
                helper_thread.join()
                zc.remove_all_service_listeners()
                zc.close()

    def test_service_info_duplicate_properties_txt_records(self):
        """Verify the first property is always used when there are duplicates in a txt record."""
        zc = r.Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-1.local.'
        service_address = socket.inet_aton("10.0.1.2")
        ttl = 120
        now = r.current_time_millis()
        info = ServiceInfo(
            service_type, service_name, 22, 0, 0, desc, service_server, addresses=[service_address]
        )
        info.async_update_records(
            zc,
            now,
            [
                r.RecordUpdate(
                    r.DNSText(
                        service_name,
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==\x04dd=0\x04jl=2\x04qq=0\x0brr=6fLM5A==\x04ci=3',
                    ),
                    None,
                )
            ],
        )
        assert info.properties[b"dd"] == b"0"
        assert info.properties[b"jl"] == b"2"
        # `ci` appears twice in the TXT payload (2 then 3); the first wins
        assert info.properties[b"ci"] == b"2"
        zc.close()
def test_multiple_addresses():
    """Verify address accessors for duplicate, parsed and mixed v4/v6 addresses."""
    type_ = "_http._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)

    # New kwarg way
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address, address])

    assert info.addresses == [address, address]
    assert info.parsed_addresses() == [address_parsed, address_parsed]
    assert info.parsed_scoped_addresses() == [address_parsed, address_parsed]

    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        parsed_addresses=[address_parsed, address_parsed],
    )
    assert info.addresses == [address, address]
    assert info.parsed_addresses() == [address_parsed, address_parsed]
    assert info.parsed_scoped_addresses() == [address_parsed, address_parsed]

    if has_working_ipv6() and not os.environ.get('SKIP_IPV6'):
        address_v6_parsed = "2001:db8::1"
        address_v6 = socket.inet_pton(socket.AF_INET6, address_v6_parsed)
        address_v6_ll_parsed = "fe80::52e:c2f2:bc5f:e9c6"
        address_v6_ll_scoped_parsed = "fe80::52e:c2f2:bc5f:e9c6%12"
        address_v6_ll = socket.inet_pton(socket.AF_INET6, address_v6_ll_parsed)
        interface_index = 12
        # Two equivalent infos: one built from packed addresses, one from strings
        infos = [
            ServiceInfo(
                type_,
                registration_name,
                80,
                0,
                0,
                desc,
                "ash-2.local.",
                addresses=[address, address_v6, address_v6_ll],
                interface_index=interface_index,
            ),
            ServiceInfo(
                type_,
                registration_name,
                80,
                0,
                0,
                desc,
                "ash-2.local.",
                parsed_addresses=[address_parsed, address_v6_parsed, address_v6_ll_parsed],
                interface_index=interface_index,
            ),
        ]
        for info in infos:
            # Plain .addresses yields only the IPv4 entry
            assert info.addresses == [address]
            assert info.addresses_by_version(r.IPVersion.All) == [address, address_v6, address_v6_ll]
            assert info.addresses_by_version(r.IPVersion.V4Only) == [address]
            assert info.addresses_by_version(r.IPVersion.V6Only) == [address_v6, address_v6_ll]
            assert info.parsed_addresses() == [address_parsed, address_v6_parsed, address_v6_ll_parsed]
            assert info.parsed_addresses(r.IPVersion.V4Only) == [address_parsed]
            assert info.parsed_addresses(r.IPVersion.V6Only) == [address_v6_parsed, address_v6_ll_parsed]
            # Scoped output carries the %<interface_index> suffix for link-local v6
            assert info.parsed_scoped_addresses() == [
                address_v6_ll_scoped_parsed,
                address_parsed,
                address_v6_parsed,
            ]
            assert info.parsed_scoped_addresses(r.IPVersion.V4Only) == [address_parsed]
            assert info.parsed_scoped_addresses(r.IPVersion.V6Only) == [
                address_v6_ll_scoped_parsed,
                address_v6_parsed,
            ]
# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_multiple_a_addresses_newest_address_first():
    """Test that info.addresses returns the newest seen address first."""
    type_ = "_http._tcp.local."
    registration_name = "multiarec.%s" % type_
    desc = {'path': '/~paulsm/'}
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    host = "multahost.local."

    # Seed the cache with an older and a newer A record for the same host
    older = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b'\x7f\x00\x00\x01')
    newer = r.DNSAddress(host, const._TYPE_A, const._CLASS_IN, 1000, b'\x7f\x00\x00\x02')
    aiozc.zeroconf.cache.async_add_records([older, newer])

    # New kwarg way
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, host)
    info.load_from_cache(aiozc.zeroconf)

    # The most recently added record must come back first
    assert info.addresses == [b'\x7f\x00\x00\x02', b'\x7f\x00\x00\x01']

    await aiozc.async_close()
@pytest.mark.asyncio
async def test_invalid_a_addresses(caplog):
    """Verify A records whose payload is not a valid address are skipped and logged."""
    service_type = "_http._tcp.local."
    service_name = f"multiarec.{service_type}"
    properties = {'path': '/~paulsm/'}
    server = "multahost.local."
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    bogus_records = [
        r.DNSAddress(server, const._TYPE_A, const._CLASS_IN, 1000, b'a'),
        r.DNSAddress(server, const._TYPE_A, const._CLASS_IN, 1000, b'b'),
    ]
    aiozc.zeroconf.cache.async_add_records(bogus_records)
    # New kwarg way
    info = ServiceInfo(service_type, service_name, 80, 0, 0, properties, server)
    info.load_from_cache(aiozc.zeroconf)
    # Neither bogus payload may surface as an address, and the failure is logged.
    assert not info.addresses
    assert "Encountered invalid address while processing record" in caplog.text
    await aiozc.async_close()
@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
def test_filter_address_by_type_from_service_info():
    """Verify dns_addresses can filter by ipversion."""
    properties = {'path': '/~paulsm/'}
    service_type = "_homeassistant._tcp.local."
    registration_name = f"MyTestHome.{service_type}"
    packed_v4 = socket.inet_aton("10.0.1.2")
    packed_v6 = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    info = ServiceInfo(
        service_type, registration_name, 80, 0, 0, properties, "ash-2.local.", addresses=[packed_v4, packed_v6]
    )

    def unpack(records: List[DNSAddress]):
        """Extract the raw packed address from each DNSAddress record."""
        return [record.address for record in records]

    # No filter / All returns both; V4Only and V6Only select one family each.
    assert unpack(info.dns_addresses()) == [packed_v4, packed_v6]
    assert unpack(info.dns_addresses(version=r.IPVersion.All)) == [packed_v4, packed_v6]
    assert unpack(info.dns_addresses(version=r.IPVersion.V4Only)) == [packed_v4]
    assert unpack(info.dns_addresses(version=r.IPVersion.V6Only)) == [packed_v6]
def test_changing_name_updates_serviceinfo_key():
    """Verify a name change will adjust the underlying key value."""
    service_type = "_homeassistant._tcp.local."
    info_service = ServiceInfo(
        service_type,
        f'MyTestHome.{service_type}',
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    # The key is the lower-cased fully qualified name.
    assert info_service.key == "mytesthome._homeassistant._tcp.local."
    # Renaming must recompute the key.
    info_service.name = "YourTestHome._homeassistant._tcp.local."
    assert info_service.key == "yourtesthome._homeassistant._tcp.local."
def test_serviceinfo_address_updates():
    """Verify adding/removing/setting addresses on ServiceInfo."""
    service_type = "_homeassistant._tcp.local."
    full_name = f'MyTestHome.{service_type}'
    # Verify addresses and parsed_addresses are mutually exclusive
    with pytest.raises(TypeError):
        ServiceInfo(
            service_type,
            full_name,
            80,
            0,
            0,
            {'path': '/~paulsm/'},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
            parsed_addresses=["10.0.1.2"],
        )
    info_service = ServiceInfo(
        service_type,
        full_name,
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    # The addresses attribute is fully replaceable after construction.
    info_service.addresses = [socket.inet_aton("10.0.1.3")]
    assert info_service.addresses == [socket.inet_aton("10.0.1.3")]
def test_serviceinfo_accepts_bytes_or_string_dict():
    """Verify a bytes or string dict can be passed to ServiceInfo.

    The TXT record must encode identically regardless of whether the
    property keys/values are given as bytes, str, or any mix of the two.
    """
    type_ = "_homeassistant._tcp.local."
    name = "MyTestHome"
    addresses = [socket.inet_aton("10.0.1.2")]
    server_name = "ash-2.local."
    # Every bytes/str combination for keys and values must yield the same
    # encoded TXT payload (previously four copy-pasted stanzas).
    for properties in (
        {b'path': b'/~paulsm/'},
        {'path': '/~paulsm/'},
        {b'path': '/~paulsm/'},
        {'path': b'/~paulsm/'},
    ):
        info_service = ServiceInfo(
            type_, f'{name}.{type_}', 80, 0, 0, properties, server_name, addresses=addresses
        )
        assert info_service.dns_text().text == b'\x0epath=/~paulsm/'
def test_asking_qu_questions():
    """Verify explicitly asking QU (questions-unicast) questions sets the unicast bit."""
    type_ = "_quservice._tcp.local."
    zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])
    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf.async_send
    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet, capturing the first one for inspection."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf, "async_send", send):
        zeroconf.get_service_info(f"name.{type_}", type_, 500, question_type=r.DNSQuestionType.QU)
    # `is True` (not `== True`): identity check per PEP 8 / flake8 E712.
    assert first_outgoing.questions[0].unicast is True
    zeroconf.close()
def test_asking_qm_questions():
    """Verify explicitly asking QM (questions-multicast) questions clears the unicast bit."""
    type_ = "_quservice._tcp.local."
    zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])
    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf.async_send
    first_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet, capturing the first one for inspection."""
        nonlocal first_outgoing
        if first_outgoing is None:
            first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf, "async_send", send):
        zeroconf.get_service_info(f"name.{type_}", type_, 500, question_type=r.DNSQuestionType.QM)
    # `is False` (not `== False`): identity check per PEP 8 / flake8 E712.
    assert first_outgoing.questions[0].unicast is False
    zeroconf.close()
def test_request_timeout():
    """Test that the timeout does not throw an exception and finishes close to the actual timeout."""
    zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])
    started = r.current_time_millis()
    assert zeroconf.get_service_info("_notfound.local.", "notthere._notfound.local.") is None
    elapsed = r.current_time_millis() - started
    zeroconf.close()
    # 3000ms for the default timeout
    # 1000ms for loaded systems + schedule overhead
    assert elapsed < 3000 + 1000
@pytest.mark.asyncio
async def test_we_try_four_times_with_random_delay():
    """Verify we try four times even with the random delay."""
    type_ = "_typethatisnothere._tcp.local."
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    # we are going to patch the zeroconf send to check query transmission
    sent_queries = []

    def async_send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Sends an outgoing packet."""
        sent_queries.append(out)

    # patch the zeroconf send
    with patch.object(aiozc.zeroconf, "async_send", async_send):
        await aiozc.async_get_service_info(f"willnotbefound.{type_}", type_)
    await aiozc.async_close()
    # Four query attempts are made before giving up.
    assert len(sent_queries) == 4
python-zeroconf-0.38.3/tests/services/test_registry.py 0000664 0000000 0000000 00000011473 14176067602 0023236 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""Unit tests for zeroconf._services.registry."""
import unittest
import socket
import zeroconf as r
from zeroconf import ServiceInfo
class TestServiceRegistry(unittest.TestCase):
    """Tests for the ServiceRegistry add/remove/lookup behavior."""

    @staticmethod
    def _build_info(registration_name, type_, server="ash-2.local."):
        """Construct a minimal ServiceInfo on port 80 for registry tests."""
        return ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {'path': '/~paulsm/'},
            server,
            addresses=[socket.inet_aton("10.0.1.2")],
        )

    def test_only_register_once(self):
        type_ = "_test-srvc-type._tcp.local."
        registration_name = f"xxxyyy.{type_}"
        info = self._build_info(registration_name, type_)
        registry = r.ServiceRegistry()
        registry.async_add(info)
        # A second add of the same name must be rejected until it is removed.
        self.assertRaises(r.ServiceNameAlreadyRegistered, registry.async_add, info)
        registry.async_remove(info)
        registry.async_add(info)

    def test_register_same_server(self):
        type_ = "_test-srvc-type._tcp.local."
        registration_name = f"xxxyyy.{type_}"
        registration_name2 = f"xxxyyy2.{type_}"
        info = self._build_info(registration_name, type_, server="same.local.")
        info2 = self._build_info(registration_name2, type_, server="same.local.")
        registry = r.ServiceRegistry()
        registry.async_add(info)
        registry.async_add(info2)
        # Both services are reachable via their shared server name.
        assert registry.async_get_infos_server("same.local.") == [info, info2]
        registry.async_remove(info)
        assert registry.async_get_infos_server("same.local.") == [info2]
        registry.async_remove(info2)
        assert registry.async_get_infos_server("same.local.") == []

    def test_unregister_multiple_times(self):
        """Verify we can unregister a service multiple times.

        In production unregister_service and unregister_all_services
        may happen at the same time during shutdown. We want to treat
        this as non-fatal since its expected to happen and it is unlikely
        that the callers know about each other.
        """
        type_ = "_test-srvc-type._tcp.local."
        registration_name = f"xxxyyy.{type_}"
        info = self._build_info(registration_name, type_)
        registry = r.ServiceRegistry()
        registry.async_add(info)
        self.assertRaises(r.ServiceNameAlreadyRegistered, registry.async_add, info)
        registry.async_remove(info)
        # The second remove must be a silent no-op, not an error.
        registry.async_remove(info)

    def test_lookups(self):
        type_ = "_test-srvc-type._tcp.local."
        registration_name = f"xxxyyy.{type_}"
        info = self._build_info(registration_name, type_)
        registry = r.ServiceRegistry()
        registry.async_add(info)
        # All lookup views must expose the single registered service.
        assert registry.async_get_service_infos() == [info]
        assert registry.async_get_info_name(registration_name) == info
        assert registry.async_get_infos_type(type_) == [info]
        assert registry.async_get_infos_server("ash-2.local.") == [info]
        assert registry.async_get_types() == [type_]

    def test_lookups_upper_case_by_lower_case(self):
        type_ = "_test-SRVC-type._tcp.local."
        registration_name = f"Xxxyyy.{type_}"
        info = self._build_info(registration_name, type_, server="ASH-2.local.")
        registry = r.ServiceRegistry()
        registry.async_add(info)
        # Mixed-case registrations are retrievable via lower-cased keys.
        assert registry.async_get_service_infos() == [info]
        assert registry.async_get_info_name(registration_name.lower()) == info
        assert registry.async_get_infos_type(type_.lower()) == [info]
        assert registry.async_get_infos_server("ash-2.local.") == [info]
        assert registry.async_get_types() == [type_.lower()]

    def test_lookups_lower_case_by_upper_case(self):
        type_ = "_test-srvc-type._tcp.local."
        registration_name = f"xxxyyy.{type_}"
        info = self._build_info(registration_name, type_)
        registry = r.ServiceRegistry()
        registry.async_add(info)
        # Lower-case registrations are retrievable via upper-cased keys.
        assert registry.async_get_service_infos() == [info]
        assert registry.async_get_info_name(registration_name.upper()) == info
        assert registry.async_get_infos_type(type_.upper()) == [info]
        assert registry.async_get_infos_server("ASH-2.local.") == [info]
        assert registry.async_get_types() == [type_]
python-zeroconf-0.38.3/tests/services/test_types.py 0000664 0000000 0000000 00000014201 14176067602 0022522 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""Unit tests for zeroconf._services.types."""
import logging
import os
import unittest
import socket
import sys
from unittest.mock import patch
import zeroconf as r
from zeroconf import Zeroconf, ServiceInfo, ZeroconfServiceTypes
from .. import _clear_cache, has_working_ipv6
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
    """Capture the zeroconf logger's level and raise it to DEBUG for this module's tests."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the zeroconf logger level captured by setup_module."""
    # NOTSET means no level was captured; nothing to restore.
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class ServiceTypesQuery(unittest.TestCase):
    """Integration tests for ZeroconfServiceTypes.find() service-type discovery.

    These tests register a service and verify its type is discoverable both
    with a freshly created Zeroconf instance and with an existing one.
    """

    def test_integration_with_listener(self):
        """A registered IPv4 service type is discoverable via find()."""
        type_ = "_test-listen-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        zeroconf_registrar.registry.async_add(info)
        try:
            # Disable duplicate-packet suppression on both protocols so the
            # loopback queries are always processed during the test window.
            with patch.object(
                zeroconf_registrar.engine.protocols[0], "suppress_duplicate_packet", return_value=False
            ), patch.object(
                zeroconf_registrar.engine.protocols[1], "suppress_duplicate_packet", return_value=False
            ):
                service_types = ZeroconfServiceTypes.find(interfaces=['127.0.0.1'], timeout=2)
                assert type_ in service_types
                _clear_cache(zeroconf_registrar)
                # Re-discover using the already-running instance.
                service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
                assert type_ in service_types
        finally:
            zeroconf_registrar.close()

    @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_integration_with_listener_v6_records(self):
        """A service registered with an IPv6 address record is discoverable."""
        type_ = "_test-listenv6rec-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        addr = "2606:2800:220:1:248:1893:25c8:1946"  # example.com
        zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_pton(socket.AF_INET6, addr)],
        )
        zeroconf_registrar.registry.async_add(info)
        try:
            # Disable duplicate-packet suppression on both protocols so the
            # loopback queries are always processed during the test window.
            with patch.object(
                zeroconf_registrar.engine.protocols[0], "suppress_duplicate_packet", return_value=False
            ), patch.object(
                zeroconf_registrar.engine.protocols[1], "suppress_duplicate_packet", return_value=False
            ):
                service_types = ZeroconfServiceTypes.find(interfaces=['127.0.0.1'], timeout=2)
                assert type_ in service_types
                _clear_cache(zeroconf_registrar)
                service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
                assert type_ in service_types
        finally:
            zeroconf_registrar.close()

    @unittest.skipIf(not has_working_ipv6() or sys.platform == 'win32', 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_integration_with_listener_ipv6(self):
        """Discovery works when the registrar itself runs IPv6-only."""
        type_ = "_test-listenv6ip-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        addr = "2606:2800:220:1:248:1893:25c8:1946"  # example.com
        zeroconf_registrar = Zeroconf(ip_version=r.IPVersion.V6Only)
        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_pton(socket.AF_INET6, addr)],
        )
        zeroconf_registrar.registry.async_add(info)
        try:
            # Disable duplicate-packet suppression on both protocols so the
            # loopback queries are always processed during the test window.
            with patch.object(
                zeroconf_registrar.engine.protocols[0], "suppress_duplicate_packet", return_value=False
            ), patch.object(
                zeroconf_registrar.engine.protocols[1], "suppress_duplicate_packet", return_value=False
            ):
                service_types = ZeroconfServiceTypes.find(ip_version=r.IPVersion.V6Only, timeout=2)
                assert type_ in service_types
                _clear_cache(zeroconf_registrar)
                service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
                assert type_ in service_types
        finally:
            zeroconf_registrar.close()

    def test_integration_with_subtype_and_listener(self):
        """A service registered under a subtype is discoverable by its full subtype name."""
        subtype_ = "_subtype._sub"
        type_ = "_listen._tcp.local."
        name = "xxxyyy"
        # Note: discovery returns only DNS-SD type not subtype
        discovery_type = f"{subtype_}.{type_}"
        registration_name = f"{name}.{type_}"
        zeroconf_registrar = Zeroconf(interfaces=['127.0.0.1'])
        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            discovery_type,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        zeroconf_registrar.registry.async_add(info)
        try:
            # Disable duplicate-packet suppression on both protocols so the
            # loopback queries are always processed during the test window.
            with patch.object(
                zeroconf_registrar.engine.protocols[0], "suppress_duplicate_packet", return_value=False
            ), patch.object(
                zeroconf_registrar.engine.protocols[1], "suppress_duplicate_packet", return_value=False
            ):
                service_types = ZeroconfServiceTypes.find(interfaces=['127.0.0.1'], timeout=2)
                assert discovery_type in service_types
                _clear_cache(zeroconf_registrar)
                service_types = ZeroconfServiceTypes.find(zc=zeroconf_registrar, timeout=2)
                assert discovery_type in service_types
        finally:
            zeroconf_registrar.close()
python-zeroconf-0.38.3/tests/test_asyncio.py 0000664 0000000 0000000 00000105514 14176067602 0021210 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""Unit tests for aio.py."""
import asyncio
import logging
import os
import socket
import time
import threading
from unittest.mock import ANY, call, patch, MagicMock
import pytest
from zeroconf.asyncio import AsyncServiceBrowser, AsyncServiceInfo, AsyncZeroconf, AsyncZeroconfServiceTypes
from zeroconf import (
DNSIncoming,
DNSOutgoing,
DNSQuestion,
DNSPointer,
DNSService,
DNSAddress,
DNSText,
NotRunningException,
ServiceStateChange,
Zeroconf,
const,
)
from zeroconf.const import _LISTENER_TIME
from zeroconf._core import AsyncListener
from zeroconf._exceptions import BadTypeInNameException, NonUniqueNameException, ServiceNameAlreadyRegistered
from zeroconf._services import ServiceListener
import zeroconf._services.browser as _services_browser
from zeroconf._services.info import ServiceInfo
from zeroconf._utils.time import current_time_millis
from . import _clear_cache, has_working_ipv6
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
    """Capture the zeroconf logger's level and raise it to DEBUG for this module's tests."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the zeroconf logger level captured by setup_module."""
    # NOTSET means no level was captured; nothing to restore.
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
@pytest.fixture(autouse=True)
def verify_threads_ended():
    """Verify that the threads are not running after the test."""
    before = frozenset(threading.enumerate())
    yield
    after = frozenset(threading.enumerate())
    # Executor and asyncio helper threads are allowed to linger.
    leaked = frozenset(
        thread
        for thread in after - before
        if "asyncio" not in thread.name and "ThreadPoolExecutor" not in thread.name
    )
    assert not leaked
@pytest.mark.asyncio
async def test_async_basic_usage() -> None:
    """An AsyncZeroconf can be created and cleanly closed."""
    instance = AsyncZeroconf(interfaces=['127.0.0.1'])
    await instance.async_close()
@pytest.mark.asyncio
async def test_async_close_twice() -> None:
    """Closing an already-closed AsyncZeroconf must not raise."""
    instance = AsyncZeroconf(interfaces=['127.0.0.1'])
    await instance.async_close()
    # Second close is a no-op.
    await instance.async_close()
@pytest.mark.asyncio
async def test_async_with_sync_passed_in() -> None:
    """Test we can create and close the instance when passing in a sync Zeroconf."""
    sync_zc = Zeroconf(interfaces=['127.0.0.1'])
    wrapper = AsyncZeroconf(zc=sync_zc)
    # The wrapper must reuse the exact Zeroconf object it was handed.
    assert wrapper.zeroconf is sync_zc
    await wrapper.async_close()
@pytest.mark.asyncio
async def test_async_with_sync_passed_in_closed_in_async() -> None:
    """Test caller closes the sync version in async."""
    sync_zc = Zeroconf(interfaces=['127.0.0.1'])
    wrapper = AsyncZeroconf(zc=sync_zc)
    assert wrapper.zeroconf is sync_zc
    # Close the sync instance first; async_close must still succeed.
    sync_zc.close()
    await wrapper.async_close()
@pytest.mark.asyncio
async def test_sync_within_event_loop_executor() -> None:
    """Test sync version still works from an executor within an event loop."""

    def run_sync_zeroconf():
        zc = Zeroconf(interfaces=['127.0.0.1'])
        result = zc.get_service_info("_neverused._tcp.local.", "xneverused._neverused._tcp.local.", 10)
        assert result is None
        zc.close()

    await asyncio.get_event_loop().run_in_executor(None, run_sync_zeroconf)
@pytest.mark.asyncio
async def test_async_service_registration() -> None:
    """Test registering services broadcasts the registration by default."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    calls = []

    class MyListener(ServiceListener):
        # Collects browser callbacks so the sequence can be asserted at the end.
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    aiozc.zeroconf.add_service_listener(type_, listener)
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    # Register, update (new address) and unregister; each call returns an
    # asyncio task that is awaited to completion.
    task = await aiozc.async_register_service(info)
    await task
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    await task
    task = await aiozc.async_unregister_service(new_info)
    await task
    await aiozc.async_close()
    # Exactly one add, one update and one remove must have been observed.
    assert calls == [
        ('add', type_, registration_name),
        ('update', type_, registration_name),
        ('remove', type_, registration_name),
    ]
@pytest.mark.asyncio
async def test_async_service_registration_same_server_different_ports() -> None:
    """Test registering services with the same server with different srv records."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "xxxyyy2"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"
    calls = []

    class MyListener(ServiceListener):
        # Collects browser callbacks so the sequence can be asserted at the end.
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    aiozc.zeroconf.add_service_listener(type_, listener)
    desc = {'path': '/~paulsm/'}
    # Two services share the server name "ash-2.local." but use ports 80 and 81.
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        81,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)
    task = await aiozc.async_unregister_service(info)
    await task
    # After removing the first service, the cache must hold exactly the
    # SRV record of the remaining one.
    entries = aiozc.zeroconf.cache.async_entries_with_server("ash-2.local.")
    assert len(entries) == 1
    assert info2.dns_service() in entries
    await aiozc.async_close()
    assert calls == [
        ('add', type_, registration_name),
        ('add', type_, registration_name2),
        ('remove', type_, registration_name),
        ('remove', type_, registration_name2),
    ]
@pytest.mark.asyncio
async def test_async_service_registration_same_server_same_ports() -> None:
    """Test registering services with the same server with the exact same srv record."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "xxxyyy2"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"
    calls = []

    class MyListener(ServiceListener):
        # Collects browser callbacks so the sequence can be asserted at the end.
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    aiozc.zeroconf.add_service_listener(type_, listener)
    desc = {'path': '/~paulsm/'}
    # Two services share both the server name "ash-2.local." and port 80.
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)
    task = await aiozc.async_unregister_service(info)
    await task
    # After removing the first service, the cache must hold exactly the
    # SRV record of the remaining one.
    entries = aiozc.zeroconf.cache.async_entries_with_server("ash-2.local.")
    assert len(entries) == 1
    assert info2.dns_service() in entries
    await aiozc.async_close()
    assert calls == [
        ('add', type_, registration_name),
        ('add', type_, registration_name2),
        ('remove', type_, registration_name),
        ('remove', type_, registration_name2),
    ]
@pytest.mark.asyncio
async def test_async_service_registration_name_conflict() -> None:
    """Test registering services throws on name conflict."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test-srvc2-type._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    await (await aiozc.async_register_service(info))
    # Registering the exact same service again conflicts.
    with pytest.raises(NonUniqueNameException):
        task = await aiozc.async_register_service(info)
        await task
    # cooperating_responders changes the exception type raised on conflict.
    with pytest.raises(ServiceNameAlreadyRegistered):
        task = await aiozc.async_register_service(info, cooperating_responders=True)
        await task
    conflicting_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-3.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    # A different service under the same name also conflicts.
    with pytest.raises(NonUniqueNameException):
        task = await aiozc.async_register_service(conflicting_info)
        await task
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_async_service_registration_name_does_not_match_type() -> None:
    """Test registering services throws when the name does not match the type."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test-srvc3-type._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    # Corrupt the type after construction so validation fires at registration time.
    info.type = "_wrong._tcp.local."
    with pytest.raises(BadTypeInNameException):
        task = await aiozc.async_register_service(info)
        await task
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_async_tasks() -> None:
    """Test awaiting broadcast tasks"""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test-srvc4-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    calls = []

    class MyListener(ServiceListener):
        # Collects browser callbacks so the sequence can be asserted at the end.
        def add_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, zeroconf: Zeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    aiozc.zeroconf.add_service_listener(type_, listener)
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    # Each register/update/unregister call must return an asyncio.Task
    # that can be awaited for completion.
    task = await aiozc.async_register_service(info)
    assert isinstance(task, asyncio.Task)
    await task
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    assert isinstance(task, asyncio.Task)
    await task
    task = await aiozc.async_unregister_service(new_info)
    assert isinstance(task, asyncio.Task)
    await task
    await aiozc.async_close()
    assert calls == [
        ('add', type_, registration_name),
        ('update', type_, registration_name),
        ('remove', type_, registration_name),
    ]
@pytest.mark.asyncio
async def test_async_wait_unblocks_on_update() -> None:
    """Test async_wait will unblock on update."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test-srvc4-type._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    # Should unblock due to update from the
    # registration
    start = current_time_millis()
    await aiozc.zeroconf.async_wait(50000)
    assert current_time_millis() - start < 3000
    await task
    # With nothing pending the short wait simply times out quickly.
    start = current_time_millis()
    await aiozc.zeroconf.async_wait(50)
    assert current_time_millis() - start < 1000
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_service_info_async_request() -> None:
    """Test registering services broadcasts and query with AsyncServiceInfo.async_request."""
    if not has_working_ipv6() or os.environ.get('SKIP_IPV6'):
        pytest.skip('Requires IPv6')
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test1-srvc-type._tcp.local."
    name = "xxxyyy"
    name2 = "abc"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name2}.{type_}"
    # Start a tasks BEFORE the registration that will keep trying
    # and see the registration a bit later
    get_service_info_task1 = asyncio.ensure_future(aiozc.async_get_service_info(type_, registration_name))
    await asyncio.sleep(_LISTENER_TIME / 1000 / 2)
    get_service_info_task2 = asyncio.ensure_future(aiozc.async_get_service_info(type_, registration_name))
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-1.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    info2 = ServiceInfo(
        type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-5.local.",
        addresses=[socket.inet_aton("10.0.1.5")],
    )
    tasks = []
    tasks.append(await aiozc.async_register_service(info))
    tasks.append(await aiozc.async_register_service(info2))
    await asyncio.gather(*tasks)
    # Both in-flight lookups must resolve once the registrations land.
    aiosinfo = await get_service_info_task1
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.2")]
    aiosinfo = await get_service_info_task2
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.2")]
    aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.2")]
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3"), socket.inet_pton(socket.AF_INET6, "6001:db8::1")],
    )
    task = await aiozc.async_update_service(new_info)
    await task
    # Only the IPv4 address is asserted below even though an AAAA record
    # was registered as well.
    aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.3")]
    aiosinfos = await asyncio.gather(
        aiozc.async_get_service_info(type_, registration_name),
        aiozc.async_get_service_info(type_, registration_name2),
    )
    assert aiosinfos[0] is not None
    assert aiosinfos[0].addresses == [socket.inet_aton("10.0.1.3")]
    assert aiosinfos[1] is not None
    assert aiosinfos[1].addresses == [socket.inet_aton("10.0.1.5")]
    aiosinfo = AsyncServiceInfo(type_, registration_name)
    _clear_cache(aiozc.zeroconf)
    # Generating the race condition is almost impossible
    # without patching since its a TOCTOU race
    with patch("zeroconf.asyncio.AsyncServiceInfo._is_complete", False):
        await aiosinfo.async_request(aiozc.zeroconf, 3000)
    assert aiosinfo is not None
    assert aiosinfo.addresses == [socket.inet_aton("10.0.1.3")]
    task = await aiozc.async_unregister_service(new_info)
    await task
    # After unregistration the lookup must come back empty.
    aiosinfo = await aiozc.async_get_service_info(type_, registration_name)
    assert aiosinfo is None
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_async_service_browser() -> None:
    """Test AsyncServiceBrowser."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test9-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    calls = []

    class MyListener(ServiceListener):
        # Collects browser callbacks so the sequence can be asserted at the end.
        def add_service(self, aiozc: AsyncZeroconf, type: str, name: str) -> None:
            calls.append(("add", type, name))

        def remove_service(self, aiozc: AsyncZeroconf, type: str, name: str) -> None:
            calls.append(("remove", type, name))

        def update_service(self, aiozc: AsyncZeroconf, type: str, name: str) -> None:
            calls.append(("update", type, name))

    listener = MyListener()
    await aiozc.async_add_service_listener(type_, listener)
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await aiozc.async_register_service(info)
    await task
    new_info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.3")],
    )
    task = await aiozc.async_update_service(new_info)
    await task
    task = await aiozc.async_unregister_service(new_info)
    await task
    # Give the event loop a moment to deliver the final callbacks.
    await aiozc.zeroconf.async_wait(1)
    await aiozc.async_close()
    assert calls == [
        ('add', type_, registration_name),
        ('update', type_, registration_name),
        ('remove', type_, registration_name),
    ]
@pytest.mark.asyncio
async def test_async_context_manager() -> None:
    """Test using an async context manager."""
    type_ = "_test10-sr-type._tcp.local."
    registration_name = "xxxyyy." + type_
    # The async context manager must keep the instance running for the body
    # and close it on exit.
    async with AsyncZeroconf(interfaces=['127.0.0.1']) as aiozc:
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            {'path': '/~paulsm/'},
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        await (await aiozc.async_register_service(info))
        assert (await aiozc.async_get_service_info(type_, registration_name)) is not None
@pytest.mark.asyncio
async def test_async_unregister_all_services() -> None:
    """Test unregistering all services."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    type_ = "_test1-srvc-type._tcp.local."
    registration_name = f"xxxyyy.{type_}"
    registration_name2 = f"abc.{type_}"
    desc = {'path': '/~paulsm/'}

    def _build(reg_name: str, server: str, address: str) -> ServiceInfo:
        """Create a ServiceInfo for this test's service type."""
        return ServiceInfo(
            type_,
            reg_name,
            80,
            0,
            0,
            desc,
            server,
            addresses=[socket.inet_aton(address)],
        )

    info = _build(registration_name, "ash-1.local.", "10.0.1.2")
    info2 = _build(registration_name2, "ash-5.local.", "10.0.1.5")
    # Register both services concurrently.
    await asyncio.gather(
        await aiozc.async_register_service(info),
        await aiozc.async_register_service(info2),
    )
    results = await asyncio.gather(
        aiozc.async_get_service_info(type_, registration_name),
        aiozc.async_get_service_info(type_, registration_name2),
    )
    assert results[0] is not None
    assert results[1] is not None
    await aiozc.async_unregister_all_services()
    # Clear the cache so the lookups below cannot be satisfied locally.
    _clear_cache(aiozc.zeroconf)
    results = await asyncio.gather(
        aiozc.async_get_service_info(type_, registration_name),
        aiozc.async_get_service_info(type_, registration_name2),
    )
    assert results[0] is None
    assert results[1] is None
    # Verify we can call again
    await aiozc.async_unregister_all_services()
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_async_zeroconf_service_types():
    """Verify AsyncZeroconfServiceTypes.async_find discovers a registered type.

    Exercises both code paths: creating a fresh AsyncZeroconf internally and
    reusing a caller-supplied instance via ``aiozc=``.
    """
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1'])
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_,
        registration_name,
        80,
        0,
        0,
        desc,
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    task = await zeroconf_registrar.async_register_service(info)
    await task
    # Ensure we do not clear the cache until after the last broadcast is processed
    await asyncio.sleep(0.2)
    _clear_cache(zeroconf_registrar.zeroconf)
    try:
        # Path 1: async_find spins up its own AsyncZeroconf instance.
        service_types = await AsyncZeroconfServiceTypes.async_find(interfaces=['127.0.0.1'], timeout=2)
        assert type_ in service_types
        _clear_cache(zeroconf_registrar.zeroconf)
        # Path 2: async_find reuses the already-running registrar instance.
        service_types = await AsyncZeroconfServiceTypes.async_find(aiozc=zeroconf_registrar, timeout=2)
        assert type_ in service_types
    finally:
        await zeroconf_registrar.async_close()
@pytest.mark.asyncio
async def test_guard_against_running_serviceinfo_request_event_loop() -> None:
    """Test that running ServiceInfo.request from the event loop throws."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    # The blocking request() API must refuse to run inside the event loop,
    # since it would block the very loop it needs to make progress.
    info = AsyncServiceInfo("_hap._tcp.local.", "doesnotmatter._hap._tcp.local.")
    with pytest.raises(RuntimeError):
        info.request(aiozc.zeroconf, 3000)
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_service_browser_instantiation_generates_add_events_from_cache():
    """Test that the ServiceBrowser will generate Add events with the existing cache when starting."""
    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    type_ = "_hap._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    callbacks = []

    class MyServiceListener(ServiceListener):
        # Only record events for the name under test to keep the assertion
        # immune to unrelated mDNS traffic on the interface.
        def add_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()
    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])
    # Seed the cache directly (no network round-trip) before the browser starts.
    zc.cache.async_add_records(
        [info.dns_pointer(), info.dns_service(), *info.dns_addresses(), info.dns_text()]
    )
    browser = AsyncServiceBrowser(zc, type_, None, listener)
    # One event-loop turn is enough for the browser to replay the cache.
    await asyncio.sleep(0)
    assert callbacks == [
        ('add', type_, registration_name),
    ]
    await browser.async_cancel()
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_integration():
    """End-to-end browser/registrar test: answers in rescheduled queries must
    only be included while their remaining TTL exceeds half the original TTL.
    """
    service_added = asyncio.Event()
    service_removed = asyncio.Event()
    unexpected_ttl = asyncio.Event()
    got_query = asyncio.Event()

    type_ = "_http._tcp.local."
    registration_name = "xxxyyy.%s" % type_

    def on_service_state_change(zeroconf, service_type, state_change, name):
        # Track only the service under test.
        if name == registration_name:
            if state_change is ServiceStateChange.Added:
                service_added.set()
            elif state_change is ServiceStateChange.Removed:
                service_removed.set()

    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zeroconf_browser = aiozc.zeroconf
    await zeroconf_browser.async_wait_for_start()

    # we are going to patch the zeroconf send to check packet sizes
    old_send = zeroconf_browser.async_send

    time_offset = 0.0

    def _new_current_time_millis():
        """Current system time in milliseconds"""
        # Adds the simulated time shift so the browser believes time passed.
        return (time.monotonic() * 1000) + (time_offset * 1000)

    expected_ttl = const._DNS_HOST_TTL
    nbr_answers = 0

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT, v6_flow_scope=()):
        """Sends an outgoing packet."""
        pout = DNSIncoming(out.packets()[0])
        nonlocal nbr_answers
        for answer in pout.answers:
            nbr_answers += 1
            if not answer.ttl > expected_ttl / 2:
                unexpected_ttl.set()
        got_query.set()
        old_send(out, addr=addr, port=port, v6_flow_scope=v6_flow_scope)

    assert len(zeroconf_browser.engine.protocols) == 2

    aio_zeroconf_registrar = AsyncZeroconf(interfaces=['127.0.0.1'])
    zeroconf_registrar = aio_zeroconf_registrar.zeroconf
    await aio_zeroconf_registrar.zeroconf.async_wait_for_start()
    assert len(zeroconf_registrar.engine.protocols) == 2

    # patch the zeroconf send
    # patch the zeroconf current_time_millis
    # patch the backoff limit to ensure we always get one query every 1/4 of the DNS TTL
    # Disable duplicate question suppression and duplicate packet suppression for this test as it works
    # by asking the same question over and over
    with patch.object(
        zeroconf_registrar.engine.protocols[0], "suppress_duplicate_packet", return_value=False
    ), patch.object(
        zeroconf_registrar.engine.protocols[1], "suppress_duplicate_packet", return_value=False
    ), patch.object(
        zeroconf_browser.engine.protocols[0], "suppress_duplicate_packet", return_value=False
    ), patch.object(
        zeroconf_browser.engine.protocols[1], "suppress_duplicate_packet", return_value=False
    ), patch.object(
        zeroconf_browser.question_history, "suppresses", return_value=False
    ), patch.object(
        zeroconf_browser, "async_send", send
    ), patch(
        "zeroconf._services.browser.current_time_millis", _new_current_time_millis
    ), patch.object(
        _services_browser, "_BROWSER_BACKOFF_LIMIT", int(expected_ttl / 4)
    ):
        service_added = asyncio.Event()
        service_removed = asyncio.Event()

        browser = AsyncServiceBrowser(zeroconf_browser, type_, [on_service_state_change])

        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
        )
        task = await aio_zeroconf_registrar.async_register_service(info)
        await task

        try:
            await asyncio.wait_for(service_added.wait(), 1)
            assert service_added.is_set()
            # Test that we receive queries containing answers only if the remaining TTL
            # is greater than half the original TTL
            sleep_count = 0
            test_iterations = 50

            while nbr_answers < test_iterations:
                # Increase simulated time shift by 1/4 of the TTL in seconds
                time_offset += expected_ttl / 4
                now = _new_current_time_millis()
                browser.reschedule_type(type_, now, now)
                sleep_count += 1
                await asyncio.wait_for(got_query.wait(), 1)
                got_query.clear()
                # Prevent the test running indefinitely in an error condition
                assert sleep_count < test_iterations * 4
            assert not unexpected_ttl.is_set()

            # Don't remove service, allow close() to cleanup
        finally:
            await aio_zeroconf_registrar.async_close()
            await asyncio.wait_for(service_removed.wait(), 1)
            assert service_removed.is_set()
            await browser.async_cancel()
            await aiozc.async_close()
@pytest.mark.asyncio
async def test_info_asking_default_is_asking_qm_questions_after_the_first_qu():
    """Verify the service info first question is QU and subsequent ones are QM questions."""
    type_ = "_quservice._tcp.local."
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zeroconf_info = aiozc.zeroconf
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    # Register locally so the query loop has something to answer.
    zeroconf_info.registry.async_add(info)

    # we are going to patch the zeroconf send to check query transmission
    old_send = zeroconf_info.async_send

    first_outgoing = None
    second_outgoing = None

    def send(out, addr=const._MDNS_ADDR, port=const._MDNS_PORT):
        """Capture the first two outgoing queries, then forward the packet."""
        nonlocal first_outgoing
        nonlocal second_outgoing
        if out.questions:
            if first_outgoing is not None and second_outgoing is None:
                second_outgoing = out
            if first_outgoing is None:
                first_outgoing = out
        old_send(out, addr=addr, port=port)

    # patch the zeroconf send
    with patch.object(zeroconf_info, "async_send", send):
        aiosinfo = AsyncServiceInfo(type_, registration_name)
        # Patch _is_complete so we send multiple times
        with patch("zeroconf.asyncio.AsyncServiceInfo._is_complete", False):
            await aiosinfo.async_request(aiozc.zeroconf, 1200)
    try:
        # Guard first so a missing capture reads as an assertion failure
        # instead of an AttributeError on None.
        assert first_outgoing is not None
        assert second_outgoing is not None
        # First question requests a unicast response (QU); later ones are QM.
        assert first_outgoing.questions[0].unicast is True
        assert second_outgoing.questions[0].unicast is False
    finally:
        await aiozc.async_close()
@pytest.mark.asyncio
async def test_service_browser_ignores_unrelated_updates():
    """Test that the ServiceBrowser ignores unrelated updates."""
    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    type_ = "_veryuniqueone._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    callbacks = []

    class MyServiceListener(ServiceListener):
        # Only record events for the registered name so an unrelated record
        # leaking through would be visible in the final assertion.
        def add_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("add", type_, name))

        def remove_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("remove", type_, name))

        def update_service(self, zc, type_, name) -> None:
            nonlocal callbacks
            if name == registration_name:
                callbacks.append(("update", type_, name))

    listener = MyServiceListener()
    desc = {'path': '/~paulsm/'}
    address_parsed = "10.0.1.2"
    address = socket.inet_aton(address_parsed)
    info = ServiceInfo(type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[address])
    # Seed the cache with the watched service plus an unrelated SRV record.
    zc.cache.async_add_records(
        [
            info.dns_pointer(),
            info.dns_service(),
            *info.dns_addresses(),
            info.dns_text(),
            DNSService(
                "zoom._unrelated._tcp.local.",
                const._TYPE_SRV,
                const._CLASS_IN,
                const._DNS_HOST_TTL,
                0,
                0,
                81,
                'unrelated.local.',
            ),
        ]
    )
    browser = AsyncServiceBrowser(zc, type_, None, listener)
    # Inject a response that contains only records for the unrelated service.
    generated = DNSOutgoing(const._FLAGS_QR_RESPONSE)
    generated.add_answer_at_time(
        DNSPointer(
            "_unrelated._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            "zoom._unrelated._tcp.local.",
        ),
        0,
    )
    generated.add_answer_at_time(
        DNSAddress("unrelated.local.", const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b"1234"),
        0,
    )
    generated.add_answer_at_time(
        DNSText(
            "zoom._unrelated._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b"zoom",
        ),
        0,
    )
    zc.handle_response(DNSIncoming(generated.packets()[0]))
    await browser.async_cancel()
    await asyncio.sleep(0)
    # Only the initial add for the watched service should have fired.
    assert callbacks == [
        ('add', type_, registration_name),
    ]
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_async_request_timeout():
    """Test that the timeout does not throw an exception and finishes close to the actual timeout."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    await aiozc.zeroconf.async_wait_for_start()
    start_time = current_time_millis()
    assert await aiozc.async_get_service_info("_notfound.local.", "notthere._notfound.local.") is None
    end_time = current_time_millis()
    await aiozc.async_close()
    # Budget: 3000ms default timeout plus 1000ms slack for loaded
    # systems and scheduling overhead.
    elapsed = end_time - start_time
    assert elapsed < 3000 + 1000
@pytest.mark.asyncio
async def test_async_request_non_running_instance():
    """Test that the async_request throws when zeroconf is not running."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    # Close first so the lookup below runs against a stopped instance.
    await aiozc.async_close()
    with pytest.raises(NotRunningException):
        await aiozc.async_get_service_info("_notfound.local.", "notthere._notfound.local.")
@pytest.mark.asyncio
async def test_legacy_unicast_response(run_isolated):
    """Verify legacy unicast responses include questions and correct id."""
    type_ = "_mservice._tcp.local."
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    await aiozc.zeroconf.async_wait_for_start()
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    aiozc.zeroconf.registry.async_add(info)
    # Build a unicast query with a non-zero id; the source port below is not
    # 5353, which makes this a legacy unicast query.
    query = DNSOutgoing(const._FLAGS_QR_QUERY, multicast=False, id_=888)
    question = DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    protocol = aiozc.zeroconf.engine.protocols[0]
    with patch.object(aiozc.zeroconf, "async_send") as send_mock:
        protocol.datagram_received(query.packets()[0], ('127.0.0.1', 6503))
    calls = send_mock.mock_calls
    # Verify the response is sent back on the socket it was received from
    assert calls == [call(ANY, '127.0.0.1', 6503, (), protocol.transport)]
    outgoing = send_mock.call_args[0][0]
    assert isinstance(outgoing, DNSOutgoing)
    # Legacy unicast replies must echo the original questions and query id.
    assert outgoing.questions == [question]
    assert outgoing.id == query.id
    await aiozc.async_close()
python-zeroconf-0.38.3/tests/test_cache.py 0000664 0000000 0000000 00000021102 14176067602 0020574 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._cache. """
import logging
import unittest
import unittest.mock
import zeroconf as r
from zeroconf import const
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
    """Remember the zeroconf logger's level and force DEBUG for the test run."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the zeroconf logger's original level if one was captured."""
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class TestDNSCache(unittest.TestCase):
    """Unit tests for DNSCache add/remove/lookup semantics."""

    def test_order(self):
        """get() returns the most recently added of two equal records."""
        record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        entry = r.DNSEntry('a', const._TYPE_SOA, const._CLASS_IN)
        cached_record = cache.get(entry)
        assert cached_record == record2

    def test_adding_same_record_to_cache_different_ttls_get(self):
        """We should always get back the last entry we added if there are different TTLs.

        This ensures we only have one source of truth for TTLs as a record cannot
        be both expired and not expired.
        """
        # NOTE(review): this method used to share its name with the test below,
        # so Python silently shadowed it and it never ran.  The shadowed body
        # also passed a record object to r.DNSEntry(), which expects
        # (name, type, class); both issues are fixed here.
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 10, b'a')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        entry = r.DNSEntry(record2.name, record2.type, record2.class_)
        cached_record = cache.get(entry)
        assert cached_record == record2

    def test_adding_same_record_to_cache_different_ttls_get_all(self):
        """Verify we only get one record back.

        The last record added should replace the previous since two
        records with different ttls are __eq__. This ensures we
        only have one source of truth for TTLs as a record cannot
        be both expired and not expired.
        """
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 10, b'a')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        cached_records = cache.get_all_by_details('a', const._TYPE_A, const._CLASS_IN)
        assert cached_records == [record2]

    def test_cache_empty_does_not_leak_memory_by_leaving_empty_list(self):
        """Removing the last record for a name drops the name key entirely."""
        record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert 'a' in cache.cache
        cache.async_remove_records([record1, record2])
        assert 'a' not in cache.cache

    def test_cache_empty_multiple_calls(self):
        """Emptying the cache leaves no residual name key.

        NOTE(review): despite the name, this only removes once; kept as-is
        rather than asserting double-remove behavior the cache may not
        support — confirm intent against upstream history.
        """
        record1 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert 'a' in cache.cache
        cache.async_remove_records([record1, record2])
        assert 'a' not in cache.cache
class TestDNSAsyncCacheAPI(unittest.TestCase):
    """Exercise the async_* (event-loop-only) DNSCache accessors."""

    @staticmethod
    def _cache_with(records):
        """Return a DNSCache preloaded with the given records."""
        cache = r.DNSCache()
        cache.async_add_records(records)
        return cache

    def test_async_get_unique(self):
        """async_get_unique returns the matching cached record."""
        rec_a = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        rec_b = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b')
        cache = self._cache_with([rec_a, rec_b])
        assert cache.async_get_unique(rec_a) == rec_a
        assert cache.async_get_unique(rec_b) == rec_b

    def test_async_all_by_details(self):
        """async_all_by_details returns every record for (name, type, class)."""
        rec_a = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        rec_b = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b')
        cache = self._cache_with([rec_a, rec_b])
        assert set(cache.async_all_by_details('a', const._TYPE_A, const._CLASS_IN)) == {rec_a, rec_b}

    def test_async_entries_with_server(self):
        """async_entries_with_server matches the SRV server case-insensitively."""
        srv_85 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab'
        )
        srv_80 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab'
        )
        cache = self._cache_with([srv_85, srv_80])
        for server in ('ab', 'AB'):
            assert set(cache.async_entries_with_server(server)) == {srv_85, srv_80}

    def test_async_entries_with_name(self):
        """async_entries_with_name matches the record name case-insensitively."""
        srv_85 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab'
        )
        srv_80 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab'
        )
        cache = self._cache_with([srv_85, srv_80])
        for lookup in ('irrelevant', 'Irrelevant'):
            assert set(cache.async_entries_with_name(lookup)) == {srv_85, srv_80}
# These functions have been seen in other projects so
# we try to maintain a stable API for all the threadsafe getters
class TestDNSCacheAPI(unittest.TestCase):
    """Tests for the threadsafe DNSCache getter API."""

    def test_get(self):
        """get() returns the newest matching record, or None when absent."""
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b')
        record3 = r.DNSAddress('a', const._TYPE_AAAA, const._CLASS_IN, 1, b'ipv6')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2, record3])
        assert cache.get(record1) == record1
        assert cache.get(record2) == record2
        assert cache.get(r.DNSEntry('a', const._TYPE_A, const._CLASS_IN)) == record2
        assert cache.get(r.DNSEntry('a', const._TYPE_AAAA, const._CLASS_IN)) == record3
        assert cache.get(r.DNSEntry('notthere', const._TYPE_A, const._CLASS_IN)) is None

    def test_get_by_details(self):
        """get_by_details() returns the newest record for (name, type, class)."""
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.get_by_details('a', const._TYPE_A, const._CLASS_IN) == record2

    def test_get_all_by_details(self):
        """get_all_by_details() returns every record for (name, type, class)."""
        record1 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'a')
        record2 = r.DNSAddress('a', const._TYPE_A, const._CLASS_IN, 1, b'b')
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.get_all_by_details('a', const._TYPE_A, const._CLASS_IN)) == {record1, record2}

    def test_entries_with_server(self):
        """entries_with_server() matches the SRV server case-insensitively."""
        record1 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab'
        )
        record2 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab'
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.entries_with_server('ab')) == {record1, record2}
        assert set(cache.entries_with_server('AB')) == {record1, record2}

    def test_entries_with_name(self):
        """entries_with_name() matches the record name case-insensitively."""
        record1 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab'
        )
        record2 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab'
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert set(cache.entries_with_name('irrelevant')) == {record1, record2}
        assert set(cache.entries_with_name('Irrelevant')) == {record1, record2}

    def test_current_entry_with_name_and_alias(self):
        """current_entry_with_name_and_alias() selects the PTR whose alias matches."""
        record1 = r.DNSPointer(
            'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'x.irrelevant'
        )
        record2 = r.DNSPointer(
            'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'y.irrelevant'
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.current_entry_with_name_and_alias('irrelevant', 'x.irrelevant') == record1

    def test_name(self):
        """names() lists each cached name once."""
        record1 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 85, 'ab'
        )
        record2 = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab'
        )
        cache = r.DNSCache()
        cache.async_add_records([record1, record2])
        assert cache.names() == ['irrelevant']
python-zeroconf-0.38.3/tests/test_core.py 0000664 0000000 0000000 00000067664 14176067602 0020510 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._core """
import asyncio
import itertools
import logging
import os
import pytest
import socket
import sys
import time
import threading
import unittest
import unittest.mock
from typing import cast
from unittest.mock import patch
import zeroconf as r
from zeroconf import _core, const, Zeroconf, current_time_millis
from zeroconf.asyncio import AsyncZeroconf
from zeroconf._protocol import outgoing
from . import has_working_ipv6, _clear_cache, _inject_response, _wait_for_start
log = logging.getLogger('zeroconf')
original_logging_level = logging.NOTSET
def setup_module():
    """Remember the zeroconf logger's level and force DEBUG for the test run."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the zeroconf logger's original level if one was captured."""
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
def threadsafe_query(zc, protocol, *args):
    """Run handle_query_or_defer on zc's event loop from another thread and block until done."""

    async def make_query():
        protocol.handle_query_or_defer(*args)

    # run_coroutine_threadsafe schedules onto the loop; .result() blocks the
    # calling thread until the coroutine has finished.
    asyncio.run_coroutine_threadsafe(make_query(), zc.loop).result()
# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_reaper():
    """Verify the cache reaper expires records whose TTL has elapsed."""
    with patch.object(_core, "_CACHE_CLEANUP_INTERVAL", 10):
        assert _core._CACHE_CLEANUP_INTERVAL == 10
        aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
        zeroconf = aiozc.zeroconf
        cache = zeroconf.cache
        original_entries = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names())))
        record_with_10s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 10, b'a')
        record_with_1s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        zeroconf.cache.async_add_records([record_with_10s_ttl, record_with_1s_ttl])
        question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
        now = r.current_time_millis()
        other_known_answers = {
            r.DNSPointer(
                "_hap._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN,
                10000,
                'known-to-other._hap._tcp.local.',
            )
        }
        zeroconf.question_history.add_question_at_time(question, now, other_known_answers)
        assert zeroconf.question_history.suppresses(question, now, other_known_answers)
        entries_with_cache = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names())))
        # Sleep past the 1s TTL so the sweep can expire the short-lived record.
        await asyncio.sleep(1.2)
        entries = list(itertools.chain(*(cache.entries_with_name(name) for name in cache.names())))
        assert zeroconf.cache.get(record_with_1s_ttl) is None
        await aiozc.async_close()
        # NOTE(review): the question-history entry is also gone after the
        # sweep/close — presumably pruned by the same cleanup; confirm.
        assert not zeroconf.question_history.suppresses(question, now, other_known_answers)
        assert entries != original_entries
        assert entries_with_cache != original_entries
        assert record_with_10s_ttl in entries
        assert record_with_1s_ttl not in entries
@pytest.mark.asyncio
async def test_reaper_aborts_when_done():
    """Ensure cache cleanup stops when zeroconf is done."""
    with patch.object(_core, "_CACHE_CLEANUP_INTERVAL", 10):
        assert _core._CACHE_CLEANUP_INTERVAL == 10
        aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
        zeroconf = aiozc.zeroconf
        record_with_10s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 10, b'a')
        record_with_1s_ttl = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'b')
        zeroconf.cache.async_add_records([record_with_10s_ttl, record_with_1s_ttl])
        assert zeroconf.cache.get(record_with_10s_ttl) is not None
        assert zeroconf.cache.get(record_with_1s_ttl) is not None
        await aiozc.async_close()
        # After close the reaper must no longer run, so even the 1s record
        # survives past its TTL.
        await asyncio.sleep(1.2)
        assert zeroconf.cache.get(record_with_10s_ttl) is not None
        assert zeroconf.cache.get(record_with_1s_ttl) is not None
class Framework(unittest.TestCase):
    """Launch/close lifecycle and response-handling tests for the sync Zeroconf core."""

    def test_launch_and_close(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default)
        rv.close()

    def test_launch_and_close_context_manager(self):
        # `done` flips to True only after the context manager exits.
        with r.Zeroconf(interfaces=r.InterfaceChoice.All) as rv:
            assert rv.done is False
        assert rv.done is True
        with r.Zeroconf(interfaces=r.InterfaceChoice.Default) as rv:
            assert rv.done is False
        assert rv.done is True

    def test_launch_and_close_unicast(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, unicast=True)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, unicast=True)
        rv.close()

    def test_close_multiple_times(self):
        # close() must be idempotent.
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default)
        rv.close()
        rv.close()

    @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_launch_and_close_v4_v6(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.All)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.All)
        rv.close()

    @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_launch_and_close_v6_only(self):
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.All, ip_version=r.IPVersion.V6Only)
        rv.close()
        rv = r.Zeroconf(interfaces=r.InterfaceChoice.Default, ip_version=r.IPVersion.V6Only)
        rv.close()

    @unittest.skipIf(sys.platform == 'darwin', reason="apple_p2p failure path not testable on mac")
    def test_launch_and_close_apple_p2p_not_mac(self):
        with pytest.raises(RuntimeError):
            r.Zeroconf(apple_p2p=True)

    @unittest.skipIf(sys.platform != 'darwin', reason="apple_p2p happy path only testable on mac")
    def test_launch_and_close_apple_p2p_on_mac(self):
        rv = r.Zeroconf(apple_p2p=True)
        rv.close()

    def test_handle_response(self):
        """Drive handle_response through add/update/split-update/remove cycles."""

        def mock_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming:
            # Build an incoming response for the given state change.
            # Updated -> TXT-only; Removed -> full record set with ttl 0;
            # Added -> full record set with ttl 120.
            ttl = 120
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            if service_state_change == r.ServiceStateChange.Updated:
                generated.add_answer_at_time(
                    r.DNSText(
                        service_name,
                        const._TYPE_TXT,
                        const._CLASS_IN | const._CLASS_UNIQUE,
                        ttl,
                        service_text,
                    ),
                    0,
                )
                return r.DNSIncoming(generated.packets()[0])

            if service_state_change == r.ServiceStateChange.Removed:
                ttl = 0

            generated.add_answer_at_time(
                r.DNSPointer(service_type, const._TYPE_PTR, const._CLASS_IN, ttl, service_name), 0
            )
            generated.add_answer_at_time(
                r.DNSService(
                    service_name,
                    const._TYPE_SRV,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    0,
                    0,
                    80,
                    service_server,
                ),
                0,
            )
            generated.add_answer_at_time(
                r.DNSText(
                    service_name, const._TYPE_TXT, const._CLASS_IN | const._CLASS_UNIQUE, ttl, service_text
                ),
                0,
            )
            generated.add_answer_at_time(
                r.DNSAddress(
                    service_server,
                    const._TYPE_A,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    socket.inet_aton(service_address),
                ),
                0,
            )
            return r.DNSIncoming(generated.packets()[0])

        def mock_split_incoming_msg(service_state_change: r.ServiceStateChange) -> r.DNSIncoming:
            """Mock an incoming message for the case where the packet is split."""
            ttl = 120
            generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
            generated.add_answer_at_time(
                r.DNSAddress(
                    service_server,
                    const._TYPE_A,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    socket.inet_aton(service_address),
                ),
                0,
            )
            generated.add_answer_at_time(
                r.DNSService(
                    service_name,
                    const._TYPE_SRV,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    ttl,
                    0,
                    0,
                    80,
                    service_server,
                ),
                0,
            )
            return r.DNSIncoming(generated.packets()[0])

        service_name = 'name._type._tcp.local.'
        service_type = '_type._tcp.local.'
        service_server = 'ash-2.local.'
        service_text = b'path=/~paulsm/'
        service_address = '10.0.1.2'

        zeroconf = r.Zeroconf(interfaces=['127.0.0.1'])

        try:
            # service added
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Added))
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert cast(r.DNSText, dns_text).text == service_text  # service_text is b'path=/~paulsm/'
            all_dns_text = zeroconf.cache.get_all_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert [dns_text] == all_dns_text

            # https://tools.ietf.org/html/rfc6762#section-10.2
            # Instead of merging this new record additively into the cache in addition
            # to any previous records with the same name, rrtype, and rrclass,
            # all old records with that name, rrtype, and rrclass that were received
            # more than one second ago are declared invalid,
            # and marked to expire from the cache in one second.
            time.sleep(1.1)

            # service updated. currently only text record can be updated
            service_text = b'path=/~humingchun/'
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Updated))
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert cast(r.DNSText, dns_text).text == service_text  # service_text is b'path=/~humingchun/'

            time.sleep(1.1)

            # The split message only has a SRV and A record.
            # This should not evict TXT records from the cache
            _inject_response(zeroconf, mock_split_incoming_msg(r.ServiceStateChange.Updated))
            time.sleep(1.1)
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text is not None
            assert cast(r.DNSText, dns_text).text == service_text  # service_text is b'path=/~humingchun/'

            # service removed
            _inject_response(zeroconf, mock_incoming_msg(r.ServiceStateChange.Removed))
            dns_text = zeroconf.cache.get_by_details(service_name, const._TYPE_TXT, const._CLASS_IN)
            assert dns_text.is_expired(current_time_millis() + 1000)
        finally:
            zeroconf.close()
def test_generate_service_query_set_qu_bit():
    """Verify the first question of a generated service query requests unicast (QU)."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    service = r.ServiceInfo(
        "._hap._tcp.local.",
        "this-host-is-not-used._hap._tcp.local.",
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-2.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    outgoing_query = zc.generate_service_query(service)
    # The QU bit is surfaced as the ``unicast`` flag on the question.
    assert outgoing_query.questions[0].unicast is True
    zc.close()
def test_invalid_packets_ignored_and_does_not_cause_loop_exception():
    """Ensure an invalid packet cannot cause the loop to collapse."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    outgoing_query = r.DNSOutgoing(0)
    data = outgoing_query.packets()[0]
    # Corrupt the packet by splicing junk into the middle of the header.
    data = data[:8] + b'deadbeef' + data[8:]
    parsed = r.DNSIncoming(data)
    assert parsed.valid is False
    # Sending the invalid packet must not raise.
    bogus_out = unittest.mock.Mock()
    bogus_out.packets = lambda: [data]
    zc.send(bogus_out)
    # Neither must an absurdly oversized invalid packet.
    oversized_out = unittest.mock.Mock()
    oversized_out.packets = lambda: [data * 1000]
    zc.send(oversized_out)
    # A valid response sent afterwards must still reach the cache,
    # which proves the receive loop survived the bad input.
    response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    record = r.DNSText(
        "didnotcrashincoming._crash._tcp.local.",
        const._TYPE_TXT,
        const._CLASS_IN | const._CLASS_UNIQUE,
        500,
        b'path=/~paulsm/',
    )
    assert isinstance(record, r.DNSText)
    assert isinstance(record, r.DNSRecord)
    assert isinstance(record, r.DNSEntry)
    response.add_answer_at_time(record, 0)
    zc.send(response)
    time.sleep(0.2)
    zc.close()
    assert zc.cache.get(record) is not None
def test_goodbye_all_services():
    """Verify generating the goodbye query does not change with time."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    # With an empty registry there is nothing to say goodbye to.
    out = zc.generate_unregister_all_services()
    assert out is None
    type_ = "_http._tcp.local."
    registration_name = "xxxyyy.%s" % type_
    desc = {'path': '/~paulsm/'}
    info = r.ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)
    out = zc.generate_unregister_all_services()
    assert out is not None
    first_packet = out.packets()
    # Re-add and regenerate: the goodbye payload must be byte-identical.
    zc.registry.async_add(info)
    out2 = zc.generate_unregister_all_services()
    assert out2 is not None
    # BUG FIX: previously read ``out.packets()`` here, comparing the first
    # outgoing against itself and making the assertion below vacuous.
    second_packet = out2.packets()
    assert second_packet == first_packet
    # Verify the registry is empty (generating goodbyes removes services).
    out3 = zc.generate_unregister_all_services()
    assert out3 is None
    assert zc.registry.async_get_service_infos() == []
    zc.close()
def test_register_service_with_custom_ttl():
    """Registering with an explicit ttl stores records carrying that ttl."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    service = r.ServiceInfo(
        "_homeassistant._tcp.local.",
        "MyTestHome._homeassistant._tcp.local.",
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-90.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zc.register_service(service, ttl=3000)
    # The cached PTR record must carry the custom ttl, not the default.
    assert zc.cache.get(service.dns_pointer()).ttl == 3000
    zc.close()
def test_logging_packets(caplog):
    """Packet contents are only logged when debug logging is enabled."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    service = r.ServiceInfo(
        "_logging._tcp.local.",
        "TLD._logging._tcp.local.",
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-90.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )
    zeroconf_logger = logging.getLogger('zeroconf')
    zeroconf_logger.setLevel(logging.DEBUG)
    caplog.clear()
    zc.register_service(service, ttl=3000)
    # At DEBUG level the outgoing packet is logged.
    assert "Sending to" in caplog.text
    assert zc.cache.get(service.dns_pointer()).ttl == 3000
    zeroconf_logger.setLevel(logging.INFO)
    caplog.clear()
    zc.unregister_service(service)
    # At INFO level the packet log line must be suppressed.
    assert "Sending to" not in caplog.text
    zeroconf_logger.setLevel(logging.DEBUG)
    zc.close()
def test_get_service_info_failure_path():
    """Verify get_service_info returns None when the underlying call fails."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    result = zc.get_service_info("_neverused._tcp.local.", "xneverused._neverused._tcp.local.", 10)
    assert result is None
    zc.close()
def test_sending_unicast():
    """Unicast responses to other hosts must not land in our own cache."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    txt_record = r.DNSText(
        "didnotcrashincoming._crash._tcp.local.",
        const._TYPE_TXT,
        const._CLASS_IN | const._CLASS_UNIQUE,
        500,
        b'path=/~paulsm/',
    )
    response.add_answer_at_time(txt_record, 0)
    # Unicast to an IPv6 documentation address: https://www.iana.org/go/rfc3849
    zc.send(response, "2001:db8::1", const._MDNS_PORT)
    time.sleep(0.2)
    assert zc.cache.get(txt_record) is None
    # Unicast to an IPv4 documentation address (TEST-NET-2).
    zc.send(response, "198.51.100.0", const._MDNS_PORT)
    time.sleep(0.2)
    assert zc.cache.get(txt_record) is None
    # A multicast send loops back to ourselves and is cached.
    zc.send(response)
    time.sleep(0.2)
    assert zc.cache.get(txt_record) is not None
    zc.close()
def test_tc_bit_defers():
    """Multi-packet queries (TC bit set) are deferred per source IP.

    NOTE(review): this relies on ``DNSOutgoing.packets()`` marking every
    packet but the last as truncated — confirm against zeroconf._protocol.
    Truncated packets accumulate in ``protocol._deferred[source_ip]`` with
    a pending timer; the final (non-TC) packet flushes both.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    _wait_for_start(zc)
    type_ = "_tcbitdefer._tcp.local."
    name = "knownname"
    name2 = "knownname2"
    name3 = "knownname3"
    registration_name = f"{name}.{type_}"
    registration2_name = f"{name2}.{type_}"
    registration3_name = f"{name3}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    server_name2 = "ash-3.local."
    server_name3 = "ash-4.local."
    info = r.ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    info2 = r.ServiceInfo(
        type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")]
    )
    info3 = r.ServiceInfo(
        type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)
    zc.registry.async_add(info2)
    zc.registry.async_add(info3)
    protocol = zc.engine.protocols[0]
    now = r.current_time_millis()
    _clear_cache(zc)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    for _ in range(300):
        # Add so many answers we end up with another packet
        generated.add_answer_at_time(info.dns_pointer(), now)
        generated.add_answer_at_time(info2.dns_pointer(), now)
        generated.add_answer_at_time(info3.dns_pointer(), now)
    packets = generated.packets()
    assert len(packets) == 4
    expected_deferred = []
    source_ip = '203.0.113.13'
    # First truncated packet: queued for this source with a pending timer.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    # Second truncated packet appends to the same queue.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    # Re-delivering the same packet (e.g. multi-interface duplicate) must
    # not be queued twice.
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    # Third truncated packet: still deferred.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    # Final packet (no TC bit): the whole batch is processed and both the
    # deferred queue and the timer for this source are cleared.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert source_ip not in protocol._deferred
    assert source_ip not in protocol._timers
    # unregister
    zc.unregister_service(info)
    zc.close()
def test_tc_bit_defers_last_response_missing():
    """Deferred truncated packets time out when the final packet never arrives.

    Each new truncated packet replaces the per-source timer (the old one is
    cancelled); if the closing non-TC packet is never delivered, the timer
    eventually fires and the deferred state is discarded.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    _wait_for_start(zc)
    type_ = "_knowndefer._tcp.local."
    name = "knownname"
    name2 = "knownname2"
    name3 = "knownname3"
    registration_name = f"{name}.{type_}"
    registration2_name = f"{name2}.{type_}"
    registration3_name = f"{name3}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    server_name2 = "ash-3.local."
    server_name3 = "ash-4.local."
    info = r.ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    info2 = r.ServiceInfo(
        type_, registration2_name, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")]
    )
    info3 = r.ServiceInfo(
        type_, registration3_name, 80, 0, 0, desc, server_name3, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)
    zc.registry.async_add(info2)
    zc.registry.async_add(info3)
    protocol = zc.engine.protocols[0]
    now = r.current_time_millis()
    _clear_cache(zc)
    source_ip = '203.0.113.12'
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    for _ in range(300):
        # Add so many answers we end up with another packet
        generated.add_answer_at_time(info.dns_pointer(), now)
        generated.add_answer_at_time(info2.dns_pointer(), now)
        generated.add_answer_at_time(info3.dns_pointer(), now)
    packets = generated.packets()
    assert len(packets) == 4
    expected_deferred = []
    # First truncated packet: queued, timer armed.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    timer1 = protocol._timers[source_ip]
    # Second truncated packet: the previous timer is cancelled and replaced.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    timer2 = protocol._timers[source_ip]
    if sys.version_info >= (3, 7):
        # TimerHandle.cancelled() only exists on 3.7+.
        assert timer1.cancelled()
        assert timer2 != timer1
    # Send the same packet again to similar multi interfaces
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    timer3 = protocol._timers[source_ip]
    if sys.version_info >= (3, 7):
        # A duplicate does not reset the timer.
        assert not timer3.cancelled()
        assert timer3 == timer2
    # Third truncated packet: queued, timer replaced again.
    next_packet = r.DNSIncoming(packets.pop(0))
    expected_deferred.append(next_packet)
    threadsafe_query(zc, protocol, next_packet, source_ip, const._MDNS_PORT, None)
    assert protocol._deferred[source_ip] == expected_deferred
    assert source_ip in protocol._timers
    timer4 = protocol._timers[source_ip]
    if sys.version_info >= (3, 7):
        assert timer3.cancelled()
        assert timer4 != timer3
    # The final packet is never sent; poll until the timer fires and the
    # deferred state is flushed (up to ~0.8s).
    for _ in range(8):
        time.sleep(0.1)
        if source_ip not in protocol._timers and source_ip not in protocol._deferred:
            break
    assert source_ip not in protocol._deferred
    assert source_ip not in protocol._timers
    # unregister
    zc.registry.async_remove(info)
    zc.close()
@pytest.mark.asyncio
async def test_open_close_twice_from_async() -> None:
    """Closing a sync Zeroconf twice from a coroutine must not deadlock.

    Callers should migrate to AsyncZeroconf, but until then the sync
    wrapper has to shut down (forcefully) when invoked from a running
    event loop. Warnings about destroyed tasks are expected here since
    the shutdown cannot await anything on the caller's loop.
    """
    instance = Zeroconf(interfaces=['127.0.0.1'])
    for _ in range(2):
        instance.close()
    await asyncio.sleep(0)
@pytest.mark.asyncio
async def test_multiple_sync_instances_stared_from_async_close():
    """Test we can shutdown multiple sync instances from async."""
    first = Zeroconf(interfaces=['127.0.0.1'])
    second = Zeroconf(interfaces=['127.0.0.1'])
    # Sync instances created from a running loop share that loop.
    assert first.loop == second.loop
    for instance in (first, second):
        instance.close()
        # Closing one instance must leave the shared loop running.
        assert instance.loop.is_running()
    third = Zeroconf(interfaces=['127.0.0.1'])
    assert third.loop == second.loop
    third.close()
    assert third.loop.is_running()
    await asyncio.sleep(0)
def test_guard_against_oversized_packets():
    """Ensure we do not process oversized packets.

    These packets can quickly overwhelm the system.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    for i in range(5000):
        generated.add_answer_at_time(
            r.DNSText(
                # BUG FIX: was the literal "packet{i}.local." (missing the
                # f-prefix), so all 5000 records shared a single name and
                # the "packet0.local." lookup below could never match a
                # record that actually existed in the packet.
                f"packet{i}.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                500,
                b'path=/~paulsm/',
            ),
            0,
        )
    # We are patching the limits so packets() will emit a single packet
    # larger than the real protocol maximum.
    with patch.object(outgoing, "_MAX_MSG_ABSOLUTE", 100000), patch.object(
        outgoing, "_MAX_MSG_TYPICAL", 100000
    ):
        over_sized_packet = generated.packets()[0]
        assert len(over_sized_packet) > const._MAX_MSG_ABSOLUTE
    generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    okpacket_record = r.DNSText(
        "okpacket.local.",
        const._TYPE_TXT,
        const._CLASS_IN | const._CLASS_UNIQUE,
        500,
        b'path=/~paulsm/',
    )
    generated.add_answer_at_time(
        okpacket_record,
        0,
    )
    ok_packet = generated.packets()[0]
    # We cannot test through the network interface as some operating systems
    # will guard against the oversized packet and we won't see it.
    listener = _core.AsyncListener(zc)
    listener.transport = unittest.mock.MagicMock()
    # A normally-sized packet is accepted and cached.
    listener.datagram_received(ok_packet, ('127.0.0.1', const._MDNS_PORT))
    assert zc.cache.async_get_unique(okpacket_record) is not None
    # The oversized packet must be dropped: none of its records may appear
    # in the cache.
    listener.datagram_received(over_sized_packet, ('127.0.0.1', const._MDNS_PORT))
    assert (
        zc.cache.async_get_unique(
            r.DNSText(
                "packet0.local.",
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                500,
                b'path=/~paulsm/',
            )
        )
        is None
    )
    zc.close()
def test_guard_against_duplicate_packets():
    """Ensure we do not process duplicate packets.

    These packets can quickly overwhelm the system.
    """
    zc = Zeroconf(interfaces=['127.0.0.1'])
    listener = _core.AsyncListener(zc)
    # (payload, time offset in ms, expected suppression) in arrival order.
    scenario = [
        (b"first packet", 0, False),   # first sighting: processed
        (b"first packet", 0, True),    # immediate repeat: suppressed
        (b"first packet", 0, True),
        (b"first packet", 1000, False),  # old enough again: processed
        (b"first packet", 0, True),
        (b"other packet", 0, False),   # different payload: processed
        (b"other packet", 0, True),
        (b"other packet", 1000, False),
        (b"first packet", 0, False),   # tracking moved to "other packet"
    ]
    for payload, offset, suppressed in scenario:
        assert listener.suppress_duplicate_packet(payload, current_time_millis() + offset) is suppressed
    zc.close()
def test_shutdown_while_register_in_process():
    """Test we can shutdown while registering a service in another thread."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    service = r.ServiceInfo(
        "_homeassistant._tcp.local.",
        "MyTestHome._homeassistant._tcp.local.",
        80,
        0,
        0,
        {'path': '/~paulsm/'},
        "ash-90.local.",
        addresses=[socket.inet_aton("10.0.1.2")],
    )

    def _register_in_background():
        # Runs the (blocking) registration off the main thread.
        zc.register_service(service)

    worker = threading.Thread(target=_register_in_background, daemon=True)
    worker.start()
    time.sleep(0.3)
    # Closing mid-registration must neither deadlock nor raise.
    zc.close()
    worker.join()
python-zeroconf-0.38.3/tests/test_dns.py 0000664 0000000 0000000 00000032351 14176067602 0020325 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._dns. """
import logging
import os
import socket
import time
import unittest
import unittest.mock
import zeroconf as r
from zeroconf import const, current_time_millis
from zeroconf._dns import DNSRRSet
from zeroconf import (
DNSHinfo,
DNSText,
ServiceInfo,
)
from . import has_working_ipv6
# Module-wide zeroconf logger; setup_module() raises it to DEBUG and
# teardown_module() restores the captured level.
log = logging.getLogger('zeroconf')
# Sentinel default: NOTSET (0) means "nothing captured yet, do not restore".
original_logging_level = logging.NOTSET
def setup_module():
    """Capture the zeroconf logger's level, then force DEBUG for these tests."""
    global original_logging_level
    previous_level = log.level
    original_logging_level = previous_level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the logger level captured by :func:`setup_module`."""
    # NOTSET means setup_module never ran, so there is nothing to restore.
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class TestDunder(unittest.TestCase):
    """Exercise __repr__/__eq__ and the ttl age helpers of the DNS record types."""

    def test_dns_text_repr(self):
        """repr() works for both short and long TXT payloads."""
        # There was an issue on Python 3 that prevented DNSText's repr
        # from working when the text was longer than 10 bytes
        text = DNSText('irrelevant', 0, 0, 0, b'12345678901')
        repr(text)
        text = DNSText('irrelevant', 0, 0, 0, b'123')
        repr(text)

    def test_dns_hinfo_repr_eq(self):
        """HINFO records compare equal to themselves and have a working repr."""
        hinfo = DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu', 'os')
        assert hinfo == hinfo
        repr(hinfo)

    def test_dns_pointer_repr(self):
        """PTR records have a working repr."""
        pointer = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '123')
        repr(pointer)

    @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_dns_address_repr(self):
        """Address reprs render packed addresses as dotted/colon notation."""
        # A payload that is not a valid packed address falls back to raw bytes.
        address = r.DNSAddress('irrelevant', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        assert repr(address).endswith("b'a'")
        address_ipv4 = r.DNSAddress(
            'irrelevant', const._TYPE_SOA, const._CLASS_IN, 1, socket.inet_pton(socket.AF_INET, '127.0.0.1')
        )
        assert repr(address_ipv4).endswith('127.0.0.1')
        address_ipv6 = r.DNSAddress(
            'irrelevant', const._TYPE_SOA, const._CLASS_IN, 1, socket.inet_pton(socket.AF_INET6, '::1')
        )
        assert repr(address_ipv6).endswith('::1')

    def test_dns_question_repr(self):
        """Questions have a working repr and a consistent __ne__."""
        question = r.DNSQuestion('irrelevant', const._TYPE_SRV, const._CLASS_IN | const._CLASS_UNIQUE)
        repr(question)
        assert not question != question

    def test_dns_service_repr(self):
        """SRV records have a working repr."""
        service = r.DNSService(
            'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'a'
        )
        repr(service)

    def test_dns_record_abc(self):
        """DNSRecord is abstract: __eq__ and write() must be overridden."""
        record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL)
        self.assertRaises(r.AbstractMethodException, record.__eq__, record)
        self.assertRaises(r.AbstractMethodException, record.write, None)

    def test_dns_record_reset_ttl(self):
        """reset_ttl() copies ttl and created time from another record."""
        record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL)
        # Sleep so the two records get observably different creation times.
        time.sleep(1)
        record2 = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL)
        now = r.current_time_millis()
        assert record.created != record2.created
        assert record.get_remaining_ttl(now) != record2.get_remaining_ttl(now)
        record.reset_ttl(record2)
        assert record.ttl == record2.ttl
        assert record.created == record2.created
        assert record.get_remaining_ttl(now) == record2.get_remaining_ttl(now)

    def test_service_info_dunder(self):
        """ServiceInfo supports __ne__ and repr."""
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            b'',
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        assert not info != info
        repr(info)

    def test_service_info_text_properties_not_given(self):
        """When no properties are given, ServiceInfo.text is still bytes."""
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        info = ServiceInfo(
            type_=type_,
            name=registration_name,
            addresses=[socket.inet_aton("10.0.1.2")],
            port=80,
            server="ash-2.local.",
        )
        assert isinstance(info.text, bytes)
        repr(info)

    def test_dns_outgoing_repr(self):
        """DNSOutgoing has a working repr."""
        dns_outgoing = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        repr(dns_outgoing)

    def test_dns_record_is_expired(self):
        """A record expires exactly at 100% of its ttl (8s here)."""
        record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 8)
        now = current_time_millis()
        assert record.is_expired(now) is False
        assert record.is_expired(now + (8 / 2 * 1000)) is False
        assert record.is_expired(now + (8 * 1000)) is True

    def test_dns_record_is_stale(self):
        """A record goes stale at 50% of its ttl."""
        record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 8)
        now = current_time_millis()
        assert record.is_stale(now) is False
        assert record.is_stale(now + (8 / 4.1 * 1000)) is False
        assert record.is_stale(now + (8 / 2 * 1000)) is True
        assert record.is_stale(now + (8 * 1000)) is True

    def test_dns_record_is_recent(self):
        """A record is recent only within the first ~25% of its ttl."""
        now = current_time_millis()
        record = r.DNSRecord('irrelevant', const._TYPE_SRV, const._CLASS_IN, 8)
        assert record.is_recent(now + (8 / 4.1 * 1000)) is True
        assert record.is_recent(now + (8 / 3 * 1000)) is False
        assert record.is_recent(now + (8 / 2 * 1000)) is False
        assert record.is_recent(now + (8 * 1000)) is False
def test_dns_question_hashablity():
    """Test DNSQuestions are hashable."""
    q_a = r.DNSQuestion('irrelevant', const._TYPE_A, const._CLASS_IN)
    q_b = r.DNSQuestion('irrelevant', const._TYPE_A, const._CLASS_IN)
    questions = {q_a, q_b}
    assert len(questions) == 1
    questions.add(q_a)
    assert len(questions) == 1
    # An equal question hashes identically and does not grow the set.
    q_dupe = r.DNSQuestion('irrelevant', const._TYPE_A, const._CLASS_IN)
    assert q_b == q_dupe
    assert hash(q_b) == hash(q_dupe)
    questions.add(q_dupe)
    assert len(questions) == 1
    # A question with a different name is distinct.
    q_other = r.DNSQuestion('notsame', const._TYPE_A, const._CLASS_IN)
    assert q_b != q_other
    assert hash(q_b) != hash(q_other)
    questions.add(q_other)
    assert len(questions) == 2
def test_dns_record_hashablity_does_not_consider_ttl():
    """Test DNSRecord are hashable."""
    # Two records differing only in ttl must collapse to one set entry.
    rec_long = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b'same')
    rec_short = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b'same')
    records = {rec_long, rec_short}
    assert len(records) == 1
    records.add(rec_long)
    assert len(records) == 1
    rec_dupe = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_HOST_TTL, b'same')
    assert rec_short == rec_dupe
    assert hash(rec_short) == hash(rec_dupe)
    records.add(rec_dupe)
    assert len(records) == 1
def test_dns_record_hashablity_does_not_consider_unique():
    """Test DNSRecord are hashable and unique is ignored."""
    # The cache-flush (unique) bit does not survive into class_, so the
    # two records are fully equivalent.
    unique_rec = r.DNSAddress(
        'irrelevant', const._TYPE_A, const._CLASS_IN | const._CLASS_UNIQUE, const._DNS_OTHER_TTL, b'same'
    )
    plain_rec = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, const._DNS_OTHER_TTL, b'same')
    assert unique_rec.class_ == plain_rec.class_
    assert hash(unique_rec) == hash(plain_rec)
    assert len({unique_rec, plain_rec}) == 1
def test_dns_address_record_hashablity():
    """Test DNSAddress are hashable."""
    addr_a = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'a')
    addr_b = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'b')
    addr_c = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'c')
    # Same payload but a different record type is still distinct.
    addr_aaaa = r.DNSAddress('irrelevant', const._TYPE_AAAA, const._CLASS_IN, 1, b'c')
    addresses = {addr_a, addr_b, addr_c, addr_aaaa}
    assert len(addresses) == 4
    addresses.add(addr_a)
    assert len(addresses) == 4
    addresses.add(r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1, b'c'))
    assert len(addresses) == 4
    # Set arithmetic works on record sets too.
    addresses -= {addr_a, addr_b}
    assert addresses == {addr_c, addr_aaaa}
def test_dns_hinfo_record_hashablity():
    """Test DNSHinfo are hashable."""
    cpu1_rec = r.DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu1', 'os')
    cpu2_rec = r.DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu2', 'os')
    records = {cpu1_rec, cpu2_rec}
    assert len(records) == 2
    records.add(cpu1_rec)
    assert len(records) == 2
    # An equal HINFO hashes the same and does not grow the set.
    cpu2_dupe = r.DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu2', 'os')
    assert cpu2_rec == cpu2_dupe
    assert hash(cpu2_rec) == hash(cpu2_dupe)
    records.add(cpu2_dupe)
    assert len(records) == 2
def test_dns_pointer_record_hashablity():
    """Test DNSPointer are hashable."""
    ptr1 = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '123')
    ptr2 = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '456')
    record_set = {ptr1, ptr2}
    assert len(record_set) == 2
    record_set.add(ptr1)
    assert len(record_set) == 2
    ptr2_dupe = r.DNSPointer('irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, '456')
    # BUG FIX: previously asserted ``ptr2 == ptr2`` (always true); the
    # duplicate record is what must compare equal here.
    assert ptr2 == ptr2_dupe
    assert ptr2.__hash__() == ptr2_dupe.__hash__()
    record_set.add(ptr2_dupe)
    assert len(record_set) == 2
def test_dns_text_record_hashablity():
    """Test DNSText are hashable."""
    # Vary type, class and payload one at a time: all four are distinct.
    variants = [
        r.DNSText('irrelevant', 0, 0, const._DNS_OTHER_TTL, b'12345678901'),
        r.DNSText('irrelevant', 1, 0, const._DNS_OTHER_TTL, b'12345678901'),
        r.DNSText('irrelevant', 0, 1, const._DNS_OTHER_TTL, b'12345678901'),
        r.DNSText('irrelevant', 0, 0, const._DNS_OTHER_TTL, b'ABCDEFGHIJK'),
    ]
    records = set(variants)
    assert len(records) == 4
    records.add(variants[0])
    assert len(records) == 4
    # An equal TXT record hashes the same and does not grow the set.
    first_dupe = r.DNSText('irrelevant', 0, 0, const._DNS_OTHER_TTL, b'12345678901')
    assert variants[0] == first_dupe
    assert hash(variants[0]) == hash(first_dupe)
    records.add(first_dupe)
    assert len(records) == 4
def test_dns_service_record_hashablity():
    """Test DNSService are hashable."""
    # Vary weight, port and server one at a time: all four are distinct.
    variants = [
        r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'a'),
        r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 1, 80, 'a'),
        r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 81, 'a'),
        r.DNSService('irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'ab'),
    ]
    records = set(variants)
    assert len(records) == 4
    records.add(variants[0])
    assert len(records) == 4
    # An equal SRV record hashes the same and does not grow the set.
    first_dupe = r.DNSService(
        'irrelevant', const._TYPE_SRV, const._CLASS_IN, const._DNS_HOST_TTL, 0, 0, 80, 'a'
    )
    assert variants[0] == first_dupe
    assert hash(variants[0]) == hash(first_dupe)
    records.add(first_dupe)
    assert len(records) == 4
def test_dns_nsec_record_hashablity():
    """Test DNSNsec are hashable."""
    nsec_123 = r.DNSNsec(
        'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'irrelevant', [1, 2, 3]
    )
    nsec_12 = r.DNSNsec(
        'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'irrelevant', [1, 2]
    )
    # Different rdtypes lists produce distinct records.
    records = {nsec_123, nsec_12}
    assert len(records) == 2
    records.add(nsec_123)
    assert len(records) == 2
    # An identical rdtypes list produces an equal record with the same hash.
    nsec_12_dupe = r.DNSNsec(
        'irrelevant', const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, 'irrelevant', [1, 2]
    )
    assert nsec_12 == nsec_12_dupe
    assert hash(nsec_12) == hash(nsec_12_dupe)
    records.add(nsec_12_dupe)
    assert len(records) == 2
def test_rrset_does_not_consider_ttl():
    """Test DNSRRSet does not consider the ttl in the hash."""
    long_a = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 100, b'same')
    short_a = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 10, b'same')
    long_aaaa = r.DNSAddress('irrelevant', const._TYPE_AAAA, const._CLASS_IN, 100, b'same')
    short_aaaa = r.DNSAddress('irrelevant', const._TYPE_AAAA, const._CLASS_IN, 10, b'same')
    rrset = DNSRRSet([long_a, short_aaaa])
    # A record is suppressed when the set holds a matching record whose
    # ttl is comparable or larger (see DNSRRSet.suppresses for the bound).
    assert rrset.suppresses(long_a)
    assert rrset.suppresses(short_a)
    assert not rrset.suppresses(long_aaaa)
    assert rrset.suppresses(short_aaaa)
    very_long_a = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 1000, b'same')
    medium_a = r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 60, b'same')
    rrset2 = DNSRRSet([medium_a])
    # A much larger incoming ttl is NOT suppressed by a short-lived entry.
    assert not rrset2.suppresses(very_long_a)
    assert rrset2.suppresses(r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 100, b'same'))
    assert rrset2.suppresses(medium_a)
    assert rrset2.suppresses(r.DNSAddress('irrelevant', const._TYPE_A, const._CLASS_IN, 10, b'same'))
python-zeroconf-0.38.3/tests/test_exceptions.py 0000664 0000000 0000000 00000011654 14176067602 0021725 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._exceptions """
import logging
import unittest
import unittest.mock
import zeroconf as r
from zeroconf import (
ServiceInfo,
Zeroconf,
)
# Module-wide zeroconf logger; setup_module() raises it to DEBUG and
# teardown_module() restores the captured level.
log = logging.getLogger('zeroconf')
# Sentinel default: NOTSET (0) means "nothing captured yet, do not restore".
original_logging_level = logging.NOTSET
def setup_module():
    """Save the zeroconf logger's level and switch it to DEBUG for this module."""
    global original_logging_level
    saved = log.level
    original_logging_level = saved
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Put the zeroconf logger back to the level saved in setup_module()."""
    # NOTSET means setup_module never ran; leave the logger untouched.
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class Exceptions(unittest.TestCase):
    """Validate BadTypeInNameException paths of service-type/name validation."""

    # Shared Zeroconf instance for all tests in this class.
    browser = None  # type: Zeroconf

    @classmethod
    def setUpClass(cls):
        """Create one shared Zeroconf instance for the whole class."""
        cls.browser = Zeroconf(interfaces=['127.0.0.1'])

    @classmethod
    def tearDownClass(cls):
        """Shut down and release the shared instance."""
        cls.browser.close()
        del cls.browser

    def test_bad_service_info_name(self):
        """A name that does not end with its type raises BadTypeInNameException."""
        self.assertRaises(r.BadTypeInNameException, self.browser.get_service_info, "type", "type_not")

    def test_bad_service_names(self):
        """Malformed service types are all rejected."""
        bad_names_to_try = (
            '',
            'local',
            '_tcp.local.',
            '_udp.local.',
            '._udp.local.',
            '_@._tcp.local.',
            '_A@._tcp.local.',
            '_x--x._tcp.local.',
            '_-x._udp.local.',
            '_x-._tcp.local.',
            '_22._udp.local.',
            '_2-2._tcp.local.',
            '\x00._x._udp.local.',
        )
        for name in bad_names_to_try:
            self.assertRaises(r.BadTypeInNameException, self.browser.get_service_info, name, 'x.' + name)

    def test_bad_local_names_for_get_service_info(self):
        """Instance names whose type does not match the queried type are rejected."""
        bad_names_to_try = (
            'homekitdev._nothttp._tcp.local.',
            'homekitdev._http._udp.local.',
        )
        for name in bad_names_to_try:
            self.assertRaises(
                r.BadTypeInNameException, self.browser.get_service_info, '_http._tcp.local.', name
            )

    def test_good_instance_names(self):
        """service_type_name() extracts the type from valid instance names."""
        assert r.service_type_name('.._x._tcp.local.') == '_x._tcp.local.'
        assert r.service_type_name('x.y._http._tcp.local.') == '_http._tcp.local.'
        assert r.service_type_name('1.2.3._mqtt._tcp.local.') == '_mqtt._tcp.local.'
        assert r.service_type_name('x.sub._http._tcp.local.') == '_http._tcp.local.'
        assert (
            r.service_type_name('6d86f882b90facee9170ad3439d72a4d6ee9f511._zget._http._tcp.local.')
            == '_http._tcp.local.'
        )

    def test_good_instance_names_without_protocol(self):
        """Protocol-less host names pass only with strict=False."""
        good_names_to_try = (
            "Rachio-C73233.local.",
            'YeelightColorBulb-3AFD.local.',
            'YeelightTunableBulb-7220.local.',
            "AlexanderHomeAssistant 74651D.local.",
            'iSmartGate-152.local.',
            'MyQ-FGA.local.',
            'lutron-02c4392a.local.',
            'WICED-hap-3E2734.local.',
            'MyHost.local.',
            'MyHost.sub.local.',
        )
        for name in good_names_to_try:
            assert r.service_type_name(name, strict=False) == 'local.'
        for name in good_names_to_try:
            # Raises without strict=False
            self.assertRaises(r.BadTypeInNameException, r.service_type_name, name)

    def test_bad_types(self):
        """Types with an empty label or an over-long (>63 byte) label are rejected."""
        bad_names_to_try = (
            '._x._tcp.local.',
            'a' * 64 + '._sub._http._tcp.local.',
            'a' * 62 + 'â._sub._http._tcp.local.',
        )
        for name in bad_names_to_try:
            self.assertRaises(r.BadTypeInNameException, r.service_type_name, name)

    def test_bad_sub_types(self):
        """Subtypes that are empty or contain control characters are rejected."""
        bad_names_to_try = (
            '_sub._http._tcp.local.',
            '._sub._http._tcp.local.',
            '\x7f._sub._http._tcp.local.',
            '\x1f._sub._http._tcp.local.',
        )
        for name in bad_names_to_try:
            self.assertRaises(r.BadTypeInNameException, r.service_type_name, name)

    def test_good_service_names(self):
        """Valid names (including 63-byte labels) map to the expected type."""
        good_names_to_try = (
            ('_x._tcp.local.', '_x._tcp.local.'),
            ('_x._udp.local.', '_x._udp.local.'),
            ('_12345-67890-abc._udp.local.', '_12345-67890-abc._udp.local.'),
            ('x._sub._http._tcp.local.', '_http._tcp.local.'),
            ('a' * 63 + '._sub._http._tcp.local.', '_http._tcp.local.'),
            ('a' * 61 + 'â._sub._http._tcp.local.', '_http._tcp.local.'),
        )
        for name, result in good_names_to_try:
            assert r.service_type_name(name) == result
        assert r.service_type_name('_one_two._tcp.local.', strict=False) == '_one_two._tcp.local.'

    def test_invalid_addresses(self):
        """Text (unpacked) addresses passed to ServiceInfo raise TypeError."""
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        bad = (b'127.0.0.1', b'::1')
        for addr in bad:
            self.assertRaisesRegex(
                TypeError,
                'Addresses must either ',
                ServiceInfo,
                type_,
                registration_name,
                port=80,
                addresses=[addr],
            )
python-zeroconf-0.38.3/tests/test_handlers.py 0000664 0000000 0000000 00000172403 14176067602 0021344 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._handlers """
import asyncio
import logging
import os
import pytest
import socket
import time
import unittest
import unittest.mock
from typing import List
import zeroconf as r
from zeroconf import _handlers, ServiceInfo, Zeroconf, current_time_millis
from zeroconf import const
from zeroconf._handlers import construct_outgoing_multicast_answers, MulticastOutgoingQueue
from zeroconf._utils.time import millis_to_seconds
from zeroconf.asyncio import AsyncZeroconf
from . import _clear_cache, _inject_response, has_working_ipv6
# Module-wide zeroconf logger; setup_module() raises it to DEBUG and
# teardown_module() restores the captured level.
log = logging.getLogger('zeroconf')
# Sentinel default: NOTSET (0) means "nothing captured yet, do not restore".
original_logging_level = logging.NOTSET
def setup_module():
    """Remember the current zeroconf log level, then enable DEBUG output."""
    global original_logging_level
    current = log.level
    original_logging_level = current
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the log level remembered by setup_module(), if any."""
    # NOTSET is the "never captured" sentinel — skip restoration then.
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class TestRegistrar(unittest.TestCase):
    """Tests for service registration: TTL handling, name conflicts, case folding."""

    def test_ttl(self):
        """Registration, query and unregistration emit records with the expected TTLs."""
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=['127.0.0.1'])

        # service definition
        type_ = "_test-srvc-type._tcp.local."
        name = "xxxyyy"
        registration_name = f"{name}.{type_}"
        desc = {'path': '/~paulsm/'}
        info = ServiceInfo(
            type_,
            registration_name,
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )

        # Counters folded over every outgoing packet we inspect below.
        nbr_answers = nbr_additionals = nbr_authorities = 0

        def get_ttl(record_type):
            # expected_ttl is rebound by the enclosing test between phases;
            # None means "use the protocol default for this record type".
            if expected_ttl is not None:
                return expected_ttl
            elif record_type in [const._TYPE_A, const._TYPE_SRV]:
                return const._DNS_HOST_TTL
            else:
                return const._DNS_OTHER_TTL

        def _process_outgoing_packet(out):
            """Sends an outgoing packet.

            Tallies the answers/additionals/authorities sections and asserts
            every record carries the TTL get_ttl() predicts.
            """
            nonlocal nbr_answers, nbr_additionals, nbr_authorities
            for answer, time_ in out.answers:
                nbr_answers += 1
                assert answer.ttl == get_ttl(answer.type)
            for answer in out.additionals:
                nbr_additionals += 1
                assert answer.ttl == get_ttl(answer.type)
            for answer in out.authorities:
                nbr_authorities += 1
                assert answer.ttl == get_ttl(answer.type)

        # register service with default TTL
        expected_ttl = None
        for _ in range(3):
            # Probes carry the proposed records in the authorities section.
            _process_outgoing_packet(zc.generate_service_query(info))
        zc.registry.async_add(info)
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, None))
        assert nbr_answers == 12 and nbr_additionals == 0 and nbr_authorities == 3
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # query
        query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
        assert query.is_query() is True
        query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_SRV, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_TXT, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.server, const._TYPE_A, const._CLASS_IN))
        question_answers = zc.query_handler.async_response(
            [r.DNSIncoming(packet) for packet in query.packets()], False
        )
        _process_outgoing_packet(construct_outgoing_multicast_answers(question_answers.mcast_aggregate))

        # The additionals should all be suppressed since they are all in the answers section
        # There will be one NSEC additional to indicate the lack of AAAA record
        #
        assert nbr_answers == 4 and nbr_additionals == 1 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # unregister — goodbye packets must carry TTL 0
        expected_ttl = 0
        zc.registry.async_remove(info)
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, 0))
        assert nbr_answers == 12 and nbr_additionals == 0 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0

        expected_ttl = None
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_query(info))
        zc.registry.async_add(info)

        # register service with custom TTL
        expected_ttl = const._DNS_HOST_TTL * 2
        assert expected_ttl != const._DNS_HOST_TTL
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, expected_ttl))
        assert nbr_answers == 12 and nbr_additionals == 0 and nbr_authorities == 3
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # query — responses use the default TTLs regardless of the custom broadcast TTL
        expected_ttl = None
        query = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
        query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_SRV, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.name, const._TYPE_TXT, const._CLASS_IN))
        query.add_question(r.DNSQuestion(info.server, const._TYPE_A, const._CLASS_IN))
        question_answers = zc.query_handler.async_response(
            [r.DNSIncoming(packet) for packet in query.packets()], False
        )
        _process_outgoing_packet(construct_outgoing_multicast_answers(question_answers.mcast_aggregate))

        # There will be one NSEC additional to indicate the lack of AAAA record
        assert nbr_answers == 4 and nbr_additionals == 1 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0

        # unregister
        expected_ttl = 0
        zc.registry.async_remove(info)
        for _ in range(3):
            _process_outgoing_packet(zc.generate_service_broadcast(info, 0))
        assert nbr_answers == 12 and nbr_additionals == 0 and nbr_authorities == 0
        nbr_answers = nbr_additionals = nbr_authorities = 0
        zc.close()

    def test_name_conflicts(self):
        """Registering a second service under an existing name raises NonUniqueNameException."""
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=['127.0.0.1'])
        type_ = "_homeassistant._tcp.local."
        name = "Home"
        registration_name = f"{name}.{type_}"
        info = ServiceInfo(
            type_,
            name=registration_name,
            server="random123.local.",
            addresses=[socket.inet_pton(socket.AF_INET, "1.2.3.4")],
            port=80,
            properties={"version": "1.0"},
        )
        zc.register_service(info)
        # Same registration name, different server/address: must conflict.
        conflicting_info = ServiceInfo(
            type_,
            name=registration_name,
            server="random456.local.",
            addresses=[socket.inet_pton(socket.AF_INET, "4.5.6.7")],
            port=80,
            properties={"version": "1.0"},
        )
        with pytest.raises(r.NonUniqueNameException):
            zc.register_service(conflicting_info)
        zc.close()

    def test_register_and_lookup_type_by_uppercase_name(self):
        """Type lookups are case-insensitive: an upper-cased PTR query still finds the service."""
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=['127.0.0.1'])
        type_ = "_mylowertype._tcp.local."
        name = "Home"
        registration_name = f"{name}.{type_}"
        info = ServiceInfo(
            type_,
            name=registration_name,
            server="random123.local.",
            addresses=[socket.inet_pton(socket.AF_INET, "1.2.3.4")],
            port=80,
            properties={"version": "1.0"},
        )
        zc.register_service(info)
        _clear_cache(zc)
        # Cache was cleared, so nothing should be known about the service yet.
        info = ServiceInfo(type_, registration_name)
        info.load_from_cache(zc)
        assert info.addresses == []
        # Query our own instance using the upper-cased type name.
        out = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        out.add_question(r.DNSQuestion(type_.upper(), const._TYPE_PTR, const._CLASS_IN))
        zc.send(out)
        # Give our own response time to be received and cached.
        time.sleep(1)
        info = ServiceInfo(type_, registration_name)
        info.load_from_cache(zc)
        assert info.addresses == [socket.inet_pton(socket.AF_INET, "1.2.3.4")]
        assert info.properties == {b"version": b"1.0"}
        zc.close()
def test_ptr_optimization():
    """A PTR answered within the last second is delayed; once the cache is clear it is answered."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )

    # register
    zc.register_service(info)

    # Verify we won't respond for 1s with the same multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    # Since we sent the PTR in the last second, they
    # should end up in the delayed at least one second bucket
    assert question_answers.mcast_aggregate_last_second

    # Clear the cache to allow responding again
    _clear_cache(zc)

    # Verify we will now respond
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate_last_second
    # Inspect the aggregated multicast bucket: one PTR answer with
    # SRV/TXT/A (plus an NSEC) as additionals.
    has_srv = has_txt = has_a = False
    nbr_additionals = 0
    nbr_answers = len(question_answers.mcast_aggregate)
    additionals = set().union(*question_answers.mcast_aggregate.values())
    for answer in additionals:
        nbr_additionals += 1
        if answer.type == const._TYPE_SRV:
            has_srv = True
        elif answer.type == const._TYPE_TXT:
            has_txt = True
        elif answer.type == const._TYPE_A:
            has_a = True
    assert nbr_answers == 1 and nbr_additionals == 4
    # There will be one NSEC additional to indicate the lack of AAAA record
    assert has_srv and has_txt and has_a

    # unregister
    zc.unregister_service(info)
    zc.close()
@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
def test_any_query_for_ptr():
    """Test that queries for ANY will return PTR records and the response is aggregated."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_anyptr._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    service = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address])
    zc.registry.async_add(service)
    _clear_cache(zc)

    outgoing = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    outgoing.add_question(r.DNSQuestion(type_, const._TYPE_ANY, const._CLASS_IN))
    incoming = [r.DNSIncoming(packet) for packet in outgoing.packets()]
    question_answers = zc.query_handler.async_response(incoming, False)

    # The PTR must land in the aggregated multicast bucket, not go out immediately.
    ptr_answers = list(question_answers.mcast_aggregate)
    assert ptr_answers[0].name == type_
    assert ptr_answers[0].alias == registration_name

    # unregister
    zc.registry.async_remove(service)
    zc.close()
@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
def test_aaaa_query():
    """Test that queries for AAAA records work and should respond right away."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_knownaaaservice._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    service = ServiceInfo(type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address])
    zc.registry.async_add(service)

    outgoing = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    outgoing.add_question(r.DNSQuestion(server_name, const._TYPE_AAAA, const._CLASS_IN))
    incoming = [r.DNSIncoming(packet) for packet in outgoing.packets()]
    question_answers = zc.query_handler.async_response(incoming, False)

    # Address queries are answered immediately rather than aggregated.
    immediate_answers = list(question_answers.mcast_now)
    assert immediate_answers[0].address == ipv6_address

    # unregister
    zc.registry.async_remove(service)
    zc.close()
@unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
@unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
def test_a_and_aaaa_record_fate_sharing():
    """Test that queries for AAAA always return A records in the additionals and should respond right away."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_a-and-aaaa-service._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    ipv6_address = socket.inet_pton(socket.AF_INET6, "2001:db8::1")
    ipv4_address = socket.inet_aton("10.0.1.2")
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[ipv6_address, ipv4_address]
    )
    aaaa_record = info.dns_addresses(version=r.IPVersion.V6Only)[0]
    a_record = info.dns_addresses(version=r.IPVersion.V4Only)[0]
    zc.registry.async_add(info)

    def _ask(record_type):
        """Issue one address query and return the immediate multicast answers."""
        outgoing = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        outgoing.add_question(r.DNSQuestion(server_name, record_type, const._CLASS_IN))
        incoming = [r.DNSIncoming(packet) for packet in outgoing.packets()]
        return zc.query_handler.async_response(incoming, False).mcast_now

    # An AAAA query must carry the A record along as an additional.
    mcast_now = _ask(const._TYPE_AAAA)
    extras = set().union(*mcast_now.values())
    assert aaaa_record in mcast_now
    assert a_record in extras
    assert len(mcast_now) == 1
    assert len(extras) == 1

    # And vice versa: an A query carries the AAAA record.
    mcast_now = _ask(const._TYPE_A)
    extras = set().union(*mcast_now.values())
    assert a_record in mcast_now
    assert aaaa_record in extras
    assert len(mcast_now) == 1
    assert len(extras) == 1

    # unregister
    zc.registry.async_remove(info)
    zc.close()
def test_unicast_response():
    """Ensure we send a unicast response when the source port is not the MDNS port."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )

    # register
    zc.registry.async_add(info)
    _clear_cache(zc)

    # query — the True flag marks the query as coming from a non-mDNS source port,
    # so a unicast response is produced in addition to the multicast one.
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    query.add_question(r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN))
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], True
    )
    # Both the unicast and the aggregated multicast buckets must carry the
    # same complete response: one PTR answer with SRV/TXT/A/NSEC additionals.
    for answers in (question_answers.ucast, question_answers.mcast_aggregate):
        has_srv = has_txt = has_a = has_aaaa = has_nsec = False
        nbr_additionals = 0
        nbr_answers = len(answers)
        additionals = set().union(*answers.values())
        for answer in additionals:
            nbr_additionals += 1
            if answer.type == const._TYPE_SRV:
                has_srv = True
            elif answer.type == const._TYPE_TXT:
                has_txt = True
            elif answer.type == const._TYPE_A:
                has_a = True
            elif answer.type == const._TYPE_AAAA:
                has_aaaa = True
            elif answer.type == const._TYPE_NSEC:
                has_nsec = True
        # There will be one NSEC additional to indicate the lack of AAAA record
        assert nbr_answers == 1 and nbr_additionals == 4
        assert has_srv and has_txt and has_a and has_nsec
        assert not has_aaaa

    # unregister
    zc.registry.async_remove(info)
    zc.close()
@pytest.mark.asyncio
async def test_probe_answered_immediately():
    """Verify probes are responded to immediately."""
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)

    # A probe (query with an authoritative answer) must be answered on
    # multicast right away, never aggregated.
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    query.add_authorative_answer(info.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    assert question_answers.mcast_now

    # Same probe with the QU bit set: answered both unicast and multicast,
    # still immediately.
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True
    query.add_question(question)
    query.add_authorative_answer(info.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    zc.close()
def test_qu_response():
    """Handle multicast incoming with the QU bit set.

    Walks through the QU decision matrix: recently-multicast answers are
    sent unicast only; stale answers are multicast; probes (queries with an
    authoritative answer) are answered both ways.
    """
    # instantiate a zeroconf instance
    zc = Zeroconf(interfaces=['127.0.0.1'])

    # service definition
    type_ = "_test-srvc-type._tcp.local."
    other_type_ = "_notthesame._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{other_type_}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    # info2 is only used as an unrelated authoritative answer (a probe payload).
    info2 = ServiceInfo(
        other_type_,
        registration_name2,
        80,
        0,
        0,
        desc,
        "ash-other.local.",
        addresses=[socket.inet_aton("10.0.4.2")],
    )
    # register
    zc.register_service(info)

    def _validate_complete_response(answers):
        """Assert answers hold one PTR with SRV/TXT/A/NSEC additionals and no AAAA."""
        has_srv = has_txt = has_a = has_aaaa = has_nsec = False
        nbr_answers = len(answers)
        additionals = set().union(*answers.values())
        nbr_additionals = len(additionals)
        for answer in additionals:
            if answer.type == const._TYPE_SRV:
                has_srv = True
            elif answer.type == const._TYPE_TXT:
                has_txt = True
            elif answer.type == const._TYPE_A:
                has_a = True
            elif answer.type == const._TYPE_AAAA:
                has_aaaa = True
            elif answer.type == const._TYPE_NSEC:
                has_nsec = True
        assert nbr_answers == 1 and nbr_additionals == 4
        assert has_srv and has_txt and has_a and has_nsec
        assert not has_aaaa

    # With QU should respond to only unicast when the answer has been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    _validate_complete_response(question_answers.ucast)
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    _clear_cache(zc)
    # With QU should respond to only multicast since the response hasn't been seen since 75% of the ttl
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    # BUGFIX: this previously re-asserted mcast_aggregate, leaving the
    # last-second bucket unchecked (cf. the identical assertion trio above).
    assert not question_answers.mcast_aggregate_last_second
    _validate_complete_response(question_answers.mcast_now)

    # With QU set and an authoritative answer (probe) should respond to both unicast and multicast since the response hasn't been seen since 75% of the ttl
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    query.add_authorative_answer(info2.dns_pointer())
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    _validate_complete_response(question_answers.ucast)
    _validate_complete_response(question_answers.mcast_now)

    # Feed our own multicast response back in to repopulate the cache.
    _inject_response(
        zc, r.DNSIncoming(construct_outgoing_multicast_answers(question_answers.mcast_now).packets()[0])
    )
    # With the cache repopulated; should respond to only unicast when the answer has been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    _validate_complete_response(question_answers.ucast)

    # unregister
    zc.unregister_service(info)
    zc.close()
def test_known_answer_supression():
    """Known-answer suppression: a query that already lists our record gets no reply."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_knownanswersv8._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)
    now = current_time_millis()
    _clear_cache(zc)

    # Test PTR suppression: first without a known answer (we respond, aggregated) ...
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # ... then with the PTR in the known-answer section (we stay silent).
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_pointer(), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test A suppression: an address query is answered immediately ...
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # ... unless all our addresses are already known answers.
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN)
    generated.add_question(question)
    for dns_address in info.dns_addresses():
        generated.add_answer_at_time(dns_address, now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test NSEC record returned when there is no AAAA record and we explicitly ask
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(server_name, const._TYPE_AAAA, const._CLASS_IN)
    generated.add_question(question)
    for dns_address in info.dns_addresses():
        generated.add_answer_at_time(dns_address, now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    # The NSEC must deny AAAA (the type asked for) but not A (which we have).
    expected_nsec_record: r.DNSNsec = list(question_answers.mcast_now)[0]
    assert const._TYPE_A not in expected_nsec_record.rdtypes
    assert const._TYPE_AAAA in expected_nsec_record.rdtypes
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test SRV suppression
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_SRV, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_SRV, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_service(), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # Test TXT suppression (TXT replies are aggregated, not immediate)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_TXT, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(registration_name, const._TYPE_TXT, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(info.dns_text(), now)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # unregister
    zc.registry.async_remove(info)
    zc.close()
def test_multi_packet_known_answer_supression():
    """Known-answer suppression still works when the known answers span multiple packets."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_handlermultis._tcp.local."
    desc = {'path': '/~paulsm/'}
    # Three registered services of the same type.
    services = [
        ServiceInfo(
            type_, f"{instance}.{type_}", 80, 0, 0, desc, host, addresses=[socket.inet_aton("10.0.1.2")]
        )
        for instance, host in (
            ("knownname", "ash-2.local."),
            ("knownname2", "ash-3.local."),
            ("knownname3", "ash-4.local."),
        )
    ]
    for service in services:
        zc.registry.async_add(service)
    now = current_time_millis()
    _clear_cache(zc)

    # Build a PTR query whose known-answer section overflows a single packet.
    outgoing = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    outgoing.add_question(r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN))
    for _ in range(1000):
        # Add so many answers we end up with another packet
        for service in services:
            outgoing.add_answer_at_time(service.dns_pointer(), now)
    packets = outgoing.packets()
    assert len(packets) > 1

    # Every answer is already known, so nothing should be sent back at all.
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # unregister
    for service in services:
        zc.registry.async_remove(service)
    zc.close()
def test_known_answer_supression_service_type_enumeration_query():
    """Service-type enumeration queries are suppressed when all types are known answers."""
    zc = Zeroconf(interfaces=['127.0.0.1'])
    type_ = "_otherknown._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)

    # A second service of a different type so the enumeration has two PTRs.
    type_2 = "_otherknown2._tcp.local."
    name = "knownname"
    registration_name2 = f"{name}.{type_2}"
    desc = {'path': '/~paulsm/'}
    server_name2 = "ash-3.local."
    info2 = ServiceInfo(
        type_2, registration_name2, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info2)
    now = current_time_millis()
    _clear_cache(zc)

    # Test PTR suppression: plain enumeration query gets an aggregated response ...
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # ... but listing both type PTRs as known answers suppresses the response entirely.
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(const._SERVICE_TYPE_ENUMERATION_NAME, const._TYPE_PTR, const._CLASS_IN)
    generated.add_question(question)
    generated.add_answer_at_time(
        r.DNSPointer(
            const._SERVICE_TYPE_ENUMERATION_NAME,
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            type_,
        ),
        now,
    )
    generated.add_answer_at_time(
        r.DNSPointer(
            const._SERVICE_TYPE_ENUMERATION_NAME,
            const._TYPE_PTR,
            const._CLASS_IN,
            const._DNS_OTHER_TTL,
            type_2,
        ),
        now,
    )
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second

    # unregister
    zc.registry.async_remove(info)
    zc.registry.async_remove(info2)
    zc.close()
# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_qu_response_only_sends_additionals_if_sends_answer():
    """Test that a QU response does not send additionals unless it sends the answer as well."""
    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf

    type_ = "_addtest1._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "ash-2.local."
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info)

    type_2 = "_addtest2._tcp.local."
    name = "knownname"
    registration_name2 = f"{name}.{type_2}"
    desc = {'path': '/~paulsm/'}
    server_name2 = "ash-3.local."
    info2 = ServiceInfo(
        type_2, registration_name2, 80, 0, 0, desc, server_name2, addresses=[socket.inet_aton("10.0.1.2")]
    )
    zc.registry.async_add(info2)

    ptr_record = info.dns_pointer()
    # Add the PTR record to the cache
    zc.cache.async_add_records([ptr_record])
    # Add the A record to the cache with 50% ttl remaining
    a_record = info.dns_addresses()[0]
    a_record.set_created_ttl(current_time_millis() - (a_record.ttl * 1000 / 2), a_record.ttl)
    assert not a_record.is_recent(current_time_millis())
    zc.cache.async_add_records([a_record])

    # With QU should respond to only unicast when the answer has been recently multicast
    # even if the additional has not been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    additionals = set().union(*question_answers.ucast.values())
    assert a_record in additionals
    assert ptr_record in question_answers.ucast

    # Remove the 50% A record and add a 100% A record
    zc.cache.async_remove_records([a_record])
    a_record = info.dns_addresses()[0]
    assert a_record.is_recent(current_time_millis())
    zc.cache.async_add_records([a_record])
    # With QU should respond to only unicast when the answer has been recently multicast
    # even if the additional has not been recently multicast
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    additionals = set().union(*question_answers.ucast.values())
    assert a_record in additionals
    assert ptr_record in question_answers.ucast

    # Remove the 100% PTR record and add a 50% PTR record
    zc.cache.async_remove_records([ptr_record])
    ptr_record.set_created_ttl(current_time_millis() - (ptr_record.ttl * 1000 / 2), ptr_record.ttl)
    assert not ptr_record.is_recent(current_time_millis())
    zc.cache.async_add_records([ptr_record])
    # With QU should respond to only multicast since the PTR has less
    # than 75% of its ttl remaining
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.ucast
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    additionals = set().union(*question_answers.mcast_now.values())
    assert a_record in additionals
    assert info.dns_text() in additionals
    assert info.dns_service() in additionals
    assert ptr_record in question_answers.mcast_now

    # Ask 2 QU questions, with info the PTR is at 50%, with info2 the PTR is at 100%
    # We should get back a unicast reply for info2, but info should be multicasted since its within 75% of its TTL
    # With QU should respond to only multicast since the PTR has less
    # than 75% of its ttl remaining
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    question = r.DNSQuestion(info2.type, const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # Set the QU bit
    assert question.unicast is True
    query.add_question(question)
    zc.cache.async_add_records([info2.dns_pointer()])  # Add 100% TTL for info2 to the cache
    question_answers = zc.query_handler.async_response(
        [r.DNSIncoming(packet) for packet in query.packets()], False
    )
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # info goes multicast (stale PTR) with its full record set as additionals ...
    mcast_now_additionals = set().union(*question_answers.mcast_now.values())
    assert a_record in mcast_now_additionals
    assert info.dns_text() in mcast_now_additionals
    assert info.dns_addresses()[0] in mcast_now_additionals
    assert info.dns_pointer() in question_answers.mcast_now
    # ... while info2 (fresh PTR) is answered unicast only.
    ucast_additionals = set().union(*question_answers.ucast.values())
    assert info2.dns_pointer() in question_answers.ucast
    assert info2.dns_text() in ucast_additionals
    assert info2.dns_service() in ucast_additionals
    assert info2.dns_addresses()[0] in ucast_additionals

    # unregister
    zc.registry.async_remove(info)
    await aiozc.async_close()
# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_cache_flush_bit():
    """Test that the cache flush bit sets the TTL to one for matching records."""
    # instantiate a zeroconf instance
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    type_ = "_cacheflush._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "server-uu1.local."
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    a_record = info.dns_addresses()[0]
    # Seed the cache with the service's original records
    zc.cache.async_add_records([info.dns_pointer(), a_record, info.dns_text(), info.dns_service()])
    # Swap the addresses so dns_addresses() yields new unique (cache-flush) records
    info.addresses = [socket.inet_aton("10.0.1.5"), socket.inet_aton("10.0.1.6")]
    new_records = info.dns_addresses()
    for new_record in new_records:
        assert new_record.unique is True
    original_a_record = zc.cache.async_get_unique(a_record)
    # Do the run within 1s to verify the original record is not going to be expired
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA, multicast=True)
    for answer in new_records:
        out.add_answer_at_time(answer, 0)
    for packet in out.packets():
        zc.record_manager.async_updates_from_response(r.DNSIncoming(packet))
    # A record created less than 1s ago must not be flushed yet
    assert zc.cache.async_get_unique(a_record) is original_a_record
    assert original_a_record.ttl != 1
    for record in new_records:
        assert zc.cache.async_get_unique(record) is not None
    # Back-date the original record so it appears older than one second
    original_a_record.created = current_time_millis() - 1001
    # Replay the same response; now the flush bit should take effect
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA, multicast=True)
    for answer in new_records:
        out.add_answer_at_time(answer, 0)
    for packet in out.packets():
        zc.record_manager.async_updates_from_response(r.DNSIncoming(packet))
    # The stale original record gets its TTL clamped to one (marked for expiry)
    assert original_a_record.ttl == 1
    for record in new_records:
        assert zc.cache.async_get_unique(record) is not None
    cached_records = [zc.cache.async_get_unique(record) for record in new_records]
    # Back-date the two new records as well
    for record in cached_records:
        record.created = current_time_millis() - 1001
    fresh_address = socket.inet_aton("4.4.4.4")
    info.addresses = [fresh_address]
    # Do the run within 1s to verify the two new records get marked as expired
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA, multicast=True)
    for answer in info.dns_addresses():
        out.add_answer_at_time(answer, 0)
    for packet in out.packets():
        zc.record_manager.async_updates_from_response(r.DNSIncoming(packet))
    for record in cached_records:
        assert record.ttl == 1
    # Only the freshly announced address keeps a real TTL
    for entry in zc.cache.async_all_by_details(server_name, const._TYPE_A, const._CLASS_IN):
        if entry.address == fresh_address:
            assert entry.ttl > 1
        else:
            assert entry.ttl == 1
    # Wait for the ttl 1 records to expire
    await asyncio.sleep(1.1)
    loaded_info = r.ServiceInfo(type_, registration_name)
    loaded_info.load_from_cache(zc)
    # Once the flushed records expire, only the fresh address remains visible
    assert loaded_info.addresses == info.addresses
    await aiozc.async_close()
# This test uses asyncio because it needs to access the cache directly
# which is not threadsafe
@pytest.mark.asyncio
async def test_record_update_manager_add_listener_callsback_existing_records():
    """Test that the RecordUpdateManager will callback existing records."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc: Zeroconf = aiozc.zeroconf
    seen_updates = []

    class CaptureListener(r.RecordUpdateListener):
        """A RecordUpdateListener that does not implement update_records."""

        def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.RecordUpdate]) -> None:
            """Update multiple records in one shot."""
            seen_updates.extend(records)

    type_ = "_cacheflush._tcp.local."
    name = "knownname"
    registration_name = f"{name}.{type_}"
    desc = {'path': '/~paulsm/'}
    server_name = "server-uu1.local."
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, server_name, addresses=[socket.inet_aton("10.0.1.2")]
    )
    a_record = info.dns_addresses()[0]
    ptr_record = info.dns_pointer()
    # Pre-populate the cache, THEN attach the listener
    zc.cache.async_add_records([ptr_record, a_record, info.dns_text(), info.dns_service()])

    listener = CaptureListener()
    questions = [
        r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN),
        r.DNSQuestion(server_name, const._TYPE_A, const._CLASS_IN),
    ]
    zc.add_listener(listener, questions)
    await asyncio.sleep(0)  # flush out the call_soon_threadsafe

    # The already-cached records matching the questions are replayed to the listener
    assert {record.new for record in seen_updates} == {ptr_record, a_record}
    # The old records should be None so we trigger Add events
    # in service browsers instead of Update events
    assert {record.old for record in seen_updates} == {None}
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_questions_query_handler_populates_the_question_history_from_qm_questions():
    """A QM (multicast) question and its known answers must be recorded in the question history."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    _clear_cache(zc)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
    question.unicast = False  # QM question (QU bit clear)
    known_answer = r.DNSPointer(
        "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.'
    )
    generated.add_question(question)
    generated.add_answer_at_time(known_answer, 0)
    # Capture the time just before the handler runs; it is used for the
    # history lookup below. (The previous redundant assignment of `now`
    # before _clear_cache was a dead store and has been removed.)
    now = r.current_time_millis()
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    # Nothing matching is registered, so no answers of any kind go out...
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # ...but the QM question (with its known answers) lands in the history
    assert zc.question_history.suppresses(question, now, {known_answer})
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_questions_query_handler_does_not_put_qu_questions_in_history():
    """A QU (unicast-requested) question must NOT be recorded in the question history."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    _clear_cache(zc)
    generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
    question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
    question.unicast = True  # QU question (QU bit set)
    known_answer = r.DNSPointer(
        "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.'
    )
    generated.add_question(question)
    generated.add_answer_at_time(known_answer, 0)
    # Capture the time just before the handler runs; the earlier redundant
    # `now` assignment (immediately overwritten) was a dead store and is removed.
    now = r.current_time_millis()
    packets = generated.packets()
    question_answers = zc.query_handler.async_response([r.DNSIncoming(packet) for packet in packets], False)
    assert not question_answers.ucast
    assert not question_answers.mcast_now
    assert not question_answers.mcast_aggregate
    assert not question_answers.mcast_aggregate_last_second
    # QU questions are answered directly and never enter the history
    assert not zc.question_history.suppresses(question, now, {known_answer})
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_guard_against_low_ptr_ttl():
    """Ensure we enforce a minimum for PTR record ttls to avoid excessive refresh queries from ServiceBrowsers.

    Some poorly designed IoT devices can set excessively low PTR
    TTLs which would cause ServiceBrowsers to flood the network
    with excessive refresh queries.
    """
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf

    def make_ptr(ttl, alias):
        """Build a unique PTR record for the shared test service type."""
        return r.DNSPointer(
            "myservicelow_tcp._tcp.local.",
            const._TYPE_PTR,
            const._CLASS_IN | const._CLASS_UNIQUE,
            ttl,
            alias,
        )

    # Apple uses a 15s minimum TTL, however we do not have the same
    # level of rate limit and safe guards so we use 1/4 of the recommended value
    low_ttl_ptr = make_ptr(2, 'low.local.')
    normal_ttl_ptr = make_ptr(const._DNS_OTHER_TTL, 'normal.local.')
    goodbye_ptr = make_ptr(0, 'goodbye.local.')

    response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
    for ptr in (low_ttl_ptr, normal_ttl_ptr, goodbye_ptr):
        response.add_answer_at_time(ptr, 0)
    incoming = r.DNSIncoming(response.packets()[0])
    zc.record_manager.async_updates_from_response(incoming)

    # The too-low TTL is clamped up to the enforced minimum...
    incoming_answer_low = zc.cache.async_get_unique(low_ttl_ptr)
    assert incoming_answer_low.ttl == const._DNS_PTR_MIN_TTL
    # ...a sane TTL is taken verbatim...
    incoming_answer_normal = zc.cache.async_get_unique(normal_ttl_ptr)
    assert incoming_answer_normal.ttl == const._DNS_OTHER_TTL
    # ...and a goodbye (TTL 0) record is never cached.
    assert zc.cache.async_get_unique(goodbye_ptr) is None
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_duplicate_goodbye_answers_in_packet():
    """Ensure we do not throw an exception when there are duplicate goodbye records in a packet."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc = aiozc.zeroconf
    live_ptr = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        'host.local.',
    )
    goodbye_ptr = r.DNSPointer(
        "myservicelow_tcp._tcp.local.",
        const._TYPE_PTR,
        const._CLASS_IN | const._CLASS_UNIQUE,
        0,
        'host.local.',
    )

    def feed(*answers):
        """Pack *answers* into one response and hand it to the record manager."""
        out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        for answer in answers:
            out.add_answer_at_time(answer, 0)
        zc.record_manager.async_updates_from_response(r.DNSIncoming(out.packets()[0]))

    # Establish the record, then deliver the same goodbye twice in one packet —
    # processing it must not raise.
    feed(live_ptr)
    feed(goodbye_ptr, goodbye_ptr)
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_response_aggregation_timings(run_isolated):
    """Verify multicast responses are aggregated.

    Exercises three regimes: coalescing of concurrent queries into one
    aggregated answer, the fast path for a lone query, and the 1 second
    rate limit after a recent multicast send.
    """
    type_ = "_mservice._tcp.local."
    type_2 = "_mservice2._tcp.local."
    type_3 = "_mservice3._tcp.local."
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    await aiozc.zeroconf.async_wait_for_start()
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{type_2}"
    registration_name3 = f"{name}.{type_3}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    info2 = ServiceInfo(
        type_2, registration_name2, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.3")]
    )
    info3 = ServiceInfo(
        type_3, registration_name3, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.3")]
    )
    aiozc.zeroconf.registry.async_add(info)
    aiozc.zeroconf.registry.async_add(info2)
    aiozc.zeroconf.registry.async_add(info3)
    # One multicast PTR query per service, plus query4 which asks for two at once
    query = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question = r.DNSQuestion(info.type, const._TYPE_PTR, const._CLASS_IN)
    query.add_question(question)
    query2 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question2 = r.DNSQuestion(info2.type, const._TYPE_PTR, const._CLASS_IN)
    query2.add_question(question2)
    query3 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question3 = r.DNSQuestion(info3.type, const._TYPE_PTR, const._CLASS_IN)
    query3.add_question(question3)
    query4 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    query4.add_question(question)
    query4.add_question(question2)
    zc = aiozc.zeroconf
    protocol = zc.engine.protocols[0]
    with unittest.mock.patch.object(aiozc.zeroconf, "async_send") as send_mock:
        # Three datagrams arrive close together (one is a duplicate of query)
        protocol.datagram_received(query.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        protocol.datagram_received(query.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        await asyncio.sleep(0.7)
        # Should aggregate into a single answer with up to a 500ms + 120ms delay
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.handle_response(incoming)
        assert info.dns_pointer() in incoming.answers
        assert info2.dns_pointer() in incoming.answers
        send_mock.reset_mock()
        protocol.datagram_received(query3.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        await asyncio.sleep(0.3)
        # Should send within 120ms since there are no other
        # answers to aggregate with
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.handle_response(incoming)
        assert info3.dns_pointer() in incoming.answers
        send_mock.reset_mock()
        # Because the response was sent in the last second we need to make
        # sure the next answer is delayed at least a second
        aiozc.zeroconf.engine.protocols[0].datagram_received(
            query4.packets()[0], ('127.0.0.1', const._MDNS_PORT)
        )
        await asyncio.sleep(0.5)
        # After 0.5 seconds it should not have been sent
        # Protect the network against excessive packet flooding
        # https://datatracker.ietf.org/doc/html/rfc6762#section-14
        calls = send_mock.mock_calls
        assert len(calls) == 0
        send_mock.reset_mock()
        # Past the 1s protection window the held answer finally goes out
        await asyncio.sleep(1.2)
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        assert info.dns_pointer() in incoming.answers
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_response_aggregation_timings_multiple(run_isolated):
    """Verify multicast responses that are aggregated do not take longer than 620ms to send.

    620ms is the maximum random delay of 120ms and 500ms additional for aggregation."""
    type_2 = "_mservice2._tcp.local."
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    await aiozc.zeroconf.async_wait_for_start()
    name = "xxxyyy"
    registration_name2 = f"{name}.{type_2}"
    desc = {'path': '/~paulsm/'}
    info2 = ServiceInfo(
        type_2, registration_name2, 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.3")]
    )
    aiozc.zeroconf.registry.async_add(info2)
    query2 = r.DNSOutgoing(const._FLAGS_QR_QUERY, multicast=True)
    question2 = r.DNSQuestion(info2.type, const._TYPE_PTR, const._CLASS_IN)
    query2.add_question(question2)
    zc = aiozc.zeroconf
    protocol = zc.engine.protocols[0]
    # Duplicate-packet suppression is disabled so every datagram is processed
    with unittest.mock.patch.object(aiozc.zeroconf, "async_send") as send_mock, unittest.mock.patch.object(
        protocol, "suppress_duplicate_packet", return_value=False
    ):
        send_mock.reset_mock()
        protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        await asyncio.sleep(0.2)
        # A lone query is answered within the 120ms random delay
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.handle_response(incoming)
        assert info2.dns_pointer() in incoming.answers
        send_mock.reset_mock()
        protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        await asyncio.sleep(1.2)
        # A repeat inside the 1s protection window is delayed but still sent
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.handle_response(incoming)
        assert info2.dns_pointer() in incoming.answers
        send_mock.reset_mock()
        protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        protocol.datagram_received(query2.packets()[0], ('127.0.0.1', const._MDNS_PORT))
        # The delay should increase with two packets and
        # 900ms is beyond the maximum aggregation delay
        # when there is no network protection delay
        await asyncio.sleep(0.9)
        calls = send_mock.mock_calls
        assert len(calls) == 0
        # 1000ms (1s network protection delays)
        # - 900ms (already slept)
        # + 120ms (maximum random delay)
        # + 200ms (maximum protected aggregation delay)
        # + 20ms (execution time)
        await asyncio.sleep(millis_to_seconds(1000 - 900 + 120 + 200 + 20))
        calls = send_mock.mock_calls
        assert len(calls) == 1
        outgoing = send_mock.call_args[0][0]
        incoming = r.DNSIncoming(outgoing.packets()[0])
        zc.handle_response(incoming)
        assert info2.dns_pointer() in incoming.answers
    # Fix: close the instance so sockets/tasks do not leak into later tests
    # (the original test never called async_close()).
    await aiozc.async_close()
@pytest.mark.asyncio
async def test_response_aggregation_random_delay():
    """Verify the random delay for outgoing multicast will coalesce into a single group

    When the random delay is shorter than the last outgoing group,
    the groups should be combined.
    """
    type_ = "_mservice._tcp.local."
    type_2 = "_mservice2._tcp.local."
    type_3 = "_mservice3._tcp.local."
    type_4 = "_mservice4._tcp.local."
    type_5 = "_mservice5._tcp.local."
    name = "xxxyyy"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, f"{name}.{type_}", 80, 0, 0, desc, "ash-1.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    info2 = ServiceInfo(
        type_2, f"{name}.{type_2}", 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.3")]
    )
    info3 = ServiceInfo(
        type_3, f"{name}.{type_3}", 80, 0, 0, desc, "ash-3.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    info4 = ServiceInfo(
        type_4, f"{name}.{type_4}", 80, 0, 0, desc, "ash-4.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    info5 = ServiceInfo(
        type_5, f"{name}.{type_5}", 80, 0, 0, desc, "ash-5.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    fake_zc = unittest.mock.MagicMock()
    out_queue = MulticastOutgoingQueue(fake_zc, 0, 500)
    now = current_time_millis()

    def add_with_delay(interval, answers):
        """Queue *answers* while the random multicast delay is pinned to *interval*."""
        with unittest.mock.patch.object(_handlers, "_MULTICAST_DELAY_RANDOM_INTERVAL", interval):
            out_queue.async_add(now, answers)

    add_with_delay((500, 600), {info.dns_pointer(): set()})
    # Groups scheduled to fire earlier are always folded into the first group
    add_with_delay((300, 400), {info2.dns_pointer(): set()})
    add_with_delay((100, 200), {info3.dns_pointer(): set(), info4.dns_pointer(): set()})
    assert len(out_queue.queue) == 1
    first_answers = out_queue.queue[0].answers
    for pointer in (info.dns_pointer(), info2.dns_pointer(), info3.dns_pointer(), info4.dns_pointer()):
        assert pointer in first_answers
    # A group scheduled after the tail of the queue starts a fresh group
    add_with_delay((700, 800), {info5.dns_pointer(): set()})
    assert len(out_queue.queue) == 2
    second_answers = out_queue.queue[1].answers
    for pointer in (info.dns_pointer(), info2.dns_pointer(), info3.dns_pointer(), info4.dns_pointer()):
        assert pointer not in second_answers
    assert info5.dns_pointer() in second_answers
@pytest.mark.asyncio
async def test_future_answers_are_removed_on_send():
    """Verify any future answers scheduled to be sent are removed when we send."""
    type_ = "_mservice._tcp.local."
    type_2 = "_mservice2._tcp.local."
    name = "xxxyyy"
    registration_name = f"{name}.{type_}"
    registration_name2 = f"{name}.{type_2}"
    desc = {'path': '/~paulsm/'}
    info = ServiceInfo(
        type_, registration_name, 80, 0, 0, desc, "ash-1.local.", addresses=[socket.inet_aton("10.0.1.2")]
    )
    info2 = ServiceInfo(
        type_2, registration_name2, 80, 0, 0, desc, "ash-2.local.", addresses=[socket.inet_aton("10.0.1.3")]
    )
    fake_zc = unittest.mock.MagicMock()
    out_queue = MulticastOutgoingQueue(fake_zc, 0, 0)
    now = current_time_millis()

    def schedule(delay_ms, answers):
        """Queue *answers* with the random multicast delay pinned to *delay_ms*."""
        with unittest.mock.patch.object(
            _handlers, "_MULTICAST_DELAY_RANDOM_INTERVAL", (delay_ms, delay_ms)
        ):
            out_queue.async_add(now, answers)

    schedule(1, {info.dns_pointer(): set()})
    assert len(out_queue.queue) == 1
    schedule(2, {info.dns_pointer(): set()})
    assert len(out_queue.queue) == 2
    # Two far-future groups: info2 and a duplicate of info's pointer
    schedule(1000, {info2.dns_pointer(): set()})
    schedule(1000, {info.dns_pointer(): set()})
    assert len(out_queue.queue) == 3
    await asyncio.sleep(0.1)
    out_queue.async_ready()
    assert len(out_queue.queue) == 1
    # The answer should get removed because we just sent it
    assert info.dns_pointer() not in out_queue.queue[0].answers
    # But the one we have not sent yet should still go out later
    assert info2.dns_pointer() in out_queue.queue[0].answers
@pytest.mark.asyncio
async def test_add_listener_warns_when_not_using_record_update_listener(caplog):
    """Log when a listener is added that is not using RecordUpdateListener as a base class."""
    aiozc = AsyncZeroconf(interfaces=['127.0.0.1'])
    zc: Zeroconf = aiozc.zeroconf
    captured = []

    class NotARecordListener:
        """Quacks like a listener but does not subclass RecordUpdateListener."""

        def async_update_records(self, zc: 'Zeroconf', now: float, records: List[r.RecordUpdate]) -> None:
            """Update multiple records in one shot."""
            captured.extend(records)

    zc.add_listener(NotARecordListener(), None)
    await asyncio.sleep(0)  # flush out any call soons
    assert "listeners passed to async_add_listener must inherit from RecordUpdateListener" in caplog.text
    await aiozc.async_close()
python-zeroconf-0.38.3/tests/test_history.py 0000664 0000000 0000000 00000004706 14176067602 0021245 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""Unit tests for _history.py."""
from zeroconf._history import QuestionHistory
import zeroconf as r
import zeroconf.const as const
def test_question_suppression():
    """A duplicate question is suppressed only while its known answers are covered."""
    history = QuestionHistory()
    question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
    now = r.current_time_millis()
    answers_from_peer = {
        r.DNSPointer(
            "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.'
        )
    }
    answers_we_know = {
        r.DNSPointer(
            "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-us._hap._tcp.local.'
        )
    }
    history.add_question_at_time(question, now, answers_from_peer)
    # Suppressed when our known answers match the recorded ones exactly
    assert history.suppresses(question, now, answers_from_peer)
    # Suppressed when our known answers are a superset of the recorded ones
    assert history.suppresses(question, now, answers_from_peer | answers_we_know)
    # NOT suppressed when our known answers do not cover the recorded ones
    assert not history.suppresses(question, now, set())
    assert not history.suppresses(question, now, answers_we_know)
    # NOT suppressed once the recorded question is more than 1s old
    assert not history.suppresses(question, now + 1000, answers_from_peer)
def test_question_expire():
    """Entries leave the history via async_expire once they are over 1s old."""
    history = QuestionHistory()
    question = r.DNSQuestion("_hap._tcp._local.", const._TYPE_PTR, const._CLASS_IN)
    now = r.current_time_millis()
    peer_known_answers = {
        r.DNSPointer(
            "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN, 10000, 'known-to-other._hap._tcp.local.'
        )
    }
    history.add_question_at_time(question, now, peer_known_answers)
    # A fresh entry suppresses the duplicate question
    assert history.suppresses(question, now, peer_known_answers)
    # Expiring at the same instant keeps the entry (not yet 1s old)
    history.async_expire(now)
    assert history.suppresses(question, now, peer_known_answers)
    # One second later the entry is purged and no longer suppresses
    history.async_expire(now + 1000)
    assert not history.suppresses(question, now, peer_known_answers)
python-zeroconf-0.38.3/tests/test_init.py 0000664 0000000 0000000 00000014760 14176067602 0020510 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf.py """
import logging
import socket
import time
import unittest
import unittest.mock
from unittest.mock import patch
import zeroconf as r
from zeroconf import ServiceInfo, Zeroconf, const
from . import _inject_responses
log = logging.getLogger('zeroconf')
# Level captured by setup_module(); NOTSET means "never captured".
original_logging_level = logging.NOTSET


def setup_module():
    """Remember the zeroconf log level and force DEBUG for this module's tests."""
    global original_logging_level
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)


def teardown_module():
    """Restore the pre-test log level (skip if it was never recorded)."""
    if original_logging_level != logging.NOTSET:
        log.setLevel(original_logging_level)
class Names(unittest.TestCase):
    """Name length edge cases and service name-conflict resolution."""

    def test_long_name(self):
        """A question with many labels round-trips through a packet."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(
            "this.is.a.very.long.name.with.lots.of.parts.in.it.local.", const._TYPE_SRV, const._CLASS_IN
        )
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_exceedingly_long_name(self):
        """A ~5000-char name of short labels is still encodable and parseable."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        name = "%slocal." % ("part." * 1000)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_extra_exceedingly_long_name(self):
        """An even longer (~20000-char) name is still encodable and parseable."""
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        name = "%slocal." % ("part." * 4000)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_exceedingly_long_name_part(self):
        """A single overlong label raises NamePartTooLongException at packet time."""
        name = "%s.local." % ("a" * 1000)
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        # The error surfaces when packets() serializes, not when the question is added
        self.assertRaises(r.NamePartTooLongException, generated.packets)

    def test_same_name(self):
        """The same question added twice still produces a parseable packet."""
        name = "paired.local."
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion(name, const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        generated.add_question(question)
        r.DNSIncoming(generated.packets()[0])

    def test_verify_name_change_with_lots_of_names(self):
        """allow_name_change picks a free numeric suffix among many existing hosts."""
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=['127.0.0.1'])
        # create a bunch of servers
        type_ = "_my-service._tcp.local."
        name = 'a wonderful service'
        server_count = 300
        self.generate_many_hosts(zc, type_, name, server_count)
        # verify that name changing works
        self.verify_name_change(zc, type_, name, server_count)
        zc.close()

    def test_large_packet_exception_log_handling(self):
        """Verify we downgrade debug after warning."""
        # instantiate a zeroconf instance
        zc = Zeroconf(interfaces=['127.0.0.1'])
        with patch('zeroconf._logger.log.warning') as mocked_log_warn, patch(
            'zeroconf._logger.log.debug'
        ) as mocked_log_debug:
            # now that we have a long packet in our possession, let's verify the
            # exception handling.
            out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
            out.data.append(b'\0' * 10000)
            # mock the zeroconf logger and check for the correct logging backoff
            call_counts = mocked_log_warn.call_count, mocked_log_debug.call_count
            # try to send an oversized packet
            zc.send(out)
            # warning count must not grow on repeated failures (backoff to debug)
            assert mocked_log_warn.call_count == call_counts[0]
            zc.send(out)
            assert mocked_log_warn.call_count == call_counts[0]
            # mock the zeroconf logger and check for the correct logging backoff
            call_counts = mocked_log_warn.call_count, mocked_log_debug.call_count
            # force receive on oversized packet
            zc.send(out, const._MDNS_ADDR, const._MDNS_PORT)
            zc.send(out, const._MDNS_ADDR, const._MDNS_PORT)
            time.sleep(0.3)
            r.log.debug(
                'warn %d debug %d was %s',
                mocked_log_warn.call_count,
                mocked_log_debug.call_count,
                call_counts,
            )
            # subsequent failures must be logged at debug level only
            assert mocked_log_debug.call_count > call_counts[0]
        # close our zeroconf which will close the sockets
        zc.close()

    def verify_name_change(self, zc, type_, name, number_hosts):
        """Register a conflicting service and confirm the renamed-service suffix."""
        desc = {'path': '/~paulsm/'}
        info_service = ServiceInfo(
            type_,
            f'{name}.{type_}',
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        # verify name conflict
        self.assertRaises(r.NonUniqueNameException, zc.register_service, info_service)
        # verify no name conflict https://tools.ietf.org/html/rfc6762#section-6.6
        zc.register_service(info_service, cooperating_responders=True)
        # Create a new object since allow_name_change will mutate the
        # original object and then we will have the wrong service
        # in the registry
        info_service2 = ServiceInfo(
            type_,
            f'{name}.{type_}',
            80,
            0,
            0,
            desc,
            "ash-2.local.",
            addresses=[socket.inet_aton("10.0.1.2")],
        )
        zc.register_service(info_service2, allow_name_change=True)
        # The chosen name is "<name>-<n+1>" where n hosts already exist
        assert info_service2.name.split('.')[0] == '%s-%d' % (name, number_hosts + 1)

    def generate_many_hosts(self, zc, type_, name, number_hosts):
        """Inject PTR/SRV responses for many hosts named '<name>-<i>'."""
        block_size = 25
        # Round the host count up to a multiple of block_size
        number_hosts = int((number_hosts - 1) / block_size + 1) * block_size
        out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
        for i in range(1, number_hosts + 1):
            next_name = name if i == 1 else '%s-%d' % (name, i)
            self.generate_host(out, next_name, type_)
        _inject_responses(zc, [r.DNSIncoming(packet) for packet in out.packets()])

    @staticmethod
    def generate_host(out, host_name, type_):
        """Append a PTR and matching SRV answer for one host to *out*."""
        name = '.'.join((host_name, type_))
        out.add_answer_at_time(
            r.DNSPointer(type_, const._TYPE_PTR, const._CLASS_IN, const._DNS_OTHER_TTL, name), 0
        )
        out.add_answer_at_time(
            r.DNSService(
                type_,
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                name,
            ),
            0,
        )
python-zeroconf-0.38.3/tests/test_logger.py 0000664 0000000 0000000 00000006503 14176067602 0021020 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
"""Unit tests for logger.py."""
import logging
from unittest.mock import call, patch
from zeroconf._logger import QuietLogger, set_logger_level_if_unset
def test_loading_logger():
    """Test loading logger does not change level unless it is unset."""
    zeroconf_log = logging.getLogger('zeroconf')
    # An explicitly configured level must be left alone...
    zeroconf_log.setLevel(logging.CRITICAL)
    set_logger_level_if_unset()
    assert logging.getLogger('zeroconf').level == logging.CRITICAL
    # ...while an unset (NOTSET) level is promoted to WARNING.
    zeroconf_log.setLevel(logging.NOTSET)
    set_logger_level_if_unset()
    assert logging.getLogger('zeroconf').level == logging.WARNING
def test_log_warning_once():
    """Test we only log with warning level once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    # First occurrence goes out at warning level...
    with patch("zeroconf._logger.log.warning") as warn_mock, patch(
        "zeroconf._logger.log.debug"
    ) as debug_mock:
        quiet_logger.log_warning_once("the warning")
        assert warn_mock.mock_calls
        assert not debug_mock.mock_calls
    # ...and repeats are demoted to debug.
    with patch("zeroconf._logger.log.warning") as warn_mock, patch(
        "zeroconf._logger.log.debug"
    ) as debug_mock:
        quiet_logger.log_warning_once("the warning")
        assert not warn_mock.mock_calls
        assert debug_mock.mock_calls
def test_log_exception_warning():
    """Test we only log with warning level once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    # First exception warning is logged at warning level...
    with patch("zeroconf._logger.log.warning") as warn_mock, patch(
        "zeroconf._logger.log.debug"
    ) as debug_mock:
        quiet_logger.log_exception_warning("the exception warning")
        assert warn_mock.mock_calls
        assert not debug_mock.mock_calls
    # ...subsequent identical warnings fall back to debug.
    with patch("zeroconf._logger.log.warning") as warn_mock, patch(
        "zeroconf._logger.log.debug"
    ) as debug_mock:
        quiet_logger.log_exception_warning("the exception warning")
        assert not warn_mock.mock_calls
        assert debug_mock.mock_calls
# NOTE(review): the "llog" in the name looks like a typo, but renaming would
# change the public test id, so it is kept as-is.
def test_llog_exception_debug():
    """Test we only log with a trace once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    # First call includes the traceback (exc_info=True); repeats do not.
    for expected_exc_info in (True, False):
        with patch("zeroconf._logger.log.debug") as debug_mock:
            quiet_logger.log_exception_debug("the exception")
            assert debug_mock.mock_calls == [call('the exception', exc_info=expected_exc_info)]
def test_log_exception_once():
    """Test we only log with warning level once."""
    QuietLogger._seen_logs = {}
    quiet_logger = QuietLogger()
    exc = Exception()
    # First call for this message logs at warning level...
    with patch("zeroconf._logger.log.warning") as warn_mock, patch(
        "zeroconf._logger.log.debug"
    ) as debug_mock:
        quiet_logger.log_exception_once(exc, "the exceptional exception warning")
        assert warn_mock.mock_calls
        assert not debug_mock.mock_calls
    # ...the repeat is demoted to debug.
    with patch("zeroconf._logger.log.warning") as warn_mock, patch(
        "zeroconf._logger.log.debug"
    ) as debug_mock:
        quiet_logger.log_exception_once(exc, "the exceptional exception warning")
        assert not warn_mock.mock_calls
        assert debug_mock.mock_calls
python-zeroconf-0.38.3/tests/test_protocol.py 0000664 0000000 0000000 00000121265 14176067602 0021405 0 ustar 00root root 0000000 0000000 #!/usr/bin/env python
""" Unit tests for zeroconf._protocol """
import copy
import logging
import os
import socket
import struct
import unittest
import unittest.mock
from typing import cast
import zeroconf as r
from zeroconf import DNSIncoming, const, current_time_millis
from zeroconf import (
DNSHinfo,
DNSText,
)
from . import has_working_ipv6
# Logger of the package under test; setup_module() raises it to DEBUG.
log = logging.getLogger('zeroconf')
# NOTSET acts as the sentinel for "no level has been saved yet".
original_logging_level = logging.NOTSET
def setup_module():
    """Remember the zeroconf logger's level and switch it to DEBUG for this module."""
    global original_logging_level
    # Save first so teardown_module() can restore the caller's configuration.
    original_logging_level = log.level
    log.setLevel(logging.DEBUG)
def teardown_module():
    """Restore the log level recorded by setup_module, if any was recorded."""
    if original_logging_level == logging.NOTSET:
        # setup_module never ran (or saved NOTSET); nothing to restore.
        return
    log.setLevel(original_logging_level)
class PacketGeneration(unittest.TestCase):
    """Round-trip tests: packets built by DNSOutgoing must parse via DNSIncoming."""

    def test_parse_own_packet_simple(self):
        generated = r.DNSOutgoing(0)
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_simple_unicast(self):
        generated = r.DNSOutgoing(0, False)
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_flags(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_question(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        generated.add_question(r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN))
        r.DNSIncoming(generated.packets()[0])

    def test_parse_own_packet_nsec(self):
        answer = r.DNSNsec(
            'eufy HomeBase2-2464._hap._tcp.local.',
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            'eufy HomeBase2-2464._hap._tcp.local.',
            [const._TYPE_TXT, const._TYPE_SRV],
        )
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(answer, 0)
        parsed = r.DNSIncoming(generated.packets()[0])
        assert answer in parsed.answers
        # Types > 255 should be ignored, so the parsed record still equals `answer`.
        answer_invalid_types = r.DNSNsec(
            'eufy HomeBase2-2464._hap._tcp.local.',
            const._TYPE_NSEC,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            'eufy HomeBase2-2464._hap._tcp.local.',
            [const._TYPE_TXT, const._TYPE_SRV, 1000],
        )
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(answer_invalid_types, 0)
        parsed = r.DNSIncoming(generated.packets()[0])
        assert answer in parsed.answers

    def test_parse_own_packet_response(self):
        # Non-ASCII name exercises UTF-8 encoding of labels.
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(
            r.DNSService(
                "æøå.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
            0,
        )
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.answers) == 1
        assert len(generated.answers) == len(parsed.answers)

    def test_adding_empty_answer(self):
        # A None answer must be silently ignored, not crash or emit a record.
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(
            None,
            0,
        )
        generated.add_answer_at_time(
            r.DNSService(
                "æøå.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
            0,
        )
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.answers) == 1
        assert len(generated.answers) == len(parsed.answers)

    def test_adding_expired_answer(self):
        # An answer whose "now" is far past its TTL window must be dropped.
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        generated.add_answer_at_time(
            r.DNSService(
                "æøå.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
            current_time_millis() + 1000000,
        )
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.answers) == 0
        assert len(generated.answers) == len(parsed.answers)

    def test_match_question(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN)
        generated.add_question(question)
        parsed = r.DNSIncoming(generated.packets()[0])
        assert len(generated.questions) == 1
        assert len(generated.questions) == len(parsed.questions)
        assert question == parsed.questions[0]

    def test_suppress_answer(self):
        """Known-answer suppression: answers already in the query are not re-sent."""
        query_generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN)
        query_generated.add_question(question)
        answer1 = r.DNSService(
            "testname1.local.",
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "foo.local.",
        )
        staleanswer2 = r.DNSService(
            "testname2.local.",
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_HOST_TTL / 2,
            0,
            0,
            80,
            "foo.local.",
        )
        answer2 = r.DNSService(
            "testname2.local.",
            const._TYPE_SRV,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_HOST_TTL,
            0,
            0,
            80,
            "foo.local.",
        )
        query_generated.add_answer_at_time(answer1, 0)
        query_generated.add_answer_at_time(staleanswer2, 0)
        query = r.DNSIncoming(query_generated.packets()[0])
        # Should be suppressed
        response = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        response.add_answer(query, answer1)
        assert len(response.answers) == 0
        # Should not be suppressed, TTL in query is too short
        response.add_answer(query, answer2)
        assert len(response.answers) == 1
        # Should not be suppressed, name is different
        tmp = copy.copy(answer1)
        tmp.key = "testname3.local."
        tmp.name = "testname3.local."
        response.add_answer(query, tmp)
        assert len(response.answers) == 2
        # Should not be suppressed, type is different
        tmp = copy.copy(answer1)
        tmp.type = const._TYPE_A
        response.add_answer(query, tmp)
        assert len(response.answers) == 3
        # Should not be suppressed, class is different
        tmp = copy.copy(answer1)
        tmp.class_ = const._CLASS_NONE
        response.add_answer(query, tmp)
        assert len(response.answers) == 4
        # ::TODO:: could add additional tests for DNSAddress, DNSHinfo, DNSPointer, DNSText, DNSService

    def test_dns_hinfo(self):
        generated = r.DNSOutgoing(0)
        generated.add_additional_answer(DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu', 'os'))
        parsed = r.DNSIncoming(generated.packets()[0])
        answer = cast(r.DNSHinfo, parsed.answers[0])
        assert answer.cpu == 'cpu'
        assert answer.os == 'os'
        # A character-string over 255 bytes cannot be encoded and must raise.
        generated = r.DNSOutgoing(0)
        generated.add_additional_answer(DNSHinfo('irrelevant', const._TYPE_HINFO, 0, 0, 'cpu', 'x' * 257))
        self.assertRaises(r.NamePartTooLongException, generated.packets)

    def test_many_questions(self):
        """Test many questions get separated into multiple packets."""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        questions = []
        for i in range(100):
            question = r.DNSQuestion(f"testname{i}.local.", const._TYPE_SRV, const._CLASS_IN)
            generated.add_question(question)
            questions.append(question)
        assert len(generated.questions) == 100
        packets = generated.packets()
        assert len(packets) == 2
        assert len(packets[0]) < const._MAX_MSG_TYPICAL
        assert len(packets[1]) < const._MAX_MSG_TYPICAL
        parsed1 = r.DNSIncoming(packets[0])
        assert len(parsed1.questions) == 85
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 15

    def test_many_questions_with_many_known_answers(self):
        """Test many questions and known answers get separated into multiple packets."""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        questions = []
        for _ in range(30):
            question = r.DNSQuestion("_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN)
            generated.add_question(question)
            questions.append(question)
        assert len(generated.questions) == 30
        now = current_time_millis()
        for _ in range(200):
            # NOTE(review): "{i}" is a literal (no f-prefix) so all 200 pointers
            # share one name — presumably intentional to exercise packing; confirm.
            known_answer = r.DNSPointer(
                "myservice{i}_tcp._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                '123.local.',
            )
            generated.add_answer_at_time(known_answer, now)
        packets = generated.packets()
        assert len(packets) == 3
        assert len(packets[0]) <= const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_TYPICAL
        assert len(packets[2]) <= const._MAX_MSG_TYPICAL
        parsed1 = r.DNSIncoming(packets[0])
        assert len(parsed1.questions) == 30
        assert len(parsed1.answers) == 88
        assert parsed1.truncated
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 0
        assert len(parsed2.answers) == 101
        assert parsed2.truncated
        parsed3 = r.DNSIncoming(packets[2])
        assert len(parsed3.questions) == 0
        assert len(parsed3.answers) == 11
        assert not parsed3.truncated

    def test_massive_probe_packet_split(self):
        """Test probe with many authorative answers."""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
        questions = []
        for _ in range(30):
            question = r.DNSQuestion(
                "_hap._tcp.local.", const._TYPE_PTR, const._CLASS_IN | const._CLASS_UNIQUE
            )
            generated.add_question(question)
            questions.append(question)
        assert len(generated.questions) == 30
        now = current_time_millis()
        for _ in range(200):
            # NOTE(review): "{i}" is a literal (no f-prefix); see known-answers test.
            authorative_answer = r.DNSPointer(
                "myservice{i}_tcp._tcp.local.",
                const._TYPE_PTR,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                '123.local.',
            )
            generated.add_authorative_answer(authorative_answer)
        packets = generated.packets()
        assert len(packets) == 3
        assert len(packets[0]) <= const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_TYPICAL
        assert len(packets[2]) <= const._MAX_MSG_TYPICAL
        parsed1 = r.DNSIncoming(packets[0])
        assert parsed1.questions[0].unicast is True
        assert len(parsed1.questions) == 30
        assert parsed1.num_authorities == 88
        assert parsed1.truncated
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 0
        assert parsed2.num_authorities == 101
        assert parsed2.truncated
        parsed3 = r.DNSIncoming(packets[2])
        assert len(parsed3.questions) == 0
        assert parsed3.num_authorities == 11
        assert not parsed3.truncated

    def test_only_one_answer_can_by_large(self):
        """Test that only the first answer in each packet can be large.

        https://datatracker.ietf.org/doc/html/rfc6762#section-17
        """
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        query = r.DNSIncoming(r.DNSOutgoing(const._FLAGS_QR_QUERY).packets()[0])
        for i in range(3):
            generated.add_answer(
                query,
                r.DNSText(
                    "zoom._hap._tcp.local.",
                    const._TYPE_TXT,
                    const._CLASS_IN | const._CLASS_UNIQUE,
                    1200,
                    b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==' * 100,
                ),
            )
        generated.add_answer(
            query,
            r.DNSService(
                "testname1.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                "foo.local.",
            ),
        )
        assert len(generated.answers) == 4
        packets = generated.packets()
        assert len(packets) == 4
        # Each oversized TXT record travels alone in a packet of its own.
        assert len(packets[0]) <= const._MAX_MSG_ABSOLUTE
        assert len(packets[0]) > const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_ABSOLUTE
        assert len(packets[1]) > const._MAX_MSG_TYPICAL
        assert len(packets[2]) <= const._MAX_MSG_ABSOLUTE
        assert len(packets[2]) > const._MAX_MSG_TYPICAL
        assert len(packets[3]) <= const._MAX_MSG_TYPICAL
        for packet in packets:
            parsed = r.DNSIncoming(packet)
            assert len(parsed.answers) == 1

    def test_questions_do_not_end_up_every_packet(self):
        """Test that questions are not sent again when multiple packets are needed.

        https://datatracker.ietf.org/doc/html/rfc6762#section-7.2
        Sometimes a Multicast DNS querier will already have too many answers
        to fit in the Known-Answer Section of its query packets.... It MUST
        immediately follow the packet with another query packet containing no
        questions and as many more Known-Answer records as will fit.
        """
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        for i in range(35):
            question = r.DNSQuestion(f"testname{i}.local.", const._TYPE_SRV, const._CLASS_IN)
            generated.add_question(question)
            answer = r.DNSService(
                f"testname{i}.local.",
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                80,
                f"foo{i}.local.",
            )
            generated.add_answer_at_time(answer, 0)
        assert len(generated.questions) == 35
        assert len(generated.answers) == 35
        packets = generated.packets()
        assert len(packets) == 2
        assert len(packets[0]) <= const._MAX_MSG_TYPICAL
        assert len(packets[1]) <= const._MAX_MSG_TYPICAL
        parsed1 = r.DNSIncoming(packets[0])
        assert len(parsed1.questions) == 35
        assert len(parsed1.answers) == 33
        parsed2 = r.DNSIncoming(packets[1])
        assert len(parsed2.questions) == 0
        assert len(parsed2.answers) == 2
class PacketForm(unittest.TestCase):
    """Checks on the raw wire layout of the DNS header fields."""

    def test_transaction_id(self):
        """ID must be zero in a DNS-SD packet"""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        # Renamed from `bytes`/`id` to avoid shadowing builtins.
        data = generated.packets()[0]
        transaction_id = data[0] << 8 | data[1]
        assert transaction_id == 0

    def test_setting_id(self):
        """Test setting id in the constructor"""
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY, id_=4444)
        assert generated.id == 4444

    def test_query_header_bits(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_QUERY)
        data = generated.packets()[0]
        flags = data[2] << 8 | data[3]
        assert flags == 0x0

    def test_response_header_bits(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        data = generated.packets()[0]
        flags = data[2] << 8 | data[3]
        # 0x8000 is the QR (response) bit.
        assert flags == 0x8000

    def test_numbers(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        data = generated.packets()[0]
        # Bytes 4-12 of the header hold the four section counts.
        (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack('!4H', data[4:12])
        assert num_questions == 0
        assert num_answers == 0
        assert num_authorities == 0
        assert num_additionals == 0

    def test_numbers_questions(self):
        generated = r.DNSOutgoing(const._FLAGS_QR_RESPONSE)
        question = r.DNSQuestion("testname.local.", const._TYPE_SRV, const._CLASS_IN)
        for i in range(10):
            generated.add_question(question)
        data = generated.packets()[0]
        (num_questions, num_answers, num_authorities, num_additionals) = struct.unpack('!4H', data[4:12])
        assert num_questions == 10
        assert num_answers == 0
        assert num_authorities == 0
        assert num_additionals == 0
class TestDnsIncoming(unittest.TestCase):
    """Defensive-parsing tests for DNSIncoming on malformed or unusual packets."""

    def test_incoming_exception_handling(self):
        generated = r.DNSOutgoing(0)
        packet = generated.packets()[0]
        # Inject garbage after the first 8 header bytes to corrupt the packet.
        packet = packet[:8] + b'deadbeef' + packet[8:]
        # Removed a duplicated identical parse of the same packet (copy-paste leftover).
        parsed = r.DNSIncoming(packet)
        assert parsed.valid is False

    def test_incoming_unknown_type(self):
        # SOA is not a type the parser materializes, so no answers come back.
        generated = r.DNSOutgoing(0)
        answer = r.DNSAddress('a', const._TYPE_SOA, const._CLASS_IN, 1, b'a')
        generated.add_additional_answer(answer)
        packet = generated.packets()[0]
        parsed = r.DNSIncoming(packet)
        assert len(parsed.answers) == 0
        assert parsed.is_query() != parsed.is_response()

    def test_incoming_circular_reference(self):
        assert not r.DNSIncoming(
            bytes.fromhex(
                '01005e0000fb542a1bf0577608004500006897934000ff11d81bc0a86a31e00000fb'
                '14e914e90054f9b2000084000000000100000000095f7365727669636573075f646e'
                '732d7364045f756470056c6f63616c00000c0001000011940018105f73706f746966'
                '792d636f6e6e656374045f746370c023'
            )
        ).valid

    @unittest.skipIf(not has_working_ipv6(), 'Requires IPv6')
    @unittest.skipIf(os.environ.get('SKIP_IPV6'), 'IPv6 tests disabled')
    def test_incoming_ipv6(self):
        addr = "2606:2800:220:1:248:1893:25c8:1946"  # example.com
        packed = socket.inet_pton(socket.AF_INET6, addr)
        generated = r.DNSOutgoing(0)
        answer = r.DNSAddress('domain', const._TYPE_AAAA, const._CLASS_IN | const._CLASS_UNIQUE, 1, packed)
        generated.add_additional_answer(answer)
        packet = generated.packets()[0]
        parsed = r.DNSIncoming(packet)
        record = parsed.answers[0]
        assert isinstance(record, r.DNSAddress)
        assert record.address == packed
def test_dns_compression_rollback_for_corruption():
    """Verify rolling back does not lead to dns compression corruption."""
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    address = socket.inet_pton(socket.AF_INET, "192.168.208.5")
    # Realistic HomeKit service fixtures; every entry shares one A-record address
    # so the name-compression table sees heavy reuse across records.
    additionals = [
        {
            "name": "HASS Bridge ZJWH FF5137._hap._tcp.local.",
            "address": address,
            "port": 51832,
            "text": b"\x13md=HASS Bridge"
            b" ZJWH\x06pv=1.0\x14id=01:6B:30:FF:51:37\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=L0m/aQ==",
        },
        {
            "name": "HASS Bridge 3K9A C2582A._hap._tcp.local.",
            "address": address,
            "port": 51834,
            "text": b"\x13md=HASS Bridge"
            b" 3K9A\x06pv=1.0\x14id=E2:AA:5B:C2:58:2A\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b2CnzQ==",
        },
        {
            "name": "Master Bed TV CEDB27._hap._tcp.local.",
            "address": address,
            "port": 51830,
            "text": b"\x10md=Master Bed"
            b" TV\x06pv=1.0\x14id=9E:B7:44:CE:DB:27\x05c#=18\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=CVj1kw==",
        },
        {
            "name": "Living Room TV 921B77._hap._tcp.local.",
            "address": address,
            "port": 51833,
            "text": b"\x11md=Living Room"
            b" TV\x06pv=1.0\x14id=11:61:E7:92:1B:77\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=qU77SQ==",
        },
        {
            "name": "HASS Bridge ZC8X FF413D._hap._tcp.local.",
            "address": address,
            "port": 51829,
            "text": b"\x13md=HASS Bridge"
            b" ZC8X\x06pv=1.0\x14id=96:14:45:FF:41:3D\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=b0QZlg==",
        },
        {
            "name": "HASS Bridge WLTF 4BE61F._hap._tcp.local.",
            "address": address,
            "port": 51837,
            "text": b"\x13md=HASS Bridge"
            b" WLTF\x06pv=1.0\x14id=E0:E7:98:4B:E6:1F\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=ahAISA==",
        },
        {
            "name": "FrontdoorCamera 8941D1._hap._tcp.local.",
            "address": address,
            "port": 54898,
            "text": b"\x12md=FrontdoorCamera\x06pv=1.0\x14id=9F:B7:DC:89:41:D1\x04c#=2\x04"
            b"s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=0+MXmA==",
        },
        {
            "name": "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            "address": address,
            "port": 51836,
            "text": b"\x13md=HASS Bridge"
            b" W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=6fLM5A==",
        },
        {
            "name": "HASS Bridge Y9OO EFF0A7._hap._tcp.local.",
            "address": address,
            "port": 51838,
            "text": b"\x13md=HASS Bridge"
            b" Y9OO\x06pv=1.0\x14id=D3:FE:98:EF:F0:A7\x04c#=2\x04s#=1\x04ff=0\x04"
            b"ci=2\x04sf=0\x0bsh=u3bdfw==",
        },
        {
            "name": "Snooze Room TV 6B89B0._hap._tcp.local.",
            "address": address,
            "port": 51835,
            "text": b"\x11md=Snooze Room"
            b" TV\x06pv=1.0\x14id=5F:D5:70:6B:89:B0\x05c#=17\x04s#=1\x04ff=0\x05"
            b"ci=31\x04sf=0\x0bsh=xNTqsg==",
        },
        {
            "name": "AlexanderHomeAssistant 74651D._hap._tcp.local.",
            "address": address,
            "port": 54811,
            "text": b"\x19md=AlexanderHomeAssistant\x06pv=1.0\x14id=59:8A:0B:74:65:1D\x05"
            b"c#=14\x04s#=1\x04ff=0\x04ci=2\x04sf=0\x0bsh=ccZLPA==",
        },
        {
            "name": "HASS Bridge OS95 39C053._hap._tcp.local.",
            "address": address,
            "port": 51831,
            "text": b"\x13md=HASS Bridge"
            b" OS95\x06pv=1.0\x14id=7E:8C:E6:39:C0:53\x05c#=12\x04s#=1\x04ff=0\x04ci=2"
            b"\x04sf=0\x0bsh=Xfe5LQ==",
        },
    ]
    out.add_answer_at_time(
        DNSText(
            "HASS Bridge W9DN 5B5CC5._hap._tcp.local.",
            const._TYPE_TXT,
            const._CLASS_IN | const._CLASS_UNIQUE,
            const._DNS_OTHER_TTL,
            b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1'
            b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==',
        ),
        0,
    )
    # Add SRV, TXT and A additionals per fixture so packet boundaries fall
    # mid-record, forcing write rollbacks while the compression table is live.
    for record in additionals:
        out.add_additional_answer(
            r.DNSService(
                record["name"],  # type: ignore
                const._TYPE_SRV,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                0,
                0,
                record["port"],  # type: ignore
                record["name"],  # type: ignore
            )
        )
        out.add_additional_answer(
            r.DNSText(
                record["name"],  # type: ignore
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                record["text"],  # type: ignore
            )
        )
        out.add_additional_answer(
            r.DNSAddress(
                record["name"],  # type: ignore
                const._TYPE_A,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_HOST_TTL,
                record["address"],  # type: ignore
            )
        )
    for packet in out.packets():
        # Verify we can process the packets we created to
        # ensure there is no corruption with the dns compression
        incoming = r.DNSIncoming(packet)
        assert incoming.valid is True
        assert (
            len(incoming.answers)
            == incoming.num_answers + incoming.num_authorities + incoming.num_additionals
        )
def test_tc_bit_in_query_packet():
    """Verify the TC bit is set when known answers exceed the packet size."""
    out = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    type_ = "_hap._tcp.local."
    out.add_question(r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN))
    # 30 large TXT known answers cannot fit in one packet -> 3-packet split.
    for i in range(30):
        out.add_answer_at_time(
            DNSText(
                ("HASS Bridge W9DN %s._hap._tcp.local." % i),
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1'
                b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==',
            ),
            0,
        )
    packets = out.packets()
    assert len(packets) == 3
    first_packet = r.DNSIncoming(packets[0])
    assert first_packet.truncated
    assert first_packet.valid is True
    second_packet = r.DNSIncoming(packets[1])
    assert second_packet.truncated
    assert second_packet.valid is True
    # Only the final packet of a query train leaves TC clear.
    third_packet = r.DNSIncoming(packets[2])
    assert not third_packet.truncated
    assert third_packet.valid is True
def test_tc_bit_not_set_in_answer_packet():
    """Verify the TC bit is not set when there are no questions and answers exceed the packet size."""
    out = r.DNSOutgoing(const._FLAGS_QR_RESPONSE | const._FLAGS_AA)
    for i in range(30):
        out.add_answer_at_time(
            DNSText(
                ("HASS Bridge W9DN %s._hap._tcp.local." % i),
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1'
                b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==',
            ),
            0,
        )
    packets = out.packets()
    assert len(packets) == 3
    # Unlike the query case above, responses never set TC on any split packet.
    first_packet = r.DNSIncoming(packets[0])
    assert not first_packet.truncated
    assert first_packet.valid is True
    second_packet = r.DNSIncoming(packets[1])
    assert not second_packet.truncated
    assert second_packet.valid is True
    third_packet = r.DNSIncoming(packets[2])
    assert not third_packet.truncated
    assert third_packet.valid is True
# Captured packet (Wireshark summary line):
# 4003 15.973052 192.168.107.68 224.0.0.251 MDNS 76 Standard query 0xffc4 PTR _raop._tcp.local, "QM" question
def test_qm_packet_parser():
    """Test we can parse a query packet with the QM bit."""
    qm_packet = (
        b'\xff\xc4\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x05_raop\x04_tcp\x05local\x00\x00\x0c\x00\x01'
    )
    parsed = DNSIncoming(qm_packet)
    # QM (multicast-response) question -> unicast flag off.
    assert parsed.questions[0].unicast is False
    assert ",QM," in str(parsed.questions[0])
# Captured packet (Wireshark summary line):
# 389951 1450.577370 192.168.107.111 224.0.0.251 MDNS 115 Standard query 0x0000 PTR _companion-link._tcp.local, "QU" question OPT
def test_qu_packet_parser():
    """Test we can parse a query packet with the QU bit."""
    qu_packet = b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x01\x0f_companion-link\x04_tcp\x05local\x00\x00\x0c\x80\x01\x00\x00)\x05\xa0\x00\x00\x11\x94\x00\x12\x00\x04\x00\x0e\x00dz{\x8a6\x9czF\x84,\xcaQ\xff'
    parsed = DNSIncoming(qu_packet)
    # QU (unicast-response-requested) question -> unicast flag on.
    assert parsed.questions[0].unicast is True
    assert ",QU," in str(parsed.questions[0])
def test_parse_packet_with_nsec_record():
    """Test we can parse a packet with an NSEC record."""
    nsec_packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x01\x00\x00\x00\x03\x08_meshcop\x04_udp\x05local\x00\x00\x0c\x00"
        b"\x01\x00\x00\x11\x94\x00\x0f\x0cMyHome54 (2)\xc0\x0c\xc0+\x00\x10\x80\x01\x00\x00\x11\x94\x00"
        b")\x0bnn=MyHome54\x13xp=695034D148CC4784\x08tv=0.0.0\xc0+\x00!\x80\x01\x00\x00\x00x\x00\x15\x00"
        b"\x00\x00\x00\xc0'\x0cMaster-Bed-2\xc0\x1a\xc0+\x00/\x80\x01\x00\x00\x11\x94\x00\t\xc0+\x00\x05"
        b"\x00\x00\x80\x00@"
    )
    parsed = DNSIncoming(nsec_packet)
    # The fourth answer in the capture is the NSEC record.
    nsec_record = parsed.answers[3]
    assert "nsec," in str(nsec_record)
    # rdtypes 16/33 are TXT and SRV.
    assert nsec_record.rdtypes == [16, 33]
    assert nsec_record.next_name == "MyHome54 (2)._meshcop._udp.local."
def test_records_same_packet_share_fate():
    """Test records in the same packet all have the same created time."""
    out = r.DNSOutgoing(const._FLAGS_QR_QUERY | const._FLAGS_AA)
    type_ = "_hap._tcp.local."
    out.add_question(r.DNSQuestion(type_, const._TYPE_PTR, const._CLASS_IN))
    for i in range(30):
        out.add_answer_at_time(
            DNSText(
                ("HASS Bridge W9DN %s._hap._tcp.local." % i),
                const._TYPE_TXT,
                const._CLASS_IN | const._CLASS_UNIQUE,
                const._DNS_OTHER_TTL,
                b'\x13md=HASS Bridge W9DN\x06pv=1.0\x14id=11:8E:DB:5B:5C:C5\x05c#=12\x04s#=1'
                b'\x04ff=0\x04ci=2\x04sf=0\x0bsh=6fLM5A==',
            ),
            0,
        )
    for packet in out.packets():
        dnsin = DNSIncoming(packet)
        # All records parsed from one packet must share the first record's timestamp.
        first_time = dnsin.answers[0].created
        for answer in dnsin.answers:
            assert answer.created == first_time
def test_dns_compression_invalid_skips_bad_name_compress_in_question():
    """Test our wire parser can skip bad compression in questions."""
    packet = (
        b'\x00\x00\x00\x00\x00\x04\x00\x00\x00\x07\x00\x00\x11homeassistant1128\x05l'
        b'ocal\x00\x00\xff\x00\x014homeassistant1128 [534a4794e5ed41879ecf012252d3e02'
        b'a]\x0c_workstation\x04_tcp\xc0\x1e\x00\xff\x00\x014homeassistant1127 [534a47'
        b'94e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1123 [534a479'
        b'4e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x014homeassistant1118 [534a4794'
        b'e5ed41879ecf012252d3e02a]\xc0^\x00\xff\x00\x01\xc0\x0c\x00\x01\x80'
        b'\x01\x00\x00\x00x\x00\x04\xc0\xa8<\xc3\xc0v\x00\x10\x80\x01\x00\x00\x00'
        b'x\x00\x01\x00\xc0v\x00!\x80\x01\x00\x00\x00x\x00\x1f\x00\x00\x00\x00'
        b'\x00\x00\x11homeassistant1127\x05local\x00\xc0\xb1\x00\x10\x80'
        b'\x01\x00\x00\x00x\x00\x01\x00\xc0\xb1\x00!\x80\x01\x00\x00\x00x\x00\x1f'
        b'\x00\x00\x00\x00\x00\x00\x11homeassistant1123\x05local\x00\xc0)\x00\x10\x80'
        b'\x01\x00\x00\x00x\x00\x01\x00\xc0)\x00!\x80\x01\x00\x00\x00x\x00\x1f'
        b'\x00\x00\x00\x00\x00\x00\x11homeassistant1128\x05local\x00'
    )
    parsed = r.DNSIncoming(packet)
    # The header advertises 4 questions; all survive despite bad pointers.
    assert len(parsed.questions) == 4
def test_dns_compression_all_invalid(caplog):
    """Test our wire parser can skip all invalid data."""
    packet = (
        b'\x00\x00\x84\x00\x00\x00\x00\x01\x00\x00\x00\x00!roborock-vacuum-s5e_miio416'
        b'112328\x00\x00/\x80\x01\x00\x00\x00x\x00\t\xc0P\x00\x05@\x00\x00\x00\x00'
    )
    # The (addr, port) tuple is included so the skip is attributed in the log.
    parsed = r.DNSIncoming(packet, ("2.4.5.4", 5353))
    assert len(parsed.questions) == 0
    assert len(parsed.answers) == 0
    assert " Unable to parse; skipping record" in caplog.text
def test_invalid_next_name_ignored():
    """Test our wire parser does not throw on an invalid next name.

    The RFC states it should be ignored when used with mDNS.
    """
    packet = (
        b'\x00\x00\x00\x00\x00\x01\x00\x02\x00\x00\x00\x00\x07Android\x05local\x00\x00'
        b'\xff\x00\x01\xc0\x0c\x00/\x00\x01\x00\x00\x00x\x00\x08\xc02\x00\x04@'
        b'\x00\x00\x08\xc0\x0c\x00\x01\x00\x01\x00\x00\x00x\x00\x04\xc0\xa8X<'
    )
    parsed = r.DNSIncoming(packet)
    # Both records survive even though the NSEC next-name is invalid.
    assert len(parsed.questions) == 1
    assert len(parsed.answers) == 2
def test_dns_compression_invalid_skips_record():
    """Test our wire parser can skip records we do not know how to parse."""
    packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x06\x00\x00\x00\x00\x04_hap\x04_tcp\x05local\x00\x00\x0c"
        b"\x00\x01\x00\x00\x11\x94\x00\x16\x13eufy HomeBase2-2464\xc0\x0c\x04Eufy\xc0\x16\x00/"
        b"\x80\x01\x00\x00\x00x\x00\x08\xc0\xa6\x00\x04@\x00\x00\x08\xc0'\x00/\x80\x01\x00\x00"
        b"\x11\x94\x00\t\xc0'\x00\x05\x00\x00\x80\x00@\xc0=\x00\x01\x80\x01\x00\x00\x00x\x00\x04"
        b"\xc0\xa8Dp\xc0'\x00!\x80\x01\x00\x00\x00x\x00\x08\x00\x00\x00\x00\xd1_\xc0=\xc0'\x00"
        b"\x10\x80\x01\x00\x00\x11\x94\x00K\x04c#=1\x04ff=2\x14id=38:71:4F:6B:76:00\x08md=T8010"
        b"\x06pv=1.1\x05s#=75\x04sf=1\x04ci=2\x0bsh=xaQk4g=="
    )
    parsed = r.DNSIncoming(packet)
    # The well-formed NSEC record must still be recovered from the packet.
    answer = r.DNSNsec(
        'eufy HomeBase2-2464._hap._tcp.local.',
        const._TYPE_NSEC,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        'eufy HomeBase2-2464._hap._tcp.local.',
        [const._TYPE_TXT, const._TYPE_SRV],
    )
    assert answer in parsed.answers
def test_dns_compression_points_forward():
    """Test our wire parser can unpack nsec records with compression."""
    packet = (
        b"\x00\x00\x84\x00\x00\x00\x00\x07\x00\x00\x00\x00\x0eTV Beneden (2)"
        b"\x10_androidtvremote\x04_tcp\x05local\x00\x00\x10\x80\x01\x00\x00\x11"
        b"\x94\x00\x15\x14bt=D8:13:99:AC:98:F1\xc0\x0c\x00/\x80\x01\x00\x00\x11"
        b"\x94\x00\t\xc0\x0c\x00\x05\x00\x00\x80\x00@\tAndroid-3\xc01\x00/\x80"
        b"\x01\x00\x00\x00x\x00\x08\xc0\x9c\x00\x04@\x00\x00\x08\xc0l\x00\x01\x80"
        b"\x01\x00\x00\x00x\x00\x04\xc0\xa8X\x0f\xc0\x0c\x00!\x80\x01\x00\x00\x00"
        b"x\x00\x08\x00\x00\x00\x00\x19B\xc0l\xc0\x1b\x00\x0c\x00\x01\x00\x00\x11"
        b"\x94\x00\x02\xc0\x0c\t_services\x07_dns-sd\x04_udp\xc01\x00\x0c\x00\x01"
        b"\x00\x00\x11\x94\x00\x02\xc0\x1b"
    )
    parsed = r.DNSIncoming(packet)
    # The NSEC record must round-trip despite forward-pointing compression.
    answer = r.DNSNsec(
        'TV Beneden (2)._androidtvremote._tcp.local.',
        const._TYPE_NSEC,
        const._CLASS_IN | const._CLASS_UNIQUE,
        const._DNS_OTHER_TTL,
        'TV Beneden (2)._androidtvremote._tcp.local.',
        [const._TYPE_TXT, const._TYPE_SRV],
    )
    assert answer in parsed.answers
def test_dns_compression_points_to_itself():
    """Test our wire parser does not loop forever when a compression pointer points to itself."""
    packet = (
        b"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01"
        b"\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xc0(\x00\x01\x80\x01\x00\x00\x00"
        b"\x01\x00\x04\xc0\xa8\xd0\x06"
    )
    parsed = r.DNSIncoming(packet)
    # Only the first (valid) record is kept; the self-referential one is dropped.
    assert len(parsed.answers) == 1
def test_dns_compression_points_beyond_packet():
    """Test our wire parser does not fail when the compression pointer points beyond the packet."""
    packet = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01'
        b'\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xe7\x0f\x00\x01\x80\x01\x00\x00'
        b'\x00\x01\x00\x04\xc0\xa8\xd0\x06'
    )
    parsed = r.DNSIncoming(packet)
    # The out-of-bounds record is skipped; the valid one survives.
    assert len(parsed.answers) == 1
def test_dns_compression_generic_failure(caplog):
    """Test our wire parser does not loop forever when dns compression is corrupt."""
    packet = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x06domain\x05local\x00\x00\x01'
        b'\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05-\x0c\x00\x01\x80\x01\x00\x00'
        b'\x00\x01\x00\x04\xc0\xa8\xd0\x06'
    )
    parsed = r.DNSIncoming(packet, ("1.2.3.4", 5353))
    assert len(parsed.answers) == 1
    # The source (addr, port) is reported in the failure log line.
    assert "Received invalid packet from ('1.2.3.4', 5353)" in caplog.text
def test_label_length_attack():
    """Test our wire parser does not loop forever when the name exceeds 253 chars."""
    # Name is a long run of 1-byte "d" labels exceeding the 253-character limit.
    packet = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d'
        b'\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x01d\x00\x00\x01\x80'
        b'\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\xc0\x0c\x00\x01\x80\x01\x00\x00\x00'
        b'\x01\x00\x04\xc0\xa8\xd0\x06'
    )
    parsed = r.DNSIncoming(packet)
    # Nothing usable should come out of the oversized-name packet.
    assert len(parsed.answers) == 0
def test_label_compression_attack():
    """Test our wire parser does not loop forever when exceeding the maximum number of labels."""
    # Second record's name is a very long chain of "atk" labels plus a pointer.
    packet = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x03atk\x00\x00\x01\x80'
        b'\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03'
        b'atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\x03atk\xc0'
        b'\x0c\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x06'
    )
    parsed = r.DNSIncoming(packet)
    # Only the first (sane) record survives; the label-bomb record is dropped.
    assert len(parsed.answers) == 1
def test_dns_compression_loop_attack():
    """Test our wire parser does not loop forever when dns compression is in a loop."""
    # Seven records whose names chain compression pointers into a cycle.
    packet = (
        b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07\x03atk\x03dns\x05loc'
        b'al\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05\x04a'
        b'tk2\x04dns2\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0\x05'
        b'\x04atk3\xc0\x10\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0\xa8\xd0'
        b'\x05\x04atk4\x04dns5\xc0\x14\x00\x01\x80\x01\x00\x00\x00\x01\x00\x04\xc0'
        b'\xa8\xd0\x05\x04atk5\x04dns2\xc0^\x00\x01\x80\x01\x00\x00\x00\x01\x00'
        b'\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00'
        b'\x04\xc0\xa8\xd0\x05\xc0s\x00\x01\x80\x01\x00\x00\x00\x01\x00'
        b'\x04\xc0\xa8\xd0\x05'
    )
    parsed = r.DNSIncoming(packet)
    # The entire packet is rejected: no answers extracted, no infinite loop.
    assert len(parsed.answers) == 0
def test_txt_after_invalid_nsec_name_still_usable():
"""Test that we can see the txt record after the invalid nsec record."""
packet = (
b'\x00\x00\x84\x00\x00\x00\x00\x06\x00\x00\x00\x00\x06_sonos\x04_tcp\x05loc'
b'al\x00\x00\x0c\x00\x01\x00\x00\x11\x94\x00\x15\x12Sonos-542A1BC9220E'
b'\xc0\x0c\x12Sonos-542A1BC9220E\xc0\x18\x00/\x80\x01\x00\x00\x00x\x00'
b'\x08\xc1t\x00\x04@\x00\x00\x08\xc0)\x00/\x80\x01\x00\x00\x11\x94\x00'
b'\t\xc0)\x00\x05\x00\x00\x80\x00@\xc0)\x00!\x80\x01\x00\x00\x00x'
b'\x00\x08\x00\x00\x00\x00\x05\xa3\xc0>\xc0>\x00\x01\x80\x01\x00\x00\x00x'
b'\x00\x04\xc0\xa8\x02:\xc0)\x00\x10\x80\x01\x00\x00\x11\x94\x01*2info=/api'
b'/v1/players/RINCON_542A1BC9220E01400/info\x06vers=3\x10protovers=1.24.1\nbo'
b'otseq=11%hhid=Sonos_rYn9K9DLXJe0f3LP9747lbvFvh;mhhid=Sonos_rYn9K9DLXJe0f3LP9'
b'747lbvFvh.Q45RuMaeC07rfXh7OJGm