pax_global_header00006660000000000000000000000064146405513640014522gustar00rootroot0000000000000052 comment=c6a41cf63f24283868428b3627216b0de273c556 python-dlt-2.18.10.0/000077500000000000000000000000001464055136400141335ustar00rootroot00000000000000python-dlt-2.18.10.0/.coveragerc000066400000000000000000000000771464055136400162600ustar00rootroot00000000000000[run] branch = True source = dlt [report] show_missing = True python-dlt-2.18.10.0/.flake8000066400000000000000000000006401464055136400153060ustar00rootroot00000000000000[flake8] max-line-length = 119 ignore = # D10*: Missing docstring D10 # E203: whitespace before ':' # This error is not PEP8 complaint and should be ignored E203 # W503: line break before binary operator # seems to conflict with black code formatting W503 # W605: invalid escape sequence '\d' W605 exclude = .git, .tox, .eggs, __pycache__, build, dist python-dlt-2.18.10.0/.github/000077500000000000000000000000001464055136400154735ustar00rootroot00000000000000python-dlt-2.18.10.0/.github/workflows/000077500000000000000000000000001464055136400175305ustar00rootroot00000000000000python-dlt-2.18.10.0/.github/workflows/python-dlt-ci.yaml000066400000000000000000000020471464055136400231120ustar00rootroot00000000000000name: python-dlt-ci-actions on: [push, pull_request] jobs: run-test-for-python-dlt: runs-on: ubuntu-latest strategy: matrix: LIBDLT_VERSION: - "v2.18.8" steps: - uses: actions/checkout@v2 - name: Build python-dlt unit test docker image id: docker_build uses: docker/build-push-action@v2 with: push: false build-args: | LIBDLT_VERSION=${{ matrix.LIBDLT_VERSION }} tags: python-dlt/python-dlt-unittest:${{ matrix.LIBDLT_VERSION }} - name: lint check for the code base uses: addnab/docker-run-action@v3 with: image: python-dlt/python-dlt-unittest:${{ matrix.LIBDLT_VERSION }} options: -v ${{ github.workspace }}:/pydlt -w /pydlt run: tox -e black,ruff - name: Run unit test uses: addnab/docker-run-action@v3 with: image: python-dlt/python-dlt-unittest:${{ matrix.LIBDLT_VERSION }} options: -v ${{ github.workspace }}:/pydlt -w /pydlt run: tox python-dlt-2.18.10.0/.gitignore000066400000000000000000000022631464055136400161260ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ python-dlt-2.18.10.0/.pylintrc000066400000000000000000000272321464055136400160060ustar00rootroot00000000000000[MASTER] # Specify a configuration file. 
rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Profiled execution. profile=no # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS # Pickle collected data for later comparisons. persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= # Use multiple processes to speed up Pylint. jobs=1 # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code extension-pkg-whitelist= # Allow optimization of some AST trees. This will activate a peephole AST # optimizer, which will apply various small optimizations. For instance, it can # be used to obtain the result of joining multiple strings with the addition # operator. Joining a lot of strings can lead to a maximum recursion error in # Pylint and this flag can prevent that. It has one side effect, the resulting # AST will be different than the one from reality. optimize-ast=no [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html. You can also give a reporter class, eg # mypackage.mymodule.MyReporterClass. output-format=text # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". files-output=no # Tells whether to display a full report or only the messages reports=yes # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Add a comment according to your evaluation note. This is used by the global # evaluation report (RP0004). comment=no # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details #msg-template= [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED confidence= # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time. See also the "--disable" option for examples. #enable= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once).You can also use "--disable=all" to # disable everything first and then reenable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". 
If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" disable=I0011,R0201,R0902,R0903,R0912,R0913,R0921,R0922,R0801,C0325,W0511 [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO [LOGGING] # Logging modules to check that the string format arguments are in logging # function parameter format logging-modules=logging [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching the name of dummy variables (i.e. expectedly # not used). dummy-variables-rgx=_$|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_,_cb [SPELLING] # Spelling dictionary name. Available dictionaries: none. To make it working # install python-enchant package. spelling-dict= # List of comma separated words that should not be checked. spelling-ignore-words= # A path to a file that contains private dictionary; one word per line. spelling-private-dict-file= # Tells whether to store unknown words to indicated private dictionary in # --spelling-private-dict-file option instead of raising a message. spelling-store-unknown-words=no [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis ignored-modules= # List of classes names for which member attributes should not be checked # (useful for classes with attributes dynamically set). ignored-classes=SQLObject # When zope mode is activated, add a predefined set of Zope acquired attributes # to generated-members. zope=no # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E0201 when accessed. Python regular # expressions are accepted. generated-members=REQUEST,acl_users,aq_parent [FORMAT] # Maximum number of characters on a single line. max-line-length=119 # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )??$ # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no # List of optional constructs for which whitespace checking is disabled no-space-check=trailing-comma,dict-separator # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. expected-line-ending-format= [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # Ignore imports when computing similarities. 
ignore-imports=no [BASIC] # Required attributes for module, separated by a comma required-attributes= # List of builtins function names that should not be used, separated by a comma bad-functions=map,filter,apply,input # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_ # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # Colon-delimited sets of names that determine each other's naming style when # the name regexes allow several styles. name-group= # Include a hint for the correct naming format with invalid-name include-naming-hint=no # Regular expression matching correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for variable names variable-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Naming hint for module names module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression matching correct constant names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Naming hint for constant names const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Regular expression matching correct class attribute names class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ # Naming hint for class attribute names class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ # Regular expression matching correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Naming hint for class names class-name-hint=[A-Z_][a-zA-Z0-9]+$ # Regular expression matching correct inline iteration names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Naming hint for inline iteration names inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ # Regular expression matching correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for function names function-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for attribute names attr-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct method names method-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for method names method-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for argument names argument-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match function or class names that do # not require a docstring. no-docstring-rgx=__.*__ # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=-1 [DESIGN] # Maximum number of arguments for function / method max-args=5 # Argument names that match this expression will be ignored. Default to name # with leading underscore ignored-argument-names=_.* # Maximum number of locals for function / method body max-locals=15 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of branch for function / method body max-branches=12 # Maximum number of statements in function / method body max-statements=50 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Minimum number of public methods for a class (see R0903). min-public-methods=2 # Maximum number of public methods for a class (see R0904). max-public-methods=20 [CLASSES] # List of interface methods to ignore, separated by a comma. 
This is used for # instance to not check methods defines in Zope's Interface base class. ignore-iface-methods=isImplementedBy,deferred,extends,names,namesAndDescriptions,queryDescriptionFor,getBases,getDescriptionFor,getDoc,getName,getTaggedValue,getTaggedValueTags,isEqualOrExtendedBy,setTaggedValue,isImplementedByInstancesOf,adaptWith,is_implemented_by # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__,__new__,setUp # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls # List of valid names for the first argument in a metaclass class method. valid-metaclass-classmethod-first-arg=mcs # List of member names, which should be excluded from the protected access # warning. exclude-protected=_asdict,_fields,_replace,_source,_make [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=stringprep,optparse # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= [EXCEPTIONS] # Exceptions that will emit a warning when being caught. Defaults to # "Exception" overgeneral-exceptions=Exception python-dlt-2.18.10.0/Dockerfile000066400000000000000000000010661464055136400161300ustar00rootroot00000000000000ARG BASE_IMAGE=alpine:3.20 FROM ${BASE_IMAGE} ARG LIBDLT_VERSION=v2.18.8 RUN set -ex \ && apk add --no-cache build-base musl-dev linux-headers git cmake ninja \ wget curl dbus zlib zlib-dev \ python3 python3-dev py3-pip py3-tox \ && git clone https://github.com/GENIVI/dlt-daemon \ && cd /dlt-daemon \ && git checkout ${LIBDLT_VERSION} \ && cd /dlt-daemon \ && cmake CMakeLists.txt \ && make -j \ && make install \ && ldconfig /usr/local/lib RUN mkdir -p /workspace WORKDIR /workspace # vim: set ft=dockerfile : python-dlt-2.18.10.0/LICENCE.txt000066400000000000000000000352011464055136400157370ustar00rootroot00000000000000Mozilla Public License Version 2.0 1. Definitions 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. 
"Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. 
Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. 
You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. 6. Disclaimer of Warranty Covered Software is provided under this License on an "as is" basis, without warranty of any kind, either expressed, implied, or statutory, including, without limitation, warranties that the Covered Software is free of defects, merchantable, fit for a particular purpose or non-infringing. The entire risk as to the quality and performance of the Covered Software is with You. Should any Covered Software prove defective in any respect, You (not any Contributor) assume the cost of any necessary servicing, repair, or correction. This disclaimer of warranty constitutes an essential part of this License. No use of any Covered Software is authorized under this License except under this disclaimer. 7. 
Limitation of Liability Under no circumstances and under no legal theory, whether tort (including negligence), contract, or otherwise, shall any Contributor, or anyone who distributes Covered Software as permitted above, be liable to You for any direct, indirect, special, incidental, or consequential damages of any character including, without limitation, damages for lost profits, loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability shall not apply to liability for death or personal injury resulting from such party's negligence to the extent applicable law prohibits such limitation. Some jurisdictions do not allow the exclusion or limitation of incidental or consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to create a new license for such software, you may create and use a modified version of this License if you rename the license and remove any references to the name of the license steward (except to note that such modified license differs from this License). 10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses If You choose to distribute Source Code Form that is Incompatible With Secondary Licenses under the terms of this version of the License, the notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. If it is not possible or desirable to put the notice in a particular file, then You may include the notice in a location (such as a LICENSE file in a relevant directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. 
Exhibit B - "Incompatible With Secondary Licenses" Notice This Source Code Form is "Incompatible With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. python-dlt-2.18.10.0/MANIFEST.in000066400000000000000000000000241464055136400156650ustar00rootroot00000000000000include LICENCE.txt python-dlt-2.18.10.0/Makefile000066400000000000000000000023111464055136400155700ustar00rootroot00000000000000# Non released dlt-daemon version based on 2.18.10 LIBDLT_VERSION=v2.18.10 IMAGE=python-dlt/python-dlt-unittest TAG?=latest DK_CMD=docker run --rm -v $(shell pwd):/pydlt -w /pydlt TEST_ARGS?="-e py3,lint" .PHONY: all all: @echo "python-dlt testing commands, libdlt version: ${LIBDLT_VERSION}" @echo " make unit-test -- Run unit tests with tox (Run 'make build-image' the first time)" @echo " make build-image -- Build docker image for the usage of 'make unit-test'" @echo " make clean -- Remove all temporary files" .PHONY: unit-test unit-test: ${DK_CMD} ${IMAGE}:${TAG} tox ${TEST_ARGS} .PHONY: lint lint: ${DK_CMD} ${IMAGE}:${TAG} tox -e lint .PHONY: build-image build-image: docker build --build-arg LIBDLT_VERSION=${LIBDLT_VERSION} \ --tag ${IMAGE}:${TAG} . docker build --build-arg LIBDLT_VERSION=${LIBDLT_VERSION} \ --tag ${IMAGE}:${LIBDLT_VERSION} . .PHONY: bash bash: ${DK_CMD} -it ${IMAGE}:${TAG} .PHONY: clean clean: ifeq (,$(wildcard /.dockerenv)) ${DK_CMD} ${IMAGE}:${TAG} make clean else find . -name "__pycache__" | xargs -n1 rm -rf find . -name "*.pyc" | xargs -n1 rm -rf rm -rf .coverage rm -rf *.egg-info rm -rf .eggs rm -rf junit_reports rm -rf .tox endif python-dlt-2.18.10.0/README.md000066400000000000000000000144541464055136400154220ustar00rootroot00000000000000# python-dlt python-dlt is a thin Python ctypes wrapper around libdlt functions. It was primarily created for use with BMW's test execution framework. However, the implementation is independent and the API makes few assumptions about the intended use. Note: This is only tested with libdlt version v2.18.8 and v2.18.10, later versions might require adaptations. The package will not support previous libdlt versions from python-dlt v2.0. Also only GENIVI DLT daemon produced traces have been tested. ## Design The code is split up into 3 primary components: * The `core`: This subpackage provides the major chunk of ctypes wrappers for the structures defined in libdlt. It abstracts out the libdlt structures for use by the rest of python-dlt. Classes defined here ideally should *not* be used outside of python-dlt. The module `core_base.py` provides the default implementation of the classes and the other `core_*.py` modules provide the overrides for the version specific implementations of libdlt. The correct version specific implementation will be loaded automatically at runtime. (the logic for this is in `core/__init__.py`) * The python interface classes: These are defined in `dlt.py`. Most of the classes here derive from their corresponding ctypes class definitions from `core` and provide a more python friendly api/access to the underlying C/ctypes implementations. Ideally, python code using `python-dlt` would use these classes rather than the base classes in `core`. * API for tools: This is the component that provides common interfaces required by the tools that use `python-dlt`, like the `DLTBroker`, 'DLTLifecycle' etc. These classes do not have equivalents in libdlt and were created based on usage requirements (and as such make assumptions about the manner in which they would be used). 
If you're reading this document to work on the core or the Python classes, it
would be a good idea to first understand the design of libdlt itself. This is
fairly well documented (look under the `doc/` directory of the `dlt-daemon`
code base). Of course, the best reference is the code itself. `dlt-daemon` is
written in C and is a pretty well laid out, straightforward (i.e. not many
layers of abstraction), small code base. Makes for good bedtime reading.

The rest of this document will describe and demonstrate some of the design of
the external API of python-dlt. The classes most relevant for users of
python-dlt are probably `DLTClient`, `DLTFile`, `DLTMessage` and `DLTBroker`.
The names hopefully make their purpose evident. Here are examples of some
interesting ways to use these classes:

* DLTFile and DLTMessage::

```python
>>> from dlt import dlt
>>> # DLTFile object can be obtained by loading a trace file
>>> d = dlt.load("high_full_trace.dlt")
>>> d.generate_index()  # Read the whole trace file and generate its index
>>> print(d.counter_total)  # number of DLT messages in the file
...
>>> print(d[0])  # messages can be indexed
...
>>> for msg in d:  # DLTFile object is iterable
...     print(msg.apid)  # DLTMessage objects have all the attrs
...     print(msg.payload_decoded)  # one might expect from a DLT frame
...     print(msg)  # The str() of the DLTMessage closely matches the
...                 # output of dlt-receive
>>> d[0] == d[-1]  # DLTMessage objects can be compared to each other
>>> d.compare(dict(apid="SYS", ctid="JOUR"))  # ...or can be compared to a
...                                           # dict of attributes
>>> import pickle
>>> pickle.dumps(d[0])  # DLTMessage objects are (de)serializable using
...                     # the pickle protocol (this is to enable sharing
...                     # of the DLTMessage in a multiprocessing
...                     # environment)
```

* DLTClient and DLTBroker::

```python
>>> import ctypes
>>> from dlt import dlt
>>> c = dlt.DLTClient(servIP="127.0.0.1")  # Only initializes the client
>>> c.connect()  # ...this connects
>>> dlt.dltlib.dlt_receiver_receive(ctypes.byref(c.receiver), DLT_RECEIVE_SOCKET)  # receives data
>>> c.read_message()  # reads a single DLTMessage from received data and returns it
>>>
>>> # more interesting is the DLTBroker class...
>>> # - create an instance that initializes a DLTClient. Accepts a filename
>>> #   where DLT traces would be stored
>>> broker = DLTBroker(ip_address="127.0.0.1", filename='/tmp/testing_log.dlt')
>>> # needs to be started and stopped explicitly and will create and run a
>>> # DLTClient instance in a new *process*.
>>> broker.start()
>>> broker.stop()
>>>
>>> # Usually used in conjunction with the DLTContext class from mtee
>>> from mtee.testing.connectors.connector_dlt import DLTContext
>>> broker = DLTBroker(ip_address="127.0.0.1", filename="/tmp/testing_log.dlt", verbose=True)
>>> ctx = DLTContext(broker, filters=[("SYS", "JOUR")])
>>> broker.start()
>>> print(ctx.wait_for(count=10))
>>>
```

## Design of DLTBroker

The DLTBroker abstracts out the management of two (multiprocessing) queues:

* The `message_queue`: This queue receives *all* messages from the DLT daemon
  (via a DLTClient instance running as a separate process; the code is in
  `dlt.dlt_broker_handlers.DLTMessageHandler`) and stores them to a trace
  file.

* The `filter_queue`: This queue instructs the `DLTMessageHandler` which
  messages are interesting at runtime, so that they can be filtered and
  returned (for example, via a request from `DLTContext`). This is run as a
  separate thread in the `DLTBroker` process. The code for this is in
  `dlt.dlt_broker_handlers.DLTContextHandler`. A simplified sketch of this
  two-queue flow follows.
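The sketch below illustrates the two-queue split in isolation. It is a
simplified stand-in, not the real implementation: the actual handlers in
`dlt.dlt_broker_handlers` manage trace-file storage, per-context queues and
lifecycle handling, while here plain strings and `print` calls take their
place.

```python
import multiprocessing
import queue
import threading
import time


def message_handler(message_queue, stop_flag):
    """Stand-in for DLTMessageHandler (a separate *process* in python-dlt).

    Drains every incoming message; the real handler writes them to the
    trace file and forwards matches to registered contexts.
    """
    while not stop_flag.is_set():
        try:
            message = message_queue.get(timeout=0.2)
        except queue.Empty:
            continue
        print("store to trace file:", message)


def context_handler(filter_queue, stop_flag):
    """Stand-in for DLTContextHandler (a *thread* in the DLTBroker process)."""
    while not stop_flag.is_set():
        try:
            context_id, filters, add = filter_queue.get(timeout=0.2)
        except queue.Empty:
            continue
        print("add" if add else "remove", "filters", filters, "for", context_id)


if __name__ == "__main__":
    message_queue = multiprocessing.Queue()  # *all* messages from the DLTClient
    filter_queue = multiprocessing.Queue()   # runtime filtering requests
    stop_flag = multiprocessing.Event()

    process = multiprocessing.Process(target=message_handler, args=(message_queue, stop_flag))
    thread = threading.Thread(target=context_handler, args=(filter_queue, stop_flag))
    process.start()
    thread.start()

    # Comparable to a DLTContext registering interest in ("SYS", "JOUR")...
    filter_queue.put(("ctx-1", [("SYS", "JOUR")], True))
    # ...while the DLTClient process keeps feeding messages:
    message_queue.put("DLTMessage(apid='SYS', ctid='JOUR')")

    time.sleep(1)
    stop_flag.set()
    process.join()
    thread.join()
```

The point of the split is that message storage and runtime filtering stay
decoupled: filter requests arrive on their own queue, handled by their own
thread, so a slow or absent consumer does not block the process persisting
messages.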
## Running tox on a local machine

To run tox for this repository, perform the following:

1. Build a docker image from the `Dockerfile` provided using:

```commandline
$ docker build -t python-dlt -f Dockerfile .
```

2. Run tox in the docker container using:

```commandline
$ docker run -it --rm --volume $(pwd):/workspace python-dlt /bin/sh -xc "tox -e py3,lint"
```

3. [Special Case] Getting an interactive shell inside the docker container to run arbitrary commands:

```commandline
$ docker run -it --rm --volume $(pwd):/workspace --entrypoint sh python-dlt
```
python-dlt-2.18.10.0/dlt/000077500000000000000000000000001464055136400147165ustar00rootroot00000000000000
python-dlt-2.18.10.0/dlt/__init__.py000066400000000000000000000025071464055136400170330ustar00rootroot00000000000000
# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved.
"""DLT support module"""
import collections
import logging
import subprocess

if not hasattr(subprocess, "TimeoutExpired"):
    import subprocess32 as subprocess  # pylint: disable=import-error

LOGGER = logging.getLogger(__name__)

ProcessResult = collections.namedtuple("ProcessResult", ("stdout", "stderr", "returncode"))


def run_command(command, timeout=60, shell=True):
    """Run command in a shell and return stdout, stderr and return code

    :param str|list command: a command to run
    :param int timeout: timeout for the command
    :param bool shell: shell switch
    :returns: process result
    :rtype: subprocess compatible ProcessResult
    :raises RuntimeError: If timeout expires.
    """
    process = subprocess.Popen(
        command, shell=shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE
    )
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        process.terminate()
        raise RuntimeError("Timeout %d seconds reached for command '%s'" % (timeout, command))

    if isinstance(stdout, bytes):
        stdout = stdout.decode("utf-8")
    if isinstance(stderr, bytes):
        stderr = stderr.decode("utf-8")

    return ProcessResult(stdout, stderr, process.returncode)
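
# Example usage of run_command (the command line here is illustrative):
#
#     result = run_command("echo hello", timeout=10)
#     if result.returncode == 0:
#         print(result.stdout)  # -> "hello\n"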
"""Basic ctypes binding to the DLT library""" import ctypes import os from dlt.core.core_base import * # noqa: F403 API_VER = None def get_version(loaded_lib): """Return the API version of the loaded libdlt.so library""" global API_VER # pylint: disable=global-statement if API_VER is None: buf = ctypes.create_string_buffer(255) loaded_lib.dlt_get_version(ctypes.byref(buf), 255) # buf would be something like: # DLT Package Version: X.XX.X STABLE, Package Revision: vX.XX.XX build on Jul XX XXXX XX:XX:XX # -SYSTEMD -SYSTEMD_WATCHDOG -TEST -SHM buf_split = buf.value.decode().split() API_VER = buf_split[3] return API_VER def get_api_specific_file(version): """Return specific version api filename, if not found fallback to first major version release""" version_tuple = [int(num) for num in version.split(".")] name = "core_{}.py".format("".join((str(num) for num in version_tuple))) if os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), name)): return name # The minor version does not exist, try to truncate if version_tuple[-1] != 0: version_tuple = version_tuple[:-1] + [0] name = "core_{}.py".format("".join((str(num) for num in version_tuple))) if not os.path.exists(os.path.join(os.path.dirname(os.path.abspath(__file__)), name)): raise ImportError("No module file: {}".format(name)) return name def check_libdlt_version(api_ver): """Check the version compatibility. python-dlt now only supports to run libdlt 2.18.5 or above. """ ver_info = tuple(int(num) for num in api_ver.split(".")) if ver_info < (2, 18, 5): raise ImportError( "python-dlt only supports libdlt \ v2.18.5 (33fbad18c814e13bd7ba2053525d8959fee437d1) or above" ) API_VER = get_version(dltlib) # noqa: F405 check_libdlt_version(API_VER) # Load version specific definitions, if such a file exists, possibly # overriding above definitions # # The intent is to have version specific implementations to be able to # provide declarations *incrementally*. # # For instance if version 2.17.0 introduces new changes in addition to # retaining all changes from 2.16.0, then core_2170.py would import # core_2160.py and declare only version specific changes/overrides. The # loading logic here below should not require changes. # # This allows the implementation below to import just one final module # (as opposed to loading multiple implementations in a specific order) # to provide new/overriding implementations. api_specific_file = get_api_specific_file(API_VER) overrides = __import__("dlt.core.{}".format(api_specific_file[:-3]), globals(), locals(), ["*"]) locals().update(overrides.__dict__) python-dlt-2.18.10.0/dlt/core/core_21810.py000066400000000000000000000135031464055136400177050ustar00rootroot00000000000000# Copyright (C) 2022. BMW CTW PT. All rights reserved. 
"""v2.18.8 specific class definitions""" import ctypes import logging from dlt.core.core_base import dltlib # DltClientMode from dlt_client.h DLT_CLIENT_MODE_UNDEFINED = -1 DLT_CLIENT_MODE_TCP = 0 DLT_CLIENT_MODE_SERIAL = 1 DLT_CLIENT_MODE_UNIX = 2 DLT_CLIENT_MODE_UDP_MULTICAST = 3 # DltReceiverType from dlt_common.h DLT_RECEIVE_SOCKET = 0 DLT_RECEIVE_UDP_SOCKET = 1 DLT_RECEIVE_FD = 2 DLT_ID_SIZE = 4 DLT_FILTER_MAX = 30 # Maximum number of filters DLT_RETURN_ERROR = -1 # Return value for DLTFilter.add() - exceeded maximum number of filters MAX_FILTER_REACHED = 1 # Return value for DLTFilter.add() - specified filter already exists REPEATED_FILTER = 2 logger = logging.getLogger(__name__) # pylint: disable=invalid-name class sockaddr_in(ctypes.Structure): # pylint: disable=invalid-name """Auxiliary definition for cDltReceiver. Defined in netinet/in.h header""" _fields_ = [ ("sa_family", ctypes.c_ushort), # sin_family ("sin_port", ctypes.c_ushort), ("sin_addr", ctypes.c_byte * 4), ("__pad", ctypes.c_byte * 8), ] # struct sockaddr_in is 16 class cDltReceiver(ctypes.Structure): # pylint: disable=invalid-name """The structure is used to organise the receiving of data including buffer handling. This structure is used by the corresponding functions. typedef struct { int32_t lastBytesRcvd; /**< bytes received in last receive call */ int32_t bytesRcvd; /**< received bytes */ int32_t totalBytesRcvd; /**< total number of received bytes */ char *buffer; /**< pointer to receiver buffer */ char *buf; /**< pointer to position within receiver buffer */ char *backup_buf; /** pointer to the buffer with partial messages if any **/ int fd; /**< connection handle */ DltReceiverType type; /**< type of connection handle */ int32_t buffersize; /**< size of receiver buffer */ struct sockaddr_in addr; /**< socket address information */ } DltReceiver; """ _fields_ = [ ("lastBytesRcvd", ctypes.c_int32), ("bytesRcvd", ctypes.c_int32), ("totalBytesRcvd", ctypes.c_int32), ("buffer", ctypes.POINTER(ctypes.c_char)), ("buf", ctypes.POINTER(ctypes.c_char)), ("backup_buf", ctypes.POINTER(ctypes.c_char)), ("fd", ctypes.c_int), ("type", ctypes.c_int), ("buffersize", ctypes.c_int32), ("addr", sockaddr_in), ] class cDltClient(ctypes.Structure): # pylint: disable=invalid-name """ typedef struct { DltReceiver receiver; /**< receiver pointer to dlt receiver structure */ int sock; /**< sock Connection handle/socket */ char *servIP; /**< servIP IP adress/Hostname of TCP/IP interface */ char *hostip; /**< IP multicast address of group */ int port; /**< Port for TCP connections (optional) */ char *serialDevice; /**< serialDevice Devicename of serial device */ char *socketPath; /**< socketPath Unix socket path */ char ecuid[4]; /**< ECUiD */ speed_t baudrate; /**< baudrate Baudrate of serial interface, as speed_t */ DltClientMode mode; /**< mode DltClientMode */ int send_serial_header; /**< (Boolean) Send DLT messages with serial header */ int resync_serial_header; /**< (Boolean) Resync to serial header on all connection */ } DltClient; """ _fields_ = [ ("receiver", cDltReceiver), ("sock", ctypes.c_int), ("servIP", ctypes.c_char_p), ("hostip", ctypes.c_char_p), ("port", ctypes.c_int), ("serialDevice", ctypes.c_char_p), ("socketPath", ctypes.c_char_p), ("ecuid", ctypes.c_char * 4), ("baudrate", ctypes.c_uint), ("mode", ctypes.c_int), ("send_serial_header", ctypes.c_int), ("resync_serial_header", ctypes.c_int), ] class cDLTFilter(ctypes.Structure): # pylint: disable=invalid-name """ typedef struct { char apid[DLT_FILTER_MAX][DLT_ID_SIZE]; 
/**< application id */ char ctid[DLT_FILTER_MAX][DLT_ID_SIZE]; /**< context id */ int log_level[DLT_FILTER_MAX]; /**< log level */ int32_t payload_max[DLT_FILTER_MAX]; /**< upper border for payload */ int32_t payload_min[DLT_FILTER_MAX]; /**< lower border for payload */ int counter; /**< number of filters */ } DltFilter; """ _fields_ = [ ("apid", (ctypes.c_char * DLT_ID_SIZE) * DLT_FILTER_MAX), ("ctid", (ctypes.c_char * DLT_ID_SIZE) * DLT_FILTER_MAX), ("log_level", ctypes.c_int * DLT_FILTER_MAX), ("payload_max", (ctypes.c_int32 * DLT_FILTER_MAX)), ("payload_min", (ctypes.c_int32 * DLT_FILTER_MAX)), ("counter", ctypes.c_int), ] # pylint: disable=too-many-arguments def add(self, apid, ctid, log_level=0, payload_min=0, payload_max=ctypes.c_uint32(-1).value // 2): """Add new filter pair""" if isinstance(apid, str): apid = bytes(apid, "ascii") if isinstance(ctid, str): ctid = bytes(ctid, "ascii") if ( dltlib.dlt_filter_add( ctypes.byref(self), apid or b"", ctid or b"", log_level, payload_min, payload_max, self.verbose ) == DLT_RETURN_ERROR ): if self.counter >= DLT_FILTER_MAX: logger.error("Maximum number (%d) of allowed filters reached, ignoring filter!\n", DLT_FILTER_MAX) return MAX_FILTER_REACHED logger.debug("Filter ('%s', '%s') already exists", apid, ctid) return REPEATED_FILTER return 0 python-dlt-2.18.10.0/dlt/core/core_2185.py000066400000000000000000000066301464055136400176340ustar00rootroot00000000000000# Copyright (C) 2019. BMW Car IT GmbH. All rights reserved. """v2.18.5 specific class definitions""" import ctypes # DltClientMode from dlt_client.h DLT_CLIENT_MODE_UNDEFINED = -1 DLT_CLIENT_MODE_TCP = 0 DLT_CLIENT_MODE_SERIAL = 1 DLT_CLIENT_MODE_UNIX = 2 DLT_CLIENT_MODE_UDP_MULTICAST = 3 # DltReceiverType from dlt_common.h # DltReceiverType is an enum type. These definitions could not be found in shared library (libdlt.so) so # the enum values are defined here. DLT_RECEIVE_SOCKET = 0 DLT_RECEIVE_UDP_SOCKET = 1 DLT_RECEIVE_FD = 2 class sockaddr_in(ctypes.Structure): # pylint: disable=invalid-name """Auxiliary definition for cDltReceiver. Defined in netinet/in.h header""" _fields_ = [ ("sa_family", ctypes.c_ushort), # sin_family ("sin_port", ctypes.c_ushort), ("sin_addr", ctypes.c_byte * 4), ("__pad", ctypes.c_byte * 8), ] # struct sockaddr_in is 16 class cDltReceiver(ctypes.Structure): # pylint: disable=invalid-name """The structure is used to organise the receiving of data including buffer handling. This structure is used by the corresponding functions. 
typedef struct { int32_t lastBytesRcvd; /**< bytes received in last receive call */ int32_t bytesRcvd; /**< received bytes */ int32_t totalBytesRcvd; /**< total number of received bytes */ char *buffer; /**< pointer to receiver buffer */ char *buf; /**< pointer to position within receiver buffer */ char *backup_buf; /** pointer to the buffer with partial messages if any **/ int fd; /**< connection handle */ int32_t buffersize; /**< size of receiver buffer */ struct sockaddr_in addr; /**< socket address information */ } DltReceiver; """ _fields_ = [ ("lastBytesRcvd", ctypes.c_int32), ("bytesRcvd", ctypes.c_int32), ("totalBytesRcvd", ctypes.c_int32), ("buffer", ctypes.POINTER(ctypes.c_char)), ("buf", ctypes.POINTER(ctypes.c_char)), ("backup_buf", ctypes.POINTER(ctypes.c_char)), ("fd", ctypes.c_int), ("buffersize", ctypes.c_int32), ("addr", sockaddr_in), ] class cDltClient(ctypes.Structure): # pylint: disable=invalid-name """ typedef struct { DltReceiver receiver; /**< receiver pointer to dlt receiver structure */ int sock; /**< sock Connection handle/socket */ char *servIP; /**< servIP IP adress/Hostname of TCP/IP interface */ char *hostip; /**< IP multicast address of group */ int port; /**< Port for TCP connections (optional) */ char *serialDevice; /**< serialDevice Devicename of serial device */ char *socketPath; /**< socketPath Unix socket path */ char ecuid[4]; /**< ECUiD */ speed_t baudrate; /**< baudrate Baudrate of serial interface, as speed_t */ DltClientMode mode; /**< mode DltClientMode */ } DltClient; """ _fields_ = [ ("receiver", cDltReceiver), ("sock", ctypes.c_int), ("servIP", ctypes.c_char_p), ("hostip", ctypes.c_char_p), ("port", ctypes.c_int), ("serialDevice", ctypes.c_char_p), ("socketPath", ctypes.c_char_p), ("ecuid", ctypes.c_char * 4), ("baudrate", ctypes.c_uint), ("mode", ctypes.c_int), ] python-dlt-2.18.10.0/dlt/core/core_2188.py000066400000000000000000000135031464055136400176340ustar00rootroot00000000000000# Copyright (C) 2022. BMW CTW PT. All rights reserved. """v2.18.8 specific class definitions""" import ctypes import logging from dlt.core.core_base import dltlib # DltClientMode from dlt_client.h DLT_CLIENT_MODE_UNDEFINED = -1 DLT_CLIENT_MODE_TCP = 0 DLT_CLIENT_MODE_SERIAL = 1 DLT_CLIENT_MODE_UNIX = 2 DLT_CLIENT_MODE_UDP_MULTICAST = 3 # DltReceiverType from dlt_common.h DLT_RECEIVE_SOCKET = 0 DLT_RECEIVE_UDP_SOCKET = 1 DLT_RECEIVE_FD = 2 DLT_ID_SIZE = 4 DLT_FILTER_MAX = 30 # Maximum number of filters DLT_RETURN_ERROR = -1 # Return value for DLTFilter.add() - exceeded maximum number of filters MAX_FILTER_REACHED = 1 # Return value for DLTFilter.add() - specified filter already exists REPEATED_FILTER = 2 logger = logging.getLogger(__name__) # pylint: disable=invalid-name class sockaddr_in(ctypes.Structure): # pylint: disable=invalid-name """Auxiliary definition for cDltReceiver. Defined in netinet/in.h header""" _fields_ = [ ("sa_family", ctypes.c_ushort), # sin_family ("sin_port", ctypes.c_ushort), ("sin_addr", ctypes.c_byte * 4), ("__pad", ctypes.c_byte * 8), ] # struct sockaddr_in is 16 class cDltReceiver(ctypes.Structure): # pylint: disable=invalid-name """The structure is used to organise the receiving of data including buffer handling. This structure is used by the corresponding functions. 
typedef struct { int32_t lastBytesRcvd; /**< bytes received in last receive call */ int32_t bytesRcvd; /**< received bytes */ int32_t totalBytesRcvd; /**< total number of received bytes */ char *buffer; /**< pointer to receiver buffer */ char *buf; /**< pointer to position within receiver buffer */ char *backup_buf; /** pointer to the buffer with partial messages if any **/ int fd; /**< connection handle */ DltReceiverType type; /**< type of connection handle */ int32_t buffersize; /**< size of receiver buffer */ struct sockaddr_in addr; /**< socket address information */ } DltReceiver; """ _fields_ = [ ("lastBytesRcvd", ctypes.c_int32), ("bytesRcvd", ctypes.c_int32), ("totalBytesRcvd", ctypes.c_int32), ("buffer", ctypes.POINTER(ctypes.c_char)), ("buf", ctypes.POINTER(ctypes.c_char)), ("backup_buf", ctypes.POINTER(ctypes.c_char)), ("fd", ctypes.c_int), ("type", ctypes.c_int), ("buffersize", ctypes.c_int32), ("addr", sockaddr_in), ] class cDltClient(ctypes.Structure): # pylint: disable=invalid-name """ typedef struct { DltReceiver receiver; /**< receiver pointer to dlt receiver structure */ int sock; /**< sock Connection handle/socket */ char *servIP; /**< servIP IP adress/Hostname of TCP/IP interface */ char *hostip; /**< IP multicast address of group */ int port; /**< Port for TCP connections (optional) */ char *serialDevice; /**< serialDevice Devicename of serial device */ char *socketPath; /**< socketPath Unix socket path */ char ecuid[4]; /**< ECUiD */ speed_t baudrate; /**< baudrate Baudrate of serial interface, as speed_t */ DltClientMode mode; /**< mode DltClientMode */ int send_serial_header; /**< (Boolean) Send DLT messages with serial header */ int resync_serial_header; /**< (Boolean) Resync to serial header on all connection */ } DltClient; """ _fields_ = [ ("receiver", cDltReceiver), ("sock", ctypes.c_int), ("servIP", ctypes.c_char_p), ("hostip", ctypes.c_char_p), ("port", ctypes.c_int), ("serialDevice", ctypes.c_char_p), ("socketPath", ctypes.c_char_p), ("ecuid", ctypes.c_char * 4), ("baudrate", ctypes.c_uint), ("mode", ctypes.c_int), ("send_serial_header", ctypes.c_int), ("resync_serial_header", ctypes.c_int), ] class cDLTFilter(ctypes.Structure): # pylint: disable=invalid-name """ typedef struct { char apid[DLT_FILTER_MAX][DLT_ID_SIZE]; /**< application id */ char ctid[DLT_FILTER_MAX][DLT_ID_SIZE]; /**< context id */ int log_level[DLT_FILTER_MAX]; /**< log level */ int32_t payload_max[DLT_FILTER_MAX]; /**< upper border for payload */ int32_t payload_min[DLT_FILTER_MAX]; /**< lower border for payload */ int counter; /**< number of filters */ } DltFilter; """ _fields_ = [ ("apid", (ctypes.c_char * DLT_ID_SIZE) * DLT_FILTER_MAX), ("ctid", (ctypes.c_char * DLT_ID_SIZE) * DLT_FILTER_MAX), ("log_level", ctypes.c_int * DLT_FILTER_MAX), ("payload_max", (ctypes.c_int32 * DLT_FILTER_MAX)), ("payload_min", (ctypes.c_int32 * DLT_FILTER_MAX)), ("counter", ctypes.c_int), ] # pylint: disable=too-many-arguments def add(self, apid, ctid, log_level=0, payload_min=0, payload_max=ctypes.c_uint32(-1).value // 2): """Add new filter pair""" if isinstance(apid, str): apid = bytes(apid, "ascii") if isinstance(ctid, str): ctid = bytes(ctid, "ascii") if ( dltlib.dlt_filter_add( ctypes.byref(self), apid or b"", ctid or b"", log_level, payload_min, payload_max, self.verbose ) == DLT_RETURN_ERROR ): if self.counter >= DLT_FILTER_MAX: logger.error("Maximum number (%d) of allowed filters reached, ignoring filter!\n", DLT_FILTER_MAX) return MAX_FILTER_REACHED logger.debug("Filter ('%s', '%s') already 
exists", apid, ctid) return REPEATED_FILTER return 0 python-dlt-2.18.10.0/dlt/core/core_base.py000066400000000000000000000525571464055136400201600ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved. """Default implementation of the ctypes bindings for the DLT library""" import ctypes import logging import sys if sys.platform.startswith("darwin"): dltlib = ctypes.cdll.LoadLibrary("libdlt.dylib") elif sys.platform.startswith("linux"): dltlib = ctypes.cdll.LoadLibrary("libdlt.so.2") else: raise RuntimeError("Platform %s not supported" % sys.platform) logger = logging.getLogger(__name__) # pylint: disable=invalid-name # Return value for DLTFilter.add() - exceeded maximum number of filters MAX_FILTER_REACHED = 1 # Return value for DLTFilter.add() - specified filter already exists REPEATED_FILTER = 2 DLT_ID_SIZE = 4 DLT_FILTER_MAX = 30 # Maximum number of filters DLT_HTYP_UEH = 0x01 # use extended header DLT_HTYP_WEID = 0x04 # with ECU ID DLT_HTYP_WSID = 0x08 # with Session ID DLT_HTYP_WTMS = 0x10 # with timestamp DLT_MESSAGE_ERROR_OK = 0 DLT_DAEMON_TEXTSIZE = 10024 DLT_OUTPUT_HEX = 1 DLT_OUTPUT_ASCII = 2 DLT_OUTPUT_MIXED_FOR_PLAIN = 3 DLT_OUTPUT_MIXED_FOR_HTML = 4 DLT_OUTPUT_ASCII_LIMITED = 5 DLT_RETURN_ERROR = -1 DLT_RETURN_OK = 0 DLT_RETURN_TRUE = 1 # DltClientMode from dlt_client.h DLT_CLIENT_MODE_UNDEFINED = -1 DLT_CLIENT_MODE_TCP = 0 DLT_CLIENT_MODE_SERIAL = 1 DLT_CLIENT_MODE_UNIX = 2 DLT_CLIENT_MODE_UDP_MULTICAST = 3 # DltReceiverType from dlt_common.h # DltReceiverType is an enum type. These definitions could not be found in shared library (libdlt.so) so # the enum values are defined here. DLT_RECEIVE_SOCKET = 0 DLT_RECEIVE_FD = 1 DLT_TYPE_LOG = 0x00 # Log message type DLT_TYPE_APP_TRACE = 0x01 # Application trace message type DLT_TYPE_NW_TRACE = 0x02 # Network trace message type DLT_TYPE_CONTROL = 0x03 # Control message type DLT_CONTROL_REQUEST = 0x01 DLT_CONTROL_RESPONSE = 0x02 # Response to request message DLT_CONTROL_TIME = 0x03 DLT_MSIN_MSTP_SHIFT = 1 # shift right offset to get mstp value DLT_MSIN_MTIN_SHIFT = 4 # shift right offset to get mtin value DLT_MSIN_MSTP = 0x0E # message type DLT_MSIN_MTIN = 0xF0 # message type info DLT_MSIN_VERB = 0x01 # verbose mode DLT_MSIN_CONTROL_RESPONSE = (DLT_TYPE_CONTROL << DLT_MSIN_MSTP_SHIFT) | (DLT_CONTROL_RESPONSE << DLT_MSIN_MTIN_SHIFT) # dlt_protocol.h DLT_SERVICE_ID_GET_SOFTWARE_VERSION = 0x13 # Service ID: Get software version DLT_SERVICE_ID_UNREGISTER_CONTEXT = 0xF01 # Service ID: Message unregister context DLT_SERVICE_ID_CONNECTION_INFO = 0xF02 # Service ID: Message connection info DLT_SERVICE_ID_TIMEZONE = 0xF03 # Service ID: Timezone DLT_SERVICE_ID_MARKER = 0xF04 # Service ID: Marker DLT_CONNECTION_STATUS_DISCONNECTED = 0x01 # Client is disconnected DLT_CONNECTION_STATUS_CONNECTED = 0x02 # Client is connected DLT_TYPE_INFO_TYLE = 0x0000000F # Length of standard data: 1 = 8bit, 2 = 16bit, 3 = 32 bit, 4 = 64 bit, 5 = 128 bit DLT_TYPE_INFO_BOOL = 0x00000010 # Boolean data DLT_TYPE_INFO_SINT = 0x00000020 # Signed integer data DLT_TYPE_INFO_UINT = 0x00000040 # Unsigned integer data DLT_TYPE_INFO_FLOA = 0x00000080 # Float data DLT_TYPE_INFO_ARAY = 0x00000100 # Array of standard types DLT_TYPE_INFO_STRG = 0x00000200 # String DLT_TYPE_INFO_RAWD = 0x00000400 # Raw data DLT_TYPE_INFO_VARI = 0x00000800 # Set, if additional information to a variable is available DLT_TYPE_INFO_FIXP = 0x00001000 # Set, if quantization and offset are added DLT_TYPE_INFO_TRAI = 0x00002000 # Set, if additional trace information is 
added DLT_TYPE_INFO_STRU = 0x00004000 # Struct DLT_TYPE_INFO_SCOD = 0x00038000 # coding of the type string: 0 = ASCII, 1 = UTF-8 DLT_SCOD_ASCII = 0x00000000 DLT_SCOD_UTF8 = 0x00008000 DLT_SCOD_HEX = 0x00010000 DLT_SCOD_BIN = 0x00018000 DLT_TYLE_8BIT = 0x00000001 DLT_TYLE_16BIT = 0x00000002 DLT_TYLE_32BIT = 0x00000003 DLT_TYLE_64BIT = 0x00000004 DLT_TYLE_128BIT = 0x00000005 DLT_DAEMON_TCP_PORT = 3490 DLT_CLIENT_RCVBUFSIZE = 10024 # Size of client receive buffer from dlt_client_cfg.h # dlt-viever/qdltbase.cpp qDltMessageType = [b"log", b"app_trace", b"nw_trace", b"control", b"", b"", b"", b""] qDltLogInfo = [ b"", b"fatal", b"error", b"warn", b"info", b"debug", b"verbose", b"", b"", b"", b"", b"", b"", b"", b"", b"", ] qDltTraceType = [ b"", b"variable", b"func_in", b"func_out", b"state", b"vfb", b"", b"", b"", b"", b"", b"", b"", b"", b"", b"", ] qDltNwTraceType = [b"", b"ipc", b"can", b"flexray", b"most", b"vfb", b"", b"", b"", b"", b"", b"", b"", b"", b"", b""] qDltControlType = [b"", b"request", b"response", b"time", b"", b"", b"", b"", b"", b"", b"", b"", b"", b"", b"", b""] cqDltMode = [b"non-verbose", b"verbose"] qDltEndianness = [b"little-endian", b"big-endian"] cqDltTypeInfo = [ b"String", b"Bool", b"SignedInteger", b"UnsignedInteger", b"Float", b"RawData", b"TraceInfo", b"Utf8String", ] qDltCtrlServiceId = [ b"", b"set_log_level", b"set_trace_status", b"get_log_info", b"get_default_log_level", b"store_config", b"reset_to_factory_default", b"set_com_interface_status", b"set_com_interface_max_bandwidth", b"set_verbose_mode", b"set_message_filtering", b"set_timing_packets", b"get_local_time", b"use_ecu_id", b"use_session_id", b"use_timestamp", b"use_extended_header", b"set_default_log_level", b"set_default_trace_status", b"get_software_version", b"message_buffer_overflow", ] qDltCtrlReturnType = [b"ok", b"not_supported", b"error", b"3", b"4", b"5", b"6", b"7", b"no_matching_context_id"] class cDltServiceConnectionInfo(ctypes.Structure): """ typedef struct { uint32_t service_id; /**< service ID */ uint8_t status; /**< reponse status */ uint8_t state; /**< new state */ char comid[DLT_ID_SIZE]; /**< communication interface */ } PACKED DltServiceConnectionInfo; """ _fields_ = [ ("service_id", ctypes.c_uint32), ("status", ctypes.c_uint8), ("state", ctypes.c_uint8), ("comid", DLT_ID_SIZE * ctypes.c_byte), ] _pack_ = 1 class MessageMode(object): """Default properties for the DLTMessage""" # pylint: disable=no-member @property def use_extended_header(self): """Returns True if the DLTMessage has extended header""" return self.standardheader.htyp & DLT_HTYP_UEH @property def _is_extended_header_exists(self): return self.extendedheader and self.extendedheader.msin @property def is_mode_verbose(self): """Returns True if the DLTMessage is set to verbose mode""" if not self._is_extended_header_exists: return self.verbose return self.extendedheader.msin & DLT_MSIN_VERB @property def mode_string(self): """Returns 'verbose' if DLTMessage is set to verbose mode. 
Otherwise 'non-verbose'"""
        return b"verbose" if self.is_mode_verbose else b"non-verbose"

    @property
    def is_mode_non_verbose(self):
        """Returns True if the DLTMessage is set to non-verbose mode"""
        return not self.is_mode_verbose

    @property
    def is_type_control(self):
        """Returns True if the DLTMessage type is control"""
        # The message type bits live in the extended header's msin field, not
        # in standardheader.htyp, so check via the type property.
        return self.type == DLT_TYPE_CONTROL

    @property
    def is_type_control_response(self):
        """Returns True if the DLTMessage type is control response"""
        return self.is_type_control and self.subtype == DLT_CONTROL_RESPONSE

    @property
    def message_id(self):
        """Returns message ID of the DLTMessage"""
        if self.is_mode_non_verbose and (self.datasize >= 4):
            ptr_int = ctypes.cast(self.databuffer, ctypes.POINTER(ctypes.c_uint32))
            mid = ptr_int[0]
            return mid
        return 0

    @property
    def message_id_string(self):
        """Returns string representation of message ID"""
        mid = self.message_id
        return qDltCtrlServiceId[mid] if 0 <= mid < len(qDltCtrlServiceId) else b""

    @property
    def ctrl_service_id(self):
        """Returns service ID of the DLTMessage"""
        service_id = 0
        if self.is_type_control and self.datasize >= 4:
            ptr_int = ctypes.cast(self.databuffer, ctypes.POINTER(ctypes.c_uint32))
            service_id = ptr_int[0]
        return service_id

    @property
    def ctrl_service_id_string(self):
        """Returns string representation of service ID"""
        sid = self.ctrl_service_id
        if sid == DLT_SERVICE_ID_UNREGISTER_CONTEXT:
            return b"unregister_context"
        if sid == DLT_SERVICE_ID_CONNECTION_INFO:
            return b"connection_info"
        if sid == DLT_SERVICE_ID_TIMEZONE:
            return b"timezone"
        if sid == DLT_SERVICE_ID_MARKER:
            return b"marker"
        return qDltCtrlServiceId[sid] if sid <= 20 else b""

    @property
    def ctrl_return_type(self):
        """Returns ctrl type of the DLTMessage"""
        return_type = 0
        if self.is_type_control and (self.is_type_control_response and self.datasize >= 6):
            return_type = self.databuffer[4]
        return return_type

    @property
    def ctrl_return_type_string(self):
        """Returns string representation of ctrl type"""
        return qDltCtrlReturnType[self.ctrl_return_type] if self.ctrl_return_type <= 8 else b""

    @property
    def type(self):
        """Returns message type of the DLTMessage"""
        if not self._is_extended_header_exists:
            return DLT_TYPE_LOG
        return (self.extendedheader.msin & DLT_MSIN_MSTP) >> DLT_MSIN_MSTP_SHIFT

    @property
    def type_string(self):
        """Returns string representation of the message type"""
        mtype = self.type
        return qDltMessageType[mtype] if 0 <= mtype <= 7 else b""

    @property
    def subtype(self):
        """Returns message subtype of the DLTMessage"""
        if not self._is_extended_header_exists:
            return DLT_TYPE_LOG
        return (self.extendedheader.msin & DLT_MSIN_MTIN) >> DLT_MSIN_MTIN_SHIFT

    @property
    def subtype_string(self):
        """Returns string representation of the message subtype"""
        mtype = self.type
        msubtype = self.subtype
        if mtype == DLT_TYPE_LOG:
            return qDltLogInfo[msubtype] if 0 <= msubtype <= 7 else b""
        if mtype == DLT_TYPE_APP_TRACE:
            return qDltTraceType[msubtype] if 0 <= msubtype <= 7 else b""
        if mtype == DLT_TYPE_NW_TRACE:
            return qDltNwTraceType[msubtype] if 0 <= msubtype <= 7 else b""
        if mtype == DLT_TYPE_CONTROL:
            return qDltControlType[msubtype] if 0 <= msubtype <= 7 else b""
        return b""

    @property
    def payload_decoded(self):
        """Decode the payload data

        :returns: Payload data
        :rtype: str
        """
        text = b""
        if self.is_mode_non_verbose and not self.is_type_control and self.noar == 0:
            buf = ctypes.create_string_buffer(b"\000" * DLT_DAEMON_TEXTSIZE)
            dltlib.dlt_message_payload(ctypes.byref(self), buf, DLT_DAEMON_TEXTSIZE, DLT_OUTPUT_ASCII, self.verbose)
            return b"[%s] #%s#" % (self.message_id_string,
buf.value[4:].rstrip(b"\000")) if self.type == DLT_TYPE_CONTROL and self.subtype == DLT_CONTROL_RESPONSE: if self.ctrl_service_id == DLT_SERVICE_ID_MARKER: return b"MARKER" text = b"[%s %s] " % (self.ctrl_service_id_string, self.ctrl_return_type_string) service_id = self.ctrl_service_id if self.ctrl_service_id == DLT_SERVICE_ID_GET_SOFTWARE_VERSION: text += ctypes.string_at(self.databuffer, self.datasize)[9:].rstrip(b"\000") elif self.ctrl_service_id == DLT_SERVICE_ID_CONNECTION_INFO: if self.datasize == ctypes.sizeof(cDltServiceConnectionInfo): conn_info = cDltServiceConnectionInfo.from_buffer(bytearray(self.databuffer[: self.datasize])) if conn_info.state == DLT_CONNECTION_STATUS_DISCONNECTED: text += b"disconnected" elif conn_info.state == DLT_CONNECTION_STATUS_CONNECTED: text += b"connected" else: text += b"unknown" text += b" " + ctypes.string_at(conn_info.comid, DLT_ID_SIZE).rstrip(b"\000") else: text += ctypes.string_at(self.databuffer, self.datasize)[5 : 256 + 5].rstrip(b"\000") elif service_id == DLT_SERVICE_ID_TIMEZONE: text += ctypes.string_at(self.databuffer, self.datasize)[5 : 256 + 5].rstrip(b"\000") else: buf = ctypes.create_string_buffer(b"\000" * DLT_DAEMON_TEXTSIZE) dltlib.dlt_message_payload( ctypes.byref(self), buf, DLT_DAEMON_TEXTSIZE, DLT_OUTPUT_ASCII, self.verbose ) text += buf.value.rstrip(b"\000") return text if self.type == DLT_TYPE_CONTROL: return b"[%s] %s" % ( self.ctrl_service_id_string, ctypes.string_at(self.databuffer, self.datasize)[4 : 256 + 4].rstrip(b"\000"), ) buf = ctypes.create_string_buffer(b"\000" * DLT_DAEMON_TEXTSIZE) dltlib.dlt_message_payload(ctypes.byref(self), buf, DLT_DAEMON_TEXTSIZE, DLT_OUTPUT_ASCII, self.verbose) return buf.value.rstrip(b"\000").strip() class cDltStorageHeader(ctypes.Structure): """ /** * The structure of the DLT file storage header. This header is used before each stored DLT message. */ typedef struct { char pattern[DLT_ID_SIZE]; /**< This pattern should be DLT0x01 */ uint32_t seconds; /**< seconds since 1.1.1970 */ int32_t microseconds; /**< Microseconds */ char ecu[DLT_ID_SIZE]; /**< The ECU id is added, if it is not already in the DLT message itself */ } PACKED DltStorageHeader; """ _fields_ = [ ("pattern", ctypes.c_char * DLT_ID_SIZE), ("seconds", ctypes.c_uint32), ("microseconds", ctypes.c_int32), ("ecu", ctypes.c_char * DLT_ID_SIZE), ] _pack_ = 1 def __reduce__(self): return (cDltStorageHeader, (self.pattern, self.seconds, self.microseconds, self.ecu)) class cDltStandardHeader(ctypes.BigEndianStructure): """The structure of the DLT standard header. This header is used in each DLT message. typedef struct { uint8_t htyp; /**< This parameter contains several informations, see definitions below */ uint8_t mcnt; /**< The message counter is increased with each sent DLT message */ uint16_t len; /**< Length of the complete message, without storage header */ } PACKED DltStandardHeader; """ _fields_ = [("htyp", ctypes.c_uint8), ("mcnt", ctypes.c_uint8), ("len", ctypes.c_ushort)] _pack_ = 1 def __reduce__(self): return (cDltStandardHeader, (self.htyp, self.mcnt, self.len)) class cDltStandardHeaderExtra(ctypes.Structure): """The structure of the DLT extra header parameters. Each parameter is sent only if enabled in htyp. 
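    A sketch of the presence rule described above (the helper name is
    illustrative and not part of this module; DLT_HTYP_* and DLT_ID_SIZE are
    the constants defined earlier in this file)::

        def extra_header_wire_size(htyp):
            # number of DltStandardHeaderExtra bytes that follow the standard header
            size = 0
            if htyp & DLT_HTYP_WEID:  # 4-byte ECU id present
                size += DLT_ID_SIZE
            if htyp & DLT_HTYP_WSID:  # 4-byte session id present
                size += 4
            if htyp & DLT_HTYP_WTMS:  # 4-byte timestamp present
                size += 4
            return size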
typedef struct { char ecu[DLT_ID_SIZE]; /**< ECU id */ uint32_t seid; /**< Session number */ uint32_t tmsp; /**< Timestamp since system start in 0.1 milliseconds */ } PACKED DltStandardHeaderExtra; """ _fields_ = [("ecu", ctypes.c_char * DLT_ID_SIZE), ("seid", ctypes.c_uint32), ("tmsp", ctypes.c_uint32)] _pack_ = 1 def __reduce__(self): return (cDltStandardHeaderExtra, (self.ecu, self.seid, self.tmsp)) class cDltExtendedHeader(ctypes.Structure): """The structure of the DLT extended header. This header is only sent if enabled in htyp parameter. typedef struct { uint8_t msin; /**< messsage info */ uint8_t noar; /**< number of arguments */ char apid[DLT_ID_SIZE]; /**< application id */ char ctid[DLT_ID_SIZE]; /**< context id */ } PACKED DltExtendedHeader; """ _fields_ = [ ("msin", ctypes.c_uint8), ("noar", ctypes.c_uint8), ("apid", ctypes.c_char * DLT_ID_SIZE), ("ctid", ctypes.c_char * DLT_ID_SIZE), ] _pack_ = 1 def __reduce__(self): return (cDltExtendedHeader, (self.msin, self.noar, self.apid, self.ctid)) class cDLTMessage(ctypes.Structure): """The structure of the DLT messages. typedef struct sDltMessage { /* flags */ int8_t found_serialheader; /* offsets */ int32_t resync_offset; /* size parameters */ int32_t headersize; /**< size of complete header including storage header */ int32_t datasize; /**< size of complete payload */ /* buffer for current loaded message */ uint8_t headerbuffer[sizeof(DltStorageHeader)+ sizeof(DltStandardHeader)+sizeof(DltStandardHeaderExtra)+sizeof(DltExtendedHeader)]; /**< buffer for loading complete header */ uint8_t *databuffer; /**< buffer for loading payload */ int32_t databuffersize; /* header values of current loaded message */ DltStorageHeader *storageheader; /**< pointer to storage header of current loaded header */ DltStandardHeader *standardheader; /**< pointer to standard header of current loaded header */ DltStandardHeaderExtra headerextra; /**< extra parameters of current loaded header */ DltExtendedHeader *extendedheader; /**< pointer to extended of current loaded header */ } DltMessage; """ _fields_ = [ ("found_serialheader", ctypes.c_int8), ("resync_offset", ctypes.c_int32), ("headersize", ctypes.c_int32), ("datasize", ctypes.c_int32), ( "headerbuffer", ctypes.c_uint8 * ( ctypes.sizeof(cDltStorageHeader) + ctypes.sizeof(cDltStandardHeader) + ctypes.sizeof(cDltStandardHeaderExtra) + ctypes.sizeof(cDltExtendedHeader) ), ), ("databuffer", ctypes.POINTER(ctypes.c_uint8)), ("databuffersize", ctypes.c_uint32), ("p_storageheader", ctypes.POINTER(cDltStorageHeader)), ("p_standardheader", ctypes.POINTER(cDltStandardHeader)), ("headerextra", cDltStandardHeaderExtra), ("p_extendedheader", ctypes.POINTER(cDltExtendedHeader)), ] class cDltReceiver(ctypes.Structure): """The structure is used to organise the receiving of data including buffer handling. This structure is used by the corresponding functions. 
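    A quick cross-check sketch (not from the original sources): with
    _pack_ = 1, the packed ctypes layouts defined above match the on-the-wire
    C struct sizes that cDLTMessage.headerbuffer is dimensioned with::

        assert ctypes.sizeof(cDltStorageHeader) == 16        # pattern + seconds + microseconds + ecu
        assert ctypes.sizeof(cDltStandardHeader) == 4        # htyp + mcnt + len
        assert ctypes.sizeof(cDltStandardHeaderExtra) == 12  # ecu + seid + tmsp
        assert ctypes.sizeof(cDltExtendedHeader) == 10       # msin + noar + apid + ctid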
typedef struct { int32_t lastBytesRcvd; /**< bytes received in last receive call */ int32_t bytesRcvd; /**< received bytes */ int32_t totalBytesRcvd; /**< total number of received bytes */ char *buffer; /**< pointer to receiver buffer */ char *buf; /**< pointer to position within receiver buffer */ int fd; /**< connection handle */ int32_t buffersize; /**< size of receiver buffer */ } DltReceiver; """ _fields_ = [ ("lastBytesRcvd", ctypes.c_int32), ("bytesRcvd", ctypes.c_int32), ("totalBytesRcvd", ctypes.c_int32), ("buffer", ctypes.POINTER(ctypes.c_char)), ("buf", ctypes.POINTER(ctypes.c_char)), ("fd", ctypes.c_int), ("buffersize", ctypes.c_int32), ] class cDltClient(ctypes.Structure): """ typedef struct { DltReceiver receiver; /**< receiver pointer to dlt receiver structure */ int sock; /**< sock Connection handle/socket */ char *servIP; /**< servIP IP adress/Hostname of TCP/IP interface */ char *serialDevice; /**< serialDevice Devicename of serial device */ speed_t baudrate; /**< baudrate Baudrate of serial interface, as speed_t */ int serial_mode; /**< serial_mode Serial mode enabled =1, disabled =0 */ } DltClient; """ _fields_ = [ ("receiver", cDltReceiver), ("sock", ctypes.c_int), ("servIP", ctypes.c_char_p), ("serialDevice", ctypes.c_char_p), ("baudrate", ctypes.c_int), ("serial_mode", ctypes.c_int), ] class cDLTFilter(ctypes.Structure): # pylint: disable=invalid-name """ typedef struct { char apid[DLT_FILTER_MAX][DLT_ID_SIZE]; /**< application id */ char ctid[DLT_FILTER_MAX][DLT_ID_SIZE]; /**< context id */ int counter; /**< number of filters */ } DltFilter; """ _fields_ = [ ("apid", (ctypes.c_char * DLT_ID_SIZE) * DLT_FILTER_MAX), ("ctid", (ctypes.c_char * DLT_ID_SIZE) * DLT_FILTER_MAX), ("counter", ctypes.c_int), ] # pylint: disable=too-many-arguments def add(self, apid, ctid): """Add new filter pair""" if isinstance(apid, str): apid = bytes(apid, "ascii") if isinstance(ctid, str): ctid = bytes(ctid, "ascii") if dltlib.dlt_filter_add(ctypes.byref(self), apid or b"", ctid or b"", self.verbose) == DLT_RETURN_ERROR: if self.counter >= DLT_FILTER_MAX: logger.error("Maximum number (%d) of allowed filters reached, ignoring filter!\n", DLT_FILTER_MAX) return MAX_FILTER_REACHED logger.debug("Filter ('%s', '%s') already exists", apid, ctid) return REPEATED_FILTER return 0 python-dlt-2.18.10.0/dlt/dlt.py000066400000000000000000001450551464055136400160650ustar00rootroot00000000000000# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved. 
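# A sketch (not part of the original file) of how to check which libdlt is
# actually loaded; that version decides whether the smaller pre-2.18
# cDltReceiver/cDltClient layouts defined above apply. It assumes the
# two-argument dlt_get_version() exported by recent libdlt releases:
#
#     import ctypes
#     from dlt.core import dltlib
#
#     buf = ctypes.create_string_buffer(255)
#     dltlib.dlt_get_version(buf, 255)  # fills buf with the libdlt version string
#     print(buf.value.decode("utf8", errors="replace"))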
"""Pure Python implementation of DLT library""" import ctypes import ipaddress as ip import logging import os import re import socket import struct import time import threading import multiprocessing from dlt.core import ( cDLTFilter, dltlib, DLT_CLIENT_MODE_UDP_MULTICAST, DLT_ID_SIZE, DLT_HTYP_WEID, DLT_HTYP_WSID, DLT_HTYP_WTMS, DLT_HTYP_UEH, DLT_RETURN_OK, DLT_RETURN_ERROR, DLT_RETURN_TRUE, DLT_MESSAGE_ERROR_OK, cDltExtendedHeader, cDltClient, MessageMode, cDLTMessage, cDltStorageHeader, cDltStandardHeader, DLT_TYPE_INFO_UINT, DLT_TYPE_INFO_SINT, DLT_TYPE_INFO_STRG, DLT_TYPE_INFO_SCOD, DLT_TYPE_INFO_TYLE, DLT_TYPE_INFO_VARI, DLT_TYPE_INFO_RAWD, DLT_SCOD_ASCII, DLT_SCOD_UTF8, DLT_TYLE_8BIT, DLT_TYLE_16BIT, DLT_TYLE_32BIT, DLT_TYLE_64BIT, DLT_TYLE_128BIT, DLT_DAEMON_TCP_PORT, DLT_CLIENT_RCVBUFSIZE, DLT_RECEIVE_SOCKET, ) from dlt.helpers import bytes_to_str MAX_LOG_IN_ROW = 3 logger = logging.getLogger(__name__) # pylint: disable=invalid-name DLT_EMPTY_FILE_ERROR = "DLT TRACE FILE IS EMPTY" cDLT_FILE_NOT_OPEN_ERROR = "Could not open DLT Trace file (libdlt)" # pylint: disable=invalid-name DLT_UDP_MULTICAST_FD_BUFFER_SIZE = int(os.environ.get("PYDLT_UDP_MULTICAST_FD_BUFFER_SIZE", 2 * (2**20))) # 2 Mb DLT_UDP_MULTICAST_BUFFER_SIZE = int(os.environ.get("PYDLT_UDP_MULTICAST_BUFFER_SIZE", 8 * (2**20))) # 8 Mb class cached_property(object): # pylint: disable=invalid-name """ A property that is only computed once per instance and then replaces itself with an ordinary attribute. Deleting the attribute resets the property. Copyright: Marcel Hellkamp Source: https://github.com/bottlepy/bottle/commit/fa7733e075da0d790d809aa3d2f53071897e6f76 Licence: MIT """ # noqa def __init__(self, func): self.__doc__ = getattr(func, "__doc__") self.func = func def __get__(self, obj, cls): if obj is None: return self value = obj.__dict__[self.func.__name__] = self.func(obj) return value class DLTFilter(cDLTFilter): """Structure to store filter parameters. ID are maximal four characters. Unused values are filled with zeros. If every value as filter is valid, the id should be empty by having only zero values. 
""" verbose = 0 def __init__(self, **kwords): self.verbose = kwords.pop("verbose", 0) if dltlib.dlt_filter_init(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR: raise RuntimeError("Could not initialize DLTFilter") super(DLTFilter, self).__init__(**kwords) def __del__(self): if dltlib.dlt_filter_free(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR: raise RuntimeError("Could not cleanup DLTFilter") def __repr__(self): """Return the 'official' string representation of an object""" apids = [ctypes.string_at(entry[:DLT_ID_SIZE]) for entry in self.apid] ctids = [ctypes.string_at(entry[:DLT_ID_SIZE]) for entry in self.ctid] return str(list(zip(apids[: self.counter], ctids[: self.counter]))) def __nonzero__(self): """Truth value testing""" return self.counter > 0 __bool__ = __nonzero__ class Payload(object): """Payload object encapsulates the payload decoding and list-like access to arguments""" def __init__(self, message): self._params = None self._noar = message.noar self._buf = ctypes.string_at(message.databuffer, message.datasize) def __getitem__(self, index): """Accessing the payload item as a list""" if index < 0 or index > self._noar: return IndexError() # we have parsed it already - just return the item if self._params is not None: return self._params[index] self._parse_payload() return self._params[index] def _parse_payload(self): # pylint: disable=too-many-branches,too-many-statements """Parse the payload into list of arguments""" self._params = [] offset = 0 for _ in range(self._noar): type_info = struct.unpack_from("I", self._buf, offset)[0] offset += struct.calcsize("I") def get_scod(type_info): """Helper function""" return type_info & DLT_TYPE_INFO_SCOD value = None if type_info & DLT_TYPE_INFO_STRG: if (get_scod(type_info) == DLT_SCOD_ASCII) or (get_scod(type_info) == DLT_SCOD_UTF8): length = struct.unpack_from("H", self._buf, offset)[0] offset += struct.calcsize("H") value = self._buf[offset : offset + length - 1] # strip the string terminating char \x00 offset += length elif type_info & DLT_TYPE_INFO_UINT: if type_info & DLT_TYPE_INFO_VARI: pass tyle = type_info & DLT_TYPE_INFO_TYLE if tyle == DLT_TYLE_8BIT: value = struct.unpack_from("B", self._buf, offset)[0] offset += 1 elif tyle == DLT_TYLE_16BIT: value = struct.unpack_from("H", self._buf, offset)[0] offset += 2 elif tyle == DLT_TYLE_32BIT: value = struct.unpack_from("I", self._buf, offset)[0] offset += 4 elif tyle == DLT_TYLE_64BIT: value = struct.unpack_from("Q", self._buf, offset)[0] offset += 8 elif tyle == DLT_TYLE_128BIT: raise TypeError("reading 128BIT values not supported") elif type_info & DLT_TYPE_INFO_SINT: if type_info & DLT_TYPE_INFO_VARI: pass tyle = type_info & DLT_TYPE_INFO_TYLE if tyle == DLT_TYLE_8BIT: value = struct.unpack_from("b", self._buf, offset)[0] offset += 1 elif tyle == DLT_TYLE_16BIT: value = struct.unpack_from("h", self._buf, offset)[0] offset += 2 elif tyle == DLT_TYLE_32BIT: value = struct.unpack_from("i", self._buf, offset)[0] offset += 4 elif tyle == DLT_TYLE_64BIT: value = struct.unpack_from("q", self._buf, offset)[0] offset += 8 elif tyle == DLT_TYLE_128BIT: raise TypeError("reading 128BIT values not supported") elif type_info & DLT_TYPE_INFO_RAWD: if type_info & DLT_TYPE_INFO_VARI: pass length = struct.unpack_from("H", self._buf, offset)[0] offset += struct.calcsize("H") value = self._buf[offset : offset + length] offset += length else: value = "ERROR" self._params.append(value) def __len__(self): """Return number of parsed parameters""" if self._params is None: 
self._parse_payload() return len(self._params) class DLTMessage(cDLTMessage, MessageMode): """Python wrapper class for the cDLTMessage structure""" verbose = 0 # object is not initialized if the message is loaded from a file initialized_as_object = False re_pattern_type = type(re.compile(r"type")) def __init__(self, *args, **kwords): self.initialized_as_object = True self.verbose = kwords.pop("verbose", 0) if self.verbose: logger.debug("DLTMessage._init_(%s)", kwords) self.lifecycle = None if dltlib.dlt_message_init(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR: raise RuntimeError("Could not initialize DLTMessage") super(DLTMessage, self).__init__(*args, **kwords) def __reduce__(self): """Pickle serialization API This method is called by the pickle module to serialize objects that it cannot automatically serialize. """ # copy the data from the databuffer pointer into an array databuffer = ctypes.ARRAY(ctypes.c_uint8, self.datasize)() ctypes.memmove(databuffer, self.databuffer, self.datasize) init_args = (self.found_serialheader, self.resync_offset, self.headersize, self.datasize) state_dict = { "headerbuffer": bytearray(self.headerbuffer), "databuffer": bytearray(databuffer), "databuffersize": self.databuffersize, "storageheader": self.storageheader, "standardheader": self.standardheader, "headerextra": self.headerextra, "extendedheader": self.extendedheader, } return (DLTMessage, init_args, state_dict) # pylint: disable=attribute-defined-outside-init def __setstate__(self, state): """Pickle deserialization API This method is called by the pickle module to populate a deserialized object's state after it has been created. """ self.databuffersize = state["databuffersize"] self.p_storageheader.contents = state["storageheader"] self.p_standardheader.contents = state["standardheader"] self.headerextra = state["headerextra"] self.p_extendedheader.contents = state["extendedheader"] # - populate databuffer databuffer = ctypes.ARRAY(ctypes.c_uint8, self.datasize)() for index, byte in enumerate(state["databuffer"]): databuffer[index] = byte self.databuffer = databuffer # - populate headerbuffer for index, byte in enumerate(state["headerbuffer"]): self.headerbuffer[index] = byte # - This is required because we are not calling # dlt_message_init() so we do not need to call # dlt_message_free() self.initialized_as_object = False @staticmethod def from_bytes(data): """Create a class instance from a byte string in DLT storage format""" msg = DLTMessage() storageheader, remainder = msg.extract_storageheader(data) buf = ctypes.create_string_buffer(remainder) dltlib.dlt_message_read( ctypes.byref(msg), ctypes.cast(buf, ctypes.POINTER(ctypes.c_uint8)), ctypes.c_uint(len(remainder)), 0, # resync 0, ) # verbose msg.p_storageheader.contents = storageheader msg.initialized_as_object = False return msg def to_bytes(self): """Create DLT storage format bytes from DLTMessage instance""" return ctypes.string_at(self.headerbuffer, self.headersize) + ctypes.string_at(self.databuffer, self.datasize) def __copy__(self): """Create a copy of the message""" return DLTMessage.from_bytes(self.to_bytes()) @staticmethod def extract_storageheader(data): """Split binary message data into storage header and remainder""" header = data[0 : ctypes.sizeof(cDltStorageHeader)] # pylint: disable=no-member return (cDltStorageHeader.from_buffer_copy(header), data[ctypes.sizeof(cDltStorageHeader) :]) @staticmethod def extract_sort_data(data): """Extract timestamp, message length, apid, ctid from a bytestring in DLT storage format 
(speed optimized)""" htyp_data = ord(chr(data[16])) len_data = data[19:17:-1] len_value = ctypes.cast(len_data, ctypes.POINTER(ctypes.c_ushort)).contents.value + 16 apid = b"" ctid = b"" tmsp_value = 0.0 bytes_offset = 0 # We know where data will be in the message, but ... if not htyp_data & DLT_HTYP_WEID: # if there is no ECU ID and/or Session ID, then it will be earlier bytes_offset -= 4 if not htyp_data & DLT_HTYP_WSID: bytes_offset -= 4 if htyp_data & DLT_HTYP_WTMS: tmsp_base = 31 + bytes_offset # Typical timestamp end offset tmsp_data = data[tmsp_base : tmsp_base - 4 : -1] tmsp_value = ctypes.cast(tmsp_data, ctypes.POINTER(ctypes.c_uint32)).contents.value / 10000.0 if htyp_data & DLT_HTYP_UEH: apid_base = 38 + bytes_offset # Typical APID end offset apid = data[apid_base - 4 : apid_base].rstrip(b"\x00") ctid = data[apid_base : apid_base + 4].rstrip(b"\x00") apid = bytes_to_str(apid) ctid = bytes_to_str(ctid) return tmsp_value, len_value, apid, ctid def __del__(self): if self.initialized_as_object is True: if dltlib.dlt_message_free(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR: raise RuntimeError("Could not free DLTMessage") @property def storageheader(self): """Workaround to get rid of need to call .contents""" try: return self.p_storageheader.contents except ValueError: return None @property def standardheader(self): """Workaround to get rid of need to call .contents""" return self.p_standardheader.contents @property def extendedheader(self): """Workaround to get rid of need to call .contents""" try: return self.p_extendedheader.contents except ValueError: return None def __eq__(self, other): """Equal test - not comparing storage header (contains timestamps)""" header1 = ctypes.string_at(self.headerbuffer, self.headersize)[ctypes.sizeof(cDltStorageHeader) :] header2 = ctypes.string_at(other.headerbuffer, other.headersize)[ctypes.sizeof(cDltStorageHeader) :] data1 = ctypes.string_at(self.databuffer, self.datasize) data2 = ctypes.string_at(other.databuffer, other.datasize) return header1 == header2 and data1 == data2 def compare(self, other=None): # pylint: disable=too-many-return-statements,too-many-branches """Compare messages by given attributes :param [DLTMessage|DLTFilter|dict] other: DLTMessage object (or DLTFilter or a dict with selected keys) to compare with. Use DLTFilter object with APID,CTID pairs for the best performance. 
:returns: True if all attributes match or False if any of the given attributes differs :rtype: bool :raises TypeError: if other is neither DLTMessage nor a dictionary Example: message.compare(other=message2) message.compare(message2) message.compare(other=dict(apid="AP1", ctid="CT1")) message.compare(dict(apid="AP1", ctid="CT1")) message.compare(dict(apid=re.compile(r"^A.*")) # match all messages which apid starting with A message.compare(dict(apid="AP1", ctid="CT1", payload_decoded=re.compile(r".connected.*"))) """ if hasattr(other, "apid") and hasattr(other, "ctid") and hasattr(other, "payload_decoded"): # other is DLTMessage - full compare return self.apid == other.apid and self.ctid == other.ctid and self.__eq__(other) # pylint: disable=protected-access if hasattr(other, "_fields_") and [x[0] for x in other._fields_] == [ "apid", "ctid", "log_level", "payload_max", "payload_min", "counter", ]: # other id DLTFilter return dltlib.dlt_message_filter_check(ctypes.byref(self), ctypes.byref(other), 0) if not isinstance(other, dict): raise TypeError( "other must be instance of dlt.dlt.DLTMessage, dlt.dlt.DLTFilter or a dictionary" " found: {}".format(type(other)) ) other = other.copy() apid = other.get("apid", None) if apid and not isinstance(apid, self.re_pattern_type) and self.apid != apid: return False ctid = other.get("ctid", None) if ctid and not isinstance(ctid, self.re_pattern_type) and self.ctid != ctid: return False for key, val in other.items(): if val is None: continue key = key.rsplit(".", 1)[-1] # In case the obsolete "extendedheader.apid" notation is used msg_val = getattr(self, key, b"") if not msg_val: return False if isinstance(val, self.re_pattern_type): if not val.search(msg_val): return False elif msg_val != val: return False return True def __str__(self): """Construct DLTViewer-like string""" out = [time.asctime(time.gmtime(self.storage_timestamp))] if self.headerextra: out.append(self.headerextra.tmsp / 10000.0) out += [self.standardheader.mcnt, self.storageheader.ecu] if self.extendedheader: out += [self.extendedheader.apid, self.extendedheader.ctid] if self.headerextra: out.append(self.headerextra.seid) out += [self.type_string, self.subtype_string, self.mode_string, self.noar, self.payload_decoded] return " ".join(bytes_to_str(item) for item in out) # convenient access to import DLT message attributes # no need to remember in which header are those attrs defined @cached_property def ecuid(self): # pylint: disable=invalid-overridden-method """Get the ECU ID :returns: ECU ID :rtype: str """ return bytes_to_str(self.storageheader.ecu or self.headerextra.ecu) @cached_property def mcnt(self): # pylint: disable=invalid-overridden-method """Get the message counter index :returns: message index :rtype: int """ return int(self.standardheader.mcnt) @cached_property def seid(self): # pylint: disable=invalid-overridden-method """Get the Session ID if WSID is set in the message type, otherwise 0 :returns: Session ID :rtype: int """ return int(self.headerextra.seid) if (self.standardheader.htyp & DLT_HTYP_WSID) else 0 @cached_property def tmsp(self): # pylint: disable=invalid-overridden-method """Get the timestamp :returns: timestamp :rtype: float [s] """ return (self.headerextra.tmsp / 10000.0) if (self.standardheader.htyp & DLT_HTYP_WTMS) else 0 @cached_property def apid(self): # pylint: disable=invalid-overridden-method """Get the Application ID :returns: Application ID :rtype: str """ return bytes_to_str(self.extendedheader.apid if self.extendedheader else "") @cached_property 
    def ctid(self):  # pylint: disable=invalid-overridden-method
        """Get the Context ID

        :returns: Context ID
        :rtype: str
        """
        return bytes_to_str(self.extendedheader.ctid if self.extendedheader else "")

    @cached_property
    def noar(self):  # pylint: disable=invalid-overridden-method
        """Get the number of arguments

        :returns: number of arguments
        :rtype: int
        """
        if self.use_extended_header and self.is_mode_verbose:
            return self.extendedheader.noar
        return 0

    @cached_property
    def payload(self):  # pylint: disable=invalid-overridden-method
        """Get the payload object

        :returns: Payload object
        :rtype: Payload
        """
        return Payload(self)

    @cached_property
    def payload_decoded(self):  # pylint: disable=invalid-overridden-method
        """Get the payload string

        :returns: Payload string
        :rtype: str
        """
        return bytes_to_str(super(DLTMessage, self).payload_decoded)

    @cached_property
    def storage_timestamp(self):  # pylint: disable=invalid-overridden-method
        """Get the storage header timestamp in seconds

        :returns: storage header timestamp
        :rtype: float
        """
        return self.storageheader.seconds + self.storageheader.microseconds * 0.000001


class cDLTFile(ctypes.Structure):  # pylint: disable=invalid-name
    """The structure to organise the access to DLT files.
    This structure is used by the corresponding functions.

    typedef struct sDltFile
    {
        /* file handle and index for fast access */
        FILE *handle;  /**< file handle of opened DLT file */
        long *index;   /**< file positions of all DLT messages for fast access to file, only filtered messages */

        /* size parameters */
        int32_t counter;        /**< number of messages in DLT file with filter */
        int32_t counter_total;  /**< number of messages in DLT file without filter */
        int32_t position;       /**< current index to message parsed in DLT file starting at 0 */
        uint64_t file_length;   /**< length of the file */
        uint64_t file_position; /**< current position in the file */

        /* error counters */
        int32_t error_messages; /**< number of incomplete DLT messages found during file parsing */

        /* filter parameters */
        DltFilter *filter;      /**< pointer to filter list. Zero if no filter is set.
    */
        int32_t filter_counter; /**< number of filters set */

        /* current loaded message */
        DltMessage msg;         /**< pointer to message */
    } DltFile;
    """

    _fields_ = [
        ("handle", ctypes.POINTER(ctypes.c_int)),
        ("index", ctypes.POINTER(ctypes.c_long)),
        ("counter", ctypes.c_int32),
        ("counter_total", ctypes.c_int32),
        ("position", ctypes.c_int32),
        ("file_length", ctypes.c_uint64),
        ("file_position", ctypes.c_uint64),
        ("error_messages", ctypes.c_int32),
        ("filter", ctypes.POINTER(DLTFilter)),
        ("filter_counter", ctypes.c_int32),
        ("msg", DLTMessage),
    ]

    def __init__(self, **kwords):
        self.verbose = kwords.pop("verbose", 0)
        self.filename = kwords.pop("filename", None)
        if isinstance(self.filename, str):
            self.filename = bytes(self.filename, "utf-8")
        super(cDLTFile, self).__init__(**kwords)
        if dltlib.dlt_file_init(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR:
            raise RuntimeError("Could not initialize DLTFile")
        self._iter_index = 0
        self.corrupt_msg_count = 0
        self.indexed = False
        self.end = False
        self.live_run = kwords.pop("is_live", False)
        # Stop event for threading usage in caller
        self.stop_reading = threading.Event()
        # Stop event for process usage in caller
        self.stop_reading_proc = multiprocessing.Event()

    def __repr__(self):
        # pylint: disable=bad-continuation
        return "<cDLTFile {} with {} messages>".format(
            "filename={}".format(self.filename) if self.filename else "", self.counter_total
        )

    def __del__(self):
        if dltlib.dlt_file_free(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR:
            raise RuntimeError("Could not cleanup DLTFile")

    def _find_next_header(self):
        """Helper function for generate_index to skip over invalid storage headers.

        :returns: Offset of the next storage header position (after
            self.file_position) if one was found, None otherwise (end of file)
        :rtype: int|None
        """
        with open(self.filename, "rb") as fobj:
            last_position = self.file_position  # pylint: disable=access-member-before-definition
            fobj.seek(last_position)
            buf = fobj.read(1024)
            while buf:
                found = buf.find(b"DLT\x01")
                if found != -1:
                    return last_position + found
                last_position = fobj.tell()
                buf = fobj.read(1024)
        return None

    # pylint: disable=attribute-defined-outside-init,access-member-before-definition
    def generate_index(self):
        """Generate an index for the loaded DLT file

        :returns: True if file had been previously read and the index is
            successfully generated, otherwise False
        :rtype: bool
        """
        if not self.filename:
            return False
        self.indexed = False
        if dltlib.dlt_file_open(ctypes.byref(self), self.filename, self.verbose) >= DLT_RETURN_OK:
            # load, analyse data file and create index list
            if self.file_length == 0:
                raise IOError(DLT_EMPTY_FILE_ERROR)
            while self.file_position < self.file_length:
                ret = dltlib.dlt_file_read(ctypes.byref(self), self.verbose)
                if ret < DLT_RETURN_OK:
                    # - This can happen if either the frame's storage
                    #   header could not be read correctly or the frame is
                    #   corrupt. If the frame's storage header could not
                    #   be read correctly we try to get the next storage
                    #   header and continue indexing
                    next_header_position = self._find_next_header()
                    if next_header_position:
                        if self.file_position == next_header_position:  # pylint: disable=no-else-break
                            # - This implies that dltlib.dlt_file_read()
                            #   returned due to an error other than an invalid
                            #   storage header, because we already were at the
                            #   correct header_position in the last iteration.
                            #   So, we need to break out of the read/index loop.
break else: self.file_position = next_header_position self.corrupt_msg_count += 1 else: break self.indexed = True else: raise IOError(cDLT_FILE_NOT_OPEN_ERROR) return self.indexed def read(self, filename, filters=None): """Index the DLT trace file for optimized DLT Message access :param str filename: DLT log filename to read the messages from :param list filters: List of filters to apply [("APPID", "CTID"), ...] :returns: True if file was read and indexed successfully, otherwise False :rtype: bool """ # load the filters self.set_filters(filters) if isinstance(filename, str): filename = bytes(filename, "utf-8") # read and index file self.filename = filename self.generate_index() return self.indexed def set_filters(self, filters): """Set filters to optimize access""" if filters is not None: dlt_filter = DLTFilter(verbose=self.verbose) for apid, ctid in filters: if isinstance(apid, str): apid = bytes(apid, "ascii") if isinstance(ctid, str): ctid = bytes(ctid, "ascii") dlt_filter.add(apid, ctid) self.filters = dlt_filter dltlib.dlt_file_set_filter(ctypes.byref(self), ctypes.byref(dlt_filter), self.verbose) def __getitem__(self, index): """Load a DLT message from opened file :param int index: Index of a message to load :returns: Loaded DLTMessage :rtype: DLTMessage object :raises IndexError: If message index is out of boundary """ if index < 0: if self.counter == 0: self.read(self.filename) index = self.counter + index if index == 0 and self.counter == 0: self.read(self.filename) if index < 0 or index >= self.counter: raise IndexError("Index out of range (0 < %d < %d)" % (index, self.counter)) dltlib.dlt_file_message(ctypes.byref(self), index, self.verbose) # deepcopy the object msg = DLTMessage.from_buffer_copy(self.msg) # pylint: disable=no-member msg.databuffer.contents = ctypes.create_string_buffer(self.msg.datasize) ctypes.memmove(msg.databuffer, self.msg.databuffer, msg.datasize) # set the new storage header pointer offset = 0 hdr = cDltStorageHeader.from_address(ctypes.addressof(msg.headerbuffer) + offset) # pylint: disable=no-member msg.p_storageheader = ctypes.pointer(hdr) # set the new standard header pointer offset = ctypes.sizeof(cDltStorageHeader) hdr = cDltStandardHeader.from_address(ctypes.addressof(msg.headerbuffer) + offset) # pylint: disable=no-member msg.p_standardheader = ctypes.pointer(hdr) # set the new extended header pointer if self.msg.use_extended_header: offset = ctypes.addressof(self.msg.p_extendedheader.contents) - ctypes.addressof(self.msg.headerbuffer) # pylint: disable=no-member hdr = cDltExtendedHeader.from_address(ctypes.addressof(msg.headerbuffer) + offset) msg.p_extendedheader = ctypes.pointer(hdr) return msg def _open_file(self): """Open the configured file for processing""" file_opened = False while not self._is_stop_reading_set(): if dltlib.dlt_file_open(ctypes.byref(self), self.filename, self.verbose) >= DLT_RETURN_OK: file_opened = True break if not self.live_run: break time.sleep(0.5) if not file_opened: logger.error("DLT FILE OPEN FAILED - Analysis will not be performed") raise IOError(cDLT_FILE_NOT_OPEN_ERROR) def _log_message_progress(self): """Logs current message for progress information""" length = os.stat(self.filename).st_size logger.debug( "Processed %s messages (%s%% of %sfile) from %s, next message is apid %s, ctid %s", self.position, int(100 * self.file_position / length), "live " if self.live_run else "", self.filename, self.msg.apid, self.msg.ctid, ) def _is_stop_reading_set(self): return self.stop_reading.is_set() or 
self.stop_reading_proc.is_set() def __iter__(self): # pylint: disable=too-many-branches """Iterate over messages in the file""" logger.debug("Starting File Read") logger.debug( "File Position: %d File Counter: %d File Name: %s", self.file_position, self.counter, self.filename ) cached_mtime = 0 cached_file_pos = 0 corruption_check_try = True self._open_file() found_data = False while not self._is_stop_reading_set() or corruption_check_try: # pylint: disable=too-many-nested-blocks os_stat = os.stat(self.filename) mtime = os_stat.st_mtime if mtime != cached_mtime and os_stat.st_size or corruption_check_try: cached_mtime = mtime corruption_check_try = False while not self._is_stop_reading_set() and ( dltlib.dlt_file_read(ctypes.byref(self), self.verbose) >= DLT_RETURN_OK ): found_data = True if ( self.filter and dltlib.dlt_message_filter_check(ctypes.byref(self.msg), self.filter, 0) != DLT_RETURN_TRUE ): continue index = self.position msg = self[index] if not index % 100000: self._log_message_progress() yield msg if cached_file_pos != self.file_position: # We were able to read messages, don't do a corrupt message check yet. corruption_check_try = True cached_file_pos = self.file_position else: next_header_position = self._find_next_header() if next_header_position: if self.file_position == next_header_position: if not self.live_run: logger.warning("Incomplete message while parsing DLT file at %s", self.file_position) break else: logger.warning("Found a corrupt message at %s, skipping it", self.file_position) self.file_position = next_header_position self.corrupt_msg_count += 1 corruption_check_try = True # Wait for further messages to determine if corrupt, else just end of file else: if not self.live_run: logger.info("End of file reached at %s", self.file_position) break time.sleep(0.1) if not found_data: raise IOError(DLT_EMPTY_FILE_ERROR) def __len__(self): """Returns filtered file length""" return self.counter class DLTClient(cDltClient): """DLTClient class takes care about correct initialization and cleanup""" verbose = 0 def __init__(self, **kwords): """Initialize a DLTClient. :param servIP: Optional[str] - dlt server IP. :param hostIP: Optional[str] - Only available for udp multicast mode. Set host interface address. :param port: Optional[int] - dlt tcp daemon port. :param verbose: Optional[bool] - Enable verbose output. :param udp_fd_buffer_size_bytes: Optional[int] - Only available for udp multicast mode. Set the UDP buffer size through setsockopt (unit: bytes). :param udp_buffer_size_bytes: Optional[int] - Only available for udp multicast mode. Set the DltReceiver's buffer size (unit: bytes). 
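        Usage sketch (addresses and sizes are examples only)::

            client = DLTClient(
                servIP="225.0.0.37",    # multicast group address, enables UDP multicast mode
                hostIP="192.168.1.10",  # local interface on which to join the group
                udp_fd_buffer_size_bytes=4 * 2**20,
                udp_buffer_size_bytes=16 * 2**20,
            )
            if client.connect(timeout=5):
                ...  # hand the client over to py_dlt_client_main_loop()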
""" self.is_udp_multicast = False self.verbose = kwords.pop("verbose", 0) if dltlib.dlt_client_init(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR: raise RuntimeError("Could not initialize DLTClient") if "servIP" in kwords: serv_ip = kwords.pop("servIP") if isinstance(serv_ip, str): serv_ip = serv_ip.encode("utf8") ip_init_state = dltlib.dlt_client_set_server_ip(ctypes.byref(self), ctypes.create_string_buffer(serv_ip)) if ip_init_state == DLT_RETURN_ERROR: raise RuntimeError("Could not initialize servIP for DLTClient") if ip.ip_address(serv_ip.decode("utf8")).is_multicast: logger.info("Initializing DLTClient using UDP") self.is_udp_multicast = True if "hostIP" in kwords: host_ip = kwords.pop("hostIP") if isinstance(host_ip, str): host_ip = host_ip.encode("utf8") ip_init_state = dltlib.dlt_client_set_host_if_address( ctypes.byref(self), ctypes.create_string_buffer(host_ip) ) if ip_init_state == DLT_RETURN_ERROR: raise RuntimeError("Could not initialize multicast address for DLTClient") set_mode_state = dltlib.dlt_client_set_mode(ctypes.byref(self), DLT_CLIENT_MODE_UDP_MULTICAST) logger.info("DLTClient using UDP set mode state: %s", set_mode_state) if set_mode_state == DLT_RETURN_ERROR: raise RuntimeError("Could not initialize socket mode for DLTClient") # attribute to hold a reference to the connected socket in case # we created a connection with a timeout (via python, as opposed # to dltlib). This avoids the socket object from being garbage # collected when it goes out of the connect() method scope self._connected_socket = None super(DLTClient, self).__init__(**kwords) # (re)set self.port, even for API version <2.16.0 since we use # it ourselves elsewhere self.port = kwords.get("port", DLT_DAEMON_TCP_PORT) self._udp_fd_buffer_size_bytes = kwords.get("udp_fd_buffer_size_bytes", DLT_UDP_MULTICAST_FD_BUFFER_SIZE) self._udp_buffer_size_bytes = kwords.get("udp_buffer_size_bytes", DLT_UDP_MULTICAST_BUFFER_SIZE) def connect(self, timeout=None): """Connect to the server If timeout is provided, block on connect until timeout occurs. 
If timeout is not provided or is None, try to connect and return immediately :param int|None timeout: Seconds to wait for connection :returns: True if connected successfully, False otherwise :rtype: bool """ connected = None error_count = 0 if not self.is_udp_multicast: if self.verbose: logger.info("Connecting DLTClient using TCP Connection") if timeout: end_time = time.time() + timeout while time.time() < end_time: timeout_remaining = max(end_time - time.time(), 1) if timeout else None try: self._connected_socket = socket.create_connection( (ctypes.string_at(self.servIP), self.port), timeout=timeout_remaining ) except IOError as exc: if error_count < MAX_LOG_IN_ROW and self.verbose: logger.debug( "DLT client connect failed to connect to %s:%s : %s", self.servIP, self.port, exc ) error_count += 1 time.sleep(1) if self._connected_socket: # pylint: disable=attribute-defined-outside-init self.sock = ctypes.c_int(self._connected_socket.fileno()) # - also init the receiver to replicate # dlt_client_connect() behavior if hasattr(self.receiver, "type"): connected = dltlib.dlt_receiver_init( ctypes.byref(self.receiver), self.sock, DLT_RECEIVE_SOCKET, DLT_CLIENT_RCVBUFSIZE ) else: connected = dltlib.dlt_receiver_init( ctypes.byref(self.receiver), self.sock, DLT_CLIENT_RCVBUFSIZE ) break else: connected = dltlib.dlt_client_connect(ctypes.byref(self), self.verbose) # - create a python socket object so that we can detect # connection loss in the main_loop below as described at # http://stefan.buettcher.org/cs/conn_closed.html self._connected_socket = socket.fromfd(self.sock, socket.AF_INET6, socket.SOCK_STREAM) if error_count > MAX_LOG_IN_ROW and self.verbose: logger.debug("Surpressed %d messages for failed connection attempts", error_count - MAX_LOG_IN_ROW) else: if self.verbose: logger.info("Connecting DLTClient using UDP Connection") connected = dltlib.dlt_client_connect(ctypes.byref(self), self.verbose) self._set_udp_multicast_buffer_size() if self.verbose: logger.info("DLT Connection return: %s", connected) return connected == DLT_RETURN_OK def disconnect(self): """Close all sockets""" if dltlib.dlt_client_cleanup(ctypes.byref(self), self.verbose) == DLT_RETURN_ERROR: raise RuntimeError("Could not cleanup DLTClient") if self._connected_socket: try: self._connected_socket.shutdown(socket.SHUT_RDWR) except IOError: pass except Exception: # pylint: disable=broad-except logger.exception("Unexpected exception while shutting down connection") try: self._connected_socket.close() except IOError: pass except Exception: # pylint: disable=broad-except logger.exception("Unexpected exception while disconnecting") def read_message(self, verbose=False): """Read new message :param bool verbose: Log every dlt_message_read(). Set True only for debugging. 
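        A simplified sketch of the surrounding receive cycle, assuming a libdlt
        whose dlt_receiver_receive() takes only the receiver (older versions
        need an extra argument; see the version-aware py_dlt_client_main_loop()
        further below, which also handles serial headers). handle() is a
        stand-in for user code::

            if dltlib.dlt_receiver_receive(ctypes.byref(client.receiver)) > 0:
                msg = client.read_message()
                while msg is not None:
                    size = msg.headersize + msg.datasize - ctypes.sizeof(cDltStorageHeader)
                    dltlib.dlt_receiver_remove(ctypes.byref(client.receiver), size)
                    handle(msg)
                    msg = client.read_message()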
:returns: A new DLTMessage on successful read, None otherwise :rtype: DLTMessage|None """ msg = DLTMessage(verbose=verbose) res = dltlib.dlt_message_read( ctypes.byref(msg), ctypes.cast(self.receiver.buf, ctypes.POINTER(ctypes.c_uint8)), ctypes.c_uint(self.receiver.bytesRcvd), # length ctypes.c_int(0), # resync ctypes.c_int(verbose), ) # verbose if res != DLT_MESSAGE_ERROR_OK: # - failed to read a complete message, possibly read an incomplete # message return None # prepare storage header if msg.standardheader.htyp & DLT_HTYP_WEID: dltlib.dlt_set_storageheader(msg.p_storageheader, msg.headerextra.ecu) else: dltlib.dlt_set_storageheader(msg.p_storageheader, ctypes.c_char_p(b"")) return msg # NEW_API - ensure backwards compatibility @property def serial_mode(self): """Get the mode""" return getattr(self, "mode", getattr(super(DLTClient, self), "serial_mode", 0)) @ctypes.CFUNCTYPE(ctypes.c_int, ctypes.POINTER(DLTMessage), ctypes.c_void_p) def msg_callback(msg, data): # pylint: disable=no-self-argument """Implements a simple callback that prints a dlt message received""" if msg is None: print("NULL message in callback") return -1 if msg.contents.p_standardheader.contents.htyp & DLT_HTYP_WEID: dltlib.dlt_set_storageheader(msg.contents.p_storageheader, msg.contents.headerextra.ecu) else: dltlib.dlt_set_storageheader(msg.contents.p_storageheader, ctypes.c_char_p(b"")) print(msg.contents) return 0 def client_loop(self): """Executes native dlt_client_main_loop() after registering msg_callback method as callback""" dltlib.dlt_client_register_message_callback(self.msg_callback) dltlib.dlt_client_main_loop(ctypes.byref(self), None, self.verbose) def _set_udp_multicast_buffer_size(self, custom_fd_buffer_size_bytes=None, custom_buffer_size_bytes=None) -> None: fd_buffer_size = int(self._udp_fd_buffer_size_bytes or custom_fd_buffer_size_bytes or 0) buffer_size_bytes = int(self._udp_buffer_size_bytes or custom_buffer_size_bytes or 0) if fd_buffer_size: # Socket options are associated with an open file description. This # means that file descriptors duplicated as a consequence of dup() # (or similar) or fork() share the same set of socket options. # -- Chapter 61.9 Socket Options. # The Linux Programming Interface, p.1279 # # The buffer size can be changed with a new fd which is created by # dup system call (it's the internal implementation in # `socket.fromfd`), so the code creates a socket instance first # configures it and directly close it. with socket.fromfd(self.sock, socket.AF_INET, socket.SOCK_DGRAM) as conf_socket: logger.debug("Set UDP Multicast socket buffer size: %s kbytes", fd_buffer_size / 1024) conf_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, fd_buffer_size) real_buffer_size = int(conf_socket.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF) / 2) if real_buffer_size != fd_buffer_size: logger.warning( ( "Failed to set UDP Multicast buffer size. set_size: %s, real_size: %s. " "Bypass the error and continue" ), fd_buffer_size / 1024, real_buffer_size / 1024, ) logger.warning( ( "Please run command `sysctl -w net.core.rmem_max=%s` with root permission to " "set the maximum size and restart dlt again." ), fd_buffer_size, ) if buffer_size_bytes: logger.debug("Set UDP Multicast DltReceiver buffer size: %s kbytes", buffer_size_bytes / 1024) ret = dltlib.dlt_receiver_init( ctypes.byref(self.receiver), self.sock, self.receiver.type, buffer_size_bytes ) if ret < 0: raise RuntimeError( f"Failed to set UDP Multicast DltReceiver buffer size. 
return code: {ret}, " f"buffer_size_bytes: {buffer_size_bytes}" ) def py_dlt_file_main_loop(dlt_reader, limit=None, callback=None): """Main loop to read dlt messages from dlt file.""" try: for msg in dlt_reader: # send the message to the callback and check whether we # need to continue if callback and not callback(msg): logger.debug("callback returned 'False'. Stopping main loop") return False if limit: limit -= 1 if limit == 0: break except IOError as err: # If the dlt file is empty, main_loop should not break, so it returns True if str(err) == DLT_EMPTY_FILE_ERROR: logger.debug("Dlt file is empty now. Wait until content is written") return True raise err return True # pylint: disable=too-many-arguments,too-many-return-statements,too-many-branches def py_dlt_client_main_loop(client, limit=None, verbose=0, dumpfile=None, callback=None): """Reimplementation of dlt_client.c:dlt_client_main_loop() in order to handle callback function return value """ bad_messages = 0 while True: if bad_messages > 100: # Some bad data is coming in and we can not recover - raise an error to cause a reconnect logger.warning("Dropping connection due to multiple malformed messages") return False # check connection status by peeking on the socket for data. # Note that if the remote connection is abruptly terminated, # this will raise a socket.timeout exception which the caller is # expected to handle (possibly by attempting a reconnect) # pylint: disable=protected-access if not client.is_udp_multicast: try: ready_to_read = client._connected_socket.recv(1, socket.MSG_PEEK | socket.MSG_DONTWAIT) except OSError as os_exc: logger.error("[%s]: DLTLib closed connected socket", os_exc) return False if not ready_to_read: # - implies that the other end has called close()/shutdown() # (ie: clean disconnect) logger.debug("connection terminated, returning") return False # - check if stop flag has been set (end of loop) if callback and not callback(None): logger.debug("callback returned 'False'. Stopping main loop") return False # we now have data to read. Note that dlt_receiver_receive() # is a blocking call that only returns if there is data to be # read or the remote end closes connection. So, irrespective of # the status of the callback (in the case of dlt_broker, this is # the stop_flag Event), this loop will only proceed after the # function has returned or terminate when an exception is raised if hasattr(client.receiver, "type"): recv_size = dltlib.dlt_receiver_receive(ctypes.byref(client.receiver)) else: recv_size = dltlib.dlt_receiver_receive(ctypes.byref(client.receiver), DLT_RECEIVE_SOCKET) if recv_size <= 0: logger.error("Error while reading from socket") return False msg = client.read_message(verbose) while msg: try: if msg.apid == b"" and msg.ctid == b"": logger.debug("Received a corrupt message") bad_messages += 1 except AttributeError: logger.debug("Skipping a very corrupted message") bad_messages += 1 msg = client.read_message() continue bad_messages = 0 # save the message if dumpfile: dumpfile.write(msg.to_bytes()) # remove message from receiver buffer size = msg.headersize + msg.datasize - ctypes.sizeof(cDltStorageHeader) if msg.found_serialheader: size += DLT_ID_SIZE if dltlib.dlt_receiver_remove(ctypes.byref(client.receiver), size) < 0: logger.error("dlt_receiver_remove failed") return False # send the message to the callback and check whether we # need to continue if callback and not callback(msg): logger.debug("callback returned 'False'. 
Stopping main loop") break if limit is not None: limit -= 1 if limit == 0: break # read the next message msg = client.read_message() else: # - failed to read a complete message, rewind the client # receiver buffer pointer to start of the buffer if dltlib.dlt_receiver_move_to_begin(ctypes.byref(client.receiver)) == DLT_RETURN_ERROR: logger.error("dlt_receiver_move_to_begin failed") return False # Check if we need to keep going if callback and not callback(msg): logger.debug("callback returned 'False'. Stopping main loop") break return True def save(messages, filename, append=False): """Save DLT messages to a file :param list messages: List of messages to save :param str filename: Filename for the DLT log file the messages will be stored to :param bool append: New data will be appended to an existing file if set to True """ with open(filename, "ab" if append else "wb") as tracefile: for msg in messages: tracefile.write(msg.to_bytes()) def load(filename, filters=None, split=False, verbose=False, live_run=False): """Load DLT messages from a file :param str filename: Filename for the DLT log file the messages will be store to :param list filters: List of filters to apply [("APPID", "CTID"), ...] :param bool split: Ignored - compatibility option :param bool verbose: Be verbose :returns: A DLTFile object :rtype: DLTFile object """ cfile = cDLTFile(filename=filename, is_live=live_run) cfile.set_filters(filters) return cfile python-dlt-2.18.10.0/dlt/dlt_broker.py000066400000000000000000000253311464055136400174230ustar00rootroot00000000000000# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved. """DLT Broker is running in a loop in a separate thread until stop_flag is set and adding received messages to all registered queues """ from contextlib import contextmanager import ipaddress as ip import logging from multiprocessing import Event, Queue import queue as tqueue from dlt.dlt_broker_handlers import ( DLT_DAEMON_TCP_PORT, DLTContextHandler, DLTFilterAckMessageHandler, DLTMessageDispatcherBase, DLTMessageHandler, DLTFileSpinner, DLTTimeValue, ) DLT_CLIENT_TIMEOUT = 5 logger = logging.getLogger(__name__) # pylint: disable=invalid-name @contextmanager def create_filter_ack_queue(filter_ack_msg_handler): """Register and unregister a queue into DLTFilterAckMessageHandler""" queue = tqueue.Queue() try: filter_ack_msg_handler.register(queue) yield queue finally: filter_ack_msg_handler.unregister(queue) class DLTBroker(object): """DLT Broker class manages receiving and filtering of DLT Messages""" def __init__( self, ip_address=None, port=DLT_DAEMON_TCP_PORT, use_proxy=False, enable_dlt_time=False, enable_filter_set_ack=False, filter_set_ack_timeout=2.0, ignore_filter_set_ack_timeout=False, **kwargs, ): """Initialize the DLT Broker :param str | None ip_address: IP address of the DLT Daemon. If None, then dlt does not come with any ip listening, in other words, it comes from dlt log directly; Else, dlt comes from listening to some ip address. Defaults to TCP connection, unless a multicast address is used. In that case an UDP multicast connection will be used :param str post: Port of the DLT Daemon :param bool use_proxy: Ignored - compatibility option :param bool enable_dlt_time: Record the latest dlt message timestamp if enabled. 
:param bool enable_filter_set_ack: Wait for an ack message when sending a filter-setting message :param float filter_set_ack_timeout: Waiting time for the ack message :param bool ignore_filter_set_ack_timeout: Ignore the timeout when the value is True :param **kwargs: All other args passed to DLTMessageHandler """ # - dlt-time shared memory init self._dlt_time_value = DLTTimeValue() if enable_dlt_time else None # - handlers init self.mp_stop_flag = Event() self.filter_queue = Queue() self.message_queue = Queue() # - filter ack queue setting self.enable_filter_set_ack = enable_filter_set_ack self.ignore_filter_set_ack_timeout = ignore_filter_set_ack_timeout self.filter_set_ack_timeout = filter_set_ack_timeout if enable_filter_set_ack: # Optional[multiprocessing.Queue[Tuple[int, bool]]] # int represents the queue id, bool represents enabled or not self.filter_ack_queue = Queue() self.filter_ack_msg_handler = DLTFilterAckMessageHandler(self.filter_ack_queue) else: self.filter_ack_queue = None self.filter_ack_msg_handler = None self.msg_handler = self.create_dlt_message_dispatcher(ip_address, port, kwargs) self.context_handler = DLTContextHandler(self.filter_queue, self.message_queue) self._ip_address = ip_address self._port = port self._filename = kwargs.get("filename") def create_dlt_message_dispatcher(self, ip_address, port, client_cfg) -> DLTMessageDispatcherBase: if ip_address: # If ip_address is given, then messages are retrieved from the dlt client at run-time return self._create_dlt_message_handler(ip_address, port, client_cfg) else: # If no ip_address is given, then messages are retrieved from the given filename # The logs are written to the given filename from another process return self._create_dlt_file_spinner(client_cfg.get("filename")) def _create_dlt_message_handler(self, ip_address, port, client_cfg): client_cfg["ip_address"] = ip_address client_cfg["port"] = port client_cfg["timeout"] = client_cfg.get("timeout", DLT_CLIENT_TIMEOUT) return DLTMessageHandler( self.filter_queue, self.message_queue, self.mp_stop_flag, client_cfg, dlt_time_value=self._dlt_time_value, filter_ack_queue=self.filter_ack_queue, ) def _create_dlt_file_spinner(self, file_name): return DLTFileSpinner( self.filter_queue, self.message_queue, self.mp_stop_flag, file_name, dlt_time_value=self._dlt_time_value, filter_ack_queue=self.filter_ack_queue, ) def start(self): """DLTBroker main worker method""" if isinstance(self.msg_handler, DLTMessageHandler): logger.debug( "Starting DLTBroker with parameters: use_proxy=%s, ip_address=%s, port=%s, filename=%s, multicast=%s", False, self._ip_address, self._port, self._filename, ip.ip_address(self._ip_address).is_multicast, ) else: logger.debug("Starting DLTBroker by reading %s", self._filename) if self._dlt_time_value: logger.debug("Enable dlt time for DLTBroker.") self.msg_handler.start() self.context_handler.start() if self.enable_filter_set_ack: self.filter_ack_msg_handler.start() # - ensure we don't block on join_thread() in stop() # https://docs.python.org/2.7/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread self.filter_queue.cancel_join_thread() self.message_queue.cancel_join_thread() if self.enable_filter_set_ack: self.filter_ack_queue.cancel_join_thread() def _recv_filter_set_ack(self, context_filter_ack_queue, required_response): try: resp = context_filter_ack_queue.get(timeout=self.filter_set_ack_timeout) if resp != required_response: logger.debug("Filter-setting ack response not matched: %s, expected: %s", resp, required_response) return False return True
except tqueue.Empty as err: if self.ignore_filter_set_ack_timeout: logger.info( "Timeout for getting filter-setting ack: %s, %s", id(context_filter_ack_queue), required_response ) return None raise err def add_context(self, context_queue, filters=None): """Register context :param Queue context_queue: The queue to which new messages will be added :param tuple filters: A list of tuples (eg: [(apid, ctid)]) used to filter messages that go into this queue. """ filters = filters or [(None, None)] if not isinstance(filters, (tuple, list)): raise RuntimeError("Context queue filters must be a tuple. Ex. (('SYS', 'JOUR'), ('AUDI', 'CAPI'))") if self.enable_filter_set_ack: logger.debug("Send a filter-setting message and request an ack") with create_filter_ack_queue(self.filter_ack_msg_handler) as context_filter_ack_queue: self.context_handler.register( context_queue, filters, context_filter_ack_queue=context_filter_ack_queue ) if not self._recv_filter_set_ack(context_filter_ack_queue, True): failure_reason = "" if isinstance(self.msg_handler, DLTMessageHandler): failure_reason = ( "It's possible that the DLTClient did not start." " If it's a test case, it might be an error" ) elif isinstance(self.msg_handler, DLTFileSpinner): failure_reason = ( f"It's possible that dlt file {self._filename} is empty now. No big issue, " f"filters will be added once a new message is available in the dlt file" ) logger.warning( ( "Could not receive filter-setting message ack. %s. For now, run it anyway. " "filters: %s, queue_id: %s" ), failure_reason, filters, id(context_queue), ) else: self.context_handler.register(context_queue, filters) def remove_context(self, context_queue): """Unregister context :param Queue context_queue: The queue to unregister. """ self.context_handler.unregister(context_queue) def stop(self): """Stop the broker""" logger.info("Stopping DLTContextHandler and %s", type(self.msg_handler).__name__) self.msg_handler.break_blocking_main_loop() logger.debug("Stop %s", type(self.msg_handler).__name__) self.mp_stop_flag.set() logger.debug("Stop DLTContextHandler") self.context_handler.stop() logger.debug("Waiting on DLTContextHandler ending") self.context_handler.join() if self.enable_filter_set_ack: logger.debug("Stop DLTFilterAckMessageHandler") self.filter_ack_msg_handler.stop() logger.debug("Waiting on DLTFilterAckMessageHandler ending") self.filter_ack_msg_handler.join() logger.debug("Waiting on %s ending", type(self.msg_handler).__name__) if self.msg_handler.is_alive(): try: self.msg_handler.terminate() except OSError: pass else: self.msg_handler.join() logger.debug("DLTBroker execution done") # pylint: disable=invalid-name def isAlive(self): """Backwards compatibility method Called from mtee.testing.connectors.tools.broker_assert. Will need to be replaced in MTEE eventually. """ return any((self.msg_handler.is_alive(), self.context_handler.is_alive())) def dlt_time(self): """Get the time of the last dlt message The value is seconds from 1970/1/1 0:00:00 :rtype: float """ if self._dlt_time_value: return self._dlt_time_value.timestamp raise RuntimeError("Getting dlt time function is not enabled") @property def injection_support(self) -> bool: return bool(self._ip_address) python-dlt-2.18.10.0/dlt/dlt_broker_handlers.py000066400000000000000000000467221464055136400213060ustar00rootroot00000000000000# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved.
"""Handlers are classes that assist dlt_broker in receiving and filtering DLT messages """ from abc import ABC, abstractmethod from collections import defaultdict import ctypes import logging from multiprocessing import Lock, Process, Value from queue import Empty import socket import time from threading import Thread, Event from dlt.dlt import ( DLTClient, DLT_DAEMON_TCP_PORT, DLT_UDP_MULTICAST_BUFFER_SIZE, DLT_UDP_MULTICAST_FD_BUFFER_SIZE, cDLT_FILE_NOT_OPEN_ERROR, load, py_dlt_client_main_loop, py_dlt_file_main_loop, ) DLT_CLIENT_TIMEOUT = 5 logger = logging.getLogger(__name__) # pylint: disable=invalid-name class DLTTimeValue(object): """Create a share memory to pass the timestamp between processes The type of dlt time is float (4 bytes). There are several ways to send the value between DLTMessageHandler (it's a process) and DLTBroker. Since DLTMessageHandler has to send the value many times every second, choosing a lightweight solution is must. khiz678 studied and evaluated the following solutions for the problem. - multiprocessing.Queue (Queue in the following discussion) - multiprocessing.Pipe (Pipe in the following discussion) - multiprocessing.Value (Value in the following discussion) Value is our final solution. Queue's implementation is based on Pipe (in cpython). If the solution is based on Queue or Pipe, it needs another thread in DLTBroker process to receive and write the value to a local variable. The solution based on Value does not have such problem, only assigns the value to the shared memory directly. khiz678 also did a simple benchmark for the Value solution. It could receive more than 100000 timestamps per second. It's twice faster than Pipe's implementation. """ def __init__(self, default_value=0.0): self._timestamp_mem = Value(ctypes.c_double, default_value) @property def timestamp(self): """Get the seconds from 1970/1/1 0:00:00 :rtype: float """ with self._timestamp_mem.get_lock(): return self._timestamp_mem.value @timestamp.setter def timestamp(self, new_timestamp): with self._timestamp_mem.get_lock(): self._timestamp_mem.value = new_timestamp class DLTFilterAckMessageHandler(Thread): """Receive filter-set ack message and pass it to the corresponding ack queue""" def __init__(self, filter_ack_queue): # (multiprocessing.Queue[Tuple[ContextQueueID, bool]]) -> None super(DLTFilterAckMessageHandler, self).__init__() self.filter_ack_queue = filter_ack_queue self.stop_flag = Event() self.context_map_lock = Lock() self.context_map = {} # Dict[ContextQueueID, Queue.Queue[bool]] def stop(self): """Stops thread execution""" self.stop_flag.set() self.filter_ack_queue.put((None, None)) if self.is_alive(): self.join() def register(self, filter_ack_queue): # (Queue.Queue[bool]) -> None """Register an ack queue""" with self.context_map_lock: self.context_map[id(filter_ack_queue)] = filter_ack_queue def unregister(self, filter_ack_queue): # (Queue.Queue[bool]) -> None """Unregister an ack queue""" with self.context_map_lock: key = id(filter_ack_queue) if key in self.context_map: del self.context_map[key] def run(self): """Run the thread to recieve the message and dispatch it""" while not self.stop_flag.is_set(): queue_ack_id, enable = self.filter_ack_queue.get() if not queue_ack_id: continue with self.context_map_lock: if queue_ack_id in self.context_map: self.context_map[queue_ack_id].put(enable) else: logger.warning("Could not send an ack to the queue: %s", queue_ack_id) class DLTContextHandler(Thread): """Communication layer between the DLTContext instances and 
DLTMessageHandler Process. This class handles the transfer of messages between the process receiving traces from the DLT Daemon and the DLTContext queues. """ def __init__(self, filter_queue, message_queue): super(DLTContextHandler, self).__init__() self.stop_flag = Event() self.context_map = {} self.lock = Lock() self.filter_queue = filter_queue self.message_queue = message_queue def _make_send_filter_msg(self, queue, filters, is_register, context_filter_ack_queue=None): """Build a filter message for the filter message queue""" queue_id = id(queue) if context_filter_ack_queue: return queue_id, id(context_filter_ack_queue), filters, is_register return queue_id, filters, is_register def register(self, queue, filters=None, context_filter_ack_queue=None): """Register a queue to collect messages matching specific filters :param Queue queue: The new queue to add :param tuple filters: A list of (apid, ctid) tuples used to filter messages that go into this queue. """ if filters is None: filters = [(None, None)] queue_id = id(queue) # - unique identifier for this queue with self.lock: self.context_map[queue_id] = (queue, filters) # - inform the DLTMessageHandler process about this new # (queue, filter) pair self.filter_queue.put( self._make_send_filter_msg(queue, filters, True, context_filter_ack_queue=context_filter_ack_queue) ) def unregister(self, queue, context_filter_ack_queue=None): """Remove a queue from the set of queues being handled :param Queue queue: The queue to remove """ queue_id = id(queue) _, filters = self.context_map.get(queue_id, (None, None)) if filters: with self.lock: try: del self.context_map[queue_id] except KeyError: pass # - inform the DLTMessageHandler process about removal of this # (queue, filter) pair self.filter_queue.put( self._make_send_filter_msg(queue, filters, False, context_filter_ack_queue=context_filter_ack_queue) ) def run(self): """The thread's main loop""" while not self.stop_flag.is_set(): queue_id, message = None, None try: if self.message_queue.full(): logger.error("message_queue is full! put() on this queue will block") queue_id, message = self.message_queue.get_nowait() except Empty: pass if message: queue, _ = self.context_map.get(queue_id, (None, None)) if queue: queue.put(message) else: time.sleep(0.01) def stop(self): """Stops thread execution""" self.stop_flag.set() self.filter_queue.close() if self.is_alive(): self.join() class DLTMessageDispatcherBase(ABC, Process): """Base class for different dlt message dispatchers The derived class could dispatch dlt messages from dlt-daemon, or from an at-runtime written file. """ def __init__(self, filter_queue, message_queue, mp_stop_event, dlt_time_value=None, filter_ack_queue=None): """ Common members needed for common dispatching behaviours :param Queue filter_queue: contexts for filtering received dlt messages :param Queue message_queue: received dlt messages after filtering against context :param multiprocessing.Event mp_stop_event: stop signal for this process :param DLTTimeValue dlt_time_value: shared memory for recording the latest dlt message timestamp, if enabled.
:param Queue filter_ack_queue: queue for acks of accepted context filters """ super().__init__() self.filter_queue = filter_queue self.filter_ack_queue = filter_ack_queue self.message_queue = message_queue self.mp_stop_flag = mp_stop_event # - dict mapping filters to queue ids self.context_map = defaultdict(list) self._dlt_time_value = dlt_time_value def _process_filter_queue(self): """Check if filters have been added or need to be removed""" while not self.filter_queue.empty(): queue_ack_id = None msg = self.filter_queue.get_nowait() logger.debug("Process filter queue message: %s", msg) if isinstance(msg, tuple) and len(msg) == 4: queue_id, queue_ack_id, filters, add = msg else: queue_id, filters, add = msg if add: for apid_ctid in filters: self.context_map[apid_ctid].append(queue_id) else: try: for apid_ctid in filters: self.context_map[apid_ctid].remove(queue_id) if not self.context_map[apid_ctid]: del self.context_map[apid_ctid] except (KeyError, ValueError): # - queue_id already removed or not inserted pass if self.filter_ack_queue and queue_ack_id: logger.debug("Send filter ack message: queue_ack_id: %s, add: %s", queue_ack_id, add) self.filter_ack_queue.put((queue_ack_id, add)) @abstractmethod def is_valid_message(self, message): """Validate if the received message is a valid message according to the AUTOSAR doc""" return True def handle(self, message): """Function to be called for every message received :param DLTMessage message: received new DLTMessage instance :returns: True if the loop should continue, False to stop the loop and exit :rtype: bool """ self._process_filter_queue() if self.is_valid_message(message): # Dispatch the message msg_ctx = ((message.apid, message.ctid), (None, None), (message.apid, None), (None, message.ctid)) qids = ( queue_id for filters, queue_ids in self.context_map.items() for queue_id in queue_ids if filters in msg_ctx ) for queue_id in qids: if self.message_queue.full(): logger.error("message_queue is full! put() on this queue will block") self.message_queue.put((queue_id, message)) # Send the message's timestamp if self._dlt_time_value: self._dlt_time_value.timestamp = message.storage_timestamp return not self.mp_stop_flag.is_set() @abstractmethod def run(self) -> None: pass def break_blocking_main_loop(self): """All message dispatchers need a main loop to fetch dlt messages from their source. As long as messages keep arriving, the main loop does not block; it only enters the blocking state once no more messages can be dispatched. This method breaks out of that blocking state. Not all message dispatchers need to implement this method """ pass class DLTFileSpinner(DLTMessageDispatcherBase): """Process receiving the DLT messages and handing them to DLTContextHandler This process instance is responsible for collecting messages from the at-runtime written dlt log, tagging them with the correct queue id and placing them on the messages queue.
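Example (a minimal sketch, assuming filter_queue/message_queue are multiprocessing queues, stop_event is a multiprocessing.Event and the dlt file path is hypothetical, mirroring how DLTBroker._create_dlt_file_spinner wires this up): spinner = DLTFileSpinner(filter_queue, message_queue, stop_event, "/tmp/example.dlt"); spinner.start(); then consume (queue_id, DLTMessage) pairs from message_queue; to shut down: spinner.break_blocking_main_loop(); stop_event.set(); spinner.join()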
""" def __init__( self, filter_queue, message_queue, mp_stop_event, file_name, dlt_time_value=None, filter_ack_queue=None ): super().__init__(filter_queue, message_queue, mp_stop_event, dlt_time_value, filter_ack_queue) self.file_name = file_name self.dlt_reader = load(filename=self.file_name, live_run=True) def is_valid_message(self, message): """According to AUTOSAR doc, message with empty apid and empty ctid is still valid""" return message is not None def run(self): """DLTFileSpinner worker method""" logger.info("Start to process dlt file %s", self.file_name) # Even though dlt connector for ioc should only be instantiated after successful SerialConsole with fibex, # the corner case of not-existing dlt file will still be handled here with max 5 retires retries_for_non_existing_file = 5 while not self.mp_stop_flag.is_set(): try: logger.debug("py_dlt_file_main_loop") res = py_dlt_file_main_loop(self.dlt_reader, callback=self.handle) if res is False and not self.mp_stop_flag.is_set(): # main loop returned False logger.error("Too many bad messages read from %s", self.file_name) self.mp_stop_flag.set() break except KeyboardInterrupt: logger.debug("main loop manually interrupted") break except IOError as err: if str(err) == cDLT_FILE_NOT_OPEN_ERROR: # Not every time of non-existing file, cDLTFile will report error # Sometimes, it simply works through without issue. # So, no unittest could be done for this error handling if retries_for_non_existing_file == 0: logger.error("After retries, dlt file %s still does not exist", self.file_name) raise err logger.warning( "DLT file %s does not exist, will try %d times again", self.file_name, retries_for_non_existing_file, ) retries_for_non_existing_file = retries_for_non_existing_file - 1 time.sleep(1) else: raise err except Exception: # pylint: disable=broad-except logger.exception("Exception during the DLT message receive") logger.debug("DLTFileSpinner starts to quit...") if not self.dlt_reader.stop_reading_proc.is_set(): self.dlt_reader.stop_reading_proc.set() self.message_queue.close() logger.info("DLTFileSpinner worker execution complete") def break_blocking_main_loop(self): """A big user for DLTFileSpinner is IOC dlt, which does not have so many dlt messages as HU, so it is quite easy for the main loop to get into blocking state, at the moment that no more dlt messages could be dispatched. """ logger.debug("Stop iterating to file %s", self.file_name) self.dlt_reader.stop_reading_proc.set() class DLTMessageHandler(DLTMessageDispatcherBase): """Process receiving the DLT messages and handing them to DLTContextHandler This process instance is responsible for collecting messages from the DLT daemon, tagging them with the correct queue id and placing them on the messages queue. 
""" def __init__( self, filter_queue, message_queue, mp_stop_event, client_cfg, dlt_time_value=None, filter_ack_queue=None ): super().__init__(filter_queue, message_queue, mp_stop_event, dlt_time_value, filter_ack_queue) self._ip_address = client_cfg["ip_address"] self._port = client_cfg.get("port", DLT_DAEMON_TCP_PORT) self._filename = client_cfg.get("filename") self.verbose = client_cfg.get("verbose", 0) self.timeout = client_cfg.get("timeout", DLT_CLIENT_TIMEOUT) self._client = None self.tracefile = None self.last_connected = time.time() self.last_message = time.time() - 120.0 self._udp_fd_buffer_size_bytes = client_cfg.get("udp_fd_buffer_size_bytes", DLT_UDP_MULTICAST_FD_BUFFER_SIZE) self._udp_buffer_size_bytes = client_cfg.get("udp_buffer_size_bytes", DLT_UDP_MULTICAST_BUFFER_SIZE) def is_valid_message(self, message): return message and (message.apid != "" or message.ctid != "") def _client_connect(self): """Create a new DLTClient :param int timeout: Time in seconds to wait for connection. :returns: True if connected, False otherwise :rtype: bool """ if self.verbose: logger.debug( "Creating DLTClient (ip_address='%s', Port='%s', logfile='%s')", self._ip_address, self._port, self._filename, ) self._client = DLTClient( servIP=self._ip_address, port=self._port, verbose=self.verbose, udp_fd_buffer_size_bytes=self._udp_fd_buffer_size_bytes, udp_buffer_size_bytes=self._udp_buffer_size_bytes, ) connected = self._client.connect(self.timeout) if connected: logger.info("DLTClient connected to %s", self._client.servIP) return connected def run(self): """DLTMessageHandler worker method""" if self._filename is not None: logger.info("Opening the DLT trace file '%s'", self._filename) self.tracefile = open(self._filename, mode="ab", buffering=False) while not self.mp_stop_flag.is_set(): exception_occured = False if not self._client_connect(): # keep trying to reconnect, until we either successfully # connect or the stop_flag is set if time.time() - self.last_message > 60: # Once per minute log that we still have no DLT Connection logger.info( "DLT connection to %s missing since %s seconds", self._ip_address, time.time() - self.last_connected, ) self.last_message = time.time() continue try: if self.last_connected: logger.info( "DLT connection to %s re-established after %s seconds", self._ip_address, time.time() - self.last_connected, ) self.last_connected = time.time() res = py_dlt_client_main_loop(self._client, verbose=0, callback=self.handle, dumpfile=self.tracefile) if res is False and not self.mp_stop_flag.is_set(): # main loop returned False logger.warning("DLT connection to %s lost. Restarting DLT client", self._ip_address) self.last_connected = time.time() self.last_message = time.time() exception_occured = True except KeyboardInterrupt: exception_occured = True logger.debug("main loop manually interrupted") break except socket.timeout as exc: exception_occured = True logger.error("socket timeout error") logger.debug(exc) except Exception: # pylint: disable=broad-except exception_occured = True logger.exception("Exception during the DLT message receive") finally: if exception_occured: logger.debug("Closing open socket connections.") self._client.disconnect() self.message_queue.close() self._client.disconnect() logger.info("DLTMessageHandler worker execution complete") python-dlt-2.18.10.0/dlt/helpers.py000066400000000000000000000036441464055136400167410ustar00rootroot00000000000000# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved. 
"""DLT client helpers""" class LimitCondition(object): """Condition object for counting messages""" def __init__(self, limit): """Constructor :param int limit: The maximum number of the messages for the condition """ self.limit = limit def __call__(self): if self.limit is None: return True self.limit = self.limit - 1 return self.limit >= 0 class ContinuousnessChecker(object): """ContinuousnessChecker class is intended to find problems in the order of DLT messages""" _ignore = ["DA1-DC1-0"] # control message will be ignored - there is no continuation def __init__(self, start=0): self._index = start self._counter = dict() def __call__(self, message): key = "{}-{}-{}".format(message.apid, message.ctid, message.seid) self._index += 1 if key in self._ignore: return if key in self._counter: # message of current type already received - check the continuousness err_msg = "Missing message detected. Message" err_msg += " #{} (apid='%s', ctid='%s', seid='%s')" % (message.apid, message.ctid, message.seid) err_msg += " should have counter '{}' instead of '{}'" if not (self._counter[key] + 1) % 256 == message.mcnt: counter = self._counter[key] self._counter[key] = message.mcnt raise RuntimeError(err_msg.format(self._index - 1, (counter + 1) % 256, message.mcnt)) self._counter[key] = message.mcnt else: # first message of current type self._counter[key] = message.mcnt def bytes_to_str(byte_or_str): """Return string from bytes""" if isinstance(byte_or_str, bytes): return byte_or_str.decode("utf8", errors="replace") return str(byte_or_str) python-dlt-2.18.10.0/dlt/py_dlt_receive.py000066400000000000000000000044711464055136400202730ustar00rootroot00000000000000# Copyright (C) 2017. BMW Car IT GmbH. All rights reserved. """DLT Receive using py_dlt""" import argparse import logging import time from dlt.dlt import DLT_UDP_MULTICAST_FD_BUFFER_SIZE, DLT_UDP_MULTICAST_BUFFER_SIZE from dlt.dlt_broker import DLTBroker logging.basicConfig(format="%(asctime)s %(name)s %(levelname)-8s %(message)s") root_logger = logging.getLogger() # pylint: disable=invalid-name logger = logging.getLogger("py-dlt-receive") # pylint: disable=invalid-name def parse_args(): """Parse command line arguments""" logger.info("Parsing arguments") parser = argparse.ArgumentParser(description="Receive DLT messages") parser.add_argument("--host", required=True, help="hostname or ip address to connect to") parser.add_argument("--file", required=True, help="The file into which the messages will be written") parser.add_argument( "--udp-fd-buffer-size", dest="udp_fd_buffer_size", default=DLT_UDP_MULTICAST_FD_BUFFER_SIZE, type=int, help=f"Set the socket buffer size in udp multicast mode. default: {DLT_UDP_MULTICAST_FD_BUFFER_SIZE} bytes", ) parser.add_argument( "--udp-buffer-size", dest="udp_buffer_size", default=DLT_UDP_MULTICAST_BUFFER_SIZE, type=int, help=f"Set the DltReceiver buffer size in udp multicast mode. 
default: {DLT_UDP_MULTICAST_BUFFER_SIZE} bytes", ) return parser.parse_args() def dlt_receive(options): """Receive DLT messages via DLTBroker""" logger.info("Creating DLTBroker instance") broker = DLTBroker( ip_address=options.host, filename=options.file, udp_fd_buffer_size_bytes=options.udp_fd_buffer_size, udp_buffer_size_bytes=options.udp_buffer_size, ) logger.info("Starting DLTBroker") broker.start() # start the loop try: logger.info("Receiving messages...") while True: time.sleep(0.1) except KeyboardInterrupt: logger.info("Interrupted...") finally: logger.info("Stopping DLT broker") broker.stop() logger.info("Stopped DLT broker") def main(): """Main function""" root_logger.setLevel(level=logging.INFO) options = parse_args() logger.info("Parsed arguments: %s", options) dlt_receive(options) if __name__ == "__main__": main() python-dlt-2.18.10.0/pyproject.toml000066400000000000000000000042101464055136400170460ustar00rootroot00000000000000[project] name = "dlt" dynamic = ["version"] description = "Python implementation for DLT" authors = [ {name = "BMW CarIT", email="carit.info@bmw.de"}, ] readme = "README.md" license = {file = "LICENCE.txt"} classifiers = [ "Development Status :: 5 - Production/Stable", "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)", "Intended Audience :: Developers", "Topic :: Software Development", "Topic :: System :: Logging", "Programming Language :: Python :: 3", ] dependencies = [ ] [project.optional-dependencies] dev = [ "black>=22.10", "flake8>=5", "pytest>=7.2.0", "pytest-cov>=4.0.0" ] [project.urls] "Homepage" = "https://github.com/bmwcarit/python-dlt" [project.scripts] py_dlt_receive = "dlt.py_dlt_receive:main" [tool.setuptools.packages.find] include = ["dlt*"] exclude = ["playbook*", "zuul.d*", "extracted_files*", "tests"] [build-system] requires = ["setuptools>=45", "setuptools-git-versioning"] build-backend = "setuptools.build_meta" [tool.setuptools-git-versioning] enabled = true dev_template = "{tag}.dev{ccount}+{sha}" [tool.black] line-length = 119 target_version = ['py37'] include = '\.pyi?$' exclude = ''' ( /( \.eggs # exclude a few common directories in the | \.git # root of the project | \.hg | \.mypy_cache | \.tox | \.venv | _build | buck-out | build | dist )/ | foo.py # also separately exclude a file named foo.py in # the root of the project | _version.py ) ''' [tool.ruff] line-length = 119 select = ["E", "F", "Q", "D"] # the following is equivalent to --docstring-convention=pep8 extend-ignore = [ "D100", "D107", "D105", "D401", "D101", "D102", "D103", "D104", "D200", "D400", "D203", "D205", "D212", "D213", "D214", "D215", "D404", "D405", "D406", "D407", "D408", "D409", "D410", "D411", "D413", "D415", "D416", "D417", ] # D104: Missing docstring in public package # This D104 error will be ignored only in __init__ files per-file-ignores = {"__init__.py" = ["D104"]} python-dlt-2.18.10.0/tests/000077500000000000000000000000001464055136400152755ustar00rootroot00000000000000python-dlt-2.18.10.0/tests/__init__.py000066400000000000000000000000741464055136400174070ustar00rootroot00000000000000# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved. python-dlt-2.18.10.0/tests/condition_test.py000066400000000000000000000006551464055136400207010ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved.
from dlt.helpers import LimitCondition class TestsLimitCondition(object): __test__ = True def test_none(self): cond = LimitCondition(None) assert cond() def test_limit_decreasing(self): cond = LimitCondition(2) cond() assert cond.limit == 1 assert cond() # limit=0 assert not cond() # limit=-1 python-dlt-2.18.10.0/tests/continuousness_helper_test.py000066400000000000000000000036771464055136400233530ustar00rootroot00000000000000from dlt.helpers import ContinuousnessChecker import pytest class Msg(object): def __init__(self, apid, ctid, seid, mcnt): self.apid = apid self.ctid = ctid self.seid = seid self.mcnt = mcnt def run_check(messages): cont = ContinuousnessChecker() for msg in messages: cont(msg) class TestsContinuousness(object): def test_simple(self): messages = [ Msg("X", "Y", "99", 4), Msg("X", "Y", "99", 5), Msg("X", "Y", "99", 6), Msg("X", "Y", "99", 7), Msg("X", "Y", "99", 8), ] run_check(messages) def test_simple_missing(self): with pytest.raises(RuntimeError): messages = [ Msg("X", "Y", "99", 4), Msg("X", "Y", "99", 5), Msg("X", "Y", "99", 6), # 7 is missing Msg("X", "Y", "99", 8), Msg("X", "Y", "99", 9), ] run_check(messages) def test_simple_over(self): # the message counter is an unsigned char, so it counts up to 255 and then restarts at 0 messages = [Msg("X", "Y", "99", 254), Msg("X", "Y", "99", 255), Msg("X", "Y", "99", 0), Msg("X", "Y", "99", 1)] run_check(messages) def test_simple_reset(self): with pytest.raises(RuntimeError): messages = [Msg("X", "Y", "99", 230), Msg("X", "Y", "99", 231), Msg("X", "Y", "99", 0)] run_check(messages) def test_ignore_control(self): messages = [Msg("DA1", "DC1", "0", 0), Msg("X", "Y", "99", 231), Msg("DA1", "DC1", "0", 0)] run_check(messages) def test_zeros_da1_dc1(self): messages = [Msg("DA1", "DC1", "0", 0), Msg("DA1", "DC1", "0", 0)] run_check(messages) def test_zeros_non_da1_dc1(self): with pytest.raises(RuntimeError): messages = [Msg("X", "Y", "0", 0), Msg("X", "Y", "0", 0)] run_check(messages) python-dlt-2.18.10.0/tests/dlt_broker_from_file_spinner_test.py000066400000000000000000000210551464055136400246140ustar00rootroot00000000000000# Copyright (C) 2023. BMW Car IT GmbH. All rights reserved. """Test DLTBroker with message handler DLTFileSpinner""" import os import pytest import tempfile import time import unittest from unittest.mock import ANY, patch from queue import Queue, Empty from dlt.dlt_broker import DLTBroker, logger from tests.utils import ( stream_multiple, stream_with_params, append_stream_to_file, create_messages, append_message_to_file, ) class TestDLTBrokerFromDLTFileSpinnerWithNotExistingDLT(unittest.TestCase): def setUp(self) -> None: self.broker = None _, self.dlt_file_name = tempfile.mkstemp(suffix=b".dlt") def tearDown(self) -> None: if self.broker: self.broker.stop() if os.path.exists(self.dlt_file_name): os.remove(self.dlt_file_name) def test_broker_with_not_existing_dlt_file(self): """ Test DLTBroker could work with a non-existing dlt file 1. prepare a file name which does not exist 2. start dlt broker to dispatch messages from this non-existing file --> no error 3. dlt broker could not add context successfully, but encounters a warning message 4. no message could be dispatched from the non-existing file and Queue.Empty is raised 5.
dlt_time is 0.0, because it could not be reset according to the latest timestamp of messages """ # Remove the dlt file os.remove(self.dlt_file_name) # Start broker with non-existing dlt file self.broker = DLTBroker( filename=self.dlt_file_name, enable_dlt_time=True, enable_filter_set_ack=True, ignore_filter_set_ack_timeout=True, ) self.broker.start() # Adding a context should report a warning message queue = Queue(maxsize=0) with patch.object(logger, "warning") as logger_mock: self.broker.add_context(queue, filters=None) logger_mock.assert_called_with(ANY, ANY, [(None, None)], id(queue)) # A non-existing dlt file should not throw any exception out for _ in range(5): with pytest.raises(Empty): queue.get_nowait() # dlt_time is still 0.0, since it was never reset with a timestamp from a message self.assertEqual(self.broker.dlt_time(), 0.0) def test_broker_with_later_created_dlt_file(self): """ Simulate a scenario: first the dlt file does not exist, then the dlt file is created and written with messages. 1. delete the dlt file 2. start broker 3. create the dlt file and write 1 sample message Expectation: 1 message could be dispatched from broker """ # 1. delete the dlt file os.remove(self.dlt_file_name) # 2. Start broker with non-existing dlt file self.broker = DLTBroker( filename=self.dlt_file_name, enable_dlt_time=True, enable_filter_set_ack=True, ignore_filter_set_ack_timeout=True, ) self.broker.start() # Adding a context should report a warning message queue = Queue(maxsize=0) self.broker.add_context(queue, filters=None) # 3. Write 1 sample message to the dlt file append_stream_to_file(stream_with_params, self.dlt_file_name) # Expectation: 1 message could be dispatched from broker time.sleep(0.5) self.assertIsNotNone(queue.get_nowait()) # If we try to dispatch for another time, exception Queue.Empty is thrown, # because there is no new log from dlt file with pytest.raises(Empty): queue.get_nowait() class TestDLTBrokerFromDLTFileSpinner(unittest.TestCase): def setUp(self): # Dlt file is created with empty content _, self.dlt_file_name = tempfile.mkstemp(suffix=b".dlt") self.dispatched_message_queue = Queue(maxsize=0) # Instantiate DLTBroker, ignoring filter ack timeouts self.broker = DLTBroker( filename=self.dlt_file_name, enable_dlt_time=True, enable_filter_set_ack=True, ignore_filter_set_ack_timeout=True, ) self.broker.start() self.broker.add_context(self.dispatched_message_queue, filters=None) def tearDown(self): self.broker.stop() os.remove(self.dlt_file_name) def test_001_dispatch_from_empty_dlt_file(self): """ From an empty file, no message could be dispatched from the queue and Queue.Empty is raised. dlt_time is 0.0, because it could not be reset according to the latest timestamp of messages """ for _ in range(5): with pytest.raises(Empty): self.dispatched_message_queue.get_nowait() self.assertEqual(self.broker.dlt_time(), 0.0) def test_002_dispatch_from_real_dlt_file(self): """ Test DltBroker dispatches from a run-time written dlt file With a running dlt broker: 1. Write 2 sample messages to dlt file 2. These two messages could be dispatched with the running dlt broker With another try to dispatch, Queue.Empty is thrown, because no more logs could be read from dlt log; dlt_time from dlt_broker is equal to the timestamp of 2nd message 3. Append another 1 message to the same dlt file 4.
Total 3 messages could be dispatched with the dlt broker With another try to dispatch, Queue.Empty is thrown, because no more logs could be read from dlt log; dlt_time from dlt_broker is equal to the timestamp of 3rd message """ # 1. Write 2 sample messages to dlt file append_stream_to_file(stream_multiple, self.dlt_file_name) # 2. Dispatch 2 messages from dlt broker time.sleep(0.1) message_1 = self.dispatched_message_queue.get_nowait() time.sleep(0.1) message_2 = self.dispatched_message_queue.get_nowait() self.assertNotEqual(message_1, message_2) # If we try to dispatch for another time, exception Queue.Empty is thrown, # because there is no new log from dlt file with pytest.raises(Empty): self.dispatched_message_queue.get_nowait() # Validate dlt time from broker self.assertEqual(self.broker.dlt_time(), message_2.storage_timestamp) # 3. Append another 1 message to the same dlt file append_stream_to_file(stream_with_params, self.dlt_file_name) # 4. Total 3 messages could be dispatched with the dlt broker time.sleep(0.1) message_3 = self.dispatched_message_queue.get_nowait() self.assertNotEqual(message_1, message_3) self.assertNotEqual(message_2, message_3) # If try to dispatch for another time, exception Queue.Empty is thrown, # because there is no new log from dlt file with pytest.raises(Empty): self.dispatched_message_queue.get_nowait() # Validate dlt time from broker self.assertEqual(self.broker.dlt_time(), message_3.storage_timestamp) def test_003_dispatch_from_real_dlt_file(self): """ Test DltBroker dispatches apid==b"" and ctid==b"" message from a run-time written dlt file With a running dlt broker: 1. Write apid==b"" and ctid==b"" message to dlt file 2. This message could be dispatched with the running dlt broker a. With another try to dispatch, Queue.Empty is thrown, because no more logs could be read from dlt log; b. dlt_time from dlt_broker is equal to the timestamp of this message c. the received message should have apid==b"" and ctid==b"" """ # 1. Write apid==b"" and ctid==b"" message to dlt file # Construct a message with apid==b"" and ctid==b"" message = create_messages(stream_with_params, from_file=True)[0] message.extendedheader.apid = b"" message.extendedheader.ctid = b"" # Write this message into dlt file append_message_to_file(message, self.dlt_file_name) # 2. Dispatch from dlt broker time.sleep(0.5) message = self.dispatched_message_queue.get_nowait() # If we try to dispatch for another time, exception Queue.Empty is thrown, # because there is no new log from dlt file with pytest.raises(Empty): self.dispatched_message_queue.get_nowait() # Validate dlt time from broker self.assertEqual(self.broker.dlt_time(), message.storage_timestamp) # Expectation: the received message should have apid==b"" and ctid==b"" self.assertEqual("", message.apid) self.assertEqual("", message.ctid) python-dlt-2.18.10.0/tests/dlt_broker_time_test.py000066400000000000000000000255601464055136400220630ustar00rootroot00000000000000# Copyright (C) 2021. BMW Car IT GmbH. All rights reserved. 
"""Test DLTBroker with enabling dlt_time""" from contextlib import contextmanager from multiprocessing import Queue import queue as tqueue import time from unittest.mock import ANY, patch, MagicMock import pytest from dlt.dlt_broker import create_filter_ack_queue, DLTBroker, logger from dlt.dlt_broker_handlers import DLTContextHandler, DLTFilterAckMessageHandler, DLTMessageHandler from tests.utils import MockDLTMessage def fake_py_dlt_client_main_loop(client, callback, *args, **kwargs): return True @contextmanager def dlt_broker(pydlt_main_func=fake_py_dlt_client_main_loop, enable_dlt_time=True, enable_filter_set_ack=False): """Initialize a fake DLTBroker""" with patch("dlt.dlt_broker_handlers.DLTMessageHandler._client_connect"), patch( "dlt.dlt_broker_handlers.py_dlt_client_main_loop", side_effect=pydlt_main_func ): broker = DLTBroker("42.42.42.42", enable_dlt_time=enable_dlt_time, enable_filter_set_ack=enable_filter_set_ack) broker.msg_handler._client = MagicMock() try: broker.start() yield broker finally: broker.stop() @contextmanager def dlt_filter_ack_msg_handler(): queue = Queue() handler = DLTFilterAckMessageHandler(queue) try: handler.start() queue.cancel_join_thread() yield (handler, queue) finally: handler.stop() queue.close() def fake_dlt_msg_handler(msg, with_filter_ack_queue): """Create a fake DLTMessageHandler""" filter_queue = MagicMock() filter_ack_queue = MagicMock() if with_filter_ack_queue else None client_cfg = {"ip_address": b"127.0.0.1", "filename": b"/dev/null", "verbose": 0, "port": "1234"} filter_queue.empty.side_effect = [False, True] filter_queue.get_nowait.return_value = msg return DLTMessageHandler( filter_queue, MagicMock(), MagicMock(), client_cfg, dlt_time_value=None, filter_ack_queue=filter_ack_queue ) def test_start_stop_dlt_broker(): """Test to stop DLTBroker with dlt-time normally""" with dlt_broker(fake_py_dlt_client_main_loop, enable_dlt_time=True) as broker: assert broker._dlt_time_value def test_start_stop_dlt_broker_without_dlt_time(): """Test to stop DLTBroker without dlt-time normally""" with dlt_broker(fake_py_dlt_client_main_loop, enable_dlt_time=False) as broker: assert not broker._dlt_time_value @pytest.mark.parametrize( "input_sec,input_msec,expected_value", [ (42, 42, 42.42), # normal test case (1618993559, 7377682, 1618993559.7377682), # big value. 
The value will be truncated when type is not double ], ) def test_dlt_broker_get_dlt_time(input_sec, input_msec, expected_value): """Test to get time from DLTBroker""" def handle(client, callback=None, *args, **kwargs): return callback(MockDLTMessage(payload="test_payload", sec=input_sec, msec=input_msec)) with dlt_broker(handle) as broker: time.sleep(0.01) assert broker.dlt_time() == expected_value def test_dlt_broker_get_latest_dlt_time(): """Test to get the latest time from DLTBroker""" # ref: https://stackoverflow.com/questions/3190706/nonlocal-keyword-in-python-2-x time_value = {"v": 42} def handle(client, callback=None, *args, **kwargs): if time_value["v"] < 45: time_value["v"] += 1 time.sleep(0.01) return callback(MockDLTMessage(payload="test_payload", sec=time_value["v"], msec=42)) with dlt_broker(handle) as broker: time_vals = set() for i in range(10): time_vals.add(broker.dlt_time()) time.sleep(0.01) assert sorted(time_vals) == [0.0, 43.42, 44.42, 45.42] def test_start_stop_dlt_broker_with_dlt_ack_msg_handler(): """Test to stop DLTBroker with ack msg handler normally""" with dlt_broker(fake_py_dlt_client_main_loop, enable_dlt_time=True, enable_filter_set_ack=True) as broker: assert broker.filter_ack_msg_handler def test_start_stop_dlt_broker_without_dlt_ack_msg_handler(): """Test to stop DLTBroker without ack msg handler normally""" with dlt_broker(fake_py_dlt_client_main_loop, enable_dlt_time=True, enable_filter_set_ack=False) as broker: assert not broker.filter_ack_msg_handler def test_create_filter_ack_queue(): """Test to register and unregister an ack queue""" handler_mock = MagicMock() with create_filter_ack_queue(handler_mock) as queue: queue.put(True) assert queue.get() handler_mock.register.assert_called_with(queue) handler_mock.unregister.assert_called_with(queue) @pytest.mark.parametrize( "ack,required_ack,return_val", [ (True, True, True), (False, False, True), (True, False, False), (False, True, False), ], ) def test_recv_filter_set_ack(ack, required_ack, return_val): """Test to receive an ack value""" queue = tqueue.Queue() queue.put(ack) with dlt_broker(enable_filter_set_ack=True) as broker: assert return_val == broker._recv_filter_set_ack(queue, required_ack) def test_recv_filter_set_ack_timeout_ignore(): """Test not to receive an ack value""" queue = tqueue.Queue() with dlt_broker(enable_filter_set_ack=True) as broker: broker.filter_set_ack_timeout = 0.01 broker.ignore_filter_set_ack_timeout = True assert not broker._recv_filter_set_ack(queue, True) def test_recv_filter_set_ack_timeout_exception(): """Test not to receive an ack value and with an exception""" queue = tqueue.Queue() with dlt_broker(enable_filter_set_ack=True) as broker: broker.filter_set_ack_timeout = 0.01 broker.ignore_filter_set_ack_timeout = False with pytest.raises(tqueue.Empty) as err: broker._recv_filter_set_ack(queue, True) assert not str(err.value) def test_add_context_with_ack(): """Test to send a filter-setting message with required ack""" queue = tqueue.Queue() with patch("dlt.dlt_broker.DLTBroker._recv_filter_set_ack", return_value=True) as ack_mock: with dlt_broker(enable_filter_set_ack=True) as broker: ori_context_handler = broker.context_handler broker.context_handler = MagicMock() try: broker.add_context(queue, [("APID", "CTID")]) broker.context_handler.register.assert_called() ack_mock.assert_called() finally: broker.context_handler = ori_context_handler def test_add_context_with_ack_warning(): """Test to send a filter-setting message but not received an ack""" queue = 
tqueue.Queue() with patch("dlt.dlt_broker.DLTBroker._recv_filter_set_ack", return_value=False) as ack_mock, patch.object( logger, "warning" ) as logger_mock: with dlt_broker(enable_filter_set_ack=True) as broker: ori_context_handler = broker.context_handler broker.context_handler = MagicMock() try: broker.add_context(queue, [("APID", "CTID")]) broker.context_handler.register.assert_called() ack_mock.assert_called() logger_mock.assert_called_with(ANY, ANY, [("APID", "CTID")], id(queue)) finally: broker.context_handler = ori_context_handler def test_start_stop_dlt_filter_ack_msg_handler(): """Test to start/stop DLTFilterAckMessageHandler normally""" with dlt_filter_ack_msg_handler() as (handler, _): pass assert not handler.is_alive() def test_dlt_filter_ack_msg_handler_register(): """Test to register a new ack queue into DLTFilterAckMessageHandler""" queue_ack = tqueue.Queue() with dlt_filter_ack_msg_handler() as (handler, queue): handler.register(queue_ack) queue.put((id(queue_ack), True)) assert queue_ack.get() def test_dlt_filter_ack_msg_handler_unregister(): """Test to unregister a new ack queue into DLTFilterAckMessageHandler""" queue_ack = tqueue.Queue() with dlt_filter_ack_msg_handler() as (handler, queue): handler.register(queue_ack) handler.unregister(queue_ack) with pytest.raises(tqueue.Empty): queue.put((id(queue_ack), False)) queue_ack.get_nowait() def test_make_send_filter_msg(): """Test to generate a filter message""" handler = DLTContextHandler(MagicMock(), MagicMock()) is_register = True filters = [("APID", "CTID")] queue = MagicMock() assert handler._make_send_filter_msg(queue, filters, is_register) == (id(queue), filters, is_register) def test_make_send_filter_msg_with_ack_queue(): """Test to generate a filter message with ack queue setting""" handler = DLTContextHandler(MagicMock(), MagicMock()) is_register = True filters = [("APID", "CTID")] queue = MagicMock() queue_ack = MagicMock() assert handler._make_send_filter_msg(queue, filters, is_register, context_filter_ack_queue=queue_ack) == ( id(queue), id(queue_ack), filters, is_register, ) def test_dlt_message_handler_process_filter_queue_add(): """Test to add a filter""" handler = fake_dlt_msg_handler(msg=(42, [("APID", "CTID")], True), with_filter_ack_queue=True) handler._process_filter_queue() assert handler.context_map[("APID", "CTID")] == [42] handler.filter_ack_queue.put.assert_not_called() def test_dlt_message_handler_process_filter_queue_add_ack(): """Test to add a filter with ack""" handler = fake_dlt_msg_handler(msg=(42, 43, [("APID", "CTID")], True), with_filter_ack_queue=True) handler._process_filter_queue() assert handler.context_map[("APID", "CTID")] == [42] handler.filter_ack_queue.put.assert_called_with((43, True)) def test_dlt_message_handler_process_filter_queue_remove(): """Test to remove a filter""" handler = fake_dlt_msg_handler(msg=(42, [("APID", "CTID")], False), with_filter_ack_queue=True) handler.context_map[("APID", "CTID")].append(42) handler._process_filter_queue() assert ("APID", "CTID") not in handler.context_map handler.filter_ack_queue.put.assert_not_called() def test_dlt_message_handler_process_filter_queue_remove_ack(): """Test to remove a filter with ack""" handler = fake_dlt_msg_handler(msg=(42, 43, [("APID", "CTID")], False), with_filter_ack_queue=True) handler.context_map[("APID", "CTID")].append(42) handler._process_filter_queue() assert ("APID", "CTID") not in handler.context_map handler.filter_ack_queue.put.assert_called_with((43, False)) def 
test_dlt_message_handler_process_filter_queue_remove_exception(): """Test to remove a filter when the filter does not exists""" handler = fake_dlt_msg_handler(msg=(42, [("APID", "CTID")], False), with_filter_ack_queue=True) handler._process_filter_queue() assert not handler.context_map[("APID", "CTID")] handler.filter_ack_queue.put.assert_not_called() python-dlt-2.18.10.0/tests/dlt_client_unit_test.py000066400000000000000000000022051464055136400220650ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved. """Basic unittests for DLTClient class""" import unittest from unittest.mock import patch, Mock from dlt.dlt import DLTClient, DLT_RETURN_OK, DLT_RETURN_ERROR class TestDLTClient(unittest.TestCase): def setUp(self): # - patch port so that connect fails even if dlt-daemon is running self.client = DLTClient(servIP="127.0.0.1", port=424242) def test_connect_with_timeout_failed(self): # - timeout error self.assertFalse(self.client.connect(timeout=2)) # - dlt_receiver_init error with patch("socket.create_connection", return_value=Mock(fileno=Mock(return_value=2000000))), patch( "dlt.dlt.dltlib.dlt_receiver_init", return_value=DLT_RETURN_ERROR ): self.assertFalse(self.client.connect(timeout=2)) def test_connect_with_timeout_success(self): with patch("socket.create_connection", return_value=Mock(fileno=Mock(return_value=2000000))), patch( "dlt.dlt.dltlib.dlt_receiver_init", return_value=DLT_RETURN_OK ): self.assertTrue(self.client.connect(timeout=2)) python-dlt-2.18.10.0/tests/dlt_context_handler_unit_test.py000066400000000000000000000151651464055136400240010ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved. from multiprocessing import Queue as mp_queue from queue import Empty, Queue import time import unittest from dlt.dlt_broker_handlers import DLTContextHandler from tests.utils import create_messages, stream_one, stream_multiple class TestDLTContextHandler(unittest.TestCase): def setUp(self): self.filter_queue = mp_queue() self.message_queue = mp_queue() self.handler = DLTContextHandler(self.filter_queue, self.message_queue) def test_init(self): self.assertFalse(self.handler.stop_flag.is_set()) self.assertFalse(self.handler.is_alive()) self.assertTrue(self.handler.filter_queue.empty()) self.assertTrue(self.handler.message_queue.empty()) def test_register_no_filter(self): queue = Queue() queue_id = id(queue) self.handler.register(queue) # When no filter is specified, filter (None, None) should be # added (ie: match all messages) self.assertIn(queue_id, self.handler.context_map) self.assertEqual(self.handler.context_map[queue_id], (queue, [(None, None)])) self.assertEqual(self.handler.filter_queue.get(), (queue_id, [(None, None)], True)) def test_register_single_filter(self): queue = Queue() queue_id = id(queue) filters = ("SYS", "JOUR") self.handler.register(queue, filters) # Specified, filter should be added to filter_queue self.assertIn(queue_id, self.handler.context_map) self.assertEqual(self.handler.context_map[queue_id], (queue, filters)) self.assertEqual(self.handler.filter_queue.get(), (queue_id, filters, True)) def test_register_similar_filters(self): queue0 = Queue() queue_id0 = id(queue0) filters0 = ("SYS", "JOUR") queue1 = Queue() queue_id1 = id(queue1) filters1 = ("SYS", "JOUR") self.handler.register(queue0, filters0) self.handler.register(queue1, filters1) # Each queue should have a unique entry in the context_map and # filter_queue (even if they have the same filter) self.assertIn(queue_id0, 
self.handler.context_map) self.assertIn(queue_id1, self.handler.context_map) self.assertEqual(self.handler.context_map[queue_id0], (queue0, filters0)) self.assertEqual(self.handler.context_map[queue_id1], (queue1, filters1)) self.assertEqual(self.handler.filter_queue.get(), (queue_id0, filters0, True)) self.assertEqual(self.handler.filter_queue.get(), (queue_id1, filters1, True)) def test_unregister(self): queue = Queue() queue_id = id(queue) filters = ("SYS", "JOUR") self.handler.register(queue, filters) self.assertIn(queue_id, self.handler.context_map) self.assertEqual(self.handler.filter_queue.get(), (queue_id, filters, True)) self.handler.unregister(queue) self.assertNotIn(queue_id, self.handler.context_map) self.assertEqual(self.handler.filter_queue.get(), (queue_id, filters, False)) def test_run_no_messages(self): try: self.handler.start() time.sleep(0.2) self.handler.stop() self.assertTrue(self.handler.stop_flag.is_set()) self.assertFalse(self.handler.is_alive()) except: # noqa: E722 self.fail() def test_run_single_context_queue(self): queue = Queue() queue_id = id(queue) filters = ("DA1", "DC1") self.handler.register(queue, filters) self.handler.start() # - simulate feeding of messages into the message_queue for _ in range(10): self.handler.message_queue.put((queue_id, create_messages(stream_one))) try: for _ in range(10): queue.get(timeout=0.01) except Empty: # - we should not get an Empty for exactly 10 messages self.fail() finally: self.handler.stop() def test_run_multiple_context_queue(self): self.handler.start() queue0 = Queue() queue_id0 = id(queue0) filters0 = ("DA1", "DC1") self.handler.register(queue0, filters0) queue1 = Queue() queue_id1 = id(queue1) filters1 = ("SYS", "JOUR") self.handler.register(queue1, filters1) # - queue with no filter queue2 = Queue() queue_id2 = id(queue2) self.handler.register(queue2) # - simulate feeding of messages into the message_queue for _ in range(10): for message in create_messages(stream_multiple, from_file=True): queue_id = queue_id0 if message.apid == "DA1" else queue_id1 self.handler.message_queue.put((queue_id, message)) # - simulate feeding of all messages for the queue with # no filter. 
self.handler.message_queue.put((queue_id2, message)) try: da1_messages = [] sys_messages = [] all_messages = [] for _ in range(10): da1_messages.append(queue0.get(timeout=0.01)) sys_messages.append(queue1.get(timeout=0.01)) all_messages.append(queue2.get(timeout=0.01)) # these queues should not get any messages from other queues self.assertTrue(all(msg.apid == "DA1" for msg in da1_messages)) self.assertTrue(all(msg.apid == "SYS" for msg in sys_messages)) # this queues should get all messages self.assertFalse( all(msg.apid == "DA1" for msg in all_messages) or all(msg.apid == "SYS" for msg in all_messages) ) except Empty: # - we should not get an Empty for at least 10 messages self.fail() finally: self.handler.stop() def test_run_unregister_with_unread_messages(self): self.handler.start() queue = Queue() queue_id = id(queue) filters = ("DA1", "DC1") self.handler.register(queue, filters) self.assertIn(queue_id, self.handler.context_map) self.handler.unregister(queue) # - simulate feeding of messages into the message_queue for _ in range(3): self.handler.message_queue.put((queue_id, create_messages(stream_one))) try: self.assertNotIn(queue_id, self.handler.context_map) # allow some time for the thread to read all messages time.sleep(0.5) self.assertTrue(self.handler.message_queue.empty()) self.assertTrue(queue.empty()) finally: self.handler.stop() python-dlt-2.18.10.0/tests/dlt_core_unit_test.py000066400000000000000000000063541464055136400215500ustar00rootroot00000000000000# Copyright (C) 2017. BMW Car IT GmbH. All rights reserved. """Basic size tests for ctype wrapper definitions, to protect against regressions""" import ctypes import importlib import os import unittest from unittest.mock import patch, MagicMock import dlt class TestCoreStructures(unittest.TestCase): def setUp(self): self.size_map = { "cDltServiceConnectionInfo": 10, "cDltStorageHeader": 16, "cDltStandardHeader": 4, "cDltStandardHeaderExtra": 12, "cDltExtendedHeader": 10, "cDLTMessage": 120, "cDltReceiver": 72, "cDltClient": 144, "cDLTFilter": 604, } def test_sizeof(self): importlib.import_module("dlt.core") for clsname, expected in self.size_map.items(): actual = ctypes.sizeof(getattr(dlt.core, clsname)) self.assertEqual( actual, expected, msg="v{0}, sizeof {1}: {2} != {3}".format( dlt.core.get_version(dlt.core.dltlib), clsname, actual, expected ), ) class TestImportSpecificVersion(unittest.TestCase): def setUp(self): self.original_api_version = dlt.core.API_VER self.version_answer = b"2.18.5" self.version_str = ( b"DLT Package Version: 2.18.5 STABLE, Package Revision: v2.18.5_5_g33fbad1, " b"build on Sep 2 2020 11:55:50\n-SYSTEMD -SYSTEMD_WATCHDOG -TEST -SHM\n" ) self.version_filename = "core_2185.py" self.version_truncate_str = "2.18.5" self.version_truncate_filename = "core_2180.py" dlt.core.API_VER = None def tearDown(self): dlt.core.API_VER = self.original_api_version def test_get_version(self): def mock_dlt_get_version(buf, buf_size): ver_cstr = ctypes.create_string_buffer(self.version_str) ctypes.memmove(buf, ver_cstr, len(ver_cstr)) mock_loaded_lib = MagicMock() mock_loaded_lib.dlt_get_version = MagicMock(side_effect=mock_dlt_get_version) api_version = dlt.core.get_version(mock_loaded_lib) self.assertEqual(mock_loaded_lib.dlt_get_version.call_count, 1) self.assertEqual(api_version, self.version_answer.decode()) self.assertEqual(dlt.core.API_VER, self.version_answer.decode()) def test_get_api_specific_file(self): with patch.object(os.path, "exists", return_value=True): filename = 
python-dlt-2.18.10.0/tests/dlt_file_spinner_unit_test.py000066400000000000000000000351651464055136400232770ustar00rootroot00000000000000# Copyright (C) 2023. BMW Car IT GmbH. All rights reserved.
import logging
from multiprocessing import Event, Queue
import os
import time
import tempfile
import unittest
from queue import Empty

from dlt.dlt_broker_handlers import DLTFileSpinner
from tests.utils import (
    create_messages,
    stream_multiple,
    stream_with_params,
    append_stream_to_file,
    append_message_to_file,
)

logger = logging.getLogger(__name__)


class TestDLTFileSpinner(unittest.TestCase):
    def setUp(self):
        self.filter_queue = Queue()
        self.message_queue = Queue()
        self.stop_event = Event()
        # Dlt file is created with empty content
        _, self.dlt_file_name = tempfile.mkstemp(suffix=b".dlt")
        self.dlt_file_spinner = DLTFileSpinner(
            self.filter_queue, self.message_queue, self.stop_event, self.dlt_file_name
        )
        # dispatched_messages from DLTFileSpinner.message_queue
        self.dispatched_messages = []

    def tearDown(self):
        if self.dlt_file_spinner.is_alive():
            self.dlt_file_spinner.break_blocking_main_loop()
            self.stop_event.set()
            self.dlt_file_spinner.join()
        if os.path.exists(self.dlt_file_name):
            os.remove(self.dlt_file_name)

    def test_init(self):
        self.assertFalse(self.dlt_file_spinner.mp_stop_flag.is_set())
        self.assertFalse(self.dlt_file_spinner.is_alive())
        self.assertTrue(self.dlt_file_spinner.filter_queue.empty())
        self.assertTrue(self.dlt_file_spinner.message_queue.empty())

    def test_run_basic_without_dlt_file(self):
        # Delete the created dlt file
        os.remove(self.dlt_file_name)

        self.assertFalse(self.dlt_file_spinner.is_alive())

        self.dlt_file_spinner.start()
        self.assertTrue(self.dlt_file_spinner.is_alive())
        self.assertNotEqual(self.dlt_file_spinner.pid, os.getpid())
        # DLT file does NOT exist
        self.assertFalse(os.path.exists(self.dlt_file_spinner.file_name))

        self.dlt_file_spinner.break_blocking_main_loop()
        self.stop_event.set()
        self.dlt_file_spinner.join()
        self.assertFalse(self.dlt_file_spinner.is_alive())

    def test_run_basic_with_empty_dlt_file(self):
        self.assertFalse(self.dlt_file_spinner.is_alive())

        self.dlt_file_spinner.start()
        self.assertTrue(self.dlt_file_spinner.is_alive())
        self.assertNotEqual(self.dlt_file_spinner.pid, os.getpid())
        # dlt_reader is instantiated and keeps alive
        self.assertTrue(os.path.exists(self.dlt_file_spinner.file_name))
        # Expect no dlt log is dispatched
        time.sleep(2)
        self.assertTrue(self.dlt_file_spinner.message_queue.empty())

        # First stop dlt reader, then stop DLTFileSpinner
        self.dlt_file_spinner.break_blocking_main_loop()
        self.stop_event.set()
        self.dlt_file_spinner.join()
        self.assertFalse(self.dlt_file_spinner.is_alive())

    def test_handle_add_new_filter(self):
        self.dlt_file_spinner.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertEqual(self.dlt_file_spinner.context_map[("SYS", "JOUR")], ["queue_id"])
    def test_handle_remove_filter_single_entry(self):
        self.dlt_file_spinner.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertEqual(self.dlt_file_spinner.context_map[("SYS", "JOUR")], ["queue_id"])

        self.dlt_file_spinner.filter_queue.put(("queue_id", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertNotIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)

    def test_handle_remove_filter_multiple_entries(self):
        self.dlt_file_spinner.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        self.dlt_file_spinner.filter_queue.put(("queue_id2", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertEqual(self.dlt_file_spinner.context_map[("SYS", "JOUR")], ["queue_id1", "queue_id2"])

        self.dlt_file_spinner.filter_queue.put(("queue_id1", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertEqual(self.dlt_file_spinner.context_map[("SYS", "JOUR")], ["queue_id2"])

    def test_handle_multiple_similar_filters(self):
        self.dlt_file_spinner.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.dlt_file_spinner.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertEqual(self.dlt_file_spinner.context_map[("SYS", "JOUR")], ["queue_id0", "queue_id1"])

    def test_handle_multiple_different_filters(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        time.sleep(0.01)
        self.dlt_file_spinner.handle(None)
        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertIn(("DA1", "DC1"), self.dlt_file_spinner.context_map)
        self.assertEqual(self.dlt_file_spinner.context_map[("SYS", "JOUR")], ["queue_id0"])
        self.assertEqual(self.dlt_file_spinner.context_map[("DA1", "DC1")], ["queue_id1"])

    def test_handle_message_tag_and_distribute(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        self.filter_queue.put(("queue_id2", [("SYS", None)], True))
        self.filter_queue.put(("queue_id3", [(None, "DC1")], True))
        self.filter_queue.put(("queue_id4", [(None, None)], True))
        time.sleep(0.01)

        # - simulate receiving of messages
        for _ in range(10):
            for message in create_messages(stream_multiple, from_file=True):
                self.dlt_file_spinner.handle(message)

        self.assertIn(("SYS", "JOUR"), self.dlt_file_spinner.context_map)
        self.assertIn(("DA1", "DC1"), self.dlt_file_spinner.context_map)
        self.assertIn((None, None), self.dlt_file_spinner.context_map)
        self.assertIn(("SYS", None), self.dlt_file_spinner.context_map)
        self.assertIn((None, "DC1"), self.dlt_file_spinner.context_map)
        try:
            # 60 == 10 messages each for the four filtered queues
            # (queue_id0..queue_id3) + 20 for the unfiltered (None, None) queue
            messages = [self.message_queue.get(timeout=0.01) for _ in range(60)]

            # these queues should not get any messages from other queues
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id0"]), 10)
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id1"]), 10)
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id2"]), 10)
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id3"]), 10)
            # this queue should get all messages
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id4"]), 20)
        except Empty:
            # - we should not get an Empty for the 60 expected messages
            self.fail()
    def _update_dispatch_messages_from_dlt_file_spinner(self):
        for _ in range(60):
            try:
                message = self.dlt_file_spinner.message_queue.get(timeout=0.01)
                if not self.dispatched_messages or message[1] != self.dispatched_messages[-1][1]:
                    self.dispatched_messages.append(message)
            except:  # noqa: E722
                pass

    def test_run_with_writing_to_file(self):
        """Test with a real dlt file, which is written at runtime

        1. set filter_queue properly, so that the handled messages could be added to message_queue later
        2. start DLTFileSpinner
           At this moment, no messages are written to the dlt file, so no messages in DLTFileSpinner.message_queue
        3. write 2 sample messages to the dlt file
           Expectation: we could dispatch 2 messages from DLTFileSpinner.message_queue
        4. stop DLTFileSpinner
        """
        # 1. set filter_queue properly, so that the handled messages could be added to message_queue later
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        self.filter_queue.put(("queue_id2", [("SYS", None)], True))
        self.filter_queue.put(("queue_id3", [(None, "DC1")], True))
        self.filter_queue.put(("queue_id4", [(None, None)], True))
        time.sleep(0.01)

        # 2. start DLTFileSpinner
        self.assertFalse(self.dlt_file_spinner.is_alive())
        self.dlt_file_spinner.start()
        self.assertTrue(self.dlt_file_spinner.is_alive())
        self.assertNotEqual(self.dlt_file_spinner.pid, os.getpid())
        # dlt_reader is instantiated and keeps alive
        self.assertTrue(os.path.exists(self.dlt_file_spinner.file_name))
        # With empty file content, no messages are dispatched to message_queue
        time.sleep(2)
        self.assertTrue(self.dlt_file_spinner.message_queue.empty())

        # 3. write 2 sample messages to dlt file
        append_stream_to_file(stream_multiple, self.dlt_file_name)
        # Expect the written dlt logs are dispatched to message_queue
        self._update_dispatch_messages_from_dlt_file_spinner()
        self.assertEqual(2, len(self.dispatched_messages))

        # 4. stop DLTFileSpinner
        self.dlt_file_spinner.break_blocking_main_loop()
        self.stop_event.set()
        self.dlt_file_spinner.join()
        self.assertFalse(self.dlt_file_spinner.is_alive())
    def test_run_with_writing_to_file_twice(self):
        """Test with a real dlt file, which is written to twice at runtime

        1. set filter_queue properly, so that the handled messages could be added to message_queue later
        2. start DLTFileSpinner
        3. write 2 sample messages to the dlt file
           Expectation: we could dispatch 2 messages from DLTFileSpinner.message_queue
        4. append 1 sample message to the dlt file
           Expectation: we could dispatch 3 messages from DLTFileSpinner.message_queue
        5. stop DLTFileSpinner
        """
        # 1. set filter_queue properly, so that the handled messages could be added to message_queue later
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        self.filter_queue.put(("queue_id2", [("SYS", None)], True))
        self.filter_queue.put(("queue_id3", [(None, "DC1")], True))
        self.filter_queue.put(("queue_id4", [(None, None)], True))
        time.sleep(0.01)

        # 2. start DLTFileSpinner
        self.assertFalse(self.dlt_file_spinner.is_alive())
        self.dlt_file_spinner.start()
        self.assertTrue(self.dlt_file_spinner.is_alive())
        self.assertNotEqual(self.dlt_file_spinner.pid, os.getpid())
        # dlt_reader is instantiated and keeps alive
        self.assertTrue(os.path.exists(self.dlt_file_spinner.file_name))
        # With empty file content, no messages are dispatched to message_queue
        time.sleep(2)
        self.assertTrue(self.dlt_file_spinner.message_queue.empty())

        # 3. write 2 sample messages to dlt file
        append_stream_to_file(stream_multiple, self.dlt_file_name)
        # Expect the written dlt logs are dispatched to message_queue
        self._update_dispatch_messages_from_dlt_file_spinner()
        self.assertEqual(2, len(self.dispatched_messages))

        # 4. append 1 sample message to dlt file
        append_stream_to_file(stream_with_params, self.dlt_file_name)
        self._update_dispatch_messages_from_dlt_file_spinner()
        self.assertEqual(3, len(self.dispatched_messages))

        # 5. stop DLTFileSpinner
        self.dlt_file_spinner.break_blocking_main_loop()
        self.stop_event.set()
        self.dlt_file_spinner.join()
        self.assertFalse(self.dlt_file_spinner.is_alive())

    def test_run_with_writing_empty_apid_ctid_to_file(self):
        """Test with a real dlt file, which contains a message with apid=b"" and ctid=b""

        1. set filter_queue properly, so that the handled messages could be added to message_queue later
        2. start DLTFileSpinner
           At this moment, no messages are written to the dlt file, so no messages in DLTFileSpinner.message_queue
        3. write a message with apid=b"" and ctid=b"" to the dlt file
           Expectation: we could dispatch 1 message from DLTFileSpinner.message_queue, and apid==b"" and ctid==b""
        4. stop DLTFileSpinner
        """
        # 1. set filter_queue properly, so that the handled messages could be added to message_queue later
        self.filter_queue.put(("queue_id0", [(None, None)], True))
        time.sleep(0.01)

        # 2. start DLTFileSpinner
        self.assertFalse(self.dlt_file_spinner.is_alive())
        self.dlt_file_spinner.start()
        self.assertTrue(self.dlt_file_spinner.is_alive())
        self.assertNotEqual(self.dlt_file_spinner.pid, os.getpid())
        # dlt_reader is instantiated and keeps alive
        self.assertTrue(os.path.exists(self.dlt_file_spinner.file_name))
        # With empty file content, no messages are dispatched to message_queue
        time.sleep(2)
        self.assertTrue(self.dlt_file_spinner.message_queue.empty())

        # 3. write a message to dlt file
        # Construct a message with apid==b"" and ctid==b""
        message = create_messages(stream_with_params, from_file=True)[0]
        message.extendedheader.apid = b""
        message.extendedheader.ctid = b""
        # Write this message into dlt file
        append_message_to_file(message, self.dlt_file_name)
        # Expect the written dlt logs are dispatched to message_queue
        self._update_dispatch_messages_from_dlt_file_spinner()
        self.assertEqual(1, len(self.dispatched_messages))
        # Expectation: the received message should have apid=="" and ctid==""
        self.assertEqual("", self.dispatched_messages[0][1].apid)
        self.assertEqual("", self.dispatched_messages[0][1].ctid)

        # 4. stop DLTFileSpinner
        self.dlt_file_spinner.break_blocking_main_loop()
        self.stop_event.set()
        self.dlt_file_spinner.join()
        self.assertFalse(self.dlt_file_spinner.is_alive())
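
Outside of unittest, the same wiring the tests build in setUp looks roughly like the sketch below — a hedged example assuming an existing live DLT trace file at the hypothetical path b"example.dlt" (the tests pass a bytes path from mkstemp, so a bytes path is used here as well):

# Hedged sketch, using only the constructor and queue protocol exercised above.
from multiprocessing import Event, Queue

from dlt.dlt_broker_handlers import DLTFileSpinner

filter_queue = Queue()
message_queue = Queue()
stop_event = Event()

spinner = DLTFileSpinner(filter_queue, message_queue, stop_event, b"example.dlt")
# Ask for all SYS/JOUR messages to be tagged for "my_queue" (same triple the tests push)
filter_queue.put(("my_queue", [("SYS", "JOUR")], True))

spinner.start()
try:
    queue_id, message = message_queue.get(timeout=5.0)  # items are (queue_id, message) tuples
    print(queue_id, message.apid, message.ctid)
finally:
    spinner.break_blocking_main_loop()
    stop_event.set()
    spinner.join()
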
"""Basic unittests for DLTFilter definition""" import unittest import ctypes from dlt.dlt import DLTFilter from dlt.core.core_base import DLT_FILTER_MAX, DLT_ID_SIZE class TestDLTFilter(unittest.TestCase): def setUp(self): self.dlt_filter = DLTFilter() def tearDown(self): del self.dlt_filter def test_init(self): assert len(self.dlt_filter.apid) == DLT_FILTER_MAX assert len(self.dlt_filter.ctid) == DLT_FILTER_MAX assert self.dlt_filter.counter == 0 for entry in self.dlt_filter.apid: assert ctypes.string_at(entry, DLT_ID_SIZE) == b"\0\0\0\0" for entry in self.dlt_filter.ctid: assert ctypes.string_at(entry, DLT_ID_SIZE) == b"\0\0\0\0" def test_add0(self): assert self.dlt_filter.add("AAA", "BBB") == 0 assert self.dlt_filter.counter == 1 assert len(self.dlt_filter.apid[0]) == 4 assert len(self.dlt_filter.ctid[0]) == 4 assert ctypes.string_at(self.dlt_filter.apid[0], DLT_ID_SIZE) == b"AAA\0" assert ctypes.string_at(self.dlt_filter.ctid[0], DLT_ID_SIZE) == b"BBB\0" def test_add1(self): assert self.dlt_filter.add("AAA", "BBB") == 0 assert self.dlt_filter.add("XXX", "YYY") == 0 assert self.dlt_filter.counter == 2 assert ctypes.string_at(self.dlt_filter.apid[0], DLT_ID_SIZE) == b"AAA\0" assert ctypes.string_at(self.dlt_filter.ctid[0], DLT_ID_SIZE) == b"BBB\0" assert ctypes.string_at(self.dlt_filter.apid[1], DLT_ID_SIZE) == b"XXX\0" assert ctypes.string_at(self.dlt_filter.ctid[1], DLT_ID_SIZE) == b"YYY\0" def test_add2(self): assert self.dlt_filter.add("AAAA", "BBBB") == 0 assert self.dlt_filter.add("XXX", "YYY") == 0 assert self.dlt_filter.add("CCCC", "DDDD") == 0 assert self.dlt_filter.counter == 3 assert ctypes.string_at(self.dlt_filter.apid[0], DLT_ID_SIZE) == b"AAAA" assert ctypes.string_at(self.dlt_filter.ctid[0], DLT_ID_SIZE) == b"BBBB" assert ctypes.string_at(self.dlt_filter.apid[1], DLT_ID_SIZE) == b"XXX\0" assert ctypes.string_at(self.dlt_filter.ctid[1], DLT_ID_SIZE) == b"YYY\0" assert ctypes.string_at(self.dlt_filter.apid[2], DLT_ID_SIZE) == b"CCCC" assert ctypes.string_at(self.dlt_filter.ctid[2], DLT_ID_SIZE) == b"DDDD" def test_repr(self): assert self.dlt_filter.add("AAAA", "BBBB") == 0 assert self.dlt_filter.add("XXX", "YYY") == 0 assert self.dlt_filter.add("CCCC", "DDDD") == 0 print(self.dlt_filter) assert str(self.dlt_filter) == str([(b"AAAA", b"BBBB"), (b"XXX", b"YYY"), (b"CCCC", b"DDDD")]) python-dlt-2.18.10.0/tests/dlt_main_loop_by_reading_dlt_file_unit_test.py000066400000000000000000000140431464055136400266140ustar00rootroot00000000000000# Copyright (C) 2023. BMW Car IT GmbH. All rights reserved. 
"""Basic unittests for the py_dlt_file_main_loop function""" import os import unittest import tempfile from threading import Thread import time from dlt.dlt import cDLTFile, py_dlt_file_main_loop from tests.utils import ( append_stream_to_file, stream_multiple, stream_with_params, create_messages, append_message_to_file, ) class TestMainLoopByReadingDltFile(unittest.TestCase): def setUp(self): # Empty content dlt file is created _, self.dlt_file_name = tempfile.mkstemp(suffix=b".dlt") self.dlt_reader = cDLTFile(filename=self.dlt_file_name, is_live=True, iterate_unblock_mode=False) # message_queue to store the dispatched messages from main loop self.message_queue = [] # When callback() is called, then it is reset to True self.callback_is_called = False # With this variable, we could test different return value from callback() # If callback() returns True, then main loop keeps going; otherwise, it breaks self.callback_return_value = True # Thread for main loop, which is instantiated in test case self.main_loop = None def _callback_for_message(self, message): self.callback_is_called = True print("Called here") if message: self.message_queue.append(message) return self.callback_return_value def _start_main_loop(self): self.main_loop = Thread( target=py_dlt_file_main_loop, kwargs={"dlt_reader": self.dlt_reader, "callback": self._callback_for_message}, ) # self.main_loop.daemon = True self.main_loop.start() time.sleep(1) def tearDown(self): if not self.dlt_reader.stop_reading_proc.is_set(): self.dlt_reader.stop_reading_proc.set() # After the stop of dlt_reader, main loop should be stopped automatically if self.main_loop: for _ in range(5): if not self.main_loop.is_alive(): break time.sleep(0.1) self.assertFalse(self.main_loop.is_alive()) os.remove(self.dlt_file_name) def test_001_empty_dlt_file(self): """When dlt file has empty content, then no message could be dispatched, and no return value from main loop""" self._start_main_loop() time.sleep(0.1) # When file has empty content, callback() will not be called by any message self.assertFalse(self.callback_is_called) self.assertEqual(0, len(self.message_queue)) def test_002_first_write_then_read_dlt_file(self): """ Simulate a real dlt file case: first write to it, and then use main loop to read it """ # First write to dlt file without opening main loop append_stream_to_file(stream_multiple, self.dlt_file_name) time.sleep(0.1) # Expectation: py_dlt_file_main_loop reads out the first batch messages to message_queue self._start_main_loop() time.sleep(0.1) self.assertTrue(self.callback_is_called) self.assertEqual(2, len(self.message_queue)) def test_003_first_read_then_write_dlt_file(self): """ Simulate a real dlt file case: first open main loop to read, then write to the file at opening main loop """ # First only main loop to read dlt file self._start_main_loop() # Then write to dlt file append_stream_to_file(stream_multiple, self.dlt_file_name) time.sleep(0.1) # Expect the written logs could be dispatched by main loop self.assertTrue(self.callback_is_called) self.assertEqual(2, len(self.message_queue)) def test_004_read_2_writes(self): """ Test main loop reads from 2 consecutive writes to dlt file """ # First only main loop to read dlt file self._start_main_loop() # First write to dlt file append_stream_to_file(stream_multiple, self.dlt_file_name) time.sleep(0.1) # Expect main loop could dispatch 2 logs self.assertTrue(self.callback_is_called) self.assertEqual(2, len(self.message_queue)) # Second write to dlt file, and expect to dispatch 3 logs 
    def test_004_read_2_writes(self):
        """Test that the main loop reads from 2 consecutive writes to the dlt file"""
        # First start only the main loop to read the dlt file
        self._start_main_loop()

        # First write to the dlt file
        append_stream_to_file(stream_multiple, self.dlt_file_name)
        time.sleep(0.1)
        # Expect the main loop to dispatch 2 logs
        self.assertTrue(self.callback_is_called)
        self.assertEqual(2, len(self.message_queue))

        # Second write to the dlt file, and expect 3 dispatched logs in total
        append_stream_to_file(stream_with_params, self.dlt_file_name)
        time.sleep(0.1)
        self.assertEqual(3, len(self.message_queue))

    def test_005_callback_return_false(self):
        """If the callback returns False, the main loop should exit"""
        # Set the callback return value to False
        self.callback_return_value = False

        # Write to the file
        append_stream_to_file(stream_multiple, self.dlt_file_name)
        time.sleep(0.1)

        # Start the main loop to dispatch logs
        self._start_main_loop()

        self.assertTrue(self.callback_is_called)
        # The callback returns False after it handles the first message, which terminates the main loop.
        # So, the main loop won't be able to process the second message
        self.assertEqual(1, len(self.message_queue))
        self.assertFalse(self.main_loop.is_alive())

    def test_006_read_empty_apid_ctid_message(self):
        """Simulate reading a message with apid==b"" and ctid==b"" """
        # Construct a message with apid==b"" and ctid==b""
        message = create_messages(stream_with_params, from_file=True)[0]
        message.extendedheader.apid = b""
        message.extendedheader.ctid = b""
        # Write this message into the dlt file
        append_message_to_file(message, self.dlt_file_name)

        # Expectation: py_dlt_file_main_loop reads out the first batch of messages to message_queue
        self._start_main_loop()
        time.sleep(0.1)
        self.assertTrue(self.callback_is_called)
        self.assertEqual(1, len(self.message_queue))
        # Expectation: the received message should have apid=="" and ctid==""
        self.assertEqual("", self.message_queue[0].apid)
        self.assertEqual("", self.message_queue[0].ctid)
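
The tests above run py_dlt_file_main_loop in a helper thread; the same pattern in plain application code looks roughly like this — a hedged sketch, assuming a hypothetical live trace file b"trace.dlt" that another process appends to (a bytes path is used to mirror what the tests pass):

# Hedged sketch of the threading pattern from _start_main_loop/tearDown above.
from threading import Thread

from dlt.dlt import cDLTFile, py_dlt_file_main_loop

reader = cDLTFile(filename=b"trace.dlt", is_live=True, iterate_unblock_mode=False)


def on_message(message):
    if message:
        print(message.apid, message.ctid)
    return True  # returning False terminates the main loop


loop = Thread(target=py_dlt_file_main_loop, kwargs={"dlt_reader": reader, "callback": on_message})
loop.start()
# ... later, to shut down, stop the reader and the loop follows:
reader.stop_reading_proc.set()
loop.join()
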
python-dlt-2.18.10.0/tests/dlt_main_loop_with_dlt_client_unit_test.py000066400000000000000000000062011464055136400260200ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved.
"""Basic unittests for the py_dlt_client_main_loop function"""
import ctypes
import functools
from io import BytesIO
import socket
import unittest
from unittest.mock import patch, Mock

from dlt.dlt import py_dlt_client_main_loop, DLTClient, logger
from dlt.core import cDltStorageHeader
from tests.utils import stream_one


def mock_dlt_receiver_receive_socket(client_receiver, partial=False, fail=False):
    if fail:
        return 0

    stream_one.seek(0)
    buf = stream_one.read()
    if partial:
        buf = buf[:16]

    client_receiver._obj.buf = ctypes.create_string_buffer(buf)
    client_receiver._obj.bytesRcvd = len(buf)
    return len(buf)


class TestMainLoopWithDltClient(unittest.TestCase):
    def setUp(self):
        self.client = DLTClient()
        self.client._connected_socket = Mock()

    def test_target_down(self):
        with patch.object(self.client._connected_socket, "recv", side_effect=socket.timeout):
            callback = Mock(return_value="should not be called")
            with self.assertLogs(logger=logger) as dlt_logger:
                return_value = py_dlt_client_main_loop(self.client, callback=callback)
                self.assertFalse(return_value)
            log_output = dlt_logger.output
            self.assertEqual(len(log_output), 1)
            self.assertEqual(log_output[0], "ERROR:dlt.dlt:[]: DLTLib closed connected socket")
            self.assertFalse(callback.called)

    def test_target_up_nothing_to_read(self):
        with patch.object(self.client._connected_socket, "recv", return_value=b"") as mock_recv:
            callback = Mock(return_value="should not be called")
            self.assertFalse(py_dlt_client_main_loop(self.client, callback=callback))
            self.assertEqual(mock_recv.call_count, 1)
            self.assertFalse(callback.called)

    @patch("dlt.dlt.dltlib.dlt_receiver_move_to_begin", return_value=0)
    def test_exit_if_callback_returns_false(self, *ignored):
        with patch.object(self.client._connected_socket, "recv", return_value=b"X"):
            # setup dlt_receiver_receive to return a partial message
            replacement = functools.partial(mock_dlt_receiver_receive_socket, partial=True)
            with patch("dlt.dlt.dltlib.dlt_receiver_receive", new=replacement):
                self.assertFalse(py_dlt_client_main_loop(self.client, callback=lambda msg: False))

    def test_read_message(self, *ignored):
        dumpfile = BytesIO()
        stream_one.seek(0)
        expected = stream_one.read()
        with patch.object(self.client._connected_socket, "recv", return_value=b"X"):
            # setup dlt_receiver_receive to return a complete message
            replacement = functools.partial(mock_dlt_receiver_receive_socket)
            callback = Mock(side_effect=[True, False, False])
            with patch("dlt.dlt.dltlib.dlt_receiver_receive", new=replacement):
                self.assertTrue(py_dlt_client_main_loop(self.client, dumpfile=dumpfile, callback=callback))
        self.assertEqual(dumpfile.getvalue()[ctypes.sizeof(cDltStorageHeader) :], expected)
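
The mocks above simulate what py_dlt_client_main_loop does against a real connection. The callback contract is the part worth remembering: return True to keep receiving, False to stop. A hedged sketch of only that contract, assuming `client` is an already-connected DLTClient created elsewhere:

# Hedged sketch: the callback contract exercised by the tests above.
from dlt.dlt import DLTClient, py_dlt_client_main_loop


def on_message(message):
    print(message.apid, message.ctid, message.payload_decoded)
    return message.apid != "DA1"  # stop the loop once a DA1 message arrives


def run(client: DLTClient):
    # py_dlt_client_main_loop keeps reading until the callback returns False
    # or the connection goes away.
    return py_dlt_client_main_loop(client, callback=on_message)
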
python-dlt-2.18.10.0/tests/dlt_message_handler_unit_test.py000066400000000000000000000131411464055136400237310ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved.
import os
import time
import unittest
from queue import Empty
from multiprocessing import Event, Queue

from dlt.dlt_broker_handlers import DLTMessageHandler
from tests.utils import create_messages, stream_multiple


class TestDLTMessageHandler(unittest.TestCase):
    def setUp(self):
        self.filter_queue = Queue()
        self.message_queue = Queue()
        self.client_cfg = {
            "ip_address": b"127.0.0.1",
            "filename": b"/dev/null",
            "verbose": 0,
            "port": "1234",
        }
        self.stop_event = Event()
        self.handler = DLTMessageHandler(self.filter_queue, self.message_queue, self.stop_event, self.client_cfg)

    def test_init(self):
        self.assertFalse(self.handler.mp_stop_flag.is_set())
        self.assertFalse(self.handler.is_alive())
        self.assertTrue(self.handler.filter_queue.empty())
        self.assertTrue(self.handler.message_queue.empty())

    def test_run_basic(self):
        self.assertFalse(self.handler.is_alive())
        self.handler.start()
        self.assertTrue(self.handler.is_alive())
        self.assertNotEqual(self.handler.pid, os.getpid())
        self.stop_event.set()
        self.handler.join()
        self.assertFalse(self.handler.is_alive())

    def test_handle_add_new_filter(self):
        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")], ["queue_id"])

    def test_handle_remove_filter_single_entry(self):
        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")], ["queue_id"])

        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertNotIn(("SYS", "JOUR"), self.handler.context_map)

    def test_handle_remove_filter_multiple_entries(self):
        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        self.handler.filter_queue.put(("queue_id2", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")], ["queue_id1", "queue_id2"])

        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")], ["queue_id2"])

    def test_handle_multiple_similar_filters(self):
        self.handler.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")], ["queue_id0", "queue_id1"])

    def test_handle_multiple_different_filters(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertIn(("DA1", "DC1"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")], ["queue_id0"])
        self.assertEqual(self.handler.context_map[("DA1", "DC1")], ["queue_id1"])

    def test_handle_message_tag_and_distribute(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        self.filter_queue.put(("queue_id2", [("SYS", None)], True))
        self.filter_queue.put(("queue_id3", [(None, "DC1")], True))
        self.filter_queue.put(("queue_id4", [(None, None)], True))
        time.sleep(0.01)

        # - simulate receiving of messages
        for _ in range(10):
            for message in create_messages(stream_multiple, from_file=True):
                self.handler.handle(message)

        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertIn(("DA1", "DC1"), self.handler.context_map)
        self.assertIn((None, None), self.handler.context_map)
        self.assertIn(("SYS", None), self.handler.context_map)
        self.assertIn((None, "DC1"), self.handler.context_map)
        try:
            # 60 == 10 messages each for the four filtered queues
            # (queue_id0..queue_id3) + 20 for the unfiltered (None, None) queue
            messages = [self.message_queue.get(timeout=0.01) for _ in range(60)]

            # these queues should not get any messages from other queues
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id0"]), 10)
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id1"]), 10)
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id2"]), 10)
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id3"]), 10)
            # this queue should get all messages
            self.assertEqual(len([msg for qid, msg in messages if qid == "queue_id4"]), 20)
        except Empty:
            # - we should not get an Empty for the 60 expected messages
            self.fail()
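
DLTMessageHandler is wired like DLTFileSpinner, but it reads from a live DLT daemon described by a client_cfg dict of the same shape as in setUp above. A hedged sketch, assuming a daemon is actually reachable at the configured address:

# Hedged sketch, using only the constructor arguments and queue protocol shown above.
from multiprocessing import Event, Queue

from dlt.dlt_broker_handlers import DLTMessageHandler

filter_queue, message_queue, stop_event = Queue(), Queue(), Event()
client_cfg = {"ip_address": b"127.0.0.1", "filename": b"/dev/null", "verbose": 0, "port": "1234"}

handler = DLTMessageHandler(filter_queue, message_queue, stop_event, client_cfg)
filter_queue.put(("my_queue", [(None, None)], True))  # no filter: receive everything
handler.start()
# ... consume (queue_id, message) tuples from message_queue, then shut down:
stop_event.set()
handler.join()
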
"""Basic unittests for DLT messages""" import io import unittest from dlt.dlt import DLTFilter from .utils import create_messages stream_one = io.BytesIO(b"5\x00\x00 MGHS\xdd\xf6e\xca&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00") stream_two = io.BytesIO(b"5\x00\x00 MGHS\xdd\xf6e\xca&\x01DA1\x00DC2\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00") LOOPS = 100000 class TestsDLTMessagePerf(unittest.TestCase): def setUp(self): self.msgs = [create_messages(stream_one) for i in range(int(LOOPS * 0.1))] self.msgs += [create_messages(stream_two) for i in range(int(LOOPS * 0.9))] def test_compare_dict(self): # with dict as other attrs = {"apid": "DA1", "ctid": "DC1"} for msg in self.msgs: msg.compare(other=attrs) def test_compare_filter(self): # with DLTFilter as other flt = DLTFilter() flt.add("DA1", "DC1") for msg in self.msgs: msg.compare(other=flt) def test_compare_mesage(self): # with dict as other other = create_messages(stream_one) for msg in self.msgs: msg.compare(other=other) python-dlt-2.18.10.0/tests/dlt_message_unit_test.py000066400000000000000000000233131464055136400222360ustar00rootroot00000000000000# Copyright (C) 2015. BMW Car IT GmbH. All rights reserved. """Basic unittests for DLT messages""" import io import pickle import re from unittest.mock import patch, PropertyMock import pytest from dlt.dlt import DLTMessage from tests.utils import ( create_messages, stream_one, stream_with_params, stream_multiple, stream_multiple_with_malformed_message_at_begining, msg_benoit, control_one, ) class TestsDLTMessageUnit(object): def test_malformed_message(self): msgs = create_messages(stream_multiple_with_malformed_message_at_begining, from_file=True) assert msgs[0].message_id == 1279675715 assert len(msgs) == 3 assert not msgs[0].extendedheader def test_compare_default_attrs(self): attrs = {"extendedheader.apid": "DA1", "extendedheader.ctid": "DC1"} msg = create_messages(stream_one) assert msg.compare(other=attrs) assert msg.compare(other={"extendedheader.ctid": "DC1"}) def test_equal(self): msg1 = create_messages(stream_one) msg2 = create_messages(stream_one) assert msg1 == msg2 def test_easy_attributes(self): msg = create_messages(stream_one) assert msg.ecuid == "MGHS" assert msg.seid == 0 assert msg.tmsp == 372391.26500000001 assert msg.apid == "DA1" assert msg.ctid == "DC1" def test_compare(self): msg1 = create_messages(stream_one) msg2 = create_messages(stream_one) assert msg1.compare(msg2) assert msg1.compare(other=msg2) assert msg1.compare(dict(apid="DA1", ctid="DC1")) assert not msg1.compare(dict(apid="DA1", ctid="XX")) def test_compare_regexp(self): msg1 = create_messages(stream_one) assert msg1.compare(dict(apid="DA1", ctid=re.compile(r"D.*"))) assert msg1.compare( dict(apid="DA1", ctid=re.compile(r"D.*"), payload_decoded=re.compile(r".connection_info ok.")) ) assert msg1.compare( dict(apid="DA1", ctid=re.compile(r"D.*"), payload_decoded=re.compile(r".connection_info ok.")) ) assert msg1.compare(dict(apid="DA1", ctid=re.compile(r"D.*"), payload_decoded=re.compile(r".*info ok."))) assert msg1.compare(dict(apid="DA1", ctid="DC1", payload_decoded=re.compile(r".*info ok."))) assert msg1.compare(dict(apid=re.compile(r"D."))) assert msg1.compare(dict(apid=re.compile(r"D.+"))) assert msg1.compare(dict(apid=re.compile(r"D."))) assert not msg1.compare(dict(apid=re.compile(r"X."))) def test_compare_regexp_nsm(self): nsm = create_messages( io.BytesIO(b"5\x00\x00 MGHS\xdd\xf6e\xca&\x01NSM\x00DC1\x00\x02\x0f\x00\x00" b"\x00\x02\x00\x00\x00\x00") ) nsma = create_messages( 
io.BytesIO(b"5\x00\x00 MGHS\xdd\xf6e\xca&\x01NSMADC1\x00\x02\x0f\x00\x00" b"\x00\x02\x00\x00\x00\x00") ) assert nsm.compare(dict(apid=re.compile("^NSM$"))) assert not nsma.compare(dict(apid=re.compile("^NSM$"))) assert nsm.compare(dict(apid="NSM")) assert not nsma.compare(dict(apid="NSM")) assert nsm.compare(dict(apid=re.compile("NSM"))) assert nsma.compare(dict(apid=re.compile("NSM"))) def test_compare_regexp_throw(self): nsm = create_messages( io.BytesIO(b"5\x00\x00 MGHS\xdd\xf6e\xca&\x01NSM\x00DC1\x00\x02\x0f\x00\x00" b"\x00\x02\x00\x00\x00\x00") ) with pytest.raises(Exception): assert nsm.compare(dict(apid=b"NSM"), regexp=True) def test_compare_regexp_benoit(self): msg1 = create_messages(msg_benoit, from_file=True)[0] assert msg1.compare( { "apid": "DEMO", "ctid": "DATA", "payload_decoded": re.compile("Logging from the constructor of a global instance"), } ) def test_compare_two_msgs(self): msgs = create_messages(stream_multiple, from_file=True) assert msgs[0] != msgs[-1] def test_compare_other_not_modified(self): msg = create_messages(stream_one) other = dict(apid="XX", ctid="DC1") assert not msg.compare(other) assert other == dict(apid="XX", ctid="DC1") def test_compare_quick_return(self): msg = create_messages(stream_one) other = dict(apid=b"DA1", ctid=b"XX", ecuid=b"FOO") with patch("dlt.dlt.DLTMessage.ecuid", new_callable=PropertyMock) as ecuid: ecuid.return_value = b"FOO" assert not msg.compare(other) ecuid.assert_not_called() def test_compare_matching_apid_ctid(self): msg = create_messages(stream_one) other = dict(apid="DA1", ctid="DC1", ecuid="FOO") with patch("dlt.dlt.DLTMessage.ecuid", new_callable=PropertyMock) as ecuid: ecuid.return_value = "BAR" assert not msg.compare(other) ecuid.assert_called_once() ecuid.return_value = "FOO" assert msg.compare(other) assert ecuid.call_count == 2 def test_pickle_api(self): messages = create_messages(stream_multiple, from_file=True) for msg in messages: assert msg == pickle.loads(pickle.dumps(msg)) def test_from_bytes_control(self): msg = DLTMessage.from_bytes( b"DLT\x011\xd9PY(<\x08\x00MGHS5\x00\x00 MGHS\x00\x00\x96\x85&\x01DA1\x00DC1" b"\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" ) assert msg.apid == "DA1" assert msg.ctid == "DC1" assert msg.ecuid == "MGHS" assert msg.tmsp == 3.8533 assert msg.storage_timestamp == 1498470705.539688 assert msg.payload_decoded == "[connection_info ok] connected " def test_from_bytes_log_multipayload(self): msg = DLTMessage.from_bytes( b"DLT\x011\xd9PYfI\x08\x00MGHS=\x00\x000MGHS\x00\x00\x03\x1e\x00\x00\x94\xc8A" b"\x01MON\x00CPUS\x00\x02\x00\x00\x10\x004 online cores\n\x00" ) assert msg.apid == "MON" assert msg.ctid == "CPUS" assert msg.ecuid == "MGHS" assert msg.tmsp == 3.8088 assert msg.payload_decoded == "4 online cores" def test_sort_data_control(self): data = ( b"DLT\x011\xd9PY(<\x08\x00MGHS5\x00\x00 MGHS\x00\x00\x96\x85&\x01DA1\x00DC1" b"\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" ) tmsp, length, apid, ctid = DLTMessage.extract_sort_data(data) assert tmsp == 3.8533 assert length == len(data) assert apid == "DA1" assert ctid == "DC1" def test_sort_data_log_multipayload(self): data = ( b"DLT\x011\xd9PYfI\x08\x00MGHS=\x00\x000MGHS\x00\x00\x03\x1e\x00\x00\x94\xc8A" b"\x01MON\x00CPUS\x00\x02\x00\x00\x10\x004 online cores\n\x00" ) tmsp, length, apid, ctid = DLTMessage.extract_sort_data(data) assert tmsp == 3.8088 assert length == len(data) assert apid == "MON" assert ctid == "CPUS" def test_largelog(self): data = ( b"DLT\x012\xd9PY)\x00\x01\x00MGHS=o\x02\x04MGHS\x00\x00\x03\x1e\x00\x00\x9e\xb7" 
b"A\x01MON\x00THRD\x00\x02\x00\x00\xe4\x01Process avb_streamhandl with pid: 307 " b'"/usr/bin/avb_streamhandler_app_someip -s pluginias-media_transport-avb_config' b"uration_bmw_mgu.so --bg setup --target Harman_MGU_B1 -p MGU_ICAM -k local.alsa" b".baseperiod=256 -k ptp.loopcount=0 -k ptp.pdelaycount=0 -k ptp.synccount=0 -k " b"sched.priority=20 -k tspec.vlanprio.low=3 -k tspec.presentation.time.offset.lo" b"w=2200000 -k tspec.interval.low=1333000 -k debug.loglevel._RXE=4 -k alsa.group" b'name=mgu_avbsh -n socnet0 -b 2 " started 2401 msec ago\x00' ) msg = DLTMessage.from_bytes(data) assert msg.apid == "MON" assert msg.ctid == "THRD" assert msg.ecuid == "MGHS" assert msg.tmsp == 4.0631 assert ( msg.payload_decoded == 'Process avb_streamhandl with pid: 307 "/usr/bin/avb_streamhandler_app_someip -s ' "pluginias-media_transport-avb_configuration_bmw_mgu.so --bg setup --target Harman_MGU_B1 -p MGU_ICAM " "-k local.alsa.baseperiod=256 -k ptp.loopcount=0 -k ptp.pdelaycount=0 -k ptp.synccount=0 " "-k sched.priority=20 -k tspec.vlanprio.low=3 -k tspec.presentation.time.offset.low=2200000 " "-k tspec.interval.low=1333000 -k debug.loglevel._RXE=4 -k alsa.groupname=mgu_avbsh -n socnet0 " '-b 2 " started 2401 msec ago' ) tmsp, length, apid, ctid = DLTMessage.extract_sort_data(data) assert msg.tmsp == tmsp assert len(msg.to_bytes()) == length assert msg.apid == apid assert msg.ctid == ctid class TestsPayload(object): def test_split(self): msg = create_messages(stream_with_params, from_file=True)[0] payload = msg.payload assert len(payload) == msg.noar assert payload[0] == b"CLevelMonitor::notification() => commandType" assert payload[1] == 3 assert payload[2] == b"deviceId" assert payload[3] == 5 assert payload[4] == b"value" assert payload[5] == 4074 assert payload[6] == b"simulation status" assert payload[7] == 0 with pytest.raises(IndexError): payload.__getitem__(8) class TestsControl(object): def test_load(self): msg = create_messages(control_one, from_file=True)[0] assert msg.apid == "DA1" assert msg.ctid == "DC1" assert msg.is_mode_verbose == 0 assert ( msg.payload_decoded == "[get_log_info 7] get_log_info, 07, 01 00 48 44 44 4d 01 00 43 41 50 49 ff" " ff 04 00 43 41 50 49 06 00 68 64 64 6d 67 72 72 65 6d 6f" ) python-dlt-2.18.10.0/tests/utils.py000066400000000000000000000233571464055136400170210ustar00rootroot00000000000000# Copyright (C) 2016. BMW Car IT GmbH. All rights reserved. 
"""Test helpers and data""" import atexit import ctypes import io import tempfile import os from dlt.dlt import DLTClient, load stream_one = io.BytesIO(b"5\x00\x00 MGHS\xdd\xf6e\xca&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00") stream_with_params = ( b"DLT\x01\xc2<\x85W\xc7\xc5\x02\x00MGHS=r\x00\xa0MGHS\x00\x00\x02B\x00X\xd4\xf1A\x08" b"ENV\x00LVLM\x00\x02\x00\x00-\x00CLevelMonitor::notification() => commandType\x00#" b"\x00\x00\x00\x03\x00\x00\x00\x00\x02\x00\x00\t\x00deviceId\x00#\x00\x00\x00\x05\x00" b"\x00\x00\x00\x02\x00\x00\x06\x00value\x00#\x00\x00\x00\xea\x0f\x00\x00\x00\x02\x00" b"\x00\x12\x00simulation status\x00#\x00\x00\x00\x00\x00\x00\x00" ) stream_multiple = ( b"DLT\x01#o\xd1WD>\x0c\x00MGHS5\x00\x00YMGHS\x00\x01\x80\xd1&\x01DA1\x00DC1\x00\x03\x00\x00\x00" b"\x07\x01\x00SYS\x00\x01\x00FILE\xff\xff\x16\x00File transfer manager.\x12\x00" b"DLT System ManagerremoDLT\x01#o\xd1Wo>\x0c\x00MGHS=\x00\x01PMGHS\x00\x00\x03\xf4\x00" b"\x01i\xa6A\x05SYS\x00JOUR\x00\x02\x00\x00\x1b\x002011/11/11 11:11:18.005274\x00\x00\x02\x00\x00" b"\t\x006.005274\x00\x00\x02\x00\x00\x16\x00systemd-journal[748]:\x00\x00\x02\x00\x00\x0f\x00" b"Informational:\x00\x00\x02\x00\x00\xcf\x00Runtime journal (/run/log/journal/) is currently" b" using 8.0M.\nMaximum allowed usage is set to 385.9M.\nLeaving at least 578.8M free (of" b" currently available 3.7G of space).\nEnforced usage limit is thus 385.9M.\x00" ) stream_multiple_with_malformed_message_at_begining = ( b"DLT\x01\xfar\xc5c\xf7j\x03\x00\x00\x00\x00\x00\x00\x00\x00LCMFLOW WUP invalidDLT\x01" b"\xfar\xc5c\x0bo\x03\x00XORA'\x01\x00\x1bXORA\x16\x02\x00\x00\x00\x00\x00\x00\x00" b"\x00\x00\x00\x00\x11\x04\x00\x00\x00\x00DLT\x01\xfar\xc5c?o\x03" b"DLT\x01#o\xd1WD>\x0c\x00MGHS5\x00\x00YMGHS\x00\x01\x80\xd1&\x01DA1\x00DC1\x00\x03\x00\x00\x00" b"\x07\x01\x00SYS\x00\x01\x00FILE\xff\xff\x16\x00File transfer manager.\x12\x00" b"DLT System ManagerremoDLT\x01#o\xd1Wo>\x0c\x00MGHS=\x00\x01PMGHS\x00\x00\x03\xf4\x00" b"\x01i\xa6A\x05SYS\x00JOUR\x00\x02\x00\x00\x1b\x002011/11/11 11:11:18.005274\x00\x00\x02\x00\x00" b"\t\x006.005274\x00\x00\x02\x00\x00\x16\x00systemd-journal[748]:\x00\x00\x02\x00\x00\x0f\x00" b"Informational:\x00\x00\x02\x00\x00\xcf\x00Runtime journal (/run/log/journal/) is currently" b" using 8.0M.\nMaximum allowed usage is set to 385.9M.\nLeaving at least 578.8M free (of" b" currently available 3.7G of space).\nEnforced usage limit is thus 385.9M.\x00" ) msg_benoit = ( b"DLT\x01\xa5\xd1\xceW\x90\xb9\r\x00MGHS=\x00\x00RMGHS\x00\x00\n[\x00\x0f\x9b#A\x01DEMODATA\x00" b"\x82\x00\x002\x00Logging from the constructor of a global instance\x00" ) control_one = ( b"DLT\x01#o\xd1W\x99!\x0c\x00MGHS5\x00\x00;MGHS\x00\x01\x7f\xdb&\x01DA1\x00DC1\x00\x03" b"\x00\x00\x00\x07\x01\x00HDDM\x01\x00CAPI\xff\xff\x04\x00CAPI\x06\x00hddmgrremo" ) # DLT file with invalid storage header and frames file_storage_clean = ( b"DLT\x01\x9a\xc6\xbfW\x020\t\x00MGHS5\x00\x00 MGHS\x00\x02\x8aC&\x01DA1\x00DC1" b"\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00DLT\x01\x9a\xc6\xbfWoA\t\x00MGHS=" b"\x00\x00NMGHS\x00\x00\x049\x00\x01p\n\x00MGHS5\x00\x00 MGHS" # not to buffer b"\x00\x00mj&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" ) file_with_lifecycles_without_start = ( b"DLT\x01\xc5\x82\xdaX\x19\x93\r\x00XORA'\x01\x00\x1bXORA" # trace to buffer b"\x16\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x04\x00\x00\x00\x00" b"DLT\x01\xc5\x82\xdaXQi\x0e\x00MGHS5\x00\x00 MGHS" # trace to buffer 
b"\x00\x03U\xe0&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" b"DLT\x01m\xc2\x91Y\xad\xe4\x07\x00MGHS=\x01\x00zMGHS" # random trace b"\x00\x00\x02\xab\x00\x00@VA\x01DLTDINTM\x00\x02\x00\x00Z\x00" b"ApplicationID 'DBSY' registered for PID 689, Description=DBus" b" Logging|SysInfra|Log&Trace\n\x00" b"DLT\x01\xed\xc2\x91Y\x0f\xf0\x08\x00MGHS5\x00\x00 MGHS" # trace to buffer b"\x00\x00\x9dC&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" b"DLT\x01\xed\xc2\x91Y\x17.\n\x00MGHS=\x00\x00NMGHS" # new lifecycle b"\x00\x00\x02\xae\x00\x00@/A\x01DLTDINTM\x00\x02\x00\x00.\x00" b"Daemon launched. Starting to output traces...\x00" ) def append_stream_to_file(stream, file_name): msgs = create_messages(stream, from_file=True) for msg in msgs: append_message_to_file(msg, file_name) def append_message_to_file(message, file_name): # Use 'ab' instead of 'wb' because it is to append instead to overwrite with open(file_name, "ab") as file: file.write(message.to_bytes()) file.flush() def create_messages(stream, from_file=False): if from_file is False: stream.seek(0) buf = stream.read() client = DLTClient() client.receiver.buf = ctypes.create_string_buffer(buf) client.receiver.bytesRcvd = len(buf) return client.read_message() _, tmpname = tempfile.mkstemp(suffix=b"") tmpfile = open(tmpname, "wb") tmpfile.write(stream) tmpfile.flush() tmpfile.seek(0) tmpfile.close() atexit.register(os.remove, tmpname) msgs = load(tmpname) return msgs class MockDLTMessage(object): """Mock DLT message for dltlyse plugin testing""" def __init__(self, ecuid="MGHS", apid="SYS", ctid="JOUR", sid="958", payload="", tmsp=0.0, sec=0, msec=0, mcnt=0): self.ecuid = ecuid self.apid = apid self.ctid = ctid self.sid = sid self.payload = payload self.tmsp = tmsp self.mcnt = mcnt self.storageheader = MockStorageHeader(sec=sec, msec=msec) def compare(self, target): """Compare DLT Message to a dictionary""" return target == {k: v for k, v in self.__dict__.items() if k in target.keys()} @property def payload_decoded(self): """Fake payload decoding""" return self.payload @property def storage_timestamp(self): """Fake storage timestamp""" return float("{}.{}".format(self.storageheader.seconds, self.storageheader.microseconds)) def __repr__(self): return str(self.__dict__) class MockStorageHeader(object): """Mock DLT storage header for plugin testing""" def __init__(self, msec=0, sec=0): self.microseconds = msec self.seconds = sec python-dlt-2.18.10.0/tox.ini000066400000000000000000000022711464055136400154500ustar00rootroot00000000000000[tox] envlist = py3,black,ruff output_dir={env:SPHINX_OUTPUT_DIR:{toxworkdir}/_build} isolated_build = True [testenv] deps = pytest pytest-cov commands = pytest \ --cov=dlt \ --cov-branch \ --cov-report=html \ --cov-report=term-missing \ {posargs:tests} [pytest] filterwarnings = error [testenv:ruff] basepython = python3 skip_install = true deps = ruff mypy commands = ruff check ./dlt ./tests [testenv:black] skip_install = True skipsdist = True deps = black commands = black -l 119 --check . 
python-dlt-2.18.10.0/tox.ini000066400000000000000000000022711464055136400154500ustar00rootroot00000000000000[tox]
envlist = py3,black,ruff
output_dir = {env:SPHINX_OUTPUT_DIR:{toxworkdir}/_build}
isolated_build = True

[testenv]
deps =
    pytest
    pytest-cov
commands =
    pytest \
        --cov=dlt \
        --cov-branch \
        --cov-report=html \
        --cov-report=term-missing \
        {posargs:tests}

[pytest]
filterwarnings =
    error

[testenv:ruff]
basepython = python3
skip_install = true
deps =
    ruff
    mypy
commands =
    ruff check ./dlt ./tests

[testenv:black]
skip_install = True
skipsdist = True
deps = black
commands = black -l 119 --check .

[testenv:docs]
deps = -r{toxinidir}/docs/requirements-docs.txt
commands =
    # Workaround for https://github.com/tox-dev/tox/issues/149
    pip install -q -r {toxinidir}/docs/requirements-docs.txt
    sphinx-build -T -j auto --color -W -c docs docs {[tox]output_dir} {posargs}
    python -c 'import pathlib; print("website available under file://\{0\}".format(pathlib.Path(r"{[tox]output_dir}") / "index.html"))'

[testenv:release]
basepython = python3
passenv = SOURCE_DATE_EPOCH
skip_install = True
skipsdist = True
deps =
    build
    twine
    wheel
commands =
    python -m build
    twine upload -r software-factory-pypi dist/*