pax_global_header00006660000000000000000000000064143421076330014515gustar00rootroot0000000000000052 comment=a033438237787a59d1258132ef40974254d9253e dltlyse-1.4.3/000077500000000000000000000000001434210763300132025ustar00rootroot00000000000000dltlyse-1.4.3/.dockerignore000066400000000000000000000023101434210763300156520ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ dltlyse/_version.py dltlyse-1.4.3/.flake8000066400000000000000000000006401434210763300143550ustar00rootroot00000000000000[flake8] max-line-length = 119 ignore = # D10*: Missing docstring D10 # E203: whitespace before ':' # This error is not PEP8 complaint and should be ignored E203 # W503: line break before binary operator # seems to conflict with black code formatting W503 # W605: invalid escape sequence '\d' W605 exclude = .git, .tox, .eggs, __pycache__, build, dist dltlyse-1.4.3/.github/000077500000000000000000000000001434210763300145425ustar00rootroot00000000000000dltlyse-1.4.3/.github/workflows/000077500000000000000000000000001434210763300165775ustar00rootroot00000000000000dltlyse-1.4.3/.github/workflows/dltlyse-ci.yaml000066400000000000000000000020571434210763300215400ustar00rootroot00000000000000name: dltlyse-ci-actions on: [push, pull_request] jobs: run-test-for-dltlyse: runs-on: ubuntu-latest strategy: matrix: LIBDLT_VERSION: - "v2.18.8" steps: - uses: actions/checkout@v3 - name: Build dltlyse unit test docker image id: docker_build uses: docker/build-push-action@v3 with: file: "Dockerfile.github_flow" push: false build-args: | LIBDLT_VERSION=${{ matrix.LIBDLT_VERSION }} tags: dltlyse/dltlyse-unittest:${{ matrix.LIBDLT_VERSION }} - name: Run tox static check uses: addnab/docker-run-action@v3 with: image: dltlyse/dltlyse-unittest:${{ matrix.LIBDLT_VERSION }} options: -v ${{ github.workspace }}:/dltlyse -w /dltlyse run: tox -e statictest - name: Run tox uses: addnab/docker-run-action@v3 with: image: dltlyse/dltlyse-unittest:${{ matrix.LIBDLT_VERSION }} options: -v ${{ github.workspace }}:/dltlyse -w /dltlyse run: tox dltlyse-1.4.3/.gitignore000066400000000000000000000023541434210763300151760ustar00rootroot00000000000000# Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # 
PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # pyenv .python-version # celery beat schedule file celerybeat-schedule # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ src/dltlyse/_version.py src/dltlyse/dltlyse_results.xml dltlyse-1.4.3/Dockerfile000066400000000000000000000022761434210763300152030ustar00rootroot00000000000000FROM alpine:3.17 as builder ARG LIBDLT_VERSION=v2.18.8 RUN set -ex \ && apk update \ && apk add build-base musl-dev linux-headers git cmake ninja wget curl dbus zlib # Install libdlt RUN set -ex \ && git clone https://github.com/GENIVI/dlt-daemon \ && cd /dlt-daemon \ && git checkout ${LIBDLT_VERSION} \ && cd /dlt-daemon \ && cmake CMakeLists.txt \ && make -j \ && make install COPY . /build/dltlyse RUN set -ex \ && apk add python3 py3-pip py3-virtualenv \ && cd /build/dltlyse \ && pip install --no-cache-dir build wheel \ && python3 -m build --wheel \ && git clone https://github.com/bmwcarit/python-dlt /build/python-dlt \ && cd /build/python-dlt \ && python3 -m build --wheel FROM alpine:3.17 COPY --from=builder /usr/local/lib /usr/local/lib COPY --from=builder /build/dltlyse/dist/dltlyse*.whl / COPY --from=builder /build/python-dlt/dist/dlt*.whl / RUN set -ex \ && ldconfig /usr/local/lib \ && apk add --no-cache python3 py3-six \ && apk add --no-cache --virtual .build-deps py3-pip git \ && pip install --no-cache-dir dlt*.whl dltlyse*.whl \ && apk del .build-deps ENTRYPOINT [ "dltlyse" ] CMD [ "--help" ] dltlyse-1.4.3/Dockerfile.github_flow000066400000000000000000000011221434210763300175000ustar00rootroot00000000000000FROM alpine:3.17 ARG LIBDLT_VERSION=v2.18.8 RUN set -ex \ && apk update \ && apk add build-base musl-dev linux-headers git cmake ninja \ wget curl dbus zlib python3 py3-pip \ && pip install --no-cache-dir tox virtualenv \ && git clone https://github.com/GENIVI/dlt-daemon \ && cd /dlt-daemon \ && git checkout ${LIBDLT_VERSION} \ && cd /dlt-daemon \ && cmake CMakeLists.txt \ && make -j \ && make install \ && ldconfig /usr/local/lib \ && pip install --no-cache-dir git+https://github.com/bmwcarit/python-dlt # vim: set ft=dockerfile : dltlyse-1.4.3/LICENSE000066400000000000000000000405251434210763300142150ustar00rootroot00000000000000Mozilla Public License Version 2.0 ================================== 1. Definitions -------------- 1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. 1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a Contributor and that particular Contributor's Contribution. 1.3. "Contribution" means Covered Software of a particular Contributor. 1.4. 
"Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. 1.5. "Incompatible With Secondary Licenses" means (a) that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or (b) that the Covered Software was made available under the terms of version 1.1 or earlier of the License, but not also under the terms of a Secondary License. 1.6. "Executable Form" means any form of the work other than Source Code Form. 1.7. "Larger Work" means a work that combines Covered Software with other material, in a separate file or files, that is not Covered Software. 1.8. "License" means this document. 1.9. "Licensable" means having the right to grant, to the maximum extent possible, whether at the time of the initial grant or subsequently, any and all of the rights conveyed by this License. 1.10. "Modifications" means any of the following: (a) any file in Source Code Form that results from an addition to, deletion from, or modification of the contents of Covered Software; or (b) any new file in Source Code Form that contains any Covered Software. 1.11. "Patent Claims" of a Contributor means any patent claim(s), including without limitation, method, process, and apparatus claims, in any patent Licensable by such Contributor that would be infringed, but for the grant of the License, by the making, using, selling, offering for sale, having made, import, or transfer of either its Contributions or its Contributor Version. 1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. 1.13. "Source Code Form" means the form of the work preferred for making modifications. 1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. 2. License Grants and Conditions -------------------------------- 2.1. Grants Each Contributor hereby grants You a world-wide, royalty-free, non-exclusive license: (a) under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its Contributions, either on an unmodified basis, with Modifications, or as part of a Larger Work; and (b) under Patent Claims of such Contributor to make, use, sell, offer for sale, have made, import, and otherwise transfer either its Contributions or its Contributor Version. 2.2. Effective Date The licenses granted in Section 2.1 with respect to any Contribution become effective for each Contribution on the date the Contributor first distributes such Contribution. 2.3. Limitations on Grant Scope The licenses granted in this Section 2 are the only rights granted under this License. No additional rights or licenses will be implied from the distribution or licensing of Covered Software under this License. 
Notwithstanding Section 2.1(b) above, no patent license is granted by a Contributor: (a) for any code that a Contributor has removed from Covered Software; or (b) for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or (c) under Patent Claims infringed by Covered Software in the absence of its Contributions. This License does not grant any rights in the trademarks, service marks, or logos of any Contributor (except as may be necessary to comply with the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to distribute the Covered Software under a subsequent version of this License (see Section 10.2) or under the terms of a Secondary License (if permitted under the terms of Section 3.3). 2.5. Representation Each Contributor represents that the Contributor believes its Contributions are its original creation(s) or it has sufficient rights to grant the rights to its Contributions conveyed by this License. 2.6. Fair Use This License is not intended to limit any rights You have under applicable copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in Section 2.1. 3. Responsibilities ------------------- 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any Modifications that You create or to which You contribute, must be under the terms of this License. You must inform recipients that the Source Code Form of the Covered Software is governed by the terms of this License, and how they can obtain a copy of this License. You may not attempt to alter or restrict the recipients' rights in the Source Code Form. 3.2. Distribution of Executable Form If You distribute Covered Software in Executable Form then: (a) such Covered Software must also be made available in Source Code Form, as described in Section 3.1, and You must inform recipients of the Executable Form how they can obtain a copy of such Source Code Form by reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and (b) You may distribute such Executable Form under the terms of this License, or sublicense it under different terms, provided that the license for the Executable Form does not attempt to limit or alter the recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, provided that You also comply with the requirements of this License for the Covered Software. If the Larger Work is a combination of Covered Software with a work governed by one or more Secondary Licenses, and the Covered Software is not Incompatible With Secondary Licenses, this License permits You to additionally distribute such Covered Software under the terms of such Secondary License(s), so that the recipient of the Larger Work may, at their option, further distribute the Covered Software under the terms of either this License or such Secondary License(s). 3.4. 
Notices You may not remove or alter the substance of any license notices (including copyright notices, patent notices, disclaimers of warranty, or limitations of liability) contained within the Source Code Form of the Covered Software, except that You may alter any license notices to the extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered Software. However, You may do so only on Your own behalf, and not on behalf of any Contributor. You must make it absolutely clear that any such warranty, support, indemnity, or liability obligation is offered by You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any jurisdiction. 4. Inability to Comply Due to Statute or Regulation --------------------------------------------------- If it is impossible for You to comply with any of the terms of this License with respect to some or all of the Covered Software due to statute, judicial order, or regulation then You must: (a) comply with the terms of this License to the maximum extent possible; and (b) describe the limitations and the code they affect. Such description must be placed in a text file included with all distributions of the Covered Software under this License. Except to the extent prohibited by statute or regulation, such description must be sufficiently detailed for a recipient of ordinary skill to be able to understand it. 5. Termination -------------- 5.1. The rights granted under this License will terminate automatically if You fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor explicitly and finally terminates Your grants, and (b) on an ongoing basis, if such Contributor fails to notify You of the non-compliance by some reasonable means prior to 60 days after You have come back into compliance. Moreover, Your grants from a particular Contributor are reinstated on an ongoing basis if such Contributor notifies You of the non-compliance by some reasonable means, this is the first time You have received notice of non-compliance with this License from such Contributor, and You become compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent infringement claim (excluding declaratory judgment actions, counter-claims, and cross-claims) alleging that a Contributor Version directly or indirectly infringes any patent, then the rights granted to You by any and all Contributors for the Covered Software under Section 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been validly granted by You or Your distributors under this License prior to termination shall survive termination. ************************************************************************ * * * 6. 
Disclaimer of Warranty * * ------------------------- * * * * Covered Software is provided under this License on an "as is" * * basis, without warranty of any kind, either expressed, implied, or * * statutory, including, without limitation, warranties that the * * Covered Software is free of defects, merchantable, fit for a * * particular purpose or non-infringing. The entire risk as to the * * quality and performance of the Covered Software is with You. * * Should any Covered Software prove defective in any respect, You * * (not any Contributor) assume the cost of any necessary servicing, * * repair, or correction. This disclaimer of warranty constitutes an * * essential part of this License. No use of any Covered Software is * * authorized under this License except under this disclaimer. * * * ************************************************************************ ************************************************************************ * * * 7. Limitation of Liability * * -------------------------- * * * * Under no circumstances and under no legal theory, whether tort * * (including negligence), contract, or otherwise, shall any * * Contributor, or anyone who distributes Covered Software as * * permitted above, be liable to You for any direct, indirect, * * special, incidental, or consequential damages of any character * * including, without limitation, damages for lost profits, loss of * * goodwill, work stoppage, computer failure or malfunction, or any * * and all other commercial damages or losses, even if such party * * shall have been informed of the possibility of such damages. This * * limitation of liability shall not apply to liability for death or * * personal injury resulting from such party's negligence to the * * extent applicable law prohibits such limitation. Some * * jurisdictions do not allow the exclusion or limitation of * * incidental or consequential damages, so this exclusion and * * limitation may not apply to You. * * * ************************************************************************ 8. Litigation ------------- Any litigation relating to this License may be brought only in the courts of a jurisdiction where the defendant maintains its principal place of business and such litigation shall be governed by laws of that jurisdiction, without reference to its conflict-of-law provisions. Nothing in this Section shall prevent a party's ability to bring cross-claims or counter-claims. 9. Miscellaneous ---------------- This License represents the complete agreement concerning the subject matter hereof. If any provision of this License is held to be unenforceable, such provision shall be reformed only to the extent necessary to make it enforceable. Any law or regulation which provides that the language of a contract shall be construed against the drafter shall not be used to construe this License against a Contributor. 10. Versions of the License --------------------------- 10.1. New Versions Mozilla Foundation is the license steward. Except as provided in Section 10.3, no one other than the license steward has the right to modify or publish new versions of this License. Each version will be given a distinguishing version number. 10.2. Effect of New Versions You may distribute the Covered Software under the terms of the version of the License under which You originally received the Covered Software, or under the terms of any subsequent version published by the license steward. 10.3. 
Modified Versions

If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).

10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses

If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.

Exhibit A - Source Code Form License Notice
-------------------------------------------

This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.

If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.

You may add additional accurate notices of copyright ownership.

Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------

This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.
dltlyse-1.4.3/README.md000066400000000000000000000053261434210763300144670ustar00rootroot00000000000000# DLT Analyser

A Python module and a collection of plugins to support analysis of DLT traces.

## Run dltlyse with docker

1. Build the docker image

```bash
git clone https://github.com/bmwcarit/dltlyse
cd dltlyse
docker build -t bmwcarit/dltlyse .
```

2. Run the dltlyse container

```bash
# Get the command line help
docker run -it --rm bmwcarit/dltlyse --help

# Run with dlt file(s)
docker run -it --rm \
    -v "$(pwd):/workspace" \
    -w /workspace bmwcarit/dltlyse \
    <first-trace>.dlt <second-trace>.dlt

# To specify your own dltlyse plugins, specify the path to their folder:
docker run -it --rm \
    -v /path/to/plugins:/plugins \
    -v "$(pwd):/workspace" \
    -w /workspace \
    bmwcarit/dltlyse \
    -d /plugins <trace>.dlt
```

## How it works

`dltlyse` reads all messages from the given DLT trace file and passes each DLT message to the
`__call__` method of all enabled plugins. The plugin then decides whether the message is
interesting for its purpose and collects data.

At the start of each device lifecycle, `new_lifecycle` is called, and at the end, `end_lifecycle`
is called; this way the plugins can track when the device was rebooted. It is guaranteed that all
messages belong to a lifecycle, so `new_lifecycle` is called before any DLT message is passed to
`__call__`, and `end_lifecycle` is called after the last message, before any call to `report`.

The `report()` method of each plugin is called after all DLT messages have been passed through all
enabled plugins. The `report()` method should set one or more results from the processing, as well
as write details into files.

## Writing custom plugins

`dltlyse` can easily be extended with custom plugins using a simple plugin API.
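
For orientation, the same flow can also be driven programmatically. Below is a minimal sketch,
assuming the `DLTAnalyser` API from `src/dltlyse/core/analyser.py`; the trace file names and the
plugin directory are placeholders, and the actual CLI wiring lives in `dltlyse.run_dltlyse`:

```python
# Minimal sketch, assuming the DLTAnalyser API from src/dltlyse/core/analyser.py.
# "plugins", "trace1.dlt" and "trace2.dlt" are placeholder paths.
from dltlyse.core.analyser import DLTAnalyser

analyser = DLTAnalyser()
analyser.load_plugins(plugin_dirs=["plugins"])  # the default plugin dirs are searched too
exit_code = analyser.run_analyse(
    ["trace1.dlt", "trace2.dlt"],  # traces to analyse, in order
    xunit="dltlyse_results.xml",   # xUnit report written at the end
    no_sort=False,
    is_live=False,
)
```

The return value is a bitmask: bit 0 is set when a plugin result failed, bit 1 when a plugin
raised an exception, and bit 2 when a trace file could not be read.
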
Just use the following code snippet as a template, stored in the "plugins" directory:

```
from dltlyse.core.plugin_base import Plugin


class MyCustomPlugin(Plugin):
    """Does some custom job"""

    # A list of (APID, CTID) pairs that this plugin wants to receive.
    message_filters = [("XXX", "YYY")]

    def __call__(self, message):
        # Will be called for each message where message.apid == "XXX" and message.ctid == "YYY":
        # do some stuff, save knowledge into self.
        pass

    def new_lifecycle(self, ecu_id, lifecycle_id):
        # Will be called each time the device starts up, with an incremental id.
        pass

    def end_lifecycle(self, ecu_id, lifecycle_id):
        # Will be called each time the device shuts down.
        pass

    def report(self):
        # Called at the end.
        if self.good:
            self.add_result(message="Good result", attach=["somefile.txt"])
            # Attachment path is relative to the extracted_files/ folder in the results.
        else:
            self.add_result(
                state="failure",
                message="This failed",
                stdout="Detailed log of failure",
            )
```
dltlyse-1.4.3/mypy.ini000066400000000000000000000000621434210763300146770ustar00rootroot00000000000000[mypy]

[mypy-dlt.*]
ignore_missing_imports = True
dltlyse-1.4.3/pyproject.toml000066400000000000000000000027011434210763300161160ustar00rootroot00000000000000[project]
name = "dltlyse"
dynamic = ["version"]
description = "DLT trace file analyser for the BMW head unit platform"
authors = [
    {name = "BMW CarIT", email="carit.info@bmw.de"},
]
license = {file = "LICENSE"}
readme = "README.md"
requires-python = ">=3.7"
keywords = ["dlt"]
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
    "Operating System :: OS Independent",
    "Programming Language :: Python",
    "Topic :: Software Development :: Testing",
]
dependencies = [
]

[project.optional-dependencies]
dev = [
    "black>=22.10",
    "flake8>=5",
    "pytest>=7.2.0",
]

[project.urls]
homepage = "https://github.com/bmwcarit/dltlyse"
repository = "https://github.com/bmwcarit/dltlyse"

[project.scripts]
dltlyse = "dltlyse.run_dltlyse:main"

[build-system]
requires = ["setuptools>=45", "setuptools_scm[toml]>=6.2"]
build-backend = "setuptools.build_meta"

[tool.setuptools_scm]
write_to = "src/dltlyse/_version.py"

[tool.black]
line-length = 119
target_version = ['py37']
include = '\.pyi?$'
exclude = '''
(
  /(
      \.eggs         # exclude a few common directories in the
    | \.git          # root of the project
    | \.hg
    | \.mypy_cache
    | \.tox
    | \.venv
    | _build
    | buck-out
    | build
    | dist
  )/
  | foo.py           # also separately exclude a file named foo.py in
                     # the root of the project
  | _version.py
)
'''
dltlyse-1.4.3/src/000077500000000000000000000000001434210763300137715ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/000077500000000000000000000000001434210763300154515ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/__init__.py000066400000000000000000000000001434210763300175500ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/core/000077500000000000000000000000001434210763300164015ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/core/__init__.py000066400000000000000000000000001434210763300205000ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/core/analyser.py000066400000000000000000000701451434210763300206000ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved.
"""DLT file analyser""" from contextlib import contextmanager from collections import defaultdict import itertools import logging import os import signal import sys import time import traceback from typing import DefaultDict, Dict, Iterable, List, Tuple, TypeVar # noqa: F401 from dlt import dlt from dltlyse.core.report import XUnitReport, Result from dltlyse.core.plugin_base import Plugin # pylint: disable= too-many-nested-blocks, no-member T = TypeVar("T") logger = logging.getLogger(__name__) stdoutlogger = logging.getLogger("summary") stdoutlogger.addHandler(logging.StreamHandler(sys.stdout)) DEFAULT_PLUGINS_DIRS = [ os.path.join(os.path.dirname(__file__), "../plugins"), # installation folder # e.g. /usr/bin/pythonX.X/site-packages/dltlyse/plugins os.path.join(os.getcwd(), "plugins"), # plugins folder in current working directory ] # Traces to buffer since they might be stored before lifecycle start message BUFFER_MATCHES_MSG = { "apid": "DA1", "ctid": "DC1", # Note there is a trailing space at the end of the str. The reason is due to an # optimization that compares (using `__eq__` aka `==`) the exact string that gets # extracted by python-dlt now with stripped nul bytes (\00) that existed before. "payload_decoded": "[connection_info ok] connected ", } BUFFER_MATCHES_ECUID = "XORA" DLT_LIFECYCLE_START = { "apid": "DLTD", "ctid": "INTM", "payload_decoded": "Daemon launched. Starting to output traces...", } MAX_BUFFER_SIZE = 50 class DLTLifecycle(object): """Single DLT lifecycle""" def __init__(self, ecu_id, lifecycle_id, dltfile=None): self.ecu_id = ecu_id self.dltfile = dltfile self.lifecycle_id = lifecycle_id self._first_msg = None self._last_msg = None def set_first_msg(self, msg): """Set the first msg explicitly needed for old dlt-atlas scripts :param DLTMessage msg: The message to be set as the first """ self._first_msg = msg def set_last_msg(self, msg): """Set the first msg explicitly needed for old dlt-atlas scripts :param DLTMessage msg: The message to be set as the last """ self._last_msg = msg def __getitem__(self, index): """Get either the first or last msg in this lifecycle explicitly needed for old dlt-atlas scripts :param int index: Index to either get first or last msg """ if index == 0: if self._first_msg: return self._first_msg else: logger.error("Set first msg of lifecycle before using lifecycle object") raise ValueError() elif index == -1: if self._last_msg: return self._last_msg else: logger.error("Set last msg of lifecycle before using lifecycle object") raise ValueError() else: logger.error("Access to messages beyond 0 and -1 unsupported" "- use DLTFile.lifecycles") raise IndexError() def clear_msgs(self): """Clear the first and last msg""" self._first_msg = None self._last_msg = None def get_end(self): """Get last msg in this lifecycle explicitly needed for old dlt-atlas scripts """ return self._last_msg def make_plugin_exception_message(plugin, action, traceback_format_exc, sys_exec_info): """Handle plugin exception""" message = "Error {} plugin {} - {}".format(action, plugin.get_plugin_name(), sys_exec_info[0]) logger.error(message) logger.error(traceback_format_exc) if not isinstance(plugin, type): plugin.add_exception("\n".join([message, traceback_format_exc])) @contextmanager def handle_plugin_exceptions(plugin, action="running"): """Catch all exceptions and store them in the plugin.__exceptions structure""" start_time = time.time() try: yield except: # noqa: E722 make_plugin_exception_message(plugin, action, traceback.format_exc(), sys.exc_info()) if not 
isinstance(plugin, type): plugin.add_timing(action, time.time() - start_time) def _scan_folder(root, plugin_classes): """Scans a folder seeking for plugins. Args: root(str): the path to scan. plugin_classes(list): a list which collects all plugins found. """ if not os.path.isdir(root): # Skip non-existing folders. logger.warning("Directory '%s' doesn't exist!", root) return filenames = os.listdir(root) if "__NO_PLUGINS__" in filenames: # If the folder hasn't plugins, we skip it. return sys.path.insert(0, root) sys.path.insert(1, os.path.dirname(__file__)) for name in filenames: full_path = os.path.join(root, name) if os.path.isdir(full_path): if name != "tests": # We skip the tests folder. _scan_folder(full_path, plugin_classes) elif name.endswith(".py") and not name.startswith("_"): # We skip non-Python files, and private files. module_name = os.path.splitext(os.path.split(name)[-1])[0] try: __import__(module_name) module = sys.modules[module_name] for class_name in dir(module): cls = getattr(module, class_name) if ( hasattr(cls, "__mro__") and issubclass(cls, Plugin) and ( not any( hasattr(getattr(cls, item), "__isabstractmethod__") and not isinstance(getattr(cls, item), property) for item in dir(cls) ) ) ): plugin_classes.append(cls) except (ImportError, ValueError): logger.error("Could not load plugin %s\n%s", module_name, traceback.format_exc()) def get_plugin_classes(plugin_dirs): # pylint: disable=too-many-locals """Collect plugin classes""" plugin_classes = [] for plugins_dir in plugin_dirs: logger.info("Searching directory '%s' for plugins", plugins_dir) _scan_folder(plugins_dir, plugin_classes) return plugin_classes class DltlysePluginCollector(object): """Dispatch the dlt messages to each plugins The class collects all plugins by plugin's message filter setting. The analyser could pass messages to these plugins with fewer comparisons than before. Based on the performance consideration, these plugins are saved as a tuple. Since these data strcutre will be searched over million of times, choosing a low overhead data structure is necessary. """ def __init__(self): # type: () -> None self.msg_plugins = {} # type: Dict[Tuple[str, str], Tuple[Plugin, ...]] self.apid_plugins = {} # type: Dict[str, Tuple[Plugin, ...]] self.ctid_plugins = {} # type: Dict[str, Tuple[Plugin, ...]] self.greedy_plugins = () # type: Tuple[Plugin, ...] 
def _convert_dict_value_tuple(self, plugins): # type: (Dict[T, List[Plugin]]) -> Dict[T, Tuple[Plugin, ...]] """Helper function to convert the list value type to tuple value type""" return {key: tuple(value) for key, value in plugins.items() if value} def _dispatch_plugins(self, plugins): # type: (Iterable[Plugin]) -> None """Dispatch plugins by message filters""" msg_plugins = defaultdict(list) # type: DefaultDict[Tuple[str, str], List[Plugin]] apid_plugins = defaultdict(list) # type: DefaultDict[str, List[Plugin]] ctid_plugins = defaultdict(list) # type: DefaultDict[str, List[Plugin]] greedy_plugins = [] # type: List[Plugin] for plugin in plugins: msg_filters = plugin.message_filters if isinstance(msg_filters, str) and msg_filters == "all": greedy_plugins.append(plugin) elif isinstance(msg_filters, list): msg_filters = frozenset(msg_filters) # type: ignore for apid, ctid in msg_filters: # type: ignore if apid and ctid: msg_plugins[apid, ctid].append(plugin) elif apid: apid_plugins[apid].append(plugin) elif ctid: ctid_plugins[ctid].append(plugin) self.msg_plugins = self._convert_dict_value_tuple(msg_plugins) self.apid_plugins = self._convert_dict_value_tuple(apid_plugins) self.ctid_plugins = self._convert_dict_value_tuple(ctid_plugins) self.greedy_plugins = tuple(greedy_plugins) def _check_plugin_msg_filters(self, plugins): # type: (Iterable[Plugin]) -> None """Check the plugin's message filter setting Check if the message filters is valid. If there is any duplicated setting, it will cause the plugin to process the same message many times. :raises ValueError: When the settings of the plugin's message filter is invalid. """ for plugin in plugins: error_msg_postfix = "{plugin} - {msg_filters}".format( plugin=plugin.get_plugin_name(), msg_filters=plugin.message_filters ) msg_filters = plugin.message_filters if isinstance(msg_filters, str): if msg_filters != "all": raise ValueError("Invalid message filter setting: " + error_msg_postfix) continue if not msg_filters: raise ValueError("Message filter should not empty: " + error_msg_postfix) msg_filters = frozenset(plugin.message_filters) # type: ignore apid_filters = {apid for apid, ctid in msg_filters if apid and not ctid} # type: ignore ctid_filters = {ctid for apid, ctid in msg_filters if not apid and ctid} # type: ignore if any( # type: ignore apid in apid_filters or ctid in ctid_filters for apid, ctid in msg_filters if apid and ctid ): raise ValueError("Duplicated message filter setting: " + error_msg_postfix) def _convert_plugin_obj_to_name(self, plugins): # (Union[Tuple[Plugin, ...], Dict[T, Tuple[Plugin, ...]]]) -> # Union[List[str], Dict[_t, List[str]]] """Helper functioon to convert the plugin object to its name from a dict or a tuple The method is only used for debugging purpose. """ if isinstance(plugins, tuple): return [plugin.get_plugin_name() for plugin in plugins] return {key: [plugin.get_plugin_name() for plugin in value] for key, value in plugins.items()} def _print_plugin_collections(self): # type: () -> None """Print the collections for all plugins The method is only used for debugging purpose. 
""" logger.debug("Message filter plugins: %s", self._convert_plugin_obj_to_name(self.msg_plugins)) logger.debug("APID plugins: %s", self._convert_plugin_obj_to_name(self.apid_plugins)) logger.debug("CTID plugins: %s", self._convert_plugin_obj_to_name(self.ctid_plugins)) logger.debug("Greedy plugins: %s", self._convert_plugin_obj_to_name(self.greedy_plugins)) def init_plugins(self, plugins): # type: (List[Plugin]) -> None """Init with plugins Please call the function after all plugins are initialized, the method parses the plugin's message filter then creates the corresponding plugin lists for message dispatching. """ self._check_plugin_msg_filters(plugins) self._dispatch_plugins(plugins) self._print_plugin_collections() class DLTAnalyser(object): """Main program to run live/offline analysis The analyser receives/get dlt messages. If the message is a lifecycle-start message, the analyser will end last life cycle, start a new lifecycle and pass the information to plugins which are implemented with `new_lifecycle` and `end_lifecycle`. If the message is a normal message. The message will be passed to registered plugins. The class is not a plugin. If there is an uncaught exception happened in execution time, the File Sanity Check will fail. """ def __init__(self): self.plugins = [] self.file_exceptions = {} self.traces = [] self._buffered_traces = [] self.dlt_file = None self.plugin_collector = DltlysePluginCollector() def process_buffer(self): """Return buffered traces and clear buffer""" for trace in self._buffered_traces: self.process_message(trace) self._buffered_traces = [] def load_plugins(self, plugin_dirs, plugins=None, exclude=None, no_default_dir=False): """Load plugins from "plugins" directory""" if no_default_dir is False: plugin_dirs += DEFAULT_PLUGINS_DIRS plugin_classes = get_plugin_classes(plugin_dirs) if plugins: plugins = list(set(plugins)) for cls in plugin_classes: if plugins is None: if cls.manually_executed and os.environ.get("DLTLYSE_ALL_INCLUDES_MANUAL", "false").lower() not in ( "1", "true", "yes", ): continue else: if not cls.get_plugin_name() in plugins: continue plugins.remove(cls.get_plugin_name()) if exclude is not None and cls.get_plugin_name() in exclude: continue logger.info("Loading plugin '%s' from '%s'", cls.get_plugin_name(), cls.__module__) with handle_plugin_exceptions(cls, "loading"): self.plugins.append(cls()) if plugins: logger.error("Some plugins that were requested were not found: %s", plugins) raise RuntimeError("Error loading requested plugins: {}".format(", ".join(plugins))) self.plugin_collector.init_plugins(self.plugins) def show_plugins(self): """Show available plugins""" text = "Available plugins:\n" for plugin in self.plugins: classname = plugin.get_plugin_name() try: plugindoc = plugin.__doc__.splitlines()[0] except AttributeError: plugindoc = plugin.__doc__ text += " - {} ({})\n".format(classname, plugindoc) return text def get_filters(self): """Extract filtering information from plugins""" filters = set() for plugin in self.plugins: if plugin.message_filters == "all": logger.debug( "Speed optimization disabled: '%s' plugin requires all messages", plugin.get_plugin_name() ) return None for flt in plugin.message_filters: filters.add(flt) return list(filters) def start_lifecycle(self, ecu_id, lifecycle_id): """call DltAtlas plugin API - new_lifecycle""" for plugin in self.plugins: with handle_plugin_exceptions(plugin, "calling new_lifecycle"): plugin.new_lifecycle(ecu_id, lifecycle_id) def end_lifecycle(self, lifecycle, lifecycle_id): 
"""Finish lifecycle processing for all plugins""" for plugin in self.plugins: if hasattr(plugin, "prep_plugin_env"): plugin.prep_plugin_env(lifecycle, lifecycle_id) for plugin in self.plugins: with handle_plugin_exceptions(plugin, "calling end_lifecycle"): plugin.end_lifecycle(lifecycle.ecu_id, lifecycle_id) def process_message(self, msg): """Process the message""" msg_apid = msg.apid msg_ctid = msg.ctid for plugin in itertools.chain( self.plugin_collector.msg_plugins.get((msg_apid, msg_ctid), ()), self.plugin_collector.apid_plugins.get(msg_apid, ()), self.plugin_collector.ctid_plugins.get(msg_ctid, ()), self.plugin_collector.greedy_plugins, ): try: plugin(msg) except: # noqa: E722 make_plugin_exception_message(plugin, "calling", traceback.format_exc(), sys.exc_info()) # pylint: disable=too-many-locals, too-many-statements def run_analyse(self, traces, xunit, no_sort, is_live, testsuite_name="dltlyse"): """Read the DLT trace and call each plugin for each message read""" # # CAUTION: DON'T REFACTOR THE METHOD FOR READABILITY. # # The method is optimized for performance. We do a lot of optimizations # for the method (e.g. avoid to access attribute with dots, function # inlining, loop unrolling, ...). The inner most loop is called over # 10 million times when the input file is large. Any small/tiny change # could causes performance pentlty. filters = self.get_filters() # add filter for lifecycle start message in case it is missing # filters == None means no filtering is done at all flt = (DLT_LIFECYCLE_START["apid"].encode("utf-8"), DLT_LIFECYCLE_START["ctid"].encode("utf-8")) if filters and flt not in filters: filters.append(flt) old_lifecycle = None lifecycle = None last_msg = None lifecycle_id = 0 self.traces = traces if is_live: signal.signal(signal.SIGINT, self.stop_signal_handler) # Optimization: Local variables for global constant values # https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Local_Variables buffer_matches_msg_apid = BUFFER_MATCHES_MSG["apid"] buffer_matches_msg_ctid = BUFFER_MATCHES_MSG["ctid"] buffer_matches_msg_payload_decoded = BUFFER_MATCHES_MSG["payload_decoded"] # pylint: disable=invalid-name dlt_lifecycle_start_apid = DLT_LIFECYCLE_START["apid"] dlt_lifecycle_start_ctid = DLT_LIFECYCLE_START["ctid"] dlt_lifecycle_start_payload_decoded = DLT_LIFECYCLE_START["payload_decoded"] # pylint: disable=invalid-name # Optimization: Local variables for the get functions # ref: https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Avoiding_dots... 
msg_plugins_getter = self.plugin_collector.msg_plugins.get apid_plugins_getter = self.plugin_collector.apid_plugins.get ctid_plugins_getter = self.plugin_collector.ctid_plugins.get greedy_plugins = self.plugin_collector.greedy_plugins for filename in traces: logger.info("Reading trace file '%s'", filename) with self.handle_file_exceptions(filename): tracefile = dlt.load(filename, split=not no_sort, filters=filters, live_run=is_live) self.dlt_file = tracefile msg = None for msg in tracefile: # Optimization: Local variables for values # https://wiki.python.org/moin/PythonSpeed/PerformanceTips#Local_Variables msg_apid = msg.apid msg_ctid = msg.ctid msg_payload_decoded = msg.payload_decoded # Buffer Messages if we find special # marked msgs that should be buffered # don't process these messages yet in this lifecycle # # Optimization: don't use msg.compare here, just expand # the comparison to reduce any unnecessary comparisons if ( ( msg_apid == buffer_matches_msg_apid and msg_ctid == buffer_matches_msg_ctid and msg_payload_decoded == buffer_matches_msg_payload_decoded ) or msg.ecuid == BUFFER_MATCHES_ECUID ) and len(self._buffered_traces) < MAX_BUFFER_SIZE: self._buffered_traces.append(msg) continue # We found a start message, if this is the first ever then just start a new lifecycle, # process any buffered messages and proceed. If we already have a lifecycle, then end that # lifecycle and proceed as previously stated. # # Optimization: don't use msg.compare here, just expand # the comparison to reduce any unnecessary comparisons if ( msg_apid == dlt_lifecycle_start_apid and msg_ctid == dlt_lifecycle_start_ctid and msg_payload_decoded == dlt_lifecycle_start_payload_decoded ): if lifecycle: lifecycle.set_last_msg(last_msg) self.end_lifecycle(lifecycle, lifecycle.lifecycle_id) lifecycle_id += 1 lifecycle = self.setup_lifecycle(msg=msg, lifecycle_id=lifecycle_id) logger.info("DLT Analysis Starting life cycle %d", lifecycle.lifecycle_id) if not lifecycle: lifecycle = self.setup_lifecycle(msg, lifecycle_id=lifecycle_id, process_buffer=True) if self._buffered_traces: self.process_buffer() # Optimization: # 1. Inline the self.process_message function, it could # reduce at 5 byte-code instructions and we could use # local variables to reduce the access time for plugin # lists. # 2. loop unrolling for these plugin lists. Without # performance consideration, we could use itertool.chains # to reduce the bolierplate code. But it is slower 3x # than the unrolling version. # 3. Inline the exception handing rather than use a context # manager. It reduces at least 10 byte-code instructions. # 4. Remove the recording the execution time for each plugin # It could speed up more than 5% execution time. If you # have need to know the execution time for each plugin, # you could replace the try-except block with # `handle_plugin_exceptions` to get it. # 5. Return a empty tuple when the plugin list is not found, # a tuple is a singleton object, it avoids any unnecessary # object constructions/destructions. 
for plugin in msg_plugins_getter((msg_apid, msg_ctid), ()): try: plugin(msg) except: # noqa: E722 make_plugin_exception_message(plugin, "calling", traceback.format_exc(), sys.exc_info()) for plugin in apid_plugins_getter(msg_apid, ()): try: plugin(msg) except: # noqa: E722 make_plugin_exception_message(plugin, "calling", traceback.format_exc(), sys.exc_info()) for plugin in ctid_plugins_getter(msg_ctid, ()): try: plugin(msg) except: # noqa: E722 make_plugin_exception_message(plugin, "calling", traceback.format_exc(), sys.exc_info()) for plugin in greedy_plugins: try: plugin(msg) except: # noqa: E722 make_plugin_exception_message(plugin, "calling", traceback.format_exc(), sys.exc_info()) last_msg = msg if lifecycle: lifecycle.set_last_msg(last_msg) old_lifecycle = lifecycle # If the files only contained bufferable traces less than MAX_BUFFER_SIZE # we create a life_cycle 0 to accommodate these msgs if not lifecycle and self._buffered_traces: lifecycle = self.setup_lifecycle(msg=msg, lifecycle_id=lifecycle_id, process_buffer=True) old_lifecycle = lifecycle if old_lifecycle: self.process_buffer() self.end_lifecycle(old_lifecycle, lifecycle_id) return self.generate_reports(xunit, testsuite_name) def generate_reports(self, xunit, testsuite_name): """Generates reports at the end of execution""" logger.info("Generating reports") xreport = XUnitReport(outfile=xunit, testsuite_name=testsuite_name) run_result = 0 file_results = [] for plugin in self.plugins: output = "Report for {} ... ".format(plugin.get_plugin_name()) with handle_plugin_exceptions(plugin, "calling report"): plugin.report() run_result |= 0 if plugin.report_exceptions() else 2 for state in ["success", "error", "failure", "skipped"]: output += "{} {} ".format(len([x for x in plugin.get_results() if x.state == state]), state) if all([x.state in ["success", "skipped"] for x in plugin.get_results()]): output += "= passed." else: output += "= failed." run_result |= 1 stdoutlogger.debug("- Error report for %s:", plugin.get_plugin_name()) for result in plugin.get_results(): if result.state != "success": stdoutlogger.debug(result.message) stdoutlogger.debug(result.stdout) stdoutlogger.info(output) xreport.add_results(plugin.get_results()) for filename in self.traces: output = "Report for file" if filename in self.file_exceptions: stdoutlogger.debug(self.file_exceptions[filename]) stdoutlogger.info("%s %s ... = failed", output, filename) file_results.append( Result( classname="DLTAnalyser", testname="File Sanity Checks During Execution", state="error", stdout=self.file_exceptions[filename], message=self.file_exceptions[filename], ) ) else: stdoutlogger.info("%s %s ... 
= passed", output, filename) file_results.append( Result( classname="DLTAnalyser", testname="File Sanity Checks During Execution", state="success", stdout="File Parsed Successfully", message="File Parsed Successfully", ) ) xreport.add_results(file_results) if self.file_exceptions: run_result |= 4 xreport.render() logger.info("Done.") return run_result def setup_lifecycle(self, msg, lifecycle_id, process_buffer=False): """Setup a new lifecycle by setting correct properties""" lifecycle = DLTLifecycle(ecu_id=msg.ecuid, lifecycle_id=lifecycle_id) lifecycle.set_first_msg(msg) self.start_lifecycle(lifecycle.ecu_id, lifecycle.lifecycle_id) if process_buffer: self.process_buffer() return lifecycle @contextmanager def handle_file_exceptions(self, file_name): """Catch all exceptions and store them in the DLTAnalyzer.file_exceptions structure""" try: yield except IOError as err: # pylint: disable=bare-except message = "Error Loading File {} - {}".format(file_name, err) logger.exception(message) self.file_exceptions[file_name] = message def stop_signal_handler(self, signum, frame): """Catch SIGINT to stop any further analyzing of DLT Trace file in a live run""" logging.debug("Signal Handler called with signal:%d", signum) self.dlt_file.stop_reading.set() dltlyse-1.4.3/src/dltlyse/core/plugin_base.py000066400000000000000000000420701434210763300212460ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. """Base class for dltlyse plugins""" import copy import csv import functools import inspect import logging import os import re from typing import List, Optional, Tuple from abc import ABCMeta, abstractmethod from collections import defaultdict from dltlyse.core.report import Result from dltlyse.core.utils import round_float # pylint: disable= unsupported-membership-test EXTRACT_DIR = "extracted_files" logger = logging.getLogger(__name__) def plugin_metadata(**kwargs): """Plugin metadata decorator for Plugin class You can add metadata information in your Plugin class. For example, @plugin_metadata(type="test", function="monitor") class TestMetadataPlugin(Plugin): pass The metadata is stored in cls.plugin_metadata >>> print(TestMetadataPlugin.plugin_metadata) {'type': 'test', 'function': 'monitor'} If the class is derived from another Plugin with metadata, the class also has the metadata of parent class. If the parent and derived class have the same key, the value will be from derived class. For example, @plugin_metadata(function="logging", extra="extra") class TestMetadataLoggingPlugin(Plugin): pass >>> print(TestMetadataPlugin.plugin_metadata) {"type": "test", "function": "logging", "extra": "extra"} You can get the complete example from dltlyse/core/tests/test_plugin_report.py The current usage is that the dltlyse xunit report will show metadata for each plugin. """ def _metadata(cls): # pylint: disable=missing-docstring metadata_key = "plugin_metadata" metadata = copy.deepcopy(getattr(cls, metadata_key, {})) metadata.update(kwargs) setattr(cls, metadata_key, metadata) return cls return _metadata class Plugin(object): """dltlyse Plugin base class""" __metaclass__ = ABCMeta # message filters are filters that will be used during loading DLT trace file. Each plugin defines # list of (APID, CTID) pairs and only messages matching those criteria are read from the DLT trace file. # This is used for speed optimization # Limitation: DLT library only supports adding 30 filters. 
If we collect more than 30 filter pairs, the whole # mechanism is disabled # For special purposes when you need to analyse all messages, you can define message_filters = "all" # which also disables the filtering completely. message_filters: List[Optional[Tuple[str, str]]] = [] manually_executed = False # True if a plugin should be manually selected (not automatic execution). def __init__(self): self.__results = [] self.__exceptions = [] self.__timings = defaultdict(float) @abstractmethod def __call__(self, message): """object will be called for every message param: DltMessage message: object represeting a single line in DLT log. Commonly used methods are: message.compare(dict(apid="APP", ctid="CONTEXT")) -- match a message to a filter str(message.payload_decoded) - full payload of the message as a string message.payload - a list with payload data fields with their types message.tmsp - message timestamp (relative to lifecycle start) """ pass @classmethod def get_plugin_name(cls): """Return plugin name""" return cls.__name__ @abstractmethod def report(self): """Report the run after all messages has been read""" pass def add_result(self, **kwargs): """Adds a Result object with set values :param str state: possible values "success", "error", "failures", "skipped" :param str message: log for the result :param str stdout: stdout :param str stderr: stderr """ # Parse class name kwargs.setdefault("classname", self.get_plugin_name()) # Parse class docstring plugin_docstring = inspect.getdoc(self) # Parse plugin short description kwargs.setdefault("testname", plugin_docstring.splitlines()[0] if plugin_docstring else "") # Parse plugin metadata and add plugin docstring metadata = copy.deepcopy(getattr(self, "plugin_metadata", {})) metadata["docstring"] = plugin_docstring or "" kwargs.setdefault("metadata", metadata) self.__results.append(Result(**kwargs)) def add_attachments(self, attachments): """Adds attachments to the last result, creating a result if none exist""" if not self.__results: self.add_result() if attachments: self.__results[-1].attach.extend(attachments) def add_timing(self, action, timing): """Add time used by the plugin in an action""" self.__timings[action] += timing def add_exception(self, message): """Add an exception message""" if message not in self.__exceptions: self.__exceptions.append(message) def report_exceptions(self): """Report all detected exceptions""" logger.debug( "Timings of plugin %s: %s", self.get_plugin_name(), {k: str(round_float(v, 2)) for k, v in self.__timings.items()}, ) if self.__exceptions: self.add_result( testname="Exceptions during execution", state="error", message="Exceptions detected while executing the plugin", stdout="\n-------------\n".join(self.__exceptions), ) return False return True def get_results(self): """Return the results object""" return self.__results def new_lifecycle(self, ecu_id, lifecycle_id): # pylint: disable=no-self-use,unused-argument """Called at the start of each lifecycle (including first)""" pass def end_lifecycle(self, ecu_id, lifecycle_id): # pylint: disable=no-self-use,unused-argument """Called at the end of each lifecycle (including last)""" pass class CSVPlugin(Plugin): # pylint: disable=abstract-method """Base class for plugins that output a CSV file as an output""" # If you have only one file you can use these two lines csv_filename = None # If you only have one file, you can use this. 
Set to "subdir/example.csv" in subclass csv_fields = None # If using only one file, set this to the list of column headers # If you want to use multiple CSV files, please use csv_filenames and provide columns per file csv_filenames = None # Examples: # csv_filenames = {} # csv_filenames ["my_csvfile.csv"] = ["column1", "column2", ...] # csv_filenames ["my_subdir/my_csvfile2.csv"] = ["column2.1", "column2.2", ...] def __init__(self): self._csv = {} self._csv_fileobj = {} # for backward compatibility: if csv_filename was defined, add it to csv_filenames if self.csv_filename: self.csv_filenames = {self.csv_filename: self.csv_fields} super(CSVPlugin, self).__init__() def _create_csvfile(self, filename=None): """Create csv file and add first row with column names""" filename = filename or list(self.csv_filenames)[0] pathname = os.path.join(EXTRACT_DIR, filename) if not os.path.exists(os.path.dirname(pathname)): os.makedirs(os.path.dirname(pathname)) self._csv_fileobj[filename] = open(pathname, "w") self._csv[filename] = csv.writer(self._csv_fileobj[filename]) if self.csv_filenames[filename]: # Only write header line if columns are defined. self._csv[filename].writerow(self.csv_filenames[filename]) else: logger.debug("No header line written to file %s", filename) def writerow(self, data_row, filename=None): """Write a row to CSV file""" filename = filename or list(self.csv_filenames)[0] if filename not in self._csv: self._create_csvfile(filename) self._csv[filename].writerow(data_row) def writerows(self, data_rows, filename=None): """Write several rows to csv file""" filename = filename or list(self.csv_filenames)[0] if filename not in self._csv: self._create_csvfile(filename) self._csv[filename].writerows(data_rows) def report(self): """Write the csv file""" self._close_csv_files() self.add_attachments(self.csv_filenames.keys()) def _close_csv_file(self, filename=None): """Close CSV file""" filename = filename or list(self.csv_filenames)[0] if self._csv[filename]: self._csv_fileobj[filename].close() def _close_csv_files(self): """Close all CSV files""" for filename in self._csv: self._close_csv_file(filename) class LifecycleCSVPlugin(CSVPlugin): # pylint: disable=abstract-method """Used to create a set of csv files for every lifecycle""" # These will be copied to csv_filenames and csv_fields for every lifecycle lifecycle_csv_filenames = None __all_csv_filenames = None def new_lifecycle(self, ecu_id, lifecycle_id): """Creates the CSV files for the lifecycle""" base_folder = "Lifecycles/{0:02}".format(lifecycle_id) self.csv_filenames = {os.path.join(base_folder, k): v for k, v in self.lifecycle_csv_filenames.items()} super(LifecycleCSVPlugin, self).new_lifecycle(ecu_id, lifecycle_id) def end_lifecycle(self, ecu_id, lifecycle_id): """Closes the CSV files and stores them for attaching to the result""" self._close_csv_files() if not self.__all_csv_filenames: self.__all_csv_filenames = [] self.__all_csv_filenames.extend(self.csv_filenames.keys()) super(LifecycleCSVPlugin, self).end_lifecycle(ecu_id, lifecycle_id) def report(self): """Attaches all CSV files to the result""" self.add_attachments(self.__all_csv_filenames) def find_file(self, filename): """Find a filename matching a substring from the current lifecycle""" return [afile for afile in self.csv_filenames.keys() if filename in afile][0] def dlt_callback(app_id=None, ctx_id=None): """Decorates a method which is intended to be used as a callback for dltlyse. It collects the app_id and ctx_id values, and saves them into the method. 

    Args:
        app_id(str): if defined, is the app_id that we want to catch.
        ctx_id(str): if defined, is the ctx_id that we want to catch.
    """

    def wrapper(func):  # pylint: disable=missing-docstring
        func.filter_condition = app_id or "", ctx_id or ""
        return func

    return wrapper


class CallBacksAndReportPlugin(Plugin):  # pylint: disable=abstract-method
    """An extended version of the dltlyse Plugin, which automatically handles some common operations.

    A get_report method is provided, which automatically gets the report_output member, converts it
    to a string and writes the result to a file; the filename is the class name (with each capital
    letter converted to '_' + its lowercase) plus a .txt extension. So, basically a plugin just has
    to collect its data and put it in the report_output member. get_report calls prepare_report
    before writing the report to the file, because sometimes a preparation is needed to generate
    the final report.

    This plugin also provides a facility for registering callbacks: it's enough to decorate them
    with dlt_callback, providing the app_id and/or ctx_id filters (see dlt_callback's docstring).
    All methods which are decorated will be automatically retrieved and registered.

    Example:
    @dlt_callback('LTM', 'MAIN')
    def gather_version_info(self, frame):
        pass

    The plugin will then take care of calling the registered callbacks only when the proper filter
    conditions are matched, so the callbacks only have to look at the payload.

    Finally, it automatically creates a logger using the class name; the logger is available as
    the logger member.
    """

    def __init__(self):
        """Automatically sets a default for report (None -> no report) and logger."""
        self.collect_and_register_callbacks()

        self.report_output = None  # Should be defined before calling the parent constructor.

        super(CallBacksAndReportPlugin, self).__init__()

        self.logger = logging.getLogger(self.get_plugin_name())

    def collect_and_register_callbacks(self):
        """Collects and registers all dlt callbacks.

        The dlt callbacks should be decorated with the dlt_callback decorator.
        It also registers all message filters in class.message_filters.
        """
        self.dlt_callbacks = defaultdict(list)
        self.dlt_greedy_callbacks = []

        for member_name in dir(self):  # Scans the class members.
            member = getattr(self, member_name)
            filter_condition = getattr(member, "filter_condition", None)
            if filter_condition:
                if filter_condition[0] or filter_condition[1]:
                    if self.message_filters != "all":
                        self.message_filters.append(filter_condition)  # pylint: disable=no-member
                    self.dlt_callbacks[filter_condition].append(member)
                else:
                    self.message_filters = "all"
                    self.dlt_greedy_callbacks.append(member)

    # pylint: disable=invalid-name
    def add_callback_from_template_function(self, template_function, app_id, ctx_id, userdata):
        """Adds an additional callback which is automatically generated from a "template" function
        or method.

        Args:
            template_function(function or method): a function or method that acts as a template,
                to be "specialized" (according to the given app_id, ctx_id, payloads) to catch
                specific traces.
            app_id(str): the app id.
            ctx_id(str): the context id.
            userdata(object): normally a sequence of strings that should be matched in the trace
                payload, but in reality it can be anything, since it's up to the template function
                to use this parameter as it wants.
        """
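        # Illustrative call (a sketch; "mtee_callback" is a made-up template function):
        #     self.add_callback_from_template_function(mtee_callback, "MTEE", "MTEE", "START")
        # registers a callback that fires only for traces with APID "MTEE" and CTID "MTEE".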
        # Data should be converted to strings, since dltlyse fails to register a filter if it's using unicode strings.
        app_id, ctx_id, userdata = (
            str(app_id),
            str(ctx_id),
            str(userdata) if isinstance(userdata, str) else userdata,
        )

        callback = functools.partial(template_function, app_id=app_id, ctx_id=ctx_id, userdata=userdata)
        callback = dlt_callback(app_id, ctx_id)(callback)

        filter_condition = app_id, ctx_id
        if filter_condition[0] or filter_condition[1]:
            if self.message_filters != "all":
                self.message_filters.append(filter_condition)  # pylint: disable=no-member
            self.dlt_callbacks[filter_condition].append(callback)
        else:
            self.message_filters = "all"
            self.dlt_greedy_callbacks.append(callback)

    def get_result_dir(self):
        """Return the result directory"""
        if not os.path.exists(EXTRACT_DIR):
            os.makedirs(EXTRACT_DIR)

        return EXTRACT_DIR

    def report_filename(self):
        """Builds & returns a standard/base filename for the report."""
        # Converts all uppercase letters to lowercase, pre-pending them with a '_'.
        report_filename = re.sub(r"([A-Z])", r"_\1", self.get_plugin_name())
        return report_filename.lower().strip("_") + ".txt"

    def prepare_report(self):
        """It's invoked just before writing the report to file, in case some operation needs to be
        done to prepare the report in its final/required format.
        """
        pass

    def get_report(self):
        """Provides automatic report generation.

        prepare_report is called to ensure that the report is ready for writing; the report_output
        member is then converted to a string and written to the report file.
        """
        self.prepare_report()

        if self.report_output is None:
            return "No report is generated!"

        return self.write_to_domain_file(self.report_filename(), str(self.report_output))

    def write_to_domain_file(self, filename, report):
        """Write the given report to a file.

        Args:
            filename(str): the filename.
            report(str): the string with the report to be saved.
        """
        fullpath = os.path.join(self.get_result_dir(), filename)
        with open(fullpath, "w") as report_file:
            report_file.write(report)

        self.logger.info("See %s", fullpath)

        return fullpath

    def __call__(self, message):
        """Dispatches the message to the registered callbacks.

        The callbacks were registered with the dlt_callback decorator.
        """
        for callback in self.dlt_callbacks[message.apid, message.ctid]:  # pylint: disable=no-member
            callback(message)

        for callback in self.dlt_greedy_callbacks:  # pylint: disable=no-member
            callback(message)
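
# Illustrative sketch (not a shipped plugin): a callback-based plugin built on
# CallBacksAndReportPlugin; "VersionPlugin" is a made-up name, the LTM/MAIN
# filter is taken from the docstring example above.
#
#     class VersionPlugin(CallBacksAndReportPlugin):
#         """Collects a version string from LTM/MAIN traces"""
#
#         @dlt_callback("LTM", "MAIN")
#         def gather_version_info(self, message):
#             self.report_output = str(message.payload_decoded)
#
# get_report() would then write report_output to "version_plugin.txt", the
# filename derived by report_filename() from the class name.
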
dltlyse-1.4.3/src/dltlyse/core/report.py000066400000000000000000000127711434210763300202740ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved.
"""Reporting for dltlyse"""
from collections import Counter
import logging
import xml.etree.ElementTree as etree

ATTACHMENT_TEMPLATE = "[[ATTACHMENT|{filename}]]"

TEST_CASE_RESULT_TYPE = {
    "success": "success",
    "error": "error",
    "failure": "failure",
    "skipped": "skip",
}

logger = logging.getLogger(__name__)  # pylint: disable=invalid-name


class Metadata(object):
    """Store the metadata of a result

    The metadata is a dict. It can contain any type of data, but each value is finally rendered
    with `str()`.

    The class is internally used by `class Result`. Normally you should not use the class
    directly.
    """

    def __init__(self, metadata=None):
        self.metadata = metadata or {}

    def _render_xml(self, node, metadata):
        """Real implementation for render_xml

        It parses self.metadata and transforms it into an XML element. If the type of a value is
        a dict, it parses it recursively. Otherwise, it converts the value with `str()`.
        """
        for key, value in sorted(metadata.items(), key=lambda keyvalue: keyvalue[0]):
            item = etree.SubElement(node, "item", name=key)

            if isinstance(value, dict):
                self._render_xml(item, value)
            else:
                item.text = str(value)

    def render_xml(self):
        """Return an XML element representing the metadata

        The function is a wrapper for `Metadata._render_xml`; see it for more details.
        """
        if not self.metadata or not isinstance(self.metadata, dict):
            return None

        root = etree.Element("metadata")
        self._render_xml(root, self.metadata)

        return root


class Result(object):
    """Class representing a single testcase result"""

    def __init__(
        self,
        classname="Unknown",
        testname="Unknown",
        state="success",
        stdout="",
        stderr="",
        message="",
        metadata=None,
        attach=None,
    ):
        self.classname = classname
        self.testname = testname
        self.state = state
        self.stdout = stdout
        self.stderr = stderr
        self.message = message
        if not attach:
            attach = []
        self.attach = attach
        self.metadata = Metadata(metadata)

    def __repr__(self):
        return repr(self.__dict__)

    def __eq__(self, other):
        self_dict = self.__dict__.copy()
        del self_dict["metadata"]

        other_dict = other.__dict__.copy()
        del other_dict["metadata"]

        return self_dict == other_dict

    def render_xml(self):
        """Return an XML element representing the test result"""
        if self.state not in TEST_CASE_RESULT_TYPE:
            logger.warning("Not supported for the test state: %s - plugin: %s", self.state, self.classname)
            self.state = "error"

        # Prepare test case root
        root = etree.Element("testcase", classname="dltlyse." + self.classname, name=self.testname, time="0")

        # Set attachment
        root.text = "".join(ATTACHMENT_TEMPLATE.format(filename=filename) for filename in self.attach)

        # If the result is not success, output state and error message
        if self.state != "success":
            root.append(etree.Element(self.state, type=TEST_CASE_RESULT_TYPE[self.state], message=self.message))

        # Output stdout
        stdout = etree.SubElement(root, "system-out")
        stdout.text = str(self.stdout)

        # Add metadata
        metadata = self.metadata.render_xml()
        if metadata is not None:
            root.append(metadata)

        return root


class XUnitReport(object):
    """Template class producing a report in xUnit format"""

    def __init__(self, outfile="", testsuite_name="dltlyse"):
        self.results = []
        self.outfile = outfile
        self.testsuite_name = testsuite_name

    def add_results(self, results):
        """Adds results to the report"""
        self.results.extend(results)

    def _generate_summary(self):
        """Count the number of test cases per state"""
        counts = Counter(x.state for x in self.results)

        return {
            "number_of_errors": str(counts["error"]),
            "number_of_failures": str(counts["failure"]),
            "number_of_skipped": str(counts["skipped"]),
            "number_of_tests": str(len(self.results)),
        }

    def render_xml(self):
        """Return an XML element representing the report"""
        summary = self._generate_summary()

        root = etree.Element(
            "testsuite",
            name=self.testsuite_name,
            tests=summary["number_of_tests"],
            errors=summary["number_of_errors"],
            failures=summary["number_of_failures"],
            skip=summary["number_of_skipped"],
        )

        result_elements = []
        for result in self.results:
            try:
                element = result.render_xml()
                result_elements.append(element)
            except Exception as err:  # pylint: disable=broad-except
                logger.error("Render result error: %s - %s", result, err)

        root.extend(result_elements)

        return root

    def render(self):
        """Renders an xUnit report to file"""
        if not self.outfile:
            return

        # Generate the xml element tree
        tree = etree.ElementTree(self.render_xml())

        # Write to file
        with open(self.outfile, "wb") as report_file:
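            # With xml_declaration=True, ElementTree writes the
            # <?xml version='1.0' encoding='UTF-8'?> declaration before the testsuite element.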
tree.write(report_file, encoding="UTF-8", xml_declaration=True, method="xml") dltlyse-1.4.3/src/dltlyse/core/utils.py000066400000000000000000000313331434210763300201160ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. """Helper functions""" import atexit import logging import os import tempfile from decimal import Decimal import xml.dom.minidom from xml.etree.ElementTree import Element, SubElement, tostring as xml_element_to_string logger = logging.getLogger(__name__) dlt_example_stream = ( b"DLT\x01#o\xd1WD>\x0c\x00MGHS5\x00\x00YMGHS\x00\x01\x80\xd1&\x01DA1\x00DC1\x00\x03\x00\x00\x00" b"\x07\x01\x00SYS\x00\x01\x00FILE\xff\xff\x16\x00File transfer manager.\x12\x00" b"DLT System ManagerremoDLT\x01#o\xd1Wo>\x0c\x00MGHS=\x00\x01PMGHS\x00\x00\x03\xf4\x00" b"\x01i\xa6A\x05SYS\x00JOUR\x00\x02\x00\x00\x1b\x002011/11/11 11:11:18.005274\x00\x00\x02\x00\x00" b"\t\x006.005274\x00\x00\x02\x00\x00\x16\x00systemd-journal[748]:\x00\x00\x02\x00\x00\x0f\x00" b"Informational:\x00\x00\x02\x00\x00\xcf\x00Runtime journal (/run/log/journal/) is currently" b" using 8.0M.\nMaximum allowed usage is set to 385.9M.\nLeaving at least 578.8M free (of" b" currently available 3.7G of space).\nEnforced usage limit is thus 385.9M.\x00" ) file_with_two_lifecycles = ( b"DLT\x01\xc5\x82\xdaX\x82o\x0e\x00MG1S=\x00\x00NMG1S" # first lifecycle b"\x00\x00\x02r\x00\x00\x8frA\x01DLTDINTM\x00\x02\x00\x00.\x00" b"Daemon launched. Starting to output traces...\x00" b"DLT\x01m\xc2\x91Y\x9f\xda\x07\x00MGHS5\x00\x00 MGHS" # no new lifecycle b"\x00\x00_\xde&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" b"DLT\x01m\xc2\x91Y\xad\xe4\x07\x00MGHS=\x01\x00zMGHS" # random trace b"\x00\x00\x02\xab\x00\x00@VA\x01DLTDINTM\x00\x02\x00\x00Z\x00" b"ApplicationID 'DBSY' registered for PID 689, Description=DBus" b" Logging|SysInfra|Log&Trace\n\x00" b"DLT\x01\xed\xc2\x91Y\x0f\xf0\x08\x00MGHS5\x00\x00 MGHS" # trace to buffer b"\x00\x00\x9dC&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" b"DLT\x01\xed\xc2\x91Y\x17.\n\x00MG2S=\x00\x00NMG2S" # new lifecycle b"\x00\x00\x02\xae\x00\x00@/A\x01DLTDINTM\x00\x02\x00\x00.\x00" b"Daemon launched. Starting to output traces...\x00" ) file_with_lifecycles_without_start = ( b"DLT\x01\xc5\x82\xdaX\x19\x93\r\x00XORA'\x01\x00\x1bXORA" # trace to buffer b"\x16\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x04\x00\x00\x00\x00" b"DLT\x01\xc5\x82\xdaXQi\x0e\x00MGHS5\x00\x00 MGHS" # trace to buffer b"\x00\x03U\xe0&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" b"DLT\x01m\xc2\x91Y\xad\xe4\x07\x00MGHS=\x01\x00zMGHS" # random trace b"\x00\x00\x02\xab\x00\x00@VA\x01DLTDINTM\x00\x02\x00\x00Z\x00" b"ApplicationID 'DBSY' registered for PID 689, Description=DBus" b" Logging|SysInfra|Log&Trace\n\x00" b"DLT\x01\xed\xc2\x91Y\x0f\xf0\x08\x00MGHS5\x00\x00 MGHS" # trace to buffer b"\x00\x00\x9dC&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00" b"DLT\x01\xed\xc2\x91Y\x17.\n\x00MG3S=\x00\x00NMG3S" # new lifecycle b"\x00\x00\x02\xae\x00\x00@/A\x01DLTDINTM\x00\x02\x00\x00.\x00" b"Daemon launched. 
Starting to output traces...\x00"
)

single_random_dlt_message = bytearray(
    b"DLT\x01m\xc2\x91Y\xad\xe4\x07\x00MGHS=\x01\x00zMGHS"  # random trace
    b"\x00\x00\x02\xab\x00\x00@VA\x01DLTDINTM\x00\x02\x00\x00Z\x00"
    b"ApplicationID 'DBSY' registered for PID 689, Description=DBus"
    b" Logging|SysInfra|Log&Trace\n\x00"
)

start_dlt_message = bytearray(
    b"DLT\x01\xed\xc2\x91Y\x17.\n\x00MGHS=\x00\x00NMGHS"  # new lifecycle
    b"\x00\x00\x02\xae\x00\x00@/A\x01DLTDINTM\x00\x02\x00\x00.\x00"
    b"Daemon launched. Starting to output traces...\x00"
)

single_random_corrupt_dlt_message = bytearray(
    b"\x00\x00\x02\xab\x00\x00@VA\x01DLTDINTM\x00\x02\x00\x00Z\x00"  # random corrupt trace
    b"ApplicationID 'DBSY' registered for PID 689, Description=DBus"
    b" Logging|SysInfra|Log&Trace\n\x00"
)

single_bufferable_trace_1 = bytearray(
    b"DLT\x01\xc5\x82\xdaX\x19\x93\r\x00XORA'\x01\x00\x1bXORA"  # trace to buffer
    b"\x16\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x11\x04\x00\x00\x00\x00"
)

single_bufferable_trace_2 = bytearray(
    b"DLT\x01\xc5\x82\xdaXQi\x0e\x00MGHS5\x00\x00 MGHS"  # trace to buffer
    b"\x00\x03U\xe0&\x01DA1\x00DC1\x00\x02\x0f\x00\x00\x00\x02\x00\x00\x00\x00"
)


def seconds_to_human_readable(seconds):
    """Splits seconds and returns a string in the form hr:min:secs.ms"""
    secs, msecs = divmod(seconds, 1)
    mins, secs = divmod(int(seconds), 60)
    hrs, mins = divmod(mins, 60)

    return "{:d}:{:02d}:{:02d}.{:02.0f}".format(hrs, mins, secs, msecs * 100)


def data_to_xml_tree(data, parent=None, child_tag=None, child_attrib=None):
    """Converts a Python structure into an ElementTree structure.

    The key concept when using this function is that generating a valid XML ElementData requires
    three pieces of information: tag, attributes, and value/children. Some of this information can
    be omitted if it's optional (so, not specified) or if it can already be extracted from the
    context. Of course, at least the tag information should be provided in some way.

    Usually a tuple of three elements is passed to fully qualify all three required data. For
    example, passing ('foo', {'bar': '123'}, 'spam') as the data parameter generates an
    ElementTree structure which, once converted to string, looks like:

        <foo bar="123">spam</foo>

    To generate only a node with the tag, it's enough to call the function with only a string as
    parameter (the tag). For example, 'foo' gives back:

        <foo/>

    That's because no tuple was provided, but only a basic primitive (a string), and since the tag
    is mandatory, it's automatically assumed that the string has to be used as the tag.

    Instead, passing the tuple ('foo', 'bar') generates:

        <foo>bar</foo>

    In this case the second element should contain either the attributes or the value(s) of the
    tag, but since it's not a dictionary (the only data type which can be used to specify the list
    of attributes with their values), it's automatically assumed to be used as the value.

    Finally, passing ('foo', {'bar': '123'}) generates:

        <foo bar="123"/>

    That's because the two-element tuple doesn't carry enough information, but the second element
    is a dictionary, so it's automatically used for the tag's attributes.

    A list or tuple can also be passed as the tag's value, and in this case a deeper XML structure
    is generated. For example, passing ('foo', ['bar', 'spam']) generates:

        <foo><bar/><spam/></foo>

    The same logic defined before is applied to each list element, so a tuple/list can be passed
    as well, to better qualify each sub-tag. For example, passing
    ('foo', ['bar', ('spam', 123), ('droogs', {'milk': 'plus'})]) generates:

        <foo><bar/><spam>123</spam><droogs milk="plus"/></foo>

    Sometimes the sub-tags share the same tag name, so a mechanism is defined in order to avoid
    specifying it for all of them. In this case, a special key in the main tag's attributes can be
    used: '$tag'. For example, ('foo', {'$tag': 'bar'}, [1, 2, 3]) generates:

        <foo><bar>1</bar><bar>2</bar><bar>3</bar></foo>

    So, the application can focus on providing only the concrete data that should be generated.

    Similarly, if the sub-tags use the same attribute sets, a special key in the main tag's
    attributes can be used: '$attr'. For example,
    ('foo', {'$attr': {'bar': 'spam'}}, ['droogs', 'milk', 'plus']) generates:

        <foo><droogs bar="spam"/><milk bar="spam"/><plus bar="spam"/></foo>

    A combination of $tag and $attr can be used as well, so passing
    ('foo', {'$tag': 'bar', '$attr': {'milk': 'plus'}}, [1, 2, 3]) generates:

        <foo><bar milk="plus">1</bar><bar milk="plus">2</bar><bar milk="plus">3</bar></foo>

    Finally, it has to be noted that if the value information isn't a list or tuple, it'll be
    automatically converted to a string. For example, ('foo', datetime.datetime.now()) generates:

        <foo>2017-02-20 09:20:12.746000</foo>

    Args:
        data(list, tuple, dict, or any type): the Python data structure. See above for more
            details.
        parent(Element): the parent node (if available).
        child_tag(str, None): the tag to be used for direct children (if any).
        child_attrib(dict, None): the attributes to be used for direct children (if any).
    """
    # print('data_to_xml_tree: data={}, parent={}, child_tag={}, child_attrib={}'.format(
    #     data, parent, child_tag, child_attrib), file=out)
    attrib, value = {}, None
    if child_tag:  # Have: tag. Miss: attrib, value
        tag, child_tag = child_tag, None
        if child_attrib is not None:  # Have: tag, attrib. Miss: value
            attrib, child_attrib, value = child_attrib, {}, data
        else:  # Have: tag, Miss: attrib, value
            if isinstance(data, dict):
                attrib = data
            elif isinstance(data, (tuple, list)):
                if len(data) == 2:
                    attrib, value = data
                else:
                    tag, attrib, value = data[:3]
            else:
                value = data
    else:  # Miss: tag, attrib, value
        if child_attrib is not None:  # Have: attrib. Miss: tag, value
            attrib, child_attrib = child_attrib, {}
            if isinstance(data, (tuple, list)):
                if len(data) == 2:
                    tag, value = data
                else:
                    tag, attrib, value = data[:3]
            else:
                tag = data
        else:  # Miss: tag, attrib, value
            if isinstance(data, (tuple, list)):
                if len(data) == 2:
                    tag, data = data
                    if isinstance(data, dict):
                        attrib = data
                    else:
                        value = data
                else:
                    tag, attrib, value = data[:3]
            else:
                tag = data

    if attrib:
        # The original attribute dictionary should be preserved, because it might be used by other
        # tags. That's because we'll remove some keys, if they are present. See below.
        attrib = attrib.copy()

        new_child_tag = attrib.pop("$tag", None)
        if new_child_tag is not None:
            child_tag = new_child_tag

        new_child_attrib = attrib.pop("$attr", None)
        if new_child_attrib is not None:
            child_attrib = new_child_attrib

    text, children = (
        (None, value) if isinstance(value, (tuple, list)) else (str(value) if value is not None else None, ())
    )

    node = Element(tag, attrib) if parent is None else SubElement(parent, tag, attrib)
    if text is not None:
        node.text = text

    for child in children:
        data_to_xml_tree(child, node, child_tag, child_attrib)

    return node


def data_to_xml_string(data, prettify=True, indent="\t", newline="\n"):
    """Generates an XML string representation of a Python structure according to data_to_xml_tree.

    Args:
        data(list, tuple, dict, or any type): the Python data structure. See data_to_xml_tree.
        prettify(bool): True if the XML string should be reformatted with a nice output.
        indent(str): the string to be used for indenting the XML elements.
newline(str): the string to be used when an XML element is complete. """ xml_string = xml_element_to_string(data_to_xml_tree(data)) if prettify: xml_data = xml.dom.minidom.parseString(xml_string) xml_string = xml_data.toprettyxml(indent, newline) return xml_string def create_temp_dlt_file(stream=None, dlt_message=None, empty=False): """Creates temporary DLT trace files for testing purposes Args: stream: A byte stream variable containing a stream in byte hex format dlt_message(DLTMessage object): A dlt message object to be converted into temporary file empty(bool): True will just create an empty DLT file """ _, tmpname = tempfile.mkstemp() if empty: return tmpname msg = () if dlt_message: msg = dlt_message.to_bytes() else: msg = stream tmpfile = open(tmpname, "wb") tmpfile.write(msg) tmpfile.flush() tmpfile.seek(0) tmpfile.close() atexit.register(os.remove, tmpname) return tmpname def round_float(val, precision=4): """Rounds off the floating point number to correct precision regardless of underlying platform floating point precision Args: val(float): The value that needs to be rounded off precision(int): Number of decimal places to round off """ decimal_points = Decimal(10) ** -(precision) result_val = Decimal(val).quantize(decimal_points) return result_val if result_val.normalize() == result_val.to_integral() else result_val.normalize() dltlyse-1.4.3/src/dltlyse/mock_dlt_message.py000066400000000000000000000020721434210763300213240ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. """Helpers for dltlyse plugin testing""" class MockDLTMessage(object): """Mock DLT message for dltlyse plugin testing""" def __init__(self, ecuid="MGHS", apid="SYS", ctid="JOUR", sid="958", payload="", tmsp=0.0, sec=0, msec=0, mcnt=0): self.ecuid = ecuid self.apid = apid self.ctid = ctid self.sid = sid self.payload = payload self.tmsp = tmsp self.mcnt = mcnt self.storageheader = MockStorageHeader(sec=sec, msec=msec) def compare(self, target): """Compare DLT Message to a dictionary""" return target == {k: v for k, v in self.__dict__.items() if k in target.keys()} @property def payload_decoded(self): """Fake payload decoding""" return self.payload def __repr__(self): return str(self.__dict__) class MockStorageHeader(object): """Mock DLT storage header for plugin testing""" def __init__(self, msec=0, sec=0): self.microseconds = msec self.seconds = sec dltlyse-1.4.3/src/dltlyse/plugins/000077500000000000000000000000001434210763300171325ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/plugins/__init__.py000066400000000000000000000000001434210763300212310ustar00rootroot00000000000000dltlyse-1.4.3/src/dltlyse/plugins/context.py000066400000000000000000000012571434210763300211750ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. """Count DLTD INTM DLT messages""" from dltlyse.core.plugin_base import Plugin class ContextPlugin(Plugin): """Count DLTD INTM messages""" message_filters = [("DLTD", "INTM")] matched_messages = 0 def __call__(self, message): if message.apid == "DLTD" and message.ctid == "INTM": self.matched_messages += 1 def report(self): if self.matched_messages > 0: self.add_result(stdout="found {} DLTD INTM messages".format(self.matched_messages)) else: self.add_result(state="failure", message="could not find any DLTD INTM messages in the trace") dltlyse-1.4.3/src/dltlyse/plugins/extract_files.py000066400000000000000000000133231434210763300223420ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. 
All rights reserved.
"""Extracting all files from DLT trace

Example:
$ python dltlyse.py -p ExtractFilesPlugin vmwx86_full_trace.dlt
"""
import logging
import os
from typing import Dict
from collections import OrderedDict

from dltlyse.core.plugin_base import Plugin, EXTRACT_DIR

COREDUMP_DIR = "Coredumps"
FULL_EXTRACT_DIR = os.path.join(EXTRACT_DIR, COREDUMP_DIR)

logger = logging.getLogger(__name__)


class File(object):
    """File data"""

    def __init__(self, transfer_id, filename):
        self.transfer_id = transfer_id
        self.filename = filename
        self.index = 0
        self.error = False
        self.finished = False

        # store the temporary (part) file in the extracted_files/Coredumps/${transfer_id}/${filename}.part
        self._part_filepath = os.path.join(FULL_EXTRACT_DIR, self.transfer_id, self.filename + ".part")

        # warn if the file has been already extracted before (not finished extraction)
        if os.path.exists(self._part_filepath):
            logger.warning("File '%s' exists already!", self._part_filepath)

        # make sure the extracted_files/Coredumps/${transfer_id} directory exists
        if not os.path.exists(os.path.join(FULL_EXTRACT_DIR, self.transfer_id)):
            os.makedirs(os.path.join(FULL_EXTRACT_DIR, self.transfer_id))

        self.handle = open(self._part_filepath, "wb")

    def close(self):
        """Close the handle and rename file to be completed"""
        self.handle.close()

        # move the file into the extracted_files/Coredumps/ if it does not exist already.
        # Otherwise keep it in the transfer_id subdirectory and remove the ".part" suffix
        if self.error is False:
            final_name = os.path.join(FULL_EXTRACT_DIR, self.filename)
            if not os.path.exists(final_name):
                os.rename(self._part_filepath, final_name)
                try:
                    os.rmdir(os.path.dirname(self._part_filepath))
                except OSError:
                    pass
            else:
                os.rename(self._part_filepath, os.path.join(os.path.dirname(self._part_filepath), self.filename))

    def __repr__(self):
        return self.filename


class ExtractFilesPlugin(Plugin):
    """Extracting all files from DLT trace"""

    message_filters = [("SYS", "FILE"), ("FLT", "FILE")]
    extracted_files: Dict[str, str] = {}
    success = False
    counter = 0

    def __call__(self, message):
        if message.apid in ["SYS", "FLT"] and message.ctid == "FILE":
            # file transfer payload header
            # FLST - file transfer start - first DLT message from the file transfer
            # ["FLST", transfer_id, filename, length, date, "FLST"]
            # FLDA - file data
            # ["FLDA", transfer_id, index, data, "FLDA"]
            # FLFI - file transfer end
            # ["FLFI", transfer_id, "FLFI"]
            payload_header = message.payload[0].decode("utf8")
            transfer_id = str(message.payload[1])  # used as a dictionary key

            if payload_header == "FLST":
                filename = message.payload[2].decode("utf8")
                filename = os.path.basename(filename)  # ignore whatever path is included in DLT
                logger.info("Found file '%s' in the trace", filename)
                extr_file = File(transfer_id=transfer_id, filename=filename)
                self.extracted_files[transfer_id] = extr_file
            elif payload_header == "FLDA":
                extr_file = self.extracted_files[transfer_id]
                extr_file.index += 1
                if extr_file.index != message.payload[2]:
                    if not extr_file.error:
                        logger.error(
                            "Expected index %d, got %d, failing file %s",
                            extr_file.index,
                            message.payload[2],
                            extr_file.filename,
                        )
                        extr_file.error = True
                extr_file.handle.write(message.payload[3])
            elif payload_header == "FLFI":
                extr_file = self.extracted_files[transfer_id]
                extr_file.finished = True
                extr_file.close()

    def report(self):
        bad_files = []
        text = "extracted files found:\n"
        sorted_extracted_files = OrderedDict(sorted(self.extracted_files.items()))
        successful_attachments = [
            os.path.join(COREDUMP_DIR, x.filename)
            for x in sorted_extracted_files.values()
            if not x.error and x.finished
        ]
        for extr_file in sorted_extracted_files.values():
            text += " - {}".format(extr_file.filename)
            if extr_file.error:
                bad_files.append(extr_file.filename)
                text += " ERROR: File parts missing!"
            if extr_file.finished is False:
                if os.path.join(COREDUMP_DIR, extr_file.filename) in successful_attachments:
                    # another file transfer of the same file succeeded
                    logger.warning("File '%s' is not complete", extr_file.filename)
                else:
                    # file hasn't been re-transferred - error
                    bad_files.append(extr_file.filename)
                    logger.error("File '%s' is not complete", extr_file.filename)
                    text += " ERROR: File not complete!"
            text += "\n"

        if bad_files:
            self.add_result(
                state="error",
                message="Error extracting {} files".format(len(set(bad_files))),
                stdout=text,
                attach=successful_attachments,
            )
        else:
            self.add_result(stdout=text, attach=successful_attachments)
dltlyse-1.4.3/src/dltlyse/plugins/sys_errors.py000066400000000000000000000030351434210763300217170ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved.
"""Search SYS|JOUR for detected errors"""
import collections
import re
from typing import DefaultDict, Set

from dltlyse.core.plugin_base import Plugin


class TestSysErrorPlugin(Plugin):
    """Errors found by SYS|JOUR"""

    # relevant APIDs and CTIDs to filter for
    # - SYS|JOUR: error detection
    message_filters = [("SYS", "JOUR")]

    shared_regex = re.compile(
        r"\[[0-9]*\]: (?P<program>\S*?): error while loading shared libraries: "
        r"(?P<library>\S*?): cannot open shared object file"
    )

    errors: DefaultDict[str, Set[str]] = collections.defaultdict(set)

    def __call__(self, message):
        """Handle traces"""
        if not (message.apid == "SYS" and message.ctid == "JOUR"):
            return

        payload_decoded = str(message.payload_decoded)

        match = self.shared_regex.search(payload_decoded)
        if match:
            self.errors["error while loading shared libraries"].add(
                "{} failed to load {}".format(match.group("program"), match.group("library"))
            )

    def report(self):
        """Report if errors were found"""
        if self.errors:
            message = "\n".join(self.errors.keys())
            stdout = []
            for error in self.errors:
                stdout.append("{}:\n{}".format(error, "\n".join(self.errors[error])))

            self.add_result(state="failure", message=message, stdout="\n---\n".join(stdout))
        else:
            self.add_result(message="No errors found")
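
# Illustrative example (not part of the plugin; the program and library names
# are made up): a journal payload that shared_regex is meant to match.
#
#     payload = ("foo.service[123]: /usr/bin/foo: error while loading shared "
#                "libraries: libbar.so.1: cannot open shared object file")
#     match = TestSysErrorPlugin.shared_regex.search(payload)
#     # match.group("program") == "/usr/bin/foo"
#     # match.group("library") == "libbar.so.1"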
"""Parses DLT messages from the Monitor tool to gather system RAM usage""" from csv import writer from dltlyse.core.plugin_base import Plugin class SysmemPlugin(Plugin): """Report system memory information""" message_filters = [("MON", "MEMS")] pathname = "sysmem_report.csv" lifecycle_csv_fields = ("lifecycle", "time", "mem_total", "mem_available", "buffers", "cached", "shared") field_mapping = { "MemTotal": "mem_total", "MemAvailable": "mem_available", "Buffers": "buffers", "Cached": "cached", "Shmem": "shared", } def __init__(self): self.csv_fileobj = None self.csv = None self.lifecycle = None self.min_mem_available = None super(SysmemPlugin, self).__init__() def new_lifecycle(self, ecu_id, lifecycle_id): """New device start""" if not self.csv: # Only create the report file if this plugin is actually run self.csv_fileobj = open(self.pathname, "w") self.csv = writer(self.csv_fileobj) self.csv.writerow(self.lifecycle_csv_fields) self.lifecycle = lifecycle_id super(SysmemPlugin, self).new_lifecycle(ecu_id, lifecycle_id) def __call__(self, message): data = {"lifecycle": str(self.lifecycle), "time": message.tmsp} for combo_value in message.payload_decoded.split("MB"): if ":" not in combo_value: continue field, value = combo_value.split(":") field = field.strip() value = int(float(value) * 1024) if field == "MemAvailable": self.min_mem_available = min(value, self.min_mem_available) if self.min_mem_available else value if field in self.field_mapping: data[self.field_mapping[field]] = value self.csv.writerow([str(data.get(k, "")) for k in self.lifecycle_csv_fields]) def end_lifecycle(self, ecu_id, lifecycle_id): """Device shut down""" self.csv_fileobj.flush() super(SysmemPlugin, self).end_lifecycle(ecu_id, lifecycle_id) def report(self): """Close report files and attach them to a test result""" self.csv.close() self.csv_fileobj.close() if self.min_mem_available < 1024 * 1024: self.add_result(message="Available memory dropped below 1Gb", state="failure") self.add_attachments(self.pathname) dltlyse-1.4.3/src/dltlyse/run_dltlyse.py000077500000000000000000000122061434210763300203730ustar00rootroot00000000000000#!/usr/bin/env python # Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. 
"""DLT file analyser""" import configparser import argparse import fnmatch import logging import os import sys from dltlyse.core.analyser import DLTAnalyser # pylint: disable=dangerous-default-value logger = logging.getLogger("dltlyse") def parse_options(args=sys.argv[1:]): """parse command line parameters""" # Turn off help, so we print all options in response to -h conf_parser = argparse.ArgumentParser(add_help=False) conf_parser.add_argument("-c", "--config", dest="config_file", metavar="FILE", help="Use specific config file") args, remaining_args = conf_parser.parse_known_args(args) defaults = {"plugins": None} if args.config_file: if not os.path.exists(args.config_file): raise IOError("Configuration file '{}' could not be found.".format(args.config_file)) config = configparser.ConfigParser() config.read([args.config_file]) defaults = dict(config.items("default")) # https://gist.github.com/von/949337/ # Don't surpress add_help here so it will handle -h parser = argparse.ArgumentParser( # Inherit options from config_parser parents=[conf_parser], # print script description with -h/--help description=__doc__, # Don't mess with format of description formatter_class=argparse.RawDescriptionHelpFormatter, ) # convert string to list if isinstance(defaults["plugins"], str): defaults["plugins"] = defaults["plugins"].split(",") parser.set_defaults(**defaults) parser.add_argument( "-d", "--plugins-dir", dest="plugin_dirs", action="append", default=[], help="Add directory to search for plugins", ) parser.add_argument( "--no-default-dir", dest="no_default_dir", action="store_true", default=False, help="Do not look for plugins in the default directories", ) parser.add_argument( "-p", "--plugins", dest="plugins", action="append", default=defaults["plugins"], help="Initialize only explicitly listed plugin classes", ) parser.add_argument("--exclude", dest="exclude", action="append", help="Exclude listed plugin classes") parser.add_argument( "-s", "--show-plugins", dest="show_plugins", action="store_true", default=False, help="Show available plugins" ) parser.add_argument( "-r", "--recursive", dest="recursive_search", action="store_true", default=False, help="Search directories for traces recursively", ) parser.add_argument( "-v", "--verbose", dest="verbose", action="store_true", default=False, help="Turn on verbose messages" ) parser.add_argument( "-x", "--xunit", dest="xunit", default="dltlyse_results.xml", help="Generate result file in xUnit format to the specified file", ) parser.add_argument( "--xunit-testsuite-name", dest="xunit_testsuite_name", default="dltlyse", help="Testsuite name used inside the xunit results file", ) parser.add_argument( "--no-sort", dest="no_sort", action="store_true", default=False, help="Compatibility option - ignored" ) parser.add_argument( "--live-run", dest="live_run", action="store_true", default=False, help="Do a live run of DLTlyse plugins on incoming DLT logs", ) parser.add_argument("traces", nargs="*", help="DLT trace files") return parser.parse_args(remaining_args) def main(): """Entry point""" logging.basicConfig(level=logging.INFO) options = parse_options() logging.root.setLevel(logging.DEBUG if options.verbose is True else logging.INFO) if len(options.traces) > 1 and options.live_run: logger.error("DLTlyse does not support multiple trace files with '--live-run' option.") return 1 analyser = DLTAnalyser() analyser.load_plugins( plugin_dirs=options.plugin_dirs, plugins=options.plugins, exclude=options.exclude, no_default_dir=options.no_default_dir, ) if 
options.show_plugins: print(analyser.show_plugins(), file=sys.stderr) return 0 traces = [] for trace in options.traces: if os.path.isdir(trace): dir_traces = [] if options.recursive_search is True: for root, _, filenames in os.walk(trace): for filename in fnmatch.filter(filenames, "*.dlt"): dir_traces.append(os.path.join(root, filename)) else: for filename in fnmatch.filter(os.listdir(trace), "*.dlt"): dir_traces.append(os.path.join(trace, filename)) traces.extend(sorted(dir_traces)) else: traces.append(trace) return analyser.run_analyse( traces, xunit=options.xunit, no_sort=True, is_live=options.live_run, testsuite_name=options.xunit_testsuite_name, ) if __name__ == "__main__": sys.exit(main()) dltlyse-1.4.3/tests/000077500000000000000000000000001434210763300143445ustar00rootroot00000000000000dltlyse-1.4.3/tests/__init__.py000066400000000000000000000000001434210763300164430ustar00rootroot00000000000000dltlyse-1.4.3/tests/unittests/000077500000000000000000000000001434210763300164065ustar00rootroot00000000000000dltlyse-1.4.3/tests/unittests/__init__.py000066400000000000000000000000001434210763300205050ustar00rootroot00000000000000dltlyse-1.4.3/tests/unittests/test_analyser.py000066400000000000000000000334351434210763300216450ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. """Tests for core analyser parts of dltlyse.""" from contextlib import contextmanager import os import signal import threading import time from typing import List, Tuple, Union # noqa: F401 from unittest.mock import ANY, call, MagicMock, patch import pytest from dlt.dlt import cDLT_FILE_NOT_OPEN_ERROR, DLT_EMPTY_FILE_ERROR, DLTMessage # noqa: F401 from dlt.core import API_VER as DLT_VERSION_STR from dltlyse.core.analyser import DLTAnalyser, DLTLifecycle, DltlysePluginCollector from dltlyse.core.utils import ( create_temp_dlt_file, dlt_example_stream, single_random_corrupt_dlt_message, single_random_dlt_message, start_dlt_message, ) from dltlyse.mock_dlt_message import MockDLTMessage try: DLT_VERSION = tuple(int(num) for num in DLT_VERSION_STR.split(".")) except: # noqa: E722 DLT_VERSION = (2, 18, 5) @contextmanager def fake_analyser(): """A fake DLTAnalyser""" with patch("dltlyse.core.analyser.get_plugin_classes", return_value=[]): analyser = DLTAnalyser() analyser.load_plugins([]) yield analyser @contextmanager def fake_analyser_with_run_analyse_mock(dlt_msgs, plugin=None): """Helper function to mock internal functions for DLTAnalyser.run_analyse""" cls_name = "dltlyse.core.analyser.DLTAnalyser" with patch("{}.get_filters".format(cls_name)), patch("signal.signal"), patch( "dltlyse.core.analyser.dlt.load", return_value=dlt_msgs ), patch("{}.process_buffer".format(cls_name)) as mock_process_buffer, patch( "{}.generate_reports".format(cls_name) ), patch( "{}.setup_lifecycle".format(cls_name), return_value=DLTLifecycle("MGHS", 1) ) as mock_setup_lifecycle, patch( "{}.end_lifecycle".format(cls_name) ) as mock_end_lifecycle: with fake_analyser() as analyser: mocks = { "end_lifecycle": mock_end_lifecycle, "process_buffer": mock_process_buffer, "setup_lifecycle": mock_setup_lifecycle, } if plugin: analyser.plugin_collector.msg_plugins = {("APID", "CTID"): (plugin,)} analyser.plugin_collector.apid_plugins = {"APID": (plugin,)} analyser.plugin_collector.ctid_plugins = {"CTID": (plugin,)} analyser.plugin_collector.greedy_plugins = (plugin,) yield (analyser, mocks) class FakePlugin(object): """Fake plugin, only for testing purpose""" def __init__(self, plugin_name, message_filters): # type: 
(str, Union[str, List[Tuple[str, str]]]) -> None self.plugin_name = plugin_name # type: str self.message_filters = message_filters # type: (Union[str, List[Tuple[str, str]]]) self.call_count = 0 def __call__(self, msg): # type: (DLTMessage) -> None self.call_count += 1 def get_plugin_name(self): # type: () -> str """Get a fake plugin name""" return self.plugin_name class FakePluginException(FakePlugin): """Fake plugin, only for testing purpose""" def __call__(self, msg): super(FakePluginException, self).__call__(msg) raise Exception("Fake exception") def test_load_plugins(): """Test plugin loading""" obj = DLTAnalyser() obj.load_plugins([]) assert "ExtractFilesPlugin" in obj.show_plugins() assert "ContextPlugin" in obj.show_plugins() def test_load_plugins_specific(): """Test specific plugin loading""" obj = DLTAnalyser() obj.load_plugins([], plugins=["ExtractFilesPlugin"]) assert "ExtractFilesPlugin" in obj.show_plugins() assert "ContextPlugin" not in obj.show_plugins() def test_dont_load_manually_executed_plugins(): """Test that a manually-executed plugin isn't automatically loaded""" obj = DLTAnalyser() obj.load_plugins([]) assert "HeavyLifecyclesAnalyzer" not in obj.show_plugins() def test_analyse_file_sanity(): """Simulate test run of the dltlyse with invalid dlt trace files""" obj = DLTAnalyser() obj.start_lifecycle = MagicMock() obj.end_lifecycle = MagicMock() obj.process_message = MagicMock() obj.generate_reports = MagicMock() xunit = MagicMock() file_not_exist = "mock.dlt" file_empty = create_temp_dlt_file(empty=True) file_valid = create_temp_dlt_file(stream=dlt_example_stream) obj.load_plugins([], plugins=["ExtractFilesPlugin", "TestSysErrorPlugin"]) obj.run_analyse([file_not_exist, file_empty, file_valid], xunit, True, False) assert file_valid not in obj.file_exceptions assert cDLT_FILE_NOT_OPEN_ERROR in obj.file_exceptions[file_not_exist] assert DLT_EMPTY_FILE_ERROR in obj.file_exceptions[file_empty] def test_corrupt_msg_live(): """Simulate test run of the dltlyse live with corrupt message""" def send_stop_signal(pid): """Send a stop signal to the live run""" time.sleep(0.1) os.kill(pid, signal.SIGINT) # Test with exactly MAX_BUFFER_SIZE MSGS and No Start obj = DLTAnalyser() obj.get_filters = MagicMock(return_value=[]) obj.start_lifecycle = MagicMock() obj.end_lifecycle = MagicMock() obj.generate_reports = MagicMock() xunit = MagicMock() stop_thread = threading.Thread(target=send_stop_signal, args=(os.getpid(),)) random_msgs = bytearray() for i in range(60): if i % 25 == 0: random_msgs.extend(single_random_corrupt_dlt_message) elif i % 15 == 0: random_msgs.extend(start_dlt_message) else: random_msgs.extend(single_random_dlt_message) file1 = create_temp_dlt_file(stream=random_msgs) stop_thread.start() obj.run_analyse([file1], xunit, True, True) assert obj.start_lifecycle.mock_calls == [ call("MGHS", 0), call("MGHS", 1), call("MGHS", 2), call("MGHS", 3), ] assert obj.end_lifecycle.call_count == 4 if DLT_VERSION < (2, 18, 5): assert obj.dlt_file.corrupt_msg_count == 3 assert obj.generate_reports.mock_calls == [call(xunit, "dltlyse")] def test_init_plugin_collector(): """Test to init the plugin collector""" with patch("dltlyse.core.analyser.DltlysePluginCollector.init_plugins") as mock_init: with fake_analyser(): mock_init.assert_called_once() @pytest.mark.parametrize( "plugins,expected_filters", [ ([FakePlugin("fake_plugin", "all")], None), ( [ FakePlugin("fake_plugin", [("APID", "CTID")]), FakePlugin("fake_plugin", [("APID1", "CTID1")]), FakePlugin("fake_plugin", [("APID", 
"CTID")]), ], [("APID", "CTID"), ("APID1", "CTID1")], ), ], ) def test_check_get_filters(plugins, expected_filters): """Check filters""" with fake_analyser() as analyser: analyser.plugins = plugins flts = analyser.get_filters() if isinstance(flts, list): assert sorted(flts) == sorted(expected_filters) else: assert flts == expected_filters @pytest.mark.parametrize( "msg_buffer", [ [], [MagicMock(), MagicMock()], ], ) def test_check_process_buffer(msg_buffer): """Check process buffer""" with fake_analyser() as analyser, patch("dltlyse.core.analyser.DLTAnalyser.process_message") as mock_process: analyser._buffered_traces = msg_buffer analyser.process_buffer() assert mock_process.call_count == len(msg_buffer) assert not analyser._buffered_traces def test_run_analyse_init_lifecycle(): """Test to init lifecycle without lifecycle start messages""" dlt_msgs = [ MockDLTMessage(apid="APID", ctid="CTID"), MockDLTMessage(apid="APID", ctid="CTID"), ] with fake_analyser_with_run_analyse_mock(dlt_msgs) as (analyser, mocks): analyser.run_analyse(["/tmp/no-such-file"], MagicMock(), False, False) mocks["setup_lifecycle"].assert_called_with(dlt_msgs[0], lifecycle_id=0, process_buffer=True) def test_run_analyse_init_lifecycle_with_msg(): """Test to init lifecycle with a lifecycle start message""" dlt_msgs = [ MockDLTMessage(apid="DLTD", ctid="INTM", payload="Daemon launched. Starting to output traces..."), ] with fake_analyser_with_run_analyse_mock(dlt_msgs) as (analyser, mocks): analyser.run_analyse(["/tmp/no-such-file"], MagicMock(), False, False) mocks["setup_lifecycle"].assert_called_with(msg=dlt_msgs[0], lifecycle_id=1) def test_run_analyse_init_lifecycle_with_msgs(): """Test to init lifecycle with lifecycle start messages""" dlt_msgs = [ MockDLTMessage(apid="DLTD", ctid="INTM", payload="Daemon launched. Starting to output traces..."), MockDLTMessage(apid="DLTD", ctid="INTM", payload="Daemon launched. 
Starting to output traces..."), ] with fake_analyser_with_run_analyse_mock(dlt_msgs) as (analyser, mocks): analyser.run_analyse(["/tmp/no-such-file"], MagicMock(), False, False) mocks["end_lifecycle"].assert_called_with(ANY, 2) def test_run_analyse_call_plugin(): """Test to dispatch messages to plugins""" plugin = FakePlugin("fake_plugin", None) dlt_msgs = [ MockDLTMessage(apid="APID", ctid="CTID"), ] with fake_analyser_with_run_analyse_mock(dlt_msgs, plugin) as (analyser, _): analyser.run_analyse(["/tmp/no-such-file"], MagicMock(), False, False) assert plugin.call_count == 4 def test_run_analyse_call_plugin_with_exception(): """Test to handle plugin's exceptions""" plugin = FakePluginException("fake_plugin", None) dlt_msgs = [ MockDLTMessage(apid="APID", ctid="CTID"), ] with patch("dltlyse.core.analyser.make_plugin_exception_message") as mock_exception: with fake_analyser_with_run_analyse_mock(dlt_msgs, plugin) as (analyser, _): analyser.run_analyse(["/tmp/no-such-file"], MagicMock(), False, False) assert mock_exception.call_count == 4 def test_run_analyse_buffer_traces(): """Test to append traces to buffer""" dlt_msgs = [ MockDLTMessage(apid="DA1", ctid="DC1", payload="[connection_info ok] connected "), MockDLTMessage(ecuid="XORA"), ] with fake_analyser_with_run_analyse_mock(dlt_msgs) as (analyser, mocks): analyser.run_analyse(["/tmp/no-such-file"], MagicMock(), False, False) assert len(analyser._buffered_traces) == 2 mocks["process_buffer"].assert_called() def test_plugin_collector_convert_dict_value_tuple(): """Test to convert a list of plugins to a tuple of plugins""" collector = DltlysePluginCollector() # pylint: disable=protected-access assert collector._convert_dict_value_tuple({"abc": ["1", "2", "3"]}) == {"abc": ("1", "2", "3")} def test_plugin_collector_dispatch(): """Test to dispatch plugins by message filters""" test_plugins = { "greedy": FakePlugin("greedy", "all"), "apid_ctid": FakePlugin("apid_ctid", [("APID", "CTID")]), "apid": FakePlugin("apid", [("APID", "")]), "ctid": FakePlugin("ctid", [("", "CTID")]), } collector = DltlysePluginCollector() collector._dispatch_plugins(test_plugins.values()) # pylint: disable=protected-access assert collector.msg_plugins == {("APID", "CTID"): (test_plugins["apid_ctid"],)} assert collector.apid_plugins == {"APID": (test_plugins["apid"],)} assert collector.ctid_plugins == {"CTID": (test_plugins["ctid"],)} assert collector.greedy_plugins == (test_plugins["greedy"],) @pytest.mark.parametrize( "plugins,expected_msg", [ ([FakePlugin("fake_plugin", "not-valid")], "Invalid message filter setting: fake_plugin - not-valid"), ([FakePlugin("fake_plugin", [])], "Message filter should not empty: fake_plugin - []"), ( [FakePlugin("fake_plugin", [("APID", "CTID"), ("APID", "")])], "Duplicated message filter setting: fake_plugin - [('APID', 'CTID'), ('APID', '')]", ), ( [FakePlugin("fake_plugin", [("APID", "CTID"), ("", "CTID")])], "Duplicated message filter setting: fake_plugin - [('APID', 'CTID'), ('', 'CTID')]", ), ], ) def test_check_plugin_collector_check_plugin_msg_filters(plugins, expected_msg): """Check the message filter format""" with pytest.raises(ValueError) as err: DltlysePluginCollector()._check_plugin_msg_filters(plugins) # pylint: disable=protected-access assert str(err.value) == expected_msg @pytest.mark.parametrize( "plugins,expected_value", [ ((FakePlugin("fake_plugin", [("APID", "CTID")]),), ["fake_plugin"]), ({"APID": (FakePlugin("fake_plugin", [("APID", "CTID")]),)}, {"APID": ["fake_plugin"]}), ], ) def 
test_check_plugin_collector_convert_plugin_obj_to_name(plugins, expected_value): """Check that the conversion from plugin objects to plugin names is correct""" # pylint: disable=protected-access assert DltlysePluginCollector()._convert_plugin_obj_to_name(plugins) == expected_value def test_plugin_collector_print_plugin_collections(): """Test to print the plugin dispatching information""" with patch("dltlyse.core.analyser.DltlysePluginCollector._convert_plugin_obj_to_name") as mock_convert: DltlysePluginCollector()._print_plugin_collections() # pylint: disable=protected-access assert mock_convert.call_count == 4 def test_plugin_collector_init_plugins(): """Test to init plugin dispatching information""" cls_name = "dltlyse.core.analyser.DltlysePluginCollector" with patch("{}._check_plugin_msg_filters".format(cls_name)) as mock_check, patch( "{}._dispatch_plugins".format(cls_name) ) as mock_dispatch, patch("{}._print_plugin_collections".format(cls_name)) as mock_print: DltlysePluginCollector().init_plugins([]) mock_check.assert_called_with([]) mock_dispatch.assert_called_with([]) mock_print.assert_called_with() dltlyse-1.4.3/tests/unittests/test_call_backs_and_report_plugin.py000066400000000000000000000226131434210763300256740ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. """Tests for CallBacksAndReport plugin for dltlyse.""" import types from unittest import TestCase from unittest.mock import Mock, call # pylint: disable=no-name-in-module,import-error from dltlyse.core.plugin_base import CallBacksAndReportPlugin, dlt_callback from dltlyse.mock_dlt_message import MockDLTMessage class CallBacksAndReportPluginForTesting(CallBacksAndReportPlugin): """Define the abstract method report, only for testing.""" def report(self): """See above: empty implementation.""" class TestCallBacksAndReportPluginBase(TestCase): """Base class for CallBacksAndReportPlugin descendants.""" plugin_class = None # The CallBacksAndReportPlugin descendant class to be instantiated. def setUp(self): self.init_and_add_callbacks() # Create a default instance of the class, and sets-up a mock. def init_and_add_callbacks(self, *callbacks): """ "Helper to register callbacks to the plugin. It creates a fresh instance of the CallBacksAndReportPlugin descendant class, registers the given callbacks, and creates a mock for the write_to_domain_file method. Args: callbacks(list/tuple of callbacks): the list/tuple of callbacks to be registered. """ # pylint: disable=not-callable self.plugin = self.plugin_class() # Creates a fresh instance of the plugin_class. for callback in callbacks: # Creates a method of the plugin_class, for the instance which we created. func = types.MethodType(callback, self.plugin) # Plugs such method to the instance. setattr(self.plugin, callback.__name__, func) # We need to re-execute it, to register the above callbacks. self.plugin.collect_and_register_callbacks() # Creates a mock for this method. self.mock = self.plugin.write_to_domain_file = Mock() class TestDltCallbackDecorator(TestCase): """dlt_callback decorator unit tests.""" def check_filters(self, func, app_id="", ctx_id=""): """Checks if the dlt callback has defined the specific filters. Args: func(func): a function (method) which should be decorated with dlt_callback. kwargs(dict): a dictionary with the filters that should be defined, and their values. 
""" filter_condition = getattr(func, "filter_condition", None) self.assertIsNotNone(filter_condition) self.assertEqual(filter_condition, (app_id, ctx_id)) def test_no_decoration_applied(self): """Tests that a non-decorated function doesn't contain filter criteria.""" def callback(self, message): """Callback invoked when the expected trace is matched.""" pass self.assertFalse(hasattr(callback, "filter_condition")) def test_no_filter_defined(self): """Tests that no filter is defined.""" @dlt_callback() def callback(self, message): """Callback invoked when the expected trace is matched.""" pass self.check_filters(callback) def test_only_app_id_defined(self): """Tests that only the app_id is defined.""" @dlt_callback("SYS") def callback(self, message): """Callback invoked when the expected trace is matched.""" pass self.check_filters(callback, "SYS") def test_only_ctx_id_defined(self): """Tests that only the ctx_id is defined.""" @dlt_callback(None, "JOUR") def callback(self, message): """Callback invoked when the expected trace is matched.""" pass self.check_filters(callback, ctx_id="JOUR") def test_app_id_and_ctx_id_defined(self): """Tests that app_id and ctx_id are defined.""" @dlt_callback("SYS", "JOUR") def callback(self, message): """Callback invoked when the expected trace is matched.""" pass self.check_filters(callback, "SYS", "JOUR") class TestCallBacksAndReportPlugin(TestCallBacksAndReportPluginBase): """CallBacksAndReport plugin unit tests.""" plugin_class = CallBacksAndReportPluginForTesting def test_report_filename(self): """Tests that the report filename is the name of the class + .txt.""" self.assertEqual(self.plugin.report_filename(), "call_backs_and_report_plugin_for_testing.txt") self.assertEqual(self.mock.call_count, 0) def test_prepare_report(self): """Tests that prepare_report is called when a report is generated.""" self.plugin.prepare_report = prepare_report_mock = Mock() self.assertEqual(self.plugin.get_report(), "No report is generated!") self.assertEqual(prepare_report_mock.call_count, 1) self.assertEqual(self.mock.call_count, 0) def test_no_report(self): """Tests that no report is generated if there was no data collected.""" self.assertEqual(self.plugin.get_report(), "No report is generated!") self.assertEqual(self.mock.call_count, 0) def test_text_file_report(self): """Tests that a text file report is generated, when some data is collected.""" @dlt_callback("SYS", "JOUR") def systemd_callback(self, message): """Callback invoked when the expected trace is matched.""" self.report_output = "Something was found!" self.init_and_add_callbacks(systemd_callback) self.plugin( MockDLTMessage( apid="SYS", ctid="JOUR", payload="2017/01/31 14:03:33.154124 1.454729 kernel: Warning: " "sd: u=dri-permissions.path, inactive_exit=548934", ) ) # Since we are using a mock for write_to_domain_file, the mock returns another Mock instance # to the caller. 
self.assertIsInstance(self.plugin.get_report(), Mock) self.assertEqual(self.mock.call_count, 1) self.assertEqual( self.mock.call_args_list[0], call("call_backs_and_report_plugin_for_testing.txt", "Something was found!") ) def test_collect_and_register_callbacks(self): # pylint: disable=invalid-name """Tests that collect_and_register_callbacks detects and registers the callbacks which were decorated with dlt_callback.""" @dlt_callback("SYS", "JOUR") def systemd_callback(self, message): """Callback invoked when the expected trace is matched.""" pass self.init_and_add_callbacks(systemd_callback) self.assertEqual(len(self.plugin.dlt_callbacks), 1) filter_condition, callbacks = self.plugin.dlt_callbacks.popitem() self.assertEqual(filter_condition, ("SYS", "JOUR")) self.assertEqual(len(callbacks), 1) self.assertEqual(callbacks[0].__name__, systemd_callback.__name__) def test_add_callback_from_template_function(self): # pylint: disable=invalid-name """Tests that add_callback_from_template_function adds a callback, deriving it from a template function.""" def mtee_callback(message, app_id=None, ctx_id=None, userdata=None): """Callback invoked when the expected mtee trace is matched.""" pass self.plugin.add_callback_from_template_function(mtee_callback, "SYS", "JOUR", "TEST") self.assertEqual(len(self.plugin.dlt_callbacks), 1) filter_condition, callbacks = self.plugin.dlt_callbacks.popitem() self.assertEqual(filter_condition, ("SYS", "JOUR")) self.assertEqual(len(callbacks), 1) self.assertEqual(callbacks[0].keywords, {"app_id": "SYS", "ctx_id": "JOUR", "userdata": "TEST"}) self.assertEqual(callbacks[0].func.__name__, mtee_callback.__name__) # pylint: disable=no-member def test_calling_callbacks(self): """Tests that all registered callbacks are correctly called when a message matches their filter conditions.""" @dlt_callback("SYS", "JOUR") def systemd_callback(self, message): """Callback invoked when a systemd trace is matched.""" matches.append(message) @dlt_callback("LTM", "MAIN") def version_callback(self, message): """Callback invoked when the ltm trace is matched.""" matches.append(message) def mtee_callback(message, app_id=None, ctx_id=None, userdata=None): """Callback invoked when the expected mtee trace is matched.""" payload = str(message.payload_decoded) if payload == userdata: matches.append(message) matches = [] self.init_and_add_callbacks(systemd_callback, version_callback) self.plugin.add_callback_from_template_function(mtee_callback, "MTEE", "MTEE", "START") systemd_message = MockDLTMessage(apid="SYS", ctid="JOUR", payload="systemd!") self.plugin(systemd_message) main_message = MockDLTMessage(apid="LTM", ctid="MAIN", payload="main!") self.plugin(main_message) self.plugin(MockDLTMessage(apid="DA1", ctid="DC1", payload="New lifecycle!")) self.plugin(MockDLTMessage(apid="MTEE", ctid="MTEE", payload="STOP")) mtee_message = MockDLTMessage(apid="MTEE", ctid="MTEE", payload="START") self.plugin(mtee_message) self.assertEqual(len(matches), 3) self.assertEqual(matches[0], systemd_message) self.assertEqual(matches[1], main_message) self.assertEqual(matches[2], mtee_message) dltlyse-1.4.3/tests/unittests/test_plugin_report.py000066400000000000000000000250731434210763300227170ustar00rootroot00000000000000# Copyright (C) 2022. BMW Car IT GmbH. All rights reserved. 
"""Test plugin_metadata decorator and xunit report functions""" import xml.etree.ElementTree as etree import inspect from unittest.mock import patch, mock_open from dltlyse.core.plugin_base import Plugin, plugin_metadata from dltlyse.core.report import logger, Metadata, Result, XUnitReport def _equal_xml_tree(root, other): # pylint: disable=too-many-return-statements """Real implementation for equal_xml_tree""" if root is None or other is None: return False if root.tag != other.tag: return False if root.text and other.text and root.text != other.text: return False if len(tuple(root)) != len(tuple(other)): return False if dict(root.attrib) != dict(other.attrib): return False for root_child, other_child in zip(root, other): if not _equal_xml_tree(root_child, other_child): return False return True def equal_xml_tree(root, other): """Compare the two xml trees are equal or not""" def to_node(node): """Convert str/element type to element type""" return etree.fromstring(node) if isinstance(node, str) else node return _equal_xml_tree(to_node(root), to_node(other)) # pylint: disable=missing-docstring class TestNoMetadataPlugin(Plugin): def __call__(self, message): pass def report(self): pass @plugin_metadata(type="test", function="monitor") class TestMetadataPlugin(TestNoMetadataPlugin): """TestMetadataPlugin-first-line TestMetadataPlugin-description """ @plugin_metadata(function="logging", extra="extra") class TestMetadataLoggingPlugin(TestMetadataPlugin): """TestMetadataLoggingPlugin-first-line TestMetadataLoggingPlugin-description """ @plugin_metadata(type="test", function="report") class TestPlugin(TestNoMetadataPlugin): """Test-for-report Get full description here. """ def generate_test_result(attach=None, extra=""): """Prepare test result data and xml string""" attach = attach or [] result = Result( classname="TestPlugin", testname="TestPlugin-shot-description", state="success", stdout="TestPlugin-stdoutput", message="TestPlugin-success-message", attach=attach, ) xml_str = ( '' "{}" "TestPlugin-stdoutput" "{}" "" ).format("".join("[[ATTACHMENT|{}]]".format(filename) for filename in attach), extra) return result, xml_str def test_plugin_no_metadata(): """Tests that plugin metadata is not set without plugin_metadata decorator""" assert TestNoMetadataPlugin.__name__ == "TestNoMetadataPlugin" assert not hasattr(TestNoMetadataPlugin, "plugin_metadata") assert inspect.getdoc(TestNoMetadataPlugin) == "dltlyse Plugin base class" def test_plugin_metadata_base_class(): """Tests that plugin metadata is set correctly.""" assert TestMetadataPlugin.__name__ == "TestMetadataPlugin" assert hasattr(TestMetadataPlugin, "plugin_metadata") assert TestMetadataPlugin.plugin_metadata == {"type": "test", "function": "monitor"} assert inspect.getdoc(TestMetadataPlugin) == "TestMetadataPlugin-first-line\n\nTestMetadataPlugin-description" def test_plugin_metadata_derived_class(): # pylint: disable=invalid-name """Tests that plugin metadata is set correctly for derived class.""" assert TestMetadataLoggingPlugin.__name__ == "TestMetadataLoggingPlugin" assert hasattr(TestMetadataLoggingPlugin, "plugin_metadata") assert TestMetadataLoggingPlugin.plugin_metadata == {"type": "test", "function": "logging", "extra": "extra"} assert ( inspect.getdoc(TestMetadataLoggingPlugin) == "TestMetadataLoggingPlugin-first-line\n\nTestMetadataLoggingPlugin-description" ) def test_plugin_add_result_no_metadata(): # pylint: disable=invalid-name """Tests that the result is added correctly without metadata.""" plugin = TestNoMetadataPlugin() 
plugin.add_result(state="success", message="Test successfully", stdout="test-stdout") results = plugin.get_results() assert len(results) == 1 result = results[0] assert result.classname == "TestNoMetadataPlugin" assert result.testname == "" assert result.state == "success" assert result.message == "Test successfully" assert result.metadata.metadata == {"docstring": ""} def test_plugin_add_result(): """Tests that the result is added correctly.""" plugin = TestPlugin() plugin.add_result(state="success", message="Test successfully", stdout="test-stdout") results = plugin.get_results() assert len(results) == 1 result = results[0] assert result.classname == "TestPlugin" assert result.testname == "Test-for-report" assert result.state == "success" assert result.message == "Test successfully" assert result.metadata.metadata == { "type": "test", "function": "report", "docstring": "Test-for-report\n\nGet full description here.", } def test_metadata_render_default(): """Tests that metadata xml is None by default""" meta = Metadata() assert not meta.render_xml() def test_metadata_render_wrong_type(): """Tests that metadata xml is None when the metadata type is not dict""" meta = Metadata([]) assert not meta.render_xml() def test_metadata_render_normal(): """Tests that metadata xml is rendered correctly.""" meta = Metadata({"type": "test", "function": "monitor"}) assert equal_xml_tree( meta.render_xml(), 'monitortest' ) def test_metadata_render_recursive(): """Tests that metadata xml is rendered correctly and recursively.""" meta = Metadata({"type": "test", "function": "monitor", "traceability": {"JIRA": "No exist"}}) assert equal_xml_tree( meta.render_xml(), ( "" 'monitor' '' 'No exist' "" 'test' "" ), ) def test_result_equal(): result = Result() other = Result(metadata={"key": "should-not-have-effect"}) assert result == other def test_result_render_xml_error_state(): # pylint: disable=invalid-name """Test the warning message when the test state is undefined.""" result = Result(classname="noclass", state="nostate") with patch.object(logger, "warning") as logger_mock: result.render_xml() logger_mock.assert_called_with("Not supported for the test state: %s - plugin: %s", "nostate", "noclass") assert result.state == "error" def test_result_render_xml_fail(): """Tests that result is rendered when the state is error.""" state = "error" state_type = "error" result = Result( classname="TestPlugin", testname="TestPlugin-shot-description", state=state, stdout="TestPlugin-stdoutput", message="TestPlugin-{}-message".format(state), ) assert equal_xml_tree( result.render_xml(), ( '' '<{state} message="TestPlugin-{state}-message" type="{state_type}"/>' "TestPlugin-stdoutput" "" ).format(state=state, state_type=state_type), ) def test_result_render_xml_success(): """Tests that result is rendered when the state is success.""" result, excepted = generate_test_result() assert equal_xml_tree(result.render_xml(), excepted) def test_result_render_xml_with_metadata(): # pylint: disable=invalid-name """Tests that result is rendered with metadata""" result, excepted = generate_test_result(extra="") with patch("dltlyse.core.report.Metadata.render_xml", return_value=etree.Element("metadata")): assert equal_xml_tree(result.render_xml(), excepted) def test_result_render_xml_with_attachment(): # pylint: disable=invalid-name """Tests that result is rendered with attachment""" result, excepted = generate_test_result(attach=["test.csv"]) assert equal_xml_tree(result.render_xml(), excepted) def 
def test_result_render_xml_with_metadata_and_attachment():  # pylint: disable=invalid-name
    """Tests that the result is rendered with metadata and an attachment"""
    result, expected = generate_test_result(attach=["test.csv"], extra="<metadata/>")

    with patch("dltlyse.core.report.Metadata.render_xml", return_value=etree.Element("metadata")):
        assert equal_xml_tree(result.render_xml(), expected)


def test_xunit_report_summary():
    """Tests that the statistics of the test states are correct."""
    xunit_report = XUnitReport()
    xunit_report.add_results(
        [Result(state="success"), Result(state="failure"), Result(state="skipped"), Result(state="error")]
    )

    assert xunit_report._generate_summary() == {
        "number_of_errors": "1",
        "number_of_failures": "1",
        "number_of_skipped": "1",
        "number_of_tests": "4",
    }


def test_xunit_report_render_xml():
    """Tests that the xunit report is rendered correctly."""
    xunit_report = XUnitReport()
    xunit_report.add_results([Result()])

    with patch("dltlyse.core.report.Result.render_xml", return_value=etree.Element("testcase")):
        assert equal_xml_tree(
            xunit_report.render_xml(),
            '<testsuite errors="0" failures="0" skipped="0" tests="1"><testcase/></testsuite>',
        )


def test_xunit_report_not_render():
    """Tests that the xunit report is not written with an invalid filename."""
    xunit_report = XUnitReport()

    with patch("dltlyse.core.report.open", mock_open()) as mocked_file:
        xunit_report.render()

    mocked_file().write.assert_not_called()


def test_xunit_report_render():
    """Tests that the xunit report is written to file correctly."""
    xunit_report = XUnitReport()
    xunit_report.outfile = "mocked-file"

    with patch("dltlyse.core.report.XUnitReport.render_xml", return_value=etree.Element("testsuite")):
        with patch("dltlyse.core.report.open", mock_open()) as mocked_file:
            xunit_report.render()

    assert mocked_file().write.call_count >= 1

    write_xml = "".join(args[0].decode() for args, _ in mocked_file().write.call_args_list)
    assert write_xml == "<?xml version='1.0' encoding='utf-8'?>\n<testsuite />"
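
# End-to-end usage sketch (illustrative; the output filename is an assumption): production
# code drives the classes tested above roughly like this.
#
#     report = XUnitReport()
#     report.outfile = "dltlyse_results.xml"
#     report.add_results(plugin.get_results())
#     report.render()  # writes the <testsuite> document to report.outfile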
""" data = "foo", 1.5 self.assertEqual( data_to_xml_string(data), """ 1.5 """, ) def test_children_with_complex_data_structure(self): # pylint: disable=invalid-name """Tests that children can use the same rules of the main tag. It allows to easily define more complex data structures. """ data = "foo", ["bar", ("spam", 123), ("droogs", {"milk": "plus"})] self.assertEqual( data_to_xml_string(data), """ \t \t123 \t """, ) def test_children_with_the_same_tag_name(self): # pylint: disable=invalid-name """Tests that the tag name for children can be defined just ones. When all children share the same tag name, it's possible to define it once (with the special $tag attribute), and then it'll be automatically used by all of them. """ data = "foo", {"$tag": "bar"}, [1, 2, 3] self.assertEqual( data_to_xml_string(data), """ \t1 \t2 \t3 """, ) def test_children_with_the_same_attributes(self): # pylint: disable=invalid-name """Tests that the attributes for children can be defined just ones. When all children share the same attributes, it's possible to define them once (with the special $attr attribute), and then they'll be automatically used by all of them. """ data = "foo", {"$attr": {"bar": "spam"}}, ["droogs", "milk", "plus"] self.assertEqual( data_to_xml_string(data), """ \t \t \t """, ) def test_children_with_the_same_name_and_attributes(self): # pylint: disable=invalid-name """Tests that the tag name and attributes for children can be defined just ones. When all children share the same name and attributes, it's possible to define them once (with the special $tag and $attr attributes), and then they'll be automatically used by all of them. """ data = "foo", {"$tag": "bar", "$attr": {"milk": "plus"}}, [1, 2, 3] self.assertEqual( data_to_xml_string(data), """ \t1 \t2 \t3 """, ) dltlyse-1.4.3/tox.ini000066400000000000000000000004351434210763300145170ustar00rootroot00000000000000[tox] isolated_build = True envlist = py3 [testenv] deps = pytest six git+https://github.com/bmwcarit/python-dlt commands = pytest tests/ [testenv:statictest] deps = flake8 black mypy commands = black -l 119 --check . flake8 mypy src/dltlyse/