labgrid-0.4.1/.codecov.yaml:

coverage:
  precision: 1
  range: "50..70"
github_checks:
  annotations: false

labgrid-0.4.1/.coveragerc:

[report]
exclude_lines =
    pragma: no cover
    def __repr__
    if self.debug:
    if settings.DEBUG
    raise AssertionError
    raise NotImplementedError
    if 0:
    if __name__ == .__main__.:

labgrid-0.4.1/.crossbar/.gitignore:

key.priv
key.pub
node.pid

labgrid-0.4.1/.crossbar/config.yaml:

version: 2
workers:
  - type: router
    realms:
      - name: realm1
        roles:
          - name: authenticator
            permissions:
              - uri: org.labgrid.authenticate
                match: exact
                allow:
                  call: false
                  register: true
                  publish: false
                  subscribe: false
                disclose:
                  caller: false
                  publisher: false
                cache: true
          - name: public
            permissions:
              - uri: ''
                match: prefix
                allow:
                  call: true
                  register: true
                  publish: true
                  subscribe: true
                disclose:
                  caller: true
                  publisher: true
                cache: true
    transports:
      - type: web
        endpoint:
          type: tcp
          port: 20408
        paths:
          /:
            type: static
            directory: ../web
          ws:
            type: websocket
            auth:
              ticket:
                type: dynamic
                authenticator: org.labgrid.authenticate
    components:
      - type: class
        classname: labgrid.remote.authenticator.AuthenticatorSession
        realm: realm1
        role: authenticator
  - id: coordinator
    type: guest
    executable: python3
    arguments:
      - -mlabgrid.remote.coordinator
    options:
      workdir: .
      env:
        vars:
          WS: ws://localhost:20408/ws

labgrid-0.4.1/.dockerignore:

*.pyc
*__pycache__
*console_main
.*.swp
.cache
/MANIFEST
.tox
/.#tox.ini
/.eggs/README.txt
*.egg
*.egg-info
/venv*
/build
/doc/.build
/doc/modules
/.coverage
/dist
/.pytest_cache/
/htmlcov/
/.idea
*/__pycache__
/dockerfiles/
!/dockerfiles/exporter/entrypoint.sh
.git/

labgrid-0.4.1/.flake8:

[flake8]
exclude = .git, __pycache__, docs/conf.py, build, dist
ignore = E124, E128
max-line-length = 119

labgrid-0.4.1/.github/pull_request_template.md:

**Description**

**Checklist**

- [ ] Documentation for the feature
- [ ] Tests for the feature
- [ ] The arguments and description in doc/configuration.rst have been updated
- [ ] Add a section on how to use the feature to doc/usage.rst
- [ ] Add a section on how to use the feature to doc/development.rst
- [ ] CHANGES.rst has been updated
- [ ] PR has been tested
- [ ] Man pages have been regenerated

labgrid-0.4.1/.github/workflows/docker.yml:

name: docker build

on:
  push:
    branches: [ master ]

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v2
    - name: Install system dependencies
      run: |
        sudo apt install -yq python3-pip
        python3 -m pip install --upgrade pip setuptools wheel
    - name: Login to DockerHub
      uses: docker/login-action@v1
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}
    - name: Build docker image
      run: |
        ./dockerfiles/build.sh
        docker-compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client
        docker-compose -f dockerfiles/staging/docker-compose.yml down
        docker images
        docker tag labgrid-client ${{ secrets.DOCKERHUB_PREFIX }}client
        docker tag labgrid-exporter ${{ secrets.DOCKERHUB_PREFIX }}exporter
        docker tag labgrid-coordinator ${{ secrets.DOCKERHUB_PREFIX }}coordinator
        docker push ${{ secrets.DOCKERHUB_PREFIX }}client
        docker push ${{ secrets.DOCKERHUB_PREFIX }}exporter
        docker push ${{ secrets.DOCKERHUB_PREFIX }}coordinator
        docker images

labgrid-0.4.1/.github/workflows/unit-tests.yml:

name: unit tests

on: [push, pull_request]

jobs:
  build:
    runs-on: ubuntu-latest
    continue-on-error: ${{ matrix.experimental }}
    strategy:
      fail-fast: false
      matrix:
        python-version: ['3.6', '3.7', '3.8', '3.9']
        experimental: [false]
    steps:
    - uses: actions/checkout@v2
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v2
      with:
        python-version: ${{ matrix.python-version }}
    - uses: actions/cache@v2
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip-${{ hashFiles('*requirements.txt') }}
        restore-keys: |
          ${{ runner.os }}-pip-
    - name: Install system dependencies
      run: |
        sudo apt-get install -yq libow-dev openssh-server openssh-client libsnappy-dev ncurses-term
        sudo mkdir -p /var/cache/labgrid/runner && sudo chown runner /var/cache/labgrid/runner
    - name: Prepare local SSH
      run: |
        # the default of 777 is too open for SSH
        chmod 755 ~
        ssh-keygen -f ~/.ssh/id_ed25519.local -t ed25519 -N ""
        cat ~/.ssh/id_ed25519.local.pub
>> ~/.ssh/authorized_keys echo -e "Host localhost ip6-localhost\n Hostname 127.0.0.1\n IdentityFile ~/.ssh/id_ed25519.local\n UserKnownHostsFile ~/.ssh/known_hosts.local" >> ~/.ssh/config ssh -o StrictHostKeyChecking=no localhost echo OK - name: Install python dependencies run: | python -m pip install --upgrade pip setuptools wheel python -m pip install flake8 pytest pip install -r ci-requirements.txt - name: Install labgrid run: | pip install -e . #- name: Lint with flake8 # run: | # # stop the build if there are Python syntax errors or undefined names # flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics # # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide # flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - name: Test with pytest run: | TERM=xterm pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" - name: Build documentation run: | python setup.py build_sphinx rm man/*.1 make -C man all git --no-pager diff --exit-code - uses: codecov/codecov-action@v1 docker: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - name: Install system dependencies run: | sudo apt install -yq python3-pip python3 -m pip install --upgrade pip setuptools wheel - name: Build docker images run: | ./dockerfiles/build.sh docker-compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client docker-compose -f dockerfiles/staging/docker-compose.yml down - name: Show docker images run: | docker images labgrid-0.4.1/.gitignore000066400000000000000000000004301415016572500151130ustar00rootroot00000000000000*.pyc *__pycache__ *console_main .*.swp .cache /MANIFEST .tox /.#tox.ini /.eggs/README.txt *.egg *.egg-info /venv* /build /doc/.build /doc/modules /.coverage /dist /.pytest_cache/ /htmlcov/ /dockerfiles/staging/crossbar/* !/dockerfiles/staging/crossbar/places_example.yaml /.idea labgrid-0.4.1/.pylintrc000066400000000000000000000330011415016572500147700ustar00rootroot00000000000000[MASTER] # Specify a configuration file. #rcfile= # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS # Add files or directories matching the regex patterns to the blacklist. The # regex matches against base names, not paths. ignore-patterns= # Pickle collected data for later comparisons. persistent=yes # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= # Use multiple processes to speed up Pylint. jobs=1 # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code extension-pkg-whitelist= # Allow optimization of some AST trees. This will activate a peephole AST # optimizer, which will apply various small optimizations. For instance, it can # be used to obtain the result of joining multiple strings with the addition # operator. Joining a lot of strings can lead to a maximum recursion error in # Pylint and this flag can prevent that. It has one side effect, the resulting # AST will be different than the one from reality. 
This option is deprecated # and it will be removed in Pylint 2.0. optimize-ast=no [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED confidence= # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. #enable= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once).You can also use "--disable=all" to # disable everything first and then reenable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" disable=next-method-called,import-star-module-level,dict-view-method,basestring-builtin,unpacking-in-except,map-builtin-not-iterating,input-builtin,coerce-builtin,xrange-builtin,backtick,indexing-exception,unichr-builtin,suppressed-message,parameter-unpacking,execfile-builtin,standarderror-builtin,filter-builtin-not-iterating,reload-builtin,attribute-defined-outside-init,raising-string,print-statement,hex-method,useless-suppression,long-suffix,reduce-builtin,cmp-builtin,intern-builtin,delslice-method,coerce-method,raw_input-builtin,oct-method,setslice-method,buffer-builtin,range-builtin-not-iterating,cmp-method,dict-iter-method,no-absolute-import,old-octal-literal,apply-builtin,file-builtin,zip-builtin-not-iterating,getslice-method,long-builtin,unicode-builtin,round-builtin,nonzero-method,metaclass-assignment,old-division,old-ne-operator,old-raise-syntax,using-cmp-argument,useless-super-delegation,duplicate-code,no-member [REPORTS] # Set the output format. Available formats are text, parseable, colorized, msvs # (visual studio) and html. You can also give a reporter class, eg # mypackage.mymodule.MyReporterClass. output-format=text # Put messages in a separate file for each module / package specified on the # command line instead of printing them on stdout. Reports (if any) will be # written in a file name "pylint_global.[txt|html]". This option is deprecated # and it will be removed in Pylint 2.0. files-output=no # Tells whether to display a full report or only the messages reports=yes # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details #msg-template= [SIMILARITIES] # Minimum lines number of a similarity. min-similarity-lines=4 # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # Ignore imports when computing similarities. 
ignore-imports=no [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME,XXX,TODO [VARIABLES] # Tells whether we should check for unused import in __init__ files. init-import=no # A regular expression matching the name of dummy variables (i.e. expectedly # not used). dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_,_cb # List of qualified module names which can have objects that can redefine # builtins. redefining-builtins-modules=six.moves,future.builtins [BASIC] # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_ # Bad variable names which should always be refused, separated by a comma bad-names=foo,bar,baz,toto,tutu,tata # Colon-delimited sets of names that determine each other's naming style when # the name regexes allow several styles. name-group= # Include a hint for the correct naming format with invalid-name include-naming-hint=no # List of decorators that produce properties, such as abc.abstractproperty. Add # to this list to register other decorators that produce valid properties. property-classes=abc.abstractproperty # Regular expression matching correct constant names const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Naming hint for constant names const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ # Regular expression matching correct function names function-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for function names function-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct attribute names attr-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for attribute names attr-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct module names module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Naming hint for module names module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ # Regular expression matching correct method names method-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for method names method-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct variable names variable-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for variable names variable-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression matching correct inline iteration names inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ # Naming hint for inline iteration names inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ # Regular expression matching correct class attribute names class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ # Naming hint for class attribute names class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ # Regular expression matching correct class names class-rgx=[A-Z_][a-zA-Z0-9]+$ # Naming hint for class names class-name-hint=[A-Z_][a-zA-Z0-9]+$ # Regular expression matching correct argument names argument-rgx=[a-z_][a-z0-9_]{2,30}$ # Naming hint for argument names argument-name-hint=[a-z_][a-z0-9_]{2,30}$ # Regular expression which should only match function or class names that do # not require a docstring. no-docstring-rgx=^_ # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=-1 [ELIF] # Maximum number of nested blocks for function / method body max-nested-blocks=5 [FORMAT] # Maximum number of characters on a single line. 
max-line-length=100 # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )??$ # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no # List of optional constructs for which whitespace checking is disabled. `dict- # separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. # `trailing-comma` allows a space between comma and closing bracket: (a, ). # `empty-line` allows space-only lines. no-space-check=trailing-comma,dict-separator # Maximum number of lines in a module max-module-lines=1000 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. expected-line-ending-format= [LOGGING] # Logging modules to check that the string format arguments are in logging # function parameter format logging-modules=logging [SPELLING] # Spelling dictionary name. Available dictionaries: none. To make it working # install python-enchant package. spelling-dict= # List of comma separated words that should not be checked. spelling-ignore-words= # A path to a file that contains private dictionary; one word per line. spelling-private-dict-file= # Tells whether to store unknown words to indicated private dictionary in # --spelling-private-dict-file option instead of raising a message. spelling-store-unknown-words=no [TYPECHECK] # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. ignored-modules= # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of # qualified names. ignored-classes=optparse.Values,thread._local,_thread._local # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. generated-members= # List of decorators that produce context managers, such as # contextlib.contextmanager. Add to this list to register other decorators that # produce valid context managers. contextmanager-decorators=contextlib.contextmanager [IMPORTS] # Deprecated modules which should not be used, separated by a comma deprecated-modules=optparse # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= # Force import order to recognize a module as part of the standard # compatibility libraries. known-standard-library= # Force import order to recognize a module as part of a third party library. known-third-party=enchant # Analyse import fallback blocks. 
# This can be used to support both Python 2 and 3 compatible code, which means
# that the block might have code that exists only in one or another
# interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no

[DESIGN]

# Maximum number of arguments for function / method
max-args=5

# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*

# Maximum number of locals for function / method body
max-locals=15

# Maximum number of return / yield for function / method body
max-returns=6

# Maximum number of branch for function / method body
max-branches=12

# Maximum number of statements in function / method body
max-statements=50

# Maximum number of parents for a class (see R0901).
max-parents=10

# Maximum number of attributes for a class (see R0902).
max-attributes=10

# Minimum number of public methods for a class (see R0903).
min-public-methods=0

# Maximum number of public methods for a class (see R0904).
max-public-methods=20

# Maximum number of boolean expressions in an if statement
max-bool-expr=5

[CLASSES]

# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp

# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls

# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs

# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make

[EXCEPTIONS]

# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

labgrid-0.4.1/CHANGES.rst:

Release 0.4.1 (unreleased)
--------------------------

New Features in 0.4.1
~~~~~~~~~~~~~~~~~~~~~

Bug fixes in 0.4.1
~~~~~~~~~~~~~~~~~~

- The exporter now exports sysfsgpios during place acquire/release, fixing a
  race in the sysfsgpio agent interface.
- Fixed a bug where using ``labgrid-client io get`` always returned ``low``
  when reading a ``sysfsgpio``.
- Fixed ``labgrid-client forward --remote``/``-R``, which used either the
  LOCAL part of ``--local``/``-L`` accidentally (if specified) or raised an
  UnboundLocalError.
- Fix labgrid-client exit code on keyboard interrupt.

Known issues in 0.4.1
~~~~~~~~~~~~~~~~~~~~~

Release 0.4.0 (unreleased)
--------------------------

New Features in 0.4.0
~~~~~~~~~~~~~~~~~~~~~

- Duplicate bindings for the same driver are now allowed (see the QEMUDriver).
- The `NetworkPowerDriver` now additionally supports:

  - Siglent SPD3000X series power supplies

- Labgrid client lock now enforces that all matches need to be fulfilled.
- Support for USB HID relays has been added.
- UBootDriver now allows overriding the otherwise fixed await-boot timeout via
  the new ``boot_timeout`` argument.
- With ``--lg-colored-steps``, two new ``dark`` and ``light`` color schemes
  which only use the standard 8 ANSI colors can be set in ``LG_COLOR_SCHEME``.
  The existing color schemes have been renamed to ``dark-256color`` and
  ``light-256color``. Also, the `ColoredStepReporter` now tries to autodetect
  whether the terminal supports 8 or 256 colors, and defaults to the
  respective dark variant. The 256-color schemes now use purple instead of
  green for the ``run`` lines to make them easier to distinguish from pytest's
  "PASSED" output.
- Network controlled relays providing a GET/PUT based REST API are now
  supported.
- The QEMUDriver gains support for -bios and qcow2 images.
- Support for audio input has been added.
- Usage of sshpass for SSH password input has been replaced with the
  SSH_ASKPASS environment variable.
- Labgrid now supports the Linux Automation GmbH USB Mux.
- NetworkManager control support on the exporter has been added. This allows
  control of Bluetooth and WiFi devices connected to the exporter.
- TFTP-/NFS-/HTTPProvider has been added, allowing easy staging of files for
  the DUT to later retrieve.
- Improved LG_PROXY documentation in docs/usage.rst.
- The exporter now checks /usr/sbin/ser2net for SerialPortExport.
- Support for Tasmota-flashed power outlets controlled via MQTT has been
  added.
- The OpenOCDDriver has been reworked with new options and better output.
- A script to synchronize places to an external description was added.
- The ShellDriver has regained support for retrieving the active interface
  and IP addresses.
- Labgrid has gained support for HTTP video streams.
- A settle time for the ShellDriver has been added to wait for chatty systems
  to settle before interacting with the shell.
- Support for Macrosilicon HDMI to USB (MJPEG) adapters was added.
- Console logfiles can now be created by the labgrid client command.
- A ManualSwitchDriver has been added to prompt the user to flip a switch or
  set a jumper.
- AndroidFastbootDriver now supports booting/flashing images preconfigured in
  the environment configuration.

Bug fixes in 0.4.0
~~~~~~~~~~~~~~~~~~

- ``pytest --lg-log foobar`` now creates the folder ``foobar`` before trying
  to write the log into it, and error handling was improved so that all
  possible errors that can occur when opening the log file are reported to
  stderr.
- gstreamer log messages are now suppressed when using labgrid-client video.
- Travis CI has been dropped in favor of GitHub Actions.

Breaking changes in 0.4.0
~~~~~~~~~~~~~~~~~~~~~~~~~

- ``EthernetInterface`` has been renamed to ``NetworkInterface``.

Known issues in 0.4.0
~~~~~~~~~~~~~~~~~~~~~

- Some client commands return 0 even if the command failed.
- Currently, empty passwords are not well supported by the ShellDriver.

Release 0.3.0 (released Jan 22, 2021)
-------------------------------------

New Features in 0.3.0
~~~~~~~~~~~~~~~~~~~~~

- All `CommandProtocol` drivers support the poll_until_success method.
- The new `FileDigitalOutputDriver` represents a digital signal with a file.
- The new `GpioDigitalOutputDriver` controls the state of a GPIO via the
  sysfs interface.
- Crossbar and autobahn have been updated to 19.3.3 and 19.3.5 respectively.
- The InfoDriver was removed. The functions have been integrated into the
  labgridhelper library; please use the library for the old functionality.
- labgrid-client now has a ``write-image`` subcommand to write images onto
  block devices.
- ``labgrid-client ssh`` now also uses the port from the NetworkService
  resource, if available.
- The ``PLACE`` and ``STATE`` variables used by labgrid-client are replaced
  by ``LG_PLACE`` and ``LG_STATE``; the old variables are still supported for
  the time being.
- The SSHDriver's keyfile attribute is now specified relative to the config
  file just like the images are.
- The ShellDriver's keyfile attribute is now specified relative to the config
  file just like the images are.
- ``labgrid-client -P `` and the ``LG_PROXY`` environment variable can be
  used to access the coordinator and network resources via that SSH proxy
  host.
Drivers which run commands via SSH to the exporter still connect directly, allowing custom configuration in the user's ``.ssh/config`` as needed. Note that not all drivers have been updated to use the ProxyManager yet. - Deditec RELAIS8 devices are now supported by the `DeditecRelaisDriver`. - The `RKUSBDriver` was added to support the rockchip serial download mode. - The `USBStorageDriver` gained support for BMAP. - Flashrom support added, by hard-wiring e.g. an exporter to the DUT, the ROM on the DUT can be written directly. The flashrom driver implements the bootstrap protocol. - AndroidFastbootDriver now supports 'getvar' and 'oem getenv' subcommands. - The coordinator now updates the resource acquired state at the exporter. Accordingly, the exporter now starts ser2net only when a resources is aquired. Furthermore, resource conflicts between places are now detected. - Labgrid now uses the `ProcessWrapper` for externally called processes. This should include output from these calls better inside the test runs. - The binding dictionary can now supports type name strings in addition to the types themselves, avoiding the need to import a specific protocol or driver in some cases. - The remote infrastructure gained support for place reservations, for further information check the section in the documentation. - The `SigrokDriver` gained support for the Manson HCS-2302, it allows enabling and disabling channels, measurement and setting the current and voltage limit. - ``labgrid-client write-image`` gained new arguments: ``--partition``, ``--skip``, ``--seek``. - Support for Sentry PDUs has been added. - Strategies now implement a ``force`` method, to ``force`` a strategy state irrespective of the current state. - SSH Connections can now be proxied over the exporter, used by adding a device suffix to the `NetworkService` address. - UBootDriver now allows overriding of default boot command (``run bootcmd``) via new ``boot_command`` argument. - The config file supports per-target options, in addition to global options. - Add power driver to support GEMBIRD SiS-PM implementing SiSPMPowerDriver. - A cleanup of the cleanup functions was performed, labgrid should now clean up after itself and throws an error if the user needs to handle it himself. - ``labgrid-client`` now respects the ``LG_HOSTNAME`` and ``LG_USERNAME`` environment variables to set the hostname and username when accessing resources. - PyVISA support added, allowing to use PyVISA controlled test equipment from Labgrid. - ``labgrid-client write-image`` gained a new argument ``--mode`` to specify which tool should be used to write the image (either ``dd`` or ``bmaptool``) - Exporter configuration file ``exporter.yaml`` now allows use of environment variables. Breaking changes in 0.3.0 ~~~~~~~~~~~~~~~~~~~~~~~~~ - `ManagedFile` now saves the files in a different directory on the exporter. Previously ``/tmp`` was used, labgrid now uses ``/var/cache/labgrid``. A tmpfiles example configuration for systemd is provided in the ``/contrib`` directory. It is also highly recommended to enable ``fs.protected_regular=1`` and ``fs.protected_fifos=1`` for kernels>=4.19. This requires user intervention after the upgrade to create the directory and setup the cleanup job. - ``@attr.s(cmp=False)`` is deprecated and all classes have been moved to ``@attr.s(eq=False)``, this release requires attrs version 19.2.0 - Coordinator work dir is now set to the same dir as the crossbar configuration dir. 
Hence coordinator specific files like ``places.yaml`` and ``resources.yaml`` are now also stored in the crossbar configuration folder. Previously it would use ``..``. - The ``HawkbitTestClient`` and ``USBStick`` classes have been removed - The original USBStorageDriver was removed, ``NetworkUSBStorageDriver`` was renamed to `USBStorageDriver`. A deprecated `NetworkUSBStorageDriver` exists temporarily for compatibility reasons. Known issues in 0.3.0 ~~~~~~~~~~~~~~~~~~~~~~~~~ - There are several reports of ``sshpass`` used within the SSHDriver not working in call cases or only on the first connection. - Some client commands return 0 even if the command failed. - Currently empty passwords are not well supported by the ShellDriver Release 0.2.0 (released Jan 4, 2019) ------------------------------------ New Features in 0.2.0 ~~~~~~~~~~~~~~~~~~~~~ - A colored StepReporter was added and can be used with ``pytest --lg-colored-steps``. - ``labgrid-client`` can now use the last changed information to sort listed resources and places. - ``labgrid-client ssh`` now uses ip/user/password from NetworkService resource if available - The pytest plugin option ``--lg-log`` enables logging of the serial traffic into a file (see below). - The environement files can contain feature flags which can be used to control which tests are run in pytest. - ``LG_*`` variables from the OS environment can be used in the config file with the ``!template`` directive. - The new "managed file" support takes a local file and synchronizes it to a resource on a remote host. If the resource is not a `NetworkResource`, the local file is used instead. - ProxyManager: a class to automatically create ssh forwardings to proxy connections over the exporter - SSHManager: a global manager to multiplex connections to different exporters - The target now saves it's attached drivers, resources and protocols in a lookup table, avoiding the need of importing many Drivers and Protocols (see `Syntactic sugar for Targets`_) - When multiple Drivers implement the same Protocol, the best one can be selected using a priority (see below). - The new subcommand ``labgrid-client monitor`` shows resource or places changes as they happen, which is useful during development or debugging. - The environment yaml file can now list Python files (under the 'imports' key). They are imported before constructing the Targets, which simplifies using custom Resources, Drivers or Strategies. - The pytest plugin now stores metadata about the environment yaml file in the junit XML output. - The ``labgrid-client`` tool now understands a ``--state`` option to transition to the provided state using a :any:`Strategy`. This requires an environment yaml file with a :any:`RemotePlace` Resources and matching Drivers. - Resource matches for places configured in the coordinator can now have a name, allowing multiple resources with the same class. - The new `Target.__getitem__` method makes writing using protocols less verbose. - Experimental: The labgrid-autoinstall tool was added (see below). New and Updated Drivers ~~~~~~~~~~~~~~~~~~~~~~~ - The new `DigitalOutputResetDriver` adapts a driver implementing the DigitalOutputProtocol to the ResetProtocol. - The new `ModbusCoilDriver` support outputs on a ModbusTCP device. - The new ``NetworkUSBStorageDriver`` allows writing to remote USB storage devices (such as SD cards or memory sticks connected to a mux). - The new `QEMUDriver` runs a system image in QEmu and implements the :any:`ConsoleProtocol` and :any:`PowerProtocol`. 
This allows using labgrid without any real hardware. - The new `QuartusHPSDriver` controls the "Quartus Prime Programmer and Tools" to flash a target's QSPI. - The new `SerialPortDigitalOutputDriver` controls the state of a GPIO using the control lines of a serial port. - The new `SigrokDriver` uses a (local or remote) device supported by sigrok to record samples. - The new `SmallUBootDriver` supports the extremely limited U-Boot found in cheap WiFi routers. - The new `USBSDMuxDriver` controls a Pengutronix USB-SD-Mux device. - The new `USBTMCDriver` can fetch measurements and screenshots from the "Keysight DSOX2000 series" and the "Tektronix TDS 2000 series". - The new `USBVideoDriver` can stream video from a remote H.264 UVC (USB Video Class) camera using gstreamer over SSH. Currently, configuration for the "Logitech HD Pro Webcam C920" exists. - The new `XenaDriver` allows interacting with Xena network testing equipment. - The new `YKUSHPowerDriver` and `USBPowerDriver` support software-controlled USB hubs. - The bootloader drivers now have a ``reset`` method. - The `BareboxDriver`'s boot string is now configurable, which allows it to work with the ``quiet`` Linux boot parameter. - The `IMXUSBLoader` now recognizes more USB IDs. - The `OpenOCDDriver` is now more flexible with loading configuration files. - The `NetworkPowerDriver` now additionally supports: - 24 port "Gude Expert Power Control 8080" - 8 port "Gude Expert Power Control 8316" - NETIO 4 models (via telnet) - a simple REST interface - The `SerialDriver` now supports using plain TCP instead of RFC 2217, which is needed from some console servers. - The `ShellDriver` has been improved: - It supports configuring the various timeouts used during the login process. - It can use xmodem to transfer file from and to the target. Incompatible Changes ~~~~~~~~~~~~~~~~~~~~ - When using the coordinator, it must be upgrade together with the clients because of the newly introduce match names. - Resources and Drivers now need to be created with an explicit name parameter. It can be ``None`` to keep the old behaviour. See below for details. - Classes derived from :any:`Resource` or :any:`Driver` now need to use ``@attr.s(cmp=False)`` instead of ``@attr.s`` because of a change in the attrs module version 17.1.0. Syntactic sugar for Targets ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Targets are now able to retrieve requested drivers, resources or protocols by name instead of by class. This allows removing many imports, e.g. :: from labgrid.driver import ShellDriver shell = target.get_driver(ShellDriver) becomes :: shell = target.get_driver("ShellDriver") Also take a look at the examples, they have been ported to the new syntax as well. Multiple Driver Instances ~~~~~~~~~~~~~~~~~~~~~~~~~ For some Protocols, it is useful to allow multiple instances. DigitalOutputProtocol: A board may have two jumpers to control the boot mode in addition to a reset GPIO. Previously, it was not possible to use these on a single target. ConsoleProtocol: Some boards have multiple console interfaces or expose a login prompt via a USB serial gadget. PowerProtocol: In some cases, multiple power ports need to be controlled for one Target. To support these use cases, Resources and Drivers must be created with a name parameter. When updating your code to this version, you can either simply set the name to ``None`` to keep the previous behaviour. Alternatively, pass a string as the name. Old: .. 
code-block:: python >>> t = Target("MyTarget") >>> SerialPort(t) SerialPort(target=Target(name='MyTarget', env=None), state=, avail=True, port=None, speed=115200) >>> SerialDriver(t) SerialDriver(target=Target(name='MyTarget', env=None), state=, txdelay=0.0) New (with name=None): .. code-block:: python >>> t = Target("MyTarget") >>> SerialPort(t, None) SerialPort(target=Target(name='MyTarget', env=None), name=None, state=, avail=True, port=None, speed=115200) >>> SerialDriver(t, None) SerialDriver(target=Target(name='MyTarget', env=None), name=None, state=, txdelay=0.0) New (with real names): .. code-block:: python >>> t = Target("MyTarget") >>> SerialPort(t, "MyPort") SerialPort(target=Target(name='MyTarget', env=None), name='MyPort', state=, avail=True, port=None, speed=115200) >>> SerialDriver(t, "MyDriver") SerialDriver(target=Target(name='MyTarget', env=None), name='MyDriver', state=, txdelay=0.0) Priorities ~~~~~~~~~~ Each driver supports a priorities class variable. This allows drivers which implement the same protocol to add a priority option to each of their protocols. This way a `NetworkPowerDriver` can implement the `ResetProtocol`, but if another `ResetProtocol` driver with a higher protocol is available, it will be selected instead. See the documentation for details. ConsoleLogging Reporter ~~~~~~~~~~~~~~~~~~~~~~~ The ConsoleLoggingReporter can be used with the pytest plugin or the library. It records the Data send from a DUT to the computer running labgrid. The logfile contains a header with the name of the device from the environment configuration and a timestamp. When using the library, the reporter can be started with:: from labgrid.consoleloggingreporter import ConsoleLoggingReporter ConsoleLoggingReporter.start(".") where "." is the output directory. The pytest plugin accepts the ``--lg-log`` commandline option, either with or without an output path. Auto-Installer Tool ~~~~~~~~~~~~~~~~~~~ To simplify using labgrid for provisioning several boards in parallel, the ``labgrid-autoinstall`` tool was added. It reads a YAML file defining several targets and a Python script to be run for each board. Interally, it spawns a child process for each target, which waits until a matching resource becomes available and then executes the script. For example, this makes it simple to load a bootloader via the :any:`BootstrapProtocol`, use the :any:`AndroidFastbootDriver` to upload a kernel with initramfs and then write the target's eMMC over a USB Mass Storage gadget. .. note:: ``labgrid-autoinstall`` is still experimental and no documentation has been written. Contributions from: Ahmad Fatoum, Bastian Krause, Björn Lässig, Chris Fiege, Enrico Joerns, Esben Haabendal, Felix Lampe, Florian Scherf, Georg Hofmann, Jan Lübbe, Jan Remmet, Johannes Nau, Kasper Revsbech, Kjeld Flarup, Laurentiu Palcu, Oleksij Rempel, Roland Hieber, Rouven Czerwinski, Stanley Phoong Cheong Kwan, Steffen Trumtrar, Tobi Gschwendtner, Vincent Prince Release 0.1.0 (released May 11, 2017) ------------------------------------- This is the initial release of labgrid. labgrid-0.4.1/CODEOWNERS000066400000000000000000000000441415016572500145170ustar00rootroot00000000000000/dockerfiles/ @krevsbech labgrid-0.4.1/COPYING000066400000000000000000000636421415016572500141740ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. 
When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. 
For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. 
In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. 
As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. 
You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! labgrid-0.4.1/LICENSE000066400000000000000000000015421415016572500141350ustar00rootroot00000000000000Copyright (C) 2016-2017 Pengutronix, Jan Luebbe Copyright (C) 2016-2017 Pengutronix, Rouven Czerwinski This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA labgrid-0.4.1/MANIFEST.in000066400000000000000000000000321415016572500146570ustar00rootroot00000000000000include fastentrypoints.pylabgrid-0.4.1/README.rst000066400000000000000000000102131415016572500146120ustar00rootroot00000000000000.. image:: labgrid_logo.png :alt: labgrid logo :align: center Welcome to labgrid ================== |license| |unit-tests| |docker-build| |coverage-status| |docs-status| |chat| Purpose ------- Labgrid is an embedded board control python library with a focus on testing, development and general automation. It includes a remote control layer to control boards connected to other hosts. 
The idea behind labgrid is to create an abstraction of the hardware control layer needed for testing of embedded systems, automatic software installation and automation during development. Labgrid itself is *not* a testing framework, but is intended to be combined with `pytest `_ (and additional pytest plugins). Please see `Design Decisions `_ for more background information. It currently supports: - pytest plugin to write tests for embedded systems connecting serial console or SSH - remote client-exporter-coordinator infrastructure to make boards available from different computers on a network - power/reset management via drivers for power switches or onewire PIOs - upload of binaries via USB: imxusbloader/mxsusbloader (bootloader) or fastboot (kernel) While labgrid is currently used for daily development on embedded boards and for automated testing, several planned features are not yet implemented and the APIs may be changed as more use-cases appear. We appreciate code contributions and feedback on using labgrid on other environments (see `Contributing `_ for details). Please consider contacting us (via a GitHub issue) before starting larger changes, so we can discuss design trade-offs early and avoid redundant work. You can also look at `Ideas `_ for enhancements which are not yet implemented. Documentation ------------- `Read the Docs `_ Contributing ------------ `Development Docs `_ IRC channel ``#labgrid`` on libera.chat (bridged to the `Matrix channel #labgrid:matrix.org `_) Background ---------- Work on labgrid started at `Pengutronix `_ in late 2016 and is currently in active use and development. Quickstart ---------- See the `Installation section `_ for more details. Clone the git repository: .. code-block:: bash $ git clone https://github.com/labgrid-project/labgrid Create and activate a virtualenv for labgrid: .. code-block:: bash $ virtualenv -p python3 venv $ source venv/bin/activate Install labgrid into the virtualenv: .. code-block:: bash $ pip install -r requirements.txt $ python setup.py install Tests can now run via: .. code-block:: bash $ python -m pytest --lg-env .. |license| image:: https://img.shields.io/badge/license-LGPLv2.1-blue.svg :alt: LGPLv2.1 :target: https://raw.githubusercontent.com/labgrid-project/labgrid/master/LICENSE .. |unit-tests| image:: https://github.com/labgrid-project/labgrid/workflows/unit%20tests/badge.svg :alt: unit tests status :target: https://github.com/labgrid-project/labgrid/actions?query=workflow%3A%22unit+tests%22+branch%3Amaster .. |docker-build| image:: https://github.com/labgrid-project/labgrid/workflows/docker%20build/badge.svg :alt: docker build status :target: https://github.com/labgrid-project/labgrid/actions?query=workflow%3A%22docker+build%22+branch%3Amaster .. |coverage-status| image:: https://codecov.io/gh/labgrid-project/labgrid/branch/master/graph/badge.svg :alt: coverage status :target: https://codecov.io/gh/labgrid-project/labgrid .. |docs-status| image:: https://readthedocs.org/projects/labgrid/badge/?version=latest :alt: documentation status :target: https://labgrid.readthedocs.io/en/latest/?badge=latest .. 
|chat| image:: https://matrix.to/img/matrix-badge.svg :alt: chat :target: https://app.element.io/#/room/#labgrid:matrix.org labgrid-0.4.1/ci-requirements.txt000066400000000000000000000000711415016572500170010ustar00rootroot00000000000000coveralls>=1.3.0 codecov>=2.0.15 -r dev-requirements.txt labgrid-0.4.1/contrib/000077500000000000000000000000001415016572500145665ustar00rootroot00000000000000labgrid-0.4.1/contrib/sync-places.py000077500000000000000000000154461415016572500173760ustar00rootroot00000000000000#! /usr/bin/env python3 # # Copyright 2021 Garmin Ltd. or its subsidiaries # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse from contextlib import contextmanager from labgrid.remote.client import start_session from labgrid.util.proxy import proxymanager import os import sys import textwrap import yaml def main(): @contextmanager def get_file(name, mode, standard): if name == "-": yield standard else: with open(name, mode) as f: yield f async def do_sync(session, args): with get_file(args.places, "r", sys.stdin) as f: config = yaml.safe_load(f) config.setdefault("places", {}) changed = False seen_places = set() remove_places = set() for name, place in session.places.items(): if name in config["places"]: seen_places.add(name) else: remove_places.add(name) for name in remove_places: print(f"Removing place {name}") if not args.dry_run: await session.call("org.labgrid.coordinator.del_place", name) changed = True for name in config["places"]: if not name in seen_places: print(f"Adding place {name}") if not args.dry_run: await session.call("org.labgrid.coordinator.add_place", name) changed = True for name in config["places"]: matches = config["places"][name].get("matches", []) seen_matches = set() remove_matches = set() place_tags = {} if name in seen_places: place = session.places[name] for m in place.matches: m = repr(m) if m in matches: seen_matches.add(m) else: remove_matches.add(m) place_tags = place.tags for m in remove_matches: print(f"Deleting match '{m}' for place {name}") if not args.dry_run: await session.call( "org.labgrid.coordinator.del_place_match", name, m ) changed = True for m in matches: if not m in seen_matches: print(f"Adding match '{m}' for place {name}") if not args.dry_run: await session.call( "org.labgrid.coordinator.add_place_match", name, m ) changed = True tags = config["places"][name].get("tags", {}).copy() if place_tags != tags: print( "Setting tags for place %s to %s" % ( name, ", ".join( "%s=%s" % (key, value) for (key, value) in tags.items() ), ) ) # Set the empty string for tags that should be removed for k in place_tags: if k not in tags: tags[k] = "" if not args.dry_run: await session.call( "org.labgrid.coordinator.set_place_tags", name, tags ) changed = True async def do_dump(session, args): config = {"places": {}} for name, place in session.places.items(): config["places"][name] = { "matches": [repr(m) for m in place.matches], "tags": {k: v for k, v in place.tags.items()}, } with get_file(args.dest, "w", sys.stdout) as f: yaml.dump(config, f) parser = 
argparse.ArgumentParser( description="Synchronize Labgrid places", epilog=textwrap.dedent( """\ The YAML files describe what places should exist and what match strings and tags should be assigned to those places. The files are structured like: places: # A dictonary of places where each key is a place name my-place1: # Replace with your place matches: # A list of match patterns. Replace with your match patterns - "*/my-place1/*" tags: # A dictionary of key/value tags. Replace with your tags board: awesomesauce bar: baz When syncing places, tags, and matches will be added or removed until the remote configuration matches the one in the YAML file """ ), formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( "--crossbar", "-x", metavar="URL", default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), help="Crossbar websocket URL (default: %(default)s)", ) parser.add_argument("--proxy", "-P", help="Proxy connections via given ssh host") subparsers = parser.add_subparsers() subparsers.required = True sync_parser = subparsers.add_parser( "sync", help="Synchronize coordinator places with file" ) sync_parser.add_argument( "places", metavar="FILE", help="Places configuration YAML file. Use '-' for stdin", ) sync_parser.add_argument( "--dry-run", "-n", action="store_true", help="Don't make any changes, only show what would be done", ) sync_parser.set_defaults(func=do_sync) dump_parser = subparsers.add_parser( "dump", help="Dump existing places configuration to a YAML file. The dumped file is suitable for passing to `sync`", ) dump_parser.add_argument( "dest", metavar="FILE", nargs="?", default="-", help="Destination file. Use '-' for stdout. Default is '%(default)s'", ) dump_parser.set_defaults(func=do_dump) args = parser.parse_args() if args.proxy: proxymanager.force_proxy(args.proxy) session = start_session( args.crossbar, os.environ.get("LG_CROSSBAR_REALM", "realm1"), {} ) return session.loop.run_until_complete(args.func(session, args)) if __name__ == "__main__": sys.exit(main()) labgrid-0.4.1/contrib/systemd/000077500000000000000000000000001415016572500162565ustar00rootroot00000000000000labgrid-0.4.1/contrib/systemd/labgrid-coordinator.service000066400000000000000000000006401415016572500235650ustar00rootroot00000000000000[Unit] Description=Labgrid Coordinator After=network.target [Service] ExecStart=/path/to/labgrid-coordinator/venv/bin/crossbar start --logformat=syslogd --cbdir /var/lib/labgrid-coordinator --config /etc/labgrid/coordinator.yaml ExecStop=/usr/bin/labgrid-coordinator stop --cbdir /var/lib/labgrid-coordinator Restart=on-abort DynamicUser=yes StateDirectory=labgrid-coordinator [Install] WantedBy=multi-user.target labgrid-0.4.1/contrib/systemd/labgrid-exporter.service000066400000000000000000000005571415016572500231210ustar00rootroot00000000000000[Unit] Description=Labgrid Exporter After=network.target [Service] ExecStart=/path/to/labgrid/venv/bin/labgrid-exporter /etc/labgrid/exporter.yaml Restart=on-abort User=labgrid Group=labgrid # Adjust to your distribution (most often "dialout" or "tty") SupplementaryGroups=dialout CacheDirectory=labgrid CacheDirectoryMode=1775 [Install] WantedBy=multi-user.target labgrid-0.4.1/contrib/systemd/sysusers.d/000077500000000000000000000000001415016572500204005ustar00rootroot00000000000000labgrid-0.4.1/contrib/systemd/sysusers.d/labgrid.conf000066400000000000000000000001011415016572500226430ustar00rootroot00000000000000# Type Name ID GECOS Home directory Shell u labgrid - "Labgrid" 
labgrid-0.4.1/contrib/systemd/tmpfiles.d/000077500000000000000000000000001415016572500203235ustar00rootroot00000000000000labgrid-0.4.1/contrib/systemd/tmpfiles.d/labgrid.conf000066400000000000000000000004631415016572500226010ustar00rootroot00000000000000# Labgrid saves files uploaded to the exporter in /var/cache # This configuration file creates the directory and is meant as a starting point # it is advised to at least change the group from the default for your # environment # Path Mode UID GID Age Argument d /var/cache/labgrid 1775 labgrid labgrid 2d labgrid-0.4.1/crossbar-requirements.txt000066400000000000000000000001021415016572500202170ustar00rootroot00000000000000-r requirements.txt setuptools>=38.0.0 crossbar==21.1.1 idna==2.5 labgrid-0.4.1/deb-requirements.txt000066400000000000000000000001551415016572500171430ustar00rootroot00000000000000-r crossbar-requirements.txt -r onewire-requirements.txt -r modbus-requirements.txt -r snmp-requirements.txt labgrid-0.4.1/debian/000077500000000000000000000000001415016572500143505ustar00rootroot00000000000000labgrid-0.4.1/debian/changelog000066400000000000000000000002261415016572500162220ustar00rootroot00000000000000labgrid (0.2.0) UNRELEASED; urgency=low * Initial release. (Closes: #XXXXXX) -- Jan Lübbe Mon, 07 Jan 2019 11:57:16 +0100 labgrid-0.4.1/debian/compat000066400000000000000000000000031415016572500155470ustar00rootroot0000000000000011 labgrid-0.4.1/debian/control000066400000000000000000000020631415016572500157540ustar00rootroot00000000000000Source: labgrid Section: python Priority: extra Maintainer: Jan Lübbe Build-Depends: debhelper (>= 11), python3-all-dev, dh-virtualenv (>= 0.8), libow-dev, libpython3-dev, python3-venv, python3-setuptools, git, libsodium-dev, libffi-dev, libssl-dev Standards-Version: 3.9.5 Package: labgrid Architecture: any Pre-Depends: dpkg (>= 1.16.1), python3, ${misc:Pre-Depends} Depends: ${python3:Depends}, ${misc:Depends}, ${shlibs:Depends} Recommends: openssh-client, microcom, socat, sshfs, rsync Description: embedded board control python library Labgrid is a embedded board control python library with a focus on testing, development and general automation. It includes a remote control layer to control boards connected to other hosts. . The idea behind labgrid is to create an abstraction of the hardware control layer needed for testing of embedded systems, automatic software installation and automation during development. Labgrid itself is not a testing framework, but is intended to be combined with pytest (and additional pytest plugins). labgrid-0.4.1/debian/copyright000066400000000000000000000052321415016572500163050ustar00rootroot00000000000000Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ Upstream-Name: labgrid Source: https://github.com/labgrid-project/labgrid Files: * Copyright: Copyright (C) 2016-2021 Pengutronix, Jan Luebbe Copyright (C) 2016-2021 Pengutronix, Rouven Czerwinski License: LGPL-2.1+ Files: man/* Copyright: Copyright (C) 2016-2017 Pengutronix License: LGPL-2.1+ Files: fastentrypoints.py Copyright: Copyright (c) 2016, Aaron Christianson All rights reserved. License: BSD-2-Clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: . 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. . 2. 
Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. . THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. License: LGPL-2.1+ This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. . This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. . You should have received a copy of the GNU Lesser General Public License along with this package; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA . On Debian systems, the complete text of the GNU Lesser General Public License can be found in `/usr/share/common-licenses/LGPL-2.1'. 
labgrid-0.4.1/debian/labgrid-client000077500000000000000000000001341415016572500171540ustar00rootroot00000000000000#!/bin/sh cmd="labgrid-client -c /etc/labgrid.yaml" exec /opt/venvs/labgrid/bin/$cmd "$@" labgrid-0.4.1/debian/labgrid-exporter000077500000000000000000000001111415016572500175410ustar00rootroot00000000000000#!/bin/sh cmd="labgrid-exporter" exec /opt/venvs/labgrid/bin/$cmd "$@" labgrid-0.4.1/debian/labgrid-pytest000077500000000000000000000000771415016572500172340ustar00rootroot00000000000000#!/bin/sh cmd="pytest" exec /opt/venvs/labgrid/bin/$cmd "$@" labgrid-0.4.1/debian/labgrid.install000066400000000000000000000002401415016572500173400ustar00rootroot00000000000000debian/labgrid.yaml /etc debian/labgrid-client /usr/bin debian/labgrid-exporter /usr/bin debian/labgrid-pytest /usr/bin helpers/labgrid-bound-connect /usr/sbin labgrid-0.4.1/debian/labgrid.tmpfile000077700000000000000000000000001415016572500272142../contrib/systemd/tmpfiles.d/labgrid.confustar00rootroot00000000000000labgrid-0.4.1/debian/labgrid.triggers000066400000000000000000000005041415016572500175230ustar00rootroot00000000000000# Register interest in Python interpreter changes; and don't make the Python # package dependent on the virtualenv package processing (noawait) interest-noawait /usr/bin/python3.5 interest-noawait /usr/bin/python3.6 # Also provide a symbolic trigger for all dh-virtualenv packages interest dh-virtualenv-interpreter-update labgrid-0.4.1/debian/labgrid.yaml000066400000000000000000000001171415016572500166370ustar00rootroot00000000000000tools: fastboot: /usr/bin/fastboot imx-usb-loader: /usr/bin/imx-usb-loader labgrid-0.4.1/debian/rules000077500000000000000000000005101415016572500154240ustar00rootroot00000000000000#!/usr/bin/make -f %: dh $@ --with python-virtualenv override_dh_shlibs: dh_shlibs -l/opt override_dh_virtualenv: dh_virtualenv \ --python /usr/bin/python3 \ --builtin-venv \ --preinstall 'setuptools>=38.0.0' \ --requirements deb-requirements.txt \ --extra-pip-arg='--no-binary' \ --extra-pip-arg='cffi,numpy' labgrid-0.4.1/dev-requirements.txt000066400000000000000000000006451415016572500171730ustar00rootroot00000000000000pytest-cov==2.10.1 pytest-isort==1.2.0 pytest-mock==3.3.1 pytest-pylint==0.17.0 pytest-dependency==0.5.1 yapf==0.28.0 psutil==5.6.6 -r doc-requirements.txt -r crossbar-requirements.txt -r onewire-requirements.txt -r modbus-requirements.txt -r snmp-requirements.txt -r xena-requirements.txt -r graph-requirements.txt -r docker-requirements.txt -r pyvisa-requirements.txt -r vxi11-requirements.txt -r mqtt-requirements.txt labgrid-0.4.1/doc-requirements.txt000066400000000000000000000000671415016572500171600ustar00rootroot00000000000000Sphinx==2.2.1 sphinx_rtd_theme==0.4.3 docutils==0.15.2 labgrid-0.4.1/doc/000077500000000000000000000000001415016572500136735ustar00rootroot00000000000000labgrid-0.4.1/doc/Makefile000066400000000000000000000011341415016572500153320ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = labgrid SOURCEDIR = . BUILDDIR = .build # Put it first so that "make" without argument is like "make help". help: @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) .PHONY: help Makefile # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
%: Makefile @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)labgrid-0.4.1/doc/RELEASE.rst000066400000000000000000000046611415016572500155140ustar00rootroot00000000000000Step by step guide to releasing a new labgrid version. 0. Preparations =============== Clean the `dist/` directory: .. code-block:: bash rm dist/* Check your commit mail and name: .. code-block:: bash git config --get user.name git config --get user.email 1. Freeze Dependencies ====================== Freeze the dependencies into the `requirements.txt` file and update the separate feature requirements files. This ensures that known good dependencies are available for every release. 2. Update setup.py Dependencies =============================== Update the minimum required dependencies in the `setup.py` file. These are intentionally non restrictive, see `PYPA Discussion `_. A frozen environment is already created in the previous step. 3. Update CHANGES.rst ===================== Update the `CHANGES.rst` file. This should ideally be done in every pull request, but its better to check that the `CHANGES.rst` is up to date. Ensure that no incompatiblities are unlisted and that all major features are described in a separate section. It's best to compare against the git log. 4. Bump Version Number ====================== Bump the version number in `CHANGES.rst` and `setup.py` 5. Create a signed Tag ====================== Create a signed tag of the new release. Your PGP-key has to be available on the computer. .. code-block:: bash git tag -s 6. Create sdist =============== Run the following command: :: python setup.py sdist The sdist file will be available in the `dist/` directory. 7. Test upload to pypi dev ========================== Test the upload by using twine to upload to pypi test service :: twine upload --repository-url https://test.pypi.org/legacy/ dist/* 8. Test download from pypi dev ============================== Test the upload by using pypi dev as a download source :: virtualenv -p python3 labgrid-release- source labgrid-release-/bin/activate pip install --index-url https://test.pypi.org/simple/ labgrid And optionally run the tests: :: pip install -r dev-requirements pytest tests 9. Upload to pypi ================= Upload the tested dist file to pypi. :: twine upload dist/* 10. Upload the signed tag ========================== Upload the signed tag to the upstream repository :: git push upstream labgrid-0.4.1/doc/changes.rst000066400000000000000000000001401415016572500160300ustar00rootroot00000000000000:tocdepth: 2 .. default-role:: any .. _changes: Changes ======= .. include:: ../CHANGES.rst labgrid-0.4.1/doc/conf.py000066400000000000000000000145431415016572500152010ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # labgrid documentation build configuration file, created by # sphinx-quickstart on Mon Feb 20 10:00:00 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import os import sys sys.path.insert(0, os.path.abspath('..')) from pkg_resources import get_distribution # Import read_the_docs theme import sphinx_rtd_theme # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.autosectionlabel'] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'labgrid' copyright = '2016-2021 Pengutronix, Jan Luebbe and Rouven Czerwinski' author = 'Jan Luebbe, Rouven Czerwinski' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = get_distribution('labgrid').version # The short X.Y version. version = '.'.join(release.split('.')[:2]) # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['.build', 'Thumbs.db', '.DS_Store', 'RELEASE.rst'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Set correct html_path for rtd theme: html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'labgriddoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'labgrid.tex', 'labgrid Documentation', 'Jan Luebbe, Rouven Czerwinski', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'labgrid', 'labgrid Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'labgrid', 'labgrid Documentation', author, 'labgrid', 'One line description of project.', 'Miscellaneous'), ] # -- Options for autodoc -------------------------------------------------- autodoc_member_order = 'bysource' autodoc_default_options = { 'special-members': True, } autodoc_mock_imports = ['onewire', 'txaio', 'autobahn', 'autobahn.asyncio', 'autobahn.asyncio.wamp', 'autobahn.wamp', 'autobahn.wamp.types', 'autobahn.twisted', 'autobahn.twisted.wamp', 'autobahn.wamp.exception', 'twisted.internet.defer', 'gi', 'gi.repository',] # -- Options for autosection ---------------------------------------------- autosectionlabel_prefix_document = True from unittest.mock import Mock for mod in autodoc_mock_imports: sys.modules[mod] = Mock() def run_apidoc(app): from sphinx.ext.apidoc import main module = os.path.abspath(os.path.join(app.srcdir, '..', 'labgrid')) output = os.path.abspath(os.path.join(app.srcdir, 'modules')) cmd = [module, '-a', '-M', '-H', 'Modules', '-o', output] main(cmd) def setup(app): app.connect('builder-inited', run_apidoc) labgrid-0.4.1/doc/configuration.rst000066400000000000000000002243271415016572500173060ustar00rootroot00000000000000Configuration ============= This chapter describes the individual drivers and resources used in a device configuration. Drivers can depend on resources or other drivers, whereas resources have no dependencies. .. image:: res/config_graph.svg :width: 50% Here the resource `RawSerialPort` provides the information for the `SerialDriver`, which in turn is needed by the `ShellDriver`. Driver dependency resolution is done by searching for the driver which implements the dependent protocol, all drivers implement one or more protocols. Resources --------- Serial Ports ~~~~~~~~~~~~ RawSerialPort +++++++++++++ A RawSerialPort is a serial port which is identified via the device path on the local computer. Take note that re-plugging USB serial converters can result in a different enumeration order. .. code-block:: yaml RawSerialPort: port: /dev/ttyUSB0 speed: 115200 The example would access the serial port /dev/ttyUSB0 on the local computer with a baud rate of 115200. - port (str): path to the serial device - speed (int, default=115200): desired baud rate Used by: - `SerialDriver`_ NetworkSerialPort +++++++++++++++++ A NetworkSerialPort describes a serial port which is exported over the network, usually using RFC2217 or raw tcp. .. code-block:: yaml NetworkSerialPort: host: remote.example.computer port: 53867 speed: 115200 The example would access the serial port on computer remote.example.computer via port 53867 and use a baud rate of 115200 with the RFC2217 protocol. 
- host (str): hostname of the remote host - port (str): TCP port on the remote host to connect to - speed (int, default=115200): baud rate of the serial port - protocol (str, default="rfc2217"): protocol used for connection: raw or rfc2217 Used by: - `SerialDriver`_ USBSerialPort +++++++++++++ A USBSerialPort describes a serial port which is connected via USB and is identified by matching udev properties. This allows identification through hot-plugging or rebooting. .. code-block:: yaml USBSerialPort: match: 'ID_SERIAL_SHORT': 'P-00-00682' speed: 115200 The example would search for a USB serial converter with the key `ID_SERIAL_SHORT` and the value `P-00-00682` and use it with a baud rate of 115200. - match (str): key and value for a udev match, see `udev Matching`_ - speed (int, default=115200): baud rate of the serial port Used by: - `SerialDriver`_ Power Ports ~~~~~~~~~~~ NetworkPowerPort ++++++++++++++++ A NetworkPowerPort describes a remotely switchable power port. .. code-block:: yaml NetworkPowerPort: model: gude host: powerswitch.example.computer index: 0 The example describes port 0 on the remote power switch `powerswitch.example.computer`, which is a `gude` model. - model (str): model of the power switch - host (str): hostname of the power switch - index (int): number of the port to switch The `model` property selects one of several `backend implementations `_. Currently available are: ``apc`` Controls an APC PDU via SNMP. ``digipower`` Controls a DigiPower PDU via a simple HTTP API. ``gude`` Controls a Gude PDU via a simple HTTP API. ``gude24`` Controls a Gude Expert Power Control 8008 PDU via a simple HTTP API. ``gude8031`` Controls a Gude Expert Power Control 8031 PDU via a simple HTTP API. ``gude8316`` Controls a Gude Expert Power Control 8316 PDU via a simple HTTP API. ``netio`` Controls a NETIO 4-Port PDU via a simple HTTP API. ``netio_kshell`` Controls a NETIO 4C PDU via a Telnet interface. ``rest`` This is a generic backend for PDU implementations which can be controlled via HTTP PUT and GET requests. See the `docstring in the module `__ for details. ``sentry`` Controls a Sentry PDU via SNMP using Sentry3-MIB. It was tested on CW-24VDD and 4805-XLS-16. ``siglent`` Controls Siglent SPD3000X series modules via the `vxi11 Python module `_. ``simplerest`` This is a generic backend for PDU implementations which can be controlled via HTTP GET requests (both set and get). See the `docstring in the module `__ for details. Used by: - `NetworkPowerDriver`_ PDUDaemonPort +++++++++++++ A PDUDaemonPort describes a PDU port accessible via `PDUDaemon `_. As one PDUDaemon instance can control many PDUs, the instance name from the PDUDaemon configuration file needs to be specified. .. code-block:: yaml PDUDaemonPort: host: pduserver pdu: apc-snmpv3-noauth index: 1 The example describes port 1 on the PDU configured as `apc-snmpv3-noauth`, with PDUDaemon running on the host `pduserver`. - host (str): name of the host running the PDUDaemon - pdu (str): name of the PDU in the configuration file - index (int): index of the power port on the PDU Used by: - `PDUDaemonDriver`_ YKUSHPowerPort ++++++++++++++ A YKUSHPowerPort describes a YEPKIT YKUSH USB (HID) switchable USB hub. .. code-block:: yaml YKUSHPowerPort: serial: YK12345 index: 1 The example describes port 1 on the YKUSH USB hub with the serial "YK12345". (use "pykush -l" to get your serial...) 
- serial (str): serial number of the YKUSH hub - index (int): number of the port to switch Used by: - `YKUSHPowerDriver`_ USBPowerPort ++++++++++++ A USBPowerPort describes a generic switchable USB hub as supported by `uhubctl `_. .. code-block:: yaml USBPowerPort: match: ID_PATH: pci-0000:00:14.0-usb-0:2:1.0 index: 1 The example describes port 1 on the hub with the ID_PATH "pci-0000:00:14.0-usb-0:2:1.0". (use ``udevadm info /sys/bus/usb/devices/...`` to find the ID_PATH value) - index (int): number of the port to switch - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `USBPowerDriver`_ .. note:: Labgrid requires that the interface is contained in the ID_PATH. This usually means that the ID_PATH should end with ``:1.0``. Only this first interface is registered with the ``hub`` driver labgrid is looking for, paths without the interface will fail to match since they use the ``usb`` driver. SiSPMPowerPort ++++++++++++++ A SiSPMPowerPort describes a GEMBIRD SiS-PM as supported by `sispmctl `_. .. code-block:: yaml SiSPMPowerPort: match: ID_PATH: platform-1c1a400.usb-usb-0:2 index: 1 The example describes port 1 on the hub with the ID_PATH "platform-1c1a400.usb-usb-0:2". - index (int): number of the port to switch - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `SiSPMPowerDriver`_ TasmotaPowerPort ++++++++++++++++ A :any:`TasmotaPowerPort` resource describes a switchable Tasmota power outlet accessed over MQTT. .. code-block:: yaml TasmotaPowerPort: host: this.is.an.example.host.com status_topic: stat/tasmota_575A2B/POWER power_topic: cmnd/tasmota_575A2B/POWER avail_topic: tele/tasmota_575A2B/LWT The example uses a mosquitto server at "this.is.an.example.host.com" and has the topics set up for a tasmota power port that has the ID 575A2B. - host (str): hostname of the MQTT server - status_topic (str): topic that signals the current status as "ON" or "OFF" - power_topic (str): topic that allows switching the status between "ON" and "OFF" - avail_topic (str): topic that signals the availability of the Tasmota power outlet Used by: - `TasmotaPowerDriver`_ Digital Outputs ~~~~~~~~~~~~~~~ ModbusTCPCoil +++++++++++++ A ModbusTCPCoil describes a coil accessible via ModbusTCP. .. code-block:: yaml ModbusTCPCoil: host: "192.168.23.42" coil: 1 The example describes the coil with the address 1 on the ModbusTCP device `192.168.23.42`. - host (str): hostname of the Modbus TCP server e.g. "192.168.23.42:502" - coil (int): index of the coil e.g. 3 - invert (bool, default=False): whether the logic level is inverted (active-low) Used by: - `ModbusCoilDriver`_ DeditecRelais8 ++++++++++++++ A DeditecRelais8 describes a Deditec USB GPO module with 8 relays. .. code-block:: yaml DeditecRelais8: index: 1 invert: false - index (int): number of the relay to use - invert (bool, default=False): whether the logic level is inverted (active-low) - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `DeditecRelaisDriver`_ OneWirePIO ++++++++++ A OneWirePIO describes a onewire programmable I/O pin. .. code-block:: yaml OneWirePIO: host: example.computer path: /29.7D6913000000/PIO.0 invert: false The example describes a `PIO.0` at device address `29.7D6913000000` via the onewire server on `example.computer`. 
- host (str): hostname of the remote system running the onewire server - path (str): path on the server to the programmable I/O pin - invert (bool, default=False): whether the logic level is inverted (active-low) Used by: - `OneWirePIODriver`_ LXAIOBusPIO +++++++++++ An :any:`LXAIOBusPIO` resource describes a single PIO pin on an LXAIOBusNode. .. code-block:: yaml LXAIOBusPIO: host: localhost:8080 node: IOMux-00000003 pin: OUT0 invert: False The example uses an lxa-iobus-server running on localhost:8080, with node IOMux-00000003 and pin OUT0. - host (str): hostname with port of the lxa-io-bus server - node (str): name of the node to use - pin (str): name of the pin to use - invert (bool, default=False): whether to invert the pin Used by: - `LXAIOBusPIODriver`_ NetworkLXAIOBusPIO ++++++++++++++++++ A NetworkLXAIOBusPIO describes an `LXAIOBusPIO`_ exported over the network. HIDRelay ++++++++ An :any:`HIDRelay` resource describes a single output of a HID protocol based USB relay. It currently supports the widely used "dcttech USBRelay". .. code-block:: yaml HIDRelay: index: 2 invert: False - index (int, default=1): number of the relay to use - invert (bool, default=False): whether to invert the relay - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `HIDRelayDriver`_ NetworkHIDRelay +++++++++++++++ A NetworkHIDRelay describes an `HIDRelay`_ exported over the network. NetworkService ~~~~~~~~~~~~~~ A NetworkService describes a remote SSH connection. .. code-block:: yaml NetworkService: address: example.computer username: root The example describes a remote SSH connection to the computer `example.computer` with the username `root`. Set the optional password property to make SSH login with a password instead of the key file. When used with ``labgrid-exporter``, the address can contain a device scope suffix (such as ``%eth1``), which is especially useful with overlapping address ranges or link-local IPv6 addresses. In that case, the SSH connection will be proxied via the exporter, using ``socat`` and the ``labgrid-bound-connect`` sudo helper. These and the sudo configuration need to be prepared by the administrator. - address (str): hostname of the remote system - username (str): username used by SSH - password (str, default=""): password used by SSH - port (int, default=22): port used by SSH Used by: - `SSHDriver`_ USBMassStorage ~~~~~~~~~~~~~~ A USBMassStorage resource describes a USB memory stick or similar device. .. code-block:: yaml USBMassStorage: match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0-scsi-0:0:0:3' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `USBStorageDriver`_ NetworkUSBMassStorage ~~~~~~~~~~~~~~~~~~~~~ A NetworkUSBMassStorage resource describes a USB memory stick or similar device available on a remote computer. Used by: - `USBStorageDriver`_ The NetworkUSBMassStorage can be used in test cases by calling the `write_image()` and `get_size()` functions. SigrokDevice ~~~~~~~~~~~~ A SigrokDevice resource describes a sigrok device. To select a specific device from all connected supported devices use the `SigrokUSBDevice`_. .. code-block:: yaml SigrokUSBDevice: driver: fx2lafw channel: "D0=CLK,D1=DATA" - driver (str): name of the sigrok driver to use - channel (str): optional, channel mapping as described in the sigrok-cli man page Used by: - `SigrokDriver`_ IMXUSBLoader ~~~~~~~~~~~~ An IMXUSBLoader resource describes a USB device in the imx loader state. .. 
code-block:: yaml IMXUSBLoader: match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `IMXUSBDriver`_ - `UUUDriver`_ MXSUSBLoader ~~~~~~~~~~~~ An MXSUSBLoader resource describes a USB device in the mxs loader state. .. code-block:: yaml MXSUSBLoader: match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `MXSUSBDriver`_ - `UUUDriver`_ RKUSBLoader ~~~~~~~~~~~~ An RKUSBLoader resource describes a USB device in the rockchip loader state. .. code-block:: yaml RKUSBLoader: match: 'sys_name': '1-3' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `RKUSBDriver`_ NetworkMXSUSBLoader ~~~~~~~~~~~~~~~~~~~ A NetworkMXSUSBLoader describes an `MXSUSBLoader`_ available on a remote computer. NetworkIMXUSBLoader ~~~~~~~~~~~~~~~~~~~ A NetworkIMXUSBLoader describes an `IMXUSBLoader`_ available on a remote computer. NetworkRKUSBLoader ~~~~~~~~~~~~~~~~~~~ A NetworkRKUSBLoader describes an `RKUSBLoader`_ available on a remote computer. AndroidFastboot ~~~~~~~~~~~~~~~ An AndroidFastboot resource describes a USB device in the fastboot state. .. code-block:: yaml AndroidFastboot: match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `AndroidFastbootDriver`_ USBNetworkInterface ~~~~~~~~~~~~~~~~~~~~ A USBNetworkInterface resource describes a USB network adapter (such as Ethernet or WiFi) .. code-block:: yaml USBNetworkInterface: match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' - match (str): key and value for a udev match, see `udev Matching`_ RemoteNetworkInterface ~~~~~~~~~~~~~~~~~~~~~~ A :any:`RemoteNetworkInterface` resource describes a :any:`USBNetworkInterface` resource available on a remote computer. AlteraUSBBlaster ~~~~~~~~~~~~~~~~ An AlteraUSBBlaster resource describes an Altera USB blaster. .. code-block:: yaml AlteraUSBBlaster: match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' - match (dict): key and value for a udev match, see `udev Matching`_ Used by: - `OpenOCDDriver`_ - `QuartusHPSDriver`_ USBDebugger ~~~~~~~~~~~ An USBDebugger resource describes a JTAG USB adapter (for example an FTDI FT2232H). .. code-block:: yaml USBDebugger: match: ID_PATH: 'pci-0000:00:10.0-usb-0:1.4' - match (dict): key and value for a udev match, see `udev Matching`_ Used by: - `OpenOCDDriver`_ SNMPEthernetPort ~~~~~~~~~~~~~~~~ A SNMPEthernetPort resource describes a port on an Ethernet switch, which is accessible via SNMP. .. code-block:: yaml SNMPEthernetPort: switch: "switch-012" interface: "17" - switch (str): host name of the Ethernet switch - interface (str): interface name SigrokUSBDevice ~~~~~~~~~~~~~~~ A SigrokUSBDevice resource describes a sigrok USB device. .. code-block:: yaml SigrokUSBDevice: driver: fx2lafw channel: "D0=CLK,D1=DATA" match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' - driver (str): name of the sigrok driver to use - channel (str): optional, channel mapping as described in the sigrok-cli man page - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `SigrokDriver`_ NetworkSigrokUSBDevice ~~~~~~~~~~~~~~~~~~~~~~ A NetworkSigrokUSBDevice resource describes a sigrok USB device connected to a host which is exported over the network. The SigrokDriver will access it via SSH. .. 
code-block:: yaml NetworkSigrokUSBDevice: driver: fx2lafw channel: "D0=CLK,D1=DATA" match: 'ID_PATH': 'pci-0000:06:00.0-usb-0:1.3.2:1.0' host: remote.example.computer - driver (str): name of the sigrok driver to use - channel (str): optional, channel mapping as described in the sigrok-cli man page - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `SigrokDriver`_ SigrokUSBSerialDevice +++++++++++++++++++++ A SigrokUSBSerialDevice resource describes a sigrok device which communicates over a USB serial port instead of being a USB device itself (see `SigrokUSBDevice` for that case). .. code-block:: yaml SigrokUSBSerialDevice: driver: manson-hcs-3xxx match: '@ID_SERIAL_SHORT': P-00-02389 - driver (str): name of the sigrok driver to use - channels (str): optional, channel mapping as described in the sigrok-cli man page Used by: - `SigrokPowerDriver`_ USBSDMuxDevice ~~~~~~~~~~~~~~ A :any:`USBSDMuxDevice` resource describes a Pengutronix `USB-SD-Mux `_ device. .. code-block:: yaml USBSDMuxDevice: match: '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `USBSDMUXDriver`_ NetworkUSBSDMuxDevice ~~~~~~~~~~~~~~~~~~~~~ A :any:`NetworkUSBSDMuxDevice` resource describes a `USBSDMuxDevice`_ available on a remote computer. LXAUSBMux ~~~~~~~~~ A :any:`LXAUSBMux` resource describes a Linux Automation GmbH USB-Mux device. .. code-block:: yaml LXAUSBMux: match: '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `LXAUSBMuxDriver`_ NetworkLXAUSBMux ~~~~~~~~~~~~~~~~ A :any:`NetworkLXAUSBMux` resource describes a `LXAUSBMux`_ available on a remote computer. USBSDWireDevice ~~~~~~~~~~~~~~~ A :any:`USBSDWireDevice` resource describes a Tizen `SD Wire device `_ device. .. code-block:: yaml USBSDWireDevice: match: '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `USBSDWireDriver`_ NetworkUSBSDWireDevice ~~~~~~~~~~~~~~~~~~~~~~ A :any:`NetworkUSBSDWireDevice` resource describes a `USBSDWireDevice`_ available on a remote computer. USBVideo ~~~~~~~~ A :any:`USBVideo` resource describes a USB video camera which is supported by a Video4Linux2 kernel driver. .. code-block:: yaml USBVideo: match: '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `USBVideoDriver`_ SysfsGPIO ~~~~~~~~~ A :any:`SysfsGPIO` resource describes a GPIO line. .. code-block:: yaml SysfsGPIO: index: 12 Used by: - `GpioDigitalOutputDriver`_ NetworkUSBVideo ~~~~~~~~~~~~~~~ A :any:`NetworkUSBVideo` resource describes a :any:`USBVideo` resource available on a remote computer. USBAudioInput ~~~~~~~~~~~~~ A :any:`USBAudioInput` resource describes a USB audio input which is supported by an ALSA kernel driver. .. code-block:: yaml USBAudioInput: match: '@sys_name': '1-4' - index (int, default=0): ALSA PCM device number (as in `hw:CARD=,DEV=`) - match (str): key and value for a udev match, see `udev Matching`_ Used by: - `USBAudioInputDriver`_ NetworkUSBAudioInput ~~~~~~~~~~~~~~~~~~~~ A :any:`NetworkUSBAudioInput` resource describes a :any:`USBAudioInput` resource available on a remote computer. USBTMC ~~~~~~ A :any:`USBTMC` resource describes an oscilloscope connected via the USB TMC protocol. The low-level communication is handled by the ``usbtmc`` kernel driver. .. 
code-block:: yaml USBTMC: match: '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' - match (str): key and value for a udev match, see `udev Matching`_ A udev rules file may be needed to allow access for non-root users: .. code-block:: none DRIVERS=="usbtmc", MODE="0660", GROUP="plugdev" Used by: - `USBTMCDriver`_ NetworkUSBTMC ~~~~~~~~~~~~~ A :any:`NetworkUSBTMC` resource describes a :any:`USBTMC` resource available on a remote computer. Flashrom ~~~~~~~~ A Flashrom resource is used to configure the parameters to a locally installed flashrom instance. It is assumed that flashrom is installed on the host and the executable is configured in: .. code-block:: yaml tools: flashrom: '/usr/sbin/flashrom' - programmer (str): programmer device as described in `-p, --programmer` in `man 8 flashrom` The resource must configure which programmer to use and the parameters to the programmer. The programmer parameter is passed directly to the flashrom binary, hence man(8) flashrom can be used for reference. Below is an example where the local spidev is used. .. code-block:: yaml Flashrom: programmer: 'linux_spi:dev=/dev/spidev0.1,spispeed=30000' Used by: - `FlashromDriver`_ NetworkFlashRom ~~~~~~~~~~~~~~~ A NetworkFlashrom describes a `Flashrom`_ available on a remote computer. USBFlashableDevice ~~~~~~~~~~~~~~~~~~ Represents an "opaque" USB device used by custom flashing programs. There is usually not anything useful that can be done with the interface other than running a flashing program with `FlashScriptDriver`_. .. note:: This resource is only intended to be used as a last resort when it is impossible or impractical to use a different resource. .. code-block:: yaml USBFlashableDevice: match: SUBSYSTEM: 'usb' ID_SERIAL: '1234' - match (str): key and value pairs for a udev match, see `udev Matching`_ Used by: - `FlashScriptDriver`_ NetworkUSBFlashableDevice ~~~~~~~~~~~~~~~~~~~~~~~~~ A :any:`NetworkUSBFlashableDevice` resource describes a :any:`USBFlashableDevice` resource available on a remote computer. Used by: - `FlashScriptDriver`_ XenaManager ~~~~~~~~~~~ A XenaManager resource describes a Xena Manager instance which is the instance the `XenaDriver`_ must connect to in order to configure a Xena chassis. .. code-block:: yaml XenaManager: hostname: "example.computer" - hostname (str): hostname or IP of the management address of the Xena tester Used by: - `XenaDriver`_ PyVISADevice ~~~~~~~~~~~~ A PyVISADevice resource describes a test stimulus device controlled by PyVISA. Such a device could be a signal generator. .. code-block:: yaml PyVISADevice: type: "TCPIP" url: "192.168.110.11" - type (str): device resource type following the pyVISA resource syntax, e.g. ASRL, TCPIP... - url (str): device identifier on selected resource, e.g. for TCPIP resource Used by: - `PyVISADriver`_ HTTPVideoStream ~~~~~~~~~~~~~~~ A :any:`HTTPVideoStream` resource describes an IP video stream over HTTP or HTTPS. .. code-block:: yaml HTTPVideoStream: url: 'http://192.168.110.11/0.ts' - url (str): URI of the IP video stream Used by: - `HTTPVideoDriver`_ Providers ~~~~~~~~~ Providers describe directories that are accessible by the target over a specific protocol. This is useful for software installation in the bootloader (via TFTP) or downloading update artifacts under Linux (via HTTP). They are used with the ManagedFile helper, which ensures that the file is available on the server and then creates a symlink from the internal directory to the uploaded file. The path for the target is generated by replacing the internal prefix with the external prefix. 
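The following snippet is only an illustration of this prefix replacement, not part of the labgrid API; the prefixes are taken from the TFTPProvider example below, while the file name ``bootloader.img`` is made up:

.. code-block:: python

    # Illustration only: how the target-facing path results from swapping the
    # provider's internal prefix for its external prefix.
    internal = "/srv/tftp/board-23/"
    external = "board-23/"

    staged = internal + "bootloader.img"               # file as seen by the TFTP server
    target_path = external + staged[len(internal):]    # path used by the target

    assert target_path == "board-23/bootloader.img"

The actual upload and symlinking are handled by the ManagedFile helper as described above; the snippet only shows how the resulting path is derived.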
For now, the TFTP/NFS/HTTP server needs to be configured before using it from labgrid. .. _TFTPProvider: .. _NFSProvider: .. _HTTPProvider: TFTPProvider / NFSProvider / HTTPProvider +++++++++++++++++++++++++++++++++++++++++ .. code-block:: yaml TFTPProvider: internal: "/srv/tftp/board-23/" external: "board-23/" HTTPProvider: internal: "/srv/www/board-23/" external: "http://192.168.1.1/board-23/" - internal (str): path prefix to the local directory accessible by the target - external (str): corresponding path prefix for use by the target Used by: - `TFTPProviderDriver`_ - `NFSProviderDriver`_ - `HTTPProviderDriver`_ .. _RemoteTFTPProvider: .. _RemoteNFSProvider: .. _RemoteHTTPProvider: RemoteTFTPProvider / RemoteNFSProvider / RemoteHTTPProvider +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ These describe a `TFTPProvider`_, `NFSProvider`_ or `HTTPProvider`_ resource available on a remote computer Used by: - `TFTPProviderDriver`_ - `NFSProviderDriver`_ - `HTTPProviderDriver`_ RemotePlace ~~~~~~~~~~~ A RemotePlace describes a set of resources attached to a labgrid remote place. .. code-block:: yaml RemotePlace: name: example-place The example describes the remote place `example-place`. It will connect to the labgrid remote coordinator, wait until the resources become available and expose them to the internal environment. - name (str): name or pattern of the remote place Used by: - potentially all drivers DockerDaemon ~~~~~~~~~~~~ A DockerDaemon describes where to contact a docker daemon process. DockerDaemon also participates in managing `NetworkService` instances created through interaction with that daemon. .. code-block:: yaml DockerDaemon: docker_daemon_url: 'unix://var/run/docker.sock' The example describes a docker daemon accessible via the '/var/run/docker.sock' unix socket. When used by a `DockerDriver`, the `DockerDriver` will first create a docker container which the DockerDaemon resource will subsequently use to create one/more `NetworkService` instances - as specified by `DockerDriver` configuration. Each `NetworkService` instance corresponds to a network service running inside the container. Moreover, DockerDaemon will remove any hanging containers if DockerDaemon is used several times in a row - as is the case when executing test suites. Normally `DockerDriver` - when deactivated - cleans up the created docker container; programming errors, keyboard interrupts or unix kill signals may lead to hanging containers, however; therefore auto-cleanup is important. - docker_daemon_url (str): The url of the daemon to use for this target. Used by: - `DockerDriver`_ udev Matching ~~~~~~~~~~~~~ udev matching allows labgrid to identify resources via their udev properties. Any udev property key and value can be used, path matching USB devices is allowed as well. This allows exporting a specific USB hub port or the correct identification of a USB serial converter across computers. The initial matching and monitoring for udev events is handled by the :any:`UdevManager` class. This manager is automatically created when a resource derived from :any:`USBResource` (such as :any:`USBSerialPort`, :any:`IMXUSBLoader` or :any:`AndroidFastboot`) is instantiated. To identify the kernel device which corresponds to a configured `USBResource`, each existing (and subsequently added) kernel device is matched against the configured resources. This is based on a list of `match entries` which must all be tested successfully against the potential kernel device. 
Match entries starting with an ``@`` are checked against the device's parents instead of itself; here one matching parent causes the check to be successful. A given `USBResource` class has builtin match entries that are checked first, for example that the ``SUBSYSTEM`` is ``tty`` as in the case of the :any:`USBSerialPort`. Only if these succeed, match entries provided by the user for the resource instance are considered. In addition to the properties reported by ``udevadm monitor --udev --property``, elements of the ``ATTR(S){}`` dictionary (as shown by ``udevadm info -a``) are useable as match keys. Finally ``sys_name`` allows matching against the name of the directory in sysfs. All match entries must succeed for the device to be accepted. The following examples show how to use the udev matches for some common use-cases. Matching a USB Serial Converter on a Hub Port +++++++++++++++++++++++++++++++++++++++++++++ This will match any USB serial converter connected below the hub port 1.2.5.5 on bus 1. The `sys_name` value corresponds to the hierarchy of buses and ports as shown with ``lsusb -t`` and is also usually displayed in the kernel log messages when new devices are detected. .. code-block:: yaml USBSerialPort: match: '@sys_name': '1-1.2.5.5' Note the ``@`` in the ``@sys_name`` match, which applies this match to the device's parents instead of directly to itself. This is necessary for the `USBSerialPort` because we actually want to find the ``ttyUSB?`` device below the USB serial converter device. Matching an Android Fastboot Device +++++++++++++++++++++++++++++++++++ In this case, we want to match the USB device on that port directly, so we don't use a parent match. .. code-block:: yaml AndroidFastboot: match: 'sys_name': '1-1.2.3' Matching a Specific UART in a Dual-Port Adapter +++++++++++++++++++++++++++++++++++++++++++++++ On this board, the serial console is connected to the second port of an on-board dual-port USB-UART. The board itself is connected to the bus 3 and port path 10.2.2.2. The correct value can be shown by running ``udevadm info /dev/ttyUSB9`` in our case: .. code-block:: bash :emphasize-lines: 21 $ udevadm info /dev/ttyUSB9 P: /devices/pci0000:00/0000:00:14.0/usb3/3-10/3-10.2/3-10.2.2/3-10.2.2.2/3-10.2.2.2:1.1/ttyUSB9/tty/ttyUSB9 N: ttyUSB9 S: serial/by-id/usb-FTDI_Dual_RS232-HS-if01-port0 S: serial/by-path/pci-0000:00:14.0-usb-0:10.2.2.2:1.1-port0 E: DEVLINKS=/dev/serial/by-id/usb-FTDI_Dual_RS232-HS-if01-port0 /dev/serial/by-path/pci-0000:00:14.0-usb-0:10.2.2.2:1.1-port0 E: DEVNAME=/dev/ttyUSB9 E: DEVPATH=/devices/pci0000:00/0000:00:14.0/usb3/3-10/3-10.2/3-10.2.2/3-10.2.2.2/3-10.2.2.2:1.1/ttyUSB9/tty/ttyUSB9 E: ID_BUS=usb E: ID_MODEL=Dual_RS232-HS E: ID_MODEL_ENC=Dual\x20RS232-HS E: ID_MODEL_FROM_DATABASE=FT2232C Dual USB-UART/FIFO IC E: ID_MODEL_ID=6010 E: ID_PATH=pci-0000:00:14.0-usb-0:10.2.2.2:1.1 E: ID_PATH_TAG=pci-0000_00_14_0-usb-0_10_2_2_2_1_1 E: ID_REVISION=0700 E: ID_SERIAL=FTDI_Dual_RS232-HS E: ID_TYPE=generic E: ID_USB_DRIVER=ftdi_sio E: ID_USB_INTERFACES=:ffffff: E: ID_USB_INTERFACE_NUM=01 E: ID_VENDOR=FTDI E: ID_VENDOR_ENC=FTDI E: ID_VENDOR_FROM_DATABASE=Future Technology Devices International, Ltd E: ID_VENDOR_ID=0403 E: MAJOR=188 E: MINOR=9 E: SUBSYSTEM=tty E: TAGS=:systemd: E: USEC_INITIALIZED=9129609697 We use the ``ID_USB_INTERFACE_NUM`` to distinguish between the two ports: .. 
code-block:: yaml USBSerialPort: match: '@sys_name': '3-10.2.2.2' 'ID_USB_INTERFACE_NUM': '01' Matching a USB UART by Serial Number ++++++++++++++++++++++++++++++++++++ Most of the USB serial converters in our lab have been programmed with unique serial numbers. This makes it easy to always match the same one even if the USB topology changes or a board has been moved between host systems. .. code-block:: yaml USBSerialPort: match: 'ID_SERIAL_SHORT': 'P-00-00679' To check if your device has a serial number, you can use ``udevadm info``: .. code-block:: bash $ udevadm info /dev/ttyUSB5 | grep SERIAL_SHORT E: ID_SERIAL_SHORT=P-00-00679 Drivers ------- SerialDriver ~~~~~~~~~~~~ A SerialDriver connects to a serial port. It requires one of the serial port resources. Binds to: port: - `NetworkSerialPort`_ - `RawSerialPort`_ - `USBSerialPort`_ .. code-block:: yaml SerialDriver: txdelay: 0.05 Implements: - :any:`ConsoleProtocol` Arguments: - txdelay (float, default=0.0): time in seconds to wait before sending each byte - timeout (float, default=3.0): time in seconds to wait for a network serial port before an error occurs ShellDriver ~~~~~~~~~~~ A ShellDriver binds on top of a `ConsoleProtocol` and is designed to interact with a login prompt and a Linux shell. Binds to: console: - :any:`ConsoleProtocol` Implements: - :any:`CommandProtocol` .. code-block:: yaml ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' username: 'root' Arguments: - prompt (regex): shell prompt to match after logging in - login_prompt (regex): match for the login prompt - username (str): username to use during login - password (str): optional, password to use during login - keyfile (str): optional, keyfile to upload after login, making the `SSHDriver`_ usable - login_timeout (int, default=60): timeout for login prompt detection in seconds - await_login_timeout (int, default=2): time in seconds of silence that needs to pass before sending a newline to device. - console_ready (regex): optional, pattern used by the kernel to inform the user that a console can be activated by pressing enter. - post_login_settle_time (int, default=0): seconds of silence after logging in before check for a prompt. Useful when the console is interleaved with boot output which may interrupt prompt detection. .. _conf-sshdriver: SSHDriver ~~~~~~~~~ A SSHDriver requires a `NetworkService` resource and allows the execution of commands and file upload via network. It uses SSH's `ServerAliveInterval` option to detect failed connections. If a shared SSH connection to the target is already open, it will reuse it when running commands. In that case, `ServerAliveInterval` should be set outside of labgrid, as it cannot be enabled for an existing connection. Binds to: networkservice: - `NetworkService`_ Implements: - :any:`CommandProtocol` - :any:`FileTransferProtocol` .. code-block:: yaml SSHDriver: keyfile: example.key Arguments: - keyfile (str): optional, filename of private key to login into the remote system (has precedence over `NetworkService`'s password) - stderr_merge (bool, default=False): set to True to make `run()` return stderr merged with stdout, and an empty list as second element. UBootDriver ~~~~~~~~~~~ A UBootDriver interfaces with a U-Boot bootloader via a `ConsoleProtocol`. Binds to: console: - :any:`ConsoleProtocol` Implements: - :any:`CommandProtocol` .. 
code-block:: yaml

   UBootDriver:
     prompt: 'Uboot> '

Arguments:
  - prompt (regex, default=""): U-Boot prompt to match
  - autoboot (regex, default="stop autoboot"): autoboot message to match
  - password (str): optional, U-Boot unlock password
  - interrupt (str, default="\\n"): string to interrupt autoboot (use "\\x03" for CTRL-C)
  - init_commands (tuple): optional, tuple of commands to execute after matching the prompt
  - password_prompt (str): optional, regex to match the U-Boot password prompt, defaults to "enter Password: "
  - boot_expression (str, default="U-Boot 20\\d+"): regex to match the U-Boot start string
  - bootstring (str): optional, regex to match on Linux kernel boot
  - boot_command (str, default="run bootcmd"): boot command for booting the target
  - login_timeout (int, default=30): timeout for login prompt detection in seconds
  - boot_timeout (int, default=30): timeout for initial Linux kernel version detection

SmallUBootDriver
~~~~~~~~~~~~~~~~
A SmallUBootDriver interfaces with stripped-down U-Boot variants that are
sometimes used in cheap consumer electronics.

SmallUBootDriver is meant as a driver for U-Boot with only little
functionality compared to a standard U-Boot.
In particular, it copes with the following limitations:

- The U-Boot does not have a real password prompt, but it can be activated by
  entering a "secret" after a message was displayed.
- The command line does not have a built-in echo command.
  Thus this driver uses 'Unknown Command' messages as markers before and
  after the output of a command.
- Since there is no echo we cannot return the exit code of the command.
  Commands will always return 0 unless the command was not found.

This driver needs the following features activated in U-Boot to work:

- The U-Boot must not have a real password prompt.
  Instead it must be keyword-activated.
  For example it should be activated by a dialog like the following:

  - U-Boot: "Autobooting in 1s..."
  - labgrid: "secret"
  - U-Boot:

- The U-Boot must be able to parse multiple commands in a single line
  separated by ";".
- The U-Boot must support the "bootm" command to boot from a memory location.

Binds to:
  - :any:`ConsoleProtocol` (see `SerialDriver`_)

Implements:
  - :any:`CommandProtocol`

.. code-block:: yaml

   SmallUBootDriver:
     prompt: 'ap143-2\.0> '
     boot_expression: 'Autobooting in 1 seconds'
     boot_secret: "tpl"

Arguments:
  - boot_secret (str, default="a"): secret used to unlock the prompt
  - login_timeout (int, default=60): timeout for password/login prompt detection
  - for other arguments, see `UBootDriver`_

BareboxDriver
~~~~~~~~~~~~~
A BareboxDriver interfaces with a barebox bootloader via a `ConsoleProtocol`.

Binds to:
  console:
    - :any:`ConsoleProtocol`

Implements:
  - :any:`CommandProtocol`

.. code-block:: yaml

   BareboxDriver:
     prompt: 'barebox@[^:]+:[^ ]+ '

Arguments:
  - prompt (regex, default=""): barebox prompt to match
  - autoboot (regex, default="stop autoboot"): autoboot message to match
  - interrupt (str, default="\\n"): string to interrupt autoboot (use "\\x03" for CTRL-C)
  - bootstring (regex, default="Linux version \\d"): regex indicating that the Linux kernel is booting
  - password (str): optional, password to use for access to the shell
  - login_timeout (int, default=60): timeout for access to the shell

ExternalConsoleDriver
~~~~~~~~~~~~~~~~~~~~~
An ExternalConsoleDriver implements the `ConsoleProtocol` on top of a command
executed on the local computer.

Implements:
  - :any:`ConsoleProtocol`

..
code-block:: yaml ExternalConsoleDriver: cmd: 'microcom /dev/ttyUSB2' txdelay: 0.05 Arguments: - cmd (str): command to execute and then bind to. - txdelay (float, default=0.0): time in seconds to wait before sending each byte AndroidFastbootDriver ~~~~~~~~~~~~~~~~~~~~~ An AndroidFastbootDriver allows the upload of images to a device in the USB fastboot state. Binds to: fastboot: - `AndroidFastboot`_ Implements: - None (yet) .. code-block:: yaml AndroidFastbootDriver: image: mylocal.image sparse_size: 100M Arguments: - boot_image (str): image key referring to the image to boot - flash_images (dict): partition to image key mapping referring to images to flash to the device - sparse_size (str): optional, sparse files greater than given size (see fastboot manpage -S option for allowed size suffixes). The default is the same as the fastboot default, which is computed after querying the target's ``max-download-size`` variable. OpenOCDDriver ~~~~~~~~~~~~~ An OpenOCDDriver controls OpenOCD to bootstrap a target with a bootloader. Note that OpenOCD supports specifying USB paths since `a1b308ab `_ which was released with v0.11. The OpenOCDDriver passes the resource's USB path. Depending on which OpenOCD version is installed it is either used correctly or a warning is displayed and the first resource seen is used, which might be the wrong USB device. Consider updating your OpenOCD version when using multiple USB Blasters. Binds to: interface: - `AlteraUSBBlaster`_ - `USBDebugger`_ Implements: - :any:`BootstrapProtocol` .. code-block:: yaml OpenOCDDriver: config: local-settings.cfg image: bitstream interface_config: ftdi/lambdaconcept_ecpix-5.cfg board_config: lambdaconcept_ecpix-5.cfg load_commands: - "init" - "svf -quiet {filename}" - "exit" Arguments: - config (str/list): optional, OpenOCD configuration file(s) - search (str): optional, include search path for scripts - image (str): optional, name of the image to bootstrap onto the device - interface_config (str): optional, interface config in the ``openocd/scripts/interface/`` directory - board_config (str): optional, board config in the ``openocd/scripts/board/`` directory - load_commands (list of str): optional, load commands to use instead of ``init``, ``bootstrap {filename}``, ``shutdown`` QuartusHPSDriver ~~~~~~~~~~~~~~~~ A QuartusHPSDriver controls the "Quartus Prime Programmer and Tools" to flash a target's QSPI. Binds to: - `AlteraUSBBlaster`_ Implements: - None Arguments: - image (str): optional, filename of image to write into QSPI flash The driver can be used in test cases by calling the `flash` function. An example strategy is included in labgrid. ManualPowerDriver ~~~~~~~~~~~~~~~~~ A ManualPowerDriver requires the user to control the target power states. This is required if a strategy is used with the target, but no automatic power control is available. The driver's name will be displayed during interaction. Implements: - :any:`PowerProtocol` .. code-block:: yaml ManualPowerDriver: name: 'example-board' Arguments: - None ExternalPowerDriver ~~~~~~~~~~~~~~~~~~~ An ExternalPowerDriver is used to control a target power state via an external command. Implements: - :any:`PowerProtocol` .. 
code-block:: yaml ExternalPowerDriver: cmd_on: example_command on cmd_off: example_command off cmd_cycle: example_command cycle Arguments: - cmd_on (str): command to turn power to the board on - cmd_off (str): command to turn power to the board off - cmd_cycle (str): optional command to switch the board off and on - delay (float, default=2.0): delay in seconds between off and on, if cmd_cycle is not set NetworkPowerDriver ~~~~~~~~~~~~~~~~~~ A NetworkPowerDriver controls a `NetworkPowerPort`, allowing control of the target power state without user interaction. Binds to: port: - `NetworkPowerPort`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml NetworkPowerDriver: delay: 5.0 Arguments: - delay (float, default=2.0): delay in seconds between off and on PDUDaemonDriver ~~~~~~~~~~~~~~~ A PDUDaemonDriver controls a `PDUDaemonPort`, allowing control of the target power state without user interaction. .. note:: PDUDaemon processess commands in the background, so the actual state change may happen several seconds after calls to PDUDaemonDriver return. Binds to: port: - `PDUDaemonPort`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml PDUDaemonDriver: delay: 5.0 Arguments: - delay (float, default=5.0): delay in seconds between off and on YKUSHPowerDriver ~~~~~~~~~~~~~~~~ A YKUSHPowerDriver controls a `YKUSHPowerPort`, allowing control of the target power state without user interaction. Binds to: port: - `YKUSHPowerPort`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml YKUSHPowerDriver: delay: 5.0 Arguments: - delay (float, default=2.0): delay in seconds between off and on DigitalOutputPowerDriver ~~~~~~~~~~~~~~~~~~~~~~~~ A DigitalOutputPowerDriver can be used to control the power of a device using a DigitalOutputDriver. Using this driver you probably want an external relay to switch the power of your DUT. Binds to: output: - :any:`DigitalOutputProtocol` .. code-block:: yaml DigitalOutputPowerDriver: delay: 2.0 Arguments: - delay (float, default=1.0): delay in seconds between off and on USBPowerDriver ~~~~~~~~~~~~~~ A USBPowerDriver controls a `USBPowerPort`, allowing control of the target power state without user interaction. Binds to: - `USBPowerPort`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml USBPowerDriver: delay: 5.0 Arguments: - delay (float, default=2.0): delay in seconds between off and on SiSPMPowerDriver ~~~~~~~~~~~~~~~~ A SiSPMPowerDriver controls a `SiSPMPowerPort`, allowing control of the target power state without user interaction. Binds to: - `SiSPMPowerPort`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml SiSPMPowerDriver: delay: 5.0 Arguments: - delay (float, default=2.0): delay in seconds between off and on TasmotaPowerDriver ~~~~~~~~~~~~~~~~~~ A TasmotaPowerDriver contols a `TasmotaPowerPort`, allowing the outlet to be switched on and off. Binds to: - `TasmotaPowerPort`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml TasmotaPowerDriver: delay: 5.0 Arguments: - delay (float, default=2.0): delay in seconds between off and on GpioDigitalOutputDriver ~~~~~~~~~~~~~~~~~~~~~~~ The GpioDigitalOutputDriver writes a digital signal to a GPIO line. This driver configures GPIO lines via `the sysfs kernel interface `. While the driver automatically exports the GPIO, it does not configure it in any other way than as an output. Binds to: - `SysfsGPIO`_ Implements: - :any:`DigitalOutputProtocol` .. 
code-block:: yaml

   GpioDigitalOutputDriver: {}

Arguments:
  - None

SerialPortDigitalOutputDriver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The SerialPortDigitalOutputDriver makes it possible to use a UART as a 1-bit
general-purpose digital output.

This driver acts on top of a SerialDriver and uses its pyserial port to
control the flow control lines.

Implements:
  - :any:`DigitalOutputProtocol`

.. code-block:: yaml

   SerialPortDigitalOutputDriver:
     signal: "dtr"
     bindings: { serial : "nameOfSerial" }

Arguments:
  - signal (str): control signal to use: "dtr" or "rts"
  - bindings (dict): a named resource of the type SerialDriver to bind against.
    This is only needed if you have multiple SerialDrivers in your environment
    (which is likely to be the case if you are using this driver).
  - invert (bool): whether to invert the signal

FileDigitalOutputDriver
~~~~~~~~~~~~~~~~~~~~~~~
The FileDigitalOutputDriver writes arbitrary string representations of
booleans to a file and reads them back from it.

The file is checked to exist at configuration time.
If the file's content does not match any of the representations, reading
defaults to False.

A prime example for using this driver is Linux's sysfs.

Implements:
  - :any:`DigitalOutputProtocol`

.. code-block:: yaml

   FileDigitalOutputDriver:
     filepath: "/sys/class/leds/myled/brightness"

Arguments:
  - filepath (str): file that is used for reads and writes.
  - false_repr (str, default="0\\n"): representation for False
  - true_repr (str, default="1\\n"): representation for True

DigitalOutputResetDriver
~~~~~~~~~~~~~~~~~~~~~~~~
A DigitalOutputResetDriver uses a DigitalOutput to reset the target.

Binds to:
  output:
    - :any:`DigitalOutputProtocol`

Implements:
  - :any:`ResetProtocol`

.. code-block:: yaml

   DigitalOutputResetDriver:
     delay: 2.0

Arguments:
  - delay (float, default=1.0): delay in seconds between setting the output to 0 and 1.

ModbusCoilDriver
~~~~~~~~~~~~~~~~
A ModbusCoilDriver controls a `ModbusTCPCoil` resource.
It can set and get the current state of the resource.

Binds to:
  coil:
    - `ModbusTCPCoil`_

Implements:
  - :any:`DigitalOutputProtocol`

.. code-block:: yaml

   ModbusCoilDriver: {}

Arguments:
  - None

HIDRelayDriver
~~~~~~~~~~~~~~
A HIDRelayDriver controls a `HIDRelay` or `NetworkHIDRelay` resource.
It can set and get the current state of the resource.

Binds to:
  relay:
    - `HIDRelay`_
    - `NetworkHIDRelay`_

Implements:
  - :any:`DigitalOutputProtocol`

.. code-block:: yaml

   HIDRelayDriver: {}

Arguments:
  - None

ManualSwitchDriver
~~~~~~~~~~~~~~~~~~
A ManualSwitchDriver requires the user to control a switch or jumper on the
target.
This can be used if a driver binds to a :any:`DigitalOutputProtocol`, but no
automatic control is available.

Implements:
  - :any:`DigitalOutputProtocol`

.. code-block:: yaml

   ManualSwitchDriver:
     description: 'Jumper 5'

Arguments:
  - description (str): optional, description of the switch or jumper on the target

DeditecRelaisDriver
~~~~~~~~~~~~~~~~~~~
A DeditecRelaisDriver controls a Deditec relay resource.
It can set and get the current state of the resource.

Binds to:
  relais:
    - `DeditecRelais8`_

Implements:
  - :any:`DigitalOutputProtocol`

.. code-block:: yaml

   DeditecRelaisDriver: {}

Arguments:
  - None

MXSUSBDriver
~~~~~~~~~~~~
An MXSUSBDriver is used to upload an image into a device in the mxs USB
loader state.
This is useful to bootstrap a bootloader onto a device.

Binds to:
  loader:
    - `MXSUSBLoader`_
    - `NetworkMXSUSBLoader`_

Implements:
  - :any:`BootstrapProtocol`

..
code-block:: yaml targets: main: drivers: MXSUSBDriver: image: mybootloaderkey images: mybootloaderkey: path/to/mybootloader.img Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target IMXUSBDriver ~~~~~~~~~~~~ A IMXUSBDriver is used to upload an image into a device in the imx USB loader state. This is useful to bootstrap a bootloader onto a device. This driver uses the imx-usb-loader tool from barebox. Binds to: loader: - `IMXUSBLoader`_ - `NetworkIMXUSBLoader`_ Implements: - :any:`BootstrapProtocol` .. code-block:: yaml targets: main: drivers: IMXUSBDriver: image: mybootloaderkey images: mybootloaderkey: path/to/mybootloader.img Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target BDIMXUSBDriver ~~~~~~~~~~~~~~ The BDIMXUSBDriver is used to upload bootloader images into an i.MX device in the USB SDP mode. This driver uses the imx_usb tool by Boundary Devices. Compared to the IMXUSBLoader, it supports two-stage upload of U-Boot images. The images paths need to be specified from code instead of in the YAML environment, as the correct image depends on the system state. Binds to: loader: - `IMXUSBLoader`_ - `NetworkIMXUSBLoader`_ Implements: - :any:`BootstrapProtocol` .. code-block:: yaml targets: main: drivers: BDIMXUSBDriver: {} Arguments: - None RKUSBDriver ~~~~~~~~~~~~ A RKUSBDriver is used to upload an image into a device in the rockchip USB loader state. This is useful to bootstrap a bootloader onto a device. Binds to: loader: - `RKUSBLoader`_ - `NetworkRKUSBLoader`_ Implements: - :any:`BootstrapProtocol` .. code-block:: yaml targets: main: drivers: RKUSBDriver: image: mybootloaderkey usb_loader: myloaderkey images: mybootloaderkey: path/to/mybootloader.img myloaderkey: path/to/myloader.bin Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target - usb_loader (str): optional, key in :ref:`images ` containing the path of a first-stage bootloader image to write UUUDriver ~~~~~~~~~ A UUUDriver is used to upload an image into a device in the NXP USB loader state. This is useful to bootstrap a bootloader onto a device. Binds to: loader: - `MXSUSBLoader`_ - `NetworkMXSUSBLoader`_ - `IMXUSBLoader`_ - `NetworkIMXUSBLoader`_ Implements: - :any:`BootstrapProtocol` .. code-block:: yaml targets: main: drivers: UUUDriver: image: mybootloaderkey cmd: spl images: mybootloaderkey: path/to/mybootloader.img Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target - cmd (str, default="spl"): single command used for mfgtool USBStorageDriver ~~~~~~~~~~~~~~~~ A USBStorageDriver allows access to a USB stick or similar local or remote device. Binds to: - `USBMassStorage`_ - `NetworkUSBMassStorage`_ Implements: - None (yet) .. code-block:: yaml USBStorageDriver: image: flashimage .. code-block:: yaml images: flashimage: ../images/myusb.image Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to write to the target OneWirePIODriver ~~~~~~~~~~~~~~~~ A OneWirePIODriver controls a `OneWirePIO` resource. It can set and get the current state of the resource. Binds to: port: - `OneWirePIO`_ Implements: - :any:`DigitalOutputProtocol` .. code-block:: yaml OneWirePIODriver: {} Arguments: - None .. _TFTPProviderDriver: .. _NFSProviderDriver: .. 
_HTTPProviderDriver: TFTPProviderDriver / NFSProviderDriver / HTTPProviderDriver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These drivers control their corresponding Provider resources, either locally or remotely. Binds to: provider: - `TFTPProvider`_ - `RemoteTFTPProvider`_ - `NFSProvider`_ - `RemoteNFSProvider`_ - `HTTPProvider`_ - `RemoteHTTPProvider`_ .. code-block:: yaml TFTPProviderDriver: {} Arguments: - None The driver can be used in test cases by calling the `stage()` function, which returns the path to be used by the target. QEMUDriver ~~~~~~~~~~ The QEMUDriver allows the usage of a QEMU instance as a target. It requires several arguments, listed below. The kernel, flash, rootfs and dtb arguments refer to images and paths declared in the environment configuration. Binds to: - None .. code-block:: yaml QEMUDriver: qemu_bin: qemu_arm machine: vexpress-a9 cpu: cortex-a9 memory: 512M boot_args: "root=/dev/root console=ttyAMA0,115200" extra_args: "" kernel: kernel rootfs: rootfs dtb: dtb .. code-block:: yaml tools: qemu_arm: /bin/qemu-system-arm paths: rootfs: ../images/root images: dtb: ../images/mydtb.dtb kernel: ../images/vmlinuz Implements: - :any:`ConsoleProtocol` - :any:`PowerProtocol` Arguments: - qemu_bin (str): reference to the tools key for the QEMU binary - machine (str): QEMU machine type - cpu (str): QEMU cpu type - memory (str): QEMU memory size (ends with M or G) - extra_args (str): extra QEMU arguments, they are passed directly to the QEMU binary - boot_args (str): optional, additional kernel boot argument - kernel (str): optional, reference to the images key for the kernel - disk (str): optional, reference to the images key for the disk image - flash (str): optional, reference to the images key for the flash image - rootfs (str): optional, reference to the paths key for use as the virtio-9p filesystem - dtb (str): optional, reference to the image key for the device tree - bios (str): optional, reference to the image key for the bios image The QEMUDriver also requires the specification of: - a tool key, this contains the path to the QEMU binary - an image key, the path to the kernel image and optionally the dtb key to specify the build device tree - a path key, this is the path to the rootfs SigrokDriver ~~~~~~~~~~~~ The SigrokDriver uses a SigrokDevice resource to record samples and provides them during test runs. Binds to: sigrok: - `SigrokUSBDevice`_ - `SigrokDevice`_ - `NetworkSigrokUSBDevice`_ Implements: - None yet Arguments: - None The driver can be used in test cases by calling the `capture`, `stop` and `analyze` functions. SigrokPowerDriver ~~~~~~~~~~~~~~~~~ The SigrokPowerDriver uses a `SigrokUSBSerialDevice`_ resource to control a programmable power supply. Binds to: sigrok: - `SigrokUSBSerialDevice`_ - NetworkSigrokUSBSerialDevice Implements: - :any:`PowerProtocol` .. code-block:: yaml SigrokPowerDriver: delay: 3.0 Arguments: - delay (float, default=3.0): delay in seconds between off and on - max_voltage (float): optional, maximum allowed voltage for protection against accidental damage (in volts) - max_current (float): optional, maximum allowed current for protection against accidental damage USBSDMuxDriver ~~~~~~~~~~~~~~ The :any:`USBSDMuxDriver` uses a USBSDMuxDevice resource to control a USB-SD-Mux device via `usbsdmux `_ tool. Implements: - None yet Arguments: - None The driver can be used in test cases by calling the `set_mode()` function with argument being `dut`, `host`, `off`, or `client`. 
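As a rough illustration, a test could drive the mux like this; this is a
minimal sketch, and the ``target`` fixture (provided by the labgrid pytest
plugin) as well as the image-writing step are assumptions for the example:

.. code-block:: python

   # Sketch: switching the SD card between host and DUT from a pytest test.
   def test_provision_sd_card(target):
       sdmux = target.get_driver("USBSDMuxDriver")

       sdmux.set_mode("host")   # connect the SD card to the test host
       # ... write the image to the card's block device here ...
       sdmux.set_mode("dut")    # hand the SD card back to the device under test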
LXAUSBMuxDriver
~~~~~~~~~~~~~~~
The :any:`LXAUSBMuxDriver` uses a LXAUSBMux resource to control a USB-Mux
device via the ``usbmuxctl`` tool.

Implements:
  - None yet

Arguments:
  - None

The driver can be used in test cases by calling the `set_links()` function
with a list containing one or more of "dut-device", "host-dut" and
"host-device".
Not all combinations can be configured at the same time.

USBSDWireDriver
~~~~~~~~~~~~~~~
The :any:`USBSDWireDriver` uses a USBSDWireDevice resource to control a
USB-SD-Wire device via the ``sd-mux-ctrl`` tool.

Implements:
  - None yet

Arguments:
  - None

The driver can be used in test cases by calling the `set_mode()` function
with the argument being `dut`, `host`, `off`, or `client`.

USBVideoDriver
~~~~~~~~~~~~~~
The :any:`USBVideoDriver` is used to show a video stream from a remote USB
video camera in a local window.
It uses the GStreamer command line utility ``gst-launch`` on both sides to
stream the video via an SSH connection to the exporter.

Binds to:
  video:
    - `USBVideo`_
    - `NetworkUSBVideo`_

Implements:
  - :any:`VideoProtocol`

Arguments:
  - None

Although the driver can be used from Python code by calling the `stream()`
method, it is currently mainly useful for the ``video`` subcommand of
``labgrid-client``.
It supports the `Logitech HD Pro Webcam C920` with the USB ID 046d:082d, but
other cameras can be added to `get_qualities()` in
``labgrid/driver/usbvideodriver.py``.

USBAudioInputDriver
~~~~~~~~~~~~~~~~~~~
The :any:`USBAudioInputDriver` is used to receive an audio stream from a
local or remote USB audio input.
It uses the GStreamer command line utility ``gst-launch`` on the sender side
to stream the audio to the client.
For remote resources, this is done via an SSH connection to the exporter.
On the receiver, it either uses ``gst-launch`` for simple playback or
gst-python for more complex cases (such as measuring the current volume
level).

Binds to:
  video:
    - `USBAudioInput`_
    - `NetworkUSBAudioInput`_

Implements:
  - None yet

Arguments:
  - None

USBTMCDriver
~~~~~~~~~~~~
The :any:`USBTMCDriver` is used to control an oscilloscope via the USB TMC
protocol.

Binds to:
  tmc:
    - `USBTMC`_
    - `NetworkUSBTMC`_

Implements:
  - None yet

Arguments:
  - None

Currently, it can be used by the ``labgrid-client`` ``tmc`` subcommands to
show (and save) a screenshot, to show per channel measurements and to execute
raw TMC commands.
It only supports the `Keysight DSO-X 2000` series (with the USB ID
0957:1798), but more devices can be added by extending `on_activate()` in
``labgrid/driver/usbtmcdriver.py`` and writing a corresponding backend in
``labgrid/driver/usbtmc/``.

FlashromDriver
~~~~~~~~~~~~~~
The :any:`FlashromDriver` is used to flash a ROM using the flashrom utility.

.. code-block:: yaml

   FlashromDriver:
     image: 'foo'
   images:
     foo: ../images/image_to_load.raw

Binds to:
  flashrom_resource:
    - `Flashrom`_
    - `NetworkFlashrom`_

Implements:
  - :any:`BootstrapProtocol`

Arguments:
  - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target

The FlashromDriver allows using the Linux util "flashrom" to write directly
to a ROM, e.g. a NOR SPI flash.
The assumption is that the device flashing the DUT, e.g. an exporter, is
wired to the flash chip to be flashed.
The driver implements the bootstrap protocol.
The driver uses the tools configuration section and the key ``flashrom`` to
determine the path of the installed flashrom utility.

FlashScriptDriver
~~~~~~~~~~~~~~~~~
The :any:`FlashScriptDriver` is used to run a custom script or program to
flash a device.

..
note:: This driver is only intended to be used as a last resort when it is impossible or impractical to use a different driver. .. code-block:: yaml FlashScriptDriver: script: 'foo' args: - '{device.devnode}' images: foo: ../images/flash_device.sh Binds to: flashabledevice_resource: - `USBFlashableDevice`_ - `NetworkUSBFlashableDevice`_ Implements: - None (yet) Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target - args (list): optional, list of arguments for flash script execution The FlashScriptDriver allows running arbitrary programs to flash a device. Some SoC or devices may require custom, one-off, or proprietary programs to flash. A target image can be bundled with these programs using a tool like `makeself `_, which can then be executed by labgrid to flash the device using this driver. Additional arguments may be passed with the ``args`` parameter. These arguments will be expanded as `Python format strings `_ with the following keys: HTTPVideoDriver ~~~~~~~~~~~~~~~ The :any:`HTTPVideoDriver` is used to show a video stream over HTTP or HTTPS from a remote IP video source in a local window. Binds to: video: - `HTTPVideoStream`_ Implements: - :any:`VideoProtocol` Although the driver can be used from Python code by calling the `stream()` method, it is currently mainly useful for the ``video`` subcommand of ``labgrid-client``. ========== ========================================================= Key Description ========== ========================================================= ``device`` The :any:`Resource` bound to the driver ``file`` The :any:`ManagedFile` used to track the flashable script ========== ========================================================= Properties of these keys can be selected using the Python format string syntax, e.g. ``{device.devnode}`` to select the device node path of :any:`USBFlashableDevice` XenaDriver ~~~~~~~~~~ The XenaDriver allows to use Xena networking test equipment. Using the `xenavalkyrie` library a full API to control the tester is available. Binds to: xena_manager: - `XenaManager`_ The driver is supposed to work with all Xena products from the "Valkyrie Layer 2-3 Test platform" Currently tested on a `XenaCompact` chassis equipped with a `1 GE test module`. DockerDriver ~~~~~~~~~~~~ A DockerDriver binds to a `DockerDaemon` and is used to create and control one docker container. | The driver uses the docker python module to interact with the docker daemon. | For more information on the parameters see: | https://docker-py.readthedocs.io/en/stable/containers.html#container-objects Binds to: docker_daemon: - `DockerDaemon`_ Implements: - :any:`PowerProtocol` .. code-block:: yaml DockerDriver: image_uri: "rastasheep/ubuntu-sshd:16.04" container_name: "ubuntu-lg-example" host_config: {"network_mode":"bridge"} network_services: [{"port":22,"username":"root","password":"root"}] Arguments: - image_uri (str): identifier of the docker image to use (may have a tag suffix) - command (str): command to run in the container (optional, depends on image) - volumes (list): list to configure volumes mounted inside the container (optional) - container_name (str): name of the container - environment (list): list of environment variables (optional) - host_config (dict): dictionary of host configurations - network_services (list): dictionaries that describe individual `NetworkService`_ instances that come alive when the container is created. 
The "address" argument which `NetworkService`_ also requires will be derived automatically upon container creation. LXAIOBusPIODriver ~~~~~~~~~~~~~~~~~ An LXAIOBusPIODriver binds to a single `LXAIOBusPIO` to toggle and read the PIO states. Binds to: pio: - `LXAIOBusPIO`_ - `NetworkLXAIOBusPIO`_ .. code-block:: yaml LXAIOBusPIODriver: {} Implements: - :any:`DigitalOutputProtocol` Arguments: - None PyVISADriver ~~~~~~~~~~~~ The PyVISADriver uses a PyVISADevice resource to control test equipment manageable by PyVISA. Binds to: pyvisa_resource: - `PyVISADevice`_ Implements: - None yet Arguments: - None NetworkInterfaceDriver ~~~~~~~~~~~~~~~~~~~~~~ This driver allows controlling a network interface (such as Ethernet or WiFi) on the exporter using NetworkManager. The configuration is based on dictionaries with contents similar to NM's connection files in INI-format. Currently basic wired and wireless configuration options have been tested. To use it, `PyGObject `_ must be installed (on the same system as the network interface). For Debian, the necessary packages are `python3-gi` and `gir1.2-nm-1.0`. It supports: - static and DHCP address configuration - WiFi client or AP - connection sharing (DHCP server with NAT) - listing DHCP leases (if the client has sufficient permissions) Binds to: iface: - `USBNetworkInterface`_ - `RemoteNetworkInterface`_ Implements: - None yet Arguments: - None Strategies ---------- Strategies are used to ensure that the device is in a certain state during a test. Such a state could be the bootloader or a booted Linux kernel with shell. BareboxStrategy ~~~~~~~~~~~~~~~ A BareboxStrategy has four states: - unknown - off - barebox - shell to transition to the shell state: :: t = get_target("main") s = BareboxStrategy(t) s.transition("shell") this command would transition from the bootloader into a Linux shell and activate the shelldriver. ShellStrategy ~~~~~~~~~~~~~ A ShellStrategy has three states: - unknown - off - shell to transition to the shell state: :: t = get_target("main") s = ShellStrategy(t) s.transition("shell") this command would transition directly into a Linux shell and activate the shelldriver. UBootStrategy ~~~~~~~~~~~~~ A UBootStrategy has four states: - unknown - off - uboot - shell to transition to the shell state: :: t = get_target("main") s = UBootStrategy(t) s.transition("shell") this command would transition from the bootloader into a Linux shell and activate the shelldriver. DockerShellStrategy ~~~~~~~~~~~~~~~~~~~ A DockerShellStrategy has three states: - unknown - off - shell To transition to the shell state: :: t = get_target("main") s = DockerShellStrategy(t) s.transition("shell") These commands would activate the docker driver which creates and starts a docker container. This will subsequently make `NetworkService`_ instance(s) available which can be used for e.g. SSH access. Note: Transitioning to the "off" state will make any `NetworkService`_ instance(s) unresponsive - which may in turn invalidate SSH connection sharing. Therefore, during automated test suites, refrain from transitioning to the "off" state. Reporters --------- StepReporter ~~~~~~~~~~~~ The StepReporter outputs individual labgrid steps to `STDOUT`. :: from labgrid.stepreporter import StepReporter StepReporter.start() The Reporter can be stopped with a call to the stop function: :: from labgrid.stepreporter import StepReporter StepReporter.stop() Stopping the StepReporter if it has not been started will raise an AssertionError, as will starting an already started StepReporter. 
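For reference, the StepReporter can also be used outside of pytest; a minimal
sketch of a standalone script, where the environment file name and the target
interactions are assumptions for the example:

.. code-block:: python

   # Sketch: enabling step output in a standalone script instead of pytest.
   from labgrid import Environment
   from labgrid.stepreporter import StepReporter

   StepReporter.start()                      # print each labgrid step to STDOUT
   env = Environment("example-env.yaml")     # assumed environment file
   target = env.get_target("main")
   # ... activate drivers and interact with the target here ...
   StepReporter.stop()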
ColoredStepReporter
~~~~~~~~~~~~~~~~~~~
The ColoredStepReporter inherits from the StepReporter.
The output is colored using ANSI color code sequences.

ConsoleLoggingReporter
~~~~~~~~~~~~~~~~~~~~~~
The ConsoleLoggingReporter outputs read calls from the console transports
into files.
It takes the path as a parameter.

::

    from labgrid.consoleloggingreporter import ConsoleLoggingReporter

    ConsoleLoggingReporter.start(".")

The Reporter can be stopped with a call to the stop function:

::

    from labgrid.consoleloggingreporter import ConsoleLoggingReporter

    ConsoleLoggingReporter.stop()

Stopping the ConsoleLoggingReporter if it has not been started will raise an
AssertionError, as will starting an already started ConsoleLoggingReporter.

Environment Configuration
-------------------------
The environment configuration for a test environment consists of a YAML file
which contains targets, drivers and resources.

.. note::
   The order is important here:
   Objects are instantiated in the order they appear in the YAML file, so if
   drivers depend on other drivers or resources which are only instantiated
   later, loading the environment will fail.

The skeleton for an environment consists of:

.. code-block:: yaml

   targets:
     <target-1>:
       resources:
         <resource-1>:
           <resource-1 parameters>
       drivers:
         <driver-1>:
           <driver-1 parameters>
         <driver-2>: {} # no parameters for driver-2
       features:
         - <target-feature-1>
     <target-2>:
       resources:
         <more resources>
       drivers:
         <more drivers>
       options:
         <target-option-1>: <value>
   options:
     <option-1>: <value>
   features:
     - <global-feature-1>
   paths:
     <path-name-1>: <path>
   images:
     <image-name-1>: <image path>
   tools:
     <tool-name-1>: <tool path>
   imports:
     - <import-1>
     - <import-2>

If you have a single target in your environment, name it "main", as the
``get_target`` function defaults to "main".

All the resources and drivers in this chapter have a YAML example snippet
which can simply be added (at the correct indentation level, one level
deeper) to the environment configuration.

If you want to use multiple drivers of the same type, the resources and
drivers need to be lists, e.g.:

.. code-block:: yaml

   resources:
     RawSerialPort:
       port: '/dev/ttyS1'
   drivers:
     SerialDriver: {}

becomes:

.. code-block:: yaml

   resources:
   - RawSerialPort:
       port: '/dev/ttyS1'
   - RawSerialPort:
       port: '/dev/ttyS2'
   drivers:
   - SerialDriver: {}
   - SerialDriver: {}

This configuration doesn't specify which :any:`RawSerialPort` to use for each
:any:`SerialDriver`, so it will cause an exception when instantiating the
:any:`Target`.
To bind the correct driver to the correct resource, explicit ``name`` and
``bindings`` properties are used:

.. code-block:: yaml

   resources:
   - RawSerialPort:
       name: 'foo'
       port: '/dev/ttyS1'
   - RawSerialPort:
       name: 'bar'
       port: '/dev/ttyS2'
   drivers:
   - SerialDriver:
       name: 'foo_driver'
       bindings:
         port: 'foo'
   - SerialDriver:
       name: 'bar_driver'
       bindings:
         port: 'bar'

The property name for the binding (e.g. `port` in the example above) is
documented for each individual driver in this chapter.

The YAML configuration file also supports templating for some substitutions,
these are:

- LG_* variables, which are replaced with their respective LG_* environment variable
- BASE, which is substituted with the base directory of the YAML file.

As an example:

.. code-block:: yaml

   targets:
     main:
       resources:
         RemotePlace:
           name: !template $LG_PLACE
   tools:
     qemu_bin: !template "$BASE/bin/qemu-bin"

would resolve the qemu_bin path relative to the BASE dir of the YAML file and
try to use the RemotePlace with the name set in the LG_PLACE environment
variable.

See the :ref:`labgrid-device-config` man page for documentation on the
top-level ``options``, ``images``, ``tools``, and ``examples`` keys in the
environment configuration.
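As a sketch of how the LG_* templating interacts with the process
environment when the configuration is loaded directly from Python (the
variable value and file name below are examples, not labgrid defaults):

.. code-block:: python

   # Sketch: LG_* substitutions are resolved from the process environment
   # when the YAML file is loaded.
   import os
   from labgrid import Environment

   os.environ["LG_PLACE"] = "example-place"   # value used for "!template $LG_PLACE"
   env = Environment("my-env.yaml")           # assumed environment file name
   target = env.get_target()                  # defaults to the "main" target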
Exporter Configuration
----------------------
The exporter is configured by using a YAML file (with a syntax similar to the
environment configs used for pytest) or by instantiating the
:any:`Environment` object.

To configure the exporter, you need to define one or more `resource groups`,
each containing one or more `resources`.
The syntax for exported resource names is
``<exporter>/<group>/<class>/<name>``, which allows the exporter to group
resources for various usage scenarios, e.g. all resources of a specific place
or for a specific test setup.
For information on how the exporter fits into the rest of labgrid, see
:any:`remote-resources-and-places`.

The ``<exporter>`` part can be specified on the :ref:`labgrid-exporter`
command line, and defaults to the hostname of the exporter.

The basic structure of an exporter configuration file is:

.. code-block:: yaml

   <group-1>:
     <resource-name-1>:
       <resource parameters>
     <resource-name-2>:
       <resource parameters>
   <group-2>:
     <resource-name-3>:
       <resource parameters>

By default, the class name is inferred from the resource name, and the
parameters will be passed to its constructor.
For USB resources, you will most likely want to use :ref:`udev-matching`
here.

As a simple example, here is one group called *usb-hub-in-rack12* containing
a single :any:`USBSerialPort` resource (using udev matching), which will be
exported as `exportername/usb-hub-in-rack12/NetworkSerialPort/USBSerialPort`:

.. code-block:: yaml

   usb-hub-in-rack12:
     USBSerialPort:
       match:
         '@sys_name': '3-1.3'

To export multiple resources of the same class in the same group, you can
choose a unique resource name, and then use the ``cls`` parameter to specify
the class name instead (which will not be passed as a parameter to the class
constructor).
In this next example we will export one :any:`USBSerialPort` as
`exportername/usb-hub-in-rack12/NetworkSerialPort/console-main`, and another
:any:`USBSerialPort` as
`exportername/usb-hub-in-rack12/NetworkSerialPort/console-secondary`:

.. code-block:: yaml

   usb-hub-in-rack12:
     console-main:
       cls: USBSerialPort
       match:
         '@sys_name': '3-1.3'
     console-secondary:
       cls: USBSerialPort
       match:
         '@sys_name': '3-1.4'

Note that you could also split the resources up into distinct groups instead
to achieve the same effect:

.. code-block:: yaml

   usb-hub-in-rack12-port3:
     USBSerialPort:
       match:
         '@sys_name': '3-1.3'
   usb-hub-in-rack12-port4:
     USBSerialPort:
       match:
         '@sys_name': '3-1.4'

Templating
~~~~~~~~~~
To reduce the amount of repeated declarations when many similar resources
need to be exported, the Jinja2 template engine is used as a preprocessor for
the configuration file:

.. code-block:: yaml

   ## Iterate from group 1001 to 1016
   # for idx in range(1, 17)
   {{ 1000 + idx }}:
     NetworkSerialPort:
       {host: rl1, port: {{ 4000 + idx }}}
     NetworkPowerPort:
       # if 1 <= idx <= 8
       {model: apc, host: apc1, index: {{ idx }}}
       # elif 9 <= idx <= 12
       {model: netio, host: netio4, index: {{ idx - 8 }}}
       # elif 13 <= idx <= 16
       {model: netio, host: netio5, index: {{ idx - 12 }}}
       # endif
   # endfor

Use ``#`` for line statements (like the for loops in the example) and ``##``
for line comments.
Statements like ``{{ 4000 + idx }}`` are expanded based on variables in the
Jinja2 template.

The template processing also supports use of OS environment variables, using
something like `{{ env['FOOBAR'] }}` to insert the content of environment
variable `FOOBAR`.

Design Decisions
================
This document outlines the design decisions influencing the development of
labgrid.
Out of Scope
------------
Out of scope for labgrid are:

Integrated Build System
~~~~~~~~~~~~~~~~~~~~~~~
In contrast to some other tools, labgrid explicitly has no support for
building target binaries or images.
Our reasons for this are:

- Several full-featured build systems already exist and work well.
- We want to test unmodified images produced by any build system (OE/Yocto,
  PTXdist, Buildroot, Debian, …).

Test Infrastructure
~~~~~~~~~~~~~~~~~~~
labgrid does not include a test framework.
The main reason is that with `pytest `_ we already have a test framework
which:

- makes it easy to write tests
- reduces boilerplate code with flexible fixtures
- is easy to extend and has many available plugins
- allows using any Python library for creating inputs or processing outputs
- supports test report generation

Furthermore, the hardware control functionality needed for testing is also
very useful during development, provisioning and other areas, so we don't
want to hide that behind another test framework.

In Scope
--------
- usable as a library for hardware provisioning
- device control via:

  - serial console
  - SSH
  - file management
  - power and reset

- emulation of external services:

  - USB stick emulation
  - external update services (Hawkbit)

- bootstrap services:

  - fastboot
  - imxusbloader

Further Goals
-------------
- tests should be equivalent for workstations and servers
- discoverability of available boards
- distributed board access

Development
============
The first step is to install labgrid into a local virtualenv.

Installation
------------
Clone the git repository:

.. code-block:: bash

   git clone https://github.com/labgrid-project/labgrid && cd labgrid

Create and activate a virtualenv for labgrid:

.. code-block:: bash

   virtualenv -p python3 venv
   source venv/bin/activate

Install required dependencies:

.. code-block:: bash

   sudo apt install python3-dev libow-dev libsnappy-dev

Install the development requirements:

.. code-block:: bash

   pip install -r dev-requirements.txt

Install labgrid into the virtualenv in editable mode:

.. code-block:: bash

   pip install -e .

Tests can now be run via:

.. code-block:: bash

   python -m pytest --lg-env

Writing a Driver
----------------
To develop a new driver for labgrid, you need to decide which protocol to
implement, or implement your own protocol.
If you are unsure about a new protocol's API, just use the driver directly
from the client code, as deciding on a good API will be much easier when
another similar driver is added.

labgrid uses the `attrs library `_ for internal classes.
First of all import attr, the protocol and the common driver class into your
new driver file.

::

    import attr

    from labgrid.driver.common import Driver
    from labgrid.protocol import ConsoleProtocol

Next, define your new class and list the protocols as subclasses of the new
driver class.
Try to avoid subclassing existing other drivers, as this limits the
flexibility provided by connecting drivers and resources on a given target at
runtime.

::

    import attr

    from labgrid.driver.common import Driver
    from labgrid.protocol import ConsoleProtocol

    @attr.s(eq=False)
    class ExampleDriver(Driver, ConsoleProtocol):
        pass

The ConsoleExpectMixin is a mixin class to add expect functionality to any
class supporting the :any:`ConsoleProtocol` and has to be the first item in
the subclass list.
Using the mixin class allows sharing common code, which would otherwise need to be added into multiple drivers. :: import attr from labgrid.driver.common import Driver from labgrid.driver.consoleexpectmixin import ConsoleExpectMixin from labgrid.protocol import ConsoleProtocol @attr.s(eq=False) class ExampleDriver(ConsoleExpectMixin, Driver, ConsoleProtocol) pass Additionally the driver needs to be registered with the :any:`target_factory` and provide a bindings dictionary, so that the :any:`Target` can resolve dependencies on other drivers or resources. :: import attr from labgrid.factory import target_factory from labgrid.driver.common import Driver from labgrid.driver.consoleexpectmixin import ConsoleExpectMixin from labgrid.protocol import ConsoleProtocol @target_factory.reg_driver @attr.s(eq=False) class ExampleDriver(ConsoleExpectMixin, Driver, ConsoleProtocol) bindings = { "port": SerialPort } pass The listed resource :code:`SerialPort` will be bound to :code:`self.port`, making it usable in the class. Checks are performed that the target which the driver binds to has a SerialPort, otherwise an error will be raised. If your driver can support alternative resources, you can use a set of classes instead of a single class:: bindings = { "port": {SerialPort, NetworkSerialPort}} Optional bindings can be declared by including ``None`` in the set:: bindings = { "port": {SerialPort, NetworkSerialPort, None}} If you need to do something during instantiation, you need to add a :code:`__attrs_post_init__` method (instead of the usual :code:`__init__` used for non-attr-classes). The minimum requirement is a call to :code:`super().__attrs_post_init__()`. :: import attr from labgrid.factory import target_factory from labgrid.driver.common import Driver from labgrid.driver.consoleexpectmixin import ConsoleExpectMixin from labgrid.protocol import ConsoleProtocol @target_factory.reg_driver @attr.s(eq=False) class ExampleDriver(ConsoleExpectMixin, Driver, ConsoleProtocol) bindings = { "port": SerialPort } def __attrs_post_init__(self): super().__attrs_post_init__() All that's left now is to implement the functionality described by the used protocol, by using the API of the bound drivers and resources. Writing a Resource ------------------- To add a new resource to labgrid, we import attr into our new resource file. Additionally we need the :any:`target_factory` and the common ``Resource`` class. :: import attr from labgrid.factory import target_factory from labgrid.driver.common import Resource Next we add our own resource with the :code:`Resource` parent class and register it with the :any:`target_factory`. :: import attr from labgrid.factory import target_factory from labgrid.driver.common import Resource @target_factory.reg_resource @attr.s(eq=False) class ExampleResource(Resource): pass All that is left now is to add attributes via :code:`attr.ib()` member variables. :: import attr from labgrid.factory import target_factory from labgrid.driver.common import Resource @target_factory.reg_resource @attr.s(eq=False) class ExampleResource(Resource): examplevar1 = attr.ib() examplevar2 = attr.ib() The :code:`attr.ib()` style of member definition also supports defaults and validators, see the `attrs documentation `_. Writing a Strategy ------------------ labgrid offers only basic strategies, for complex use cases a customized strategy is required. 
Start by creating a strategy skeleton: :: import enum import attr from labgrid.step import step from labgrid.driver import Strategy, StrategyError from labgrid.factory import target_factory class Status(enum.Enum): unknown = 0 @target_factory.reg_driver class MyStrategy(Strategy): bindings = { } status = attr.ib(default=Status.unknown) @step() def transition(self, status, *, step): if not isinstance(status, Status): status = Status[status] if status == Status.unknown: raise StrategyError(f"can not transition to {status}") elif status == self.status: step.skip("nothing to do") return # nothing to do else: raise StrategyError( f"no transition found from {self.status,} to {status}" ) self.status = status The ``bindings`` variable needs to declare the drivers necessary for the strategy, usually one for power, bootloader and shell. It is possible to reference drivers via their protocol, e.g. ``ConsoleProtocol``. Note that drivers which implement multiple protocols must not be referenced multiple times via different protocols. The ``Status`` class needs to be extended to cover the states of your strategy, then for each state an ``elif`` entry in the transition function needs to be added. Lets take a look at the builtin `BareboxStrategy`. The Status enum for the BareboxStrategy: :: class Status(enum.Enum): unknown = 0 off = 1 barebox = 2 shell = 3 defines 3 custom states and the `unknown` state as the start point. These three states are handled in the transition function: :: elif status == Status.off: self.target.deactivate(self.barebox) self.target.deactivate(self.shell) self.target.activate(self.power) self.power.off() elif status == Status.barebox: self.transition(Status.off) # cycle power self.power.cycle() # interrupt barebox self.target.activate(self.barebox) elif status == Status.shell: # tansition to barebox self.transition(Status.barebox) self.barebox.boot("") self.barebox.await_boot() self.target.activate(self.shell) Here the `barebox` state simply cycles the board and activates the driver, while the `shell` state uses the barebox state to cycle the board and than boot the linux kernel. The `off` states switch the power off. Tips for Writing and Debugging Tests ------------------------------------ Live-Reading Console Output ~~~~~~~~~~~~~~~~~~~~~~~~~~~ When starting labgrid with ``--lg-log`` option, it will dump the input from the serial driver to a file in specified directory:: $ pytest .. --lg-log=logdir test-dir/ This can help understanding what happened and why it happened. However, when debugging tests, it might be more helpful to get a live impression of what is going on. For this, you can use ``tail -f`` to read the content written to the log file as if you would be connected to the devices serial console (except that it is read-only):: $ tail -f logdir/console_main # for the 'main' target For getting information about timing, the ``annotate-output`` command turned out to be quite helpful. On Debian it comes with the ``devscripts`` package and you can install it with:: $ apt-get install devscripts To use it, run:: $ annotate-output tail -f logdir/console_main This will print your system time before each line, allowing you to both see relative delays between steps in your tests as well as absolute timing of things happening in your test environment. Dealing With Kernel Log Verbosity ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ For testing your Linux system it can be quite annoying if the kernel outputs verbosely to the console you use for testing. 
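Once such a strategy is registered (e.g. imported via the ``imports`` key of
the environment configuration), it can be driven from a test.
The following is a minimal sketch; the ``target`` fixture from the labgrid
pytest plugin, the strategy name ``MyStrategy`` and the presence of a shell
command driver are assumptions for the example:

.. code-block:: python

   # Sketch: using a custom strategy from a pytest test.
   import pytest

   @pytest.fixture
   def in_shell(target):
       strategy = target.get_driver("MyStrategy")   # assumed strategy name
       strategy.transition("shell")
       return target.get_driver("CommandProtocol")

   def test_kernel_version(in_shell):
       stdout, stderr, returncode = in_shell.run("cat /proc/version")
       assert returncode == 0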
Note that an overly verbose kernel can break tests, as kernel logs will pollute
the expected command outputs and make them unreadable for labgrid's regular
expressions.
However, as the shell driver and most of the tests will depend on seeing
console output of what is going on during boot, we cannot turn off kernel
logging completely.

.. note::
    The labgrid ShellDriver itself attempts to disable console printing by
    calling ``dmesg -n 1`` as soon as it has a logged-in shell.
    However, this may be too late for reliably capturing the initial login and
    shell prompt.

A proper point in time for disabling kernel output to the console is when
systemd starts.
To achieve this, make use of the ``systemd-sysctl.service`` that uses
``/etc/sysctl.d/`` to configure kernel parameters.
This way, the kernel log level can be set to 'error' by the time of service
execution with a config file like::

    $ cat /etc/sysctl.d/20-quiet-printk.conf
    kernel.printk = 3

If the *initial* kernel logging is still too verbose, one could also reduce
this.
But note that for the standard configuration of the labgrid barebox and uboot
drivers, we need to catch the ``Linux version ...`` line to detect that we
successfully left the bootloader (the ``bootstring`` attribute).
This line is only printed if at least kernel log level 6 (notice) is enabled::

    loglevel=6

Graph Strategies
----------------

.. warning::
    This feature is experimental and brings much complexity to your project.

GraphStrategies are made for more complex strategies with multiple states that
depend on each other.
A GraphStrategy graph has to be a directed graph with one root state.

Using a GraphStrategy only makes sense if you have board states that are
reachable in different ways.
In this case, GraphStrategies reduce state duplication.

Example
~~~~~~~

.. code-block:: python
    :caption: conftest.py

    from labgrid.strategy import GraphStrategy

    class TestStrategy(GraphStrategy):
        def state_unknown(self):
            pass

        @GraphStrategy.depends('unknown')
        def state_boot_via_nand(self):
            pass

        @GraphStrategy.depends('unknown')
        def state_boot_via_nfs(self):
            pass

        @GraphStrategy.depends('boot_via_nand', 'boot_via_nfs')
        def state_barebox(self):
            pass

        @GraphStrategy.depends('barebox')
        def state_linux_shell(self):
            pass

The class can also render a graph as PNG (using GraphViz):

.. code-block:: yaml
    :caption: test.yaml

    targets:
      main:
        resources: {}
        drivers: {}

.. code-block:: python
    :caption: render_teststrategy.py

    from labgrid.environment import Environment
    from conftest import TestStrategy

    env = Environment('test.yaml')
    strategy = TestStrategy(env.get_target(), "strategy name")

    strategy.transition('barebox', via=['boot_via_nfs'])
    # returned: ['unknown', 'boot_via_nfs', 'barebox']
    strategy.graph.render("teststrategy-via-nfs")
    # returned: 'teststrategy-via-nfs.png'

    strategy.transition('barebox', via=['boot_via_nand'])
    # returned: ['unknown', 'boot_via_nand', 'barebox']
    strategy.graph.render("teststrategy-via-nand")
    # returned: 'teststrategy-via-nand.png'

.. figure:: res/graphstrategy-via-nfs.png

    TestStrategy transitioned to 'barebox' via 'boot_via_nfs'

.. figure:: res/graphstrategy-via-nand.png

    TestStrategy transitioned to 'barebox' via 'boot_via_nand'

State
~~~~~

Every graph node describes a board state and how to reach it.
A state has to be a class method following this prototype:
``def state_$STATENAME(self):``.
A state may not call ``transition()`` in its state definition.
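A state method usually just activates the drivers it needs and performs the
steps required to reach that state.
The following is only a minimal sketch; it assumes that the strategy declares
``power`` and ``barebox`` bindings, which are not part of the example above:

.. code-block:: python

    @GraphStrategy.depends('unknown')
    def state_boot_via_nand(self):
        # assumed bindings: self.power (PowerProtocol), self.barebox (BareboxDriver)
        self.target.activate(self.power)
        # cycle power and interrupt the bootloader on the console
        self.power.cycle()
        self.target.activate(self.barebox)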
Dependency
~~~~~~~~~~

Every state except the root state can depend on other states.
If a state has multiple dependencies, only one of them, not all, has to be
reached before running the current state.
When no ``via`` is used during a transition, the order of the given
dependencies decides which one gets called, where the first one has the highest
priority and the last one the lowest.
Dependencies are represented by graph edges.

Root State
~~~~~~~~~~

Every GraphStrategy has to define exactly one root state.
The root state defines the start of the graph and therefore the start of every
transition.
A state becomes a root state if it has no dependencies.

Transition
~~~~~~~~~~

A transition describes a path, or a part of a path, through a GraphStrategy
graph.
Every state in the graph has an auto-generated default path starting from the
root state.
So using the given example, the GraphStrategy would call the states `unknown`,
`boot_via_nand`, `barebox`, and `linux_shell` in this order if
``transition('linux_shell')`` were called.
The GraphStrategy would prefer `boot_via_nand` over `boot_via_nfs` because
`boot_via_nand` is mentioned before `boot_via_nfs` in the dependencies of
`barebox`.
If you want to reach it via `boot_via_nfs`, the call would look like this:
``transition('linux_shell', via='boot_via_nfs')``.

A transition can be incremental.
If we trigger a transition with ``transition('barebox')`` first, the states
`unknown`, `boot_via_nand` and `barebox` will be called in this order.
If we trigger ``transition('linux_shell')`` afterwards, only `linux_shell` gets
called.
This happens because `linux_shell` is reachable from `barebox` and the Strategy
keeps track of the last walked path.

But there is a catch!
The second, incremental path must be *fully* incremental to the previous path!
For example: Let's say we reached `barebox` via `boot_via_nfs`
(``transition('barebox', via='boot_via_nfs')``).
If we trigger ``transition('linux_shell')`` afterwards, the GraphStrategy would
compare the last path `'unknown', 'boot_via_nfs', 'barebox'` with the default
path to `linux_shell`, which would be `'unknown', 'boot_via_nand', 'barebox',
'linux_shell'`, decide that the path is not fully incremental, and start over
from the root state.
If we had given the second transition `boot_via_nfs` like in the first
transition, the paths would have been incremental.

SSHManager
----------

labgrid provides an SSHManager to allow connection reuse with control sockets.
To use the SSHManager in your code, import it from :any:`labgrid.util.ssh`:

.. code-block:: python

    from labgrid.util.ssh import sshmanager

You can now request or remove forwards:

.. code-block:: python

    from labgrid.util.ssh import sshmanager

    localport = sshmanager.request_forward('somehost', 3000)

    sshmanager.remove_forward('somehost', 3000)

or get and put files:

.. code-block:: python

    from labgrid.util.ssh import sshmanager

    sshmanager.put_file('somehost', '/path/to/local/file', '/path/to/remote/file')

.. note::
    The SSHManager will reuse existing control sockets and set up a keepalive
    loop to prevent timeouts of the socket during tests.

ManagedFile
-----------

While the `SSHManager` exposes a lower-level interface to use SSH connections,
the ManagedFile provides a higher-level interface for file upload to another
host.
It is meant to be used in conjunction with a remote resource, and stores the
file on the remote host with the following pattern:

.. code-block:: bash

    /tmp/labgrid-//

Additionally it provides `get_remote_path()` to retrieve the complete file
path, to easily employ it in driver implementations.

To use it in conjunction with a `Resource` and a file:

.. code-block:: python

    from labgrid.util.managedfile import ManagedFile

    mf = ManagedFile(, )
    mf.sync_to_resource()
    path = mf.get_remote_path()

Unless constructed with `ManagedFile(..., detect_nfs=False)`, ManagedFile
employs the following heuristic to check if a file is stored on an NFS share
available both locally and remotely via the same path:

- check if GNU coreutils stat(1) with option --format exists on the local and
  remote system
- check if inode number, total size and birth/modification timestamps match on
  the local and remote system

If this is the case, the actual file transfer in ``sync_to_resource`` is
skipped.

ProxyManager
------------

The proxymanager is used to open connections across proxies via an attribute in
the resource.
This allows access to gated testing networks by always using the exporter as an
SSH gateway to proxy the connections using SSH forwarding.
Currently this is used in the `SerialDriver` for proxy connections.

Usage:

.. code-block:: python

    from labgrid.util.proxy import proxymanager

    proxymanager.get_host_and_port()

.. _contributing:

Contributing
------------

Thank you for thinking about contributing to labgrid!
Some different backgrounds and use-cases are essential for making labgrid work
well for all users.

The following should help you with submitting your changes, but don't let these
guidelines keep you from opening a pull request.
If in doubt, we'd prefer to see the code earlier as a work-in-progress PR and
help you with the submission process.

Workflow
~~~~~~~~

- Changes should be submitted via a `GitHub pull request `_.
- Try to limit each commit to a single conceptual change.
- Add a Signed-off-by line to your commits according to the `Developer's
  Certificate of Origin` (see below).
- Check that the tests still work before submitting the pull request.
  Also check the CI's feedback on the pull request after submission.
- When adding new drivers or resources, please also add the corresponding
  documentation and test code.
- If your change affects backward compatibility, describe the necessary changes
  in the commit message and update the examples where needed.

Code
~~~~

- Follow the :pep:`8` style.
- Use attr.ib attributes for public attributes of your drivers and resources.
- Use `isort `_ to sort the import statements.

Documentation
~~~~~~~~~~~~~

- Use `semantic linefeeds `_ in .rst files.

Building the documentation
++++++++++++++++++++++++++

When contributing to the documentation, it is practical to be able to build it
locally as well.

.. code-block:: bash

    # Optional - install requirements in a virtualenv
    virtualenv -p python3 labgrid-venv
    source labgrid-venv/bin/activate

    git clone https://github.com/labgrid-project/labgrid.git
    cd labgrid
    pip install -e .
    pip install -r doc-requirements.txt
    cd doc
    make html

Once the build is done, you can view the results with
``firefox .build/html/index.html``.
If for whatever reason you need to rebuild everything from scratch, use
``make SPHINXOPTS="-a -E" html``.

Run Tests
~~~~~~~~~

..
code-block:: bash $ tox -r Developer's Certificate of Origin ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ labgrid uses the `Developer's Certificate of Origin 1.1 `_ with the same `process `_ as used for the Linux kernel: Developer's Certificate of Origin 1.1 By making a contribution to this project, I certify that: (a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or (b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or (c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it. (d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved. Then you just add a line (using ``git commit -s``) saying: Signed-off-by: Random J Developer using your real name (sorry, no pseudonyms or anonymous contributions). .. _ideas: Ideas ----- .. please keep these sorted alphabetically Driver Preemption ~~~~~~~~~~~~~~~~~ To allow better handling of unexpected reboots or crashes, inactive Drivers could register callbacks on their providers (for example the BareboxDriver it's ConsoleProtocol). These callbacks would look for indications that the Target has changed state unexpectedly (by looking for the bootloader startup messages, in this case). The inactive Driver could then cause a preemption and would be activated. The current caller of the originally active driver would be notified via an exception. Step Tracing ~~~~~~~~~~~~ The Step infrastructure already collects timing and nesting information on executed commands, but is currently only used in the pytest plugin or via the standalone StepReporter. By writing these events to a file (or sqlite database) as a trace, we can collect data over multiple runs for later analysis. This would become more useful by passing recognized events (stack traces, crashes, ...) and benchmark results via the Step infrastructure. CommandProtocol Support for Background Processes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently the CommandProtocol does not support long running processes well. An implementation should start a new process, return a handle and forbid running other processes in the foreground. The handle can be used to retrieve output from a command. labgrid-0.4.1/doc/getting_started.rst000066400000000000000000000366531415016572500176310ustar00rootroot00000000000000Getting Started =============== This section of the manual contains introductory tutorials for installing labgrid, running your first test and setting up the distributed infrastructure. For an overview about the basic design and components of `labgrid`, read the :ref:`overview` first. Installation ------------ Depending on your distribution you need some dependencies. On Debian stretch and buster these usually are: .. 
code-block:: bash

    $ apt-get install python3 python3-virtualenv python3-pip python3-setuptools virtualenv

In many cases, the easiest way is to install labgrid into a virtualenv:

.. code-block:: bash

    $ virtualenv -p python3 labgrid-venv
    $ source labgrid-venv/bin/activate

Start installing labgrid by cloning the repository and installing the
requirements from the `requirements.txt` file:

.. code-block:: bash

    labgrid-venv $ git clone https://github.com/labgrid-project/labgrid
    labgrid-venv $ cd labgrid && pip install -r requirements.txt
    labgrid-venv $ python3 setup.py install

.. note::
    Previous documentation recommended installation via pip
    (`pip3 install labgrid`).
    This led to broken installations due to unexpected incompatibilities with
    new releases of the dependencies.
    Consequently we now recommend using pinned versions from the
    `requirements.txt` file for most use cases.

    labgrid also supports the installation as a library via pip, but we only
    test against library versions specified in the requirements.txt file.
    Thus, when installing directly from pip, you have to test compatibility
    yourself.

.. note::
    If you are installing via pip and intend to use Serial over IP (RFC2217),
    it is highly recommended to uninstall pyserial after installation and
    replace it with the pyserial version from the labgrid project:

    .. code-block:: bash

        $ pip uninstall pyserial
        $ pip install https://github.com/labgrid-project/pyserial/archive/v3.4.0.1.zip#egg=pyserial

    This pyserial version has two fixes for an issue we found with Serial over
    IP multiplexers.
    Additionally it reduces the Serial over IP traffic considerably, since the
    port is not reconfigured when labgrid changes the timeout (which is done
    inside the library a lot).

Test your installation by running:

.. code-block:: bash

    labgrid-venv $ labgrid-client --help
    usage: labgrid-client [-h] [-x URL] [-c CONFIG] [-p PLACE] [-d] COMMAND ...
    ...

If the help for labgrid-client does not show up, open an `Issue `_.
If everything was successful so far, proceed to the next section.

Optional Requirements
~~~~~~~~~~~~~~~~~~~~~

labgrid provides optional features which are not included in the default
`requirements.txt`.
The tested library version for each feature is included in a separate
requirements file.
An example for SNMP support is:

.. code-block:: bash

    labgrid-venv $ pip install -r snmp-requirements.txt

Onewire
+++++++

Onewire support requires the `libow` library with headers, installable on
Debian via the `libow-dev` package.
Use the `onewire-requirements.txt` file to install the correct onewire library
version in addition to the normal installation.

SNMP
++++

SNMP support requires two additional packages, `pysnmp` and `pysnmp-mibs`.
They are included in the `snmp-requirements.txt` file.

Modbus
++++++

Modbus support requires an additional package, `pyModbusTCP`.
It is included in the `modbus-requirements.txt` file.

Running Your First Test
-----------------------

Start by copying the initial example:

.. code-block:: bash

    $ mkdir ../first_test/
    $ cp examples/shell/* ../first_test/
    $ cd ../first_test/

Connect your embedded board (Raspberry Pi, RIoTboard, …) to your computer and
adjust the ``port`` parameter of the ``RawSerialPort`` resource and
``username`` and ``password`` of the ``ShellDriver`` in ``local.yaml``:

..
code-block:: yaml targets: main: resources: RawSerialPort: port: "/dev/ttyUSB0" drivers: ManualPowerDriver: name: "example" SerialDriver: {} ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' username: 'root' You can check which device name gets assigned to your USB-Serial converter by unplugging the converter, running ``dmesg -w`` and plugging it back in. Boot up your board (manually) and run your first test: .. code-block:: bash labgrid-venv $ pytest --lg-env local.yaml test_shell.py It should return successfully, in case it does not, open an `Issue `_. .. _remote-getting-started: Setting Up the Distributed Infrastructure ----------------------------------------- The labgrid :ref:`distributed infrastructure ` consists of three components: #. :ref:`overview-coordinator` #. :ref:`overview-exporter` #. :ref:`overview-client` The system needs at least one coordinator and exporter, these can run on the same machine. The client is used to access functionality provided by an exporter. Over the course of this tutorial we will set up a coordinator and exporter, and learn how to access the exporter via the client. .. _remote-getting-started-coordinator: Coordinator ~~~~~~~~~~~ To start the coordinator, we will download the labgrid repository, create an extra virtualenv and install the dependencies via the requirements file. .. code-block:: bash $ sudo apt install libsnappy-dev $ virtualenv -p python3 crossbar-venv $ source crossbar-venv/bin/activate crossbar-venv $ git clone https://github.com/labgrid-project/labgrid crossbar-venv $ cd labgrid && pip install -r crossbar-requirements.txt crossbar-venv $ python setup.py install All necessary dependencies should be installed now, we can start the coordinator by running ``crossbar start`` inside of the repository. .. note:: This is possible because the labgrid repository contains the crossbar configuration the coordinator in the ``.crossbar`` folder. crossbar is a network messaging framework for building distributed applications, which labgrid plugs into. .. note:: For long running deployments, you should copy and customize the ``.crossbar/config.yaml`` file for your use case. This includes setting a different ``workdir`` and may include changing the running port. Exporter ~~~~~~~~ The exporter needs a configuration file written in YAML syntax, listing the resources to be exported from the local machine. The config file contains one or more named resource groups. Each group contains one or more resource declarations and optionally a location string (see the :doc:`configuration reference ` for details). For example, to export a ``USBSerialPort`` with ``ID_SERIAL_SHORT`` of ``ID23421JLK``, the group name `example-group` and the location `example-location`: .. code-block:: yaml example-group: location: example-location USBSerialPort: ID_SERIAL_SHORT: ID23421JLK .. note:: Use ``labgrid-suggest`` to generate the YAML snippets for most exportable resources. The exporter can now be started by running: .. code-block:: bash labgrid-venv $ labgrid-exporter configuration.yaml Additional groups and resources can be added: .. code-block:: yaml example-group: location: example-location USBSerialPort: match: 'ID_SERIAL_SHORT': 'P-00-00682' speed: 115200 NetworkPowerPort: model: netio host: netio1 index: 3 example-group-2: USBSerialPort: ID_SERIAL_SHORT: KSLAH2341J Restart the exporter to activate the new configuration. .. Attention:: The `ManagedFile` will create temporary uploads in the exporters ``/var/cache/labgrid`` directory. 
This directory needs to be created manually and should allow write access for users. The ``/contrib`` directory in the labgrid-project contains a tmpfiles configuration example to automatically create and clean the directory. It is also highly recommended to enable ``fs.protected_regular=1`` and ``fs.protected_fifos=1`` for kernels>=4.19, to protect the users from opening files not owned by them in world writeable sticky directories. For more information see `this kernel commit`_. .. _`this kernel commit`: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=30aba6656f Client ~~~~~~ Finally we can test the client functionality, run: .. code-block:: bash labgrid-venv $ labgrid-client resources kiwi/example-group/NetworkPowerPort kiwi/example-group/NetworkSerialPort kiwi/example-group-2/NetworkSerialPort You can see the available resources listed by the coordinator. The groups `example-group` and `example-group-2` should be available there. To show more details on the exported resources, use ``-v`` (or ``-vv``): .. code-block:: bash labgrid-venv $ labgrid-client -v resources Exporter 'kiwi': Group 'example-group' (kiwi/example-group/*): Resource 'NetworkPowerPort' (kiwi/example-group/NetworkPowerPort[/NetworkPowerPort]): {'acquired': None, 'avail': True, 'cls': 'NetworkPowerPort', 'params': {'host': 'netio1', 'index': 3, 'model': 'netio'}} ... You can now add a place with: .. code-block:: bash labgrid-venv $ labgrid-client --place example-place create And add resources to this place (``-p`` is short for ``--place``): .. code-block:: bash labgrid-venv $ labgrid-client -p example-place add-match */example-group/* Which adds the previously defined resource from the exporter to the place. To interact with this place, it needs to be acquired first, this is done by .. code-block:: bash labgrid-venv $ labgrid-client -p example-place acquire Now we can connect to the serial console: .. code-block:: bash labgrid-venv $ labgrid-client -p example-place console .. note:: Using remote connection requires ``microcom`` installed on the host where the labgrid-client is called. See :ref:`remote-usage` for some more advanced features. For a complete reference have a look at the :doc:`labgrid-client(1) ` man page. Systemd files ~~~~~~~~~~~~~ Labgrid comes with several systemd files in :file:`contrib/systemd`: - service files for coordinator and exporter - tmpfiles.d file to regularly remove files uploaded to the exporter in :file:`/var/cache/labgrid` - sysusers.d file to create the ``labgrid`` user and group, enabling members of the ``labgrid`` group to upload files to the exporter in :file:`/var/cache/labgrid` Follow these instructions to install the systemd files on your machine(s): #. Copy the service, tmpfiles.d and sysusers.d files to the respective installation paths of your distribution. #. Adapt the ``ExecStart`` paths of the service files to the respective Python virtual environments of the coordinator and exporter. #. Create the coordinator configuration file referenced in the ``ExecStart`` option of the :file:`systemd-coordinator.service` file by using :file:`.crossbar/config.yaml` as a starting point. You most likely want to make sure that the ``workdir`` option matches the path given via the ``--cbdir`` option in the service file; see :ref:`remote-getting-started-coordinator` for further information. #. 
Adjust the ``SupplementaryGroups`` option in the :file:`labgrid-exporter.service` file to your distribution so that the exporter gains read and write access on TTY devices (for ``ser2net``); most often, this group is called ``dialout`` or ``tty``. #. Set the coordinator URL the exporter should connect to by overriding the exporter service file; i.e. execute ``systemctl edit labgrid-exporter.service`` and add the following snippet: .. code-block:: [Service] Environment="LG_CROSSBAR=ws://:/ws" #. Create the ``labgrid`` user and group: .. code-block:: console # systemd-sysusers #. Reload the systemd manager configuration: .. code-block:: console # systemctl daemon-reload #. Start the coordinator, if applicable: .. code-block:: console # systemctl start labgrid-coordinator #. After creating the exporter configuration file referenced in the ``ExecStart`` option of the :file:`systemd-exporter.service` file, start the exporter: .. code-block:: console # systemctl start labgrid-exporter #. Optionally, for users being able to upload files to the exporter, add them to the `labgrid` group on the exporter machine: .. code-block:: console # usermod -a -G labgrid .. _udev-matching: udev Matching ------------- labgrid allows the exporter (or the client-side environment) to match resources via udev rules. The udev resources become available to the test/exporter as soon es they are plugged into the computer, e.g. allowing an exporter to export all USB ports on a specific hub and making a ``NetworkSerialPort`` available as soon as it is plugged into one of the hub's ports. labgrid also provides a small utility called ``labgrid-suggest`` which will output the proper YAML formatted snippets for you. The information udev has on a device can be viewed by executing: .. code-block:: bash :emphasize-lines: 9 $ udevadm info /dev/ttyUSB0 ... E: ID_MODEL_FROM_DATABASE=CP210x UART Bridge / myAVR mySmartUSB light E: ID_MODEL_ID=ea60 E: ID_PATH=pci-0000:00:14.0-usb-0:5:1.0 E: ID_PATH_TAG=pci-0000_00_14_0-usb-0_5_1_0 E: ID_REVISION=0100 E: ID_SERIAL=Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_P-00-00682 E: ID_SERIAL_SHORT=P-00-00682 E: ID_TYPE=generic ... In this case the device has an ``ID_SERIAL_SHORT`` key with a unique ID embedded in the USB-serial converter. The resource match configuration for this USB serial converter is: .. code-block:: yaml :emphasize-lines: 3 USBSerialPort: match: 'ID_SERIAL_SHORT': 'P-00-00682' This section can now be added under the resource key in an environment configuration or under its own entry in an exporter configuration file. As the USB bus number can change depending on the kernel driver initialization order, it is better to use the ``@ID_PATH`` instead of ``@sys_name`` for USB devices. In the default udev configuration, the path is not available for all USB devices, but that can be changed by creating a udev rules file: .. code-block:: none SUBSYSTEMS=="usb", IMPORT{builtin}="path_id" Using a Strategy ---------------- Strategies allow the labgrid library to automatically bring the board into a defined state, e.g. boot through the bootloader into the Linux kernel and log in to a shell. They have a few requirements: - A driver implementing the ``PowerProtocol``, if no controllable infrastructure is available a ``ManualPowerDriver`` can be used. - A driver implementing the ``LinuxBootProtocol``, usually a specific driver for the board's bootloader - A driver implementing the ``CommandProtocol``, usually a ``ShellDriver`` with a ``SerialDriver`` below it. 
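As a rough sketch, an environment configuration that provides these three
drivers for a barebox-based board could look like the following; the resource
and driver arguments are only placeholders and need to be adjusted for your
board:

.. code-block:: yaml

    targets:
      main:
        resources:
          RawSerialPort:
            port: "/dev/ttyUSB0"
        drivers:
          ManualPowerDriver:
            name: "example"
          SerialDriver: {}
          BareboxDriver: {}
          ShellDriver:
            prompt: 'root@\w+:[^ ]+ '
            login_prompt: ' login: '
            username: 'root'
          BareboxStrategy: {}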
labgrid ships with two builtin strategies, ``BareboxStrategy`` and ``UBootStrategy``. These can be used as a reference example for simple strategies, more complex tests usually require the implementation of your own strategies. To use a strategy, add it and its dependencies to your configuration YAML, retrieve it in your test and call the ``transition(status)`` function. .. code-block:: python >>> strategy = target.get_driver("Strategy") >>> strategy.transition("barebox") An example using the pytest plugin is provided under `examples/strategy`. labgrid-0.4.1/doc/index.rst000066400000000000000000000037741415016572500155470ustar00rootroot00000000000000Welcome to labgrid's documentation! =================================== labgrid is a embedded board control python library with a focus on testing, development and general automation. It includes a remote control layer to control boards connected to other hosts. The idea behind labgrid is to create an abstraction of the hardware control layer needed for testing of embedded systems, automatic software installation and automation during development. labgrid itself is *not* a testing framework, but is intended to be combined with `pytest `_ (and additional pytest plugins). Please see :doc:`design_decisions` for more background information. It currently supports: - pytest plugin to write tests for embedded systems connecting serial console or SSH - remote client-exporter-coordinator infrastructure to make boards available from different computers on a network - power/reset management via drivers for power switches or onewire PIOs - upload of binaries via USB: imxusbloader/mxsusbloader (bootloader) or fastboot (kernel) - functions to control external services such as emulated USB-Sticks and the `hawkBit `_ deployment service While labgrid is currently used for daily development on embedded boards and for automated testing, several planned features are not yet implemented and the APIs may be changed as more use-cases appear. We appreciate code contributions and feedback on using labgrid on other environments (see :ref:`contributing` for details). Please consider contacting us (via a GitHub issue) before starting larger changes, so we can discuss design trade-offs early and avoid redundant work. You can also look at :ref:`ideas` for enhancements which are not yet implemented. .. toctree:: getting_started overview usage man configuration development design_decisions changes modules/modules :maxdepth: 2 :caption: Contents Indices and Tables ------------------ * :ref:`genindex` * :ref:`modindex` * :ref:`search` labgrid-0.4.1/doc/man.rst000066400000000000000000000001331415016572500151750ustar00rootroot00000000000000Manual Pages ============ .. toctree:: man/client man/device-config man/exporter labgrid-0.4.1/doc/man/000077500000000000000000000000001415016572500144465ustar00rootroot00000000000000labgrid-0.4.1/doc/man/client.rst000066400000000000000000000000761415016572500164610ustar00rootroot00000000000000.. _labgrid-client: .. include:: ../../man/labgrid-client.rst labgrid-0.4.1/doc/man/device-config.rst000066400000000000000000000001141415016572500176760ustar00rootroot00000000000000.. _labgrid-device-config: .. include:: ../../man/labgrid-device-config.rst labgrid-0.4.1/doc/man/exporter.rst000066400000000000000000000001021415016572500170410ustar00rootroot00000000000000.. _labgrid-exporter: .. include:: ../../man/labgrid-exporter.rst labgrid-0.4.1/doc/overview.rst000066400000000000000000000402721415016572500163000ustar00rootroot00000000000000.. 
_overview: Overview ======== Architecture ------------ labgrid can be used in several ways: - on the command line to control individual embedded systems during development ("board farm") - via a pytest plugin to automate testing of embedded systems - as a python library in other programs In the labgrid library, a controllable embedded system is represented as a :any:`Target`. `Targets` normally have several :any:`Resource` and :any:`Driver` objects, which are used to store the board-specific information and to implement actions on different abstraction levels. For cases where a board needs to be transitioned to specific states (such as `off`, `in bootloader`, `in Linux shell`), a :any:`Strategy` (a special kind of `Driver`) can be added to the `Target`. While labgrid comes with implementations for some resources, drivers and strategies, custom implementations for these can be registered at runtime. It is expected that for complex use-cases, the user would implement and register a custom `Strategy` and possibly some higher-level `Drivers`. Resources ~~~~~~~~~ `Resources` are passive and only store the information to access the corresponding part of the `Target`. Typical examples of resources are :any:`RawSerialPort`, :any:`NetworkPowerPort` and :any:`AndroidFastboot`. An important type of `Resources` are :any:`ManagedResources `. While normal `Resources` are always considered available for use and have fixed properties (such as the ``/dev/ttyUSB0`` device name for a :any:`RawSerialPort`), the `ManagedResources` are used to represent interfaces which are discoverable in some way. They can appear/disappear at runtime and have different properties each time they are discovered. The most common examples of `ManagedResources` are the various USB resources discovered using udev, such as :any:`USBSerialPort`, :any:`IMXUSBLoader` or :any:`AndroidFastboot`. Drivers and Protocols ~~~~~~~~~~~~~~~~~~~~~ A labgrid :any:`Driver` uses one (or more) `Resources` and/or other, lower-level `Drivers` to perform a set of actions on a `Target`. For example, the :any:`NetworkPowerDriver` uses a :any:`NetworkPowerPort` resource to control the `Target's` power supply. In this case, the actions are "on", "off", "cycle" and "get". As another example, the :any:`ShellDriver` uses any driver implementing the :any:`ConsoleProtocol` (such as a :any:`SerialDriver`, see below). The `ConsoleProtocol` allows the `ShellDriver` to work with any specific method of accessing the board's console (locally via USB, over the network using a console server or even an external program). At the `ConsoleProtocol` level, characters are sent to and received from the target, but they are not yet interpreted as specific commands or their output. The `ShellDriver` implements the higher-level :any:`CommandProtocol`, providing actions such as "run" or "run_check". Internally, it interacts with the Linux shell on the target board. For example, it: - waits for the login prompt - enters user name and password - runs the requested shell command (delimited by marker strings) - parses the output - retrieves the exit status Other drivers, such as the :any:`SSHDriver`, also implement the `CommandProtocol`. This way, higher-level code (such as a test suite), can be independent of the concrete control method on a given board. Binding and Activation ~~~~~~~~~~~~~~~~~~~~~~ When a `Target` is configured, each driver is "bound" to the resources (or other drivers) required by it. 
Each `Driver` class has a "bindings" attribute, which declares which `Resources` or `Protocols` it needs and under which name they should be available to the `Driver` instance. The binding resolution is handled by the `Target` during the initial configuration and results in a directed, acyclic graph of resources and drivers. During the lifetime of a `Target`, the bindings are considered static. In most non-trivial target configurations, some drivers are mutually exclusive. For example, a `Target` may have both a :any:`ShellDriver` and a :any:`BareboxDriver`. Both bind to a driver implementing the `ConsoleProtocol` and provide the `CommandProtocol`. Obviously, the board cannot be in the bootloader and in Linux at the same time, which is represented in labgrid via the :any:`BindingState` (`bound`/`active`). If, during activation of a driver, any other driver in its bindings is not active, they will be activated as well. Activating and deactivating `Drivers` is also used to handle `ManagedResources` becoming available/unavailable at runtime. If some resources bound to by the activating drivers are currently unavailable, the `Target` will wait for them to appear (with a per resource timeout). A realistic sequence of activation might look like this: - enable power (:any:`PowerProtocol.on`) - activate the :any:`IMXUSBDriver` driver on the target (this will wait for the :any:`IMXUSBLoader` resource to be available) - load the bootloader (:any:`BootstrapProtocol.load`) - activate the :any:`AndroidFastbootDriver` driver on the target (this will wait for the :any:`AndroidFastboot` resource to be available) - boot the kernel (:any:`AndroidFastbootDriver.boot`) - activate the :any:`ShellDriver` driver on the target (this will wait for the :any:`USBSerialPort` resource to be available and log in) Any `ManagedResources` which become unavailable at runtime will automatically deactivate the dependent drivers. Multiple Drivers and Names ~~~~~~~~~~~~~~~~~~~~~~~~~~ Each driver and resource can have an optional name. This parameter is required for all manual creations of drivers and resources. To manually bind to a specific driver set a binding mapping before creating the driver: >>> t = Target("Test") >>> SerialPort(t, "First") SerialPort(target=Target(name='Test', env=None), name='First', state=, avail=True, port=None, speed=115200) >>> SerialPort(t, "Second") SerialPort(target=Target(name='Test', env=None), name='Second', state=, avail=True, port=None, speed=115200) >>> t.set_binding_map({"port": "Second"}) >>> sd = SerialDriver(t, "Driver") >>> sd SerialDriver(target=Target(name='Test', env=None), name='Driver', state=, txdelay=0.0) >>> sd.port SerialPort(target=Target(name='Test', env=None), name='Second', state=, avail=True, port=None, speed=115200) Priorities ~~~~~~~~~~ Each driver supports a priorities class variable. This allows drivers which implement the same protocol to add a priority option to each of their protocols. This way a `NetworkPowerDriver` can implement the `ResetProtocol`, but if another `ResetProtocol` driver with a higher protocol is available, it will be selected instead. .. note:: Priority resolution only takes place if you have multiple drivers which implement the same protocol and you are not fetching them by name. The target resolves the driver priority via the Method Resolution Order (MRO) of the driver's base classes. If a base class has a `priorities` dictionary which contains the requested Protocol as a key, that priority is used. 
Otherwise, `0` is returned as the default priority.

To set the priority of a protocol for a driver, add a class variable with the
name `priorities`, e.g.

.. code-block:: python

    @attr.s
    class NetworkPowerDriver(Driver, PowerProtocol, ResetProtocol):
        priorities = {PowerProtocol: -10}

Strategies
~~~~~~~~~~

Especially when using labgrid from pytest, explicitly controlling the board's
boot process can distract from the individual test case.
Each :any:`Strategy` implements the board- or project-specific actions
necessary to transition from one state to another.
labgrid includes the :any:`BareboxStrategy` and the :any:`UBootStrategy`, which
can be used as-is for simple cases or serve as an example for implementing a
custom strategy.

`Strategies` themselves are not activated/deactivated.
Instead, they control the states of the other drivers explicitly and execute
actions to bring the target into the requested state.
See the strategy example (``examples/strategy``) and the included strategies in
``labgrid/strategy`` for some more information.

For more information on the reasons behind labgrid's architecture, see
:doc:`design_decisions`.

.. _remote-resources-and-places:

Remote Resources and Places
---------------------------

labgrid contains components for accessing resources which are not directly
accessible on the local machine.
The main parts of this are:

labgrid-coordinator (crossbar component)
    Clients and exporters connect to the coordinator to publish resources,
    manage place configuration and handle mutual exclusion.

:ref:`labgrid-exporter` (CLI)
    Exports explicitly configured local resources to the coordinator and
    monitors these for changes in availability or parameters.

:ref:`labgrid-client` (CLI)
    Configures places (consisting of exported resources) and allows command
    line access to some actions (such as power control, bootstrap, fastboot and
    the console).

RemotePlace (managed resource)
    When used in a `Target`, the RemotePlace expands to the resources
    configured for the named places.

These components communicate over the `WAMP `_ implementation `Autobahn `_ and
the `Crossbar `_ WAMP router.

The following sections describe the responsibilities of each component.
See :ref:`remote-usage` for usage information.

.. _overview-coordinator:

Coordinator
~~~~~~~~~~~

The `Coordinator` is implemented as a Crossbar component and is started by the
router.
It provides separate RPC methods for the exporters and clients.

The coordinator keeps a list of all resources for clients and notifies them of
changes as they occur.
The resource access from clients does not pass through the coordinator, but is
instead done directly from client to exporter, avoiding the need to specify new
interfaces for each resource type.

The coordinator also manages the registry of "places".
These are used to configure which resources belong together from the user's
point of view.
A `place` can be a generic rack location, where different boards are connected
to a static set of interfaces (resources such as power, network, serial
console, …).
Alternatively, a `place` can also be created for a specific board, for example
when special interfaces such as GPIO buttons need to be controlled and they are
not available in the generic locations.

Each place can have aliases to simplify accessing a specific board (which might
be moved between generic places).
It also has a comment, which is used to store a short description of the
connected board.
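For example, aliases and comments can be managed via the client CLI; this is
only an illustrative sketch, and ``example-place`` is a placeholder for a place
you have created:

.. code-block:: bash

    $ labgrid-client -p example-place add-alias myboard
    $ labgrid-client -p example-place set-comment 'i.MX6 eval board in rack 3'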
To support selecting a specific place from a group containing similar or identical hardware, key-value tags can be added to places and used for scheduling. Finally, a place is configured with one or more `resource matches`. A resource match pattern has the format ``///``, where each component may be replaced with the wildcard ``*``. The ``/`` part is optional and can be left out to match all resources of a class. Some commonly used match patterns are: \*/1001/\* Matches all resources in groups named 1001 from all exporters. \*/1001/NetworkPowerPort Matches only the NetworkPowerPort resource in groups named 1001 from all exporters. This is useful to exclude a NetworkSerialPort in group 1001 in cases where the serial console is connected somewhere else (such as via USB on a different exporter). exporter1/hub1-port1/\* Matches all resources exported from exporter1 in the group hub1-port1. This is an easy way to match several USB resources related to the same board (such as a USB ROM-Loader interface, Android fastboot and a USB serial gadget in Linux). To avoid conflicting access to the same resources, a place must be `acquired` before it is used and the coordinator also keeps track of which user on which client host has currently acquired the place. The resource matches are only evaluated while a place is being acquired and cannot be changed until it is `released` again. .. _overview-exporter: Exporter ~~~~~~~~ An exporters registers all its configured resources when it connects to the router and updates the resource parameters when they change (such as (dis-)connection of USB devices). Internally, the exporter uses the normal :any:`Resource` (and :any:`ManagedResource`) classes as the rest of labgrid. By using `ManagedResources`, availability and parameters for resources such as USB serial ports are tracked and sent to the coordinator. For some specific resources (such as :any:`USBSerialPorts `), the exporter uses external tools to allow access by clients (``ser2net`` in the serial port case). Resources which do not need explicit support in the exporter, are just published as declared in the configuration file. This is useful to register externally configured resources such as network power switches or serial port servers with a labgrid coordinator. .. _overview-client: Client ~~~~~~ The client requests the current lists of resources and places from the coordinator when it connects to it and then registers for change events. Most of its functionality is exposed via the `labgrid-client` CLI tool. It is also used by the :any:`RemotePlace` resource (see below). Besides viewing the list of `resources`, the client is used to configure and access `places` on the coordinator. For more information on using the CLI, see the manual page for :ref:`labgrid-client`. RemotePlace ~~~~~~~~~~~ To use the resources configured for a `place` to control the corresponding board (whether in pytest or directly with the labgrid library), the :any:`RemotePlace` resource should be used. When a `RemotePlace` is configured for a `Target`, it will create a client connection to the coordinator, create additional resource objects for those configured for that place and keep them updated at runtime. The additional resource objects can be bound to by drivers as normal and the drivers do not need to be aware that they were provided by the coordinator. 
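As a minimal sketch, an environment configuration using such a place could look
like this, where ``example-place`` is a placeholder for a place registered on
the coordinator:

.. code-block:: yaml

    targets:
      main:
        resources:
          RemotePlace:
            name: example-place
        drivers:
          SerialDriver: {}

The ``SerialDriver`` then binds to the serial port resource exported for that
place (e.g. a ``NetworkSerialPort``), just as it would bind to a local
``RawSerialPort``.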
For resource types which do not have an existing, network-transparent protocol (such as USB ROM loaders or JTAG interfaces), the driver needs to be aware of the mapping done by the exporter. For generic USB resources, the exporter for example maps a :any:`AndroidFastboot` resource to a :any:`NetworkAndroidFastboot` resource and adds a hostname property which needs to be used by the client to connect to the exporter. To avoid the need for additional remote access protocols and authentication, labgrid currently expects that the hosts are accessible via SSH and that any file names refer to a shared filesystem (such as NFS or SMB). .. note:: Using SSH's session sharing (``ControlMaster auto``, ``ControlPersist``, …) makes `RemotePlaces` easy to use even for exporters with require passwords or more complex login procedures. For exporters which are not directly accessible via SSH, add the host to your .ssh/config file, with a ProxyCommand when need. .. _overview-proxy-mechanism: Proxy Mechanism ~~~~~~~~~~~~~~~ Both client and exporter support the proxy mechanism which uses SSH to tunnel connections to a remote host. To enable and force proxy mode on the exporter use the :code:`-i` or :code:`--isolated` command line option. This indicates to clients that all connections to remote resources made available by this exporter need to be tunneled using a SSH connection. On the other hand, clients may need to access the remote coordinator infrastrucure using a SSH tunnel. In this case the :code:`LG_PROXY` environment variable needs to be set to the remote host which should tunnel the connection to the coordinator. The client then forwards all network traffic - client-to-coordinator and client-to-exporter - through SSH, via their respective proxies. This means that with :code:`LG_PROXY` and :code:`LG_CROSSBAR` labgrid can be used fully remotely with only a SSH connection as a requirement. .. note:: Labgrid prefers to connect to an exporter-defined proxy over using the LG_PROXY variable. This means that a correct entry for the exporter needs to be set up in the ~/.ssh/config file. You can view exporter proxies with :code:`labgrid-client -v resources`. One remaining issue here is the forward of UDP connections, which is currently not possible. UDP connections are used by some of the power backends in the form of SNMP. labgrid-0.4.1/doc/res/000077500000000000000000000000001415016572500144645ustar00rootroot00000000000000labgrid-0.4.1/doc/res/config_graph.svg000066400000000000000000000334371415016572500176450ustar00rootroot00000000000000 RawSerialPort SerialDriver ShellDriver labgrid-0.4.1/doc/res/graphstrategy-via-nand.png000066400000000000000000000620471415016572500215620ustar00rootroot00000000000000PNG  IHDRb[)bKGD IDATxw\9d9q 4AG*JfYi{i}W62\́["0g޿?T\͸ ~}RH$! F)wANbAB, 3 jh44 EEEa2(..FՖ,koo&^V-lllppp '''j,AKbR(..&../DZZIIIJvv6Mz̶ick.鉗xzz_I1WMIr1>̩S8s,gHNlW7w\=pSWwO읝upe-V6X\ZXؖl0/[b(ϣ0/\ r)#/'T/NFj2BԖӸq#6h@ӦM QFT {Bu% Pn$Iɓp<ęӧ098P/!^ // }UKz1$ǟ#y.s),zkZhIVmۖ-TQ e*..͛7u6mFfl j  9( 7@bN#QN%1ztLΝ޽;nnnrQb69|0Wf5>tkjK4nݎ-CQYTc& Op46c1ܿAO˖!ۇ}*wLXx Ga,Z'WdUߟ6{Ѯ[/CYOuZ-wDoD&+1B aX(5F u|=W}:zGzŧA#UJf3v%W-%_C׮3f4 RB% pO/^~~XMHz EaUrW.FCۣٶol\ ??I&"w''Vмys# H</x'_*p%h|.zhNlذAHB9=J$^|E~Gƾ}G;pFzmQO?1j(# @<߯zwO9g>#{VR19g1oacd݃qJ߾}%114Q|GLxf+’QD McYDӯM'ɡCٿq2&&j <[rǹ# 3yWDCÅ>3T="5FD a^4 z QBA-kCT:* ^|6II|rʐ(5֭[Yv-Koɧ#wB\̛7ƭиurYU0-6ܣ|72#l&jV2i؝}eB?DW,哴-^m>MGH:Y3o㓗|`tSx+vΰwepY; 0n~}q '=^OAT_! 
&5uSufer1I%t*Xs*RSͬZ_޽kq~Xt+B*QQQ5+Ą*kWK9قt}3Ȧa{S4n~< #[$#?Vb_-mkVxл2ũ c\<(nvo 5t5}Lr%wQխ[yF;Hn}U -0@+ȦMj筂'gHK (8CϞOgLE8HP*?M,ʘg'l^*=lU<#c6O3~|-YIV tk{&KSҷCG:==3`i*+OΪ+{5qҤI.BT(VZŋD!1׀GL)[ٲKc۷v_g„ |?W8?$B*Ouh1W:5 y<DB?ޜV\ZIBXHpƏVCpUy|J6\ @ƍyꩧtHT5#뮻8g+N>1#0L|2mM5lf͚5ԩSF#U0B]NtJT5DڵY|9[sƾ1Q)$7L_Թ*QȬfՋ\nkLm+M-`Ŀ{Ȃ 0ɺТ h۶mՋK5GE[sK\Q23|Z#F`֬Y AT&A\C%&&ү_?oem;w7fUy7Mn' dϴi9rՉJs8K{0m4=e?[S'rh6϶&Mb Znce l޼~CyYxh3W ߑpz_f~iLBPPNXt:1cSN=V DԊѱEuٹq=}-ߗ عsgfϞMuPx bQJZZӧOgܹl6vL1njsbr֬`ò+u_vxҥN o%A,ʔ̙3裏*u_xT4{<=;Pʼ.egjUWQ:uѣ޽U o&A,~ngŊ۷C#"inn#wUӇ[pK€xgiժN _!A,ڮ]X|9+V(uab@f7;hzm4ks; ӡ),ݳc{;^b<PEĢTU%>>+W~zN8q}ìQ4l~#q74#I3nhFQ_vxǎt(g!QΝ:.Ztܙ޽{ӧOVkW-j bqN>͆ ذa[n%--w'08:U/֪MH+aᄄG3}?0je\TUE4 rs(r:"/'ҿd_$-i)縐thꕮvwlost֍ iI w mٹs'G)5P rM7Ѷm[ڶmmYI"A,*9sCq!?Nbb"$%%pTcbb#..FѢE ZlIͥWT BW 33snS6O>n6mڠ( 0, VJDDV(4h(9 bUÙ1cÆ ӻ!*': BI !$BgB3 bUF%W>G !X!t&A,: BI !$BgB3 bUy߯wBT( bU&MΝ;.C %A,: BI !$BgB3 b!Й*EBLBgB3 b!ЙBLX!t&A,: ^E#ȤwB\ɡC?#GrJm 0rBʂ ~w?ȹs]vT%Dœ#bQm 0 _Ӹq26lFQBXx5 bQmw}İsNvzlݺsPGDe4yGYdI\z*Ijm9r̙˖-gϞPGXTkm۶QF,[/i& SeBT bQ)BYl%',[@zcuBT bQ 0Dnٶl2~atL!A,VZѲeKOӧٹstK!A,€XbEEE,](twYBT b Dzz:7ndI 8 Ѯ];\.v">>]B0`v"..v]F>ۉjA4n7v4 UUK߽{wF#<yyy G(`(u3z(B4. Ӊ墨MQQ.s+ݫ!;СWP6LfF#ffXPZ~\!0#vcq88NVU2䊃iݺum뗾dbk iQ$Us8l6vv+*(b! (5"ĵ eRU =s[esPPaXtPx+ b\ N~~>l6O*R#j gHpp0AAA`6uNx LUU!//s+/?d0FXX-$A\hFnn.999{BW^Ϗ0%e$kAvv6:j.~ iCzz:v] EQZDFFJr 'A4M#;;/r$|P **J-j( bMjj*.KRD9rxx8111\ #A# IIIn+䘘"##匾BiFjj*z"*b!..KL؋9Nz"*(bZ.ET" b/GRRRV.IQBBB_tU(rssILL.'w Eٳg$Lnnn r9kzsһ爻۬LZ>f顷[rhFAA >H؋8OWy؝ZZ 0Y죱LnŜL>ojKY*Wq_pUK&+z MHJJvjN_IV9UbBĽ/JorC 3 eD޼i9s?/T~/3䡉|u>A{eЫKt7;3-L;n 9j]֭Zsss1Ë]G?xգkGVW31=Z^jZ<ѥ8Vt3V7^sO:\.z!*\ifdQxv _8CgN["b<} X>w ܿrL{;Iy/]Jhsz {7_]Y!YF͘HX(Y Rdq3/wK/?V%FIFOC ]=,Y-Ͻ?C/dhf/wTߑ(>D l6*D\w+vhS&4 X=AkQƇycGHR/wڑQ|x!oϙͻG;3GL^DΜKsDGo&dS7r0Eټnv nP?cXzu`j@c SI4B2Xo \V(Dh@`+W协63|sĖ! IScȱ;-ﺫ>&?,ZM@~.yFtI|A>7WJp9 |rd M ATU.dza>V<SV7q7W%Ԡ ?[WQgP׈F ]^&,ob@+3H9ρӊp)KY:qg3eoOl;7]v!joǰoV&;Xɟ3nJXZ"9z É*¥]D,kZ6/NNmR*( UڦRr25Ѭi$?;H .l}o'88v~ϦH?)ECO_y5{3KJmk@JS YKĖIDATwaÆz!*t0y`"##*[Ur~7/( z]@rDE4Mɓ8u _T Ȃ?>F뼈(4j⥳(ĵArD\.OfuSpu%""B2D% R.$ :b t5%AҸx"EQ#.."k2 b`HNNA<( QV-e< Ʌ ^8CBBDǸ\.ʒ@2\KGn222HOO#@eZZ &An7dddt:=B?}XVtHMldee֙F?Vp 9J\"A\J^^i)WFhh(~~~z%! N4<7%?oHHH2A. bQᠠ|=}i-dBPPAAA`` to select only a specific place (even if it has no custom tags). .. code-block:: bash $ labgrid-client reserve board=imx6-foo Reservation 'SP37P5OQRU': owner: rettich/jlu token: SP37P5OQRU state: waiting filters: main: board=imx6-foo created: 2019-08-06 12:56:49.779982 timeout: 2019-08-06 12:57:49.779983 As soon as any matching place becomes free, the reservation state will change from ``waiting`` to ``allocated``. Then, you can use the reservation token prefixed by ``+`` to refer to the allocated place for locking and usage. While a place is allocated for a reservation, only the owner of the reservation can lock that place. .. 
code-block:: bash $ labgrid-client wait SP37P5OQRU owner: rettich/jlu token: SP37P5OQRU state: waiting filters: main: board=imx6-foo created: 2019-08-06 12:56:49.779982 timeout: 2019-08-06 12:58:14.900621 … owner: rettich/jlu token: SP37P5OQRU state: allocated filters: main: board=imx6-foo allocations: main: board-2 created: 2019-08-06 12:56:49.779982 timeout: 2019-08-06 12:58:46.145851 $ labgrid-client -p +SP37P5OQRU lock acquired place board-2 $ labgrid-client reservations Reservation 'SP37P5OQRU': owner: rettich/jlu token: SP37P5OQRU state: acquired filters: main: board=imx6-foo allocations: main: board-2 created: 2019-08-06 12:56:49.779982 timeout: 2019-08-06 12:59:11.840780 $ labgrid-client -p +SP37P5OQRU console When using reservation in a CI job or to save some typing, the ``labgrid-client reserve`` command supports a ``--shell`` command to print code for evaluating in the shell. This sets the ``LG_TOKEN`` environment variable, which is then automatically used by ``wait`` and expanded via ``-p +``. .. code-block:: bash $ eval `labgrid-client reserve --shell board=imx6-foo` $ echo $LG_TOKEN ZDMZJZNLBF $ labgrid-client wait owner: rettich/jlu token: ZDMZJZNLBF state: waiting filters: main: board=imx6-foo created: 2019-08-06 13:05:30.987072 timeout: 2019-08-06 13:06:44.629736 … owner: rettich/jlu token: ZDMZJZNLBF state: allocated filters: main: board=imx6-foo allocations: main: board-1 created: 2019-08-06 13:05:30.987072 timeout: 2019-08-06 13:06:56.196684 $ labgrid-client -p + lock acquired place board-1 $ labgrid-client -p + show Place 'board-1': tags: bar=baz, board=imx6-foo, jlu=2, rcz=1 matches: rettich/Testport1/NetworkSerialPort acquired: rettich/jlu acquired resources: created: 2019-07-29 16:11:52.006269 changed: 2019-08-06 13:06:09.667682 reservation: ZDMZJZNLBF Finally, to avoid calling the ``wait`` command explicitly, you can add ``--wait`` to the ``reserve`` command, so it waits until the reservation is allocated before returning. A reservation will time out after a short time, if it is neither refreshed nor used by locked places. Library ------- labgrid can be used directly as a Python library, without the infrastructure provided by the pytest plugin. The labgrid library provides two ways to configure targets with resources and drivers: either create the :any:`Target` directly or use :any:`Environment` to load a configuration file. .. note:: On exit of your script/application, labgrid will call ``cleanup()`` on the targets using the python atexit module. Targets ~~~~~~~ .. note:: In most cases it is easier to :ref:`use a complete environment from a YAML file ` instead of manually creating and activating objects. Nevertheless, we explain this in the following to clarify the underlying concepts, and how to work with targets on a lower level, e.g. in strategies. At the lower level, a :any:`Target` can be created directly:: >>> from labgrid import Target >>> t = Target('example') Next, any required :any:`Resource` objects can be created, which each represent a piece of hardware to be used with labgrid:: >>> from labgrid.resource import RawSerialPort >>> rsp = RawSerialPort(t, name=None, port='/dev/ttyUSB0') .. note:: Since we support multiple drivers of the same type, resources and drivers have a required ``name`` attribute. If you don't use multiple drivers of the same type, you can set the name to ``None``. Further on, a :any:`Driver` encapsulates logic how to work with resources. 
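In labgrid, a driver class declares which resource types it needs through a class-level ``bindings`` attribute, and the target uses this declaration to connect ("bind") the driver to matching resource objects. As a shortened sketch only (not the actual class definition), the :any:`SerialDriver` used below declares roughly the following::

    class SerialDriver(Driver, ConsoleProtocol):
        # the driver attribute 'port' is bound to a SerialPort resource
        bindings = {"port": SerialPort}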
Drivers need to be created on the :any:`Target`:: >>> from labgrid.driver import SerialDriver >>> sd = SerialDriver(t, name=None) As the :any:`SerialDriver` declares a binding to a :any:`SerialPort`, the target binds it to the resource object created above:: >>> sd.port RawSerialPort(target=Target(name='example', env=None), name=None, state=, avail=True, port='/dev/ttyUSB0', speed=115200) >>> sd.port is rsp True Driver Activation ^^^^^^^^^^^^^^^^^ Before a bound driver can be used, it needs to be activated. During activation, the driver makes sure that all hardware represented by the resources it is bound to can be used, and, if necessary, it acquires the underlying hardware on the OS level. For example, activating a :any:`SerialDriver` makes sure that the hardware represented by its bound :any:`RawSerialPort` object (e.g. something like ``/dev/ttyUSB0``) is available, and that it can only be used labgrid and not by other applications while the :any:`SerialDriver` is activated. If we use a car analogy here, binding is the process of screwing the car parts together, and activation is igniting the engine. After activation, we can use the driver to do our work:: >>> t.activate(sd) >>> sd.write(b'test') If an underlying hardware resource is not available (or not available after a certain timeout, depending on the driver), the activation step will raise an exception, e.g.:: >>> t.activate(sd) Traceback (most recent call last): File "/usr/lib/python3.8/site-packages/serial/serialposix.py", line 288, in open self.fd = os.open(self.portstr, os.O_RDWR | os.O_NOCTTY | os.O_NONBLOCK) FileNotFoundError: [Errno 2] No such file or directory: '/dev/ttyUSB0' Active drivers can be accessed by class (any :any:`Driver ` or :any:`Protocol `) using some syntactic sugar:: >>> target = Target('main') >>> console = FakeConsoleDriver(target, 'console') >>> target.activate(console) >>> target[FakeConsoleDriver] FakeConsoleDriver(target=Target(name='main', …), name='console', …) >>> target[FakeConsoleDriver, 'console'] FakeConsoleDriver(target=Target(name='main', …), name='console', …) Driver Deactivation ^^^^^^^^^^^^^^^^^^^ Driver deactivation works in a similar manner:: >>> t.deactivate(sd) Drivers need to be deactivated in the following cases: * Some drivers have internal logic depending on the state of the target. For example, the :any:`ShellDriver` remembers whether it has already logged in to the shell. If the target reboots, e.g. through a hardware watchdog timeout, a power cycle, or by issuing a ``reboot`` command on the shell, the ShellDriver's internal state becomes outdated, and the ShellDriver needs to be deactivated and re-activated. * One of the driver's bound resources is required by another driver which is to be activated. For example, the :any:`ShellDriver` and the :any:`BareboxDriver` both require access to a :any:`SerialPort` resource. If both drivers are bound to the same resource object, labgrid will automatically deactivate the BareboxDriver when activating the ShellDriver. Target Cleanup ^^^^^^^^^^^^^^ After you are done with the target, optionally call the cleanup method on your target. While labgrid registers an ``atexit`` handler to cleanup targets, this has the advantage that exceptions can be handled by your application:: >>> try: >>> target.cleanup() >>> except Exception as e: >>> .. 
_usage_environments: Environments ~~~~~~~~~~~~ In practice, it is often useful to separate the `Target` configuration from the code which needs to control the board (such as a test case or installation script). For this use-case, labgrid can construct targets from a configuration file in YAML format: .. code-block:: yaml targets: example: resources: RawSerialPort: port: '/dev/ttyUSB0' drivers: SerialDriver: {} To parse this configuration file, use the :any:`Environment` class:: >>> from labgrid import Environment >>> env = Environment('example-env.yaml') Using :any:`Environment.get_target`, the configured `Targets` can be retrieved by name. Without an argument, `get_target` would default to 'main':: >>> t = env.get_target('example') To access the target's console, the correct driver object can be found by using :any:`Target.get_driver`:: >>> cp = t.get_driver('ConsoleProtocol') >>> cp SerialDriver(target=Target(name='example', env=Environment(config_file='example.yaml')), name=None, state=, txdelay=0.0) >>> cp.write(b'test') When using the ``get_driver`` method, the driver is automatically activated. The driver activation will also wait for unavailable resources when needed. For more information on the environment configuration files and the usage of multiple drivers, see :ref:`configuration:Environment Configuration`. pytest Plugin ------------- labgrid includes a `pytest `_ plugin to simplify writing tests which involve embedded boards. The plugin is configured by providing an environment config file (via the --lg-env pytest option, or the LG_ENV environment variable) and automatically creates the targets described in the environment. These `pytest fixtures `_ are provided: env (session scope) Used to access the :any:`Environment` object created from the configuration file. This is mostly used for defining custom fixtures at the test suite level. target (session scope) Used to access the 'main' :any:`Target` defined in the configuration file. strategy (session scope) Used to access the :any:`Strategy` configured in the 'main' :any:`Target`. Command-Line Options ~~~~~~~~~~~~~~~~~~~~ The pytest plugin also supports the verbosity argument of pytest: - ``-vv``: activates the step reporting feature, showing function parameters and/or results - ``-vvv``: activates debug logging This allows debugging during the writing of tests and inspection during test runs. Other labgrid-related pytest plugin options are: ``--lg-env=LG_ENV`` (was ``--env-config=ENV_CONFIG``) Specify a labgrid environment config file. This is equivalent to labgrid-client's ``-c``/``--config``. ``--lg-coordinator=CROSSBAR_URL`` Specify labgrid coordinator websocket URL. Defaults to ``ws://127.0.0.1:20408/ws``. This is equivalent to labgrid-client's ``-x``/``--crossbar``. ``--lg-log=[path to logfiles]`` Path to store console log file. If option is specified without path the current working directory is used. ``--lg-colored-steps`` Enables the ColoredStepReporter. Different events have different colors. The more colorful, the more important. In order to make less important output "blend into the background" different color schemes are available. See :ref:`LG_COLOR_SCHEME `. ``pytest --help`` shows these options in a separate *labgrid* section. Environment Variables ~~~~~~~~~~~~~~~~~~~~~ LG_ENV ^^^^^^ Behaves like ``LG_ENV`` for :doc:`labgrid-client `. .. _usage-lgcolorscheme: LG_COLOR_SCHEME ^^^^^^^^^^^^^^^ Influences the color scheme used for the Colored Step Reporter. ``dark`` is meant for dark terminal background. 
``light`` is optimized for light terminal background. ``dark-256color`` and ``light-256color`` are respective variants for terminals that support 256 colors. By default, ``dark`` or ``dark-256color`` (depending on the terminal) are used. Takes effect only when used with ``--lg-colored-steps``. LG_PROXY ^^^^^^^^ Specifies a SSH proxy host to be used for port forwards to access the coordinator. Network resources made available by the exporter will prefer their own proxy, and only fallback to LG_PROXY. See also :ref:`overview-proxy-mechanism`. Simple Example ~~~~~~~~~~~~~~ As a minimal example, we have a target connected via a USB serial converter ('/dev/ttyUSB0') and booted to the Linux shell. The following environment config file (``shell-example.yaml``) describes how to access this board: .. code-block:: yaml targets: main: resources: RawSerialPort: port: '/dev/ttyUSB0' drivers: SerialDriver: {} ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' username: 'root' We then add the following test in a file called ``test_example.py``:: def test_echo(target): command = target.get_driver('CommandProtocol') result = command.run_check('echo OK') assert 'OK' in result To run this test, we simply execute pytest in the same directory with the environment config: .. code-block:: bash $ pytest --lg-env shell-example.yaml --verbose ============================= test session starts ============================== platform linux -- Python 3.5.3, pytest-3.0.6, py-1.4.32, pluggy-0.4.0 … collected 1 items test_example.py::test_echo PASSED =========================== 1 passed in 0.51 seconds =========================== pytest has automatically found the test case and executed it on the target. Custom Fixture Example ~~~~~~~~~~~~~~~~~~~~~~ When writing many test cases which use the same driver, we can get rid of some common code by wrapping the `CommandProtocol` in a fixture. As pytest always executes the ``conftest.py`` file in the test suite directory, we can define additional fixtures there:: import pytest @pytest.fixture(scope='session') def command(target): return target.get_driver('CommandProtocol') With this fixture, we can simplify the ``test_example.py`` file to:: def test_echo(command): result = command.run_check('echo OK') assert 'OK' in result Strategy Fixture Example ~~~~~~~~~~~~~~~~~~~~~~~~ When using a :any:`Strategy` to transition the target between states, it is useful to define a function scope fixture per state in ``conftest.py``:: import pytest @pytest.fixture(scope='function') def switch_off(target, strategy, capsys): with capsys.disabled(): strategy.transition('off') @pytest.fixture(scope='function') def bootloader_command(target, strategy, capsys): with capsys.disabled(): strategy.transition('barebox') return target.get_active_driver('CommandProtocol') @pytest.fixture(scope='function') def shell_command(target, strategy, capsys): with capsys.disabled(): strategy.transition('shell') return target.get_active_driver('CommandProtocol') .. note:: The ``capsys.disabled()`` context manager is only needed when using the :any:`ManualPowerDriver`, as it will not be able to access the console otherwise. See the corresponding `pytest documentation for details `_. 
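Note that the ``switch_off`` fixture defined above is not requested by the example tests that follow. As a minimal sketch (assuming the same ``conftest.py`` and environment configuration, and that pytest sets up the fixtures in the order they are listed), a test can combine it with ``bootloader_command`` to check a cold boot into the bootloader::

    def test_barebox_after_power_off(switch_off, bootloader_command):
        # 'switch_off' first transitions the target to the 'off' state,
        # 'bootloader_command' then boots it into barebox again
        stdout = bootloader_command.run_check('version')
        assert 'barebox' in '\n'.join(stdout)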
With the fixtures defined above, switching between bootloader and Linux shells is easy:: def test_barebox_initial(bootloader_command): stdout = bootloader_command.run_check('version') assert 'barebox' in '\n'.join(stdout) def test_shell(shell_command): stdout = shell_command.run_check('cat /proc/version') assert 'Linux' in stdout[0] def test_barebox_after_reboot(bootloader_command): bootloader_command.run_check('true') .. note:: The `bootloader_command` and `shell_command` fixtures use :any:`Target.get_active_driver` to get the currently active `CommandProtocol` driver (either :any:`BareboxDriver` or :any:`ShellDriver`). Activation and deactivation of drivers is handled by the :any:`BareboxStrategy` in this example. The `Strategy` needs additional drivers to control the target. Adapt the following environment config file (``strategy-example.yaml``) to your setup: .. code-block:: yaml targets: main: resources: RawSerialPort: port: '/dev/ttyUSB0' drivers: ManualPowerDriver: name: 'example-board' SerialDriver: {} BareboxDriver: prompt: 'barebox@[^:]+:[^ ]+ ' ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' username: 'root' BareboxStrategy: {} For this example, you should get a report similar to this: .. code-block:: bash $ pytest --lg-env strategy-example.yaml -v ============================= test session starts ============================== platform linux -- Python 3.5.3, pytest-3.0.6, py-1.4.32, pluggy-0.4.0 … collected 3 items test_strategy.py::test_barebox_initial main: CYCLE the target example-board and press enter PASSED test_strategy.py::test_shell PASSED test_strategy.py::test_barebox_after_reboot main: CYCLE the target example-board and press enter PASSED ========================== 3 passed in 29.77 seconds =========================== Feature Flags ~~~~~~~~~~~~~ labgrid includes support for feature flags on a global and target scope. Adding a ``@pytest.mark.lg_feature`` decorator to a test ensures it is only executed if the desired feature is available:: import pytest @pytest.mark.lg_feature("camera") def test_camera(target): [...] Here's an example environment configuration: .. code-block:: yaml targets: main: features: - camera resources: {} drivers: {} This would run the above test, however the following configuration would skip the test because of the missing feature: .. code-block:: yaml targets: main: features: - console resources: {} drivers: {} pytest will record the missing feature as the skip reason. For tests with multiple required features, pass them as a list to pytest:: import pytest @pytest.mark.lg_feature(["camera", "console"]) def test_camera(target): [...] Features do not have to be set per target, they can also be set via the global features key: .. code-block:: yaml features: - camera targets: main: features: - console resources: {} drivers: {} This YAML configuration would combine both the global and the target features. Test Reports ~~~~~~~~~~~~ pytest-html ^^^^^^^^^^^ With the `pytest-html plugin `_, the test results can be converted directly to a single-page HTML report: .. code-block:: bash $ pip install pytest-html $ pytest --lg-env shell-example.yaml --html=report.html JUnit XML ^^^^^^^^^ JUnit XML reports can be generated directly by pytest and are especially useful for use in CI systems such as `Jenkins `_ with the `JUnit Plugin `_. They can also be converted to other formats, such as HTML with `junit2html tool `_: .. 
code-block:: bash $ pip install junit2html $ pytest --lg-env shell-example.yaml --junit-xml=report.xml $ junit2html report.xml labgrid adds additional xml properties to a test run, these are: - ENV_CONFIG: Name of the configuration file - TARGETS: List of target names - TARGET_{NAME}_REMOTE: optional, if the target uses a RemotePlace resource, its name is recorded here - PATH_{NAME}: optional, labgrid records the name and path - PATH_{NAME}_GIT_COMMIT: optional, labgrid tries to record git sha1 values for every path - IMAGE_{NAME}: optional, labgrid records the name and path to the image - IMAGE_{NAME}_GIT_COMMIT: optional, labgrid tries to record git sha1 values for every image Command-Line ------------ labgrid contains some command line tools which are used for remote access to resources. See :doc:`man/client`, :doc:`man/device-config` and :doc:`man/exporter` for more information. labgrid-0.4.1/docker-requirements.txt000066400000000000000000000000161415016572500176540ustar00rootroot00000000000000docker==4.1.0 labgrid-0.4.1/dockerfiles/000077500000000000000000000000001415016572500154205ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/Dockerfile000066400000000000000000000061441415016572500174170ustar00rootroot00000000000000FROM debian:buster-slim AS labgrid-base LABEL maintainer="eha@deif.com" ENV DEBIAN_FRONTEND=noninteractive COPY ./ /opt/labgrid/ RUN set -e ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools python3-wheel git build-essential libsnappy-dev ;\ pip3 install -U pip;\ apt clean ;\ rm -rf /var/lib/apt/lists/* ;\ git clone git://github.com/vishnubob/wait-for-it.git opt/wait-for-it && cd opt/wait-for-it && git reset --hard 54d1f0bfeb6557adf8a3204455389d0901652242 # # Client # FROM labgrid-base AS labgrid-client ARG VERSION RUN set -e ;\ cd /opt/labgrid ;\ pip3 install yq ;\ pip3 install --no-cache-dir -r requirements.txt ;\ SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" python3 setup.py install ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends microcom openssh-client rsync jq qemu-system; \ apt clean ;\ rm -rf /var/lib/apt/lists/* CMD ["/bin/bash"] # # Coordinator # FROM labgrid-base AS labgrid-coordinator ARG VERSION ENV CROSSBAR_DIR=/opt/crossbar RUN set -e ;\ cd /opt/labgrid ;\ pip3 install --no-cache-dir -r crossbar-requirements.txt ;\ SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" python3 setup.py install VOLUME /opt/crossbar EXPOSE 20408 CMD ["crossbar", "start", "--config", "/opt/labgrid/.crossbar/config.yaml"] # # ser2net # # These have to be built from source to bring in a newer version that has a # needed bugfix. 
This can be removed once the images are switched to # debian:bullseye, as it has a new enough version # FROM debian:buster-slim AS ser2net RUN apt update && \ apt install --yes --no-install-recommends \ build-essential \ cmake \ python3-dev \ wget \ ca-certificates \ libsctp-dev \ libssl-dev \ pkg-config \ file \ libyaml-dev \ && \ apt clean && \ rm -rf /var/lib/apt/lists/* RUN mkdir -p /src RUN cd /src && \ wget https://downloads.sourceforge.net/project/ser2net/ser2net/gensio-2.2.4.tar.gz && \ tar -xvzf gensio-2.2.4.tar.gz && \ cd gensio-2.2.4 && \ mkdir build && \ cd build && \ ../configure --prefix=/usr && \ make && \ make install && \ make install DESTDIR=/opt/ser2net RUN cd /src && \ wget https://downloads.sourceforge.net/project/ser2net/ser2net/ser2net-4.3.3.tar.gz && \ tar -xvzf ser2net-4.3.3.tar.gz -C /src && \ cd /src/ser2net-4.3.3 && \ mkdir build && \ cd build && \ ../configure --prefix=/usr && \ make && \ make install DESTDIR=/opt/ser2net # # Exporter # FROM labgrid-base AS labgrid-exporter ARG VERSION COPY dockerfiles/exporter/entrypoint.sh /entrypoint.sh RUN set -e ;\ cd /opt/labgrid ;\ pip3 install --no-cache-dir -r requirements.txt ;\ SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" python3 setup.py install ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends \ libyaml-0-2 \ libsctp1 \ ; \ apt clean ;\ rm -rf /var/lib/apt/lists/* COPY --from=ser2net /opt/ser2net / VOLUME /opt/conf CMD ["/entrypoint.sh"] labgrid-0.4.1/dockerfiles/README.rst000066400000000000000000000071731415016572500171170ustar00rootroot00000000000000Labgrid Docker images ===================== This folder contains Dockerfile's for building Docker images for the 3 different components of a Labgrid distributed infrastructure. - **labgrid-coordinator** An image for with crossbar which can be used to run a Labgrid coordinator instance. - **labgrid-client** An image with the Labgrid client tools and pytest integration. - **labgrid-exporter** An image with the Labgrid exporter tools. Build ----- To build one of the above images, you need to run the ``docker build`` command in the root of this repository. Example showing how to build labgrid-client image: .. code-block:: bash $ docker build --target labgrid-client -t labgrid-client -f dockerfiles/Dockerfile . Using `BuildKit `_ is recommended to reduce build times. You can also choose to build all 3 images, with the included script (which also must be run from the root of this repository): .. code-block:: bash $ ./dockerfiles/build.sh Usage ----- All 3 images are to be considered base images with the required software installed. No policy or configuration is done. labgrid-coordinator usage ~~~~~~~~~~~~~~~~~~~~~~~~~ The labgrid-coordinator comes with a preconfigured Crossbar.io server. It listens to port 20408, so you probably want to publish that so you can talk to the coordinator. State is written to ``/opt/crossbar``. You might want to bind a volume to that so you can restart the service without loosing state. .. code-block:: bash $ docker run -t -p 20408:20408 -v $HOME/crossbar:/opt/crossbar labgrid-coordinator labgrid-client usage ~~~~~~~~~~~~~~~~~~~~ The labgrid-client image can be used to run ``labgrid-client`` and ``pytest`` commands. For example listing available places registered at coordinator at ws://192.168.1.42:20408/ws .. code-block:: bash $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws labgrid-client \ labgrid-client places Or running all pytest/labgrid tests at current directory: .. 
code-block:: bash $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws labgrid-client \ pytest labgrid-exporter usage ~~~~~~~~~~~~~~~~~~~~~~ The labgrid-exporter image runs a labgrid-exporter and optionally a ser2net service. Configuration is not included, but needs to be bind-mounted to /opt/conf/exporter.yaml and /opt/conf/ser2net.conf (optional). Start it with something like: .. code-block:: bash $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws \ -v $HOME/exporter-conf:/opt/conf \ labgrid-exporter If using ser2net or if "exporting" e.g. a serial device, the devices needed must be added to the Docker container (``docker run --device`` option). Moreover, if using udev this must be mounted in as well: ``docker run -v run/udev:/run/udev:ro``. Staging ------- The ``staging`` folder contains a docker compose based example setup, where the images described above are used to create a setup with the following instances: - **coordinator** - **exporter** - **client** - **dut** The environment serves both to allow checking that the setup still functions after changes, and to act as an example of how to configure the docker images needed to run a minimal setup. To use the staging environment to conduct a smoke test, first build the images as instructed below: .. code-block:: bash $ ./dockerfiles/build.sh Then use docker compose to start all services except the client: .. code-block:: bash $ cd dockerfiles/staging $ CURRENT_UID=$(id -u):$(id -g) docker-compose up -d coordinator exporter dut To run the smoke test, just run the client: .. code-block:: bash $ docker-compose up client labgrid-0.4.1/dockerfiles/build.sh000077500000000000000000000004051415016572500170550ustar00rootroot00000000000000#!/bin/sh set -ex export DOCKER_BUILDKIT=1 VERSION="$(./setup.py --version | tail -1)" for t in client exporter coordinator; do docker build --build-arg VERSION="$VERSION" \ --target labgrid-${t} -t labgrid-${t} -f dockerfiles/Dockerfile .
done labgrid-0.4.1/dockerfiles/exporter/000077500000000000000000000000001415016572500172705ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/exporter/entrypoint.sh000077500000000000000000000002211415016572500220350ustar00rootroot00000000000000#!/bin/sh set -e if [ -f /opt/conf/ser2net.conf ]; then ser2net -c /opt/conf/ser2net.conf fi labgrid-exporter "$@" /opt/conf/exporter.yaml labgrid-0.4.1/dockerfiles/staging/000077500000000000000000000000001415016572500170545ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/client/000077500000000000000000000000001415016572500203325ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/client/.ssh/000077500000000000000000000000001415016572500212055ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/client/.ssh/id_rsa000066400000000000000000000034371415016572500224000ustar00rootroot00000000000000-----BEGIN OPENSSH PRIVATE KEY----- b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn NhAAAAAwEAAQAAAQEApthp1T1I7e3c0wdKXt0U3gG7PNyP9D+bOtsIbPKPJxGL2V69WOl+ D2zO1AcqjcRSSPmSI/TXyV+Pkbjt12xoJ+AJoeUfsCOMJffTolEPwfIJjTd7C4EU/z9/s3 BMNrP/RzjXyp9mQ97gvblG9ZlfA8pxBQP4M7870LCVBl2oYmu4fNAxC3BVPPXFUgPEzJao DsW7gepRE9ehT2LppvRS2zpDfwVmbRQ99EM+hGIclFiPJrpCSnYJn/MQdIZxLSdpFnQsCA kIx+M/xCfbtSFlMCb1Tkh9soLzB4EN/Z/qzt3szsYwz/tK77g7cghC7I6Zixjtx/KOoxnz AEF6aDnU9QAAA8j4D1di+A9XYgAAAAdzc2gtcnNhAAABAQCm2GnVPUjt7dzTB0pe3RTeAb s83I/0P5s62whs8o8nEYvZXr1Y6X4PbM7UByqNxFJI+ZIj9NfJX4+RuO3XbGgn4Amh5R+w I4wl99OiUQ/B8gmNN3sLgRT/P3+zcEw2s/9HONfKn2ZD3uC9uUb1mV8DynEFA/gzvzvQsJ UGXahia7h80DELcFU89cVSA8TMlqgOxbuB6lET16FPYumm9FLbOkN/BWZtFD30Qz6EYhyU WI8mukJKdgmf8xB0hnEtJ2kWdCwICQjH4z/EJ9u1IWUwJvVOSH2ygvMHgQ39n+rO3ezOxj DP+0rvuDtyCELsjpmLGO3H8o6jGfMAQXpoOdT1AAAAAwEAAQAAAQBcj5c3K8a36MwnbtX9 ht06xO8hNqPONzNhFX65Il/0prFVKFAXcYH0AMNFsawT9iRIQ9ylggHsv80gZN3eM1AdPx dY74oC59WqgrCwThRV3ncaFvt2SGhjqtCntdcySe2Hj6t3x7KuWImJ9628NxgVPEwzIh6i ZqdCgZRLcc4muwJfhkeFjyuL8V/+i9rUGjNvtxT3wYaNk1pHoqtgW6GHP5Bbby95obv8gC VD63F8CZu3o6iOtqVw4NRlIy1tJKYT10PaASzzQMzsui7Zik8GyVHricyo8zVKThYNa5cY D5XufwfeMqD+tO/R3V1ywT2HUt1jJeDomnLJEBNycVCJAAAAgQCLpduRf/R9dJIsBsvkzi DJsDLfxu43aV8b4m07OAiOauTyiNutgrD2QWSJNFa8QmnJWjIx2Fd+xQPKuYO1rktks6Qh 8LlS6Tzrt63hzlrmFpkJQBuJHMEbRpe3oUZ9SzOxCxZdfm+PWHKavIGGKMwCPuB7Fc1VBy miZNZH2BlLJgAAAIEA2+dKwjrPMVg4dp+TIxZLwwCHagxgfJrhf5eHrcdZ3cDwWwdsSbAo ykHwpoiPwbo/rP5PwLv01VNcUug7x8z0P5ARE+YvRKTO29VI3oQQjkqLlBFq3Nxd+QwBRe ZLhLzW+99gvqOuzEOxUvgPKUK2hfYFXm1i1ZRS3ArW/QGc/l8AAACBAMI7iA8SQvqcXVoT dAQMWwlC8wtLuZSaICsFAYfBTjQF0dFoWa4BBtgvdLsFCmDFJjiSSlAVVUfnqPCRaA+uOo v4VKXqspUytm7LNQi9skYKNEBD7EIdTXmkDZ1lveYYEEcW4D10bJudlELoFl4sJEJUydlM mNkFWN9sBi3KZMUrAAAAD3JldnNiZWNoQHVidW50dQECAw== -----END OPENSSH PRIVATE KEY----- labgrid-0.4.1/dockerfiles/staging/client/simple-test/000077500000000000000000000000001415016572500226005ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/client/simple-test/remote.yaml000066400000000000000000000002051415016572500247540ustar00rootroot00000000000000targets: main: resources: RemotePlace: name: example-place drivers: - SSHDriver: keyfile: "" labgrid-0.4.1/dockerfiles/staging/client/simple-test/remote_shell_test.py000066400000000000000000000003261415016572500266740ustar00rootroot00000000000000def test_shell(target): ssh_driver = target.get_driver('SSHDriver') target.activate(ssh_driver) stdout, stderr, returncode = ssh_driver.run('uname -r') assert stdout print(f'Kernel {stdout}') 
labgrid-0.4.1/dockerfiles/staging/crossbar/000077500000000000000000000000001415016572500206725ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/crossbar/places_example.yaml000066400000000000000000000004411415016572500245370ustar00rootroot00000000000000example-place: acquired: null acquired_resources: [] aliases: [] allowed: [] changed: 1575386386.3115733 comment: '' created: 1575376664.3120446 matches: - cls: '*' exporter: '*' group: example-group name: null rename: null reservation: null tags: {} labgrid-0.4.1/dockerfiles/staging/docker-compose.yml000066400000000000000000000033201415016572500225070ustar00rootroot00000000000000version: '3.3' services: coordinator: image: "labgrid-coordinator" volumes: - "./crossbar:/home/root/crossbar" tty: true network_mode: "host" command: bash -c "cp /home/root/crossbar/places_example.yaml /opt/crossbar/places.yaml && crossbar start --config /opt/labgrid/.crossbar/config.yaml" client: image: "labgrid-client" volumes: - "./client/simple-test:/simple-test" - "./client/.ssh:/root/.ssh" tty: true stdin_open: true network_mode: "host" tmpfs: "/tmp" # Use wait-for-it to ensure exporter service is up, as exporter is assuming exporter to # Use labgrid-client r to ensure the exporter has populated the resource list in the coordinator # Use sleep to fix the problem that sometimes the coordinator is not ready even though the service is up command: bash -c "set -e && cd /simple-test && /opt/wait-for-it/wait-for-it.sh 127.0.0.1:20408 && sleep 5 && while [ -z $$(/usr/local/bin/labgrid-client r) ]; do echo 'Wait one sec on coordinator' && sleep 1; done && /usr/local/bin/labgrid-client -p example-place lock && /usr/local/bin/pytest --lg-env remote.yaml -s -vv && /usr/local/bin/labgrid-client -p example-place unlock" depends_on: - coordinator - exporter - dut exporter: image: "labgrid-exporter" volumes: - "./exporter-conf:/opt/conf" - "/run/udev:/run/udev:ro" depends_on: - coordinator tty: true network_mode: "host" stdin_open: true command: bash -c "set -e && /opt/wait-for-it/wait-for-it.sh 127.0.0.1:20408 -- labgrid-exporter /opt/conf/exporter.yaml" dut: build: context: "./dut" network_mode: "host" labgrid-0.4.1/dockerfiles/staging/dut/000077500000000000000000000000001415016572500176505ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/dut/Dockerfile000066400000000000000000000015631415016572500216470ustar00rootroot00000000000000FROM debian:buster-slim MAINTAINER "Kasper Revsbech" ENV DEBIAN_FRONTEND=noninteractive RUN set -e ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends openssh-server;\ apt clean ;\ rm -rf /var/lib/apt/lists/* ;\ mkdir /var/run/sshd ;\ echo 'root:PASSWORD' | chpasswd ;\ echo 'PermitRootLogin yes' >> /etc/ssh/sshd_config ;\ sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd # SSH login fix. 
Otherwise user is kicked off after login COPY [--chown=root:root] ./authorized_keys /root/.ssh/authorized_keys # As sshd scrubs ENV variables if they are set by the ENV varibale ensure to put the into /etc/profile as shown below ENV NOTVISIBLE "in users profile" RUN echo "export VISIBLE=now" >> /etc/profile EXPOSE 2222 CMD ["/usr/sbin/sshd", "-D", "-p", "2222"] labgrid-0.4.1/dockerfiles/staging/dut/authorized_keys000066400000000000000000000005751415016572500230130ustar00rootroot00000000000000ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCm2GnVPUjt7dzTB0pe3RTeAbs83I/0P5s62whs8o8nEYvZXr1Y6X4PbM7UByqNxFJI+ZIj9NfJX4+RuO3XbGgn4Amh5R+wI4wl99OiUQ/B8gmNN3sLgRT/P3+zcEw2s/9HONfKn2ZD3uC9uUb1mV8DynEFA/gzvzvQsJUGXahia7h80DELcFU89cVSA8TMlqgOxbuB6lET16FPYumm9FLbOkN/BWZtFD30Qz6EYhyUWI8mukJKdgmf8xB0hnEtJ2kWdCwICQjH4z/EJ9u1IWUwJvVOSH2ygvMHgQ39n+rO3ezOxjDP+0rvuDtyCELsjpmLGO3H8o6jGfMAQXpoOdT1 labgrid-0.4.1/dockerfiles/staging/exporter-conf/000077500000000000000000000000001415016572500216475ustar00rootroot00000000000000labgrid-0.4.1/dockerfiles/staging/exporter-conf/exporter.yaml000066400000000000000000000001671415016572500244070ustar00rootroot00000000000000example-group: location: docker-exporter NetworkService: address: 127.0.0.1 port: 2222 username: root labgrid-0.4.1/examples/000077500000000000000000000000001415016572500147445ustar00rootroot00000000000000labgrid-0.4.1/examples/barebox/000077500000000000000000000000001415016572500163665ustar00rootroot00000000000000labgrid-0.4.1/examples/barebox/conftest.py000066400000000000000000000002511415016572500205630ustar00rootroot00000000000000import pytest @pytest.fixture(scope='session') def command(target): barebox = target.get_driver('CommandProtocol') target.activate(barebox) return barebox labgrid-0.4.1/examples/barebox/local-usb.yaml000066400000000000000000000003751415016572500211400ustar00rootroot00000000000000targets: main: resources: USBSerialPort: match: ID_SERIAL_SHORT: 'P-00-01084' drivers: ManualPowerDriver: name: "example" SerialDriver: {} BareboxDriver: prompt: 'barebox@[^:]+:[^ ]+ ' labgrid-0.4.1/examples/barebox/local.yaml000066400000000000000000000003431415016572500203440ustar00rootroot00000000000000targets: main: resources: RawSerialPort: port: "/dev/ttyUSB0" drivers: ManualPowerDriver: name: "example" SerialDriver: {} BareboxDriver: prompt: 'barebox@[^:]+:[^ ]+ ' labgrid-0.4.1/examples/barebox/test_barebox.py000066400000000000000000000004761415016572500214300ustar00rootroot00000000000000def test_barebox(command): stdout, stderr, returncode = command.run('version') assert returncode == 0 assert stdout assert not stderr assert 'barebox' in '\n'.join(stdout) stdout, stderr, returncode = command.run('false') assert returncode == 1 assert not stdout assert not stderr labgrid-0.4.1/examples/barebox/test_bootchooser.py000066400000000000000000000005011415016572500223210ustar00rootroot00000000000000import pytest def test_bootchooser(command): stdout, stderr, returncode = command.run('bootchooser -i') if returncode == 127: pytest.skip("bootchooser command not available") assert returncode == 0 assert not stderr assert stdout[0].startswith('Good targets') assert stdout[1] != 'none' labgrid-0.4.1/examples/barebox/test_sleep.py000066400000000000000000000011511415016572500211050ustar00rootroot00000000000000from time import monotonic from pytest import approx def test_sleep(command): # measure the round-trip-time timestamp = monotonic() stdout, stderr, returncode = command.run('true') elapsed_true = monotonic() - timestamp assert returncode == 0 assert not stdout 
assert not stderr timestamp = monotonic() stdout, stderr, returncode = command.run('sleep 1') elapsed_sleep = monotonic() - timestamp assert returncode == 0 assert not stdout assert not stderr assert elapsed_true < elapsed_sleep assert elapsed_sleep - elapsed_true == approx(1.0, abs=1e-2) labgrid-0.4.1/examples/barebox/test_state.py000066400000000000000000000004561415016572500211240ustar00rootroot00000000000000import pytest def test_state(command): stdout, stderr, returncode = command.run('state') if returncode == 127: pytest.skip("state command not available") assert returncode == 0 assert not stderr assert stdout[0] == 'registered state instances:' assert len(stdout) > 1 labgrid-0.4.1/examples/barebox/test_watchdog.py000066400000000000000000000006101415016572500215740ustar00rootroot00000000000000import pytest def test_watchdog(command): stdout, stderr, returncode = command.run('wd 1') if returncode == 127: pytest.skip("wd command not available") assert returncode == 0 assert not stderr assert not stdout command._await_prompt() stdout = command.run_check('echo ${global.system.reset}') assert len(stdout) == 1 assert stdout[0] == 'WDG' labgrid-0.4.1/examples/deditec-relais8/000077500000000000000000000000001415016572500177125ustar00rootroot00000000000000labgrid-0.4.1/examples/deditec-relais8/deditec.py000066400000000000000000000013061415016572500216650ustar00rootroot00000000000000import sys import labgrid import logging import time from labgrid import Environment, StepReporter from labgrid.strategy.bareboxstrategy import Status from labgrid.driver.deditecrelaisdriver import DeditecRelaisDriver # enable debug logging logging.basicConfig( level=logging.DEBUG, format='%(levelname)7s: %(message)s', stream=sys.stderr, ) # show labgrid steps on the console StepReporter() t = labgrid.Target('main') r = labgrid.resource.udev.DeditecRelais8(t, name=None, index=1) d = DeditecRelaisDriver(t, name=None) p = t.get_driver("DigitalOutputProtocol") print(t.resources) p.set(True) print(p.get()) time.sleep(2) p.set(False) print(p.get()) time.sleep(2) p.set(True) print(p.get()) labgrid-0.4.1/examples/deditec-relais8/deditec_remote.py000066400000000000000000000012131415016572500232350ustar00rootroot00000000000000import sys import labgrid import logging import time from labgrid import Environment, StepReporter from labgrid.strategy.bareboxstrategy import Status from labgrid.driver.deditecrelaisdriver import DeditecRelaisDriver # enable debug logging logging.basicConfig( level=logging.DEBUG, format='%(levelname)7s: %(message)s', stream=sys.stderr, ) # show labgrid steps on the console StepReporter() e = labgrid.Environment('import-dedicontrol.yaml') t = e.get_target() p = t.get_driver("DigitalOutputProtocol") print(t.resources) p.set(True) print(p.get()) time.sleep(2) p.set(False) print(p.get()) time.sleep(2) p.set(True) print(p.get()) labgrid-0.4.1/examples/deditec-relais8/export-didicontrol.yaml000066400000000000000000000000531415016572500244250ustar00rootroot00000000000000desk: DeditecRelais8: index: 2 labgrid-0.4.1/examples/deditec-relais8/import-dedicontrol.yaml000066400000000000000000000002421415016572500244120ustar00rootroot00000000000000targets: main: resources: RemotePlace: name: dedi drivers: DeditecRelaisDriver: {} options: crossbar_url: 'ws://labgrid:20408/ws' labgrid-0.4.1/examples/docker/000077500000000000000000000000001415016572500162135ustar00rootroot00000000000000labgrid-0.4.1/examples/docker/README.md000066400000000000000000000012671415016572500175000ustar00rootroot00000000000000# Prerequisites 
# To run the docker example one has to have docker-ce installed and accessible via "unix:///var/run/docker.sock" (the default). The default docker bridge network also needs to be accessible from the pytest executor since the test tries to establish an ssh connection to the container (again the default after a standard installation of docker-ce). After following steps similar to [Getting started](https://labgrid.readthedocs.io/en/latest/getting_started.html#running-your-first-test) the demo can be run with: pytest -s --lg-env env.yaml test_shell.py Successfully tested against Docker version 18.06.1-ce, build e68fc7a. But it should work with later versions as well. labgrid-0.4.1/examples/docker/conftest.py000066400000000000000000000003341415016572500204120ustar00rootroot00000000000000import pytest @pytest.fixture(scope='session') def command(target): strategy = target.get_driver('DockerStrategy') strategy.transition("shell") shell = target.get_driver('CommandProtocol') return shell labgrid-0.4.1/examples/docker/env.yaml000066400000000000000000000006711415016572500176730ustar00rootroot00000000000000targets: main: resources: - DockerDaemon: docker_daemon_url: "unix:///var/run/docker.sock" drivers: - DockerDriver: image_uri: "rastasheep/ubuntu-sshd:16.04" container_name: "ubuntu-lg-example" host_config: {"network_mode":"bridge"} network_services: [{"port":22,"username":"root","password":"root"}] - DockerStrategy: {} - SSHDriver: keyfile: "" labgrid-0.4.1/examples/docker/test_shell.py000066400000000000000000000005271415016572500207370ustar00rootroot00000000000000def test_shell(command): stdout, stderr, returncode = command.run('cat /proc/version') assert returncode == 0 assert len(stdout) > 0 assert len(stderr) == 0 assert 'Linux' in stdout[0] stdout, stderr, returncode = command.run('false') assert returncode != 0 assert len(stdout) == 0 assert len(stderr) == 0 labgrid-0.4.1/examples/library/000077500000000000000000000000001415016572500164105ustar00rootroot00000000000000labgrid-0.4.1/examples/library/phytec.yaml000066400000000000000000000005241415016572500205710ustar00rootroot00000000000000targets: main: resources: RemotePlace: name: phycore-imx6 drivers: NetworkPowerDriver: {} SerialDriver: {} BareboxDriver: prompt: 'barebox@[^:]+:[^ ]+ ' ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' username: 'root' BareboxStrategy: {} labgrid-0.4.1/examples/library/test.py000077500000000000000000000013631415016572500177470ustar00rootroot00000000000000#!/usr/bin/env python3 """Power-cycle a target until the /dev/nand0 device is missing.""" import sys import logging from labgrid import Environment, StepReporter from labgrid.strategy.bareboxstrategy import Status # enable debug logging logging.basicConfig( level=logging.DEBUG, format='%(levelname)7s: %(message)s', stream=sys.stderr, ) # show labgrid steps on the console StepReporter() def run_once(target): s = target.get_driver('BareboxStrategy') s.status = Status.unknown # force a power-cycle s.transition('barebox') cmd = target['CommandProtocol'] cmd.run_check('test -e /dev/nand0') target.deactivate(cmd) env = Environment(sys.argv[1]) target = env.get_target('main') while True: run_once(target) labgrid-0.4.1/examples/networkmanager/000077500000000000000000000000001415016572500177705ustar00rootroot00000000000000labgrid-0.4.1/examples/networkmanager/nm.env000066400000000000000000000002421415016572500211120ustar00rootroot00000000000000targets: main: resources: RemotePlace: name: nm-test drivers: NetworkInterfaceDriver: {} options: crossbar_url: 
'ws://labgrid/ws' labgrid-0.4.1/examples/networkmanager/nm.py000066400000000000000000000034171415016572500207610ustar00rootroot00000000000000import logging, sys from pprint import pprint from labgrid import * # enable debug logging logging.basicConfig( level=logging.DEBUG, format='%(levelname)7s: %(message)s', stream=sys.stderr, ) # show labgrid steps on the console StepReporter() e = Environment('nm.env') t = e.get_target() d = t.get_driver('NetworkInterfaceDriver') # based on https://developer.gnome.org/NetworkManager/stable/ch01.html, but adapted to python dicts s_client = { 'connection': { 'type': "802-11-wireless", }, '802-11-wireless': { 'mode': "infrastructure", 'ssid': "local-rpi", }, '802-11-wireless-security': { 'key-mgmt': "wpa-psk", 'psk': "obMinwyurArc5", }, 'ipv4': { 'method': "auto", 'ignore-auto-dns': True, 'ignore-auto-routes': True, 'never-default': True, }, 'ipv6': { 'method': "link-local", }, } s_ap = { 'connection': { 'type': "802-11-wireless", }, '802-11-wireless': { 'mode': "ap", 'ssid': "local-rpi", }, '802-11-wireless-security': { 'key-mgmt': "wpa-psk", 'psk': "obMinwyurArc5", }, 'ipv4': { #'method': "auto", #'method': "link-local", 'method': "shared", 'addresses': ["172.16.0.2/29"], }, 'ipv6': { 'method': "link-local", }, } d.disable() d.wait_state('disconnected') print("access points after scan") pprint(d.get_access_points()) d.configure(s_ap) d.wait_state('activated') print("settings in AP mode") pprint(d.get_settings()) print("state in AP mode") pprint(d.get_state()) #d.configure(s_client) #d.wait_state('activated') #print("settings in client mode") #pprint(d.get_settings()) #print("state in client mode") #pprint(d.get_state()) labgrid-0.4.1/examples/pytest.ini000066400000000000000000000000411415016572500167700ustar00rootroot00000000000000# ignore the top-level setup.cfg labgrid-0.4.1/examples/pyvisa/000077500000000000000000000000001415016572500162575ustar00rootroot00000000000000labgrid-0.4.1/examples/pyvisa/env.yaml000066400000000000000000000002471415016572500177360ustar00rootroot00000000000000targets: main: resources: PyVISADevice: type: "TCPIP" url: "192.168.110.11" drivers: PyVISADriver: name: "PyVisa_device" labgrid-0.4.1/examples/pyvisa/pyvisa_example.py000066400000000000000000000006151415016572500216610ustar00rootroot00000000000000import pytest @pytest.fixture() def signal_generator(target): return target.get_driver('PyVISADriver').get_session() def test_with_signal_generator_example(signal_generator): signal_generator.write('*RST') # Setup channel 1 signal_generator.write('C1:BSWV WVTP,SQUARE,HLEV,5,LLEV,0,DUTY,50') # Switch on channel 1 signal_generator.write('C1:OUTP ON,LOAD,HZ,PLRT,NOR') labgrid-0.4.1/examples/remote/000077500000000000000000000000001415016572500162375ustar00rootroot00000000000000labgrid-0.4.1/examples/remote/remote.yaml000066400000000000000000000002461415016572500204200ustar00rootroot00000000000000targets: main: resources: RemotePlace: name: test drivers: SerialDriver: {} BareboxDriver: prompt: 'barebox@[^:]+:[^ ]+ ' labgrid-0.4.1/examples/remote/test_barebox.py000066400000000000000000000006151415016572500212740ustar00rootroot00000000000000def test_target(target): barebox = target.get_driver('CommandProtocol') target.activate(barebox) stdout, stderr, returncode = barebox.run('version') assert returncode == 0 assert stdout assert not stderr assert 'barebox' in '\n'.join(stdout) stdout, stderr, returncode = barebox.run('false') assert returncode == 1 assert not stdout assert not stderr 
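The remote example above (``remote.yaml`` together with ``test_barebox.py``) is meant to be run against a coordinator. A minimal sketch of an invocation, assuming a place named ``test`` has already been created on the coordinator and using a placeholder coordinator URL:

.. code-block:: bash

    $ export LG_CROSSBAR=ws://coordinator.example:20408/ws
    $ labgrid-client -p test lock
    $ pytest --lg-env remote.yaml
    $ labgrid-client -p test unlock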
labgrid-0.4.1/examples/shell/000077500000000000000000000000001415016572500160535ustar00rootroot00000000000000labgrid-0.4.1/examples/shell/conftest.py000066400000000000000000000002431415016572500202510ustar00rootroot00000000000000import pytest @pytest.fixture(scope='session') def command(target): shell = target.get_driver('CommandProtocol') target.activate(shell) return shell labgrid-0.4.1/examples/shell/local.yaml000066400000000000000000000004261415016572500200330ustar00rootroot00000000000000targets: main: resources: RawSerialPort: port: "/dev/ttyUSB0" drivers: ManualPowerDriver: name: "example" SerialDriver: {} ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' username: 'root' labgrid-0.4.1/examples/shell/test_hwclock.py000066400000000000000000000017431415016572500211230ustar00rootroot00000000000000from datetime import datetime def test_hwclock_rate(command): """Test that the hardware clock rate is not too inaccurate.""" result = command.run_check('hwclock -c | head -n 3') hw_time, sys_time, freq_offset_ppm, tick = result[-1].strip().split() assert abs(int(freq_offset_ppm)) < 1000 def test_hwclock_value(command): """Test that the hardware clock has the correct time. If the time is wrong, it is set once and tested again. """ def get_time(): result = command.run_check('hwclock --utc --show')[0].strip() return datetime.strptime(result, '%Y-%m-%d %H:%M:%S.%f+0:00') def set_time(time): time = time.strftime('%Y-%m-%d %H:%M:%S.%f+0:00') command.run_check(f'hwclock --utc --set --date "{time}"') offset = abs((get_time() - datetime.utcnow()).total_seconds()) if offset > 60: set_time(datetime.utcnow()) offset = abs((get_time() - datetime.utcnow()).total_seconds()) assert offset < 60 labgrid-0.4.1/examples/shell/test_memory.py000066400000000000000000000014351415016572500207770ustar00rootroot00000000000000import re import pytest from labgrid.driver import ExecutionError def test_memory_mbw(command): """Test memcopy bandwidth""" try: command.run_check('which mbw') except ExecutionError: pytest.skip("mbw missing") result = command.run_check('mbw -qt0 8M') result = result[-1].strip() pattern = r"AVG\s+.*Copy:\s+(?P\S+)\s+MiB/s" bw, = map(float, re.fullmatch(pattern, result).groups()) assert bw > 40 # > 40 MiB/second def test_memory_memtester_short(command): """Test RAM for errors""" try: command.run_check('which memtester') except ExecutionError: pytest.skip("memtester missing") result = command.run_check('memtester 128k 1 | tail -n 1') result = result[-1].strip() assert result == "Done." labgrid-0.4.1/examples/shell/test_rt.py000066400000000000000000000020461415016572500201130ustar00rootroot00000000000000import re import pytest from labgrid.driver import ExecutionError def test_rt_cyclictest_short(command): """Test a basic cyclictest run""" try: command.run_check('which cyclictest') except ExecutionError: pytest.skip("cyclictest missing") result = command.run_check('cyclictest -SN -D 5 -q') result = result[-1].strip() pattern = r"Min:\s+(?P\w+)\s+Act:\s+\w+\s+Avg:\s+(?P\w+)\s+Max:\s+(?P\w+)" min, avg, max = map(int, re.search(pattern, result).groups()) assert min <= avg <= max assert avg < 1e6 # avg < 1 milliseconds assert max < 10e6 # max < 10 milliseconds def test_rt_hackbench_short(command): """Test a basic hackbench run""" try: command.run_check('which hackbench') except ExecutionError: pytest.skip("hackbench missing") result = command.run_check('hackbench -f 10') result = result[-1].strip() pattern = r"Time:\s+(?P