buildstream-1.6.9 (source archive, git commit 4abd1f3e1b5e5d128bc24e45ec9a37d61723be87)

buildstream-1.6.9/.coveragerc:

[run]
concurrency = multiprocessing

include =
  */buildstream/*

omit =
  # Omit some internals
  */buildstream/_profile.py
  */buildstream/__main__.py
  */buildstream/_version.py

  # Omit generated code
  */buildstream/_protos/*
  */.eggs/*

  # Omit .tox directory
  */.tox/*

[report]
show_missing = True
precision = 2

[paths]
source =
  buildstream/
  */site-packages/buildstream/
  */buildstream/buildstream/

buildstream-1.6.9/.gitattributes:

buildstream/_version.py export-subst

buildstream-1.6.9/.github/CODEOWNERS:

# Each line is a file pattern followed by one or more owners.

# These owners will be the default owners for everything in
# the repo, unless a later match takes precedence.
#
* @gtristan @juergbi @BenjaminSchubert @cs-shadow @abderrahim

buildstream-1.6.9/.github/common.env:

# Shared common variables
CI_IMAGE_VERSION=master-784208155
CI_TOXENV_MAIN=py36-nocover,py37-nocover,py38-nocover,py39-nocover,py310-nocover,py311-nocover
CI_TOXENV_ALL="${CI_TOXENV_MAIN}"

buildstream-1.6.9/.github/compose/ci.docker-compose.yml:

version: '3.4'

x-tests-template: &tests-template
  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:36-${CI_IMAGE_VERSION:-latest}
  command: tox -vvvvv -- --color=yes --integration
  environment:
    TOXENV: ${CI_TOXENV_ALL}

  # Enable privileges to run the sandbox
  #
  privileged: true
  devices:
    - /dev/fuse:/dev/fuse

  # Mount the local directory and set the working directory
  # to run the tests from.
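  # Note: relative host paths under "volumes" are resolved against the
  # directory containing this compose file (.github/compose), so ../..
  # below is the repository checkout root.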
  #
  volumes:
    - ../..:/home/testuser/buildstream
  working_dir: /home/testuser/buildstream

services:

  fedora-36:
    <<: *tests-template
    image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:36-${CI_IMAGE_VERSION:-latest}

  fedora-37:
    <<: *tests-template
    image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:37-${CI_IMAGE_VERSION:-latest}

  debian-10:
    <<: *tests-template
    image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-debian:10-${CI_IMAGE_VERSION:-latest}

  docs:
    <<: *tests-template
    command: tox -e docs
    environment:
      BST_FORCE_SESSION_REBUILD: 1

  lint:
    <<: *tests-template
    command: tox -e lint

buildstream-1.6.9/.github/run-ci.sh:

#!/bin/bash

topdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

function usage () {
    echo "Usage: "
    echo "  run-ci.sh [OPTIONS] [TEST NAME [TEST NAME...]]"
    echo
    echo "Runs the CI tests locally using docker"
    echo
    echo "The test names are based on the names of tests in the CI yaml files"
    echo
    echo "If no test names are specified, all tests will be run"
    echo
    echo "Options:"
    echo
    echo "  -h --help      Display this help message and exit"
    echo "  "
    exit 1;
}

while : ; do
    case "$1" in
        -h|--help)
            usage;
            shift ;;
        *)
            break ;;
    esac
done

test_names="${@}"

# We need to give ownership to the docker image user `testuser`,
# chances are high that this will be the same UID as the primary
# user on this host
#
user_uid="$(id -u)"
user_gid="$(id -g)"
if [ "${user_uid}" -ne "1000" ] || [ "${user_gid}" -ne "1000" ]; then
    sudo chown -R 1000:1000 "${topdir}/.."
fi

# runTest()
#
#  $1 = test name
#
function runTest() {
    test_name=$1

    # Run docker-compose from its directory, because it will use
    # relative paths
    cd "${topdir}/compose"
    docker-compose \
        --env-file ${topdir}/common.env \
        --file ${topdir}/compose/ci.docker-compose.yml \
        run "${test_name}"
}

# Lazily ensure that the script exits when a command fails
#
set -e

if [ -z "${test_names}" ]; then
    runTest "lint"
    runTest "debian-10"
    runTest "fedora-36"
    runTest "fedora-37"
else
    # Intentionally unquoted, so that each whitespace separated test
    # name is run as its own docker-compose service
    for test_name in ${test_names}; do
        runTest "${test_name}"
    done
fi

buildstream-1.6.9/.github/workflows/ci.yml:

name: PR Checks

# Pre-merge CI to run on push and pull_request events, even if this seems
# redundant, we avoid concurrency with the below configuration.
#
on:
  push:
  pull_request:

# Use the concurrency feature to ensure we don't run redundant workflows
#
concurrency:
  group: ${{ github.repository }}-${{ github.ref }}-${{ github.workflow }}
  cancel-in-progress: true

jobs:
  tests:
    runs-on: ubuntu-20.04
    continue-on-error: ${{ matrix.allow-failure || false }}

    strategy:
      fail-fast: false
      matrix:
        # The names here should map to a valid service defined in
        # "../compose/ci.docker-compose.yml"
        test-name:
          - debian-10
          - fedora-36
          - fedora-37
          - lint

    steps:
      - name: Check out repository
        uses: actions/checkout@v2
        # BuildStream requires tags to be able to find its version.
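        # (actions/checkout@v2 makes a shallow, single-commit clone by default;
        # fetch-depth: 0 fetches the full history and tags instead.)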
with: fetch-depth: 0 - name: Give `testuser` ownership of the source directory run: sudo chown -R 1000:1000 ${GITHUB_WORKSPACE} - name: Build documentation using Docker Compose run: | docker-compose \ --env-file ${GITHUB_WORKSPACE}/.github/common.env \ --file ${GITHUB_WORKSPACE}/.github/compose/ci.docker-compose.yml \ run \ docs - name: Upload artifacts uses: actions/upload-artifact@v2 with: name: docs path: doc/build/html buildstream-1.6.9/.github/workflows/release.yml000066400000000000000000000021071437515270000216060ustar00rootroot00000000000000name: Upload Release Asset on: push: tags: - '*.*.*' jobs: build: name: Upload Release Asset runs-on: ubuntu-20.04 steps: - name: Checkout code uses: actions/checkout@v2 # BuildStream requires tags to be able to find its version. with: fetch-depth: 0 - name: Give `testuser` ownership of the source directory run: sudo chown -R 1000:1000 ${GITHUB_WORKSPACE} - name: Build documentation using Docker Compose run: | docker-compose \ --env-file ${GITHUB_WORKSPACE}/.github/common.env \ --file ${GITHUB_WORKSPACE}/.github/compose/ci.docker-compose.yml \ run \ docs # Restore permissions to the current user sudo chown -R ${USER} ${GITHUB_WORKSPACE} tar -C doc/build/html -zcf docs.tgz . - name: Upload release assets run: | tag_name="${GITHUB_REF##*/}" hub release create -a "docs.tgz" -m "$tag_name" "$tag_name" env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} buildstream-1.6.9/.gitignore000066400000000000000000000011511437515270000160340ustar00rootroot00000000000000# Compiled python modules. buildstream/**/*.pyc tests/**/*.pyc # Setuptools distribution folder. /dist/ # Python egg metadata, regenerated from source files by setuptools. /*.egg-info .eggs # Some testing related things integration-cache/ tmp .coverage .coverage-reports/ .coverage.* .cache .pytest_cache/ *.bst/ .tox/ # Pycache, in case buildstream is ran directly from within the source # tree __pycache__/ # Generated version file buildstream/__version__.py #Autogenerated doc doc/source/elements/ doc/source/sources/ doc/source/modules.rst doc/source/buildstream.rst doc/source/buildstream.*.rst doc/build/ buildstream-1.6.9/.pylintrc000066400000000000000000000402171437515270000157170ustar00rootroot00000000000000[MASTER] # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code extension-pkg-whitelist=ujson # Add files or directories to the blacklist. They should be base names, not # paths. ignore=CVS,tests,doc # Add files or directories matching the regex patterns to the blacklist. The # regex matches against base names, not paths. ignore-patterns=.*_pb2.py,.*_pb2_grpc.py # Python code to execute, usually for sys.path manipulation such as # pygtk.require(). #init-hook= # Use multiple processes to speed up Pylint. jobs=1 # List of plugins (as comma separated values of python modules names) to load, # usually to register additional checkers. load-plugins= # Pickle collected data for later comparisons. persistent=yes # Specify a configuration file. #rcfile= # When enabled, pylint would attempt to guess common misconfiguration and emit # user-friendly hints instead of false-positive error messages suggestion-mode=yes # Allow loading of arbitrary C extensions. Extensions are imported into the # active Python interpreter and may run arbitrary code. unsafe-load-any-extension=no [MESSAGES CONTROL] # Only show warnings with the listed confidence levels. Leave empty to show # all. 
Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED confidence= # Disable the message, report, category or checker with the given id(s). You # can either give multiple identifiers separated by comma (,) or put this # option multiple times (only on the command line, not in the configuration # file where it should appear only once).You can also use "--disable=all" to # disable everything first and then reenable specific checks. For example, if # you want to run only the similarities checker, you can use "--disable=all # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use"--disable=all --enable=classes # --disable=W" # We have two groups of disabled messages: # # 1) Messages that are of no use to us # This is either because we don't follow the convention # (missing-docstring and protected-access come to mind), or because # it's not very useful in CI (too-many-arguments, for example) # # 2) Messages that we would like to enable at some point # We introduced linting quite late into the project, so there are # some issues that just grew out of control. Resolving these would # be nice, but too much work atm. # disable=, ##################################### # Messages that are of no use to us # ##################################### consider-using-f-string, fixme, missing-docstring, no-else-return, protected-access, too-few-public-methods, too-many-arguments, too-many-boolean-expressions, too-many-branches, too-many-instance-attributes, too-many-lines, too-many-locals, too-many-nested-blocks, too-many-public-methods, too-many-statements, too-many-return-statements, too-many-ancestors, # Chained comparisons let us write confusing statements # such as "version >= 1 <= CURRENT_VERSION" and pylint # complains when we use more clear and easier to understand # statements like "version >= 1 and version <= CURRENT_VERSION" # # Disable this nonsense. chained-comparison, ####################################################### # Messages that we would like to enable at some point # ####################################################### # Overriden methods don't actually override but redefine arguments-differ, duplicate-code, # Some invalid names are alright, we should configure pylint # to accept them, and curb the others invalid-name, unused-argument, # We can probably enable this soon, it is a bit experimental # for the moment and current releases of pylint (August 2021) raise # a lot of false positives. unused-private-member, ########################################################### # Messages that report warnings which should be addressed # ########################################################### logging-format-interpolation, cyclic-import, # Enable the message, report, category or checker with the given id(s). You can # either give multiple identifier separated by comma (,) or put this option # multiple time (only on the command line, not in the configuration file where # it should appear only once). See also the "--disable" option for examples. enable=c-extension-no-member [REPORTS] # Python expression which should return a note less than 10 (10 is the highest # note). You have access to the variables errors warning, statement which # respectively contain the number of errors / warnings messages and the total # number of statements analyzed. This is used by the global evaluation report # (RP0004). 
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) # Template used to display messages. This is a python new-style format string # used to format the message information. See doc for all details #msg-template= # Set the output format. Available formats are text, parseable, colorized, json # and msvs (visual studio).You can also give a reporter class, eg # mypackage.mymodule.MyReporterClass. output-format=colorized # Tells whether to display a full report or only the messages reports=no # Activate the evaluation score. score=yes [REFACTORING] # Maximum number of nested blocks for function / method body max-nested-blocks=5 # Complete name of functions that never returns. When checking for # inconsistent-return-statements if a never returning function is called then # it will be considered as an explicit return statement and no message will be # printed. never-returning-functions=optparse.Values,sys.exit [TYPECHECK] # List of decorators that produce context managers, such as # contextlib.contextmanager. Add to this list to register other decorators that # produce valid context managers. contextmanager-decorators=contextlib.contextmanager # List of members which are set dynamically and missed by pylint inference # system, and so shouldn't trigger E1101 when accessed. Python regular # expressions are accepted. generated-members=__enter__ # Tells whether missing members accessed in mixin class should be ignored. A # mixin class is detected if its name ends with "mixin" (case insensitive). ignore-mixin-members=yes # This flag controls whether pylint should warn about no-member and similar # checks whenever an opaque object is returned when inferring. The inference # can return multiple potential results while evaluating a Python object, but # some branches might not be evaluated, which results in partial inference. In # that case, it might be useful to still emit no-member and other checks for # the rest of the inferred objects. ignore-on-opaque-inference=yes # List of class names for which member attributes should not be checked (useful # for classes with dynamically set attributes). This supports the use of # qualified names. ignored-classes=optparse.Values,thread._local,_thread._local,contextlib.closing,gi.repository.GLib.GError,pathlib.PurePath # List of module names for which member attributes should not be checked # (useful for modules/projects where namespaces are manipulated during runtime # and thus existing member attributes cannot be deduced by static analysis. It # supports qualified module names, as well as Unix pattern matching. ignored-modules=pkg_resources,gi.repository,grpc,buildstream._protos.* # Show a hint with possible names when a member name was not found. The aspect # of finding the hint is based on edit distance. missing-member-hint=yes # The minimum edit distance a name should have in order to be considered a # similar match for a missing member name. missing-member-hint-distance=1 # The total number of similar names that should be taken in consideration when # showing a hint for a missing member. missing-member-max-choices=1 [BASIC] # Naming style matching correct argument names argument-naming-style=snake_case # Regular expression matching correct argument names. Overrides argument- # naming-style #argument-rgx= # Naming style matching correct attribute names attr-naming-style=snake_case # Regular expression matching correct attribute names. 
Overrides attr-naming- # style #attr-rgx= # Bad variable names which should always be refused, separated by a comma bad-names=foo, bar, baz, toto, tutu, tata # Naming style matching correct class attribute names class-attribute-naming-style=any # Regular expression matching correct class attribute names. Overrides class- # attribute-naming-style #class-attribute-rgx= # Naming style matching correct class names class-naming-style=PascalCase # Regular expression matching correct class names. Overrides class-naming-style #class-rgx= # Naming style matching correct constant names const-naming-style=UPPER_CASE # Regular expression matching correct constant names. Overrides const-naming- # style #const-rgx= # Minimum line length for functions/classes that require docstrings, shorter # ones are exempt. docstring-min-length=-1 # Naming style matching correct function names function-naming-style=snake_case # Regular expression matching correct function names. Overrides function- # naming-style #function-rgx= # Good variable names which should always be accepted, separated by a comma good-names=i,j,k,ex,Run,_,e,f # Include a hint for the correct naming format with invalid-name include-naming-hint=no # Naming style matching correct inline iteration names inlinevar-naming-style=any # Regular expression matching correct inline iteration names. Overrides # inlinevar-naming-style #inlinevar-rgx= # Naming style matching correct method names method-naming-style=snake_case # Regular expression matching correct method names. Overrides method-naming- # style #method-rgx= # Naming style matching correct module names module-naming-style=snake_case # Regular expression matching correct module names. Overrides module-naming- # style #module-rgx= # Colon-delimited sets of names that determine each other's naming style when # the name regexes allow several styles. name-group= # Regular expression which should only match function or class names that do # not require a docstring. no-docstring-rgx=^_ # List of decorators that produce properties, such as abc.abstractproperty. Add # to this list to register other decorators that produce valid properties. property-classes=abc.abstractproperty # Naming style matching correct variable names variable-naming-style=snake_case # Regular expression matching correct variable names. Overrides variable- # naming-style #variable-rgx= [VARIABLES] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. additional-builtins= # Tells whether unused global variables should be treated as a violation. allow-global-unused-variables=yes # List of strings which can identify a callback function by name. A callback # name must start or end with one of those strings. callbacks=cb_, _cb # A regular expression matching the name of dummy variables (i.e. expectedly # not used). dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ # Argument names that match this expression will be ignored. Default to name # with leading underscore ignored-argument-names=_.*|^ignored_|^unused_ # Tells whether we should check for unused import in __init__ files. init-import=no # List of qualified module names which can have objects that can redefine # builtins. 
redefining-builtins-modules=six.moves,past.builtins,future.builtins [LOGGING] # Logging modules to check that the string format arguments are in logging # function parameter format logging-modules=logging [SPELLING] # Limits count of emitted suggestions for spelling mistakes max-spelling-suggestions=4 # Spelling dictionary name. Available dictionaries: none. To make it working # install python-enchant package. spelling-dict= # List of comma separated words that should not be checked. spelling-ignore-words= # A path to a file that contains private dictionary; one word per line. spelling-private-dict-file= # Tells whether to store unknown words to indicated private dictionary in # --spelling-private-dict-file option instead of raising a message. spelling-store-unknown-words=no [MISCELLANEOUS] # List of note tags to take in consideration, separated by a comma. notes=FIXME, XXX, TODO [SIMILARITIES] # Ignore comments when computing similarities. ignore-comments=yes # Ignore docstrings when computing similarities. ignore-docstrings=yes # Ignore imports when computing similarities. ignore-imports=no # Minimum lines number of a similarity. min-similarity-lines=4 [FORMAT] # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. expected-line-ending-format= # Regexp for a line that is allowed to be longer than the limit. ignore-long-lines=^\s*(# )??$ # Number of spaces of indent required inside a hanging or continued line. indent-after-paren=4 # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 # tab). indent-string=' ' # Maximum number of characters on a single line. max-line-length=119 # Maximum number of lines in a module max-module-lines=1000 # Allow the body of a class to be on the same line as the declaration if body # contains single statement. single-line-class-stmt=no # Allow the body of an if to be on the same line as the test if there is no # else. single-line-if-stmt=no [IMPORTS] # Allow wildcard imports from modules that define __all__. allow-wildcard-with-all=no # Analyse import fallback blocks. This can be used to support both Python 2 and # 3 compatible code, which means that the block might have code that exists # only in one or another interpreter, leading to false positives when analysed. analyse-fallback-blocks=no # Deprecated modules which should not be used, separated by a comma deprecated-modules=optparse,tkinter.tix # Create a graph of external dependencies in the given file (report RP0402 must # not be disabled) ext-import-graph= # Create a graph of every (i.e. internal and external) dependencies in the # given file (report RP0402 must not be disabled) import-graph= # Create a graph of internal dependencies in the given file (report RP0402 must # not be disabled) int-import-graph= # Force import order to recognize a module as part of the standard # compatibility libraries. known-standard-library= # Force import order to recognize a module as part of a third party library. known-third-party=enchant [DESIGN] # Maximum number of arguments for function / method max-args=5 # Maximum number of attributes for a class (see R0902). max-attributes=7 # Maximum number of boolean expressions in a if statement max-bool-expr=5 # Maximum number of branch for function / method body max-branches=12 # Maximum number of locals for function / method body max-locals=15 # Maximum number of parents for a class (see R0901). max-parents=7 # Maximum number of public methods for a class (see R0904). 
max-public-methods=20 # Maximum number of return / yield for function / method body max-returns=6 # Maximum number of statements in function / method body max-statements=50 # Minimum number of public methods for a class (see R0903). min-public-methods=2 [CLASSES] # List of method names used to declare (i.e. assign) instance attributes. defining-attr-methods=__init__, __new__, setUp # List of member names, which should be excluded from the protected access # warning. exclude-protected=_asdict, _fields, _replace, _source, _make # List of valid names for the first argument in a class method. valid-classmethod-first-arg=cls # List of valid names for the first argument in a metaclass class method. valid-metaclass-classmethod-first-arg=mcs [EXCEPTIONS] # Exceptions that will emit a warning when being caught. Defaults to # "Exception" overgeneral-exceptions=Exception buildstream-1.6.9/BuildStream.doap000066400000000000000000000025711437515270000171330ustar00rootroot00000000000000 BuildStream Build tool for running abstract, deterministic build pipelines python3 Tristan Van Berkom tvb Jürg Billeter juergbi buildstream-1.6.9/COPYING000066400000000000000000000636421437515270000151140ustar00rootroot00000000000000 GNU LESSER GENERAL PUBLIC LICENSE Version 2.1, February 1999 Copyright (C) 1991, 1999 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. [This is the first released version of the Lesser GPL. It also counts as the successor of the GNU Library Public License, version 2, hence the version number 2.1.] Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. 
We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. 
GNU LESSER GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) "Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. 1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) The modified work must itself be a software library. b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. (For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. 
Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. This option is useful when you wish to copy part of the code of the Library into a program that is not a library. 4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. 5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. 
When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. 6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. 7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. 10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. 11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. 
For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. 14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Libraries If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the library `Frob' (a library for tweaking knobs) written by James Random Hacker. , 1 April 1990 Ty Coon, President of Vice That's all there is to it! buildstream-1.6.9/HACKING.rst000066400000000000000000000515151437515270000156530ustar00rootroot00000000000000Contributing ============ Some tips and guidelines for developers hacking on BuildStream Feature additions ----------------- Major feature additions should be proposed on the `mailing list `_ before being considered for inclusion, we strongly recommend proposing in advance of commencing work. New features must be well documented and tested either in our main test suite if possible, or otherwise in the integration tests. It is expected that the individual submitting the work take ownership of their feature within BuildStream for a reasonable timeframe of at least one release cycle after their work has landed on the master branch. This is to say that the submitter is expected to address and fix any side effects and bugs which may have fell through the cracks in the review process, giving us a reasonable timeframe for identifying these. 
Patch submissions ----------------- Branches must be submitted as merge requests in gitlab and should usually be associated to an issue report on gitlab. Commits in the branch which address specific issues must specify the issue number in the commit message. Merge requests that are not yet ready for review must be prefixed with the ``WIP:`` identifier. A merge request is not ready for review until the submitter expects that the patch is ready to actually land. Submitted branches must not contain a history of the work done in the feature branch. Please use git's interactive rebase feature in order to compose a clean patch series suitable for submission. We prefer that test case and documentation changes be submitted in separate commits from the code changes which they test. Ideally every commit in the history of master passes its test cases. This makes bisections more easy to perform, but is not always practical with more complex branches. Commit messages ~~~~~~~~~~~~~~~ Commit messages must be formatted with a brief summary line, optionally followed by an empty line and then a free form detailed description of the change. The summary line must start with what changed, followed by a colon and a very brief description of the change. If there is an associated issue, it **must** be mentioned somewhere in the commit message. **Example**:: element.py: Added the frobnicator so that foos are properly frobbed. The new frobnicator frobnicates foos all the way throughout the element. Elements that are not properly frobnicated raise an error to inform the user of invalid frobnication rules. This fixes issue #123 Coding style ------------ Coding style details for BuildStream Style guide ~~~~~~~~~~~ Python coding style for BuildStream is pep8, which is documented here: https://www.python.org/dev/peps/pep-0008/ We have a couple of minor exceptions to this standard, we dont want to compromise code readability by being overly restrictive on line length for instance. The pep8 linter will run automatically when running the test suite. Imports ~~~~~~~ Module imports inside BuildStream are done with relative ``.`` notation Good:: from .context import Context Bad:: from buildstream.context import Context The exception to the above rule is when authoring plugins, plugins do not reside in the same namespace so they must address buildstream in the imports. An element plugin will derive from Element by importing:: from buildstream import Element When importing utilities specifically, dont import function names from there, instead import the module itself:: from . import utils This makes things clear when reading code that said functions are not defined in the same file but come from utils.py for example. Policy for private symbols ~~~~~~~~~~~~~~~~~~~~~~~~~~ Private symbols are expressed via a leading ``_`` single underscore, or in some special circumstances with a leading ``__`` double underscore. Before understanding the naming policy, it is first important to understand that in BuildStream, there are two levels of privateness which need to be considered. These are treated subtly differently and thus need to be understood: * API Private A symbol is considered to be *API private* if it is not exposed in the *public API*. Even if a symbol does not have any leading underscore, it may still be *API private* if the containing *class* or *module* is named with a leading underscore. * Local private A symbol is considered to be *local private* if it is not intended for access outside of the defining *scope*. 
If a symbol has a leading underscore, it might not be *local private* if it is declared on a publicly visible class, but needs to be accessed internally by other modules in the BuildStream core. Ordering '''''''' For better readability and consistency, we try to keep private symbols below public symbols. In the case of public modules where we may have a mix of *API private* and *local private* symbols, *API private* symbols should come before *local private* symbols. Symbol naming ''''''''''''' Any private symbol must start with a single leading underscore for two reasons: * So that it does not bleed into documentation and *public API*. * So that it is clear to developers which symbols are not used outside of the declaring *scope* Remember that with python, the modules (python files) are also symbols within their containing *package*, as such; modules which are entirely private to BuildStream are named as such, e.g. ``_thismodule.py``. Cases for double underscores '''''''''''''''''''''''''''' The double underscore in python has a special function. When declaring a symbol in class scope which has a leading underscore, it can only be accessed within the class scope using the same name. Outside of class scope, it can only be accessed with a *cheat*. We use the double underscore in cases where the type of privateness can be ambiguous. * For private modules and classes We never need to disambiguate with a double underscore * For private symbols declared in a public *scope* In the case that we declare a private method on a public object, it becomes ambiguous whether: * The symbol is *local private*, and only used within the given scope * The symbol is *API private*, and will be used internally by BuildStream from other parts of the codebase. In this case, we use a single underscore for *API private* methods which are not *local private*, and we use a double underscore for *local private* methods declared in public scope. Documenting private symbols ''''''''''''''''''''''''''' Any symbol which is *API Private* (regardless of whether it is also *local private*), should have some documentation for developers to better understand the codebase. Contrary to many other python projects, we do not use docstrings to document private symbols, but prefer to keep *API Private* symbols documented in code comments placed *above* the symbol (or *beside* the symbol in some cases, such as variable declarations in a class where a shorter comment is more desirable), rather than docstrings placed *below* the symbols being documented. Other than this detail, follow the same guidelines for documenting symbols as described below. Documenting BuildStream ----------------------- BuildStream starts out as a documented project from day one and uses sphinx to document itself. Documentation formatting policy ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The BuildStream documentation style is as follows: * Titles and headings require two leading empty lines above them. Only the first word should be capitalized. * If there is an ``.. _internal_link`` anchor, there should be two empty lines above the anchor, followed by one leading empty line. * Within a section, paragraphs should be separated by one empty line. * Notes are defined using: ``.. note::`` blocks, followed by an empty line and then indented (3 spaces) text. * Code blocks are defined using: ``.. code:: LANGUAGE`` blocks, followed by an empty line and then indented (3 spaces) text. Note that the default language is `python`. 
* Cross references should be of the form ``:role:`target```. * To cross reference arbitrary locations with, for example, the anchor ``_anchor_name``, you must give the link an explicit title: ``:ref:`Link text ```. Note that the "_" prefix is not required. Useful links: For further information, please see the `Sphinx Documentation `_. Building Docs ~~~~~~~~~~~~~ The documentation build is not integrated into the ``setup.py`` and is difficult (or impossible) to do so, so there is a little bit of setup you need to take care of first. Before you can build the BuildStream documentation yourself, you need to first install ``sphinx`` along with some additional plugins and dependencies, using pip or some other mechanism:: # Install sphinx pip3 install --user sphinx # Install some sphinx extensions pip3 install --user sphinx-click pip3 install --user sphinx_rtd_theme # Additional optional dependencies required pip3 install --user arpy To build the documentation, just run the following:: make -C doc This will give you a ``doc/build/html`` directory with the html docs which you can view in your browser locally to test. Regenerating session html ''''''''''''''''''''''''' The documentation build will build the session files if they are missing, or if explicitly asked to rebuild. We revision the generated session html files in order to reduce the burden on documentation contributors. To explicitly rebuild the session snapshot html files, it is recommended that you first set the ``BST_SOURCE_CACHE`` environment variable to your source cache, this will make the docs build reuse already downloaded sources:: export BST_SOURCE_CACHE=~/.cache/buildstream/sources To force rebuild session html while building the doc, simply build the docs like this:: make BST_FORCE_SESSION_REBUILD=1 -C doc Man pages ~~~~~~~~~ Unfortunately it is quite difficult to integrate the man pages build into the ``setup.py``, as such, whenever the frontend command line interface changes, the static man pages should be regenerated and committed with that. To do this, first ensure you have ``click_man`` installed, possibly with:: pip install --user click_man Then, in the toplevel directory of buildstream, run the following:: python3 setup.py --command-packages=click_man.commands man_pages And commit the result, ensuring that you have added anything in the ``man/`` subdirectory, which will be automatically included in the buildstream distribution. Documenting conventions ~~~~~~~~~~~~~~~~~~~~~~~ We use the sphinx.ext.napoleon extension for the purpose of having a bit nicer docstrings than the default sphinx docstrings. A docstring for a method, class or function should have the following format:: """Brief description of entity Args: argument1 (type): Description of arg argument2 (type): Description of arg Returns: (type): Description of returned thing of the specified type Raises: (SomeError): When some error occurs (SomeOtherError): When some other error occurs A detailed description can go here if one is needed, only after the above part documents the calling conventions. """ Documentation Examples ~~~~~~~~~~~~~~~~~~~~~~ The examples section of the documentation contains a series of standalone examples, here are the criteria for an example addition. 
* The example has a ``${name}``

* The example has a project users can copy and use

  * This project is added in the directory ``doc/examples/${name}``

* The example has a documentation component

  * This is added at ``doc/source/examples/${name}.rst``

  * A reference to ``examples/${name}`` is added to the toctree in
    ``doc/source/examples.rst``

  * This documentation discusses the project elements declared in the
    project and may provide some BuildStream command examples

  * This documentation links out to the reference manual at every opportunity

* The example has a CI test component

  * This is an integration test added at ``tests/examples/${name}``

  * This test runs BuildStream in the ways described in the example and
    asserts that we get the results which we advertise to users in those
    examples.


Adding BuildStream command output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As a part of building the docs, BuildStream will run itself and extract
some html for the colorized output which is produced.

If you want to run BuildStream to produce some nice html for your
documentation, then you can do so by adding new ``.run`` files to the
``doc/sessions/`` directory.

Any files added as ``doc/sessions/${example}.run`` will result in a
generated file at ``doc/source/sessions/${example}.html``, and these files
can be included in the reStructuredText documentation at any time with::

  .. raw:: html
     :file: sessions/${example}.html

The ``.run`` file format is just another YAML dictionary which consists
of a ``commands`` list, instructing the program what to do command by
command.

Each *command* is a dictionary, the members of which are listed here:

* ``directory``: The project directory in which to run, relative to the input file

* ``output``: The output html file to generate, relative to the input file (optional)

* ``fake-output``: Don't really run the command, just pretend to and
  pretend this was the output; an empty string will enable this too.

* ``command``: The command to run, without the leading ``bst``

When adding a new ``.run`` file, one should normally also commit the new
resulting generated ``.html`` file(s) into the ``doc/source/sessions-stored/``
directory at the same time; this ensures that other developers do not need
to regenerate them locally in order to build the docs.

**Example**:

.. code:: yaml

   commands:

   # Make it fetch first
   - directory: ../examples/foo
     command: fetch hello.bst

   # Capture a build output
   - directory: ../examples/foo
     output: ../source/sessions/foo-build.html
     command: build hello.bst


Protocol Buffers
----------------
BuildStream uses protobuf and gRPC for serialization and communication
with artifact cache servers. This requires ``.proto`` files and Python
code generated from the ``.proto`` files using protoc. All these files
live in the ``buildstream/_protos`` directory. The generated files are
included in the git repository to avoid depending on grpcio-tools for
user installations.


Regenerating code
~~~~~~~~~~~~~~~~~
When ``.proto`` files are modified, the corresponding Python code needs
to be regenerated.

As a prerequisite for code generation you need to install ``grpcio-tools``
using pip or some other mechanism::

  pip3 install --user grpcio-tools

To actually regenerate the code::

  ./setup.py build_grpc


Testing BuildStream
-------------------
BuildStream uses pytest for regression tests and testing out the behavior
of newly added components. The elaborate documentation for pytest can be
found here: http://doc.pytest.org/en/latest/contents.html

Don't get lost in the docs if you don't need to; follow existing examples
instead.
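For orientation, here is a condensed sketch of the shape such tests usually
take. This is only an illustrative sketch: the data directory layout and the
test names below are hypothetical, and the exact fixtures and helpers to use
should be copied from neighbouring tests.

.. code:: python

   import os
   import pytest

   # Hypothetical data directory stored beside this test module
   DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'project')


   # The pytest-datafiles extension copies DATA_DIR into a temporary
   # directory and hands it to the test through the `datafiles` fixture.
   @pytest.mark.datafiles(DATA_DIR)
   def test_project_layout(datafiles):
       project = str(datafiles)
       assert os.path.exists(os.path.join(project, 'project.conf'))


   # Tests which need to run commands in a sandbox are additionally
   # marked as integration tests, so they only run with --integration.
   @pytest.mark.integration
   @pytest.mark.datafiles(DATA_DIR)
   def test_something_in_a_sandbox(datafiles):
       project = str(datafiles)
       assert os.path.isdir(project)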
Running tests
~~~~~~~~~~~~~
To run the tests, just type::

  ./setup.py test

At the toplevel.

When debugging a test, it can be desirable to see the stdout and stderr
generated by a test; to do this, use the ``--addopts`` option to feed
arguments to pytest as such::

  ./setup.py test --addopts -s

You can always abort on the first failure by running::

  ./setup.py test --addopts -x

If you want to run a specific test or a group of tests, you can specify
a prefix to match. E.g. if you want to run all of the frontend tests you
can do::

  ./setup.py test --addopts '-k tests/frontend/'

We also have a set of slow integration tests that are disabled by default;
you will notice most of them marked with SKIP in the pytest output. To run
them, you can use::

  ./setup.py test --addopts '--integration'

By default, buildstream also runs pylint on all files. Should you want to
run just pylint (these checks are a lot faster), you can do so with::

  ./setup.py test --addopts '-m pylint'

Alternatively, any IDE plugin that uses pytest should automatically detect
the ``.pylintrc`` in the project's root directory.


Adding tests
~~~~~~~~~~~~
Tests are found in the tests subdirectory, inside of which there is a
separate directory for each *domain* of tests. All tests are collected as::

  tests/*/*.py

If the new test is not appropriate for the existing test domains, then
simply create a new directory for it under the tests subdirectory.

Various tests may include data files to test on; there are examples of this
in the existing tests. When adding data for a test, create a subdirectory
beside your test in which to store data.

When creating a test that needs data, use the datafiles extension to
decorate your test case (again, examples exist in the existing tests for
this); documentation on the datafiles extension can be found here:
https://pypi.python.org/pypi/pytest-datafiles

Tests that run a sandbox should be decorated with::

  @pytest.mark.integration

and use the integration cli helper.


Measuring BuildStream performance
---------------------------------


Benchmarking framework
~~~~~~~~~~~~~~~~~~~~~~~
BuildStream has a utility to measure performance which is available from
a separate repository at https://gitlab.com/BuildStream/benchmarks. This
tool allows you to run a fixed set of workloads with multiple versions of
BuildStream. From this you can see whether one version performs better or
worse than another, which is useful when looking for regressions and when
testing potential optimizations.

For full documentation on how to use the benchmarking tool see the README
in the 'benchmarks' repository.


Profiling tools
~~~~~~~~~~~~~~~
When looking for ways to speed up the code you should make use of a
profiling tool.

Python provides `cProfile `_ which gives you a list of all functions
called during execution and how much time was spent in each function.

Here is an example of running `bst --help` under cProfile::

  python3 -m cProfile -o bst.cprofile -- $(which bst) --help

You can then analyze the results interactively using the 'pstats' module::

  python3 -m pstats ./bst.cprofile

For more detailed documentation of cProfile and 'pstats', see:
https://docs.python.org/3/library/profile.html.

For a richer visualisation of the callstack you can try `Pyflame `_.
Once you have followed the instructions in Pyflame's README to install the
tool, you can profile `bst` commands as in the following example::

  pyflame --output bst.flame --trace bst --help

You may see an `Unexpected ptrace(2) exception:` error.
Note that the `bst` operation will continue running in the background in this case, you will need to wait for it to complete or kill it. Once this is done, rerun the above command which appears to fix the issue. Once you have output from pyflame, you can use the ``flamegraph.pl`` script from the `Flamegraph project `_ to generate an .svg image: ./flamegraph.pl bst.flame > bst-flamegraph.svg The generated SVG file can then be viewed in your preferred web browser. Profiling specific parts of BuildStream with BST_PROFILE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BuildStream can also turn on cProfile for specific parts of execution using BST_PROFILE. BST_PROFILE can be set to a section name, or 'all' for all sections. There is a list of topics in `buildstream/_profile.py`. For example, running:: BST_PROFILE=load-pipeline bst build bootstrap-system-x86.bst will produce a profile in the current directory for the time take to call most of `initialized`, for each element. These profile files are in the same cProfile format as those mentioned in the previous section, and can be analysed with `pstats` or `pyflame`. Profiling the artifact cache receiver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Since the artifact cache receiver is not normally run directly, it's necessary to alter the ForceCommand part of sshd_config to enable profiling. See the main documentation in `doc/source/artifacts.rst` for general information on setting up the artifact cache. It's also useful to change directory to a logging directory before starting `bst-artifact-receive` with profiling on. This is an example of a ForceCommand section of sshd_config used to obtain profiles:: Match user artifacts ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts The MANIFEST.in and setup.py ---------------------------- When adding a dependency to BuildStream, it's important to update the setup.py accordingly. When adding data files which need to be discovered at runtime by BuildStream, update setup.py accordingly. When adding data files for the purpose of docs or tests, or anything that is not covered by setup.py, update the MANIFEST.in accordingly. 
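To illustrate the distinction, here is a generic setuptools sketch (not a
copy of BuildStream's actual ``setup.py``; the package and file names are
hypothetical): files which must be discoverable at runtime are declared as
package data in ``setup.py``, while docs- and test-only files are instead
listed in ``MANIFEST.in``.

.. code:: python

   from setuptools import setup, find_packages

   setup(
       name='example-package',
       packages=find_packages(),
       # Data files which must be found at runtime are shipped as package
       # data, so that they are installed alongside the code.
       package_data={
           'example_package': ['data/*.yaml'],
       },
       # New runtime dependencies are declared here.
       install_requires=[
           'Click',
       ],
   )

Anything not covered by declarations of this kind, such as test data or
documentation sources, only ends up in the source distribution if it is
matched by an ``include`` or ``recursive-include`` rule in ``MANIFEST.in``.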
At any time, running the following command to create a source distribution should result in creating a tarball which contains everything we want it to include:: ./setup.py sdist buildstream-1.6.9/MAINTAINERS000066400000000000000000000002201437515270000155350ustar00rootroot00000000000000Tristan Van Berkom E-mail: tristan.vanberkom@codethink.co.uk Userid: tvb Jürg Billeter E-mail: juerg.billeter@codethink.co.uk Userid: juergbi buildstream-1.6.9/MANIFEST.in000066400000000000000000000014671437515270000156140ustar00rootroot00000000000000# Basic toplevel package includes include BuildStream.doap include COPYING include HACKING.rst include MAINTAINERS include NEWS include README.rst # Documentation package includes include doc/Makefile include doc/source/conf.py include doc/source/index.rst # Tests recursive-include tests *.py recursive-include tests *.yaml recursive-include tests *.bst recursive-include tests *.conf recursive-include tests *.sh recursive-include tests *.expected # Protocol Buffers recursive-include buildstream/_protos *.proto # Requirements files include requirements/requirements.in include requirements/requirements.txt include requirements/dev-requirements.in include requirements/dev-requirements.txt include requirements/plugin-requirements.in include requirements/plugin-requirements.txt # Versioneer include versioneer.py buildstream-1.6.9/NEWS000066400000000000000000000416121437515270000145510ustar00rootroot00000000000000================= buildstream 1.6.9 ================= o Further Python 3.11 fixes to regex flags. ================= buildstream 1.6.8 ================= o Only test remote CAS push capabilities if push is enabled. This improves compatibility with new grpcio releases. o Dummy sandbox for checking out from dirrefernt arches. Ability to check out build artifacts with incompatible arch assuming no commands need to be run. o Backport regex flags fix to support newer versions of Python ================= buildstream 1.6.7 ================= o Some documentation updates o Support newer versions of ruamel.yam (issue #1623) ================= buildstream 1.6.6 ================= o BuildStream git tests have always assumed default git branch is master. This is now explicit with test helpers o project.refs of subprojects are properly taken into account o ostree regressed as part of migrating to command-line that it left whitespace into ref when tracking. Any whitespace around ref is now removed. o pb2 files are regenerated with protobuf 3.20.1 to mitigate forward compatibility issues. This has the implication that protobuf 3.19.0 or higher is now required. ================= buildstream 1.6.5 ================= o Make it easier to override parameters to `make` in the `make` element o ostree: Remove `--mirror` parameter which has been causing some issues o Fix test suite to work on some CI runners which hang while resolving localhost when trying to open a port for the CAS server ================= buildstream 1.6.4 ================= o BuildElement classes now support `create-dev-shm` o script element plugin now supports `create-dev-shm` o Python 3.6 is no longer tested in CI but support is maintained on best effort level. 
o New fatal warnings for unaliased sources o New errors raised when using an unresolved source alias o Add support for .netrc in remote/tar/zip sources o Bugfixes and better stability in fuse layer o Drop CI support for EOL python 3.6 (although BuildStream should still work when installed in python 3.6 environments) o Various bug fixes, documentation updates and CI related cleanup ================= buildstream 1.6.3 ================= o Support for python 3.9 now being tested in CI o CI overhaul to work more like it does in master o Refresh all dependencies which are being tested in CI, addressing a corner case crash that would occur when using bash completions and bleeding edge versions of the click library o Updated minimum required version of grpcio library to 1.30, as older versions are not working properly with existing artifact cache services. ================= buildstream 1.6.2 ================= o Fix some issues with a previous fix for #532 o Ported to github CI infrastructure o Ensure paths specified in user configuration are absolute o Import some symbols from collections.abc, required for python 3.10 ================= buildstream 1.6.1 ================= o Fix failure handling with CAS (#1403) ================= buildstream 1.6.0 ================= o Fixed edge case issue when dealing with git remotes (#1372) ================= buildstream 1.5.1 ================= o Support `buildstream1.conf` as well as `buildstream.conf` for parallel installations. o Lazy resolution of variables, this allows junctions to use variables without requiring the project to have fully resolved variables, while still reporting the right error messages if a junction uses unresolved variables. o Fix an issue where conditional statements were being lost instead of processed at include time, only when the include happens in project.conf o Backport some artifact cache related structural changes, and allow BuildStream 1 clients to interface with BuildStream 2 remote asset caches, while still allowing BuildStream 1 to interface with its own bst-artifact-server implementation. o Added sandbox configuration for `os` and `architecture` to mirror the added options in BuildStream 2, fixing issue #523. ================= buildstream 1.5.0 ================= o Process options in included files in the context of the project they were included from. This is technically a breaking change, however it is highly unlikely that this will break projects. In some cases projects were working around the broken behavior by ensuring matching project option names in junctioned projects, and in other cases simply avoiding including files which have project option conditional statements. o Added errors when trying to load BuildStream 2 projects, recommending to install the appropriate BuildStream version for the project. o Added errors when loading BuildStream 2 plugins in a BuildStream 1 project, recommending to use BuildStream 1 plugins with BuildStream 1 projects. ================= buildstream 1.4.3 ================= o Fix support for conditional list append/prepend in project.conf, Merge request !1857 o Fix internal imports to import from "collections" instead of "collections.abc", this improves support for Python 3.8, see issue #831 o Fix some downloads from gitlab.com by setting custom user agent, fixes issue #1285 o Work around python API break from ostree's repo.remote_gpg_import(), this was changed in ostree commit v2019.2-10-gaa5df899, and we now have a fallback to support both versions of the API, see merge request !1917. 
================= buildstream 1.4.2 ================= o Support for python 3.8 o Fix a stacktrace with a hang we can experience when we CTRL-C a job twice. o Workaround some servers which do not honor the 'If-None-Match' HTTP header and avoid downloading files redundantly in these cases. o Allow specifying absolute paths in overlap-whitelist (issue #721) o Support systems with fuse3 (avoid passing unsupported argument to fusermount3) ================= buildstream 1.4.1 ================= o Depend on a newer version of ruamel.yaml (>= 0.16). ================= buildstream 1.4.0 ================= o Elements may now specify 'build-depends' and 'runtime-depends' fields to avoid having to specify the dependency type for every entry in 'depends'. o Elements may now specify cross-junction dependencies as simple strings using the format '{junction-name}:{element-name}'. o New `fatal-warnings` has been added to the project.conf format, allowing projects to specify which warnings they want to consider as fatal. Support for the following warnings is included: o overlaps: When staged artifact files overlap (deprecates: 'fail-on-overlap') o ref-not-in-track: When the source implementation finds that the ref is out of bounds for the tracking config o git:inconsistent-submodule: A .gitmodules file is present but the submodule was never added to the repo. o git:unlisted-submodule: A submodule exists but is not specified in the YAML declaration. o git:invalid-submodule: A submodule is specified in the YAML declaration but does not exist at the given ref in the git repository. o BuildStream now depends on python3 ujson (for some internal serializations) o Workspaces can now be opened as relative paths. Existing open workspaces will not be converted to relative paths, (they need to be closed and opened again to get the new behavior). o Dependencies can now be specified as strict to force rebuild in non-strict mode. This is useful for statically linked dependencies (#254). o Git source plugins can optionally track human readable refs using the output of `git describe`. ================= buildstream 1.3.1 ================= o The `max-jobs` variable is now controllable in user configuration and on the command line. o Source plugins may now request access access to previous during track and fetch by setting `BST_REQUIRES_PREVIOUS_SOURCES_TRACK` and/or `BST_REQUIRES_PREVIOUS_SOURCES_FETCH` attributes. o Add new `pip` source plugin for downloading python packages using pip, based on requirements files from previous sources. 
================= buildstream 1.2.8 ================= o Fixed issues with workspaced junctions which need fetches (#1030) o Bail out with informative error if stdout/stderr are O_NONBLOCK (#929) ================= buildstream 1.2.7 ================= o Improved messaging around unknown artifact cache keys (#981) o Fixed crash which occurs when deleting artifact cache with open workspaces (#1017) o Fixed `bst --no-strict build --track-all ...` which sometimes exited successfully without building anything (#1014) o Fixed incorrect error message with malformed YAML in project.conf (#1019) ================= buildstream 1.2.6 ================= o Fix 'quit' option when interrupting a build (#525) o Only queue one cache size calculation job at a time o Fix stack traces on forceful termination o Fix scheduler processing order regression (#712) o Fix race condition in bzr source plugin o Better error messages for insufficient disk space o UI/Logging improvements regarding cache quota usage o Fix `bst push` in non-strict mode (#990) o Fix crash (regression) when tracking a single element (#1012) ================= buildstream 1.2.5 ================= o Fixed failure to process some elements when workspaces are open (#919) o Better error reporting when files are missing, or when encountering errors in sub projects (#947) o Do not require exact versions of dependencies for running tests (#916) o Fail on overlap policy no longer inherited from subprojects (#926) ================= buildstream 1.2.4 ================= o Migration of scripts to use tox o Force updating tags when fetching from git repos (#812) o Avoid downloading unused submodules (#804) o Fixed cleanup of cache server with disk is full (#609) o Fixed possible artifact cache corruption (#749) o Fixed `bst checkout --deps none` behavior (#670) ================= buildstream 1.2.3 ================= o Fixed an unhandled exception when cleaning up a build sandbox (#153) o Fixed race condition when calculating cache size and commiting artifacts o Fixed regression where terminating with `^C` results in a double user interrogation (#693) o Fixed regression in summary when builds are terminated (#479) o Fixed regression where irrelevant status messages appear from git sources o Improve performance of artifact uploads by batching file transfers (#676/#677) o Fixed performance of artifact downloads by batching file transfers (#554) o Fixed checks for paths which escape the project directory (#673) ================= buildstream 1.2.2 ================= * Fixed incomplete removal of blessings dependency ================= buildstream 1.2.1 ================= o Fixed corruption of artifact cache at cache cleanup time (#623) o Fixed accidental deletion of artifacts when tracking is enabled o Error out when protected variables are set by project authors (#287) o Fixed option resolution in project wide element & source configurations (#658) o Error out gracefully when push remote is mal-specified (#625) o Improved logging regarding skipped push / pull jobs (#515) o Fixed crash in `bst fetch` when project.refs and source mirroring are in use (#666) o Removed blessings dependency o Support for batch file downloads on the artifact cache server ================= buildstream 1.2.0 ================= o Various last minute bug fixes o Final update to the SourceFetcher related mirroring APIs ================= buildstream 1.1.7 ================= o Fix CAS resource_name format Artifact servers need to be updated. 
o Improved startup performance and performance of calculating artifact cache size o Various other bug fixes ================= buildstream 1.1.6 ================= o A lot of bug fixes ================= buildstream 1.1.5 ================= o Add a `--tar` option to `bst checkout` which allows a tarball to be created from the artifact contents. o Fetching and tracking will consult mirrors defined in project config, and the preferred mirror to fetch from can be defined in the command line or user config. o Added new `remote` source plugin for downloading file blobs o Add support for the new include '(@)' directive in project.conf and .bst files ================= buildstream 1.1.4 ================= o `bst workspace` commands and `bst track` will substitute their source elements when performing those operations, e.g. performing `bst track` on a filter element will track the sources on the element that it depends on (if it has sources). o Added new simple `make` element o Switch to Remote Execution CAS-based artifact cache on all platforms. Artifact servers need to be migrated. o BuildStream now requires python version >= 3.5 o BuildStream will now automatically clean up old artifacts when it runs out of space. The exact behavior is configurable in the user's buildstream.conf. ================= buildstream 1.1.3 ================= o Added new `bst init` command to initialize a new project. o Cross junction tracking is now disabled by default for projects which can support this by using project.refs ref-storage New options have been added to explicitly enable cross-junction tracking. o Failed jobs are now summarised at the end of a build. Use `--verbose` and `--no-verbose` to adjust the amount of detail given. o BuildElements' `configure-commands` are only run once for workspaces now, which allows for incremental builds. Appropriate API for plugins is also exposed through `Element.prepare`. o The `cmake` plugin now supports building with ninja with the newly added `generator` configuration option. o `bst workspace close` and `bst workspace reset` now support multiple elements. All elements can be specified using `--all`. o The elements whose cache keys had to be determined during the build are summarised at the end of the build. o Fixed versioning introspection to be dynamic, many users use a developer install mode so they can update with git, now the version information is always up to date in logs. This causes a minor API break: The --version output now only outputs the version. ================= buildstream 1.1.2 ================= o New ref-storage option allows one to store source refs, such as git shas, in one central project.refs file instead of inline with the source declarations. o Deprecated `--track-save` optionality in `bst build`, this does not make sense to support now that we have project.refs. o Added the `sandbox` configuration option which can be used in `project.conf` and elements, to control the user ID and group ID used in build sandboxes. o Added new `deb` source implementation, for staging of downloaded deb package files. ================= buildstream 1.1.1 ================= o New project configuration controlling how the sandbox behaves when `bst shell` is used; allowing projects to provide a more functional shell environment. o The `bst shell` command now has a `--mount` option allowing users to mount files and directories into the sandbox for testing purposes. 
o Log lines are now configurable with the new "message-format" user configuration, allowing one to express optional fields such as microsecond precision and wallclock time. o Newly added filter element o Git source plugin now allows disabling of submodule checkouts o In the same way we allow overriding element configurations by their 'kind' in project.conf, we now support the same for source plugin configurations. o Tar and zip sources now automatically recall an `etag` from the http headers, optimizing tracking of tarballs significantly (issue #62) ================= buildstream 1.1.0 ================= o Multiple artifact caches are now supported in project and user configuration with a priority order (issue #85) o Add junction support for subprojects o Changes towards incremental builds in workspaces o `bst shell --build` now creates true build sandbox o Many bug fixes ================= buildstream 1.0.0 ================= First stable release of BuildStream BuildStream 1.0.0 is all about API stability - for the past months we have been reviewing our various API surfaces, implementing strategies for revisioning of our interfaces and cleaning up. Long term stability is very important for build reproducibility over time, and this release is the first promise we are making on any API surfaces. Stable API surfaces include: o The command line interface o The YAML user configuration file format o The YAML project `.bst` file format o The core Python module imported by external plugins buildstream-1.6.9/README.rst000066400000000000000000000065411437515270000155430ustar00rootroot00000000000000About ----- .. image:: https://img.shields.io/github/workflow/status/apache/buildstream/PR%20Checks/bst-1 :alt: GitHub Workflow Status :target: https://github.com/apache/buildstream/actions/workflows/ci.yml?query=branch%3Abst-1 What is BuildStream? ==================== BuildStream is a Free Software tool for building/integrating software stacks. It takes inspiration, lessons and use-cases from various projects including OBS, Reproducible Builds, Yocto, Baserock, Buildroot, Aboriginal, GNOME Continuous, JHBuild, Flatpak Builder and Android repo. BuildStream supports multiple build-systems (e.g. autotools, cmake, cpan, distutils, make, meson, qmake), and can create outputs in a range of formats (e.g. debian packages, flatpak runtimes, sysroots, system images) for multiple platforms and chipsets. Why should I use BuildStream? ============================= BuildStream offers the following advantages: * **Declarative build instructions/definitions** BuildStream provides a a flexible and extensible framework for the modelling of software build pipelines in a declarative YAML format, which allows you to manipulate filesystem data in a controlled, reproducible sandboxed environment. * **Support for developer and integrator workflows** BuildStream provides traceability and reproducibility for integrators handling stacks of hundreds/thousands of components, as well as workspace features and shortcuts to minimise cycle-time for developers. * **Fast and predictable** BuildStream can cache previous builds and track changes to source file content and build/config commands. BuildStream only rebuilds the things that have changed. * **Extensible** You can extend BuildStream to support your favourite build-system. * **Bootstrap toolchains and bootable systems** BuildStream can create full systems and complete toolchains from scratch, for a range of ISAs including x86_32, x86_64, ARMv7, ARMv8, MIPS. How do I use BuildStream? 
========================= Please refer to the `documentation `_ for information about installing BuildStream, and about the BuildStream YAML format and plugin options. How does BuildStream work? ========================== BuildStream operates on a set of YAML files (.bst files), as follows: * loads the YAML files which describe the target(s) and all dependencies * evaluates the version information and build instructions to calculate a build graph for the target(s) and all dependencies and unique cache-keys for each element * retrieves elements from cache if they are already built, or builds them in a sandboxed environment using the instructions declared in the .bst files * transforms/configures and/or deploys the resulting target(s) based on the instructions declared in the .bst files. How can I get started? ====================== The easiest way to get started is to explore some existing .bst files, for example: * https://gitlab.gnome.org/GNOME/gnome-build-meta/ * https://gitlab.com/freedesktop-sdk/freedesktop-sdk * https://gitlab.com/baserock/definitions * https://gitlab.com/BuildStream/buildstream-examples/tree/master/build-x86image * https://gitlab.com/BuildStream/buildstream-examples/tree/master/netsurf-flatpak If you have any questions please ask on our `#buildstream `_ channel in `irc.gnome.org `_ buildstream-1.6.9/buildstream/000077500000000000000000000000001437515270000163615ustar00rootroot00000000000000buildstream-1.6.9/buildstream/__init__.py000066400000000000000000000026251437515270000204770ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Plugin author facing APIs import os if "_BST_COMPLETION" not in os.environ: # Special sauce to get the version from versioneer from ._version import get_versions __version__ = get_versions()['version'] del get_versions from .utils import UtilError, ProgramNotFoundError from .sandbox import Sandbox, SandboxFlags from .types import Scope, Consistency, CoreWarnings from .plugin import Plugin from .source import Source, SourceError, Consistency, SourceFetcher from .element import Element, ElementError, Scope from .buildelement import BuildElement from .scriptelement import ScriptElement buildstream-1.6.9/buildstream/__main__.py000066400000000000000000000011351437515270000204530ustar00rootroot00000000000000################################################################## # Private Entry Point # ################################################################## # # This allows running the cli when BuildStream is uninstalled, # as long as BuildStream repo is in PYTHONPATH, one can run it # with: # # python3 -m buildstream [program args] # # This is used when we need to run BuildStream before installing, # like when we build documentation. 
# if __name__ == '__main__': # pylint: disable=no-value-for-parameter from ._frontend.cli import cli cli() buildstream-1.6.9/buildstream/_artifactcache/000077500000000000000000000000001437515270000213015ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_artifactcache/__init__.py000066400000000000000000000016231437515270000234140ustar00rootroot00000000000000# # Copyright (C) 2017-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .artifactcache import ArtifactCache, ArtifactCacheSpec, CACHE_SIZE_FILE from .artifactcache import ArtifactCacheUsage buildstream-1.6.9/buildstream/_artifactcache/artifactcache.py000066400000000000000000001104111437515270000244320ustar00rootroot00000000000000# # Copyright (C) 2017-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat import multiprocessing import os import signal import string from collections import namedtuple from collections.abc import Mapping from ..types import _KeyStrength from .._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason from .._message import Message, MessageType from .. import _signals from .. import utils from .. import _yaml from .cascache import CASCache, CASRemote, BlobNotFound CACHE_SIZE_FILE = "cache_size" # An ArtifactCacheSpec holds the user configuration for a single remote # artifact cache. # # Args: # url (str): Location of the remote artifact cache # push (bool): Whether we should attempt to push artifacts to this cache, # in addition to pulling from it. 
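#
# For illustration, a spec of this kind is parsed from a user or project
# configuration node of the following shape (the values here are
# hypothetical; the recognized keys are the ones validated in
# _new_from_config_node() below):
#
#     artifacts:
#       url: https://cache.example.com:11001
#       push: true
#       server-cert: server.crt
#       client-key: client.key
#       client-cert: client.crt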
# class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert client_key client_cert')): # _new_from_config_node # # Creates an ArtifactCacheSpec() from a YAML loaded node # @staticmethod def _new_from_config_node(spec_node, basedir=None): _yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert']) url = _yaml.node_get(spec_node, str, 'url') push = _yaml.node_get(spec_node, bool, 'push', default_value=False) if not url: provenance = _yaml.node_get_provenance(spec_node, 'url') raise LoadError(LoadErrorReason.INVALID_DATA, "{}: empty artifact cache URL".format(provenance)) server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None) if server_cert and basedir: server_cert = os.path.join(basedir, server_cert) client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None) if client_key and basedir: client_key = os.path.join(basedir, client_key) client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None) if client_cert and basedir: client_cert = os.path.join(basedir, client_cert) if client_key and not client_cert: provenance = _yaml.node_get_provenance(spec_node, 'client-key') raise LoadError(LoadErrorReason.INVALID_DATA, "{}: 'client-key' was specified without 'client-cert'".format(provenance)) if client_cert and not client_key: provenance = _yaml.node_get_provenance(spec_node, 'client-cert') raise LoadError(LoadErrorReason.INVALID_DATA, "{}: 'client-cert' was specified without 'client-key'".format(provenance)) return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert) ArtifactCacheSpec.__new__.__defaults__ = (None, None, None) # ArtifactCacheUsage # # A simple object to report the current artifact cache # usage details. # # Note that this uses the user configured cache quota # rather than the internal quota with protective headroom # removed, to provide a more sensible value to display to # the user. # # Args: # artifacts (ArtifactCache): The artifact cache to get the status of # class ArtifactCacheUsage(): def __init__(self, artifacts): context = artifacts.context self.quota_config = context.config_cache_quota # Configured quota self.quota_size = artifacts._cache_quota_original # Resolved cache quota in bytes self.used_size = artifacts.get_cache_size() # Size used by artifacts in bytes self.used_percent = 0 # Percentage of the quota used if self.quota_size is not None: self.used_percent = int(self.used_size * 100 / self.quota_size) # Formattable into a human readable string # def __str__(self): return "{} / {} ({}%)" \ .format(utils._pretty_size(self.used_size, dec_places=1), self.quota_config, self.used_percent) # An ArtifactCache manages artifacts. # # Args: # context (Context): The BuildStream context # class ArtifactCache(): def __init__(self, context): self.context = context self.extractdir = os.path.join(context.artifactdir, 'extract') self.cas = CASCache(context.artifactdir) self.global_remote_specs = [] self.project_remote_specs = {} self._required_elements = set() # The elements required for this session self._cache_size = None # The current cache size, sometimes it's an estimate self._cache_quota = None # The cache quota self._cache_quota_original = None # The cache quota as specified by the user, in bytes self._cache_lower_threshold = None # The target cache size for a cleanup # Per-project list of _CASRemote instances. 
self._remotes = {} self._has_fetch_remotes = False self._has_push_remotes = False os.makedirs(self.extractdir, exist_ok=True) self._calculate_cache_quota() # get_artifact_fullname() # # Generate a full name for an artifact, including the # project namespace, element name and cache key. # # This can also be used as a relative path safely, and # will normalize parts of the element name such that only # digits, letters and some select characters are allowed. # # Args: # element (Element): The Element object # key (str): The element's cache key # # Returns: # (str): The relative path for the artifact # def get_artifact_fullname(self, element, key): project = element._get_project() # Normalize ostree ref unsupported chars valid_chars = string.digits + string.ascii_letters + '-._' element_name = ''.join([ x if x in valid_chars else '_' for x in element.normal_name ]) assert key is not None # assume project and element names are not allowed to contain slashes return '{0}/{1}/{2}'.format(project.name, element_name, key) # setup_remotes(): # # Sets up which remotes to use # # Args: # use_config (bool): Whether to use project configuration # remote_url (str): Remote artifact cache URL # # This requires that all of the projects which are to be processed in the session # have already been loaded and are observable in the Context. # def setup_remotes(self, *, use_config=False, remote_url=None): # Initialize remote artifact caches. We allow the commandline to override # the user config in some cases (for example `bst push --remote=...`). has_remote_caches = False if remote_url: self._set_remotes([ArtifactCacheSpec(remote_url, push=True)]) has_remote_caches = True if use_config: for project in self.context.get_projects(): artifact_caches = _configured_remote_artifact_cache_specs(self.context, project) if artifact_caches: # artifact_caches is a list of ArtifactCacheSpec instances self._set_remotes(artifact_caches, project=project) has_remote_caches = True if has_remote_caches: self._initialize_remotes() # specs_from_config_node() # # Parses the configuration of remote artifact caches from a config block. # # Args: # config_node (dict): The config block, which may contain the 'artifacts' key # basedir (str): The base directory for relative paths # # Returns: # A list of ArtifactCacheSpec instances. # # Raises: # LoadError, if the config block contains invalid keys. # @staticmethod def specs_from_config_node(config_node, basedir=None): cache_specs = [] artifacts = config_node.get('artifacts', []) if isinstance(artifacts, Mapping): cache_specs.append(ArtifactCacheSpec._new_from_config_node(artifacts, basedir)) elif isinstance(artifacts, list): for spec_node in artifacts: cache_specs.append(ArtifactCacheSpec._new_from_config_node(spec_node, basedir)) else: provenance = _yaml.node_get_provenance(config_node, key='artifacts') raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA, "%s: 'artifacts' must be a single 'url:' mapping, or a list of mappings" % (str(provenance))) return cache_specs # mark_required_elements(): # # Mark elements whose artifacts are required for the current run. # # Artifacts whose elements are in this list will be locked by the artifact # cache and not touched for the duration of the current pipeline. # # Args: # elements (iterable): A set of elements to mark as required # def mark_required_elements(self, elements): # We risk calling this function with a generator, so we # better consume it first. # elements = list(elements) # Mark the elements as required. 
We cannot know that we know the # cache keys yet, so we only check that later when deleting. # self._required_elements.update(elements) # For the cache keys which were resolved so far, we bump # the mtime of them. # # This is just in case we have concurrent instances of # BuildStream running with the same artifact cache, it will # reduce the likelyhood of one instance deleting artifacts # which are required by the other. for element in elements: strong_key = element._get_cache_key(strength=_KeyStrength.STRONG) weak_key = element._get_cache_key(strength=_KeyStrength.WEAK) for key in (strong_key, weak_key): if key: try: ref = self.get_artifact_fullname(element, key) self.cas.update_mtime(ref) except CASError: pass # clean(): # # Clean the artifact cache as much as possible. # # Args: # progress (callable): A callback to call when a ref is removed # # Returns: # (int): The size of the cache after having cleaned up # def clean(self, progress=None): artifacts = self.list_artifacts() context = self.context # Some accumulative statistics removed_ref_count = 0 space_saved = 0 # Start off with an announcement with as much info as possible volume_size, volume_avail = self._get_cache_volume_size() self._message(MessageType.STATUS, "Starting cache cleanup", detail=("Elements required by the current build plan: {}\n" + "User specified quota: {} ({})\n" + "Cache usage: {}\n" + "Cache volume: {} total, {} available") .format(len(self._required_elements), context.config_cache_quota, utils._pretty_size(self._cache_quota_original, dec_places=2), utils._pretty_size(self.get_cache_size(), dec_places=2), utils._pretty_size(volume_size, dec_places=2), utils._pretty_size(volume_avail, dec_places=2))) # Build a set of the cache keys which are required # based on the required elements at cleanup time # # We lock both strong and weak keys - deleting one but not the # other won't save space, but would be a user inconvenience. required_artifacts = set() for element in self._required_elements: required_artifacts.update([ element._get_cache_key(strength=_KeyStrength.STRONG), element._get_cache_key(strength=_KeyStrength.WEAK) ]) # Do a real computation of the cache size once, just in case self.compute_cache_size() while self.get_cache_size() >= self._cache_lower_threshold: try: to_remove = artifacts.pop(0) except IndexError as e: # If too many artifacts are required, and we therefore # can't remove them, we have to abort the build. # # FIXME: Asking the user what to do may be neater # default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'], 'buildstream.conf') detail = ("Aborted after removing {} refs and saving {} disk space.\n" "The remaining {} in the cache is required by the {} elements in your build plan\n\n" "There is not enough space to complete the build.\n" "Please increase the cache-quota in {} and/or make more disk space." .format(removed_ref_count, utils._pretty_size(space_saved, dec_places=2), utils._pretty_size(self.get_cache_size(), dec_places=2), len(self._required_elements), (context.config_origin or default_conf))) if self.has_quota_exceeded(): raise ArtifactError("Cache too full. Aborting.", detail=detail, reason="cache-too-full") from e break key = to_remove.rpartition('/')[2] if key not in required_artifacts: # Remove the actual artifact, if it's not required. 
size = self.remove(to_remove) removed_ref_count += 1 space_saved += size self._message(MessageType.STATUS, "Freed {: <7} {}".format( utils._pretty_size(size, dec_places=2), to_remove)) # Remove the size from the removed size self.set_cache_size(self._cache_size - size) # User callback # # Currently this process is fairly slow, but we should # think about throttling this progress() callback if this # becomes too intense. if progress: progress() # Informational message about the side effects of the cleanup self._message(MessageType.INFO, "Cleanup completed", detail=("Removed {} refs and saving {} disk space.\n" + "Cache usage is now: {}") .format(removed_ref_count, utils._pretty_size(space_saved, dec_places=2), utils._pretty_size(self.get_cache_size(), dec_places=2))) return self.get_cache_size() # compute_cache_size() # # Computes the real artifact cache size by calling # the abstract calculate_cache_size() method. # # Returns: # (int): The size of the artifact cache. # def compute_cache_size(self): old_cache_size = self._cache_size new_cache_size = self.cas.calculate_cache_size() if old_cache_size != new_cache_size: self._cache_size = new_cache_size usage = ArtifactCacheUsage(self) self._message(MessageType.STATUS, "Cache usage recomputed: {}".format(usage)) return self._cache_size # add_artifact_size() # # Adds the reported size of a newly cached artifact to the # overall estimated size. # # Args: # artifact_size (int): The size to add. # def add_artifact_size(self, artifact_size): cache_size = self.get_cache_size() cache_size += artifact_size self.set_cache_size(cache_size) # get_cache_size() # # Fetches the cached size of the cache, this is sometimes # an estimate and periodically adjusted to the real size # when a cache size calculation job runs. # # When it is an estimate, the value is either correct, or # it is greater than the actual cache size. # # Returns: # (int) An approximation of the artifact cache size, in bytes. # def get_cache_size(self): # If we don't currently have an estimate, figure out the real cache size. if self._cache_size is None: stored_size = self._read_cache_size() if stored_size is not None: self._cache_size = stored_size else: self.compute_cache_size() # Computing cache doesn't actually write the value. # Write cache size explicitly here since otherwise # in some cases it's not stored on disk. self.set_cache_size(self._cache_size) return self._cache_size # set_cache_size() # # Forcefully set the overall cache size. # # This is used to update the size in the main process after # having calculated in a cleanup or a cache size calculation job. # # Args: # cache_size (int): The size to set. # def set_cache_size(self, cache_size): assert cache_size is not None self._cache_size = cache_size self._write_cache_size(self._cache_size) # has_quota_exceeded() # # Checks if the current artifact cache size exceeds the quota. # # Returns: # (bool): True of the quota is exceeded # def has_quota_exceeded(self): return self.get_cache_size() > self._cache_quota # preflight(): # # Preflight check. # def preflight(self): self.cas.preflight() # initialize_remotes(): # # This will contact each remote cache. # # Args: # on_failure (callable): Called if we fail to contact one of the caches. 
# def initialize_remotes(self, *, on_failure=None): remote_specs = self.global_remote_specs for _, project_specs in self.project_remote_specs.items(): remote_specs += project_specs remote_specs = list(utils._deduplicate(remote_specs)) remotes = {} q = multiprocessing.Queue() for remote_spec in remote_specs: # Use subprocess to avoid creation of gRPC threads in main BuildStream process # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details p = multiprocessing.Process(target=self.cas.initialize_remote, args=(remote_spec, q)) try: # Keep SIGINT blocked in the child process with _signals.blocked([signal.SIGINT], ignore=False): p.start() error = q.get() p.join() except KeyboardInterrupt: utils._kill_process_tree(p.pid) raise if error and on_failure: on_failure(remote_spec.url, error) elif error: raise ArtifactError(error) else: self._has_fetch_remotes = True if remote_spec.push: self._has_push_remotes = True remotes[remote_spec.url] = CASRemote(remote_spec) for project in self.context.get_projects(): remote_specs = self.global_remote_specs if project in self.project_remote_specs: remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project])) project_remotes = [] for remote_spec in remote_specs: # Errors are already handled in the loop above, # skip unreachable remotes here. if remote_spec.url not in remotes: continue remote = remotes[remote_spec.url] project_remotes.append(remote) self._remotes[project] = project_remotes # contains(): # # Check whether the artifact for the specified Element is already available # in the local artifact cache. # # Args: # element (Element): The Element to check # key (str): The cache key to use # # Returns: True if the artifact is in the cache, False otherwise # def contains(self, element, key): ref = self.get_artifact_fullname(element, key) return self.cas.contains(ref) # list_artifacts(): # # List artifacts in this cache in LRU order. # # Returns: # ([str]) - A list of artifact names as generated by # `ArtifactCache.get_artifact_fullname` in LRU order # def list_artifacts(self): return self.cas.list_refs() # remove(): # # Removes the artifact for the specified ref from the local # artifact cache. # # Args: # ref (artifact_name): The name of the artifact to remove (as # generated by # `ArtifactCache.get_artifact_fullname`) # # Returns: # (int|None) The amount of space pruned from the repository in # Bytes, or None if defer_prune is True # def remove(self, ref): # Remove extract if not used by other ref tree = self.cas.resolve_ref(ref) ref_name, ref_hash = os.path.split(ref) extract = os.path.join(self.extractdir, ref_name, tree.hash) keys_file = os.path.join(extract, 'meta', 'keys.yaml') if os.path.exists(keys_file): keys_meta = _yaml.load(keys_file) keys = [keys_meta['strong'], keys_meta['weak']] remove_extract = True for other_hash in keys: if other_hash == ref_hash: continue remove_extract = False break if remove_extract: utils._force_rmtree(extract) return self.cas.remove(ref) # extract(): # # Extract cached artifact for the specified Element if it hasn't # already been extracted. # # Assumes artifact has previously been fetched or committed. # # Args: # element (Element): The Element to extract # key (str): The cache key to use # # Raises: # ArtifactError: In cases there was an OSError, or if the artifact # did not exist. 
# # Returns: path to extracted artifact # def extract(self, element, key): ref = self.get_artifact_fullname(element, key) path = os.path.join(self.extractdir, element._get_project().name, element.normal_name) return self.cas.extract(ref, path) # commit(): # # Commit built artifact to cache. # # Args: # element (Element): The Element commit an artifact for # content (str): The element's content directory # keys (list): The cache keys to use # def commit(self, element, content, keys): refs = [self.get_artifact_fullname(element, key) for key in keys] self.cas.commit(refs, content) # diff(): # # Return a list of files that have been added or modified between # the artifacts described by key_a and key_b. # # Args: # element (Element): The element whose artifacts to compare # key_a (str): The first artifact key # key_b (str): The second artifact key # subdir (str): A subdirectory to limit the comparison to # def diff(self, element, key_a, key_b, *, subdir=None): ref_a = self.get_artifact_fullname(element, key_a) ref_b = self.get_artifact_fullname(element, key_b) return self.cas.diff(ref_a, ref_b, subdir=subdir) # has_fetch_remotes(): # # Check whether any remote repositories are available for fetching. # # Args: # element (Element): The Element to check # # Returns: True if any remote repositories are configured, False otherwise # def has_fetch_remotes(self, *, element=None): if not self._has_fetch_remotes: # No project has fetch remotes return False elif element is None: # At least one (sub)project has fetch remotes return True else: # Check whether the specified element's project has fetch remotes remotes_for_project = self._remotes[element._get_project()] return bool(remotes_for_project) # has_push_remotes(): # # Check whether any remote repositories are available for pushing. # # Args: # element (Element): The Element to check # # Returns: True if any remote repository is configured, False otherwise # def has_push_remotes(self, *, element=None): if not self._has_push_remotes: # No project has push remotes return False elif element is None: # At least one (sub)project has push remotes return True else: # Check whether the specified element's project has push remotes remotes_for_project = self._remotes[element._get_project()] return any(remote.spec.push for remote in remotes_for_project) # push(): # # Push committed artifact to remote repository. # # Args: # element (Element): The Element whose artifact is to be pushed # keys (list): The cache keys to use # # Returns: # (bool): True if any remote was updated, False if no pushes were required # # Raises: # (ArtifactError): if there was an error # def push(self, element, keys): refs = [self.get_artifact_fullname(element, key) for key in list(keys)] project = element._get_project() push_remotes = [r for r in self._remotes[project] if r.spec.push] pushed = False for remote in push_remotes: remote.init() display_key = element._get_brief_display_key() element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url)) if self.cas.push(refs, remote): element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url)) pushed = True else: element.info("Remote ({}) already has {} cached".format( remote.spec.url, element._get_brief_display_key() )) return pushed # pull(): # # Pull artifact from one of the configured remote repositories. 
# # Args: # element (Element): The Element whose artifact is to be fetched # key (str): The cache key to use # progress (callable): The progress callback, if any # # Returns: # (bool): True if pull was successful, False if artifact was not available # def pull(self, element, key, *, progress=None): ref = self.get_artifact_fullname(element, key) display_key = key[:self.context.log_key_length] project = element._get_project() for remote in self._remotes[project]: try: element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url)) if self.cas.pull(ref, remote, progress=progress): element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url)) # no need to pull from additional remotes return True else: element.info("Remote ({}) does not have {} cached".format( remote.spec.url, display_key )) except BlobNotFound: element.info("Remote ({}) does not have {} cached".format( remote.spec.url, display_key )) except CASError as e: raise ArtifactError("Failed to pull artifact {}: {}".format( display_key, e)) from e return False # link_key(): # # Add a key for an existing artifact. # # Args: # element (Element): The Element whose artifact is to be linked # oldkey (str): An existing cache key for the artifact # newkey (str): A new cache key for the artifact # def link_key(self, element, oldkey, newkey): oldref = self.get_artifact_fullname(element, oldkey) newref = self.get_artifact_fullname(element, newkey) self.cas.link_ref(oldref, newref) ################################################ # Local Private Methods # ################################################ # _message() # # Local message propagator # def _message(self, message_type, message, **kwargs): args = dict(kwargs) self.context.message( Message(None, message_type, message, **args)) # _set_remotes(): # # Set the list of remote caches. If project is None, the global list of # remote caches will be set, which is used by all projects. If a project is # specified, the per-project list of remote caches will be set. # # Args: # remote_specs (list): List of ArtifactCacheSpec instances, in priority order. 
# project (Project): The Project instance for project-specific remotes def _set_remotes(self, remote_specs, *, project=None): if project is None: # global remotes self.global_remote_specs = remote_specs else: self.project_remote_specs[project] = remote_specs # _initialize_remotes() # # An internal wrapper which calls the abstract method and # reports takes care of messaging # def _initialize_remotes(self): def remote_failed(url, error): self._message(MessageType.WARN, "Failed to initialize remote {}: {}".format(url, error)) with self.context.timed_activity("Initializing remote caches", silent_nested=True): self.initialize_remotes(on_failure=remote_failed) # _write_cache_size() # # Writes the given size of the artifact to the cache's size file # # Args: # size (int): The size of the artifact cache to record # def _write_cache_size(self, size): assert isinstance(size, int) size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE) with utils.save_file_atomic(size_file_path, "w") as f: f.write(str(size)) # _read_cache_size() # # Reads and returns the size of the artifact cache that's stored in the # cache's size file # # Returns: # (int): The size of the artifact cache, as recorded in the file # def _read_cache_size(self): size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE) try: with open(size_file_path, "r", encoding="utf-8") as f: size = f.read() except FileNotFoundError: return None try: num_size = int(size) except ValueError: self._message(MessageType.WARN, "Failure resolving cache size", detail="Size '{}' parsed from '{}' was not an integer" .format(size, size_file_path)) return None else: return num_size # _calculate_cache_quota() # # Calculates and sets the cache quota and lower threshold based on the # quota set in Context. # It checks that the quota is both a valid expression, and that there is # enough disk space to satisfy that quota # def _calculate_cache_quota(self): # Headroom intended to give BuildStream a bit of leeway. # This acts as the minimum size of cache_quota and also # is taken from the user requested cache_quota. # if 'BST_TEST_SUITE' in os.environ: headroom = 0 else: headroom = 2e9 try: cache_quota = utils._parse_size(self.context.config_cache_quota, self.context.artifactdir) except utils.UtilError as e: raise LoadError(LoadErrorReason.INVALID_DATA, "{}\nPlease specify the value in bytes or as a % of full disk space.\n" "\nValid values are, for example: 800M 10G 1T 50%\n" .format(str(e))) from e total_size, available_space = self._get_cache_volume_size() cache_size = self.get_cache_size() # Ensure system has enough storage for the cache_quota # # If cache_quota is none, set it to the maximum it could possibly be. # # Also check that cache_quota is at least as large as our headroom. 
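        #
        # A small worked example of the checks and derived values below,
        # using illustrative numbers only: with a 10G quota and the default
        # 2e9 headroom, the effective _cache_quota ends up at roughly 8G and
        # _cache_lower_threshold at about 4G. A quota smaller than the 2G
        # headroom is rejected, and a quota larger than
        # cache_size + available_space raises an ArtifactError.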
# if cache_quota is None: # Infinity, set to max system storage cache_quota = cache_size + available_space if cache_quota < headroom: # Check minimum raise LoadError(LoadErrorReason.INVALID_DATA, "Invalid cache quota ({}): ".format(utils._pretty_size(cache_quota)) + "BuildStream requires a minimum cache quota of 2G.") if cache_quota > cache_size + available_space: # Check maximum if '%' in self.context.config_cache_quota: available = (available_space / total_size) * 100 available = '{}% of total disk space'.format(round(available, 1)) else: available = utils._pretty_size(available_space) raise ArtifactError("Your system does not have enough available " + "space to support the cache quota specified.", detail=("You have specified a quota of {quota} total disk space.\n" + "The filesystem containing {local_cache_path} only " + "has {available_size} available.") .format( quota=self.context.config_cache_quota, local_cache_path=self.context.artifactdir, available_size=available), reason='insufficient-storage-for-quota') # Place a slight headroom (2e9 (2GB) on the cache_quota) into # cache_quota to try and avoid exceptions. # # Of course, we might still end up running out during a build # if we end up writing more than 2G, but hey, this stuff is # already really fuzzy. # self._cache_quota_original = cache_quota self._cache_quota = cache_quota - headroom self._cache_lower_threshold = self._cache_quota / 2 # _get_cache_volume_size() # # Get the available space and total space for the volume on # which the artifact cache is located. # # Returns: # (int): The total number of bytes on the volume # (int): The number of available bytes on the volume # # NOTE: We use this stub to allow the test cases # to override what an artifact cache thinks # about it's disk size and available bytes. # def _get_cache_volume_size(self): return utils._get_volume_size(self.context.artifactdir) # _configured_remote_artifact_cache_specs(): # # Return the list of configured artifact remotes for a given project, in priority # order. This takes into account the user and project configuration. # # Args: # context (Context): The BuildStream context # project (Project): The BuildStream project # # Returns: # A list of ArtifactCacheSpec instances describing the remote artifact caches. # def _configured_remote_artifact_cache_specs(context, project): project_overrides = context.get_overrides(project.name) project_extra_specs = ArtifactCache.specs_from_config_node(project_overrides) return list(utils._deduplicate( project_extra_specs + project.artifact_cache_specs + context.artifact_cache_specs)) buildstream-1.6.9/buildstream/_artifactcache/cascache.py000066400000000000000000001325201437515270000234100ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Jürg Billeter import hashlib import itertools import os import stat import tempfile import uuid import errno import contextlib from urllib.parse import urlparse import grpc from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc from .._protos.build.bazel.remote.asset.v1 import remote_asset_pb2, remote_asset_pb2_grpc from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc from .. import utils from .._exceptions import CASError # The default limit for gRPC messages is 4 MiB. # Limit payload to 1 MiB to leave sufficient headroom for metadata. _MAX_PAYLOAD_BYTES = 1024 * 1024 # How often is a keepalive ping sent to the server to make sure the transport is still alive _KEEPALIVE_TIME_MS = 60000 REMOTE_ASSET_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:v1:{}" class _Attempt(): def __init__(self, last_attempt=False): self.__passed = None self.__last_attempt = last_attempt def passed(self): return self.__passed def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): try: if exc_type is None: self.__passed = True else: self.__passed = False if exc_value is not None: raise exc_value except grpc.RpcError as e: if e.code() == grpc.StatusCode.UNAVAILABLE: return not self.__last_attempt elif e.code() == grpc.StatusCode.ABORTED: raise CASError("grpc aborted: {}".format(str(e)), detail=e.details(), temporary=True) from e else: return False return False def _retry(tries=5): for a in range(tries): attempt = _Attempt(last_attempt=(a == tries - 1)) yield attempt if attempt.passed(): break class BlobNotFound(CASError): def __init__(self, blob, msg): self.blob = blob super().__init__(msg) # A CASCache manages a CAS repository as specified in the Remote Execution API. # # Args: # path (str): The root directory for the CAS repository # class CASCache(): def __init__(self, path): self.casdir = os.path.join(path, 'cas') self.tmpdir = os.path.join(path, 'tmp') os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True) os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True) os.makedirs(self.tmpdir, exist_ok=True) # preflight(): # # Preflight check. # def preflight(self): if (not os.path.isdir(os.path.join(self.casdir, 'refs', 'heads')) or not os.path.isdir(os.path.join(self.casdir, 'objects'))): raise CASError("CAS repository check failed for '{}'".format(self.casdir)) # contains(): # # Check whether the specified ref is already available in the local CAS cache. # # Args: # ref (str): The ref to check # # Returns: True if the ref is in the cache, False otherwise # def contains(self, ref): refpath = self._refpath(ref) # This assumes that the repository doesn't have any dangling pointers return os.path.exists(refpath) # extract(): # # Extract cached directory for the specified ref if it hasn't # already been extracted. # # Args: # ref (str): The ref whose directory to extract # path (str): The destination path # # Raises: # CASError: In cases there was an OSError, or if the ref did not exist. 
# # Returns: path to extracted directory # def extract(self, ref, path): tree = self.resolve_ref(ref, update_mtime=True) dest = os.path.join(path, tree.hash) if os.path.isdir(dest): # directory has already been extracted return dest with tempfile.TemporaryDirectory(prefix='tmp', dir=self.tmpdir) as tmpdir: checkoutdir = os.path.join(tmpdir, ref) self._checkout(checkoutdir, tree) os.makedirs(os.path.dirname(dest), exist_ok=True) try: os.rename(checkoutdir, dest) except OSError as e: # With rename it's possible to get either ENOTEMPTY or EEXIST # in the case that the destination path is a not empty directory. # # If rename fails with these errors, another process beat # us to it so just ignore. if e.errno not in [errno.ENOTEMPTY, errno.EEXIST]: raise CASError("Failed to extract directory for ref '{}': {}".format(ref, e)) from e return dest # commit(): # # Commit directory to cache. # # Args: # refs (list): The refs to set # path (str): The directory to import # def commit(self, refs, path): tree = self._commit_directory(path) for ref in refs: self.set_ref(ref, tree) # diff(): # # Return a list of files that have been added or modified between # the refs described by ref_a and ref_b. # # Args: # ref_a (str): The first ref # ref_b (str): The second ref # subdir (str): A subdirectory to limit the comparison to # def diff(self, ref_a, ref_b, *, subdir=None): tree_a = self.resolve_ref(ref_a) tree_b = self.resolve_ref(ref_b) if subdir: tree_a = self._get_subdir(tree_a, subdir) tree_b = self._get_subdir(tree_b, subdir) added = [] removed = [] modified = [] self._diff_trees(tree_a, tree_b, added=added, removed=removed, modified=modified) return modified, removed, added def initialize_remote(self, remote_spec, q): try: remote = CASRemote(remote_spec) remote.init() if remote.asset_fetch_supported: if remote_spec.push and not remote.asset_push_supported: q.put('Remote Asset server does not allow push') else: # No error q.put(None) else: request = buildstream_pb2.StatusRequest() for attempt in _retry(): with attempt: response = remote.ref_storage.Status(request) if remote_spec.push and not response.allow_updates: q.put('CAS server does not allow push') else: # No error q.put(None) except grpc.RpcError as e: # str(e) is too verbose for errors reported to the user q.put(e.details()) except Exception as e: # pylint: disable=broad-except # Whatever happens, we need to return it to the calling process # q.put(str(e)) # pull(): # # Pull a ref from a remote repository. 
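    #
    # When the remote advertises the Remote Asset API, the ref is resolved
    # with FetchDirectory(); otherwise the BuildStream ReferenceStorage
    # service is used as a fallback, and the referenced directory tree is
    # then fetched into the local store. Illustrative sketch (the `cas`,
    # `spec` and `ref` names are assumptions for the example):
    #
    #     remote = CASRemote(spec)
    #     if cas.pull(ref, remote):
    #         digest = cas.resolve_ref(ref)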
# # Args: # ref (str): The ref to pull # remote (CASRemote): The remote repository to pull from # progress (callable): The progress callback, if any # # Returns: # (bool): True if pull was successful, False if ref was not available # def pull(self, ref, remote, *, progress=None): try: remote.init() if remote.asset_fetch_supported: request = remote_asset_pb2.FetchDirectoryRequest() request.uris.append(REMOTE_ASSET_URN_TEMPLATE.format(ref)) for attempt in _retry(): with attempt: response = remote.remote_asset_fetch.FetchDirectory(request) digest = response.root_directory_digest else: request = buildstream_pb2.GetReferenceRequest() request.key = ref for attempt in _retry(): with attempt: response = remote.ref_storage.GetReference(request) digest = response.digest tree = remote_execution_pb2.Digest() tree.hash = digest.hash tree.size_bytes = digest.size_bytes self._fetch_directory(remote, tree) self.set_ref(ref, tree) return True except grpc.RpcError as e: if e.code() != grpc.StatusCode.NOT_FOUND: raise CASError("Failed to pull ref {}: {}".format(ref, e)) from e return False # link_ref(): # # Add an alias for an existing ref. # # Args: # oldref (str): An existing ref # newref (str): A new ref for the same directory # def link_ref(self, oldref, newref): tree = self.resolve_ref(oldref) self.set_ref(newref, tree) # push(): # # Push committed refs to remote repository. # # Args: # refs (list): The refs to push # remote (CASRemote): The remote to push to # # Returns: # (bool): True if any remote was updated, False if no pushes were required # # Raises: # (CASError): if there was an error # def push(self, refs, remote): skipped_remote = True try: for ref in refs: tree = self.resolve_ref(ref) # Check whether ref is already on the server in which case # there is no need to push the ref try: if remote.asset_fetch_supported: request = remote_asset_pb2.FetchDirectoryRequest() request.uris.append(REMOTE_ASSET_URN_TEMPLATE.format(ref)) for attempt in _retry(): with attempt: response = remote.remote_asset_fetch.FetchDirectory(request) digest = response.root_directory_digest else: request = buildstream_pb2.GetReferenceRequest() request.key = ref for attempt in _retry(): with attempt: response = remote.ref_storage.GetReference(request) digest = response.digest if digest.hash == tree.hash and digest.size_bytes == tree.size_bytes: # ref is already on the server with the same tree continue except grpc.RpcError as e: if e.code() != grpc.StatusCode.NOT_FOUND: # Intentionally re-raise RpcError for outer except block. raise self._send_directory(remote, tree) if remote.asset_push_supported: request = remote_asset_pb2.PushDirectoryRequest() request.uris.append(REMOTE_ASSET_URN_TEMPLATE.format(ref)) request.root_directory_digest.hash = tree.hash request.root_directory_digest.size_bytes = tree.size_bytes for attempt in _retry(): with attempt: remote.remote_asset_push.PushDirectory(request) else: request = buildstream_pb2.UpdateReferenceRequest() request.keys.append(ref) request.digest.hash = tree.hash request.digest.size_bytes = tree.size_bytes for attempt in _retry(): with attempt: remote.ref_storage.UpdateReference(request) skipped_remote = False except grpc.RpcError as e: if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED: raise CASError("Failed to push ref {}: {}".format(refs, e), temporary=True) from e return not skipped_remote # objpath(): # # Return the path of an object based on its digest. 
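    #
    # Objects are stored fan-out style under 'objects/', keyed by the first
    # two characters of the hash. For example, a digest whose hash starts
    # with 'abcdef' (shortened here for illustration) maps to:
    #
    #     <casdir>/objects/ab/cdef...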
# # Args: # digest (Digest): The digest of the object # # Returns: # (str): The path of the object # def objpath(self, digest): return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:]) # add_object(): # # Hash and write object to CAS. # # Args: # digest (Digest): An optional Digest object to populate # path (str): Path to file to add # buffer (bytes): Byte buffer to add # link_directly (bool): Whether file given by path can be linked # # Returns: # (Digest): The digest of the added object # # Either `path` or `buffer` must be passed, but not both. # def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False): # Exactly one of the two parameters has to be specified assert (path is None) != (buffer is None) if digest is None: digest = remote_execution_pb2.Digest() try: h = hashlib.sha256() # Always write out new file to avoid corruption if input file is modified with contextlib.ExitStack() as stack: if path is not None and link_directly: tmp = stack.enter_context(open(path, 'rb')) for chunk in iter(lambda: tmp.read(4096), b""): h.update(chunk) else: tmp = stack.enter_context(self._temporary_object()) if path: with open(path, 'rb') as f: for chunk in iter(lambda: f.read(4096), b""): h.update(chunk) tmp.write(chunk) else: h.update(buffer) tmp.write(buffer) tmp.flush() digest.hash = h.hexdigest() digest.size_bytes = os.fstat(tmp.fileno()).st_size # Place file at final location objpath = self.objpath(digest) os.makedirs(os.path.dirname(objpath), exist_ok=True) os.link(tmp.name, objpath) except FileExistsError: # We can ignore the failed link() if the object is already in the repo. pass except OSError as e: raise CASError("Failed to hash object: {}".format(e)) from e return digest # set_ref(): # # Create or replace a ref. # # Args: # ref (str): The name of the ref # def set_ref(self, ref, tree): refpath = self._refpath(ref) os.makedirs(os.path.dirname(refpath), exist_ok=True) with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f: f.write(tree.SerializeToString()) # resolve_ref(): # # Resolve a ref to a digest. # # Args: # ref (str): The name of the ref # update_mtime (bool): Whether to update the mtime of the ref # # Returns: # (Digest): The digest stored in the ref # def resolve_ref(self, ref, *, update_mtime=False): refpath = self._refpath(ref) try: with open(refpath, 'rb') as f: if update_mtime: os.utime(refpath) digest = remote_execution_pb2.Digest() digest.ParseFromString(f.read()) return digest except FileNotFoundError as e: raise CASError("Attempt to access unavailable ref: {}".format(e)) from e # update_mtime() # # Update the mtime of a ref. # # Args: # ref (str): The ref to update # def update_mtime(self, ref): try: os.utime(self._refpath(ref)) except FileNotFoundError as e: raise CASError("Attempt to access unavailable ref: {}".format(e)) from e # calculate_cache_size() # # Return the real disk usage of the CAS cache. # # Returns: # (int): The size of the cache. # def calculate_cache_size(self): return utils._get_dir_size(self.casdir) # list_refs(): # # List refs in Least Recently Modified (LRM) order. 
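    #
    # The ordering is derived from the mtime of each ref file under
    # 'refs/heads': the first entry is the ref touched longest ago and the
    # last entry is the most recently used one.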
# # Returns: # (list) - A list of refs in LRM order # def list_refs(self): # string of: /path/to/repo/refs/heads ref_heads = os.path.join(self.casdir, 'refs', 'heads') refs = [] mtimes = [] for root, _, files in os.walk(ref_heads): for filename in files: ref_path = os.path.join(root, filename) refs.append(os.path.relpath(ref_path, ref_heads)) # Obtain the mtime (the time a file was last modified) mtimes.append(os.path.getmtime(ref_path)) # NOTE: Sorted will sort from earliest to latest, thus the # first ref of this list will be the file modified earliest. return [ref for _, ref in sorted(zip(mtimes, refs))] # list_objects(): # # List cached objects in Least Recently Modified (LRM) order. # # Returns: # (list) - A list of objects and timestamps in LRM order # def list_objects(self): objs = [] mtimes = [] for root, _, files in os.walk(os.path.join(self.casdir, 'objects')): for filename in files: obj_path = os.path.join(root, filename) try: mtimes.append(os.path.getmtime(obj_path)) except FileNotFoundError: pass else: objs.append(obj_path) # NOTE: Sorted will sort from earliest to latest, thus the # first element of this list will be the file modified earliest. return sorted(zip(mtimes, objs)) def clean_up_refs_until(self, time): ref_heads = os.path.join(self.casdir, 'refs', 'heads') for root, _, files in os.walk(ref_heads): for filename in files: ref_path = os.path.join(root, filename) # Obtain the mtime (the time a file was last modified) if os.path.getmtime(ref_path) < time: os.unlink(ref_path) # remove(): # # Removes the given symbolic ref from the repo. # # Args: # ref (str): A symbolic ref # defer_prune (bool): Whether to defer pruning to the caller. NOTE: # The space won't be freed until you manually # call prune. # # Returns: # (int|None) The amount of space pruned from the repository in # Bytes, or None if defer_prune is True # def remove(self, ref, *, defer_prune=False): # Remove cache ref refpath = self._refpath(ref) if not os.path.exists(refpath): raise CASError("Could not find ref '{}'".format(ref)) os.unlink(refpath) if not defer_prune: pruned = self.prune() return pruned return None # prune(): # # Prune unreachable objects from the repo. 
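    #
    # This is a simple mark-and-sweep pass: every digest reachable from a
    # ref under 'refs/heads' is collected into a set, and any object file
    # whose hash is not in that set is unlinked, returning the number of
    # bytes freed. Illustrative usage (the `cas` name is an assumption for
    # the example):
    #
    #     cas.remove(ref, defer_prune=True)
    #     freed = cas.prune()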
# def prune(self): ref_heads = os.path.join(self.casdir, 'refs', 'heads') pruned = 0 reachable = set() # Check which objects are reachable for root, _, files in os.walk(ref_heads): for filename in files: ref_path = os.path.join(root, filename) ref = os.path.relpath(ref_path, ref_heads) tree = self.resolve_ref(ref) self._reachable_refs_dir(reachable, tree) # Prune unreachable objects for root, _, files in os.walk(os.path.join(self.casdir, 'objects')): for filename in files: objhash = os.path.basename(root) + filename if objhash not in reachable: obj_path = os.path.join(root, filename) pruned += os.stat(obj_path).st_size os.unlink(obj_path) return pruned def update_tree_mtime(self, tree): reachable = set() self._reachable_refs_dir(reachable, tree, update_mtime=True) ################################################ # Local Private Methods # ################################################ def _checkout(self, dest, tree): os.makedirs(dest, exist_ok=True) directory = remote_execution_pb2.Directory() with open(self.objpath(tree), 'rb') as f: directory.ParseFromString(f.read()) for filenode in directory.files: # regular file, create hardlink fullpath = os.path.join(dest, filenode.name) os.link(self.objpath(filenode.digest), fullpath) if filenode.is_executable: os.chmod(fullpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) for dirnode in directory.directories: fullpath = os.path.join(dest, dirnode.name) self._checkout(fullpath, dirnode.digest) for symlinknode in directory.symlinks: # symlink fullpath = os.path.join(dest, symlinknode.name) os.symlink(symlinknode.target, fullpath) def _refpath(self, ref): return os.path.join(self.casdir, 'refs', 'heads', ref) # _commit_directory(): # # Adds local directory to content addressable store. # # Adds files, symbolic links and recursively other directories in # a local directory to the content addressable store. # # Args: # path (str): Path to the directory to add. # dir_digest (Digest): An optional Digest object to use. # # Returns: # (Digest): Digest object for the directory added. 
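    #
    # The directory is encoded bottom-up: regular files and symlinks are
    # recorded as they are found, subdirectories are committed recursively
    # first, and the serialized Directory protobuf itself is added as the
    # final object, whose digest becomes the return value.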
# def _commit_directory(self, path, *, dir_digest=None): directory = remote_execution_pb2.Directory() for name in sorted(os.listdir(path)): full_path = os.path.join(path, name) mode = os.lstat(full_path).st_mode if stat.S_ISDIR(mode): dirnode = directory.directories.add() dirnode.name = name self._commit_directory(full_path, dir_digest=dirnode.digest) elif stat.S_ISREG(mode): filenode = directory.files.add() filenode.name = name self.add_object(path=full_path, digest=filenode.digest) filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR elif stat.S_ISLNK(mode): symlinknode = directory.symlinks.add() symlinknode.name = name symlinknode.target = os.readlink(full_path) else: raise CASError("Unsupported file type for {}".format(full_path)) return self.add_object(digest=dir_digest, buffer=directory.SerializeToString()) def _get_subdir(self, tree, subdir): head, name = os.path.split(subdir) if head: tree = self._get_subdir(tree, head) directory = remote_execution_pb2.Directory() with open(self.objpath(tree), 'rb') as f: directory.ParseFromString(f.read()) for dirnode in directory.directories: if dirnode.name == name: return dirnode.digest raise CASError("Subdirectory {} not found".format(name)) def _diff_trees(self, tree_a, tree_b, *, added, removed, modified, path=""): dir_a = remote_execution_pb2.Directory() dir_b = remote_execution_pb2.Directory() if tree_a: with open(self.objpath(tree_a), 'rb') as f: dir_a.ParseFromString(f.read()) if tree_b: with open(self.objpath(tree_b), 'rb') as f: dir_b.ParseFromString(f.read()) a = 0 b = 0 while a < len(dir_a.files) or b < len(dir_b.files): if b < len(dir_b.files) and (a >= len(dir_a.files) or dir_a.files[a].name > dir_b.files[b].name): added.append(os.path.join(path, dir_b.files[b].name)) b += 1 elif a < len(dir_a.files) and (b >= len(dir_b.files) or dir_b.files[b].name > dir_a.files[a].name): removed.append(os.path.join(path, dir_a.files[a].name)) a += 1 else: # File exists in both directories if dir_a.files[a].digest.hash != dir_b.files[b].digest.hash: modified.append(os.path.join(path, dir_a.files[a].name)) a += 1 b += 1 a = 0 b = 0 while a < len(dir_a.directories) or b < len(dir_b.directories): if b < len(dir_b.directories) and (a >= len(dir_a.directories) or dir_a.directories[a].name > dir_b.directories[b].name): self._diff_trees(None, dir_b.directories[b].digest, added=added, removed=removed, modified=modified, path=os.path.join(path, dir_b.directories[b].name)) b += 1 elif a < len(dir_a.directories) and (b >= len(dir_b.directories) or dir_b.directories[b].name > dir_a.directories[a].name): self._diff_trees(dir_a.directories[a].digest, None, added=added, removed=removed, modified=modified, path=os.path.join(path, dir_a.directories[a].name)) a += 1 else: # Subdirectory exists in both directories if dir_a.directories[a].digest.hash != dir_b.directories[b].digest.hash: self._diff_trees(dir_a.directories[a].digest, dir_b.directories[b].digest, added=added, removed=removed, modified=modified, path=os.path.join(path, dir_a.directories[a].name)) a += 1 b += 1 def _reachable_refs_dir(self, reachable, tree, update_mtime=False): if tree.hash in reachable: return if update_mtime: os.utime(self.objpath(tree)) reachable.add(tree.hash) directory = remote_execution_pb2.Directory() with open(self.objpath(tree), 'rb') as f: directory.ParseFromString(f.read()) for filenode in directory.files: if update_mtime: os.utime(self.objpath(filenode.digest)) reachable.add(filenode.digest.hash) for dirnode in directory.directories: 
self._reachable_refs_dir(reachable, dirnode.digest, update_mtime=update_mtime) def _required_blobs(self, directory_digest): # parse directory, and recursively add blobs d = remote_execution_pb2.Digest() d.hash = directory_digest.hash d.size_bytes = directory_digest.size_bytes yield d directory = remote_execution_pb2.Directory() with open(self.objpath(directory_digest), 'rb') as f: directory.ParseFromString(f.read()) for filenode in directory.files: d = remote_execution_pb2.Digest() d.hash = filenode.digest.hash d.size_bytes = filenode.digest.size_bytes yield d for dirnode in directory.directories: yield from self._required_blobs(dirnode.digest) def _fetch_blob(self, remote, digest, stream): resource_name = '/'.join(['blobs', digest.hash, str(digest.size_bytes)]) request = bytestream_pb2.ReadRequest() request.resource_name = resource_name request.read_offset = 0 for response in remote.bytestream.Read(request): stream.write(response.data) stream.flush() assert digest.size_bytes == os.fstat(stream.fileno()).st_size # _temporary_object(): # # Returns: # (file): A file object to a named temporary file. # # Create a named temporary file with 0o0644 access rights. @contextlib.contextmanager def _temporary_object(self): with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f: os.chmod(f.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) yield f # _ensure_blob(): # # Fetch and add blob if it's not already local. # # Args: # remote (Remote): The remote to use. # digest (Digest): Digest object for the blob to fetch. # # Returns: # (str): The path of the object # def _ensure_blob(self, remote, digest): objpath = self.objpath(digest) if os.path.exists(objpath): # already in local repository return objpath with self._temporary_object() as f: self._fetch_blob(remote, digest, f) added_digest = self.add_object(path=f.name, link_directly=True) assert added_digest.hash == digest.hash return objpath def _batch_download_complete(self, batch): for digest, data in batch.send(): with self._temporary_object() as f: f.write(data) f.flush() added_digest = self.add_object(path=f.name, link_directly=True) assert added_digest.hash == digest.hash # Helper function for _fetch_directory(). def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue): self._batch_download_complete(batch) # All previously scheduled directories are now locally available, # move them to the processing queue. fetch_queue.extend(fetch_next_queue) fetch_next_queue.clear() return _CASBatchRead(remote) # Helper function for _fetch_directory(). def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False): in_local_cache = os.path.exists(self.objpath(digest)) if in_local_cache: # Skip download, already in local cache. pass elif (digest.size_bytes >= remote.max_batch_total_size_bytes or not remote.batch_read_supported): # Too large for batch request, download in independent request. self._ensure_blob(remote, digest) in_local_cache = True else: if not batch.add(digest): # Not enough space left in batch request. # Complete pending batch first. batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue) batch.add(digest) if recursive: if in_local_cache: # Add directory to processing queue. fetch_queue.append(digest) else: # Directory will be available after completing pending batch. # Add directory to deferred processing queue. 
fetch_next_queue.append(digest) return batch # _fetch_directory(): # # Fetches remote directory and adds it to content addressable store. # # Fetches files, symbolic links and recursively other directories in # the remote directory and adds them to the content addressable # store. # # Args: # remote (Remote): The remote to use. # dir_digest (Digest): Digest object for the directory to fetch. # def _fetch_directory(self, remote, dir_digest): fetch_queue = [dir_digest] fetch_next_queue = [] batch = _CASBatchRead(remote) while len(fetch_queue) + len(fetch_next_queue) > 0: if len(fetch_queue) == 0: batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue) dir_digest = fetch_queue.pop(0) objpath = self._ensure_blob(remote, dir_digest) directory = remote_execution_pb2.Directory() with open(objpath, 'rb') as f: directory.ParseFromString(f.read()) for dirnode in directory.directories: batch = self._fetch_directory_node(remote, dirnode.digest, batch, fetch_queue, fetch_next_queue, recursive=True) for filenode in directory.files: batch = self._fetch_directory_node(remote, filenode.digest, batch, fetch_queue, fetch_next_queue) # Fetch final batch self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue) def _send_blob(self, remote, digest, stream, u_uid=uuid.uuid4()): resource_name = '/'.join(['uploads', str(u_uid), 'blobs', digest.hash, str(digest.size_bytes)]) def request_stream(resname, instream): offset = 0 finished = False remaining = digest.size_bytes while not finished: chunk_size = min(remaining, _MAX_PAYLOAD_BYTES) remaining -= chunk_size request = bytestream_pb2.WriteRequest() request.write_offset = offset # max. _MAX_PAYLOAD_BYTES chunks request.data = instream.read(chunk_size) request.resource_name = resname request.finish_write = remaining <= 0 yield request offset += chunk_size finished = request.finish_write for attempt in _retry(): with attempt: response = remote.bytestream.Write(request_stream(resource_name, stream)) assert response.committed_size == digest.size_bytes def _send_directory(self, remote, digest, u_uid=uuid.uuid4()): required_blobs = self._required_blobs(digest) missing_blobs = {} # Limit size of FindMissingBlobs request for required_blobs_group in _grouper(required_blobs, 512): request = remote_execution_pb2.FindMissingBlobsRequest() for required_digest in required_blobs_group: d = request.blob_digests.add() d.hash = required_digest.hash d.size_bytes = required_digest.size_bytes for attempt in _retry(): with attempt: response = remote.cas.FindMissingBlobs(request) for missing_digest in response.missing_blob_digests: d = remote_execution_pb2.Digest() d.hash = missing_digest.hash d.size_bytes = missing_digest.size_bytes missing_blobs[d.hash] = d # Upload any blobs missing on the server self._send_blobs(remote, missing_blobs.values(), u_uid) def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()): batch = _CASBatchUpdate(remote) for digest in digests: with open(self.objpath(digest), 'rb') as f: assert os.fstat(f.fileno()).st_size == digest.size_bytes if (digest.size_bytes >= remote.max_batch_total_size_bytes or not remote.batch_update_supported): # Too large for batch request, upload in independent request. self._send_blob(remote, digest, f, u_uid=u_uid) else: if not batch.add(digest, f): # Not enough space left in batch request. # Complete pending batch first. batch.send() batch = _CASBatchUpdate(remote) batch.add(digest, f) # Send final batch batch.send() # Represents a single remote CAS cache. 
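#
# The gRPC channel and service stubs are only created by init(), so
# constructing a CASRemote is cheap and does not open any connection.
# A minimal illustrative sketch (the `spec` name is an assumption for the
# example; it is expected to carry url, push and TLS certificate fields):
#
#     remote = CASRemote(spec)
#     remote.init()
#     if remote.batch_read_supported:
#         batch = _CASBatchRead(remote)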
# class CASRemote(): # pylint: disable=attribute-defined-outside-init def __init__(self, spec): self.spec = spec self._initialized = False self.channel = None self.bytestream = None self.cas = None self.ref_storage = None def init(self): if not self._initialized: url = urlparse(self.spec.url) if url.scheme == 'http': port = url.port or 80 self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port), options=[("grpc.keepalive_time_ms", _KEEPALIVE_TIME_MS)]) elif url.scheme == 'https': port = url.port or 443 if self.spec.server_cert: with open(self.spec.server_cert, 'rb') as f: server_cert_bytes = f.read() else: server_cert_bytes = None if self.spec.client_key: with open(self.spec.client_key, 'rb') as f: client_key_bytes = f.read() else: client_key_bytes = None if self.spec.client_cert: with open(self.spec.client_cert, 'rb') as f: client_cert_bytes = f.read() else: client_cert_bytes = None credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes, private_key=client_key_bytes, certificate_chain=client_cert_bytes) self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials, options=[("grpc.keepalive_time_ms", _KEEPALIVE_TIME_MS)]) else: raise CASError("Unsupported URL: {}".format(self.spec.url)) self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel) self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel) self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel) self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel) self.remote_asset_fetch = remote_asset_pb2_grpc.FetchStub(self.channel) self.remote_asset_push = remote_asset_pb2_grpc.PushStub(self.channel) self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES try: request = remote_execution_pb2.GetCapabilitiesRequest() for attempt in _retry(): with attempt: response = self.capabilities.GetCapabilities(request) server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes: self.max_batch_total_size_bytes = server_max_batch_total_size_bytes except grpc.RpcError as e: # Simply use the defaults for servers that don't implement GetCapabilities() if e.code() != grpc.StatusCode.UNIMPLEMENTED: raise # Check whether the server supports BatchReadBlobs() self.batch_read_supported = False try: request = remote_execution_pb2.BatchReadBlobsRequest() for attempt in _retry(): with attempt: response = self.cas.BatchReadBlobs(request) self.batch_read_supported = True except grpc.RpcError as e: if e.code() != grpc.StatusCode.UNIMPLEMENTED: raise self.asset_fetch_supported = False try: request = remote_asset_pb2.FetchDirectoryRequest() for attempt in _retry(): with attempt: response = self.remote_asset_fetch.FetchDirectory(request) except grpc.RpcError as e: if e.code() == grpc.StatusCode.INVALID_ARGUMENT: # Expected error as the request doesn't specify any URIs. 
self.asset_fetch_supported = True elif e.code() != grpc.StatusCode.UNIMPLEMENTED: raise self.batch_update_supported = False self.asset_push_supported = False if self.spec.push: # Check whether the server supports BatchUpdateBlobs() try: request = remote_execution_pb2.BatchUpdateBlobsRequest() for attempt in _retry(): with attempt: response = self.cas.BatchUpdateBlobs(request) self.batch_update_supported = True except grpc.RpcError as e: if (e.code() != grpc.StatusCode.UNIMPLEMENTED and e.code() != grpc.StatusCode.PERMISSION_DENIED): raise # Check whether the server supports PushDirectory() try: request = remote_asset_pb2.PushDirectoryRequest() for attempt in _retry(): with attempt: response = self.remote_asset_push.PushDirectory(request) except grpc.RpcError as e: if e.code() == grpc.StatusCode.INVALID_ARGUMENT: # Expected error as the request doesn't specify any URIs. self.asset_push_supported = True elif (e.code() != grpc.StatusCode.UNIMPLEMENTED and e.code() != grpc.StatusCode.PERMISSION_DENIED): raise self._initialized = True # Represents a batch of blobs queued for fetching. # class _CASBatchRead(): def __init__(self, remote): self._remote = remote self._max_total_size_bytes = remote.max_batch_total_size_bytes self._request = remote_execution_pb2.BatchReadBlobsRequest() self._size = 0 self._sent = False def add(self, digest): assert not self._sent new_batch_size = self._size + digest.size_bytes if new_batch_size > self._max_total_size_bytes: # Not enough space left in current batch return False request_digest = self._request.digests.add() request_digest.hash = digest.hash request_digest.size_bytes = digest.size_bytes self._size = new_batch_size return True def send(self): assert not self._sent self._sent = True if len(self._request.digests) == 0: return for attempt in _retry(): with attempt: batch_response = self._remote.cas.BatchReadBlobs(self._request) for response in batch_response.responses: if response.status.code == grpc.StatusCode.NOT_FOUND.value[0]: raise BlobNotFound(response.digest.hash, "Failed to download blob {}: {}".format( response.digest.hash, response.status.code)) if response.status.code != grpc.StatusCode.OK.value[0]: raise CASError("Failed to download blob {}: {}".format( response.digest.hash, response.status.code)) if response.digest.size_bytes != len(response.data): raise CASError("Failed to download blob {}: expected {} bytes, received {} bytes".format( response.digest.hash, response.digest.size_bytes, len(response.data))) yield (response.digest, response.data) # Represents a batch of blobs queued for upload. 
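#
# This mirrors _CASBatchRead: digests are accumulated with add() until the
# remote's max_batch_total_size_bytes would be exceeded, at which point the
# caller sends the batch and starts a new one, as in _send_blobs() above.
# Illustrative pattern:
#
#     batch = _CASBatchUpdate(remote)
#     if not batch.add(digest, f):
#         batch.send()
#         batch = _CASBatchUpdate(remote)
#         batch.add(digest, f)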
# class _CASBatchUpdate(): def __init__(self, remote): self._remote = remote self._max_total_size_bytes = remote.max_batch_total_size_bytes self._request = remote_execution_pb2.BatchUpdateBlobsRequest() self._size = 0 self._sent = False def add(self, digest, stream): assert not self._sent new_batch_size = self._size + digest.size_bytes if new_batch_size > self._max_total_size_bytes: # Not enough space left in current batch return False blob_request = self._request.requests.add() blob_request.digest.hash = digest.hash blob_request.digest.size_bytes = digest.size_bytes blob_request.data = stream.read(digest.size_bytes) self._size = new_batch_size return True def send(self): assert not self._sent self._sent = True if len(self._request.requests) == 0: return for attempt in _retry(): with attempt: batch_response = self._remote.cas.BatchUpdateBlobs(self._request) for response in batch_response.responses: if response.status.code != grpc.StatusCode.OK.value[0]: raise CASError("Failed to upload blob {}: {}".format( response.digest.hash, response.status.code)) def _grouper(iterable, n): while True: try: current = next(iterable) except StopIteration: return yield itertools.chain([current], itertools.islice(iterable, n - 1)) buildstream-1.6.9/buildstream/_artifactcache/casserver.py000066400000000000000000000462511437515270000236600ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Jürg Billeter from concurrent import futures import logging import os import signal import sys import tempfile import uuid import errno import threading import click import grpc from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc from .._exceptions import CASError from .cascache import CASCache # The default limit for gRPC messages is 4 MiB. # Limit payload to 1 MiB to leave sufficient headroom for metadata. _MAX_PAYLOAD_BYTES = 1024 * 1024 # Trying to push an artifact that is too large class ArtifactTooLargeException(Exception): pass # We need a message handler because this will own an ArtifactCache # which can in turn fire messages. def message_handler(message, context): logging.info(message.message) logging.info(message.detail) # create_server(): # # Create gRPC CAS artifact server as specified in the Remote Execution API. 
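#
# A minimal illustrative sketch of standing up an insecure test server on a
# local repository (the path and port are arbitrary example values):
#
#     server = create_server('/srv/cas', enable_push=True)
#     server.add_insecure_port('[::]:11001')
#     server.start()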
# # Args: # repo (str): Path to CAS repository # enable_push (bool): Whether to allow blob uploads and artifact updates # def create_server(repo, *, enable_push, max_head_size=int(10e9), min_head_size=int(2e9)): cas = CASCache(os.path.abspath(repo)) # Use max_workers default from Python 3.5+ max_workers = (os.cpu_count() or 1) * 5 server = grpc.server(futures.ThreadPoolExecutor(max_workers)) cache_cleaner = _CacheCleaner(cas, max_head_size, min_head_size) bytestream_pb2_grpc.add_ByteStreamServicer_to_server( _ByteStreamServicer(cas, cache_cleaner, enable_push=enable_push), server) remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server( _ContentAddressableStorageServicer(cas, cache_cleaner, enable_push=enable_push), server) remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server( _CapabilitiesServicer(), server) buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server( _ReferenceStorageServicer(cas, enable_push=enable_push), server) return server @click.command(short_help="CAS Artifact Server") @click.option('--port', '-p', type=click.INT, required=True, help="Port number") @click.option('--server-key', help="Private server key for TLS (PEM-encoded)") @click.option('--server-cert', help="Public server certificate for TLS (PEM-encoded)") @click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)") @click.option('--enable-push', default=False, is_flag=True, help="Allow clients to upload blobs and update artifact cache") @click.option('--head-room-min', type=click.INT, help="Disk head room minimum in bytes", default=2e9) @click.option('--head-room-max', type=click.INT, help="Disk head room maximum in bytes", default=10e9) @click.argument('repo') def server_main(repo, port, server_key, server_cert, client_certs, enable_push, head_room_min, head_room_max): server = create_server(repo, max_head_size=head_room_max, min_head_size=head_room_min, enable_push=enable_push) use_tls = bool(server_key) if bool(server_cert) != use_tls: click.echo("ERROR: --server-key and --server-cert are both required for TLS", err=True) sys.exit(-1) if client_certs and not use_tls: click.echo("ERROR: --client-certs can only be used with --server-key", err=True) sys.exit(-1) if use_tls: # Read public/private key pair with open(server_key, 'rb') as f: server_key_bytes = f.read() with open(server_cert, 'rb') as f: server_cert_bytes = f.read() if client_certs: with open(client_certs, 'rb') as f: client_certs_bytes = f.read() else: client_certs_bytes = None credentials = grpc.ssl_server_credentials([(server_key_bytes, server_cert_bytes)], root_certificates=client_certs_bytes, require_client_auth=bool(client_certs)) server.add_secure_port('[::]:{}'.format(port), credentials) else: server.add_insecure_port('[::]:{}'.format(port)) # Run artifact server server.start() try: while True: signal.pause() except KeyboardInterrupt: server.stop(0) class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer): def __init__(self, cas, cache_cleaner, *, enable_push): super().__init__() self.cas = cas self.enable_push = enable_push self.cache_cleaner = cache_cleaner def Read(self, request, context): resource_name = request.resource_name client_digest = _digest_from_download_resource_name(resource_name) if client_digest is None: context.set_code(grpc.StatusCode.NOT_FOUND) return if request.read_offset > client_digest.size_bytes: context.set_code(grpc.StatusCode.OUT_OF_RANGE) return try: with open(self.cas.objpath(client_digest), 'rb') as f: if os.fstat(f.fileno()).st_size != 
client_digest.size_bytes: context.set_code(grpc.StatusCode.NOT_FOUND) return if request.read_offset > 0: f.seek(request.read_offset) remaining = client_digest.size_bytes - request.read_offset while remaining > 0: chunk_size = min(remaining, _MAX_PAYLOAD_BYTES) remaining -= chunk_size response = bytestream_pb2.ReadResponse() # max. 64 kB chunks response.data = f.read(chunk_size) yield response except FileNotFoundError: context.set_code(grpc.StatusCode.NOT_FOUND) def Write(self, request_iterator, context): response = bytestream_pb2.WriteResponse() if not self.enable_push: context.set_code(grpc.StatusCode.PERMISSION_DENIED) return response offset = 0 finished = False resource_name = None with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out: for request in request_iterator: if finished or request.write_offset != offset: context.set_code(grpc.StatusCode.FAILED_PRECONDITION) return response if resource_name is None: # First request resource_name = request.resource_name client_digest = _digest_from_upload_resource_name(resource_name) if client_digest is None: context.set_code(grpc.StatusCode.NOT_FOUND) return response while True: if client_digest.size_bytes == 0: break try: self.cache_cleaner.clean_up(client_digest.size_bytes) except ArtifactTooLargeException as e: context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED) context.set_details(str(e)) return response try: os.posix_fallocate(out.fileno(), 0, client_digest.size_bytes) break except OSError as e: # Multiple upload can happen in the same time if e.errno != errno.ENOSPC: raise elif request.resource_name: # If it is set on subsequent calls, it **must** match the value of the first request. if request.resource_name != resource_name: context.set_code(grpc.StatusCode.FAILED_PRECONDITION) return response if (offset + len(request.data)) > client_digest.size_bytes: context.set_code(grpc.StatusCode.FAILED_PRECONDITION) return response out.write(request.data) offset += len(request.data) if request.finish_write: if client_digest.size_bytes != offset: context.set_code(grpc.StatusCode.FAILED_PRECONDITION) return response out.flush() digest = self.cas.add_object(path=out.name, link_directly=True) if digest.hash != client_digest.hash: context.set_code(grpc.StatusCode.FAILED_PRECONDITION) return response finished = True assert finished response.committed_size = offset return response class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer): def __init__(self, cas, cache_cleaner, *, enable_push): super().__init__() self.cas = cas self.enable_push = enable_push self.cache_cleaner = cache_cleaner def FindMissingBlobs(self, request, context): response = remote_execution_pb2.FindMissingBlobsResponse() for digest in request.blob_digests: objpath = self.cas.objpath(digest) try: os.utime(objpath) except OSError as e: if e.errno != errno.ENOENT: raise d = response.missing_blob_digests.add() d.hash = digest.hash d.size_bytes = digest.size_bytes return response def BatchReadBlobs(self, request, context): response = remote_execution_pb2.BatchReadBlobsResponse() batch_size = 0 for digest in request.digests: batch_size += digest.size_bytes if batch_size > _MAX_PAYLOAD_BYTES: context.set_code(grpc.StatusCode.INVALID_ARGUMENT) return response blob_response = response.responses.add() blob_response.digest.hash = digest.hash blob_response.digest.size_bytes = digest.size_bytes try: with open(self.cas.objpath(digest), 'rb') as f: if os.fstat(f.fileno()).st_size != digest.size_bytes: blob_response.status.code = 
grpc.StatusCode.NOT_FOUND.value[0] continue blob_response.data = f.read(digest.size_bytes) except FileNotFoundError: blob_response.status.code = grpc.StatusCode.NOT_FOUND.value[0] return response def BatchUpdateBlobs(self, request, context): response = remote_execution_pb2.BatchUpdateBlobsResponse() if not self.enable_push: context.set_code(grpc.StatusCode.PERMISSION_DENIED) return response batch_size = 0 for blob_request in request.requests: digest = blob_request.digest batch_size += digest.size_bytes if batch_size > _MAX_PAYLOAD_BYTES: context.set_code(grpc.StatusCode.INVALID_ARGUMENT) return response blob_response = response.responses.add() blob_response.digest.hash = digest.hash blob_response.digest.size_bytes = digest.size_bytes if len(blob_request.data) != digest.size_bytes: blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION continue try: self.cache_cleaner.clean_up(digest.size_bytes) with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out: out.write(blob_request.data) out.flush() server_digest = self.cas.add_object(path=out.name) if server_digest.hash != digest.hash: blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION except ArtifactTooLargeException: blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED return response class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer): def GetCapabilities(self, request, context): response = remote_execution_pb2.ServerCapabilities() cache_capabilities = response.cache_capabilities cache_capabilities.digest_function.append(remote_execution_pb2.SHA256) cache_capabilities.action_cache_update_capabilities.update_enabled = False cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED response.deprecated_api_version.major = 2 response.low_api_version.major = 2 response.high_api_version.major = 2 return response class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer): def __init__(self, cas, *, enable_push): super().__init__() self.cas = cas self.enable_push = enable_push def GetReference(self, request, context): response = buildstream_pb2.GetReferenceResponse() try: tree = self.cas.resolve_ref(request.key, update_mtime=True) try: self.cas.update_tree_mtime(tree) except FileNotFoundError: self.cas.remove(request.key, defer_prune=True) context.set_code(grpc.StatusCode.NOT_FOUND) return response response.digest.hash = tree.hash response.digest.size_bytes = tree.size_bytes except CASError: context.set_code(grpc.StatusCode.NOT_FOUND) return response def UpdateReference(self, request, context): response = buildstream_pb2.UpdateReferenceResponse() if not self.enable_push: context.set_code(grpc.StatusCode.PERMISSION_DENIED) return response for key in request.keys: self.cas.set_ref(key, request.digest) return response def Status(self, request, context): response = buildstream_pb2.StatusResponse() response.allow_updates = self.enable_push return response def _digest_from_download_resource_name(resource_name): parts = resource_name.split('/') # Accept requests from non-conforming BuildStream 1.1.x clients if len(parts) == 2: parts.insert(0, 'blobs') if len(parts) != 3 or parts[0] != 'blobs': return None try: digest = remote_execution_pb2.Digest() digest.hash = parts[1] digest.size_bytes = int(parts[2]) return digest except ValueError: return None def _digest_from_upload_resource_name(resource_name): parts = resource_name.split('/') # Accept requests from non-conforming 
BuildStream 1.1.x clients if len(parts) == 2: parts.insert(0, 'uploads') parts.insert(1, str(uuid.uuid4())) parts.insert(2, 'blobs') if len(parts) < 5 or parts[0] != 'uploads' or parts[2] != 'blobs': return None try: uuid_ = uuid.UUID(hex=parts[1]) if uuid_.version != 4: return None digest = remote_execution_pb2.Digest() digest.hash = parts[3] digest.size_bytes = int(parts[4]) return digest except ValueError: return None class _CacheCleaner: __cleanup_cache_lock = threading.Lock() def __init__(self, cas, max_head_size, min_head_size=int(2e9)): self.__cas = cas self.__max_head_size = max_head_size self.__min_head_size = min_head_size def __has_space(self, object_size): stats = os.statvfs(self.__cas.casdir) free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size if object_size > total_disk_space: raise ArtifactTooLargeException("Artifact of size: {} is too large for " "the filesystem which mounts the remote " "cache".format(object_size)) return object_size <= free_disk_space # _clean_up_cache() # # Keep removing Least Recently Pushed (LRP) artifacts in a cache until there # is enough space for the incoming artifact # # Args: # object_size: The size of the object being received in bytes # # Returns: # int: The total bytes removed on the filesystem # def clean_up(self, object_size): if self.__has_space(object_size): return 0 with _CacheCleaner.__cleanup_cache_lock: if self.__has_space(object_size): # Another thread has done the cleanup for us return 0 stats = os.statvfs(self.__cas.casdir) target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size # obtain a list of LRP artifacts LRP_objects = self.__cas.list_objects() removed_size = 0 # in bytes last_mtime = 0 while object_size - removed_size > target_disk_space: try: last_mtime, to_remove = LRP_objects.pop(0) # The first element in the list is the LRP artifact except IndexError as e: # This exception is caught if there are no more artifacts in the list # LRP_artifacts. This means the the artifact is too large for the filesystem # so we abort the process raise ArtifactTooLargeException("Artifact of size {} is too large for " "the filesystem which mounts the remote " "cache".format(object_size)) from e try: size = os.stat(to_remove).st_size os.unlink(to_remove) removed_size += size except FileNotFoundError: pass self.__cas.clean_up_refs_until(last_mtime) if removed_size > 0: logging.info("Successfully removed {} bytes from the cache".format(removed_size)) else: logging.info("No artifacts were removed from the cache.") return removed_size buildstream-1.6.9/buildstream/_cachekey.py000066400000000000000000000025061437515270000206510ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import hashlib import ujson from . 
import _yaml # generate_key() # # Generate an sha256 hex digest from the given value. The value # can be a simple value or recursive dictionary with lists etc, # anything simple enough to serialize. # # Args: # value: A value to get a key for # # Returns: # (str): An sha256 hex digest of the given value # def generate_key(value): ordered = _yaml.node_sanitize(value) ustring = ujson.dumps(ordered, sort_keys=True, escape_forward_slashes=False).encode('utf-8') return hashlib.sha256(ustring).hexdigest() buildstream-1.6.9/buildstream/_context.py000066400000000000000000000517671437515270000205760ustar00rootroot00000000000000# # Copyright (C) 2016-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import datetime from collections import deque from collections.abc import Mapping from contextlib import contextmanager from . import utils from . import _cachekey from . import _signals from . import _site from . import _yaml from ._exceptions import LoadError, LoadErrorReason, BstError from ._message import Message, MessageType from ._profile import Topics, profile_start, profile_end from ._artifactcache import ArtifactCache, ArtifactCacheUsage from ._workspaces import Workspaces from .plugin import Plugin # Context() # # The Context object holds all of the user preferences # and context for a given invocation of BuildStream. # # This is a collection of data from configuration files and command # line arguments and consists of information such as where to store # logs and artifacts, where to perform builds and cache downloaded sources, # verbosity levels and basically anything pertaining to the context # in which BuildStream was invoked. 
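#
# As an illustration only (this sketch is not part of the BuildStream
# source and the helper and handler names are hypothetical), a frontend
# is expected to bring a Context up roughly like this, using the load()
# and set_message_handler() methods defined on the class below:
#
def _example_context_setup(config_file=None):
    # Create the context and load user configuration; when no file is
    # given, load() falls back to the XDG configuration locations.
    context = Context()
    context.load(config_file)

    # All status messages funnel through one handler, registered before
    # any work starts; message() invokes it with the originating context
    # passed as a keyword argument.
    def handler(message, context=None):
        print("[{}] {}".format(message.message_type, message.message))

    context.set_message_handler(handler)
    return context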
# class Context(): def __init__(self): # Filename indicating which configuration file was used, or None for the defaults self.config_origin = None # The directory where various sources are stored self.sourcedir = None # The directory where build sandboxes will be created self.builddir = None # The local binary artifact cache directory self.artifactdir = None # The locations from which to push and pull prebuilt artifacts self.artifact_cache_specs = [] # The directory to store build logs self.logdir = None # The abbreviated cache key length to display in the UI self.log_key_length = 0 # Whether debug mode is enabled self.log_debug = False # Whether verbose mode is enabled self.log_verbose = False # Maximum number of lines to print from build logs self.log_error_lines = 0 # Maximum number of lines to print in the master log for a detailed message self.log_message_lines = 0 # Format string for printing the pipeline at startup time self.log_element_format = None # Format string for printing message lines in the master log self.log_message_format = None # Maximum number of fetch or refresh tasks self.sched_fetchers = 4 # Maximum number of build tasks self.sched_builders = 4 # Maximum number of push tasks self.sched_pushers = 4 # Maximum number of retries for network tasks self.sched_network_retries = 2 # What to do when a build fails in non interactive mode self.sched_error_action = 'continue' # Maximum jobs per build self.build_max_jobs = None # Whether elements must be rebuilt when their dependencies have changed self._strict_build_plan = None # Make sure the XDG vars are set in the environment before loading anything self._init_xdg() # Private variables self._cache_key = None self._message_handler = None self._message_depth = deque() self._artifactcache = None self._projects = [] self._project_overrides = {} self._workspaces = None self._log_handle = None self._log_filename = None self.config_cache_quota = 'infinity' self.artifactdir_volume = None # load() # # Loads the configuration files # # Args: # config (filename): The user specified configuration file, if any # # Raises: # LoadError # # This will first load the BuildStream default configuration and then # override that configuration with the configuration file indicated # by *config*, if any was specified. # def load(self, config=None): profile_start(Topics.LOAD_CONTEXT, 'load') # If a specific config file is not specified, default to trying # a $XDG_CONFIG_HOME/buildstream.conf file # if not config: # # Support parallel installations of BuildStream by first # trying buildstream1.conf and then falling back to buildstream.conf. # for config_filename in ("buildstream1.conf", "buildstream.conf"): default_config = os.path.join(os.environ["XDG_CONFIG_HOME"], config_filename) if os.path.exists(default_config): config = default_config break # Load default config # defaults = _yaml.load(_site.default_user_config) if config: self.config_origin = os.path.abspath(config) user_config = _yaml.load(config) _yaml.composite(defaults, user_config) _yaml.node_validate(defaults, [ 'sourcedir', 'builddir', 'artifactdir', 'logdir', 'scheduler', 'artifacts', 'logging', 'projects', 'cache', 'build' ]) for directory in ['sourcedir', 'builddir', 'artifactdir', 'logdir']: # Allow the ~ tilde expansion and any environment variables in # path specification in the config files. 
# path = _yaml.node_get(defaults, str, directory) path = os.path.expanduser(path) path = os.path.expandvars(path) path = os.path.normpath(path) setattr(self, directory, path) # Relative paths don't make sense in user configuration. The exception is # workspacedir where `.` is useful as it will be combined with the name # specified on the command line. if not os.path.isabs(path) and not (directory == 'workspacedir' and path == '.'): raise LoadError("{} must be an absolute path".format(directory), LoadErrorReason.INVALID_DATA) # Load quota configuration # We need to find the first existing directory in the path of # our artifactdir - the artifactdir may not have been created # yet. cache = _yaml.node_get(defaults, Mapping, 'cache') _yaml.node_validate(cache, ['quota']) self.config_cache_quota = _yaml.node_get(cache, str, 'quota', default_value='infinity') # Load artifact share configuration self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults) # Load logging config logging = _yaml.node_get(defaults, Mapping, 'logging') _yaml.node_validate(logging, [ 'key-length', 'verbose', 'error-lines', 'message-lines', 'debug', 'element-format', 'message-format' ]) self.log_key_length = _yaml.node_get(logging, int, 'key-length') self.log_debug = _yaml.node_get(logging, bool, 'debug') self.log_verbose = _yaml.node_get(logging, bool, 'verbose') self.log_error_lines = _yaml.node_get(logging, int, 'error-lines') self.log_message_lines = _yaml.node_get(logging, int, 'message-lines') self.log_element_format = _yaml.node_get(logging, str, 'element-format') self.log_message_format = _yaml.node_get(logging, str, 'message-format') # Load scheduler config scheduler = _yaml.node_get(defaults, Mapping, 'scheduler') _yaml.node_validate(scheduler, [ 'on-error', 'fetchers', 'builders', 'pushers', 'network-retries' ]) self.sched_error_action = _yaml.node_get(scheduler, str, 'on-error') self.sched_fetchers = _yaml.node_get(scheduler, int, 'fetchers') self.sched_builders = _yaml.node_get(scheduler, int, 'builders') self.sched_pushers = _yaml.node_get(scheduler, int, 'pushers') self.sched_network_retries = _yaml.node_get(scheduler, int, 'network-retries') # Load build config build = _yaml.node_get(defaults, dict, 'build') _yaml.node_validate(build, ['max-jobs']) self.build_max_jobs = _yaml.node_get(build, int, 'max-jobs') # Load per-projects overrides self._project_overrides = _yaml.node_get(defaults, Mapping, 'projects', default_value={}) # Shallow validation of overrides, parts of buildstream which rely # on the overrides are expected to validate elsewhere. for _, overrides in _yaml.node_items(self._project_overrides): _yaml.node_validate(overrides, ['artifacts', 'options', 'strict', 'default-mirror']) profile_end(Topics.LOAD_CONTEXT, 'load') valid_actions = ['continue', 'quit'] if self.sched_error_action not in valid_actions: provenance = _yaml.node_get_provenance(scheduler, 'on-error') raise LoadError(LoadErrorReason.INVALID_DATA, "{}: on-error should be one of: {}".format( provenance, ", ".join(valid_actions))) @property def artifactcache(self): if not self._artifactcache: self._artifactcache = ArtifactCache(self) return self._artifactcache # get_artifact_cache_usage() # # Fetches the current usage of the artifact cache # # Returns: # (ArtifactCacheUsage): The current status # def get_artifact_cache_usage(self): return ArtifactCacheUsage(self.artifactcache) # add_project(): # # Add a project to the context. 
# # Args: # project (Project): The project to add # def add_project(self, project): if not self._projects: self._workspaces = Workspaces(project) self._projects.append(project) # get_projects(): # # Return the list of projects in the context. # # Returns: # (list): The list of projects # def get_projects(self): return self._projects # get_toplevel_project(): # # Return the toplevel project, the one which BuildStream was # invoked with as opposed to a junctioned subproject. # # Returns: # (list): The list of projects # def get_toplevel_project(self): return self._projects[0] def get_workspaces(self): return self._workspaces # get_overrides(): # # Fetch the override dictionary for the active project. This returns # a node loaded from YAML and as such, values loaded from the returned # node should be loaded using the _yaml.node_get() family of functions. # # Args: # project_name (str): The project name # # Returns: # (Mapping): The overrides dictionary for the specified project # def get_overrides(self, project_name): return _yaml.node_get(self._project_overrides, Mapping, project_name, default_value={}) # get_strict(): # # Fetch whether we are strict or not # # Returns: # (bool): Whether or not to use strict build plan # def get_strict(self): # If it was set by the CLI, it overrides any config if self._strict_build_plan is not None: return self._strict_build_plan toplevel = self.get_toplevel_project() overrides = self.get_overrides(toplevel.name) return _yaml.node_get(overrides, bool, 'strict', default_value=True) # get_cache_key(): # # Returns the cache key, calculating it if necessary # # Returns: # (str): A hex digest cache key for the Context # def get_cache_key(self): if self._cache_key is None: # Anything that alters the build goes into the unique key self._cache_key = _cachekey.generate_key({}) return self._cache_key # set_message_handler() # # Sets the handler for any status messages propagated through # the context. # # The message handler should have the same signature as # the message() method def set_message_handler(self, handler): self._message_handler = handler # silent_messages(): # # Returns: # (bool): Whether messages are currently being silenced # def silent_messages(self): for silent in self._message_depth: if silent: return True return False # message(): # # Proxies a message back to the caller, this is the central # point through which all messages pass. # # Args: # message: A Message object # def message(self, message): # Tag message only once if message.depth is None: message.depth = len(list(self._message_depth)) # If we are recording messages, dump a copy into the open log file. self._record_message(message) # Send it off to the log handler (can be the frontend, # or it can be the child task which will propagate # to the frontend) assert self._message_handler self._message_handler(message, context=self) # silence() # # A context manager to silence messages, this behaves in # the same way as the `silent_nested` argument of the # Context._timed_activity() context manager: especially # important messages will not be silenced. 
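#
# As a purely illustrative sketch (not part of the original source, and
# the activity name used here is hypothetical), callers typically combine
# this with timed_activity() below along these lines:
#
#     with context.timed_activity("Resolving pipeline", silent_nested=True):
#         with context.silence():
#             ...  # status messages emitted here are silenced, while
#                  # especially important (unconditional) messages still
#                  # reach the frontend
#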
# @contextmanager def silence(self): self._push_message_depth(True) try: yield finally: self._pop_message_depth() # timed_activity() # # Context manager for performing timed activities and logging those # # Args: # context (Context): The invocation context object # activity_name (str): The name of the activity # detail (str): An optional detailed message, can be multiline output # silent_nested (bool): If specified, nested messages will be silenced # @contextmanager def timed_activity(self, activity_name, *, unique_id=None, detail=None, silent_nested=False): starttime = datetime.datetime.now() stopped_time = None def stop_time(): nonlocal stopped_time stopped_time = datetime.datetime.now() def resume_time(): nonlocal stopped_time nonlocal starttime sleep_time = datetime.datetime.now() - stopped_time starttime += sleep_time with _signals.suspendable(stop_time, resume_time): try: # Push activity depth for status messages message = Message(unique_id, MessageType.START, activity_name, detail=detail) self.message(message) self._push_message_depth(silent_nested) yield except BstError: # Note the failure in status messages and reraise, the scheduler # expects an error when there is an error. elapsed = datetime.datetime.now() - starttime message = Message(unique_id, MessageType.FAIL, activity_name, elapsed=elapsed) self._pop_message_depth() self.message(message) raise elapsed = datetime.datetime.now() - starttime message = Message(unique_id, MessageType.SUCCESS, activity_name, elapsed=elapsed) self._pop_message_depth() self.message(message) # recorded_messages() # # Records all messages in a log file while the context manager # is active. # # In addition to automatically writing all messages to the # specified logging file, an open file handle for process stdout # and stderr will be available via the Context.get_log_handle() API, # and the full logfile path will be available via the # Context.get_log_filename() API. # # Args: # filename (str): A logging directory relative filename, # the pid and .log extension will be automatically # appended # # Yields: # (str): The fully qualified log filename # @contextmanager def recorded_messages(self, filename): # We dont allow recursing in this context manager, and # we also do not allow it in the main process. assert self._log_handle is None assert self._log_filename is None assert not utils._is_main_process() # Create the fully qualified logfile in the log directory, # appending the pid and .log extension at the end. self._log_filename = os.path.join(self.logdir, '{}.{}.log'.format(filename, os.getpid())) # Ensure the directory exists first directory = os.path.dirname(self._log_filename) os.makedirs(directory, exist_ok=True) with open(self._log_filename, 'a', encoding='utf-8') as logfile: # Write one last line to the log and flush it to disk def flush_log(): # If the process currently had something happening in the I/O stack # then trying to reenter the I/O stack will fire a runtime error. 
# # So just try to flush as well as we can at SIGTERM time try: logfile.write('\n\nForcefully terminated\n') logfile.flush() except RuntimeError: os.fsync(logfile.fileno()) self._log_handle = logfile with _signals.terminator(flush_log): yield self._log_filename self._log_handle = None self._log_filename = None # get_log_handle() # # Fetches the active log handle, this will return the active # log file handle when the Context.recorded_messages() context # manager is active # # Returns: # (file): The active logging file handle, or None # def get_log_handle(self): return self._log_handle # get_log_filename() # # Fetches the active log filename, this will return the active # log filename when the Context.recorded_messages() context # manager is active # # Returns: # (str): The active logging filename, or None # def get_log_filename(self): return self._log_filename # _record_message() # # Records the message if recording is enabled # # Args: # message (Message): The message to record # def _record_message(self, message): if self._log_handle is None: return INDENT = " " EMPTYTIME = "--:--:--" template = "[{timecode: <8}] {type: <7}" # If this message is associated with a plugin, print what # we know about the plugin. plugin_name = "" if message.unique_id: template += " {plugin}" plugin = Plugin._lookup(message.unique_id) plugin_name = plugin.name template += ": {message}" detail = '' if message.detail is not None: template += "\n\n{detail}" detail = message.detail.rstrip('\n') detail = INDENT + INDENT.join(detail.splitlines(True)) timecode = EMPTYTIME if message.message_type in (MessageType.SUCCESS, MessageType.FAIL): hours, remainder = divmod(int(message.elapsed.total_seconds()), 60**2) minutes, seconds = divmod(remainder, 60) timecode = "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds) text = template.format(timecode=timecode, plugin=plugin_name, type=message.message_type.upper(), message=message.message, detail=detail) # Write to the open log file self._log_handle.write('{}\n'.format(text)) self._log_handle.flush() # _push_message_depth() / _pop_message_depth() # # For status messages, send the depth of timed # activities inside a given task through the message # def _push_message_depth(self, silent_nested): self._message_depth.appendleft(silent_nested) def _pop_message_depth(self): assert self._message_depth self._message_depth.popleft() # Force the resolved XDG variables into the environment, # this is so that they can be used directly to specify # preferred locations of things from user configuration # files. def _init_xdg(self): if not os.environ.get('XDG_CACHE_HOME'): os.environ['XDG_CACHE_HOME'] = os.path.expanduser('~/.cache') if not os.environ.get('XDG_CONFIG_HOME'): os.environ['XDG_CONFIG_HOME'] = os.path.expanduser('~/.config') if not os.environ.get('XDG_DATA_HOME'): os.environ['XDG_DATA_HOME'] = os.path.expanduser('~/.local/share') buildstream-1.6.9/buildstream/_elementfactory.py000066400000000000000000000045221437515270000221160ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from . import _site from ._plugincontext import PluginContext from .element import Element # A ElementFactory creates Element instances # in the context of a given factory # # Args: # plugin_base (PluginBase): The main PluginBase object to work with # plugin_origins (list): Data used to search for external Element plugins # class ElementFactory(PluginContext): def __init__(self, plugin_base, *, format_versions=None, plugin_origins=None): if format_versions is None: format_versions = {} super().__init__(plugin_base, Element, [_site.element_plugins], plugin_origins=plugin_origins, format_versions=format_versions) # create(): # # Create an Element object, the pipeline uses this to create Element # objects on demand for a given pipeline. # # Args: # context (object): The Context object for processing # project (object): The project object # meta (object): The loaded MetaElement # # Returns: A newly created Element object of the appropriate kind # # Raises: # PluginError (if the kind lookup failed) # LoadError (if the element itself took issue with the config) # def create(self, context, project, meta): element_type, default_config = self.lookup(meta.kind) element = element_type(context, project, meta, default_config) version = self._format_versions.get(meta.kind, 0) self._assert_plugin_format(element, version) return element buildstream-1.6.9/buildstream/_exceptions.py000066400000000000000000000207561437515270000212650ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom # Tiago Gomes from enum import Enum # Disable pylint warnings for whole file here: # pylint: disable=global-statement # The last raised exception, this is used in test cases only _last_task_error_domain = None _last_task_error_reason = None # get_last_task_error() # # Fetches the last exception from a task # # Used by regression tests # def get_last_task_error(): global _last_task_error_domain global _last_task_error_reason d = _last_task_error_domain r = _last_task_error_reason _last_task_error_domain = _last_task_error_reason = None return (d, r) # set_last_task_error() # # Sets the last exception of a task # # This is set by some internals to inform regression # tests about how things failed in a machine readable way # def set_last_task_error(domain, reason): global _last_task_error_domain global _last_task_error_reason _last_task_error_domain = domain _last_task_error_reason = reason class ErrorDomain(Enum): PLUGIN = 1 LOAD = 2 IMPL = 3 PLATFORM = 4 SANDBOX = 5 ARTIFACT = 6 PIPELINE = 7 OSTREE = 8 UTIL = 9 PROG_NOT_FOUND = 12 SOURCE = 10 ELEMENT = 11 APP = 12 STREAM = 13 CAS = 15 # BstError is an internal base exception class for BuildSream # exceptions. # # The sole purpose of using the base class is to add additional # context to exceptions raised by plugins in child tasks, this # context can then be communicated back to the main process. # class BstError(Exception): def __init__(self, message, *, detail=None, domain=None, reason=None, temporary=False): super().__init__(message) # Additional error detail, these are used to construct detail # portions of the logging messages when encountered. # self.detail = detail # The build sandbox in which the error occurred, if the # error occurred at element assembly time. # self.sandbox = None # When this exception occurred during the handling of a job, indicate # whether or not there is any point retrying the job. # self.temporary = temporary # Error domain and reason # self.domain = domain self.reason = reason # PluginError # # Raised on plugin related errors. # # This exception is raised either by the plugin loading process, # or by the base :class:`.Plugin` element itself. # class PluginError(BstError): def __init__(self, message, *, detail=None, reason=None, temporary=False): super().__init__(message, domain=ErrorDomain.PLUGIN, detail=detail, reason=reason, temporary=False) # LoadErrorReason # # Describes the reason why a :class:`.LoadError` was raised. # class LoadErrorReason(Enum): # A file was not found. MISSING_FILE = 1 # The parsed data was not valid YAML. INVALID_YAML = 2 # Data was malformed, a value was not of the expected type, etc INVALID_DATA = 3 # An error occurred during YAML dictionary composition. # # This can happen by overriding a value with a new differently typed # value, or by overwriting some named value when that was not allowed. ILLEGAL_COMPOSITE = 4 # An circular dependency chain was detected CIRCULAR_DEPENDENCY = 5 # A variable could not be resolved. This can happen if your project # has cyclic dependencies in variable declarations, or, when substituting # a string which refers to an undefined variable. 
UNRESOLVED_VARIABLE = 6 # BuildStream does not support the required project format version UNSUPPORTED_PROJECT = 7 # Project requires a newer version of a plugin than the one which was loaded UNSUPPORTED_PLUGIN = 8 # A conditional expression failed to resolve EXPRESSION_FAILED = 9 # An assertion was intentionally encoded into project YAML USER_ASSERTION = 10 # A list composition directive did not apply to any underlying list TRAILING_LIST_DIRECTIVE = 11 # Conflicting junctions in subprojects CONFLICTING_JUNCTION = 12 # Failure to load a project from a specified junction INVALID_JUNCTION = 13 # Subproject needs to be fetched SUBPROJECT_FETCH_NEEDED = 14 # Subproject has no ref SUBPROJECT_INCONSISTENT = 15 # An invalid symbol name was encountered INVALID_SYMBOL_NAME = 16 # A project.conf file was missing MISSING_PROJECT_CONF = 17 # Try to load a directory not a yaml file LOADING_DIRECTORY = 18 # A project path leads outside of the project directory PROJ_PATH_INVALID = 19 # A project path points to a file of the not right kind (e.g. a # socket) PROJ_PATH_INVALID_KIND = 20 # A recursive include has been encountered. RECURSIVE_INCLUDE = 21 # A recursive variable has been encountered CIRCULAR_REFERENCE_VARIABLE = 22 # An attempt so set the value of a protected variable PROTECTED_VARIABLE_REDEFINED = 23 # LoadError # # Raised while loading some YAML. # # Args: # reason (LoadErrorReason): machine readable error reason # message (str): human readable error explanation # # This exception is raised when loading or parsing YAML, or when # interpreting project YAML # class LoadError(BstError): def __init__(self, reason, message, *, detail=None): super().__init__(message, detail=detail, domain=ErrorDomain.LOAD, reason=reason) # ImplError # # Raised when a :class:`.Source` or :class:`.Element` plugin fails to # implement a mandatory method # class ImplError(BstError): def __init__(self, message, reason=None): super().__init__(message, domain=ErrorDomain.IMPL, reason=reason) # PlatformError # # Raised if the current platform is not supported. class PlatformError(BstError): def __init__(self, message, reason=None): super().__init__(message, domain=ErrorDomain.PLATFORM, reason=reason) # SandboxError # # Raised when errors are encountered by the sandbox implementation # class SandboxError(BstError): def __init__(self, message, reason=None): super().__init__(message, domain=ErrorDomain.SANDBOX, reason=reason) # ArtifactError # # Raised when errors are encountered in the artifact caches # class ArtifactError(BstError): def __init__(self, message, *, detail=None, reason=None, temporary=False): super().__init__(message, detail=detail, domain=ErrorDomain.ARTIFACT, reason=reason, temporary=True) # CASError # # Raised when errors are encountered in the CAS # class CASError(BstError): def __init__(self, message, *, detail=None, reason=None, temporary=False): super().__init__(message, detail=detail, domain=ErrorDomain.CAS, reason=reason, temporary=True) # PipelineError # # Raised from pipeline operations # class PipelineError(BstError): def __init__(self, message, *, detail=None, reason=None): super().__init__(message, detail=detail, domain=ErrorDomain.PIPELINE, reason=reason) # StreamError # # Raised when a stream operation fails # class StreamError(BstError): def __init__(self, message=None, *, detail=None, reason=None, terminated=False): # The empty string should never appear to a user, # this only allows us to treat this internal error as # a BstError from the frontend. 
if message is None: message = "" super().__init__(message, detail=detail, domain=ErrorDomain.STREAM, reason=reason) self.terminated = terminated # AppError # # Raised from the frontend App directly # class AppError(BstError): def __init__(self, message, detail=None, reason=None): super().__init__(message, detail=detail, domain=ErrorDomain.APP, reason=reason) # SkipJob # # Raised from a child process within a job when the job should be # considered skipped by the parent process. # class SkipJob(Exception): pass buildstream-1.6.9/buildstream/_frontend/000077500000000000000000000000001437515270000203375ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_frontend/__init__.py000066400000000000000000000016721437515270000224560ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os from .cli import cli if "_BST_COMPLETION" not in os.environ: from .profile import Profile from .status import Status from .widget import LogLine buildstream-1.6.9/buildstream/_frontend/app.py000066400000000000000000001105621437515270000214760ustar00rootroot00000000000000# # Copyright (C) 2016-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import sys import resource import traceback import datetime from enum import Enum from textwrap import TextWrapper from contextlib import contextmanager import ujson import click from click import UsageError # Import buildstream public symbols from .. import Scope # Import various buildstream internals from .._context import Context from .._platform import Platform from .._project import Project from .._exceptions import BstError, StreamError, LoadError, LoadErrorReason, AppError, get_last_task_error from .._message import Message, MessageType, unconditional_messages from .._stream import Stream from .._versions import BST_FORMAT_VERSION from .. import _yaml from .._scheduler import ElementJob, JobStatus # Import frontend assets from . 
import Profile, LogLine, Status # Intendation for all logging INDENT = 4 # App() # # Main Application State # # Args: # main_options (dict): The main CLI options of the `bst` # command, before any subcommand # class App(): def __init__(self, main_options): # # Public members # self.context = None # The Context object self.stream = None # The Stream object self.project = None # The toplevel Project object self.logger = None # The LogLine object self.interactive = None # Whether we are running in interactive mode self.colors = None # Whether to use colors in logging # # Private members # self._session_start = datetime.datetime.now() self._session_name = None self._main_options = main_options # Main CLI options, before any command self._status = None # The Status object self._fail_messages = {} # Failure messages by unique plugin id self._interactive_failures = None # Whether to handle failures interactively self._started = False # Whether a session has started # UI Colors Profiles self._content_profile = Profile(fg='yellow') self._format_profile = Profile(fg='cyan', dim=True) self._success_profile = Profile(fg='green') self._error_profile = Profile(fg='red', dim=True) self._detail_profile = Profile(dim=True) # # Earily initialization # is_a_tty = sys.stdout.isatty() and sys.stderr.isatty() # Enable interactive mode if we're attached to a tty if main_options['no_interactive']: self.interactive = False else: self.interactive = is_a_tty # Handle errors interactively if we're in interactive mode # and --on-error was not specified on the command line if main_options.get('on_error') is not None: self._interactive_failures = False else: self._interactive_failures = self.interactive # Use color output if we're attached to a tty, unless # otherwise specified on the comand line if main_options['colors'] is None: self.colors = is_a_tty elif main_options['colors']: self.colors = True else: self.colors = False # Increase the soft limit for open file descriptors to the maximum. # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox. # Avoid hitting the limit too quickly. limits = resource.getrlimit(resource.RLIMIT_NOFILE) if limits[0] != limits[1]: # Set soft limit to hard limit resource.setrlimit(resource.RLIMIT_NOFILE, (limits[1], limits[1])) # create() # # Should be used instead of the regular constructor. # # This will select a platform specific App implementation # # Args: # The same args as the App() constructor # @classmethod def create(cls, *args, **kwargs): if sys.platform.startswith('linux'): # Use an App with linux specific features from .linuxapp import LinuxApp # pylint: disable=import-outside-toplevel return LinuxApp(*args, **kwargs) else: # The base App() class is default return App(*args, **kwargs) # initialized() # # Context manager to initialize the application and optionally run a session # within the context manager. # # This context manager will take care of catching errors from within the # context and report them consistently, so the CLI need not take care of # reporting the errors and exiting with a consistent error status. # # Args: # session_name (str): The name of the session, or None for no session # # Note that the except_ argument may have a subtly different meaning depending # on the activity performed on the Pipeline. In normal circumstances the except_ # argument excludes elements from the `elements` list. In a build session, the # except_ elements are excluded from the tracking plan. 
# # If a session_name is provided, we treat the block as a session, and print # the session header and summary, and time the main session from startup time. # @contextmanager def initialized(self, *, session_name=None): directory = self._main_options['directory'] config = self._main_options['config'] self._session_name = session_name # # Load the Context # try: self.context = Context() self.context.load(config) except BstError as e: self._error_exit(e, "Error loading user configuration") # Override things in the context from our command line options, # the command line when used, trumps the config files. # override_map = { 'strict': '_strict_build_plan', 'debug': 'log_debug', 'verbose': 'log_verbose', 'error_lines': 'log_error_lines', 'message_lines': 'log_message_lines', 'on_error': 'sched_error_action', 'fetchers': 'sched_fetchers', 'builders': 'sched_builders', 'pushers': 'sched_pushers', 'max_jobs': 'build_max_jobs', 'network_retries': 'sched_network_retries' } for cli_option, context_attr in override_map.items(): option_value = self._main_options.get(cli_option) if option_value is not None: setattr(self.context, context_attr, option_value) try: Platform.get_platform() except BstError as e: self._error_exit(e, "Error instantiating platform") # Create the logger right before setting the message handler self.logger = LogLine(self.context, self._content_profile, self._format_profile, self._success_profile, self._error_profile, self._detail_profile, indent=INDENT) # Propagate pipeline feedback to the user self.context.set_message_handler(self._message_handler) # Preflight the artifact cache after initializing logging, # this can cause messages to be emitted. try: self.context.artifactcache.preflight() except BstError as e: self._error_exit(e, "Error instantiating artifact cache") # # Load the Project # try: self.project = Project(directory, self.context, cli_options=self._main_options['option'], default_mirror=self._main_options.get('default_mirror')) except LoadError as e: # Let's automatically start a `bst init` session in this case if e.reason == LoadErrorReason.MISSING_PROJECT_CONF and self.interactive: click.echo("A project was not detected in the directory: {}".format(directory), err=True) click.echo("", err=True) if click.confirm("Would you like to create a new project here ?"): self.init_project(None) self._error_exit(e, "Error loading project") except BstError as e: self._error_exit(e, "Error loading project") # Now that we have a logger and message handler, # we can override the global exception hook. 
sys.excepthook = self._global_exception_handler # Create the stream right away, we'll need to pass it around self.stream = Stream(self.context, self.project, self._session_start, session_start_callback=self.session_start_cb, interrupt_callback=self._interrupt_handler, ticker_callback=self._tick, job_start_callback=self._job_started, job_complete_callback=self._job_completed) # Create our status printer, only available in interactive self._status = Status(self.context, self._content_profile, self._format_profile, self._success_profile, self._error_profile, self.stream, colors=self.colors) # Mark the beginning of the session if session_name: self._message(MessageType.START, session_name) # Run the body of the session here, once everything is loaded try: yield except BstError as e: # Print a nice summary if this is a session if session_name: elapsed = self.stream.elapsed_time if isinstance(e, StreamError) and e.terminated: # pylint: disable=no-member self._message(MessageType.WARN, session_name + ' Terminated', elapsed=elapsed) else: self._message(MessageType.FAIL, session_name, elapsed=elapsed) # Notify session failure self._notify("{} failed".format(session_name), "{}".format(e)) if self._started: self._print_summary() # Exit with the error self._error_exit(e) except RecursionError: click.echo("RecursionError: Depency depth is too large. Maximum recursion depth exceeded.", err=True) sys.exit(-1) else: # No exceptions occurred, print session time and summary if session_name: self._message(MessageType.SUCCESS, session_name, elapsed=self.stream.elapsed_time) if self._started: self._print_summary() # Notify session success self._notify("{} succeeded".format(session_name), "") # init_project() # # Initialize a new BuildStream project, either with the explicitly passed options, # or by starting an interactive session if project_name is not specified and the # application is running in interactive mode. 
# # Args: # project_name (str): The project name, must be a valid symbol name # format_version (int): The project format version, default is the latest version # element_path (str): The subdirectory to store elements in, default is 'elements' # force (bool): Allow overwriting an existing project.conf # def init_project(self, project_name, format_version=BST_FORMAT_VERSION, element_path='elements', force=False): directory = self._main_options['directory'] directory = os.path.abspath(directory) project_path = os.path.join(directory, 'project.conf') elements_path = os.path.join(directory, element_path) try: # Abort if the project.conf already exists, unless `--force` was specified in `bst init` if not force and os.path.exists(project_path): raise AppError("A project.conf already exists at: {}".format(project_path), reason='project-exists') if project_name: # If project name was specified, user interaction is not desired, just # perform some validation and write the project.conf _yaml.assert_symbol_name(None, project_name, 'project name') self._assert_format_version(format_version) self._assert_element_path(element_path) elif not self.interactive: raise AppError("Cannot initialize a new project without specifying the project name", reason='unspecified-project-name') else: # Collect the parameters using an interactive session project_name, format_version, element_path = \ self._init_project_interactive(project_name, format_version, element_path) # Create the directory if it doesnt exist try: os.makedirs(directory, exist_ok=True) except IOError as e: raise AppError("Error creating project directory {}: {}".format(directory, e)) from e # Create the elements sub-directory if it doesnt exist try: os.makedirs(elements_path, exist_ok=True) except IOError as e: raise AppError("Error creating elements sub-directory {}: {}" .format(elements_path, e)) from e # Dont use ruamel.yaml here, because it doesnt let # us programatically insert comments or whitespace at # the toplevel. try: with open(project_path, 'w', encoding='utf-8') as f: f.write("# Unique project name\n" + "name: {}\n\n".format(project_name) + "# Required BuildStream format version\n" + "format-version: {}\n\n".format(format_version) + "# Subdirectory where elements are stored\n" + "element-path: {}\n".format(element_path)) except IOError as e: raise AppError("Error writing {}: {}".format(project_path, e)) from e except BstError as e: self._error_exit(e) click.echo("", err=True) click.echo("Created project.conf at: {}".format(project_path), err=True) sys.exit(0) # shell_prompt(): # # Creates a prompt for a shell environment, using ANSI color codes # if they are available in the execution context. 
# # Args: # element (Element): The Element object to resolve a prompt for # # Returns: # (str): The formatted prompt to display in the shell # def shell_prompt(self, element): _, key, dim = element._get_display_key() element_name = element._get_full_name() if self.colors: prompt = self._format_profile.fmt('[') + \ self._content_profile.fmt(key, dim=dim) + \ self._format_profile.fmt('@') + \ self._content_profile.fmt(element_name) + \ self._format_profile.fmt(':') + \ self._content_profile.fmt('$PWD') + \ self._format_profile.fmt(']$') + ' ' else: prompt = '[{}@{}:${{PWD}}]$ '.format(key, element_name) return prompt # cleanup() # # Cleans up application state # # This is called by Click at exit time # def cleanup(self): if self.stream: self.stream.cleanup() ############################################################ # Abstract Class Methods # ############################################################ # notify() # # Notify the user of something which occurred, this # is intended to grab attention from the user. # # This is guaranteed to only be called in interactive mode # # Args: # title (str): The notification title # text (str): The notification text # def notify(self, title, text): pass ############################################################ # Local Functions # ############################################################ # Local function for calling the notify() virtual method # def _notify(self, title, text): if self.interactive: self.notify(title, text) # Local message propagator # def _message(self, message_type, message, **kwargs): args = dict(kwargs) self.context.message( Message(None, message_type, message, **args)) # Exception handler # def _global_exception_handler(self, etype, value, tb): # Print the regular BUG message formatted = "".join(traceback.format_exception(etype, value, tb)) self._message(MessageType.BUG, str(value), detail=formatted) # If the scheduler has started, try to terminate all jobs gracefully, # otherwise exit immediately. if self.stream.running: self.stream.terminate() else: sys.exit(-1) # # Render the status area, conditional on some internal state # def _maybe_render_status(self): # If we're suspended or terminating, then dont render the status area if self._status and self.stream and \ not (self.stream.suspended or self.stream.terminated): self._status.render() # # Handle ^C SIGINT interruptions in the scheduling main loop # def _interrupt_handler(self): # Only handle ^C interactively in interactive mode if not self.interactive: self._status.clear() self.stream.terminate() return # Here we can give the user some choices, like whether they would # like to continue, abort immediately, or only complete processing of # the currently ongoing tasks. We can also print something more # intelligent, like how many tasks remain to complete overall. 
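#
# The prompt below resolves abbreviated input through the prefix-matching
# value processor defined at the bottom of this file; as an illustration
# (hypothetical calls, not part of the original source):
#
#     proc = _prefix_choice_value_proc(['continue', 'quit', 'terminate'])
#     proc('q')   # -> 'quit'
#     proc('t')   # -> 'terminate'
#     proc('x')   # -> raises click.UsageError (no matching choice)
#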
with self._interrupted(): click.echo("\nUser interrupted with ^C\n" + "\n" "Choose one of the following options:\n" + " (c)ontinue - Continue queueing jobs as much as possible\n" + " (q)uit - Exit after all ongoing jobs complete\n" + " (t)erminate - Terminate any ongoing jobs and exit\n" + "\n" + "Pressing ^C again will terminate jobs and exit\n", err=True) try: choice = click.prompt("Choice:", value_proc=_prefix_choice_value_proc(['continue', 'quit', 'terminate']), default='continue', err=True) except (click.Abort, SystemError): # In some cases, the readline buffer underlying the prompt gets corrupted on the second CTRL+C # This throws a SystemError, which doesn't seem to be problematic for the rest of the program # Ensure a newline after automatically printed '^C' click.echo("", err=True) choice = 'terminate' if choice == 'terminate': click.echo("\nTerminating all jobs at user request\n", err=True) self.stream.terminate() else: if choice == 'quit': click.echo("\nCompleting ongoing tasks before quitting\n", err=True) self.stream.quit() elif choice == 'continue': click.echo("\nContinuing\n", err=True) def _tick(self, elapsed): self._maybe_render_status() def _job_started(self, job): self._status.add_job(job) self._maybe_render_status() def _job_completed(self, job, status): self._status.remove_job(job) self._maybe_render_status() # Dont attempt to handle a failure if the user has already opted to # terminate if status == JobStatus.FAIL and not self.stream.terminated: if isinstance(job, ElementJob): element = job.element queue = job.queue # Get the last failure message for additional context failure = self._fail_messages.get(element._unique_id) # XXX This is dangerous, sometimes we get the job completed *before* # the failure message reaches us ?? if not failure: self._status.clear() click.echo("\n\n\nBUG: Message handling out of sync, " + "unable to retrieve failure message for element {}\n\n\n\n\n" .format(element), err=True) else: self._handle_failure(element, queue, failure) else: click.echo("\nTerminating all jobs\n", err=True) self.stream.terminate() def _handle_failure(self, element, queue, failure): # Handle non interactive mode setting of what to do when a job fails. 
if not self._interactive_failures: if self.context.sched_error_action == 'terminate': self.stream.terminate() elif self.context.sched_error_action == 'quit': self.stream.quit() elif self.context.sched_error_action == 'continue': pass return # Interactive mode for element failures with self._interrupted(): summary = ("\n{} failure on element: {}\n".format(failure.action_name, element.name) + "\n" + "Choose one of the following options:\n" + " (c)ontinue - Continue queueing jobs as much as possible\n" + " (q)uit - Exit after all ongoing jobs complete\n" + " (t)erminate - Terminate any ongoing jobs and exit\n" + " (r)etry - Retry this job\n") if failure.logfile: summary += " (l)og - View the full log file\n" if failure.sandbox: summary += " (s)hell - Drop into a shell in the failed build sandbox\n" summary += "\nPressing ^C will terminate jobs and exit\n" choices = ['continue', 'quit', 'terminate', 'retry'] if failure.logfile: choices += ['log'] if failure.sandbox: choices += ['shell'] choice = '' while choice not in ['continue', 'quit', 'terminate', 'retry']: click.echo(summary, err=True) self._notify("BuildStream failure", "{} on element {}" .format(failure.action_name, element.name)) try: choice = click.prompt("Choice:", default='continue', err=True, value_proc=_prefix_choice_value_proc(choices)) except (click.Abort, SystemError): # In some cases, the readline buffer underlying the prompt gets corrupted on the second CTRL+C # This throws a SystemError, which doesn't seem to be problematic for the rest of the program # Ensure a newline after automatically printed '^C' click.echo("", err=True) choice = 'terminate' # Handle choices which you can come back from # if choice == 'shell': click.echo("\nDropping into an interactive shell in the failed build sandbox\n", err=True) try: prompt = self.shell_prompt(element) self.stream.shell(element, Scope.BUILD, prompt, directory=failure.sandbox, isolate=True) except BstError as e: click.echo("Error while attempting to create interactive shell: {}".format(e), err=True) elif choice == 'log': with open(failure.logfile, 'r', encoding='utf-8') as logfile: content = logfile.read() click.echo_via_pager(content) if choice == 'terminate': click.echo("\nTerminating all jobs\n", err=True) self.stream.terminate() else: if choice == 'quit': click.echo("\nCompleting ongoing tasks before quitting\n", err=True) self.stream.quit() elif choice == 'continue': click.echo("\nContinuing with other non failing elements\n", err=True) elif choice == 'retry': click.echo("\nRetrying failed job\n", err=True) queue.failed_elements.remove(element) queue.enqueue([element]) # # Print the session heading if we've loaded a pipeline and there # is going to be a session # def session_start_cb(self): self._started = True if self._session_name: self.logger.print_heading(self.project, self.stream, log_file=self._main_options['log_file'], styling=self.colors) # # Print a summary of the queues # def _print_summary(self): click.echo("", err=True) self.logger.print_summary(self.stream, self._main_options['log_file'], styling=self.colors) # _error_exit() # # Exit with an error # # This will print the passed error to stderr and exit the program # with -1 status # # Args: # error (BstError): A BstError exception to print # prefix (str): An optional string to prepend to the error message # def _error_exit(self, error, prefix=None): click.echo("", err=True) main_error = "{}".format(error) if prefix is not None: main_error = "{}: {}".format(prefix, main_error) click.echo(main_error, err=True) if 
error.detail: indent = " " * INDENT detail = '\n' + indent + indent.join(error.detail.splitlines(True)) click.echo("{}".format(detail), err=True) # Record machine readable errors in a tempfile for the test harness to read back if 'BST_TEST_ERROR_CODES' in os.environ: task_error_domain, task_error_reason = get_last_task_error () error_codes = ujson.dumps ({ 'main_error_domain': error.domain.value if error.domain else None, 'main_error_reason': error.reason.value if isinstance (error.reason, Enum) else error.reason, 'task_error_domain': task_error_domain.value if task_error_domain else None, 'task_error_reason': ( task_error_reason.value if isinstance (task_error_reason, Enum) else task_error_reason ) }) with open (os.environ['BST_TEST_ERROR_CODES'], "w", encoding="utf-8") as f: f.write (error_codes) sys.exit(-1) # # Handle messages from the pipeline # def _message_handler(self, message, context): # Drop status messages from the UI if not verbose, we'll still see # info messages and status messages will still go to the log files. if not context.log_verbose and message.message_type == MessageType.STATUS: return # Hold on to the failure messages if message.message_type in [MessageType.FAIL, MessageType.BUG] and message.unique_id is not None: self._fail_messages[message.unique_id] = message # Send to frontend if appropriate if self.context.silent_messages() and (message.message_type not in unconditional_messages): return if self._status: self._status.clear() text = self.logger.render(message) click.echo(text, color=self.colors, nl=False, err=True) # Maybe render the status area self._maybe_render_status() # Additionally log to a file if self._main_options['log_file']: click.echo(text, file=self._main_options['log_file'], color=False, nl=False) @contextmanager def _interrupted(self): self._status.clear() try: with self.stream.suspend(): yield finally: self._maybe_render_status() # Some validation routines for project initialization # def _assert_format_version(self, format_version): message = "The version must be supported by this " + \ "version of buildstream (0 - {})\n".format(BST_FORMAT_VERSION) # Validate that it is an integer try: number = int(format_version) except ValueError as e: raise AppError(message, reason='invalid-format-version') from e # Validate that the specified version is supported if number < 0 or number > BST_FORMAT_VERSION: raise AppError(message, reason='invalid-format-version') def _assert_element_path(self, element_path): message = "The element path cannot be an absolute path or contain any '..' components\n" # Validate the path is not absolute if os.path.isabs(element_path): raise AppError(message, reason='invalid-element-path') # Validate that the path does not contain any '..' 
components path = element_path while path: split = os.path.split(path) path = split[0] basename = split[1] if basename == '..': raise AppError(message, reason='invalid-element-path') # _init_project_interactive() # # Collect the user input for an interactive session for App.init_project() # # Args: # project_name (str): The project name, must be a valid symbol name # format_version (int): The project format version, default is the latest version # element_path (str): The subdirectory to store elements in, default is 'elements' # # Returns: # project_name (str): The user selected project name # format_version (int): The user selected format version # element_path (str): The user selected element path # def _init_project_interactive(self, project_name, format_version=BST_FORMAT_VERSION, element_path='elements'): def project_name_proc(user_input): try: _yaml.assert_symbol_name(None, user_input, 'project name') except LoadError as e: message = "{}\n\n{}\n".format(e, e.detail) raise UsageError(message) from e return user_input def format_version_proc(user_input): try: self._assert_format_version(user_input) except AppError as e: raise UsageError(str(e)) from e return user_input def element_path_proc(user_input): try: self._assert_element_path(user_input) except AppError as e: raise UsageError(str(e)) from e return user_input w = TextWrapper(initial_indent=' ', subsequent_indent=' ', width=79) # Collect project name click.echo("", err=True) click.echo(self._content_profile.fmt("Choose a unique name for your project"), err=True) click.echo(self._format_profile.fmt("-------------------------------------"), err=True) click.echo("", err=True) click.echo(self._detail_profile.fmt( w.fill("The project name is a unique symbol for your project and will be used " "to distinguish your project from others in user preferences, namspaceing " "of your project's artifacts in shared artifact caches, and in any case where " "BuildStream needs to distinguish between multiple projects.")), err=True) click.echo("", err=True) click.echo(self._detail_profile.fmt( w.fill("The project name must contain only alphanumeric characters, " "may not start with a digit, and may contain dashes or underscores.")), err=True) click.echo("", err=True) project_name = click.prompt(self._content_profile.fmt("Project name"), value_proc=project_name_proc, err=True) click.echo("", err=True) # Collect format version click.echo(self._content_profile.fmt("Select the minimum required format version for your project"), err=True) click.echo(self._format_profile.fmt("-----------------------------------------------------------"), err=True) click.echo("", err=True) click.echo(self._detail_profile.fmt( w.fill("The format version is used to provide users who build your project " "with a helpful error message in the case that they do not have a recent " "enough version of BuildStream supporting all the features which your " "project might use.")), err=True) click.echo("", err=True) click.echo(self._detail_profile.fmt( w.fill("The lowest version allowed is 0, the currently installed version of BuildStream " "supports up to format version {}.".format(BST_FORMAT_VERSION))), err=True) click.echo("", err=True) format_version = click.prompt(self._content_profile.fmt("Format version"), value_proc=format_version_proc, default=format_version, err=True) click.echo("", err=True) # Collect element path click.echo(self._content_profile.fmt("Select the element path"), err=True) click.echo(self._format_profile.fmt("-----------------------"), err=True) 
click.echo("", err=True) click.echo(self._detail_profile.fmt( w.fill("The element path is a project subdirectory where element .bst files are stored " "within your project.")), err=True) click.echo("", err=True) click.echo(self._detail_profile.fmt( w.fill("Elements will be displayed in logs as filenames relative to " "the element path, and similarly, dependencies must be expressed as filenames " "relative to the element path.")), err=True) click.echo("", err=True) element_path = click.prompt(self._content_profile.fmt("Element path"), value_proc=element_path_proc, default=element_path, err=True) return (project_name, format_version, element_path) # # Return a value processor for partial choice matching. # The returned values processor will test the passed value with all the item # in the 'choices' list. If the value is a prefix of one of the 'choices' # element, the element is returned. If no element or several elements match # the same input, a 'click.UsageError' exception is raised with a description # of the error. # # Note that Click expect user input errors to be signaled by raising a # 'click.UsageError' exception. That way, Click display an error message and # ask for a new input. # def _prefix_choice_value_proc(choices): def value_proc(user_input): remaining_candidate = [choice for choice in choices if choice.startswith(user_input)] if not remaining_candidate: raise UsageError("Expected one of {}, got {}".format(choices, user_input)) if len(remaining_candidate) == 1: return remaining_candidate[0] raise UsageError("Ambiguous input. '{}' can refer to one of {}".format(user_input, remaining_candidate)) return value_proc buildstream-1.6.9/buildstream/_frontend/cli.py000066400000000000000000001055651437515270000214740ustar00rootroot00000000000000import os import sys import fcntl import click from .. import _yaml from .._exceptions import BstError, LoadError, AppError from .._versions import BST_FORMAT_VERSION from .complete import main_bashcomplete, complete_path, CompleteUnhandled ################################################################## # Override of click's main entry point # ################################################################## # search_command() # # Helper function to get a command and context object # for a given command. 
# # Args: # commands (list): A list of command words following `bst` invocation # context (click.Context): An existing toplevel context, or None # # Returns: # context (click.Context): The context of the associated command, or None # def search_command(args, *, context=None): if context is None: context = cli.make_context('bst', args, resilient_parsing=True) # Loop into the deepest command command = cli command_ctx = context for cmd in args: command = command_ctx.command.get_command(command_ctx, cmd) if command is None: return None command_ctx = command.make_context(command.name, [command.name], parent=command_ctx, resilient_parsing=True) return command_ctx # Completion for completing command names as help arguments def complete_commands(cmd, args, incomplete): command_ctx = search_command(args[1:]) if command_ctx and command_ctx.command and isinstance(command_ctx.command, click.MultiCommand): return [subcommand + " " for subcommand in command_ctx.command.list_commands(command_ctx)] return [] # Special completion for completing the bst elements in a project dir def complete_target(args, incomplete): """ :param args: full list of args typed before the incomplete arg :param incomplete: the incomplete text to autocomplete :return: all the possible user-specified completions for the param """ project_conf = 'project.conf' def ensure_project_dir(directory): directory = os.path.abspath(directory) while not os.path.isfile(os.path.join(directory, project_conf)): parent_dir = os.path.dirname(directory) if directory == parent_dir: break directory = parent_dir return directory # First resolve the directory, in case there is an # active --directory/-C option # base_directory = '.' idx = -1 try: idx = args.index('-C') except ValueError: try: idx = args.index('--directory') except ValueError: pass if idx >= 0 and len(args) > idx + 1: base_directory = args[idx + 1] else: # Check if this directory or any of its parent directories # contain a project config file base_directory = ensure_project_dir(base_directory) # Now parse the project.conf just to find the element path, # this is unfortunately a bit heavy. project_file = os.path.join(base_directory, project_conf) try: project = _yaml.load(project_file) except LoadError: # If there is no project directory in context, just don't # even bother trying to complete anything. return [] # The project is not required to have an element-path element_directory = project.get('element-path') # If a project was loaded, use its element-path to # adjust our completion's base directory if element_directory: base_directory = os.path.join(base_directory, element_directory) return complete_path("File", incomplete, base_directory=base_directory) def override_completions(cmd, cmd_param, args, incomplete): """ :param cmd_param: command definition :param args: full list of args typed before the incomplete arg :param incomplete: the incomplete text to autocomplete :return: all the possible user-specified completions for the param """ if cmd.name == 'help': return complete_commands(cmd, args, incomplete) # We can't easily extend click's data structures without # modifying click itself, so just do some weak special casing # right here and select which parameters we want to handle specially.
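# For instance (illustrative summary of the check below): the 'elements'/'element'/'except_' arguments and the --track/--track-except options all take element paths, so they are routed to complete_target(); anything else falls through to the default type-based completion by raising CompleteUnhandled.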
if isinstance(cmd_param.type, click.Path) and \ (cmd_param.name == 'elements' or cmd_param.name == 'element' or cmd_param.name == 'except_' or cmd_param.opts == ['--track'] or cmd_param.opts == ['--track-except']): return complete_target(args, incomplete) raise CompleteUnhandled() def override_main(self, args=None, prog_name=None, complete_var=None, standalone_mode=True, **extra): # Hook for the Bash completion. This only activates if the Bash # completion is actually enabled, otherwise this is quite a fast # noop. if main_bashcomplete(self, prog_name, override_completions): # If we're running tests we cant just go calling exit() # from the main process. # # The below is a quicker exit path for the sake # of making completions respond faster. if 'BST_TEST_SUITE' not in os.environ: sys.stdout.flush() sys.stderr.flush() os._exit(0) # Regular client return for test cases return # Check output file descriptor at earliest opportunity, to # provide a reasonable error message instead of a stack trace # in the case that it is blocking for stream in (sys.stdout, sys.stderr): fileno = stream.fileno() flags = fcntl.fcntl(fileno, fcntl.F_GETFL) if flags & os.O_NONBLOCK: click.echo("{} is currently set to O_NONBLOCK, try opening a new shell" .format(stream.name), err=True) sys.exit(-1) original_main(self, args=args, prog_name=prog_name, complete_var=None, standalone_mode=standalone_mode, **extra) original_main = click.BaseCommand.main click.BaseCommand.main = override_main ################################################################## # Main Options # ################################################################## def print_version(ctx, param, value): if not value or ctx.resilient_parsing: return from .. import __version__ # pylint: disable=import-outside-toplevel click.echo(__version__) ctx.exit() @click.group(context_settings=dict(help_option_names=['-h', '--help'])) @click.option('--version', is_flag=True, callback=print_version, expose_value=False, is_eager=True) @click.option('--config', '-c', type=click.Path(exists=True, dir_okay=False, readable=True), help="Configuration file to use") @click.option('--directory', '-C', default=os.getcwd(), type=click.Path(file_okay=False, readable=True), help="Project directory (default: current directory)") @click.option('--on-error', default=None, type=click.Choice(['continue', 'quit', 'terminate']), help="What to do when an error is encountered") @click.option('--fetchers', type=click.INT, default=None, help="Maximum simultaneous download tasks") @click.option('--builders', type=click.INT, default=None, help="Maximum simultaneous build tasks") @click.option('--pushers', type=click.INT, default=None, help="Maximum simultaneous upload tasks") @click.option('--max-jobs', type=click.INT, default=None, help="Number of parallel jobs allowed for a given build task") @click.option('--network-retries', type=click.INT, default=None, help="Maximum retries for network tasks") @click.option('--no-interactive', is_flag=True, default=False, help="Force non interactive mode, otherwise this is automatically decided") @click.option('--verbose/--no-verbose', default=None, help="Be extra verbose") @click.option('--debug/--no-debug', default=None, help="Print debugging output") @click.option('--error-lines', type=click.INT, default=None, help="Maximum number of lines to show from a task log") @click.option('--message-lines', type=click.INT, default=None, help="Maximum number of lines to show in a detailed message") @click.option('--log-file', type=click.File(mode='w', 
encoding='UTF-8'), help="A file to store the main log (allows storing the main log while in interactive mode)") @click.option('--colors/--no-colors', default=None, help="Force enable/disable ANSI color codes in output") @click.option('--strict/--no-strict', default=None, is_flag=True, help="Elements must be rebuilt when their dependencies have changed") @click.option('--option', '-o', type=click.Tuple([str, str]), multiple=True, metavar='OPTION VALUE', help="Specify a project option") @click.option('--default-mirror', default=None, help="The mirror to fetch from first, before attempting other mirrors") @click.pass_context def cli(context, **kwargs): """Build and manipulate BuildStream projects Most of the main options override options in the user preferences configuration file. """ from .app import App # pylint: disable=import-outside-toplevel # Create the App, giving it the main arguments context.obj = App.create(dict(kwargs)) context.call_on_close(context.obj.cleanup) ################################################################## # Help Command # ################################################################## @cli.command(name="help", short_help="Print usage information", context_settings={"help_option_names": []}) @click.argument("command", nargs=-1, metavar='COMMAND') @click.pass_context def help_command(ctx, command): """Print usage information about a given command """ command_ctx = search_command(command, context=ctx.parent) if not command_ctx: click.echo("Not a valid command: '{} {}'" .format(ctx.parent.info_name, " ".join(command)), err=True) sys.exit(-1) click.echo(command_ctx.command.get_help(command_ctx), err=True) # Hint about available sub commands if isinstance(command_ctx.command, click.MultiCommand): detail = " " if command: detail = " {} ".format(" ".join(command)) click.echo("\nFor usage on a specific command: {} help{}COMMAND" .format(ctx.parent.info_name, detail), err=True) ################################################################## # Init Command # ################################################################## @cli.command(short_help="Initialize a new BuildStream project") @click.option('--project-name', type=click.STRING, help="The project name to use") @click.option('--format-version', type=click.INT, default=BST_FORMAT_VERSION, help="The required format version (default: {})".format(BST_FORMAT_VERSION)) @click.option('--element-path', type=click.Path(), default="elements", help="The subdirectory to store elements in (default: elements)") @click.option('--force', '-f', default=False, is_flag=True, help="Allow overwriting an existing project.conf") @click.pass_obj def init(app, project_name, format_version, element_path, force): """Initialize a new BuildStream project Creates a new BuildStream project.conf in the project directory. Unless `--project-name` is specified, this will be an interactive session. """ app.init_project(project_name, format_version, element_path, force) ################################################################## # Build Command # ################################################################## @cli.command(short_help="Build elements in a pipeline") @click.option('--all', 'all_', default=False, is_flag=True, help="Build elements that would not be needed for the current build plan") @click.option('--track', 'track_', multiple=True, type=click.Path(readable=False), help="Specify elements to track during the build. 
Can be used " "repeatedly to specify multiple elements") @click.option('--track-all', default=False, is_flag=True, help="Track all elements in the pipeline") @click.option('--track-except', multiple=True, type=click.Path(readable=False), help="Except certain dependencies from tracking") @click.option('--track-cross-junctions', '-J', default=False, is_flag=True, help="Allow tracking to cross junction boundaries") @click.option('--track-save', default=False, is_flag=True, help="Deprecated: This is ignored") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def build(app, elements, all_, track_, track_save, track_all, track_except, track_cross_junctions): """Build elements in a pipeline""" if (track_except or track_cross_junctions) and not (track_ or track_all): click.echo("ERROR: The --track-except and --track-cross-junctions options " "can only be used with --track or --track-all", err=True) sys.exit(-1) if track_save: click.echo("WARNING: --track-save is deprecated, saving is now unconditional", err=True) if track_all: track_ = elements with app.initialized(session_name="Build"): app.stream.build(elements, track_targets=track_, track_except=track_except, track_cross_junctions=track_cross_junctions, build_all=all_) ################################################################## # Fetch Command # ################################################################## @cli.command(short_help="Fetch sources in a pipeline") @click.option('--except', 'except_', multiple=True, type=click.Path(readable=False), help="Except certain dependencies from fetching") @click.option('--deps', '-d', default='plan', type=click.Choice(['none', 'plan', 'all']), help='The dependencies to fetch (default: plan)') @click.option('--track', 'track_', default=False, is_flag=True, help="Track new source references before fetching") @click.option('--track-cross-junctions', '-J', default=False, is_flag=True, help="Allow tracking to cross junction boundaries") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def fetch(app, elements, deps, track_, except_, track_cross_junctions): """Fetch sources required to build the pipeline By default this will only try to fetch sources which are required for the build plan of the specified target element, omitting sources for any elements which are already built and available in the artifact cache. 
Specify `--deps` to control which sources to fetch: \b none: No dependencies, just the element itself plan: Only dependencies required for the build plan all: All dependencies """ from .._pipeline import PipelineSelection # pylint: disable=import-outside-toplevel if track_cross_junctions and not track_: click.echo("ERROR: The --track-cross-junctions option can only be used with --track", err=True) sys.exit(-1) if track_ and deps == PipelineSelection.PLAN: click.echo("WARNING: --track specified for tracking of a build plan\n\n" "Since tracking modifies the build plan, all elements will be tracked.", err=True) deps = PipelineSelection.ALL with app.initialized(session_name="Fetch"): app.stream.fetch(elements, selection=deps, except_targets=except_, track_targets=track_, track_cross_junctions=track_cross_junctions) ################################################################## # Track Command # ################################################################## @cli.command(short_help="Track new source references") @click.option('--except', 'except_', multiple=True, type=click.Path(readable=False), help="Except certain dependencies from tracking") @click.option('--deps', '-d', default='none', type=click.Choice(['none', 'all']), help='The dependencies to track (default: none)') @click.option('--cross-junctions', '-J', default=False, is_flag=True, help="Allow crossing junction boundaries") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def track(app, elements, deps, except_, cross_junctions): """Consults the specified tracking branches for new versions available to build and updates the project with any newly available references. By default this will track just the specified element, but you can also update a whole tree of dependencies in one go. Specify `--deps` to control which sources to track: \b none: No dependencies, just the specified elements all: All dependencies of all specified elements """ with app.initialized(session_name="Track"): # Substitute 'none' for 'redirect' so that element redirections # will be done if deps == 'none': deps = 'redirect' app.stream.track(elements, selection=deps, except_targets=except_, cross_junctions=cross_junctions) ################################################################## # Pull Command # ################################################################## @cli.command(short_help="Pull a built artifact") @click.option('--deps', '-d', default='none', type=click.Choice(['none', 'all']), help='The dependency artifacts to pull (default: none)') @click.option('--remote', '-r', help="The URL of the remote cache (defaults to the first configured cache)") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def pull(app, elements, deps, remote): """Pull a built artifact from the configured remote artifact cache. By default the artifact will be pulled from one of the configured caches if possible, following the usual priority order. If the `--remote` flag is given, only the specified cache will be queried.
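For example (illustrative invocation, where "target.bst" is a placeholder element name and the `--deps` values are described below): \b bst pull --deps all target.bst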
Specify `--deps` to control which artifacts to pull: \b none: No dependencies, just the element itself all: All dependencies """ with app.initialized(session_name="Pull"): app.stream.pull(elements, selection=deps, remote=remote) ################################################################## # Push Command # ################################################################## @cli.command(short_help="Push a built artifact") @click.option('--deps', '-d', default='none', type=click.Choice(['none', 'all']), help='The dependencies to push (default: none)') @click.option('--remote', '-r', default=None, help="The URL of the remote cache (defaults to the first configured cache)") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def push(app, elements, deps, remote): """Push a built artifact to a remote artifact cache. The default destination is the highest priority configured cache. You can override this by passing a different cache URL with the `--remote` flag. Specify `--deps` to control which artifacts to push: \b none: No dependencies, just the element itself all: All dependencies """ with app.initialized(session_name="Push"): app.stream.push(elements, selection=deps, remote=remote) ################################################################## # Show Command # ################################################################## @cli.command(short_help="Show elements in the pipeline") @click.option('--except', 'except_', multiple=True, type=click.Path(readable=False), help="Except certain dependencies") @click.option('--deps', '-d', default='all', type=click.Choice(['none', 'plan', 'run', 'build', 'all']), help='The dependencies to show (default: all)') @click.option('--order', default="stage", type=click.Choice(['stage', 'alpha']), help='Staging or alphabetic ordering of dependencies') @click.option('--format', '-f', 'format_', metavar='FORMAT', default=None, type=click.STRING, help='Format string for each element') @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def show(app, elements, deps, except_, order, format_): """Show elements in the pipeline By default this will show all of the dependencies of the specified target element. 
Specify `--deps` to control which elements to show: \b none: No dependencies, just the element itself plan: Dependencies required for a build plan run: Runtime dependencies, including the element itself build: Build time dependencies, excluding the element itself all: All dependencies \b FORMAT ~~~~~~ The --format option controls what should be printed for each element, the following symbols can be used in the format string: \b %{name} The element name %{key} The abbreviated cache key (if all sources are consistent) %{full-key} The full cache key (if all sources are consistent) %{state} cached, buildable, waiting or inconsistent %{config} The element configuration %{vars} Variable configuration %{env} Environment settings %{public} Public domain data %{workspaced} If the element is workspaced %{workspace-dirs} A list of workspace directories The value of the %{symbol} without the leading '%' character is understood as a pythonic formatting string, so python formatting features apply, example: \b bst show target.bst --format \\ 'Name: %{name: ^20} Key: %{key: ^8} State: %{state}' If you want to use a newline in a format string in bash, use the '$' modifier: \b bst show target.bst --format \\ $'---------- %{name} ----------\\n%{vars}' """ with app.initialized(): dependencies = app.stream.load_selection(elements, selection=deps, except_targets=except_) if order == "alpha": dependencies = sorted(dependencies) if not format_: format_ = app.context.log_element_format report = app.logger.show_pipeline(dependencies, format_) click.echo(report, color=app.colors) ################################################################## # Shell Command # ################################################################## @cli.command(short_help="Shell into an element's sandbox environment") @click.option('--build', '-b', 'build_', is_flag=True, default=False, help='Stage dependencies and sources to build') @click.option('--sysroot', '-s', default=None, type=click.Path(exists=True, file_okay=False, readable=True), help="An existing sysroot") @click.option('--mount', type=click.Tuple([click.Path(exists=True), str]), multiple=True, metavar='HOSTPATH PATH', help="Mount a file or directory into the sandbox") @click.option('--isolate', is_flag=True, default=False, help='Create an isolated build sandbox') @click.argument('element', type=click.Path(readable=False)) @click.argument('command', type=click.STRING, nargs=-1) @click.pass_obj def shell(app, element, sysroot, mount, isolate, build_, command): """Run a command in the target element's sandbox environment This will stage a temporary sysroot for running the target element, assuming it has already been built and all required artifacts are in the local cache. Use the --build option to create a temporary sysroot for building the element instead. Use the --sysroot option with an existing failed build directory or with a checkout of the given target, in order to use a specific sysroot. If no COMMAND is specified, the default is to attempt to run an interactive shell.
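For example (illustrative invocation, where "target.bst" is a placeholder element name and the trailing arguments form COMMAND): \b bst shell target.bst -- ls /usr/bin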
""" # pylint: disable=import-outside-toplevel from ..element import Scope from .._project import HostMount from .._pipeline import PipelineSelection if build_: scope = Scope.BUILD else: scope = Scope.RUN with app.initialized(): dependencies = app.stream.load_selection((element,), selection=PipelineSelection.NONE) element = dependencies[0] prompt = app.shell_prompt(element) mounts = [ HostMount(path, host_path) for host_path, path in mount ] try: exitcode = app.stream.shell(element, scope, prompt, directory=sysroot, mounts=mounts, isolate=isolate, command=command) except BstError as e: raise AppError("Error launching shell: {}".format(e), detail=e.detail) from e # If there were no errors, we return the shell's exit code here. sys.exit(exitcode) ################################################################## # Checkout Command # ################################################################## @cli.command(short_help="Checkout a built artifact") @click.option('--force', '-f', default=False, is_flag=True, help="Allow files to be overwritten") @click.option('--deps', '-d', default='run', type=click.Choice(['run', 'none']), help='The dependencies to checkout (default: run)') @click.option('--integrate/--no-integrate', default=True, is_flag=True, help="Whether to run integration commands") @click.option('--hardlinks', default=False, is_flag=True, help="Checkout hardlinks instead of copies (handle with care)") @click.option('--tar', default=False, is_flag=True, help="Create a tarball from the artifact contents instead " "of a file tree. If LOCATION is '-', the tarball " "will be dumped to the standard output.") @click.argument('element', type=click.Path(readable=False)) @click.argument('location', type=click.Path()) @click.pass_obj def checkout(app, element, location, force, deps, integrate, hardlinks, tar): """Checkout a built artifact to the specified location """ if hardlinks and tar: click.echo("ERROR: options --hardlinks and --tar conflict", err=True) sys.exit(-1) with app.initialized(): app.stream.checkout(element, location=location, force=force, deps=deps, integrate=integrate, hardlinks=hardlinks, tar=tar) ################################################################## # Workspace Command # ################################################################## @cli.group(short_help="Manipulate developer workspaces") def workspace(): """Manipulate developer workspaces""" ################################################################## # Workspace Open Command # ################################################################## @workspace.command(name='open', short_help="Open a new workspace") @click.option('--no-checkout', default=False, is_flag=True, help="Do not checkout the source, only link to the given directory") @click.option('--force', '-f', default=False, is_flag=True, help="Overwrite files existing in checkout directory") @click.option('--track', 'track_', default=False, is_flag=True, help="Track and fetch new source references before checking out the workspace") @click.argument('element', type=click.Path(readable=False)) @click.argument('directory', type=click.Path(file_okay=False)) @click.pass_obj def workspace_open(app, no_checkout, force, track_, element, directory): """Open a workspace for manual source modification""" if os.path.exists(directory): if not os.path.isdir(directory): click.echo("Checkout directory is not a directory: {}".format(directory), err=True) sys.exit(-1) if not (no_checkout or force) and os.listdir(directory): click.echo("Checkout directory is not 
empty: {}".format(directory), err=True) sys.exit(-1) with app.initialized(): app.stream.workspace_open(element, directory, no_checkout=no_checkout, track_first=track_, force=force) ################################################################## # Workspace Close Command # ################################################################## @workspace.command(name='close', short_help="Close workspaces") @click.option('--remove-dir', default=False, is_flag=True, help="Remove the path that contains the closed workspace") @click.option('--all', '-a', 'all_', default=False, is_flag=True, help="Close all open workspaces") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def workspace_close(app, remove_dir, all_, elements): """Close a workspace""" if not (all_ or elements): click.echo('ERROR: no elements specified', err=True) sys.exit(-1) with app.initialized(): # Early exit if we specified `all` and there are no workspaces if all_ and not app.stream.workspace_exists(): click.echo('No open workspaces to close', err=True) sys.exit(0) if all_: elements = [element_name for element_name, _ in app.context.get_workspaces().list()] elements = app.stream.redirect_element_names(elements) # Check that the workspaces in question exist nonexisting = [] for element_name in elements: if not app.stream.workspace_exists(element_name): nonexisting.append(element_name) if nonexisting: raise AppError("Workspace does not exist", detail="\n".join(nonexisting)) if app.interactive and remove_dir: if not click.confirm('This will remove all your changes, are you sure?'): click.echo('Aborting', err=True) sys.exit(-1) for element_name in elements: app.stream.workspace_close(element_name, remove_dir=remove_dir) ################################################################## # Workspace Reset Command # ################################################################## @workspace.command(name='reset', short_help="Reset a workspace to its original state") @click.option('--soft', default=False, is_flag=True, help="Reset workspace state without affecting its contents") @click.option('--track', 'track_', default=False, is_flag=True, help="Track and fetch the latest source before resetting") @click.option('--all', '-a', 'all_', default=False, is_flag=True, help="Reset all open workspaces") @click.argument('elements', nargs=-1, type=click.Path(readable=False)) @click.pass_obj def workspace_reset(app, soft, track_, all_, elements): """Reset a workspace to its original state""" # Check that the workspaces in question exist with app.initialized(): if not (all_ or elements): raise AppError('No elements specified to reset') if all_ and not app.stream.workspace_exists(): raise AppError("No open workspaces to reset") if app.interactive and not soft: if not click.confirm('This will remove all your changes, are you sure?'): click.echo('Aborting', err=True) sys.exit(-1) if all_: elements = tuple(element_name for element_name, _ in app.context.get_workspaces().list()) app.stream.workspace_reset(elements, soft=soft, track_first=track_) ################################################################## # Workspace List Command # ################################################################## @workspace.command(name='list', short_help="List open workspaces") @click.pass_obj def workspace_list(app): """List open workspaces""" with app.initialized(): app.stream.workspace_list() ################################################################## # Source Bundle Command # 
################################################################## @cli.command(name="source-bundle", short_help="Produce a build bundle to be manually executed") @click.option('--except', 'except_', multiple=True, type=click.Path(readable=False), help="Elements to except from the tarball") @click.option('--compression', default='gz', type=click.Choice(['none', 'gz', 'bz2', 'xz']), help="Compress the tar file using the given algorithm.") @click.option('--track', 'track_', default=False, is_flag=True, help="Track new source references before bundling") @click.option('--force', '-f', default=False, is_flag=True, help="Overwrite an existing tarball") @click.option('--directory', default=os.getcwd(), help="The directory to write the tarball to") @click.argument('element', type=click.Path(readable=False)) @click.pass_obj def source_bundle(app, element, force, directory, track_, compression, except_): """Produce a source bundle to be manually executed """ with app.initialized(): app.stream.source_bundle(element, directory, track_first=track_, force=force, compression=compression, except_targets=except_) buildstream-1.6.9/buildstream/_frontend/complete.py000066400000000000000000000313051437515270000225230ustar00rootroot00000000000000# # Copyright (c) 2014 by Armin Ronacher. # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # This module was forked from the python click library, Included # original copyright notice from the Click library and following disclaimer # as per their LICENSE requirements. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import collections.abc import copy import os import click from click.core import MultiCommand, Option, Argument from click.parser import split_arg_string WORDBREAK = '=' COMPLETION_SCRIPT = ''' %(complete_func)s() { local IFS=$'\n' COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ COMP_CWORD=$COMP_CWORD \\ %(autocomplete_var)s=complete $1 ) ) return 0 } complete -F %(complete_func)s -o nospace %(script_names)s ''' # An exception for our custom completion handler to # indicate that it does not want to handle completion # for this parameter # class CompleteUnhandled(Exception): pass def complete_path(path_type, incomplete, base_directory='.'): """Helper method for implementing the completions() method for File and Path parameter types. """ # Try listing the files in the relative or absolute path # specified in `incomplete` minus the last path component, # otherwise list files starting from the current working directory. entries = [] base_path = '' # This is getting a bit messy listed_base_directory = False if os.path.sep in incomplete: split = incomplete.rsplit(os.path.sep, 1) base_path = split[0] # If there was nothing on the left of the last separator, # we are completing files in the filesystem root base_path = os.path.join(base_directory, base_path) else: incomplete_base_path = os.path.join(base_directory, incomplete) if os.path.isdir(incomplete_base_path): base_path = incomplete_base_path try: if base_path: if os.path.isdir(base_path): entries = [os.path.join(base_path, e) for e in os.listdir(base_path)] else: entries = os.listdir(base_directory) listed_base_directory = True except OSError: # If for any reason the os reports an error from os.listdir(), just # ignore this and avoid a stack trace pass base_directory_slash = base_directory if not base_directory_slash.endswith(os.sep): base_directory_slash += os.sep base_directory_len = len(base_directory_slash) def entry_is_dir(entry): if listed_base_directory: entry = os.path.join(base_directory, entry) return os.path.isdir(entry) def fix_path(path): # Append slashes to any entries which are directories, or # spaces for other files since they cannot be further completed if entry_is_dir(path) and not path.endswith(os.sep): path = path + os.sep else: path = path + " " # Remove the artificial leading path portion which # may have been prepended for search purposes. if path.startswith(base_directory_slash): path = path[base_directory_len:] return path return [ # Return an appropriate path for each entry fix_path(e) for e in sorted(entries) # Filter out non directory elements when searching for a directory, # the opposite is fine, however. if not (path_type == 'Directory' and not entry_is_dir(e)) ] # Instead of delegating completions to the param type, # hard code all of buildstream's completions here. 
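# For example (descriptive summary of get_param_type_completion() below): a click.Choice parameter completes to its configured choices, while click.File and click.Path parameters fall back to filesystem path completion.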
# # This whole module should be removed in favor of more # generic code in click once this issue is resolved: # https://github.com/pallets/click/issues/780 # def get_param_type_completion(param_type, incomplete): if isinstance(param_type, click.Choice): return [c + " " for c in param_type.choices] elif isinstance(param_type, click.File): return complete_path("File", incomplete) elif isinstance(param_type, click.Path): # Workaround click 8.x API break: # # https://github.com/pallets/click/issues/2037 # if param_type.file_okay and not param_type.dir_okay: path_type = "File" elif param_type.dir_okay and not param_type.file_okay: path_type = "Directory" else: path_type = "Path" return complete_path(path_type, incomplete) return [] def resolve_ctx(cli, prog_name, args): """ Parse into a hierarchy of contexts. Contexts are connected through the parent variable. :param cli: command definition :param prog_name: the program that is running :param args: full list of args typed before the incomplete arg :return: the final context/command parsed """ ctx = cli.make_context(prog_name, args, resilient_parsing=True) args_remaining = ctx.protected_args + ctx.args while ctx is not None and args_remaining: if isinstance(ctx.command, MultiCommand): cmd = ctx.command.get_command(ctx, args_remaining[0]) if cmd is None: return None ctx = cmd.make_context(args_remaining[0], args_remaining[1:], parent=ctx, resilient_parsing=True) args_remaining = ctx.protected_args + ctx.args else: ctx = ctx.parent return ctx def start_of_option(param_str): """ :param param_str: param_str to check :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--") """ return param_str and param_str[:1] == '-' def is_incomplete_option(all_args, cmd_param): """ :param all_args: the full original list of args supplied :param cmd_param: the current command paramter :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and corresponds to this cmd_param. In other words whether this cmd_param option can still accept values """ if cmd_param.is_flag: return False last_option = None for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])): if index + 1 > cmd_param.nargs: break if start_of_option(arg_str): last_option = arg_str return last_option and last_option in cmd_param.opts def is_incomplete_argument(current_params, cmd_param): """ :param current_params: the current params and values for this argument as already entered :param cmd_param: the current command parameter :return: whether or not the last argument is incomplete and corresponds to this cmd_param. In other words whether or not the this cmd_param argument can still accept values """ current_param_values = current_params[cmd_param.name] if current_param_values is None: return True if cmd_param.nargs == -1: return True if isinstance(current_param_values, collections.abc.Iterable) \ and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs: return True return False def get_user_autocompletions(args, incomplete, cmd, cmd_param, override): """ :param args: full list of args typed before the incomplete arg :param incomplete: the incomplete text of the arg to autocomplete :param cmd_param: command definition :param override: a callable (cmd_param, args, incomplete) that will be called to override default completion based on parameter type. Should raise 'CompleteUnhandled' if it could not find a completion. 
:return: all the possible user-specified completions for the param """ # Use the type specific default completions unless it was overridden try: return override(cmd=cmd, cmd_param=cmd_param, args=args, incomplete=incomplete) except CompleteUnhandled: return get_param_type_completion(cmd_param.type, incomplete) or [] def get_choices(cli, prog_name, args, incomplete, override): """ :param cli: command definition :param prog_name: the program that is running :param args: full list of args typed before the incomplete arg :param incomplete: the incomplete text of the arg to autocomplete :param override: a callable (cmd_param, args, incomplete) that will be called to override default completion based on parameter type. Should raise 'CompleteUnhandled' if it could not find a completion. :return: all the possible completions for the incomplete """ all_args = copy.deepcopy(args) ctx = resolve_ctx(cli, prog_name, args) if ctx is None: return # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse # without the '=' if start_of_option(incomplete) and WORDBREAK in incomplete: partition_incomplete = incomplete.partition(WORDBREAK) all_args.append(partition_incomplete[0]) incomplete = partition_incomplete[2] elif incomplete == WORDBREAK: incomplete = '' choices = [] found_param = False if start_of_option(incomplete): # completions for options for param in ctx.command.params: if isinstance(param, Option): choices.extend([param_opt + " " for param_opt in param.opts + param.secondary_opts if param_opt not in all_args or param.multiple]) found_param = True if not found_param: # completion for option values by choices for cmd_param in ctx.command.params: if isinstance(cmd_param, Option) and is_incomplete_option(all_args, cmd_param): choices.extend(get_user_autocompletions(all_args, incomplete, ctx.command, cmd_param, override)) found_param = True break if not found_param: # completion for argument values by choices for cmd_param in ctx.command.params: if isinstance(cmd_param, Argument) and is_incomplete_argument(ctx.params, cmd_param): choices.extend(get_user_autocompletions(all_args, incomplete, ctx.command, cmd_param, override)) found_param = True break if not found_param and isinstance(ctx.command, MultiCommand): # completion for any subcommands choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)]) if not start_of_option(incomplete) and ctx.parent is not None \ and isinstance(ctx.parent.command, MultiCommand) and ctx.parent.command.chain: # completion for chained commands remaining_comands = set(ctx.parent.command.list_commands(ctx.parent)) - set(ctx.parent.protected_args) choices.extend([cmd + " " for cmd in remaining_comands]) for item in choices: if item.startswith(incomplete): yield item def do_complete(cli, prog_name, override): cwords = split_arg_string(os.environ['COMP_WORDS']) cword = int(os.environ['COMP_CWORD']) args = cwords[1:cword] try: incomplete = cwords[cword] except IndexError: incomplete = '' for item in get_choices(cli, prog_name, args, incomplete, override): click.echo(item) # Main function called from main.py at startup here # def main_bashcomplete(cmd, prog_name, override): """Internal handler for the bash completion support.""" if '_BST_COMPLETION' in os.environ: do_complete(cmd, prog_name, override) return True return False buildstream-1.6.9/buildstream/_frontend/linuxapp.py000066400000000000000000000037151437515270000225570ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you 
can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import click from .app import App # This trick is currently only supported on some terminals, # avoid using it where it can cause garbage to be printed # to the terminal. # def _osc_777_supported(): term = os.environ.get('TERM') if term and (term.startswith('xterm') or term.startswith('vte')): # Since vte version 4600, upstream silently ignores # the OSC 777 without printing garbage to the terminal. # # For distros like Fedora who have patched vte, this # will trigger a desktop notification and bring attention # to the terminal. # vte_version = os.environ.get('VTE_VERSION') try: vte_version_int = int(vte_version) except (ValueError, TypeError): return False if vte_version_int >= 4600: return True return False # A linux specific App implementation # class LinuxApp(App): def notify(self, title, text): # Currently we only try this notification method # of sending an escape sequence to the terminal # if _osc_777_supported(): click.echo("\033]777;notify;{};{}\007".format(title, text), err=True) buildstream-1.6.9/buildstream/_frontend/profile.py000066400000000000000000000047301437515270000223550ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom import re import copy import click # Profile() # # A class for formatting text with ansi color codes # # Kwargs: # The same keyword arguments which can be used with click.style() # class Profile(): def __init__(self, **kwargs): self._kwargs = dict(kwargs) # fmt() # # Format some text with ansi color codes # # Args: # text (str): The text to format # # Kwargs: # Keyword arguments to apply on top of the base click.style() # arguments # def fmt(self, text, **kwargs): kwargs = dict(kwargs) fmtargs = copy.copy(self._kwargs) fmtargs.update(kwargs) return click.style(text, **fmtargs) # fmt_subst() # # Substitute a variable of the %{varname} form, formatting # only the substituted text with the given click.style() configurations # # Args: # text (str): The text to format, with possible variables # varname (str): The variable name to substitute # value (str): The value to substitute the variable with # # Kwargs: # Keyword arguments to apply on top of the base click.style() # arguments # def fmt_subst(self, text, varname, value, **kwargs): def subst_callback(match): # Extract and format the "{(varname)...}" portion of the match inner_token = match.group(1) formatted = inner_token.format(**{varname: value}) # Colorize after the pythonic format formatting, which may have padding return self.fmt(formatted, **kwargs) # Lazy regex, after our word, match anything that does not have '%' return re.sub(r"%(\{(" + varname + r")[^%]*\})", subst_callback, text) buildstream-1.6.9/buildstream/_frontend/status.py000066400000000000000000000427041437515270000222430ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import sys import curses import shutil import click # Import a widget internal for formatting time codes from .widget import TimeCode from .._scheduler import ElementJob # Status() # # A widget for formatting overall status. # # Note that the render() and clear() methods in this class are # simply noops in the case that the application is not connected # to a terminal, or if the terminal does not support ANSI escape codes. 
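# The terminal capabilities required for this are listed in _TERM_CAPABILITIES below and resolved in _init_terminal(); when any of them is missing, render() and clear() simply return early (descriptive note).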
# # Args: # context (Context): The Context # content_profile (Profile): Formatting profile for content text # format_profile (Profile): Formatting profile for formatting text # success_profile (Profile): Formatting profile for success text # error_profile (Profile): Formatting profile for error text # stream (Stream): The Stream # colors (bool): Whether to print the ANSI color codes in the output # class Status(): # Table of the terminal capabilities we require and use _TERM_CAPABILITIES = { 'move_up': 'cuu1', 'move_x': 'hpa', 'clear_eol': 'el' } def __init__(self, context, content_profile, format_profile, success_profile, error_profile, stream, colors=False): self._context = context self._content_profile = content_profile self._format_profile = format_profile self._success_profile = success_profile self._error_profile = error_profile self._stream = stream self._jobs = [] self._last_lines = 0 # Number of status lines we last printed to console self._spacing = 1 self._colors = colors self._header = _StatusHeader(context, content_profile, format_profile, success_profile, error_profile, stream) self._term_width, _ = shutil.get_terminal_size() self._alloc_lines = 0 self._alloc_columns = None self._line_length = 0 self._need_alloc = True self._term_caps = self._init_terminal() # add_job() # # Adds a job to track in the status area # # Args: # element (Element): The element of the job to track # action_name (str): The action name for this job # def add_job(self, job): elapsed = self._stream.elapsed_time job = _StatusJob(self._context, job, self._content_profile, self._format_profile, elapsed) self._jobs.append(job) self._need_alloc = True # remove_job() # # Removes a job currently being tracked in the status area # # Args: # element (Element): The element of the job to track # action_name (str): The action name for this job # def remove_job(self, job): action_name = job.action_name if not isinstance(job, ElementJob): element = None else: element = job.element self._jobs = [ job for job in self._jobs if not (job.element is element and job.action_name == action_name) ] self._need_alloc = True # clear() # # Clear the status area, it is necessary to call # this before printing anything to the console if # a status area is in use. # # To print some logging to the output and then restore # the status, use the following: # # status.clear() # ... print something to console ... # status.render() # def clear(self): if not self._term_caps: return for _ in range(self._last_lines): self._move_up() self._clear_line() self._last_lines = 0 # render() # # Render the status area. # # If you are not printing a line in addition to rendering # the status area, for instance in a timeout, then it is # not necessary to call clear(). 
def render(self): if not self._term_caps: return elapsed = self._stream.elapsed_time self.clear() self._check_term_width() self._allocate() # Nothing to render, early return if self._alloc_lines == 0: return # Before rendering the actual lines, we need to add some line # feeds for the amount of lines we intend to print first, and # move cursor position back to the first line for _ in range(self._alloc_lines + self._header.lines): click.echo('', err=True) for _ in range(self._alloc_lines + self._header.lines): self._move_up() # Render the one line header text = self._header.render(self._term_width, elapsed) click.echo(text, color=self._colors, err=True) # Now we have the number of columns, and an allocation for # alignment of each column n_columns = len(self._alloc_columns) for line in self._job_lines(n_columns): text = '' for job in line: column = line.index(job) text += job.render(self._alloc_columns[column] - job.size, elapsed) # Add spacing between columns if column < (n_columns - 1): text += ' ' * self._spacing # Print the line click.echo(text, color=self._colors, err=True) # Track what we printed last, for the next clear self._last_lines = self._alloc_lines + self._header.lines ################################################### # Private Methods # ################################################### # _init_terminal() # # Initialize the terminal and return the resolved terminal # capabilities dictionary. # # Returns: # (dict|None): The resolved terminal capabilities dictionary, # or None if the terminal does not support all # of the required capabilities. # def _init_terminal(self): # We need both output streams to be connected to a terminal if not (sys.stdout.isatty() and sys.stderr.isatty()): return None # Initialized terminal, curses might decide it doesnt # support this terminal try: curses.setupterm(os.environ.get('TERM', 'dumb')) except curses.error: return None term_caps = {} # Resolve the string capabilities we need for the capability # names we need. # for capname, capval in self._TERM_CAPABILITIES.items(): code = curses.tigetstr(capval) # If any of the required capabilities resolve empty strings or None, # then we don't have the capabilities we need for a status bar on # this terminal. if not code: return None # Decode sequences as latin1, as they are always 8-bit bytes, # so when b'\xff' is returned, this must be decoded to u'\xff'. # # This technique is employed by the python blessings library # as well, and should provide better compatibility with most # terminals. # term_caps[capname] = code.decode('latin1') return term_caps def _check_term_width(self): term_width, _ = shutil.get_terminal_size() if self._term_width != term_width: self._term_width = term_width self._need_alloc = True def _move_up(self): assert self._term_caps is not None # Explicitly move to beginning of line, fixes things up # when there was a ^C or ^Z printed to the terminal. 
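# Note (descriptive): per _TERM_CAPABILITIES, 'move_x' is the terminfo 'hpa' capability and takes a column argument (0 here), while 'move_up' is 'cuu1' and takes none, hence the two tparm() calls below differ.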
move_x = curses.tparm(self._term_caps['move_x'].encode('latin1'), 0) move_x = move_x.decode('latin1') move_up = curses.tparm(self._term_caps['move_up'].encode('latin1')) move_up = move_up.decode('latin1') click.echo(move_x + move_up, nl=False, err=True) def _clear_line(self): assert self._term_caps is not None clear_eol = curses.tparm(self._term_caps['clear_eol'].encode('latin1')) clear_eol = clear_eol.decode('latin1') click.echo(clear_eol, nl=False, err=True) def _allocate(self): if not self._need_alloc: return # State when there is no jobs to display alloc_lines = 0 alloc_columns = [] line_length = 0 # Test for the widest width which fits columnized jobs for columns in reversed(range(len(self._jobs))): alloc_lines, alloc_columns = self._allocate_columns(columns + 1) # If the sum of column widths with spacing in between # fits into the terminal width, this is a good allocation. line_length = sum(alloc_columns) + (columns * self._spacing) if line_length < self._term_width: break self._alloc_lines = alloc_lines self._alloc_columns = alloc_columns self._line_length = line_length self._need_alloc = False def _job_lines(self, columns): for i in range(0, len(self._jobs), columns): yield self._jobs[i:i + columns] # Returns an array of integers representing the maximum # length in characters for each column, given the current # list of jobs to render. # def _allocate_columns(self, columns): column_widths = [0 for _ in range(columns)] lines = 0 for line in self._job_lines(columns): line_len = len(line) lines += 1 for col in range(columns): if col < line_len: job = line[col] column_widths[col] = max(column_widths[col], job.size) return lines, column_widths # _StatusHeader() # # A delegate object for rendering the header part of the Status() widget # # Args: # context (Context): The Context # content_profile (Profile): Formatting profile for content text # format_profile (Profile): Formatting profile for formatting text # success_profile (Profile): Formatting profile for success text # error_profile (Profile): Formatting profile for error text # stream (Stream): The Stream # class _StatusHeader(): def __init__(self, context, content_profile, format_profile, success_profile, error_profile, stream): # # Public members # self.lines = 3 # # Private members # self._content_profile = content_profile self._format_profile = format_profile self._success_profile = success_profile self._error_profile = error_profile self._stream = stream self._time_code = TimeCode(context, content_profile, format_profile) self._context = context def render(self, line_length, elapsed): project = self._context.get_toplevel_project() line_length = max(line_length, 80) # # Line 1: Session time, project name, session / total elements # # ========= 00:00:00 project-name (143/387) ========= # session = str(len(self._stream.session_elements)) total = str(len(self._stream.total_elements)) size = 0 text = '' size += len(total) + len(session) + 4 # Size for (N/N) with a leading space size += 8 # Size of time code size += len(project.name) + 1 text += self._time_code.render_time(elapsed) text += ' ' + self._content_profile.fmt(project.name) text += ' ' + self._format_profile.fmt('(') + \ self._content_profile.fmt(session) + \ self._format_profile.fmt('/') + \ self._content_profile.fmt(total) + \ self._format_profile.fmt(')') line1 = self._centered(text, size, line_length, '=') # # Line 2: Dynamic list of queue status reports # # (Fetched:0 117 0)→ (Built:4 0 0) # size = 0 text = '' # Format and calculate size for each queue progress for 
queue in self._stream.queues: # Add spacing if self._stream.queues.index(queue) > 0: size += 2 text += self._format_profile.fmt('→ ') queue_text, queue_size = self._render_queue(queue) size += queue_size text += queue_text line2 = self._centered(text, size, line_length, ' ') # # Line 3: Cache usage percentage report # # ~~~~~~ cache: 69% ~~~~~~ # usage = self._context.get_artifact_cache_usage() usage_percent = '{}%'.format(usage.used_percent) size = 21 size += len(usage_percent) if usage.used_percent >= 95: formatted_usage_percent = self._error_profile.fmt(usage_percent) elif usage.used_percent >= 80: formatted_usage_percent = self._content_profile.fmt(usage_percent) else: formatted_usage_percent = self._success_profile.fmt(usage_percent) text = self._format_profile.fmt("~~~~~~ ") + \ self._content_profile.fmt('cache') + \ self._format_profile.fmt(': ') + \ formatted_usage_percent + \ self._format_profile.fmt(' ~~~~~~') line3 = self._centered(text, size, line_length, ' ') return line1 + '\n' + line2 + '\n' + line3 ################################################### # Private Methods # ################################################### def _render_queue(self, queue): processed = str(len(queue.processed_elements)) skipped = str(len(queue.skipped_elements)) failed = str(len(queue.failed_elements)) size = 5 # Space for the formatting '[', ':', ' ', ' ' and ']' size += len(queue.complete_name) size += len(processed) + len(skipped) + len(failed) text = self._format_profile.fmt("(") + \ self._content_profile.fmt(queue.complete_name) + \ self._format_profile.fmt(":") + \ self._success_profile.fmt(processed) + ' ' + \ self._content_profile.fmt(skipped) + ' ' + \ self._error_profile.fmt(failed) + \ self._format_profile.fmt(")") return (text, size) def _centered(self, text, size, line_length, fill): remaining = line_length - size remaining -= 2 final_text = self._format_profile.fmt(fill * (remaining // 2)) + ' ' final_text += text final_text += ' ' + self._format_profile.fmt(fill * (remaining // 2)) return final_text # _StatusJob() # # A delegate object for rendering a job in the status area # # Args: # context (Context): The Context # job (Job): The job being processed # content_profile (Profile): Formatting profile for content text # format_profile (Profile): Formatting profile for formatting text # elapsed (datetime): The offset into the session when this job is created # class _StatusJob(): def __init__(self, context, job, content_profile, format_profile, elapsed): action_name = job.action_name if not isinstance(job, ElementJob): element = None else: element = job.element # # Public members # self.element = element # The Element self.action_name = action_name # The action name self.size = None # The number of characters required to render self.full_name = element._get_full_name() if element else action_name # # Private members # self._offset = elapsed self._content_profile = content_profile self._format_profile = format_profile self._time_code = TimeCode(context, content_profile, format_profile) # Calculate the size needed to display self.size = 10 # Size of time code with brackets self.size += len(action_name) self.size += len(self.full_name) self.size += 3 # '[' + ':' + ']' # render() # # Render the Job, return a rendered string # # Args: # padding (int): Amount of padding to print in order to align with columns # elapsed (datetime): The session elapsed time offset # def render(self, padding, elapsed): text = self._format_profile.fmt('[') + \ self._time_code.render_time(elapsed - self._offset) 
+ \ self._format_profile.fmt(']') # Add padding after the display name, before terminating ']' name = self.full_name + (' ' * padding) text += self._format_profile.fmt('[') + \ self._content_profile.fmt(self.action_name) + \ self._format_profile.fmt(':') + \ self._content_profile.fmt(name) + \ self._format_profile.fmt(']') return text buildstream-1.6.9/buildstream/_frontend/widget.py000066400000000000000000000715171437515270000222070ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import datetime import os from collections import defaultdict, OrderedDict from contextlib import ExitStack from mmap import mmap import re import textwrap import click from . import Profile from .. import Element, Consistency from .. import _yaml from .. import __version__ as bst_version from .._exceptions import ImplError from .._message import MessageType from ..plugin import Plugin # These messages are printed a bit differently ERROR_MESSAGES = [MessageType.FAIL, MessageType.ERROR, MessageType.BUG] # Widget() # # Args: # content_profile (Profile): The profile to use for rendering content # format_profile (Profile): The profile to use for rendering formatting # # An abstract class for printing output columns in our text UI. 
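#
# As a purely illustrative sketch (not part of the original file), a concrete
# widget only needs to implement render(); for example, a hypothetical column
# printing the message's PID with the two profiles, similar in spirit to the
# Debug widget further below:
#
#     class PidColumn(Widget):
#         def render(self, message):
#             return self.format_profile.fmt('pid:') + \
#                    self.content_profile.fmt(str(message.pid))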
# class Widget(): def __init__(self, context, content_profile, format_profile): # The context self.context = context # The content profile self.content_profile = content_profile # The formatting profile self.format_profile = format_profile # render() # # Renders a string to be printed in the UI # # Args: # message (Message): A message to print # # Returns: # (str): The string this widget prints for the given message # def render(self, message): raise ImplError("{} does not implement render()".format(type(self).__name__)) # Used to add spacing between columns class Space(Widget): def render(self, message): return ' ' # Used to add fixed text between columns class FixedText(Widget): def __init__(self, context, text, content_profile, format_profile): super().__init__(context, content_profile, format_profile) self.text = text def render(self, message): return self.format_profile.fmt(self.text) # Used to add the wallclock time this message was created at class WallclockTime(Widget): def render(self, message): fields = [self.content_profile.fmt("{:02d}".format(x)) for x in [message.creation_time.hour, message.creation_time.minute, message.creation_time.second]] return self.format_profile.fmt(":").join(fields) # A widget for rendering the debugging column class Debug(Widget): def render(self, message): unique_id = 0 if message.unique_id is None else message.unique_id text = self.format_profile.fmt('pid:') text += self.content_profile.fmt("{: <5}".format(message.pid)) text += self.format_profile.fmt(" id:") text += self.content_profile.fmt("{:0>3}".format(unique_id)) return text # A widget for rendering the time codes class TimeCode(Widget): def __init__(self, context, content_profile, format_profile, microseconds=False): self._microseconds = microseconds super().__init__(context, content_profile, format_profile) def render(self, message): return self.render_time(message.elapsed) def render_time(self, elapsed): if elapsed is None: fields = [ self.content_profile.fmt('--') for i in range(3) ] else: hours, remainder = divmod(int(elapsed.total_seconds()), 60 * 60) minutes, seconds = divmod(remainder, 60) fields = [ self.content_profile.fmt("{0:02d}".format(field)) for field in [hours, minutes, seconds] ] text = self.format_profile.fmt(':').join(fields) if self._microseconds: if elapsed is not None: text += self.content_profile.fmt(".{0:06d}".format(elapsed.microseconds)) else: text += self.content_profile.fmt(".------") return text # A widget for rendering the MessageType class TypeName(Widget): _action_colors = { MessageType.DEBUG: "cyan", MessageType.STATUS: "cyan", MessageType.INFO: "magenta", MessageType.WARN: "yellow", MessageType.START: "blue", MessageType.SUCCESS: "green", MessageType.FAIL: "red", MessageType.SKIPPED: "yellow", MessageType.ERROR: "red", MessageType.BUG: "red", } def render(self, message): return self.content_profile.fmt("{: <7}" .format(message.message_type.upper()), bold=True, dim=True, fg=self._action_colors[message.message_type]) # A widget for displaying the Element name class ElementName(Widget): def __init__(self, context, content_profile, format_profile): super().__init__(context, content_profile, format_profile) # Pre initialization format string, before we know the length of # element names in the pipeline self._fmt_string = '{: <30}' def render(self, message): element_id = message.task_id or message.unique_id if element_id is None: return "" plugin = Plugin._lookup(element_id) name = plugin._get_full_name() # Sneak the action name in with the element name action_name 
= message.action_name if not action_name: action_name = "Main" return self.content_profile.fmt("{: >5}".format(action_name.lower())) + \ self.format_profile.fmt(':') + \ self.content_profile.fmt(self._fmt_string.format(name)) # A widget for displaying the primary message text class MessageText(Widget): def render(self, message): return message.message # A widget for formatting the element cache key class CacheKey(Widget): def __init__(self, context, content_profile, format_profile, err_profile): super().__init__(context, content_profile, format_profile) self._err_profile = err_profile self._key_length = context.log_key_length def render(self, message): element_id = message.task_id or message.unique_id if element_id is None or not self._key_length: return "" missing = False key = ' ' * self._key_length plugin = Plugin._lookup(element_id) if isinstance(plugin, Element): _, key, missing = plugin._get_display_key() if message.message_type in ERROR_MESSAGES: text = self._err_profile.fmt(key) else: text = self.content_profile.fmt(key, dim=missing) return text # A widget for formatting the log file class LogFile(Widget): def __init__(self, context, content_profile, format_profile, err_profile): super().__init__(context, content_profile, format_profile) self._err_profile = err_profile self._logdir = context.logdir def render(self, message, abbrev=True): if message.logfile and message.scheduler: logfile = message.logfile if abbrev and self._logdir != "" and logfile.startswith(self._logdir): logfile = logfile[len(self._logdir):] logfile = logfile.lstrip(os.sep) if message.message_type in ERROR_MESSAGES: text = self._err_profile.fmt(logfile) else: text = self.content_profile.fmt(logfile, dim=True) else: text = '' return text # START and SUCCESS messages are expected to have no useful # information in the message text, so we display the logfile name for # these messages, and the message text for other types. 
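#
# For instance, a START message coming from the scheduler with a log file
# attached renders as the (abbreviated) log file path, whereas a FAIL or
# WARN message renders its message text instead.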
# class MessageOrLogFile(Widget): def __init__(self, context, content_profile, format_profile, err_profile): super().__init__(context, content_profile, format_profile) self._message_widget = MessageText(context, content_profile, format_profile) self._logfile_widget = LogFile(context, content_profile, format_profile, err_profile) def render(self, message): # Show the log file only in the main start/success messages if message.logfile and message.scheduler and \ message.message_type in [MessageType.START, MessageType.SUCCESS]: text = self._logfile_widget.render(message) else: text = self._message_widget.render(message) return text # LogLine # # A widget for formatting a log line # # Args: # context (Context): The Context # content_profile (Profile): Formatting profile for content text # format_profile (Profile): Formatting profile for formatting text # success_profile (Profile): Formatting profile for success text # error_profile (Profile): Formatting profile for error text # detail_profile (Profile): Formatting profile for detail text # indent (int): Number of spaces to use for general indentation # class LogLine(Widget): def __init__(self, context, content_profile, format_profile, success_profile, err_profile, detail_profile, indent=4): super().__init__(context, content_profile, format_profile) self._columns = [] self._failure_messages = defaultdict(list) self._success_profile = success_profile self._err_profile = err_profile self._detail_profile = detail_profile self._indent = ' ' * indent self._log_lines = context.log_error_lines self._message_lines = context.log_message_lines self._resolved_keys = None self._space_widget = Space(context, content_profile, format_profile) self._logfile_widget = LogFile(context, content_profile, format_profile, err_profile) if context.log_debug: self._columns.extend([ Debug(context, content_profile, format_profile) ]) self.logfile_variable_names = { "elapsed": TimeCode(context, content_profile, format_profile, microseconds=False), "elapsed-us": TimeCode(context, content_profile, format_profile, microseconds=True), "wallclock": WallclockTime(context, content_profile, format_profile), "key": CacheKey(context, content_profile, format_profile, err_profile), "element": ElementName(context, content_profile, format_profile), "action": TypeName(context, content_profile, format_profile), "message": MessageOrLogFile(context, content_profile, format_profile, err_profile) } logfile_tokens = self._parse_logfile_format(context.log_message_format, content_profile, format_profile) self._columns.extend(logfile_tokens) # show_pipeline() # # Display a list of elements in the specified format. # # The formatting string is the one currently documented in `bst show`, this # is used in pipeline session headings and also to implement `bst show`. 
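    #
    # As an illustrative example only (the format actually used by the
    # frontend is taken from context.log_element_format), a format string
    # combining the substitutions handled below could look like:
    #
    #     "%{state} %{full-key} %{name}"
    #
    # which yields one line per element showing its state, cache key and name.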
# # Args: # dependencies (list of Element): A list of Element objects # format_: A formatting string, as specified by `bst show` # # Returns: # (str): The formatted list of elements # def show_pipeline(self, dependencies, format_): report = '' p = Profile() for element in dependencies: line = format_ full_key, cache_key, dim_keys = element._get_display_key() line = p.fmt_subst(line, 'name', element._get_full_name(), fg='blue', bold=True) line = p.fmt_subst(line, 'key', cache_key, fg='yellow', dim=dim_keys) line = p.fmt_subst(line, 'full-key', full_key, fg='yellow', dim=dim_keys) consistency = element._get_consistency() if consistency == Consistency.INCONSISTENT: line = p.fmt_subst(line, 'state', "no reference", fg='red') else: if element._cached(): line = p.fmt_subst(line, 'state', "cached", fg='magenta') elif consistency == Consistency.RESOLVED: line = p.fmt_subst(line, 'state', "fetch needed", fg='red') elif element._buildable(): line = p.fmt_subst(line, 'state', "buildable", fg='green') else: line = p.fmt_subst(line, 'state', "waiting", fg='blue') # Element configuration if "%{config" in format_: config = _yaml.node_sanitize(element._Element__config) line = p.fmt_subst(line, 'config', _yaml.dump_string(config)) # Variables if "%{vars" in format_: variables = dict(element._Element__variables) line = p.fmt_subst(line, 'vars', _yaml.dump_string(variables)) # Environment if "%{env" in format_: environment = _yaml.node_sanitize(element._Element__environment) line = p.fmt_subst(line, 'env', _yaml.dump_string(environment)) # Public if "%{public" in format_: environment = _yaml.node_sanitize(element._Element__public) line = p.fmt_subst(line, 'public', _yaml.dump_string(environment)) # Workspaced if "%{workspaced" in format_: line = p.fmt_subst( line, 'workspaced', '(workspaced)' if element._get_workspace() else '', fg='yellow') # Workspace-dirs if "%{workspace-dirs" in format_: workspace = element._get_workspace() if workspace is not None: path = workspace.get_absolute_path() if path.startswith("~/"): path = os.path.join(os.getenv('HOME', '/root'), path[2:]) line = p.fmt_subst(line, 'workspace-dirs', "Workspace: {}".format(path)) else: line = p.fmt_subst( line, 'workspace-dirs', '') report += line + '\n' return report.rstrip('\n') # print_heading() # # A message to be printed at program startup, indicating # some things about user configuration and BuildStream version # and so on. 
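    #
    # Roughly speaking (the values shown here are placeholders, and alignment
    # is handled by _format_values() below), the main invocation block renders
    # along these lines:
    #
    #     Session Start: Friday, 01-01-2021 at 00:00:00
    #     Project:       example (/path/to/example)
    #     Targets:       hello.bst
    #     Cache Usage:   ...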
# # Args: # project (Project): The toplevel project we were invoked from # stream (Stream): The stream # log_file (file): An optional file handle for additional logging # styling (bool): Whether to enable ansi escape codes in the output # def print_heading(self, project, stream, *, log_file, styling=False): context = self.context starttime = datetime.datetime.now() text = '' self._resolved_keys = {element: element._get_cache_key() for element in stream.session_elements} # Main invocation context text += '\n' text += self.content_profile.fmt("BuildStream Version {}\n".format(bst_version), bold=True) values = OrderedDict() values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S') values["Project"] = "{} ({})".format(project.name, project.directory) values["Targets"] = ", ".join([t.name for t in stream.targets]) values["Cache Usage"] = "{}".format(context.get_artifact_cache_usage()) text += self._format_values(values) # User configurations text += '\n' text += self.content_profile.fmt("User Configuration\n", bold=True) values = OrderedDict() values["Configuration File"] = \ "Default Configuration" if not context.config_origin else context.config_origin values["Log Files"] = context.logdir values["Source Mirrors"] = context.sourcedir values["Build Area"] = context.builddir values["Artifact Cache"] = context.artifactdir values["Strict Build Plan"] = "Yes" if context.get_strict() else "No" values["Maximum Fetch Tasks"] = context.sched_fetchers values["Maximum Build Tasks"] = context.sched_builders values["Maximum Push Tasks"] = context.sched_pushers values["Maximum Network Retries"] = context.sched_network_retries text += self._format_values(values) text += '\n' # Project Options values = OrderedDict() project.options.printable_variables(values) if values: text += self.content_profile.fmt("Project Options\n", bold=True) text += self._format_values(values) text += '\n' # Plugins text += self._format_plugins(project.first_pass_config.element_factory.loaded_dependencies, project.first_pass_config.source_factory.loaded_dependencies) if project.config.element_factory and project.config.source_factory: text += self._format_plugins(project.config.element_factory.loaded_dependencies, project.config.source_factory.loaded_dependencies) # Pipeline state text += self.content_profile.fmt("Pipeline\n", bold=True) text += self.show_pipeline(stream.total_elements, context.log_element_format) text += '\n' # Separator line before following output text += self.format_profile.fmt("=" * 79 + '\n') click.echo(text, color=styling, nl=False, err=True) if log_file: click.echo(text, file=log_file, color=False, nl=False) # print_summary() # # Print a summary of activities at the end of a session # # Args: # stream (Stream): The Stream # log_file (file): An optional file handle for additional logging # styling (bool): Whether to enable ansi escape codes in the output # def print_summary(self, stream, log_file, styling=False): # Early silent return if there are no queues, can happen # only in the case that the stream early returned due to # an inconsistent pipeline state. 
if not stream.queues: return text = '' assert self._resolved_keys is not None elements = sorted(e for (e, k) in self._resolved_keys.items() if k != e._get_cache_key()) if elements: text += self.content_profile.fmt("Resolved key Summary\n", bold=True) text += self.show_pipeline(elements, self.context.log_element_format) text += "\n\n" if self._failure_messages: values = OrderedDict() for element, messages in sorted(self._failure_messages.items(), key=lambda x: x[0].name): for queue in stream.queues: if any(el.name == element.name for el in queue.failed_elements): values[element.name] = ''.join(self._render(v) for v in messages) if values: text += self.content_profile.fmt("Failure Summary\n", bold=True) text += self._format_values(values, style_value=False) text += self.content_profile.fmt("Pipeline Summary\n", bold=True) values = OrderedDict() values['Total'] = self.content_profile.fmt(str(len(stream.total_elements))) values['Session'] = self.content_profile.fmt(str(len(stream.session_elements))) processed_maxlen = 1 skipped_maxlen = 1 failed_maxlen = 1 for queue in stream.queues: processed_maxlen = max(len(str(len(queue.processed_elements))), processed_maxlen) skipped_maxlen = max(len(str(len(queue.skipped_elements))), skipped_maxlen) failed_maxlen = max(len(str(len(queue.failed_elements))), failed_maxlen) for queue in stream.queues: processed = str(len(queue.processed_elements)) skipped = str(len(queue.skipped_elements)) failed = str(len(queue.failed_elements)) processed_align = ' ' * (processed_maxlen - len(processed)) skipped_align = ' ' * (skipped_maxlen - len(skipped)) failed_align = ' ' * (failed_maxlen - len(failed)) status_text = self.content_profile.fmt("processed ") + \ self._success_profile.fmt(processed) + \ self.format_profile.fmt(', ') + processed_align status_text += self.content_profile.fmt("skipped ") + \ self.content_profile.fmt(skipped) + \ self.format_profile.fmt(', ') + skipped_align status_text += self.content_profile.fmt("failed ") + \ self._err_profile.fmt(failed) + ' ' + failed_align values["{} Queue".format(queue.action_name)] = status_text text += self._format_values(values, style_value=False) click.echo(text, color=styling, nl=False, err=True) if log_file: click.echo(text, file=log_file, color=False, nl=False) ################################################### # Widget Abstract Methods # ################################################### def render(self, message): # Track logfiles for later use element_id = message.task_id or message.unique_id if message.message_type in ERROR_MESSAGES and element_id is not None: plugin = Plugin._lookup(element_id) self._failure_messages[plugin].append(message) return self._render(message) ################################################### # Private Methods # ################################################### def _parse_logfile_format(self, format_string, content_profile, format_profile): logfile_tokens = [] while format_string: if format_string.startswith("%%"): logfile_tokens.append(FixedText(self.context, "%", content_profile, format_profile)) format_string = format_string[2:] continue m = re.search(r"^%\{([^\}]+)\}", format_string) if m is not None: variable = m.group(1) format_string = format_string[m.end(0):] if variable not in self.logfile_variable_names: raise Exception("'{0}' is not a valid log variable name.".format(variable)) logfile_tokens.append(self.logfile_variable_names[variable]) else: m = re.search("^[^%]+", format_string) if m is not None: text = FixedText(self.context, m.group(0), content_profile, 
format_profile) format_string = format_string[m.end(0):] logfile_tokens.append(text) else: # No idea what to do now raise Exception("'{0}' could not be parsed into a valid logging format.".format(format_string)) return logfile_tokens def _render(self, message): # Render the column widgets first text = '' for widget in self._columns: text += widget.render(message) text += '\n' extra_nl = False # Now add some custom things if message.detail: # Identify frontend messages, we never abbreviate these frontend_message = not (message.task_id or message.unique_id) # Split and truncate message detail down to message_lines lines lines = message.detail.splitlines(True) n_lines = len(lines) abbrev = False if message.message_type not in ERROR_MESSAGES \ and not frontend_message and n_lines > self._message_lines: abbrev = True lines = lines[0:self._message_lines] else: lines[n_lines - 1] = lines[n_lines - 1].rstrip('\n') detail = self._indent + self._indent.join(lines) text += '\n' if message.message_type in ERROR_MESSAGES: text += self._err_profile.fmt(detail, bold=True) else: text += self._detail_profile.fmt(detail) if abbrev: text += self._indent + \ self.content_profile.fmt('Message contains {} additional lines' .format(n_lines - self._message_lines), dim=True) text += '\n' extra_nl = True if message.sandbox is not None: sandbox = self._indent + 'Sandbox directory: ' + message.sandbox text += '\n' if message.message_type == MessageType.FAIL: text += self._err_profile.fmt(sandbox, bold=True) else: text += self._detail_profile.fmt(sandbox) text += '\n' extra_nl = True if message.scheduler and message.message_type == MessageType.FAIL: text += '\n' if self.context is not None and not self.context.log_verbose: text += self._indent + self._err_profile.fmt("Log file: ") text += self._indent + self._logfile_widget.render(message) + '\n' else: text += self._indent + self._err_profile.fmt("Printing the last {} lines from log file:" .format(self._log_lines)) + '\n' text += self._indent + self._logfile_widget.render(message, abbrev=False) + '\n' text += self._indent + self._err_profile.fmt("=" * 70) + '\n' log_content = self._read_last_lines(message.logfile) log_content = textwrap.indent(log_content, self._indent) text += self._detail_profile.fmt(log_content) text += '\n' text += self._indent + self._err_profile.fmt("=" * 70) + '\n' extra_nl = True if extra_nl: text += '\n' return text def _read_last_lines(self, logfile): with ExitStack() as stack: # mmap handles low-level memory details, allowing for # faster searches f = stack.enter_context(open(logfile, 'r+', encoding='utf-8')) log = stack.enter_context(mmap(f.fileno(), os.path.getsize(f.name))) count = 0 end = log.size() - 1 while count < self._log_lines and end >= 0: location = log.rfind(b'\n', 0, end) count += 1 # If location is -1 (none found), this will print the # first character because of the later +1 end = location # end+1 is correct whether or not a newline was found at # that location. If end is -1 (seek before beginning of file) # then we get the first characther. If end is a newline position, # we discard it and only want to print the beginning of the next # line. 
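            #
            # Worked example (illustrative): with log == b'one\ntwo\nthree\n'
            # and self._log_lines == 2:
            #
            #   - end starts at 13; rfind(b'\n', 0, 13) -> 7, so end = 7
            #   - rfind(b'\n', 0, 7) -> 3, so end = 3 and the loop stops
            #   - log[end + 1:] == b'two\nthree\n', i.e. the last two lines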
lines = log[(end + 1):].splitlines() return '\n'.join([line.decode('utf-8') for line in lines]).rstrip() def _format_plugins(self, element_plugins, source_plugins): text = "" if not (element_plugins or source_plugins): return text text += self.content_profile.fmt("Loaded Plugins\n", bold=True) if element_plugins: text += self.format_profile.fmt(" Element Plugins\n") for plugin in element_plugins: text += self.content_profile.fmt(" - {}\n".format(plugin)) if source_plugins: text += self.format_profile.fmt(" Source Plugins\n") for plugin in source_plugins: text += self.content_profile.fmt(" - {}\n".format(plugin)) text += '\n' return text # _format_values() # # Formats an indented dictionary of titles / values, ensuring # the values are aligned. # # Args: # values: A dictionary, usually an OrderedDict() # style_value: Whether to use the content profile for the values # # Returns: # (str): The formatted values # def _format_values(self, values, style_value=True): text = '' max_key_len = 0 for key, value in values.items(): max_key_len = max(len(key), max_key_len) for key, value in values.items(): if isinstance(value, str) and '\n' in value: text += self.format_profile.fmt(" {}:\n".format(key)) text += textwrap.indent(value, self._indent) continue text += self.format_profile.fmt(" {}: {}".format(key, ' ' * (max_key_len - len(key)))) if style_value: text += self.content_profile.fmt(str(value)) else: text += str(value) text += '\n' return text buildstream-1.6.9/buildstream/_fuse/000077500000000000000000000000001437515270000174625ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_fuse/__init__.py000066400000000000000000000014701437515270000215750ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .hardlinks import SafeHardlinks buildstream-1.6.9/buildstream/_fuse/fuse.py000066400000000000000000000765201437515270000210100ustar00rootroot00000000000000# This is an embedded copy of fuse.py taken from the following upstream commit: # # https://github.com/terencehonles/fusepy/commit/0eafeb557e0e70926ed9450008ef17057d302391 # # Our local modifications are recorded in the Git history of this repo. # Copyright (c) 2012 Terence Honles (maintainer) # Copyright (c) 2008 Giorgos Verigakis (author) # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # pylint: skip-file from __future__ import print_function, absolute_import, division from ctypes import * from ctypes.util import find_library from errno import * from os import strerror from platform import machine, system from signal import signal, SIGINT, SIG_DFL from stat import S_IFDIR from traceback import print_exc import logging try: from functools import partial except ImportError: # http://docs.python.org/library/functools.html#functools.partial def partial(func, *args, **keywords): def newfunc(*fargs, **fkeywords): newkeywords = keywords.copy() newkeywords.update(fkeywords) return func(*(args + fargs), **newkeywords) newfunc.func = func newfunc.args = args newfunc.keywords = keywords return newfunc try: basestring except NameError: basestring = str class c_timespec(Structure): _fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)] class c_utimbuf(Structure): _fields_ = [('actime', c_timespec), ('modtime', c_timespec)] class c_stat(Structure): pass # Platform dependent _system = system() _machine = machine() if _system == 'Darwin': _libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency _libfuse_path = (find_library('fuse4x') or find_library('osxfuse') or find_library('fuse')) else: _libfuse_path = find_library('fuse') if not _libfuse_path: raise EnvironmentError('Unable to find libfuse') else: _libfuse = CDLL(_libfuse_path) if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'): _system = 'Darwin-MacFuse' if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'): ENOTSUP = 45 c_dev_t = c_int32 c_fsblkcnt_t = c_ulong c_fsfilcnt_t = c_ulong c_gid_t = c_uint32 c_mode_t = c_uint16 c_off_t = c_int64 c_pid_t = c_int32 c_uid_t = c_uint32 setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int, c_uint32) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_uint32) if _system == 'Darwin': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_mode', c_mode_t), ('st_nlink', c_uint16), ('st_ino', c_uint64), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_birthtimespec', c_timespec), ('st_size', c_off_t), ('st_blocks', c_int64), ('st_blksize', c_int32), ('st_flags', c_int32), ('st_gen', c_int32), ('st_lspare', c_int32), ('st_qspare', c_int64)] else: c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_uint32), ('st_mode', c_mode_t), ('st_nlink', c_uint16), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_size', c_off_t), ('st_blocks', c_int64), ('st_blksize', c_int32)] elif _system == 'Linux': ENOTSUP = 95 c_dev_t = c_ulonglong c_fsblkcnt_t = c_ulonglong c_fsfilcnt_t = c_ulonglong c_gid_t = c_uint c_mode_t = c_uint c_off_t = c_longlong c_pid_t = c_int c_uid_t = c_uint setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) if _machine == 'x86_64': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulong), ('st_nlink', c_ulong), ('st_mode', c_mode_t), ('st_uid', c_uid_t), ('st_gid', 
c_gid_t), ('__pad0', c_int), ('st_rdev', c_dev_t), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_long), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] elif _machine == 'mips': c_stat._fields_ = [ ('st_dev', c_dev_t), ('__pad1_1', c_ulong), ('__pad1_2', c_ulong), ('__pad1_3', c_ulong), ('st_ino', c_ulong), ('st_mode', c_mode_t), ('st_nlink', c_ulong), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2_1', c_ulong), ('__pad2_2', c_ulong), ('st_size', c_off_t), ('__pad3', c_ulong), ('st_atimespec', c_timespec), ('__pad4', c_ulong), ('st_mtimespec', c_timespec), ('__pad5', c_ulong), ('st_ctimespec', c_timespec), ('__pad6', c_ulong), ('st_blksize', c_long), ('st_blocks', c_long), ('__pad7_1', c_ulong), ('__pad7_2', c_ulong), ('__pad7_3', c_ulong), ('__pad7_4', c_ulong), ('__pad7_5', c_ulong), ('__pad7_6', c_ulong), ('__pad7_7', c_ulong), ('__pad7_8', c_ulong), ('__pad7_9', c_ulong), ('__pad7_10', c_ulong), ('__pad7_11', c_ulong), ('__pad7_12', c_ulong), ('__pad7_13', c_ulong), ('__pad7_14', c_ulong)] elif _machine == 'ppc': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulonglong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2', c_ushort), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_longlong), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] elif _machine == 'ppc64' or _machine == 'ppc64le': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulong), ('st_nlink', c_ulong), ('st_mode', c_mode_t), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('__pad', c_uint), ('st_rdev', c_dev_t), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_long), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] elif _machine == 'aarch64': c_stat._fields_ = [ ('st_dev', c_dev_t), ('st_ino', c_ulong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad1', c_ulong), ('st_size', c_off_t), ('st_blksize', c_int), ('__pad2', c_int), ('st_blocks', c_long), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec)] else: # i686, use as fallback for everything else c_stat._fields_ = [ ('st_dev', c_dev_t), ('__pad1', c_ushort), ('__st_ino', c_ulong), ('st_mode', c_mode_t), ('st_nlink', c_uint), ('st_uid', c_uid_t), ('st_gid', c_gid_t), ('st_rdev', c_dev_t), ('__pad2', c_ushort), ('st_size', c_off_t), ('st_blksize', c_long), ('st_blocks', c_longlong), ('st_atimespec', c_timespec), ('st_mtimespec', c_timespec), ('st_ctimespec', c_timespec), ('st_ino', c_ulonglong)] else: raise NotImplementedError('{} is not supported.'.format(_system)) class c_statvfs(Structure): _fields_ = [ ('f_bsize', c_ulong), ('f_frsize', c_ulong), ('f_blocks', c_fsblkcnt_t), ('f_bfree', c_fsblkcnt_t), ('f_bavail', c_fsblkcnt_t), ('f_files', c_fsfilcnt_t), ('f_ffree', c_fsfilcnt_t), ('f_favail', c_fsfilcnt_t), ('f_fsid', c_ulong), #('unused', c_int), ('f_flag', c_ulong), ('f_namemax', c_ulong)] if _system == 'FreeBSD': c_fsblkcnt_t = c_uint64 c_fsfilcnt_t = c_uint64 setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t, c_int) getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte), c_size_t) class c_statvfs(Structure): _fields_ = [ ('f_bavail', c_fsblkcnt_t), ('f_bfree', c_fsblkcnt_t), ('f_blocks', c_fsblkcnt_t), ('f_favail', c_fsfilcnt_t), ('f_ffree', c_fsfilcnt_t), 
('f_files', c_fsfilcnt_t), ('f_bsize', c_ulong), ('f_flag', c_ulong), ('f_frsize', c_ulong)] class fuse_file_info(Structure): _fields_ = [ ('flags', c_int), ('fh_old', c_ulong), ('writepage', c_int), ('direct_io', c_uint, 1), ('keep_cache', c_uint, 1), ('flush', c_uint, 1), ('padding', c_uint, 29), ('fh', c_uint64), ('lock_owner', c_uint64)] class fuse_context(Structure): _fields_ = [ ('fuse', c_voidp), ('uid', c_uid_t), ('gid', c_gid_t), ('pid', c_pid_t), ('private_data', c_voidp)] _libfuse.fuse_get_context.restype = POINTER(fuse_context) class fuse_operations(Structure): _fields_ = [ ('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))), ('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), ('getdir', c_voidp), # Deprecated, use readdir ('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)), ('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)), ('unlink', CFUNCTYPE(c_int, c_char_p)), ('rmdir', CFUNCTYPE(c_int, c_char_p)), ('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('link', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)), ('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)), ('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)), ('utime', c_voidp), # Deprecated, use utimens ('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, POINTER(fuse_file_info))), ('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t, c_off_t, POINTER(fuse_file_info))), ('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))), ('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), ('setxattr', setxattr_t), ('getxattr', getxattr_t), ('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)), ('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)), ('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp, CFUNCTYPE(c_int, c_voidp, c_char_p, POINTER(c_stat), c_off_t), c_off_t, POINTER(fuse_file_info))), ('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))), ('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))), ('init', CFUNCTYPE(c_voidp, c_voidp)), ('destroy', CFUNCTYPE(c_voidp, c_voidp)), ('access', CFUNCTYPE(c_int, c_char_p, c_int)), ('create', CFUNCTYPE(c_int, c_char_p, c_mode_t, POINTER(fuse_file_info))), ('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t, POINTER(fuse_file_info))), ('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat), POINTER(fuse_file_info))), ('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info), c_int, c_voidp)), ('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))), ('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong))), ('flag_nullpath_ok', c_uint, 1), ('flag_nopath', c_uint, 1), ('flag_utime_omit_ok', c_uint, 1), ('flag_reserved', c_uint, 29), ] def time_of_timespec(ts): return ts.tv_sec + ts.tv_nsec / 10 ** 9 def set_st_attrs(st, attrs): for key, val in attrs.items(): if key in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'): timespec = getattr(st, key + 'spec', None) if timespec is None: continue timespec.tv_sec = int(val) timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9) elif hasattr(st, key): setattr(st, key, val) def fuse_get_context(): 'Returns a (uid, gid, pid) tuple' ctxp = _libfuse.fuse_get_context() ctx 
= ctxp.contents return ctx.uid, ctx.gid, ctx.pid class FuseOSError(OSError): def __init__(self, errno): super(FuseOSError, self).__init__(errno, strerror(errno)) class FUSE(object): ''' This class is the lower level interface and should not be subclassed under normal use. Its methods are called by fuse. Assumes API version 2.6 or later. ''' OPTIONS = ( ('foreground', '-f'), ('debug', '-d'), ('nothreads', '-s'), ) def __init__(self, operations, mountpoint, raw_fi=False, encoding='utf-8', **kwargs): ''' Setting raw_fi to True will cause FUSE to pass the fuse_file_info class as is to Operations, instead of just the fh field. This gives you access to direct_io, keep_cache, etc. ''' self.operations = operations self.raw_fi = raw_fi self.encoding = encoding args = ['fuse'] args.extend(flag for arg, flag in self.OPTIONS if kwargs.pop(arg, False)) kwargs.setdefault('fsname', operations.__class__.__name__) args.append('-o') args.append(','.join(self._normalize_fuse_options(**kwargs))) args.append(mountpoint) args = [arg.encode(encoding) for arg in args] argv = (c_char_p * len(args))(*args) fuse_ops = fuse_operations() for ent in fuse_operations._fields_: name, prototype = ent[:2] val = getattr(operations, name, None) if val is None: continue # Function pointer members are tested for using the # getattr(operations, name) above but are dynamically # invoked using self.operations(name) if hasattr(prototype, 'argtypes'): val = prototype(partial(self._wrapper, getattr(self, name))) setattr(fuse_ops, name, val) try: old_handler = signal(SIGINT, SIG_DFL) except ValueError: old_handler = SIG_DFL err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops), sizeof(fuse_ops), None) try: signal(SIGINT, old_handler) except ValueError: pass del self.operations # Invoke the destructor if err: raise RuntimeError(err) @staticmethod def _normalize_fuse_options(**kargs): for key, value in kargs.items(): if isinstance(value, bool): if value is True: yield key else: yield '{}={}'.format(key, value) @staticmethod def _wrapper(func, *args, **kwargs): 'Decorator for the methods that follow' try: return func(*args, **kwargs) or 0 except OSError as e: return -(e.errno or EFAULT) except: print_exc() return -EFAULT def _decode_optional_path(self, path): # NB: this method is intended for fuse operations that # allow the path argument to be NULL, # *not* as a generic path decoding method if path is None: return None return path.decode(self.encoding) def getattr(self, path, buf): return self.fgetattr(path, buf, None) def readlink(self, path, buf, bufsize): ret = self.operations('readlink', path.decode(self.encoding)) \ .encode(self.encoding) # copies a string into the given buffer # (null terminated and truncated if necessary) data = create_string_buffer(ret[:bufsize - 1]) memmove(buf, data, len(data)) return 0 def mknod(self, path, mode, dev): return self.operations('mknod', path.decode(self.encoding), mode, dev) def mkdir(self, path, mode): return self.operations('mkdir', path.decode(self.encoding), mode) def unlink(self, path): return self.operations('unlink', path.decode(self.encoding)) def rmdir(self, path): return self.operations('rmdir', path.decode(self.encoding)) def symlink(self, source, target): 'creates a symlink `target -> source` (e.g. 
ln -s source target)' return self.operations('symlink', target.decode(self.encoding), source.decode(self.encoding)) def rename(self, old, new): return self.operations('rename', old.decode(self.encoding), new.decode(self.encoding)) def link(self, source, target): 'creates a hard link `target -> source` (e.g. ln source target)' return self.operations('link', target.decode(self.encoding), source.decode(self.encoding)) def chmod(self, path, mode): return self.operations('chmod', path.decode(self.encoding), mode) def chown(self, path, uid, gid): # Check if any of the arguments is a -1 that has overflowed if c_uid_t(uid + 1).value == 0: uid = -1 if c_gid_t(gid + 1).value == 0: gid = -1 return self.operations('chown', path.decode(self.encoding), uid, gid) def truncate(self, path, length): return self.operations('truncate', path.decode(self.encoding), length) def open(self, path, fip): fi = fip.contents if self.raw_fi: return self.operations('open', path.decode(self.encoding), fi) else: fi.fh = self.operations('open', path.decode(self.encoding), fi.flags) return 0 def read(self, path, buf, size, offset, fip): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh ret = self.operations('read', self._decode_optional_path(path), size, offset, fh) if not ret: return 0 retsize = len(ret) assert retsize <= size, \ 'actual amount read {:d} greater than expected {:d}'.format(retsize, size) data = create_string_buffer(ret, retsize) memmove(buf, data, retsize) return retsize def write(self, path, buf, size, offset, fip): data = string_at(buf, size) if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh return self.operations('write', self._decode_optional_path(path), data, offset, fh) def statfs(self, path, buf): stv = buf.contents attrs = self.operations('statfs', path.decode(self.encoding)) for key, val in attrs.items(): if hasattr(stv, key): setattr(stv, key, val) return 0 def flush(self, path, fip): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh return self.operations('flush', self._decode_optional_path(path), fh) def release(self, path, fip): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh return self.operations('release', self._decode_optional_path(path), fh) def fsync(self, path, datasync, fip): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh return self.operations('fsync', self._decode_optional_path(path), datasync, fh) def setxattr(self, path, name, value, size, options, *args): return self.operations('setxattr', path.decode(self.encoding), name.decode(self.encoding), string_at(value, size), options, *args) def getxattr(self, path, name, value, size, *args): ret = self.operations('getxattr', path.decode(self.encoding), name.decode(self.encoding), *args) retsize = len(ret) # allow size queries if not value: return retsize # do not truncate if retsize > size: return -ERANGE buf = create_string_buffer(ret, retsize) # Does not add trailing 0 memmove(value, buf, retsize) return retsize def listxattr(self, path, namebuf, size): attrs = self.operations('listxattr', path.decode(self.encoding)) or '' ret = '\x00'.join(attrs).encode(self.encoding) if len(ret) > 0: ret += '\x00'.encode(self.encoding) retsize = len(ret) # allow size queries if not namebuf: return retsize # do not truncate if retsize > size: return -ERANGE buf = create_string_buffer(ret, retsize) memmove(namebuf, buf, retsize) return retsize def removexattr(self, path, name): return self.operations('removexattr', path.decode(self.encoding), name.decode(self.encoding)) def opendir(self, path, 
fip): # Ignore raw_fi fip.contents.fh = self.operations('opendir', path.decode(self.encoding)) return 0 def readdir(self, path, buf, filler, offset, fip): # Ignore raw_fi for item in self.operations('readdir', self._decode_optional_path(path), fip.contents.fh): if isinstance(item, basestring): name, st, offset = item, None, 0 else: name, attrs, offset = item if attrs: st = c_stat() set_st_attrs(st, attrs) else: st = None if filler(buf, name.encode(self.encoding), st, offset) != 0: break return 0 def releasedir(self, path, fip): # Ignore raw_fi return self.operations('releasedir', self._decode_optional_path(path), fip.contents.fh) def fsyncdir(self, path, datasync, fip): # Ignore raw_fi return self.operations('fsyncdir', self._decode_optional_path(path), datasync, fip.contents.fh) def init(self, conn): return self.operations('init', '/') def destroy(self, private_data): return self.operations('destroy', '/') def access(self, path, amode): return self.operations('access', path.decode(self.encoding), amode) def create(self, path, mode, fip): fi = fip.contents path = path.decode(self.encoding) if self.raw_fi: return self.operations('create', path, mode, fi) else: # This line is different from upstream to fix issues # reading file opened with O_CREAT|O_RDWR. # See issue #143. fi.fh = self.operations('create', path, mode, fi.flags) # END OF MODIFICATION return 0 def ftruncate(self, path, length, fip): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh return self.operations('truncate', self._decode_optional_path(path), length, fh) def fgetattr(self, path, buf, fip): memset(buf, 0, sizeof(c_stat)) st = buf.contents if not fip: fh = fip elif self.raw_fi: fh = fip.contents else: fh = fip.contents.fh attrs = self.operations('getattr', self._decode_optional_path(path), fh) set_st_attrs(st, attrs) return 0 def lock(self, path, fip, cmd, lock): if self.raw_fi: fh = fip.contents else: fh = fip.contents.fh return self.operations('lock', self._decode_optional_path(path), fh, cmd, lock) def utimens(self, path, buf): if buf: atime = time_of_timespec(buf.contents.actime) mtime = time_of_timespec(buf.contents.modtime) times = (atime, mtime) else: times = None return self.operations('utimens', path.decode(self.encoding), times) def bmap(self, path, blocksize, idx): return self.operations('bmap', path.decode(self.encoding), blocksize, idx) class Operations(object): ''' This class should be subclassed and passed as an argument to FUSE on initialization. All operations should raise a FuseOSError exception on error. When in doubt of what an operation should do, check the FUSE header file or the corresponding system call man page. ''' def __call__(self, op, *args): if not hasattr(self, op): raise FuseOSError(EFAULT) return getattr(self, op)(*args) def access(self, path, amode): return 0 bmap = None def chmod(self, path, mode): raise FuseOSError(EROFS) def chown(self, path, uid, gid): raise FuseOSError(EROFS) def create(self, path, mode, fi=None): ''' When raw_fi is False (default case), fi is None and create should return a numerical file handle. When raw_fi is True the file handle should be set directly by create and return 0. ''' raise FuseOSError(EROFS) def destroy(self, path): 'Called on filesystem destruction. Path is always /' pass def flush(self, path, fh): return 0 def fsync(self, path, datasync, fh): return 0 def fsyncdir(self, path, datasync, fh): return 0 def getattr(self, path, fh=None): ''' Returns a dictionary with keys identical to the stat C structure of stat(2). 
st_atime, st_mtime and st_ctime should be floats. NOTE: There is an incombatibility between Linux and Mac OS X concerning st_nlink of directories. Mac OS X counts all files inside the directory, while Linux counts only the subdirectories. ''' if path != '/': raise FuseOSError(ENOENT) return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2) def getxattr(self, path, name, position=0): raise FuseOSError(ENOTSUP) def init(self, path): ''' Called on filesystem initialization. (Path is always /) Use it instead of __init__ if you start threads on initialization. ''' pass def link(self, target, source): 'creates a hard link `target -> source` (e.g. ln source target)' raise FuseOSError(EROFS) def listxattr(self, path): return [] lock = None def mkdir(self, path, mode): raise FuseOSError(EROFS) def mknod(self, path, mode, dev): raise FuseOSError(EROFS) def open(self, path, flags): ''' When raw_fi is False (default case), open should return a numerical file handle. When raw_fi is True the signature of open becomes: open(self, path, fi) and the file handle should be set directly. ''' return 0 def opendir(self, path): 'Returns a numerical file handle.' return 0 def read(self, path, size, offset, fh): 'Returns a string containing the data requested.' raise FuseOSError(EIO) def readdir(self, path, fh): ''' Can return either a list of names, or a list of (name, attrs, offset) tuples. attrs is a dict as in getattr. ''' return ['.', '..'] def readlink(self, path): raise FuseOSError(ENOENT) def release(self, path, fh): return 0 def releasedir(self, path, fh): return 0 def removexattr(self, path, name): raise FuseOSError(ENOTSUP) def rename(self, old, new): raise FuseOSError(EROFS) def rmdir(self, path): raise FuseOSError(EROFS) def setxattr(self, path, name, value, options, position=0): raise FuseOSError(ENOTSUP) def statfs(self, path): ''' Returns a dictionary with keys identical to the statvfs C structure of statvfs(3). On Mac OS X f_bsize and f_frsize must be a power of 2 (minimum 512). ''' return {} def symlink(self, target, source): 'creates a symlink `target -> source` (e.g. ln -s source target)' raise FuseOSError(EROFS) def truncate(self, path, length, fh=None): raise FuseOSError(EROFS) def unlink(self, path): raise FuseOSError(EROFS) def utimens(self, path, times=None): 'Times is a (atime, mtime) tuple. If None use current time.' return 0 def write(self, path, data, offset, fh): raise FuseOSError(EROFS) class LoggingMixIn: log = logging.getLogger('fuse.log-mixin') def __call__(self, op, path, *args): self.log.debug('-> %s %s %s', op, path, repr(args)) ret = '[Unhandled Exception]' try: ret = getattr(self, op)(path, *args) return ret except OSError as e: ret = str(e) raise finally: self.log.debug('<- %s %s', op, repr(ret)) buildstream-1.6.9/buildstream/_fuse/hardlinks.py000066400000000000000000000163071437515270000220220ustar00rootroot00000000000000# # Copyright (C) 2016 Stavros Korokithakis # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # # The filesystem operations implementation here is based # on some example code written by Stavros Korokithakis. import errno import os import shutil import stat import tempfile from .fuse import FuseOSError, Operations from .mount import Mount # SafeHardlinks() # # A FUSE mount which implements a copy on write hardlink experience. # # Args: # root (str): The underlying filesystem path to mirror # tmp (str): A directory on the same filesystem for creating temp files # class SafeHardlinks(Mount): def __init__(self, directory, tempdir): self.directory = directory self.tempdir = tempdir def create_operations(self): return SafeHardlinkOps(self.directory, self.tempdir) # SafeHardlinkOps() # # The actual FUSE Operations implementation below. # class SafeHardlinkOps(Operations): def __init__(self, root, tmp): self.root = root self.tmp = tmp def _full_path(self, partial): if partial.startswith("/"): partial = partial[1:] path = os.path.join(self.root, partial) return path def _ensure_copy(self, full_path, follow_symlinks=True): try: if follow_symlinks: # Follow symbolic links manually here real_path = os.path.realpath(full_path) else: real_path = full_path file_stat = os.stat(real_path, follow_symlinks=False) # Skip the file if it's not a hardlink if file_stat.st_nlink <= 1: return # For some reason directories may have st_nlink > 1, but they # cannot be hardlinked, so just ignore those. # if not stat.S_ISDIR(file_stat.st_mode): with tempfile.TemporaryDirectory(dir=self.tmp) as tempdir: basename = os.path.basename(real_path) temp_path = os.path.join(tempdir, basename) # First copy, then unlink origin and rename shutil.copy2(real_path, temp_path, follow_symlinks=False) os.unlink(real_path) os.rename(temp_path, real_path) except FileNotFoundError: # This doesnt exist yet, assume we're about to create it # so it's not a problem. pass ########################################################### # Fuse Methods # ########################################################### def access(self, path, amode): full_path = self._full_path(path) if not os.access(full_path, amode): raise FuseOSError(errno.EACCES) def chmod(self, path, mode): full_path = self._full_path(path) # Ensure copies on chmod self._ensure_copy(full_path) return os.chmod(full_path, mode) def chown(self, path, uid, gid): full_path = self._full_path(path) # Ensure copies on chown self._ensure_copy(full_path, follow_symlinks=False) return os.chown(full_path, uid, gid, follow_symlinks=False) def getattr(self, path, fh=None): full_path = self._full_path(path) st = os.lstat(full_path) return dict((key, getattr(st, key)) for key in ( 'st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid', 'st_ino')) def readdir(self, path, fh): full_path = self._full_path(path) dir_entries = ['.', '..'] if os.path.isdir(full_path): dir_entries.extend(os.listdir(full_path)) for entry in dir_entries: entry_full_path = os.path.join(full_path, entry) st = os.stat(entry_full_path, follow_symlinks=False) attrs = dict((key, getattr(st, key)) for key in ( 'st_ino', 'st_mode')) yield entry, attrs, 0 def readlink(self, path): pathname = os.readlink(self._full_path(path)) if pathname.startswith("/"): # Path name is absolute, sanitize it. 
return os.path.relpath(pathname, self.root) else: return pathname def mknod(self, path, mode, dev): return os.mknod(self._full_path(path), mode, dev) def rmdir(self, path): full_path = self._full_path(path) return os.rmdir(full_path) def mkdir(self, path, mode): return os.mkdir(self._full_path(path), mode) def statfs(self, path): full_path = self._full_path(path) stv = os.statvfs(full_path) return dict((key, getattr(stv, key)) for key in ( 'f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')) def unlink(self, path): return os.unlink(self._full_path(path)) def symlink(self, target, source): return os.symlink(source, self._full_path(target)) def rename(self, old, new): return os.rename(self._full_path(old), self._full_path(new)) def link(self, target, source): # When creating a hard link here, should we ensure the original # file is not a hardlink itself first ? # return os.link(self._full_path(source), self._full_path(target)) def utimens(self, path, times=None): return os.utime(self._full_path(path), times) def open(self, path, flags): full_path = self._full_path(path) # If we're opening for writing, ensure it's a copy first if flags & os.O_WRONLY or flags & os.O_RDWR: self._ensure_copy(full_path) return os.open(full_path, flags) def create(self, path, mode, fi=None): full_path = self._full_path(path) # If it already exists, ensure it's a copy first self._ensure_copy(full_path) return os.open(full_path, fi, mode) def read(self, path, size, offset, fh): os.lseek(fh, offset, os.SEEK_SET) return os.read(fh, size) def write(self, path, data, offset, fh): os.lseek(fh, offset, os.SEEK_SET) return os.write(fh, data) def truncate(self, path, length, fh=None): full_path = self._full_path(path) with open(full_path, 'r+', encoding='utf-8') as f: f.truncate(length) def flush(self, path, fh): return os.fsync(fh) def release(self, path, fh): return os.close(fh) def fsync(self, path, datasync, fh): return self.flush(path, fh) buildstream-1.6.9/buildstream/_fuse/mount.py000066400000000000000000000147331437515270000212060ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import signal import time import sys from contextlib import contextmanager from multiprocessing import Process from .fuse import FUSE from .._exceptions import ImplError from .. import _signals # Just a custom exception to raise here, for identifying possible # bugs with a fuse layer implementation # class FuseMountError(Exception): pass # This is a convenience class which takes care of synchronizing the # startup of FUSE and shutting it down. 
#
# The implementations / subclasses should:
#
#  - Overload the instance initializer to add any parameters
#    needed for their fuse Operations implementation
#
#  - Implement create_operations() to create the Operations
#    instance on behalf of the superclass, using any additional
#    parameters collected in the initializer.
#
# Mount objects can be treated as context managers, the volume
# will be mounted during the context.
#
# UGLY CODE NOTE:
#
#   This is a horrible little piece of code. The problem we face
#   here is that the high-level libfuse API has fuse_main(), which
#   will either block in the foreground, or become a full daemon.
#
#   With the daemon approach, we know that the fuse is mounted right
#   away when fuse_main() returns, then the daemon will go and handle
#   requests on its own, but then we have no way to shut down the
#   daemon.
#
#   With the blocking approach, we still have it as a child process
#   so we can tell it to gracefully terminate; but it's impossible
#   to know when the mount is done, there is no callback for that
#
#   The solution we use here without digging too deep into the
#   low-level fuse API, is to fork a child process which will
#   run the fuse loop in the foreground, and we block the parent
#   process until the volume is mounted, using a busy loop with timeouts.
#
class Mount():

    # These are not really class data, they are
    # just here for the sake of having None setup instead
    # of missing attributes, since we do not provide any
    # initializer and leave the initializer to the subclass.
    #
    __mountpoint = None
    __operations = None
    __process = None

    ################################################
    #               User Facing API                #
    ################################################

    # mount():
    #
    # User facing API for mounting a fuse subclass implementation
    #
    # Args:
    #    (str): Location to mount this fuse fs
    #
    def mount(self, mountpoint):

        assert self.__process is None

        self.__mountpoint = mountpoint
        self.__process = Process(target=self.__run_fuse)

        # Ensure the child fork() does not inherit our signal handlers; if the
        # child wants to handle a signal then it will first set its own
        # handler, and then unblock it.
        with _signals.blocked([signal.SIGTERM, signal.SIGTSTP, signal.SIGINT], ignore=False):
            self.__process.start()

        # This is horrible, we're going to wait until mountpoint is mounted and that's it.
        while not os.path.ismount(mountpoint):
            time.sleep(1 / 100)

    # unmount():
    #
    # User facing API for unmounting a fuse subclass implementation
    #
    def unmount(self):

        # Terminate child process and join
        if self.__process is not None:
            self.__process.terminate()
            self.__process.join()

            # Report an error if ever the underlying operations crashed for some reason.
            if self.__process.exitcode != 0:
                raise FuseMountError("{} reported exit code {} when unmounting"
                                     .format(type(self).__name__, self.__process.exitcode))

        self.__mountpoint = None
        self.__process = None

    # mounted():
    #
    # A context manager to run a code block with this fuse Mount
    # mounted, this will take care of automatically unmounting
    # in the case that the calling process is terminated.
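    #
    # Illustrative usage sketch (not from the original sources): a concrete
    # subclass such as SafeHardlinks from hardlinks.py is expected to be
    # driven roughly like this, where the directory, tempdir and mountpoint
    # paths are hypothetical placeholders:
    #
    #     mount = SafeHardlinks(directory, tempdir)
    #     with mount.mounted(mountpoint):
    #         pass  # the copy-on-write view is visible at mountpoint here
    #     # the volume is unmounted automatically when the context exits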
# # Args: # (str): Location to mount this fuse fs # @contextmanager def mounted(self, mountpoint): self.mount(mountpoint) try: with _signals.terminator(self.unmount): yield finally: self.unmount() ################################################ # Abstract Methods # ################################################ # create_operations(): # # Create an Operations class (from fusepy) and return it # # Returns: # (Operations): A FUSE Operations implementation def create_operations(self): raise ImplError("Mount subclass '{}' did not implement create_operations()" .format(type(self).__name__)) ################################################ # Child Process # ################################################ def __run_fuse(self): # First become session leader while signals are still blocked # # Then reset the SIGTERM handler to the default and finally # unblock SIGTERM. # os.setsid() signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM]) # Ask the subclass to give us an Operations object # self.__operations = self.create_operations() # Run fuse in foreground in this child process, internally libfuse # will handle SIGTERM and gracefully exit it's own little main loop. # FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, use_ino=True) # Explicit 0 exit code, if the operations crashed for some reason, the exit # code will not be 0, and we want to know about it. # sys.exit(0) buildstream-1.6.9/buildstream/_includes.py000066400000000000000000000167631437515270000207150ustar00rootroot00000000000000import os from collections.abc import Mapping from . import _yaml from ._exceptions import LoadError, LoadErrorReason # Includes() # # This takes care of processing include directives "(@)". # # Args: # loader (Loader): The Loader object # copy_tree (bool): Whether to make a copy, of tree in # provenance. Should be true if intended to be # serialized. class Includes: def __init__(self, loader, *, copy_tree=False): self._loader = loader self._loaded = {} self._copy_tree = copy_tree # process() # # Process recursively include directives in a YAML node. # # Args: # node (dict): A YAML node # only_local (bool): Whether to ignore junction files # process_project_options (bool): Whether to process options from current project # def process(self, node, *, only_local=False, process_project_options=True): self._process(node, only_local=only_local, process_project_options=process_project_options) # _process() # # Process recursively include directives in a YAML node. This # method is a recursively called on loaded nodes from files. # # Args: # node (dict): A YAML node # included (set): Fail for recursion if trying to load any files in this set # current_loader (Loader): Use alternative loader (for junction files) # only_local (bool): Whether to ignore junction files # process_project_options (bool): Whether to process options from current project # def _process(self, node, *, included=None, current_loader=None, only_local=False, process_project_options=True): if included is None: included = set() if current_loader is None: current_loader = self._loader if process_project_options: current_loader.project.options.process_node(node) self._process_node( node, included=included, only_local=only_local, current_loader=current_loader, process_project_options=process_project_options, ) # _process_node() # # Process recursively include directives in a YAML node. This # method is recursively called on all nodes. 
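    #
    # Illustrative sketch (not from the original sources): the include
    # directive handled here is the "(@)" key, which may hold a single
    # string or a list, and entries may be prefixed with a junction name.
    # The file names below are hypothetical:
    #
    #     (@): include/common.yml
    #
    #     (@):
    #     - include/common.yml
    #     - subproject.bst:include/options.yml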
    #
    # Args:
    #    node (dict): A YAML node
    #    included (set): Fail for recursion if trying to load any files in this set
    #    current_loader (Loader): Use alternative loader (for junction files)
    #    only_local (bool): Whether to ignore junction files
    #    process_project_options (bool): Whether to process options from current project
    #
    def _process_node(
        self, node, *, included=None, current_loader=None, only_local=False, process_project_options=True
    ):
        if included is None:
            included = set()

        if isinstance(node.get('(@)'), str):
            includes = [_yaml.node_get(node, str, '(@)')]
        else:
            includes = _yaml.node_get(node, list, '(@)', default_value=None)

        if '(@)' in node:
            del node['(@)']

        if includes:
            for include in reversed(includes):
                if only_local and ':' in include:
                    continue
                include_node, file_path, sub_loader = self._include_file(include, current_loader)
                if file_path in included:
                    provenance = _yaml.node_get_provenance(node)
                    raise LoadError(LoadErrorReason.RECURSIVE_INCLUDE,
                                    "{}: trying to recursively include {}".format(provenance, file_path))
                # Because the included node will be modified, we need
                # to copy it so that we do not modify the toplevel
                # node of the provenance.
                include_node = _yaml.node_chain_copy(include_node)

                try:
                    included.add(file_path)
                    self._process(
                        include_node,
                        included=included,
                        current_loader=sub_loader,
                        only_local=only_local,
                        process_project_options=process_project_options or current_loader != sub_loader,
                    )
                finally:
                    included.remove(file_path)

                _yaml.composite(include_node, node)
                to_delete = [key for key, _ in _yaml.node_items(node) if key not in include_node]
                for key, value in include_node.items():
                    node[key] = value
                for key in to_delete:
                    del node[key]

        for _, value in _yaml.node_items(node):
            self._process_value(
                value,
                included=included,
                current_loader=current_loader,
                only_local=only_local,
                process_project_options=process_project_options,
            )

    # _include_file()
    #
    # Load an include YAML file with a loader.
    #
    # Args:
    #    include (str): file path relative to loader's project directory.
    #                   Can be prefixed with a junction name.
    #    loader (Loader): Loader for the current project.
    def _include_file(self, include, loader):
        shortname = include
        if ':' in include:
            junction, include = include.split(':', 1)
            junction_loader = loader._get_loader(junction, fetch_subprojects=True)
            current_loader = junction_loader
            current_loader.project.ensure_fully_loaded()
        else:
            current_loader = loader
        project = current_loader.project
        directory = project.directory
        file_path = os.path.join(directory, include)
        key = (current_loader, file_path)
        if key not in self._loaded:
            self._loaded[key] = _yaml.load(os.path.join(directory, include),
                                           shortname=shortname,
                                           project=project,
                                           copy_tree=self._copy_tree)
        return self._loaded[key], file_path, current_loader

    # _process_value()
    #
    # Select processing for value that could be a list or a dictionary.
    #
    # Args:
    #    value: Value to process. Can be a list or a dictionary.
# included (set): Fail for recursion if trying to load any files in this set # current_loader (Loader): Use alternative loader (for junction files) # only_local (bool): Whether to ignore junction files # process_project_options (bool): Whether to process options from current project # def _process_value( self, value, *, included=None, current_loader=None, only_local=False, process_project_options=True ): if included is None: included = set() if isinstance(value, Mapping): self._process_node( value, included=included, current_loader=current_loader, only_local=only_local, process_project_options=process_project_options, ) elif isinstance(value, list): for v in value: self._process_value( v, included=included, current_loader=current_loader, only_local=only_local, process_project_options=process_project_options, ) buildstream-1.6.9/buildstream/_loader/000077500000000000000000000000001437515270000177665ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_loader/__init__.py000066400000000000000000000015661437515270000221070ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .metasource import MetaSource from .metaelement import MetaElement from .loader import Loader buildstream-1.6.9/buildstream/_loader/loadelement.py000066400000000000000000000116611437515270000226360ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # BuildStream toplevel imports from .. import _yaml # Local package imports from .types import Symbol, Dependency # LoadElement(): # # A transient object breaking down what is loaded allowing us to # do complex operations in multiple passes. 
# # Args: # node (dict): A YAML loaded dictionary # name (str): The element name # loader (Loader): The Loader object for this element # class LoadElement(): def __init__(self, node, filename, loader): # # Public members # self.node = node # The YAML node self.name = filename # The element name self.full_name = None # The element full name (with associated junction) self.deps = None # The list of Dependency objects # # Private members # self._loader = loader # The Loader object self._dep_cache = None # The dependency cache, to speed up depends() # # Initialization # if loader.project.junction: # dependency is in subproject, qualify name self.full_name = '{}:{}'.format(loader.project.junction.name, self.name) else: # dependency is in top-level project self.full_name = self.name # Ensure the root node is valid _yaml.node_validate(self.node, [ 'kind', 'depends', 'sources', 'sandbox', 'variables', 'environment', 'environment-nocache', 'config', 'public', 'description', 'build-depends', 'runtime-depends', ]) # Extract the Dependencies self.deps = _extract_depends_from_node(self.node) # depends(): # # Checks if this element depends on another element, directly # or indirectly. # # Args: # other (LoadElement): Another LoadElement # # Returns: # (bool): True if this LoadElement depends on 'other' # def depends(self, other): self._ensure_depends_cache() return self._dep_cache.get(other.full_name) is not None ########################################### # Private Methods # ########################################### def _ensure_depends_cache(self): if self._dep_cache: return self._dep_cache = {} for dep in self.deps: elt = self._loader.get_element_for_dep(dep) # Ensure the cache of the element we depend on elt._ensure_depends_cache() # We depend on this element self._dep_cache[elt.full_name] = True # And we depend on everything this element depends on self._dep_cache.update(elt._dep_cache) # _extract_depends_from_node(): # # Creates an array of Dependency objects from a given dict node 'node', # allows both strings and dicts for expressing the dependency and # throws a comprehensive LoadError in the case that the node is malformed. 
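#
# Illustrative sketch (not from the original sources): the dependency
# declarations handled here may be plain strings or dictionaries, for
# example (element names are hypothetical):
#
#     depends:
#     - base.bst
#     - filename: compiler.bst
#       type: build
#
#     build-depends:
#     - filename: toolchain.bst
#       junction: subproject.bst
#       strict: true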
# # After extracting depends, the symbol is deleted from the node # # Args: # node (dict): A YAML loaded dictionary # # Returns: # (list): a list of Dependency objects # def _extract_depends_from_node(node, *, key=None): if key is None: build_depends = _extract_depends_from_node(node, key=Symbol.BUILD_DEPENDS) runtime_depends = _extract_depends_from_node(node, key=Symbol.RUNTIME_DEPENDS) depends = _extract_depends_from_node(node, key=Symbol.DEPENDS) return build_depends + runtime_depends + depends elif key == Symbol.BUILD_DEPENDS: default_dep_type = Symbol.BUILD elif key == Symbol.RUNTIME_DEPENDS: default_dep_type = Symbol.RUNTIME elif key == Symbol.DEPENDS: default_dep_type = None else: assert False, "Unexpected value of key '{}'".format(key) depends = _yaml.node_get(node, list, key, default_value=[]) output_deps = [] for index, dep in enumerate(depends): dep_provenance = _yaml.node_get_provenance(node, key=key, indices=[index]) dependency = Dependency(dep, dep_provenance, default_dep_type=default_dep_type) output_deps.append(dependency) # Now delete the field, we dont want it anymore if key in node: del node[key] return output_deps buildstream-1.6.9/buildstream/_loader/loader.py000066400000000000000000000612421437515270000216130ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os from functools import cmp_to_key from collections import namedtuple from collections.abc import Mapping import tempfile import shutil from .._exceptions import LoadError, LoadErrorReason from .. import Consistency from .. import _yaml from ..element import Element from .._profile import Topics, profile_start, profile_end from .._includes import Includes from .types import Symbol, Dependency from .loadelement import LoadElement from . import MetaElement from . import MetaSource # Loader(): # # The Loader class does the heavy lifting of parsing target # bst files and ultimately transforming them into a list of MetaElements # with their own MetaSources, ready for instantiation by the core. # # Args: # context (Context): The Context object # project (Project): The toplevel Project object # parent (Loader): A parent Loader object, in the case this is a junctioned Loader # tempdir (str): A directory to cleanup with the Loader, given to the loader by a parent # loader in the case that this loader is a subproject loader. 
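#
# Illustrative usage sketch (not from the original sources), assuming a
# Context and a Project have already been constructed elsewhere and that
# 'app.bst' is a hypothetical element in the project's element path:
#
#     loader = Loader(context, project)
#     meta_elements = loader.load(['app.bst'], fetch_subprojects=True)
#     ...
#     loader.cleanup()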
# class Loader(): def __init__(self, context, project, *, parent=None, tempdir=None): # Ensure we have an absolute path for the base directory basedir = project.element_path if not os.path.isabs(basedir): basedir = os.path.abspath(basedir) # # Public members # self.project = project # The associated Project # # Private members # self._context = context self._options = project.options # Project options (OptionPool) self._basedir = basedir # Base project directory self._first_pass_options = project.first_pass_config.options # Project options (OptionPool) self._tempdir = tempdir # A directory to cleanup self._parent = parent # The parent loader self._meta_elements = {} # Dict of resolved meta elements by name self._elements = {} # Dict of elements self._loaders = {} # Dict of junction loaders self._includes = Includes(self, copy_tree=True) # load(): # # Loads the project based on the parameters given to the constructor # # Args: # rewritable (bool): Whether the loaded files should be rewritable # this is a bit more expensive due to deep copies # ticker (callable): An optional function for tracking load progress # targets (list of str): Target, element-path relative bst filenames in the project # fetch_subprojects (bool): Whether to fetch subprojects while loading # # Raises: LoadError # # Returns: The toplevel LoadElement def load(self, targets, rewritable=False, ticker=None, fetch_subprojects=False): for filename in targets: if os.path.isabs(filename): # XXX Should this just be an assertion ? # Expect that the caller gives us the right thing at least ? raise LoadError(LoadErrorReason.INVALID_DATA, "Target '{}' was not specified as a relative " "path to the base project directory: {}" .format(filename, self._basedir)) # First pass, recursively load files and populate our table of LoadElements # deps = [] for target in targets: profile_start(Topics.LOAD_PROJECT, target) _, name, loader = self._parse_name(target, rewritable, ticker, fetch_subprojects=fetch_subprojects) loader._load_file(name, rewritable, ticker, fetch_subprojects) deps.append(Dependency(target, provenance="[command line]")) profile_end(Topics.LOAD_PROJECT, target) # # Now that we've resolve the dependencies, scan them for circular dependencies # # Set up a dummy element that depends on all top-level targets # to resolve potential circular dependencies between them DummyTarget = namedtuple('DummyTarget', ['name', 'full_name', 'deps']) dummy = DummyTarget(name='', full_name='', deps=deps) self._elements[''] = dummy profile_key = "_".join(t for t in targets) profile_start(Topics.CIRCULAR_CHECK, profile_key) self._check_circular_deps('') profile_end(Topics.CIRCULAR_CHECK, profile_key) ret = [] # # Sort direct dependencies of elements by their dependency ordering # for target in targets: profile_start(Topics.SORT_DEPENDENCIES, target) _, name, loader = self._parse_name(target, rewritable, ticker, fetch_subprojects=fetch_subprojects) loader._sort_dependencies(name) profile_end(Topics.SORT_DEPENDENCIES, target) # Finally, wrap what we have into LoadElements and return the target # ret.append(loader._collect_element(name)) return ret # cleanup(): # # Remove temporary checkout directories of subprojects # def cleanup(self): if self._parent and not self._tempdir: # already done return # recurse for loader in self._loaders.values(): # value may be None with nested junctions without overrides if loader is not None: loader.cleanup() if not self._parent: # basedir of top-level loader is never a temporary directory return # safe guard to not 
accidentally delete directories outside builddir if self._tempdir.startswith(self._context.builddir + os.sep): if os.path.exists(self._tempdir): shutil.rmtree(self._tempdir) # get_element_for_dep(): # # Gets a cached LoadElement by Dependency object # # This is used by LoadElement # # Args: # dep (Dependency): The dependency to search for # # Returns: # (LoadElement): The cached LoadElement # def get_element_for_dep(self, dep): loader = self._get_loader_for_dep(dep) return loader._elements[dep.name] ########################################### # Private Methods # ########################################### # _load_file(): # # Recursively load bst files # # Args: # filename (str): The element-path relative bst file # rewritable (bool): Whether we should load in round trippable mode # ticker (callable): A callback to report loaded filenames to the frontend # fetch_subprojects (bool): Whether to fetch subprojects while loading # provenance (Provenance): The location from where the file was referred to, or None # # Returns: # (LoadElement): A loaded LoadElement # def _load_file(self, filename, rewritable, ticker, fetch_subprojects, provenance=None): # Silently ignore already loaded files if filename in self._elements: return self._elements[filename] # Call the ticker if ticker: ticker(filename) # Load the data and process any conditional statements therein fullpath = os.path.join(self._basedir, filename) try: node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable, project=self.project) except LoadError as e: if e.reason == LoadErrorReason.MISSING_FILE: if self.project.junction: message = "Could not find element '{}' in project referred to by junction element '{}'" \ .format(filename, self.project.junction.name) else: message = "Could not find element '{}' in elements directory '{}'".format(filename, self._basedir) if provenance: message = "{}: {}".format(provenance, message) # If we can't find the file, try to suggest plausible # alternatives by stripping the element-path from the given # filename, and verifying that it exists. detail = None elements_dir = os.path.relpath(self._basedir, self.project.directory) element_relpath = os.path.relpath(filename, elements_dir) if filename.startswith(elements_dir) and os.path.exists(os.path.join(self._basedir, element_relpath)): detail = "Did you mean '{}'?".format(element_relpath) raise LoadError(LoadErrorReason.MISSING_FILE, message, detail=detail) from e if e.reason == LoadErrorReason.LOADING_DIRECTORY: # If a .bst file exists in the element path, # let's suggest this as a plausible alternative. 
message = str(e) if provenance: message = "{}: {}".format(provenance, message) detail = None if os.path.exists(os.path.join(self._basedir, filename + '.bst')): element_name = filename + '.bst' detail = "Did you mean '{}'?\n".format(element_name) raise LoadError(LoadErrorReason.LOADING_DIRECTORY, message, detail=detail) from e # Raise the unmodified LoadError raise kind = _yaml.node_get(node, str, Symbol.KIND) if kind == "junction": self._first_pass_options.process_node(node) else: self.project.ensure_fully_loaded() self._includes.process(node) element = LoadElement(node, filename, self) self._elements[filename] = element # Load all dependency files for the new LoadElement for dep in element.deps: if dep.junction: self._load_file(dep.junction, rewritable, ticker, fetch_subprojects, dep.provenance) loader = self._get_loader(dep.junction, rewritable=rewritable, ticker=ticker, fetch_subprojects=fetch_subprojects) else: loader = self dep_element = loader._load_file(dep.name, rewritable, ticker, fetch_subprojects, dep.provenance) if _yaml.node_get(dep_element.node, str, Symbol.KIND) == 'junction': raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Cannot depend on junction" .format(dep.provenance)) return element # _check_circular_deps(): # # Detect circular dependencies on LoadElements with # dependencies already resolved. # # Args: # element_name (str): The element-path relative element name to check # # Raises: # (LoadError): In case there was a circular dependency error # def _check_circular_deps(self, element_name, check_elements=None, validated=None): if check_elements is None: check_elements = {} if validated is None: validated = {} element = self._elements[element_name] # element name must be unique across projects # to be usable as key for the check_elements and validated dicts element_name = element.full_name # Skip already validated branches if validated.get(element_name) is not None: return if check_elements.get(element_name) is not None: raise LoadError(LoadErrorReason.CIRCULAR_DEPENDENCY, "Circular dependency detected for element: {}" .format(element.name)) # Push / Check each dependency / Pop check_elements[element_name] = True for dep in element.deps: loader = self._get_loader_for_dep(dep) loader._check_circular_deps(dep.name, check_elements, validated) del check_elements[element_name] # Eliminate duplicate paths validated[element_name] = True # _sort_dependencies(): # # Sort dependencies of each element by their dependencies, # so that direct dependencies which depend on other direct # dependencies (directly or indirectly) appear later in the # list. # # This avoids the need for performing multiple topological # sorts throughout the build process. 
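    #
    # Illustrative sketch (not from the original sources): with hypothetical
    # direct dependencies [app.bst, base.bst] where app.bst itself depends
    # on base.bst, the sort below yields [base.bst, app.bst]; runtime-only
    # dependencies with no inter-element ordering are pushed towards the
    # end of the list.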
# # Args: # element_name (str): The element-path relative element name to sort # def _sort_dependencies(self, element_name, visited=None): if visited is None: visited = {} element = self._elements[element_name] # element name must be unique across projects # to be usable as key for the visited dict element_name = element.full_name if visited.get(element_name) is not None: return for dep in element.deps: loader = self._get_loader_for_dep(dep) loader._sort_dependencies(dep.name, visited=visited) def dependency_cmp(dep_a, dep_b): element_a = self.get_element_for_dep(dep_a) element_b = self.get_element_for_dep(dep_b) # Sort on inter element dependency first if element_a.depends(element_b): return 1 elif element_b.depends(element_a): return -1 # If there are no inter element dependencies, place # runtime only dependencies last if dep_a.dep_type != dep_b.dep_type: if dep_a.dep_type == Symbol.RUNTIME: return 1 elif dep_b.dep_type == Symbol.RUNTIME: return -1 # All things being equal, string comparison. if dep_a.name > dep_b.name: return 1 elif dep_a.name < dep_b.name: return -1 # Sort local elements before junction elements # and use string comparison between junction elements if dep_a.junction and dep_b.junction: if dep_a.junction > dep_b.junction: return 1 elif dep_a.junction < dep_b.junction: return -1 elif dep_a.junction: return -1 elif dep_b.junction: return 1 # This wont ever happen return 0 # Now dependency sort, we ensure that if any direct dependency # directly or indirectly depends on another direct dependency, # it is found later in the list. element.deps.sort(key=cmp_to_key(dependency_cmp)) visited[element_name] = True # _collect_element() # # Collect the toplevel elements we have # # Args: # element_name (str): The element-path relative element name to sort # # Returns: # (MetaElement): A recursively loaded MetaElement # def _collect_element(self, element_name): element = self._elements[element_name] # Return the already built one, if we already built it meta_element = self._meta_elements.get(element_name) if meta_element: return meta_element node = element.node elt_provenance = _yaml.node_get_provenance(node) meta_sources = [] sources = _yaml.node_get(node, list, Symbol.SOURCES, default_value=[]) element_kind = _yaml.node_get(node, str, Symbol.KIND) # Safe loop calling into _yaml.node_get() for each element ensures # we have good error reporting for i in range(len(sources)): source = _yaml.node_get(node, Mapping, Symbol.SOURCES, indices=[i]) kind = _yaml.node_get(source, str, Symbol.KIND) del source[Symbol.KIND] # Directory is optional directory = _yaml.node_get(source, str, Symbol.DIRECTORY, default_value=None) if directory: del source[Symbol.DIRECTORY] index = sources.index(source) meta_source = MetaSource(element_name, index, element_kind, kind, source, directory) meta_sources.append(meta_source) meta_element = MetaElement(self.project, element_name, element_kind, elt_provenance, meta_sources, _yaml.node_get(node, Mapping, Symbol.CONFIG, default_value={}), _yaml.node_get(node, Mapping, Symbol.VARIABLES, default_value={}), _yaml.node_get(node, Mapping, Symbol.ENVIRONMENT, default_value={}), _yaml.node_get(node, list, Symbol.ENV_NOCACHE, default_value=[]), _yaml.node_get(node, Mapping, Symbol.PUBLIC, default_value={}), _yaml.node_get(node, Mapping, Symbol.SANDBOX, default_value={}), element_kind == 'junction') # Cache it now, make sure it's already there before recursing self._meta_elements[element_name] = meta_element # Descend for dep in element.deps: loader = 
self._get_loader_for_dep(dep) meta_dep = loader._collect_element(dep.name) if dep.dep_type != 'runtime': meta_element.build_dependencies.append(meta_dep) if dep.dep_type != 'build': meta_element.dependencies.append(meta_dep) if dep.strict: meta_element.strict_dependencies.append(meta_dep) return meta_element # _get_loader(): # # Return loader for specified junction # # Args: # filename (str): Junction name # fetch_subprojects (bool): Whether to fetch subprojects while loading # # Raises: LoadError # # Returns: A Loader or None if specified junction does not exist def _get_loader(self, filename, *, rewritable=False, ticker=None, level=0, fetch_subprojects=False): # return previously determined result if filename in self._loaders: loader = self._loaders[filename] if loader is None: # do not allow junctions with the same name in different # subprojects raise LoadError(LoadErrorReason.CONFLICTING_JUNCTION, "Conflicting junction {} in subprojects, define junction in {}" .format(filename, self.project.name)) return loader if self._parent: # junctions in the parent take precedence over junctions defined # in subprojects loader = self._parent._get_loader(filename, rewritable=rewritable, ticker=ticker, level=level + 1, fetch_subprojects=fetch_subprojects) if loader: self._loaders[filename] = loader return loader try: self._load_file(filename, rewritable, ticker, fetch_subprojects) except LoadError as e: if e.reason != LoadErrorReason.MISSING_FILE: # other load error raise if level == 0: # junction element not found in this or ancestor projects raise # mark junction as not available to allow detection of # conflicting junctions in subprojects self._loaders[filename] = None return None # meta junction element meta_element = self._collect_element(filename) if meta_element.kind != 'junction': raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Expected junction but element kind is {}".format(filename, meta_element.kind)) element = Element._new_from_meta(meta_element) element._preflight() element._update_state() # Handle the case where a subproject needs to be fetched # if element._get_consistency() == Consistency.RESOLVED: if fetch_subprojects: sources = list(element.sources()) for idx, source in enumerate(sources): if ticker: ticker(filename, 'Fetching subproject from {} source'.format(source.get_kind())) if source._get_consistency() != Consistency.CACHED: source._fetch(sources[0:idx]) else: detail = "Try fetching the project with `bst fetch {}`".format(filename) raise LoadError(LoadErrorReason.SUBPROJECT_FETCH_NEEDED, "Subproject fetch needed for junction: {}".format(filename), detail=detail) # Handle the case where a subproject has no ref # elif element._get_consistency() == Consistency.INCONSISTENT: detail = "Try tracking the junction element with `bst track {}`".format(filename) raise LoadError(LoadErrorReason.SUBPROJECT_INCONSISTENT, "Subproject has no ref for junction: {}".format(filename), detail=detail) # Stage sources os.makedirs(self._context.builddir, exist_ok=True) basedir = tempfile.mkdtemp(prefix="{}-".format(element.normal_name), dir=self._context.builddir) element._stage_sources_at(basedir, mount_workspaces=False) # Load the project project_dir = os.path.join(basedir, element.path) try: from .._project import Project # pylint: disable=import-outside-toplevel project = Project(project_dir, self._context, junction=element, parent_loader=self, tempdir=basedir) except LoadError as e: if e.reason == LoadErrorReason.MISSING_PROJECT_CONF: raise 
LoadError(reason=LoadErrorReason.INVALID_JUNCTION, message="Could not find the project.conf file for {}. " "Expecting a project at path '{}'" .format(element, element.path or '.')) from e raise loader = project.loader self._loaders[filename] = loader return loader # _get_loader_for_dep(): # # Gets the appropriate Loader for a Dependency object # # Args: # dep (Dependency): A Dependency object # # Returns: # (Loader): The Loader object to use for this Dependency # def _get_loader_for_dep(self, dep): if dep.junction: # junction dependency, delegate to appropriate loader return self._loaders[dep.junction] else: return self # _parse_name(): # # Get junction and base name of element along with loader for the sub-project # # Args: # name (str): Name of target # rewritable (bool): Whether the loaded files should be rewritable # this is a bit more expensive due to deep copies # ticker (callable): An optional function for tracking load progress # fetch_subprojects (bool): Whether to fetch subprojects while loading # # Returns: # (tuple): - (str): name of the junction element # - (str): name of the element # - (Loader): loader for sub-project # def _parse_name(self, name, rewritable, ticker, fetch_subprojects=False): # We allow to split only once since deep junctions names are forbidden. # Users who want to refer to elements in sub-sub-projects are required # to create junctions on the top level project. junction_path = name.rsplit(':', 1) if len(junction_path) == 1: return None, junction_path[-1], self else: self._load_file(junction_path[-2], rewritable, ticker, fetch_subprojects) loader = self._get_loader(junction_path[-2], rewritable=rewritable, ticker=ticker, fetch_subprojects=fetch_subprojects) return junction_path[-2], junction_path[-1], loader buildstream-1.6.9/buildstream/_loader/metaelement.py000066400000000000000000000044511437515270000226440ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom class MetaElement(): # MetaElement() # # An abstract object holding data suitable for constructing an Element # # Args: # project: The project that contains the element # name: The resolved element name # kind: The element kind # provenance: The provenance of the element # sources: An array of MetaSource objects # config: The configuration data for the element # variables: The variables declared or overridden on this element # environment: The environment variables declared or overridden on this element # env_nocache: List of environment vars which should not be considered in cache keys # public: Public domain data dictionary # sandbox: Configuration specific to the sandbox environment # first_pass: The element is to be loaded with first pass configuration (junction) # def __init__(self, project, name, kind, provenance, sources, config, variables, environment, env_nocache, public, sandbox, first_pass): self.project = project self.name = name self.kind = kind self.provenance = provenance self.sources = sources self.config = config self.variables = variables self.environment = environment self.env_nocache = env_nocache self.public = public self.sandbox = sandbox self.build_dependencies = [] self.dependencies = [] self.strict_dependencies = [] self.first_pass = first_pass buildstream-1.6.9/buildstream/_loader/metasource.py000066400000000000000000000031741437515270000225140ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom class MetaSource(): # MetaSource() # # An abstract object holding data suitable for constructing a Source # # Args: # element_name: The name of the owning element # element_index: The index of the source in the owning element's source list # element_kind: The kind of the owning element # kind: The kind of the source # config: The configuration data for the source # first_pass: This source will be used with first project pass configuration (used for junctions). # def __init__(self, element_name, element_index, element_kind, kind, config, directory): self.element_name = element_name self.element_index = element_index self.element_kind = element_kind self.kind = kind self.config = config self.directory = directory self.first_pass = False buildstream-1.6.9/buildstream/_loader/types.py000066400000000000000000000133551437515270000215130ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from collections.abc import Mapping from .._exceptions import LoadError, LoadErrorReason from .. import _yaml # Symbol(): # # A simple object to denote the symbols we load with from YAML # class Symbol(): FILENAME = "filename" KIND = "kind" DEPENDS = "depends" BUILD_DEPENDS = "build-depends" RUNTIME_DEPENDS = "runtime-depends" SOURCES = "sources" CONFIG = "config" VARIABLES = "variables" ENVIRONMENT = "environment" ENV_NOCACHE = "environment-nocache" PUBLIC = "public" TYPE = "type" BUILD = "build" RUNTIME = "runtime" ALL = "all" DIRECTORY = "directory" JUNCTION = "junction" SANDBOX = "sandbox" STRICT = "strict" # Dependency() # # A simple object describing a dependency # # Args: # name (str): The element name # dep_type (str): The type of dependency, can be # Symbol.ALL, Symbol.BUILD, or Symbol.RUNTIME # junction (str): The element name of the junction, or None # provenance (Provenance): The YAML node provenance of where this # dependency was declared # class Dependency(): def __init__(self, dep, provenance, default_dep_type=None): self.provenance = provenance if isinstance(dep, str): self.name = dep self.dep_type = default_dep_type self.junction = None self.strict = False elif isinstance(dep, Mapping): if default_dep_type: _yaml.node_validate(dep, ['filename', 'junction', 'strict']) dep_type = default_dep_type else: _yaml.node_validate(dep, ['filename', 'type', 'junction', 'strict']) # Make type optional, for this we set it to None dep_type = _yaml.node_get(dep, str, Symbol.TYPE, default_value=None) if dep_type is None or dep_type == Symbol.ALL: dep_type = None elif dep_type not in [Symbol.BUILD, Symbol.RUNTIME]: provenance = _yaml.node_get_provenance(dep, key=Symbol.TYPE) raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Dependency type '{}' is not 'build', 'runtime' or 'all'" .format(provenance, dep_type)) self.name = _yaml.node_get(dep, str, Symbol.FILENAME) self.dep_type = dep_type self.junction = _yaml.node_get(dep, str, Symbol.JUNCTION, default_value=None) self.strict = _yaml.node_get(dep, bool, Symbol.STRICT, default_value=False) # Here we disallow explicitly setting 'strict' to False. # # This is in order to keep the door open to allowing the project.conf # set the default of dependency 'strict'-ness which might be useful # for projects which use mostly static linking and the like, in which # case we can later interpret explicitly non-strict dependencies # as an override of the project default. 
# if self.strict is False and Symbol.STRICT in dep: provenance = _yaml.node_get_provenance(dep, key=Symbol.STRICT) raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Setting 'strict' to False is unsupported" .format(provenance)) else: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Dependency is not specified as a string or a dictionary".format(provenance)) # Only build dependencies are allowed to be strict # if self.strict and self.dep_type == Symbol.RUNTIME: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Runtime dependency {} specified as `strict`.".format(self.provenance, self.name), detail="Only dependencies required at build time may be declared `strict`.") # `:` characters are not allowed in filename if a junction was # explicitly specified if self.junction and ':' in self.name: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Dependency {} contains `:` in its name. " "`:` characters are not allowed in filename when " "junction attribute is specified.".format(self.provenance, self.name)) # Name of the element should never contain more than one `:` characters if self.name.count(':') > 1: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Dependency {} contains multiple `:` in its name. " "Recursive lookups for cross-junction elements is not " "allowed.".format(self.provenance, self.name)) # Attempt to split name if no junction was specified explicitly if not self.junction and self.name.count(':') == 1: self.junction, self.name = self.name.split(':') buildstream-1.6.9/buildstream/_message.py000066400000000000000000000062201437515270000205160ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import datetime import os # Types of status messages. 
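#
# Illustrative sketch (not from the original sources): messages are
# constructed with one of the MessageType values below, e.g. (the
# unique_id of None and the message text here are hypothetical):
#
#     message = Message(None, MessageType.INFO, "Fetching sources",
#                       detail="Using the configured mirror")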
# class MessageType(): DEBUG = "debug" # Debugging message STATUS = "status" # Status message, verbose details INFO = "info" # Informative messages WARN = "warning" # Warning messages ERROR = "error" # Error messages BUG = "bug" # An unhandled exception was raised in a plugin LOG = "log" # Messages for log files _only_, never in the frontend # Timed Messages: SUCCESS and FAIL have duration timestamps START = "start" # Status start message SUCCESS = "success" # Successful status complete message FAIL = "failure" # Failing status complete message SKIPPED = "skipped" # Messages which should be reported regardless of whether # they are currently silenced or not unconditional_messages = [ MessageType.INFO, MessageType.WARN, MessageType.FAIL, MessageType.ERROR, MessageType.BUG ] # Message object # class Message(): def __init__(self, unique_id, message_type, message, task_id=None, detail=None, action_name=None, elapsed=None, depth=None, logfile=None, sandbox=None, scheduler=False): self.message_type = message_type # Message type self.message = message # The message string self.detail = detail # An additional detail string self.action_name = action_name # Name of the task queue (fetch, refresh, build, etc) self.elapsed = elapsed # The elapsed time, in timed messages self.depth = depth # The depth of a timed message self.logfile = logfile # The log file path where commands took place self.sandbox = sandbox # The sandbox directory where an error occurred (if any) self.pid = os.getpid() # The process pid self.unique_id = unique_id # The plugin object ID issueing the message self.task_id = task_id # The plugin object ID of the task self.scheduler = scheduler # Whether this is a scheduler level message self.creation_time = datetime.datetime.now() if message_type in (MessageType.SUCCESS, MessageType.FAIL): assert elapsed is not None buildstream-1.6.9/buildstream/_options/000077500000000000000000000000001437515270000202135ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_options/__init__.py000066400000000000000000000014661437515270000223330ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .optionpool import OptionPool buildstream-1.6.9/buildstream/_options/option.py000066400000000000000000000062111437515270000220750ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. 
# # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .. import _yaml # Shared symbols for validation purposes # OPTION_SYMBOLS = [ 'type', 'description', 'variable' ] # Option() # # An abstract class representing a project option. # # Concrete classes must be created to handle option types, # the loaded project options is a collection of typed Option # instances. # class Option(): # Subclasses use this to specify the type name used # for the yaml format and error messages OPTION_TYPE = None def __init__(self, name, definition, pool): self.name = name self.description = None self.variable = None self.value = None self.pool = pool self.load(definition) # load() # # Loads the option attributes from the descriptions # in the project.conf # # Args: # node (dict): The loaded YAML dictionary describing # the option def load(self, node): self.description = _yaml.node_get(node, str, 'description') self.variable = _yaml.node_get(node, str, 'variable', default_value=None) # Assert valid symbol name for variable name if self.variable is not None: p = _yaml.node_get_provenance(node, 'variable') _yaml.assert_symbol_name(p, self.variable, 'variable name') # load_value() # # Loads the value of the option in string form. # # Args: # node (Mapping): The YAML loaded key/value dictionary # to load the value from # transform (callbable): Transform function for variable substitution # def load_value(self, node, *, transform=None): pass # pragma: nocover # set_value() # # Sets the value of an option from a string passed # to buildstream on the command line # # Args: # value (str): The value in string form # def set_value(self, value): pass # pragma: nocover # get_value() # # Gets the value of an option in string form, this # is for the purpose of exporting option values to # variables which must be in string form. # # Returns: # (str): The value in string form # def get_value(self): pass # pragma: nocover # resolve() # # Called on each option once, after all configuration # and cli options have been passed. # def resolve(self): pass # pragma: nocover buildstream-1.6.9/buildstream/_options/optionarch.py000066400000000000000000000035611437515270000227400ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom import os from .optionenum import OptionEnum # OptionArch # # An enumeration project option which does not allow # definition of a default value, but instead tries to set # the default value to the machine architecture introspected # using `uname` # # Note that when using OptionArch in a project, it will automatically # bail out of the host machine `uname` reports a machine architecture # not supported by the project, in the case that no option was # specifically specified # class OptionArch(OptionEnum): OPTION_TYPE = 'arch' def load(self, node): super().load(node, allow_default_definition=False) def load_default_value(self, node): _, _, _, _, machine_arch = os.uname() return machine_arch def resolve(self): # Validate that the default machine arch reported by uname() is # explicitly supported by the project, only if it was not # overridden by user configuration or cli. # # If the value is specified on the cli or user configuration, # then it will already be valid. # self.validate(self.value) buildstream-1.6.9/buildstream/_options/optionbool.py000066400000000000000000000035021437515270000227510ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .. import _yaml from .._exceptions import LoadError, LoadErrorReason from .option import Option, OPTION_SYMBOLS # OptionBool # # A boolean project option # class OptionBool(Option): OPTION_TYPE = 'bool' def load(self, node): super().load(node) _yaml.node_validate(node, OPTION_SYMBOLS + ['default']) self.value = _yaml.node_get(node, bool, 'default') def load_value(self, node, *, transform=None): if transform: self.set_value(transform(_yaml.node_get(node, str, self.name))) else: self.value = _yaml.node_get(node, bool, self.name) def set_value(self, value): if value in ('True', 'true'): self.value = True elif value in ('False', 'false'): self.value = False else: raise LoadError(LoadErrorReason.INVALID_DATA, "Invalid value for boolean option {}: {}".format(self.name, value)) def get_value(self): if self.value: return "1" else: return "0" buildstream-1.6.9/buildstream/_options/optioneltmask.py000066400000000000000000000031351437515270000234600ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom from .. import utils from .optionflags import OptionFlags # OptionEltMask # # A flags option which automatically only allows element # names as values. # class OptionEltMask(OptionFlags): OPTION_TYPE = 'element-mask' def load(self, node): # Ask the parent constructor to disallow value definitions, # we define those automatically only. super().load(node, allow_value_definitions=False) # Here we want all valid elements as possible values, # but we'll settle for just the relative filenames # of files ending with ".bst" in the project element directory def load_valid_values(self, node): values = [] for filename in utils.list_relative_paths(self.pool.element_path): if filename.endswith('.bst'): values.append(filename) return values buildstream-1.6.9/buildstream/_options/optionenum.py000066400000000000000000000053321437515270000227650ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .. import _yaml from .._exceptions import LoadError, LoadErrorReason from .option import Option, OPTION_SYMBOLS # OptionEnum # # An enumeration project option # class OptionEnum(Option): OPTION_TYPE = 'enum' def load(self, node, allow_default_definition=True): super().load(node) valid_symbols = OPTION_SYMBOLS + ['values'] if allow_default_definition: valid_symbols += ['default'] _yaml.node_validate(node, valid_symbols) self.values = _yaml.node_get(node, list, 'values', default_value=[]) if not self.values: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: No values specified for {} option '{}'" .format(_yaml.node_get_provenance(node), self.OPTION_TYPE, self.name)) # Allow subclass to define the default value self.value = self.load_default_value(node) def load_value(self, node, *, transform=None): self.value = _yaml.node_get(node, str, self.name) if transform: self.value = transform(self.value) self.validate(self.value, _yaml.node_get_provenance(node, self.name)) def set_value(self, value): self.validate(value) self.value = value def get_value(self): return self.value def validate(self, value, provenance=None): if value not in self.values: prefix = "" if provenance: prefix = "{}: ".format(provenance) raise LoadError(LoadErrorReason.INVALID_DATA, "{}Invalid value for {} option '{}': {}\n" .format(prefix, self.OPTION_TYPE, self.name, value) + "Valid values: {}".format(", ".join(self.values))) def load_default_value(self, node): value = _yaml.node_get(node, str, 'default') self.validate(value, _yaml.node_get_provenance(node, 'default')) return value buildstream-1.6.9/buildstream/_options/optionflags.py000066400000000000000000000062441437515270000231200ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 
2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from .. import _yaml from .._exceptions import LoadError, LoadErrorReason from .option import Option, OPTION_SYMBOLS # OptionFlags # # A flags project option # class OptionFlags(Option): OPTION_TYPE = 'flags' def load(self, node, allow_value_definitions=True): super().load(node) valid_symbols = OPTION_SYMBOLS + ['default'] if allow_value_definitions: valid_symbols += ['values'] _yaml.node_validate(node, valid_symbols) # Allow subclass to define the valid values self.values = self.load_valid_values(node) if not self.values: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: No values specified for {} option '{}'" .format(_yaml.node_get_provenance(node), self.OPTION_TYPE, self.name)) self.value = _yaml.node_get(node, list, 'default', default_value=[]) self.validate(self.value, _yaml.node_get_provenance(node, 'default')) def load_value(self, node, *, transform=None): self.value = _yaml.node_get(node, list, self.name) if transform: self.value = [transform(x) for x in self.value] self.value = sorted(self.value) self.validate(self.value, _yaml.node_get_provenance(node, self.name)) def set_value(self, value): # Strip out all whitespace, allowing: "value1, value2 , value3" stripped = "".join(value.split()) # Get the comma separated values list_value = stripped.split(',') self.validate(list_value) self.value = sorted(list_value) def get_value(self): return ",".join(self.value) def validate(self, value, provenance=None): for flag in value: if flag not in self.values: prefix = "" if provenance: prefix = "{}: ".format(provenance) raise LoadError(LoadErrorReason.INVALID_DATA, "{}Invalid value for flags option '{}': {}\n" .format(prefix, self.name, value) + "Valid values: {}".format(", ".join(self.values))) def load_valid_values(self, node): # Allow the more descriptive error to raise when no values # exist rather than bailing out here (by specifying default_value) return _yaml.node_get(node, list, 'values', default_value=[]) buildstream-1.6.9/buildstream/_options/optionpool.py000066400000000000000000000247001437515270000227720ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # from collections.abc import Mapping import jinja2 from .. 
import _yaml from .._exceptions import LoadError, LoadErrorReason from .optionbool import OptionBool from .optionenum import OptionEnum from .optionflags import OptionFlags from .optioneltmask import OptionEltMask from .optionarch import OptionArch _OPTION_TYPES = { OptionBool.OPTION_TYPE: OptionBool, OptionEnum.OPTION_TYPE: OptionEnum, OptionFlags.OPTION_TYPE: OptionFlags, OptionEltMask.OPTION_TYPE: OptionEltMask, OptionArch.OPTION_TYPE: OptionArch, } class OptionPool(): def __init__(self, element_path): # We hold on to the element path for the sake of OptionEltMask self.element_path = element_path # # Private members # self._options = {} # The Options self._variables = None # The Options resolved into typed variables # jinja2 environment, with default globals cleared out of the way self._environment = jinja2.Environment(undefined=jinja2.StrictUndefined) self._environment.globals = [] # load() # # Loads the options described in the project.conf # # Args: # node (dict): The loaded YAML options # def load(self, options): for option_name, option_definition in _yaml.node_items(options): # Assert that the option name is a valid symbol p = _yaml.node_get_provenance(options, option_name) _yaml.assert_symbol_name(p, option_name, "option name", allow_dashes=False) opt_type_name = _yaml.node_get(option_definition, str, 'type') try: opt_type = _OPTION_TYPES[opt_type_name] except KeyError as e: p = _yaml.node_get_provenance(option_definition, 'type') raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Invalid option type '{}'".format(p, opt_type_name)) from e option = opt_type(option_name, option_definition, self) self._options[option_name] = option # load_yaml_values() # # Loads the option values specified in a key/value # dictionary loaded from YAML # # Args: # node (dict): The loaded YAML options # def load_yaml_values(self, node, *, transform=None): for option_name, _ in _yaml.node_items(node): try: option = self._options[option_name] except KeyError as e: p = _yaml.node_get_provenance(node, option_name) raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Unknown option '{}' specified" .format(p, option_name)) from e option.load_value(node, transform=transform) # load_cli_values() # # Loads the option values specified in a list of tuples # collected from the command line # # Args: # cli_options (list): A list of (str, str) tuples # ignore_unknown (bool): Whether to silently ignore unknown options. # def load_cli_values(self, cli_options, *, ignore_unknown=False): for option_name, option_value in cli_options: try: option = self._options[option_name] except KeyError as e: if not ignore_unknown: raise LoadError(LoadErrorReason.INVALID_DATA, "Unknown option '{}' specified on the command line" .format(option_name)) from e else: option.set_value(option_value) # resolve() # # Resolves the loaded options, this is just a step which must be # performed after loading all options and their values, and before # ever trying to evaluate an expression # def resolve(self): self._variables = {} for option_name, option in self._options.items(): # Delegate one more method for options to # do some last minute validation once any # overrides have been performed. # option.resolve() self._variables[option_name] = option.value # export_variables() # # Exports the option values which are declared # to be exported, to the passed dictionary. 
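    #
    # As an illustration (not part of the upstream sources): an option
    # declared in project.conf with a 'variable:' key, for example
    #
    #   options:
    #     debug:
    #       type: bool
    #       description: Enable debugging
    #       default: False
    #       variable: enable_debug
    #
    # would show up here as variables['enable_debug'].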
    #
    # Variable values are exported in string form
    #
    # Args:
    #    variables (dict): A variables dictionary
    #
    def export_variables(self, variables):
        for _, option in self._options.items():
            if option.variable:
                variables[option.variable] = option.get_value()

    # printable_variables()
    #
    # Exports all option names and string values
    # to the passed dictionary in alphabetical order.
    #
    # Args:
    #    variables (dict): A variables dictionary
    #
    def printable_variables(self, variables):
        for key in sorted(self._options):
            variables[key] = self._options[key].get_value()

    # process_node()
    #
    # Processes any '(?)' conditionals (and '(!)' assertions) found in the
    # given node, recursively composing the selected fragments.
    #
    # Args:
    #    node (Mapping): A YAML Loaded dictionary
    #
    def process_node(self, node):

        # A conditional will result in composition, which can
        # in turn add new conditionals to the root.
        #
        # Keep processing conditionals on the root node until
        # all directly nested conditionals are resolved.
        #
        while self._process_one_node(node):
            pass

        # Now recurse into nested dictionaries and lists
        # and process any indirectly nested conditionals.
        #
        for _, value in _yaml.node_items(node):
            if isinstance(value, Mapping):
                self.process_node(value)
            elif isinstance(value, list):
                self._process_list(value)

    #######################################################
    #                 Private Methods                     #
    #######################################################

    # _evaluate()
    #
    # Evaluates a jinja2 style expression with the loaded options in context.
    #
    # Args:
    #    expression (str): The jinja2 style expression
    #
    # Returns:
    #    (bool): Whether the expression resolved to a truthy value or a falsy one.
    #
    # Raises:
    #    LoadError: If the expression failed to resolve for any reason
    #
    def _evaluate(self, expression):

        #
        # Variables must be resolved at this point.
        #
        try:
            template_string = "{{% if {} %}} True {{% else %}} False {{% endif %}}".format(expression)
            template = self._environment.from_string(template_string)
            context = template.new_context(self._variables, shared=True)
            result = template.root_render_func(context)
            evaluated = jinja2.utils.concat(result)
            val = evaluated.strip()

            if val == "True":
                return True
            elif val == "False":
                return False
            else:  # pragma: nocover
                raise LoadError(LoadErrorReason.EXPRESSION_FAILED,
                                "Failed to evaluate expression: {}".format(expression))
        except jinja2.exceptions.TemplateError as e:
            raise LoadError(LoadErrorReason.EXPRESSION_FAILED,
                            "Failed to evaluate expression ({}): {}".format(expression, e)) from e

    # Recursion assistant for lists, in case there
    # are lists of lists.
    #
    def _process_list(self, values):
        for value in values:
            if isinstance(value, Mapping):
                self.process_node(value)
            elif isinstance(value, list):
                self._process_list(value)

    # Process a single conditional, resulting in composition
    # at the root level on the passed node
    #
    # Returns True if a conditional was processed.
    #
    def _process_one_node(self, node):
        conditions = _yaml.node_get(node, list, '(?)', default_value=None)
        assertion = _yaml.node_get(node, str, '(!)', default_value=None)

        # Process assertions first, we want to abort on the first encountered
        # assertion in a given dictionary, and not lose an assertion due to
        # it being overwritten by a later assertion which might also trigger.
        if assertion is not None:
            p = _yaml.node_get_provenance(node, '(!)')
            raise LoadError(LoadErrorReason.USER_ASSERTION,
                            "{}: {}".format(p, assertion.strip()))

        if conditions is not None:

            # Collect provenance first, we need to delete the (?) key
            # before any composition occurs.
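            #
            # (Illustrative sketch, not taken from the upstream sources.)
            # The list handled below typically originates from YAML such as:
            #
            #   (?):
            #   - arch == "x86_64":
            #       variables:
            #         flags: "-O2"
            #
            # where each entry is a single-key mapping from an option
            # expression to a fragment to compose in when it evaluates true.
            #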
provenance = [ _yaml.node_get_provenance(node, '(?)', indices=[i]) for i in range(len(conditions)) ] del node['(?)'] for condition, p in zip(conditions, provenance): tuples = list(_yaml.node_items(condition)) if len(tuples) > 1: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Conditional statement has more than one key".format(p)) expression, value = tuples[0] try: apply_fragment = self._evaluate(expression) except LoadError as e: # Prepend the provenance of the error raise LoadError(e.reason, "{}: {}".format(p, e)) from e if not hasattr(value, 'get'): raise LoadError(LoadErrorReason.ILLEGAL_COMPOSITE, "{}: Only values of type 'dict' can be composed.".format(p)) # Apply the yaml fragment if its condition evaluates to true if apply_fragment: _yaml.composite(node, value) return True return False buildstream-1.6.9/buildstream/_pipeline.py000066400000000000000000000430511437515270000207020ustar00rootroot00000000000000# # Copyright (C) 2016-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # Tristan Maat import os import itertools from operator import itemgetter from ._exceptions import PipelineError from ._message import Message, MessageType from ._profile import Topics, profile_start, profile_end from . import Scope, Consistency from ._project import ProjectRefStorage # PipelineSelection() # # Defines the kind of pipeline selection to make when the pipeline # is provided a list of targets, for whichever purpose. # # These values correspond to the CLI `--deps` arguments for convenience. # class PipelineSelection(): # Select only the target elements in the associated targets NONE = 'none' # As NONE, but redirect elements that are capable of it REDIRECT = 'redirect' # Select elements which must be built for the associated targets to be built PLAN = 'plan' # All dependencies of all targets, including the targets ALL = 'all' # All direct build dependencies and their recursive runtime dependencies, # excluding the targets BUILD = 'build' # All direct runtime dependencies and their recursive runtime dependencies, # including the targets RUN = 'run' # Pipeline() # # Args: # project (Project): The Project object # context (Context): The Context object # artifacts (Context): The ArtifactCache object # class Pipeline(): def __init__(self, context, project, artifacts): self._context = context # The Context self._project = project # The toplevel project # # Private members # self._artifacts = artifacts # load() # # Loads elements from target names. # # This function is called with a list of lists, such that multiple # target groups may be specified. Element names specified in `targets` # are allowed to be redundant. 
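    #
    # As a rough illustration (the element names are hypothetical), a call such as
    #
    #   pipeline.load([['app.bst'], ['base.bst', 'sdk.bst']])
    #
    # returns a tuple of two lists of Element objects, one list per input group.
    #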
# # Args: # target_groups (list of lists): Groups of toplevel targets to load # fetch_subprojects (bool): Whether we should fetch subprojects as a part of the # loading process, if they are not yet locally cached # rewritable (bool): Whether the loaded files should be rewritable # this is a bit more expensive due to deep copies # # Returns: # (tuple of lists): A tuple of grouped Element objects corresponding to target_groups # def load(self, target_groups, *, fetch_subprojects=True, rewritable=False): # First concatenate all the lists for the loader's sake targets = list(itertools.chain(*target_groups)) profile_start(Topics.LOAD_PIPELINE, "_".join(t.replace(os.sep, '-') for t in targets)) elements = self._project.load_elements(targets, rewritable=rewritable, fetch_subprojects=fetch_subprojects) # Now create element groups to match the input target groups elt_iter = iter(elements) element_groups = [ [next(elt_iter) for i in range(len(group))] for group in target_groups ] profile_end(Topics.LOAD_PIPELINE, "_".join(t.replace(os.sep, '-') for t in targets)) return tuple(element_groups) # resolve_elements() # # Resolve element state and cache keys. # # Args: # targets (list of Element): The list of toplevel element targets # def resolve_elements(self, targets): with self._context.timed_activity("Resolving cached state", silent_nested=True): for element in self.dependencies(targets, Scope.ALL): # Preflight element._preflight() # Determine initial element state. element._update_state() # dependencies() # # Generator function to iterate over elements and optionally # also iterate over sources. # # Args: # targets (list of Element): The target Elements to loop over # scope (Scope): The scope to iterate over # recurse (bool): Whether to recurse into dependencies # def dependencies(self, targets, scope, *, recurse=True): # Keep track of 'visited' in this scope, so that all targets # share the same context. visited = {} for target in targets: for element in target.dependencies(scope, recurse=recurse, visited=visited): yield element # plan() # # Generator function to iterate over only the elements # which are required to build the pipeline target, omitting # cached elements. The elements are yielded in a depth sorted # ordering for optimal build plans # # Args: # elements (list of Element): List of target elements to plan # # Returns: # (list of Element): A depth sorted list of the build plan # def plan(self, elements): # Keep locally cached elements in the plan if remote artifact cache is used # to allow pulling artifact with strict cache key, if available. plan_cached = not self._context.get_strict() and self._artifacts.has_fetch_remotes() return _Planner().plan(elements, plan_cached) # get_selection() # # Gets a full list of elements based on a toplevel # list of element targets # # Args: # targets (list of Element): The target Elements # mode (PipelineSelection): The PipelineSelection mode # # Various commands define a --deps option to specify what elements to # use in the result, this function reports a list that is appropriate for # the selected option. 
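    #
    # For instance (hypothetical element), get_selection([app], PipelineSelection.RUN)
    # yields 'app' along with its recursive runtime dependencies, mirroring the
    # `--deps run` argument on the CLI.
    #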
# def get_selection(self, targets, mode, *, silent=True): elements = None if mode == PipelineSelection.NONE: elements = targets elif mode == PipelineSelection.REDIRECT: # Redirect and log if permitted elements = [] for t in targets: new_elm = t._get_source_element() if new_elm != t and not silent: self._message(MessageType.INFO, "Element '{}' redirected to '{}'" .format(t.name, new_elm.name)) if new_elm not in elements: elements.append(new_elm) elif mode == PipelineSelection.PLAN: elements = self.plan(targets) else: if mode == PipelineSelection.ALL: scope = Scope.ALL elif mode == PipelineSelection.BUILD: scope = Scope.BUILD elif mode == PipelineSelection.RUN: scope = Scope.RUN elements = list(self.dependencies(targets, scope)) return elements # except_elements(): # # Return what we are left with after the intersection between # excepted and target elements and their unique dependencies is # gone. # # Args: # targets (list of Element): List of toplevel targetted elements # elements (list of Element): The list to remove elements from # except_targets (list of Element): List of toplevel except targets # # Returns: # (list of Element): The elements list with the intersected # exceptions removed # def except_elements(self, targets, elements, except_targets): if not except_targets: return elements targeted = list(self.dependencies(targets, Scope.ALL)) visited = [] def find_intersection(element): if element in visited: return visited.append(element) # Intersection elements are those that are also in # 'targeted', as long as we don't recurse into them. if element in targeted: yield element else: for dep in element.dependencies(Scope.ALL, recurse=False): yield from find_intersection(dep) # Build a list of 'intersection' elements, i.e. the set of # elements that lie on the border closest to excepted elements # between excepted and target elements. intersection = list(itertools.chain.from_iterable( find_intersection(element) for element in except_targets )) # Now use this set of elements to traverse the targeted # elements, except 'intersection' elements and their unique # dependencies. queue = [] visited = [] queue.extend(targets) while queue: element = queue.pop() if element in visited or element in intersection: continue visited.append(element) queue.extend(element.dependencies(Scope.ALL, recurse=False)) # That looks like a lot, but overall we only traverse (part # of) the graph twice. This could be reduced to once if we # kept track of parent elements, but is probably not # significant. # Ensure that we return elements in the same order they were # in before. return [element for element in elements if element in visited] # targets_include() # # Checks whether the given targets are, or depend on some elements # # Args: # targets (list of Element): A list of targets # elements (list of Element): List of elements to check # # Returns: # (bool): True if all of `elements` are the `targets`, or are # somehow depended on by `targets`. 
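    #
    # A small illustration (hypothetical elements): if 'app.bst' depends on
    # 'base.bst', then targets_include([app], [base]) is True, while
    # targets_include([base], [app]) is False.
    #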
# def targets_include(self, targets, elements): target_element_set = set(self.dependencies(targets, Scope.ALL)) element_set = set(elements) return element_set.issubset(target_element_set) # subtract_elements() # # Subtract a subset of elements # # Args: # elements (list of Element): The element list # subtract (list of Element): List of elements to subtract from elements # # Returns: # (list): The original elements list, with elements in subtract removed # def subtract_elements(self, elements, subtract): subtract_set = set(subtract) return [ e for e in elements if e not in subtract_set ] # track_cross_junction_filter() # # Filters out elements which are across junction boundaries, # otherwise asserts that there are no such elements. # # This is currently assumed to be only relevant for element # lists targetted at tracking. # # Args: # project (Project): Project used for cross_junction filtering. # All elements are expected to belong to that project. # elements (list of Element): The list of elements to filter # cross_junction_requested (bool): Whether the user requested # cross junction tracking # # Returns: # (list of Element): The filtered or asserted result # def track_cross_junction_filter(self, project, elements, cross_junction_requested): # Filter out cross junctioned elements if not cross_junction_requested: elements = self._filter_cross_junctions(project, elements) self._assert_junction_tracking(elements) return elements # assert_consistent() # # Asserts that the given list of elements are in a consistent state, that # is to say that all sources are consistent and can at least be fetched. # # Consequently it also means that cache keys can be resolved. # def assert_consistent(self, elements): inconsistent = [] inconsistent_workspaced = [] with self._context.timed_activity("Checking sources"): for element in elements: if element._get_consistency() == Consistency.INCONSISTENT: if element._get_workspace(): inconsistent_workspaced.append(element) else: inconsistent.append(element) if inconsistent: detail = "Exact versions are missing for the following elements:\n\n" for element in inconsistent: detail += " Element: {} is inconsistent\n".format(element._get_full_name()) for source in element.sources(): if source._get_consistency() == Consistency.INCONSISTENT: detail += " Source {} is missing ref\n".format(source) detail += '\n' detail += "Try tracking these elements first with `bst track`\n" raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline") if inconsistent_workspaced: detail = "Some workspaces do not exist but are not closed\n" + \ "Try closing them with `bst workspace close`\n\n" for element in inconsistent_workspaced: detail += " " + element._get_full_name() + "\n" raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline-workspaced") ############################################################# # Private Methods # ############################################################# # _filter_cross_junction() # # Filters out cross junction elements from the elements # # Args: # project (Project): The project on which elements are allowed # elements (list of Element): The list of elements to be tracked # # Returns: # (list): A filtered list of `elements` which does # not contain any cross junction elements. 
# def _filter_cross_junctions(self, project, elements): return [ element for element in elements if element._get_project() is project ] # _assert_junction_tracking() # # Raises an error if tracking is attempted on junctioned elements and # a project.refs file is not enabled for the toplevel project. # # Args: # elements (list of Element): The list of elements to be tracked # def _assert_junction_tracking(self, elements): # We can track anything if the toplevel project uses project.refs # if self._project.ref_storage == ProjectRefStorage.PROJECT_REFS: return # Ideally, we would want to report every cross junction element but not # their dependencies, unless those cross junction elements dependencies # were also explicitly requested on the command line. # # But this is too hard, lets shoot for a simple error. for element in elements: element_project = element._get_project() if element_project is not self._project: detail = "Requested to track sources across junction boundaries\n" + \ "in a project which does not use project.refs ref-storage." raise PipelineError("Untrackable sources", detail=detail, reason="untrackable-sources") # _message() # # Local message propagator # def _message(self, message_type, message, **kwargs): args = dict(kwargs) self._context.message( Message(None, message_type, message, **args)) # _Planner() # # An internal object used for constructing build plan # from a given resolved toplevel element, while considering what # parts need to be built depending on build only dependencies # being cached, and depth sorting for more efficient processing. # class _Planner(): def __init__(self): self.depth_map = {} self.visiting_elements = set() # Here we want to traverse the same element more than once when # it is reachable from multiple places, with the interest of finding # the deepest occurance of every element def plan_element(self, element, depth): if element in self.visiting_elements: # circular dependency, already being processed return prev_depth = self.depth_map.get(element) if prev_depth is not None and prev_depth >= depth: # element and dependencies already processed at equal or greater depth return self.visiting_elements.add(element) for dep in element.dependencies(Scope.RUN, recurse=False): self.plan_element(dep, depth) # Dont try to plan builds of elements that are cached already if not element._cached(): for dep in element.dependencies(Scope.BUILD, recurse=False): self.plan_element(dep, depth + 1) self.depth_map[element] = depth self.visiting_elements.remove(element) def plan(self, roots, plan_cached): for root in roots: self.plan_element(root, 0) depth_sorted = sorted(self.depth_map.items(), key=itemgetter(1), reverse=True) return [item[0] for item in depth_sorted if plan_cached or not item[0]._cached()] buildstream-1.6.9/buildstream/_platform/000077500000000000000000000000001437515270000203445ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_platform/__init__.py000066400000000000000000000014471437515270000224630ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat from .platform import Platform buildstream-1.6.9/buildstream/_platform/linux.py000066400000000000000000000106531437515270000220620ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat import os import subprocess from .. import _site from .. import utils from ..sandbox import SandboxBwrap, SandboxDummy from . import Platform class Linux(Platform): ARCHITECTURES = { 'amd64': 'x86_64', 'arm64': 'aarch64', 'i386': 'i686', 'armhf': 'armv7l', 'ppc64el': 'ppc64le', } def __init__(self): super().__init__() self._uid = os.geteuid() self._gid = os.getegid() self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8) self._user_ns_available = self._check_user_ns_available() def create_sandbox(self, *args, **kwargs): # Inform the bubblewrap sandbox as to whether it can use user namespaces or not kwargs['user_ns_available'] = self._user_ns_available kwargs['die_with_parent_available'] = self._die_with_parent_available kwargs['linux32'] = False host_os, _, _, _, host_arch = os.uname() config = kwargs['config'] # We can't do builds for another host OS if config.build_os != host_os: return SandboxDummy("Configured and host OS don't match.", *args, **kwargs) if config.build_arch != host_arch: try: archtest = utils.get_host_tool('arch-test') supported = subprocess.getoutput(archtest).splitlines() supported_architectures = map(self.ARCHITECTURES.get, supported, supported) except utils.ProgramNotFoundError: supported_architectures = [] if host_arch == "x86_64": supported_architectures = ["i686"] elif host_arch == "aarch64": supported_architectures = ["armv7l"] if config.build_arch not in supported_architectures: return SandboxDummy("Configured and host architecture don't match.", *args, **kwargs) if ((config.build_arch == "i686" and host_arch == "x86_64") or (config.build_arch == "armv7l" and host_arch == "aarch64")): # check whether linux32 is available try: utils.get_host_tool('linux32') kwargs['linux32'] = True except utils.ProgramNotFoundError: return SandboxDummy("Configured and host architecture don't match.", *args, **kwargs) return SandboxBwrap(*args, **kwargs) def check_sandbox_config(self, config): if self._user_ns_available: # User namespace support allows arbitrary build UID/GID settings. return True else: # Without user namespace support, the UID/GID in the sandbox # will match the host UID/GID. 
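            #
            # For example (illustrative snippet, not from the upstream sources),
            # a project requesting a fixed UID in its sandbox configuration:
            #
            #   sandbox:
            #     build-uid: 0
            #     build-gid: 0
            #
            # can only be satisfied without user namespaces if it happens to
            # match the host UID/GID checked below.
            #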
return config.build_uid == self._uid and config.build_gid == self._gid ################################################ # Private Methods # ################################################ def _check_user_ns_available(self): # Here, lets check if bwrap is able to create user namespaces, # issue a warning if it's not available, and save the state # locally so that we can inform the sandbox to not try it # later on. bwrap = utils.get_host_tool('bwrap') whoami = utils.get_host_tool('whoami') try: output = subprocess.check_output([ bwrap, '--ro-bind', '/', '/', '--unshare-user', '--uid', '0', '--gid', '0', whoami, ]) output = output.decode('UTF-8').strip() except subprocess.CalledProcessError: output = '' return output == 'root' buildstream-1.6.9/buildstream/_platform/platform.py000066400000000000000000000055251437515270000225510ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat import os import sys from .._exceptions import PlatformError, ImplError class Platform(): _instance = None # Platform() # # A class to manage platform-specific details. Currently holds the # sandbox factory as well as platform helpers. # def __init__(self): pass @classmethod def _create_instance(cls): # pylint: disable=import-outside-toplevel if sys.platform.startswith('linux'): backend = 'linux' else: backend = 'unix' # Meant for testing purposes and therefore hidden in the # deepest corners of the source code. Try not to abuse this, # please? 
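        #
        # For example (illustrative invocation):
        #
        #   BST_FORCE_BACKEND=unix bst build element.bst
        #
        # forces the chroot based unix backend even on a Linux host.
        #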
if os.getenv('BST_FORCE_BACKEND'): backend = os.getenv('BST_FORCE_BACKEND') if backend == 'linux': from .linux import Linux as PlatformImpl elif backend == 'unix': from .unix import Unix as PlatformImpl else: raise PlatformError("No such platform: '{}'".format(backend)) cls._instance = PlatformImpl() @classmethod def get_platform(cls): if not cls._instance: cls._create_instance() return cls._instance ################################################################## # Sandbox functions # ################################################################## # create_sandbox(): # # Create a build sandbox suitable for the environment # # Args: # args (dict): The arguments to pass to the sandbox constructor # kwargs (file): The keyword arguments to pass to the sandbox constructor # # Returns: # (Sandbox) A sandbox # def create_sandbox(self, *args, **kwargs): raise ImplError("Platform {platform} does not implement create_sandbox()" .format(platform=type(self).__name__)) def check_sandbox_config(self, config): raise ImplError("Platform {platform} does not implement check_sandbox_config()" .format(platform=type(self).__name__)) buildstream-1.6.9/buildstream/_platform/unix.py000066400000000000000000000030211437515270000216750ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat import os from .._exceptions import PlatformError from ..sandbox import SandboxChroot from . import Platform class Unix(Platform): def __init__(self): super().__init__() self._uid = os.geteuid() self._gid = os.getegid() # Not necessarily 100% reliable, but we want to fail early. if self._uid != 0: raise PlatformError("Root privileges are required to run without bubblewrap.") def create_sandbox(self, *args, **kwargs): return SandboxChroot(*args, **kwargs) def check_sandbox_config(self, config): # With the chroot sandbox, the UID/GID in the sandbox # will match the host UID/GID (typically 0/0). return config.build_uid == self._uid and config.build_gid == self._gid buildstream-1.6.9/buildstream/_plugincontext.py000066400000000000000000000252401437515270000220000ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import inspect from ._exceptions import PluginError, LoadError, LoadErrorReason from . 
import utils from .utils import UtilError # A Context for loading plugin types # # Args: # plugin_base (PluginBase): The main PluginBase object to work with # base_type (type): A base object type for this context # site_plugin_path (str): Path to where buildstream keeps plugins # plugin_origins (list): Data used to search for plugins # # Since multiple pipelines can be processed recursively # within the same interpretor, it's important that we have # one context associated to the processing of a given pipeline, # this way sources and element types which are particular to # a given BuildStream project are isolated to their respective # Pipelines. # class PluginContext(): def __init__(self, plugin_base, base_type, site_plugin_path, *, plugin_origins=None, dependencies=None, format_versions=None): if format_versions is None: format_versions = {} # The plugin kinds which were loaded self.loaded_dependencies = [] # # Private members # self._dependencies = dependencies self._base_type = base_type # The base class plugins derive from self._types = {} # Plugin type lookup table by kind self._plugin_origins = plugin_origins or [] # The PluginSource object self._plugin_base = plugin_base self._site_source = plugin_base.make_plugin_source(searchpath=site_plugin_path) self._alternate_sources = {} self._format_versions = format_versions # lookup(): # # Fetches a type loaded from a plugin in this plugin context # # Args: # kind (str): The kind of Plugin to create # # Returns: the type associated with the given kind # # Raises: PluginError # def lookup(self, kind): return self._ensure_plugin(kind) def _get_local_plugin_source(self, path): if ('local', path) not in self._alternate_sources: # key by a tuple to avoid collision source = self._plugin_base.make_plugin_source(searchpath=[path]) # Ensure that sources never get garbage collected, # as they'll take the plugins with them. self._alternate_sources[('local', path)] = source else: source = self._alternate_sources[('local', path)] return source def _get_pip_plugin_source(self, package_name, kind): defaults = None if ('pip', package_name) not in self._alternate_sources: import pkg_resources # pylint: disable=import-outside-toplevel # key by a tuple to avoid collision try: package = pkg_resources.get_entry_info(package_name, 'buildstream.plugins', kind) except pkg_resources.DistributionNotFound as e: raise PluginError("Failed to load {} plugin '{}': {}" .format(self._base_type.__name__, kind, e)) from e if package is None: raise PluginError("Pip package {} does not contain a plugin named '{}'" .format(package_name, kind)) location = package.dist.get_resource_filename( pkg_resources._manager, package.module_name.replace('.', os.sep) + '.py' ) # Also load the defaults - required since setuptools # may need to extract the file. 
try: defaults = package.dist.get_resource_filename( pkg_resources._manager, package.module_name.replace('.', os.sep) + '.yaml' ) except KeyError: # The plugin didn't have an accompanying YAML file defaults = None source = self._plugin_base.make_plugin_source(searchpath=[os.path.dirname(location)]) self._alternate_sources[('pip', package_name)] = source else: source = self._alternate_sources[('pip', package_name)] return source, defaults def _ensure_plugin(self, kind): if kind not in self._types: # Check whether the plugin is specified in plugins source = None defaults = None loaded_dependency = False for origin in self._plugin_origins: if kind not in origin['plugins']: continue if origin['origin'] == 'local': source = self._get_local_plugin_source(origin['path']) elif origin['origin'] == 'pip': source, defaults = self._get_pip_plugin_source(origin['package-name'], kind) else: raise PluginError("Failed to load plugin '{}': " "Unexpected plugin origin '{}'" .format(kind, origin['origin'])) loaded_dependency = True break # Fall back to getting the source from site if not source: if kind not in self._site_source.list_plugins(): raise PluginError("No {} type registered for kind '{}'" .format(self._base_type.__name__, kind)) source = self._site_source self._types[kind] = self._load_plugin(source, kind, defaults) if loaded_dependency: self.loaded_dependencies.append(kind) return self._types[kind] def _load_plugin(self, source, kind, defaults): try: plugin = source.load_plugin(kind) if not defaults: plugin_file = inspect.getfile(plugin) plugin_dir = os.path.dirname(plugin_file) plugin_conf_name = "{}.yaml".format(kind) defaults = os.path.join(plugin_dir, plugin_conf_name) except ImportError as e: raise PluginError("Failed to load {} plugin '{}': {}" .format(self._base_type.__name__, kind, e)) from e try: plugin_type = plugin.setup() except AttributeError as e: raise PluginError("{} plugin '{}' did not provide a setup() function" .format(self._base_type.__name__, kind)) from e except TypeError as e: raise PluginError("setup symbol in {} plugin '{}' is not a function" .format(self._base_type.__name__, kind)) from e self._assert_plugin(kind, plugin_type) self._assert_version(kind, plugin_type) return (plugin_type, defaults) def _assert_plugin(self, kind, plugin_type): if kind in self._types: raise PluginError("Tried to register {} plugin for existing kind '{}' " "(already registered {})" .format(self._base_type.__name__, kind, self._types[kind].__name__)) try: if not issubclass(plugin_type, self._base_type): raise PluginError("{} plugin '{}' returned type '{}', which is not a subclass of {}" .format(self._base_type.__name__, kind, plugin_type.__name__, self._base_type.__name__)) except TypeError as e: raise PluginError("{} plugin '{}' returned something that is not a type (expected subclass of {})" .format(self._base_type.__name__, kind, self._base_type.__name__)) from e def _assert_version(self, kind, plugin_type): # Now assert BuildStream version bst_major, bst_minor = utils.get_bst_version() if bst_major < plugin_type.BST_REQUIRED_VERSION_MAJOR or \ (bst_major == plugin_type.BST_REQUIRED_VERSION_MAJOR and bst_minor < plugin_type.BST_REQUIRED_VERSION_MINOR): raise PluginError("BuildStream {}.{} is too old for {} plugin '{}' (requires {}.{})" .format( bst_major, bst_minor, self._base_type.__name__, kind, plugin_type.BST_REQUIRED_VERSION_MAJOR, plugin_type.BST_REQUIRED_VERSION_MINOR)) # If a BST_MIN_VERSION was specified, then we need to raise an error # that we are loading a plugin which targets the 
wrong BuildStream version. # try: min_version = plugin_type.BST_MIN_VERSION except AttributeError: return # Handle malformed version string specified by plugin # try: major, _ = utils._parse_version(min_version) except UtilError as e: raise PluginError( "Loaded plugin '{}' is not a BuildStream 1 plugin".format(kind), detail="Error parsing BST_MIN_VERSION: {}".format(e), reason="plugin-version-mismatch" ) from e raise PluginError( "Loaded plugin '{}' is a BuildStream {} plugin".format(kind, major), detail="You need to use BuildStream 1 plugins with BuildStream 1 projects", reason="plugin-version-mismatch" ) # _assert_plugin_format() # # Helper to raise a PluginError if the loaded plugin is of a lesser version then # the required version for this plugin # def _assert_plugin_format(self, plugin, version): if plugin.BST_FORMAT_VERSION < version: raise LoadError(LoadErrorReason.UNSUPPORTED_PLUGIN, "{}: Format version {} is too old for requested version {}" .format(plugin, plugin.BST_FORMAT_VERSION, version)) buildstream-1.6.9/buildstream/_profile.py000066400000000000000000000100121437515270000205240ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import cProfile import pstats import os import datetime import time # Track what profile topics are active active_topics = {} active_profiles = {} initialized = False # Use the topic values here to decide what to profile # by setting them in the BST_PROFILE environment variable. # # Multiple topics can be set with the ':' separator. # # E.g.: # # BST_PROFILE=circ-dep-check:sort-deps bst # # The special 'all' value will enable all profiles. class Topics(): CIRCULAR_CHECK = 'circ-dep-check' SORT_DEPENDENCIES = 'sort-deps' LOAD_LOADER = 'load-loader' LOAD_CONTEXT = 'load-context' LOAD_PROJECT = 'load-project' LOAD_PIPELINE = 'load-pipeline' SHOW = 'show' ARTIFACT_RECEIVE = 'artifact-receive' ALL = 'all' class Profile(): def __init__(self, topic, key, message): self.message = message self.key = topic + '-' + key self.start = time.time() self.profiler = cProfile.Profile() self.profiler.enable() def end(self): self.profiler.disable() filename = self.key.replace('/', '-') filename = filename.replace('.', '-') filename = os.path.join(os.getcwd(), 'profile-' + filename + '.log') with open(filename, "a", encoding="utf-8") as f: dt = datetime.datetime.fromtimestamp(self.start) time_ = dt.strftime('%Y-%m-%d %H:%M:%S') heading = '================================================================\n' heading += 'Profile for key: {}\n'.format(self.key) heading += 'Started at: {}\n'.format(time_) if self.message: heading += '\n {}'.format(self.message) heading += '================================================================\n' f.write(heading) ps = pstats.Stats(self.profiler, stream=f).sort_stats('cumulative') ps.print_stats() # profile_start() # # Start profiling for a given topic. 
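#
# A typical pairing looks like this (the key shown is hypothetical):
#
#   profile_start(Topics.LOAD_PIPELINE, 'some-key')
#   ... the work to be measured ...
#   profile_end(Topics.LOAD_PIPELINE, 'some-key')
#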
# # Args: # topic (str): A topic name # key (str): A key for this profile run # message (str): An optional message to print in profile results # def profile_start(topic, key, message=None): if not profile_enabled(topic): return # Start profiling and hold on to the key profile = Profile(topic, key, message) assert active_profiles.get(profile.key) is None active_profiles[profile.key] = profile # profile_end() # # Ends a profiling session previously # started with profile_start() # # Args: # topic (str): A topic name # key (str): A key for this profile run # def profile_end(topic, key): if not profile_enabled(topic): return topic_key = topic + '-' + key profile = active_profiles.get(topic_key) assert profile profile.end() del active_profiles[topic_key] def profile_init(): global initialized # pylint: disable=global-statement if not initialized: setting = os.getenv('BST_PROFILE') if setting: topics = setting.split(':') for topic in topics: active_topics[topic] = True initialized = True def profile_enabled(topic): profile_init() if active_topics.get(topic): return True if active_topics.get(Topics.ALL): return True return False buildstream-1.6.9/buildstream/_project.py000066400000000000000000000764411437515270000205540ustar00rootroot00000000000000# # Copyright (C) 2016-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Tiago Gomes import os from collections import OrderedDict from collections.abc import Mapping from pluginbase import PluginBase from . import utils from . import _cachekey from . import _site from . import _yaml from .utils import UtilError from ._profile import Topics, profile_start, profile_end from ._exceptions import LoadError, LoadErrorReason from ._options import OptionPool from ._artifactcache import ArtifactCache from ._elementfactory import ElementFactory from ._sourcefactory import SourceFactory from .types import CoreWarnings from ._projectrefs import ProjectRefs, ProjectRefStorage from ._versions import BST_FORMAT_VERSION from ._loader import Loader from .element import Element from ._message import Message, MessageType from ._includes import Includes # Project Configuration file _PROJECT_CONF_FILE = 'project.conf' # HostMount() # # A simple object describing the behavior of # a host mount. # class HostMount(): def __init__(self, path, host_path=None, optional=False): # Support environment variable expansion in host mounts path = os.path.expandvars(path) if host_path is not None: host_path = os.path.expandvars(host_path) self.path = path # Path inside the sandbox self.host_path = host_path # Path on the host self.optional = optional # Optional mounts do not incur warnings or errors if self.host_path is None: self.host_path = self.path # Represents project configuration that can have different values for junctions. 
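#
# Note: a Project instance holds two of these ('first_pass_config' and
# 'config'); the first pass variant is used while only junction elements
# can be loaded, and 'config' is filled in by the second load pass.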
class ProjectConfig: def __init__(self): self.element_factory = None self.source_factory = None self.options = None # OptionPool self.base_variables = {} # The base set of variables self.element_overrides = {} # Element specific configurations self.source_overrides = {} # Source specific configurations self.mirrors = OrderedDict() # contains dicts of alias-mappings to URIs. self.default_mirror = None # The name of the preferred mirror. self._aliases = {} # Aliases dictionary # Project() # # The Project Configuration # class Project(): def __init__(self, directory, context, *, junction=None, cli_options=None, default_mirror=None, parent_loader=None, tempdir=None): # The project name self.name = None # The project directory self.directory = self._ensure_project_dir(directory) # Absolute path to where elements are loaded from within the project self.element_path = None # ProjectRefs for the main refs and also for junctions self.refs = ProjectRefs(self.directory, 'project.refs') self.junction_refs = ProjectRefs(self.directory, 'junction.refs') self.config = ProjectConfig() self.first_pass_config = ProjectConfig() self.junction = junction # The junction Element object, if this is a subproject self.ref_storage = None # ProjectRefStorage setting self.base_environment = {} # The base set of environment variables self.base_env_nocache = None # The base nocache mask (list) for the environment # # Private Members # self._context = context # The invocation Context self._default_mirror = default_mirror # The name of the preferred mirror. self._cli_options = cli_options self._cache_key = None self._fatal_warnings = [] # A list of warnings which should trigger an error self._shell_command = [] # The default interactive shell command self._shell_environment = {} # Statically set environment vars self._shell_host_files = [] # A list of HostMount objects self.artifact_cache_specs = None self._sandbox = None self._splits = None self._context.add_project(self) self._partially_loaded = False self._fully_loaded = False self._project_includes = None profile_start(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-')) self._load(parent_loader=parent_loader, tempdir=tempdir) profile_end(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-')) self._partially_loaded = True @property def options(self): return self.config.options @property def base_variables(self): return self.config.base_variables @property def element_overrides(self): return self.config.element_overrides @property def source_overrides(self): return self.config.source_overrides # translate_url(): # # Translates the given url which may be specified with an alias # into a fully qualified url. 
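    #
    # For example (alias and URL are illustrative): if project.conf declares
    #
    #   aliases:
    #     upstream: https://example.com/sources/
    #
    # then 'upstream:hello.tar.gz' translates to
    # 'https://example.com/sources/hello.tar.gz'.
    #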
# # Args: # url (str): A url, which may be using an alias # first_pass (bool): Whether to use first pass configuration (for junctions) # # Returns: # str: The fully qualified url, with aliases resolved # # This method is provided for :class:`.Source` objects to resolve # fully qualified urls based on the shorthand which is allowed # to be specified in the YAML def translate_url(self, url, *, first_pass=False): if first_pass: config = self.first_pass_config else: config = self.config if url and utils._ALIAS_SEPARATOR in url: url_alias, url_body = url.split(utils._ALIAS_SEPARATOR, 1) alias_url = config._aliases.get(url_alias) if alias_url: url = alias_url + url_body return url # get_shell_config() # # Gets the project specified shell configuration # # Returns: # (list): The shell command # (dict): The shell environment # (list): The list of HostMount objects # def get_shell_config(self): return (self._shell_command, self._shell_environment, self._shell_host_files) # get_cache_key(): # # Returns the cache key, calculating it if necessary # # Returns: # (str): A hex digest cache key for the Context # def get_cache_key(self): if self._cache_key is None: # Anything that alters the build goes into the unique key # (currently nothing here) self._cache_key = _cachekey.generate_key({}) return self._cache_key # create_element() # # Instantiate and return an element # # Args: # meta (MetaElement): The loaded MetaElement # first_pass (bool): Whether to use first pass configuration (for junctions) # # Returns: # (Element): A newly created Element object of the appropriate kind # def create_element(self, meta, *, first_pass=False): if first_pass: return self.first_pass_config.element_factory.create(self._context, self, meta) else: return self.config.element_factory.create(self._context, self, meta) # create_source() # # Instantiate and return a Source # # Args: # meta (MetaSource): The loaded MetaSource # first_pass (bool): Whether to use first pass configuration (for junctions) # # Returns: # (Source): A newly created Source object of the appropriate kind # def create_source(self, meta, *, first_pass=False): if first_pass: return self.first_pass_config.source_factory.create(self._context, self, meta) else: return self.config.source_factory.create(self._context, self, meta) # get_alias_uri() # # Returns the URI for a given alias, if it exists # # Args: # alias (str): The alias. # first_pass (bool): Whether to use first pass configuration (for junctions) # # Returns: # str: The URI for the given alias; or None: if there is no URI for # that alias. def get_alias_uri(self, alias, *, first_pass=False): if first_pass: config = self.first_pass_config else: config = self.config return config._aliases.get(alias) # get_alias_uris() # # Args: # alias (str): The alias. # first_pass (bool): Whether to use first pass configuration (for junctions) # # Returns a list of every URI to replace an alias with def get_alias_uris(self, alias, *, first_pass=False): if first_pass: config = self.first_pass_config else: config = self.config if not alias or alias not in config._aliases: return [None] mirror_list = [] for key, alias_mapping in config.mirrors.items(): if alias in alias_mapping: if key == config.default_mirror: mirror_list = alias_mapping[alias] + mirror_list else: mirror_list += alias_mapping[alias] mirror_list.append(config._aliases[alias]) return mirror_list # load_elements() # # Loads elements from target names. 
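    #
    # A minimal usage sketch (the element name is hypothetical):
    #
    #   elements = project.load_elements(['hello.bst'], fetch_subprojects=True)
    #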
# # Args: # targets (list): Target names # rewritable (bool): Whether the loaded files should be rewritable # this is a bit more expensive due to deep copies # fetch_subprojects (bool): Whether we should fetch subprojects as a part of the # loading process, if they are not yet locally cached # # Returns: # (list): A list of loaded Element # def load_elements(self, targets, *, rewritable=False, fetch_subprojects=False): with self._context.timed_activity("Loading elements", silent_nested=True): meta_elements = self.loader.load(targets, rewritable=rewritable, ticker=None, fetch_subprojects=fetch_subprojects) with self._context.timed_activity("Resolving elements"): elements = [ Element._new_from_meta(meta) for meta in meta_elements ] # Now warn about any redundant source references which may have # been discovered in the resolve() phase. redundant_refs = Element._get_redundant_source_refs() if redundant_refs: detail = "The following inline specified source references will be ignored:\n\n" lines = [ "{}:{}".format(source._get_provenance(), ref) for source, ref in redundant_refs ] detail += "\n".join(lines) self._context.message( Message(None, MessageType.WARN, "Ignoring redundant source references", detail=detail)) return elements # ensure_fully_loaded() # # Ensure project has finished loading. At first initialization, a # project can only load junction elements. Other elements require # project to be fully loaded. # def ensure_fully_loaded(self): if self._fully_loaded: return assert self._partially_loaded self._fully_loaded = True if self.junction: self.junction._get_project().ensure_fully_loaded() self._load_second_pass() # cleanup() # # Cleans up resources used loading elements # def cleanup(self): self.loader.cleanup() # Reset the element loader state Element._reset_load_state() # _load(): # # Loads the project configuration file in the project # directory process the first pass. # # Raises: LoadError if there was a problem with the project.conf # def _load(self, parent_loader=None, tempdir=None): # Load builtin default projectfile = os.path.join(self.directory, _PROJECT_CONF_FILE) self._default_config_node = _yaml.load(_site.default_project_config) # Load project local config and override the builtin try: self._project_conf = _yaml.load(projectfile) except LoadError as e: # Raise a more specific error here if e.reason == LoadErrorReason.MISSING_FILE: raise LoadError(LoadErrorReason.MISSING_PROJECT_CONF, str(e)) from e raise pre_config_node = _yaml.node_copy(self._default_config_node) _yaml.composite(pre_config_node, self._project_conf) # Assert project's format version early, before validating toplevel keys format_version = _yaml.node_get(pre_config_node, int, 'format-version') if BST_FORMAT_VERSION < format_version: major, minor = utils.get_bst_version() raise LoadError( LoadErrorReason.UNSUPPORTED_PROJECT, "Project requested format version {}, but BuildStream {}.{} only supports up until format version {}" .format(format_version, major, minor, BST_FORMAT_VERSION)) # Since BuildStream 2, project.conf is required to specify min-version. # # Detect this and raise an error, indicating which major version of BuildStream # should be used for this project. 
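        #
        # For illustration (hedged sketch, the values are hypothetical): a
        # BuildStream 1 project declares its format with e.g.
        #
        #   name: my-project
        #   format-version: 18
        #
        # whereas a BuildStream 2 project would instead declare something like
        #
        #   name: my-project
        #   min-version: 2.0
        #
        # which is what we detect and reject below.
        #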
# min_version = _yaml.node_get(pre_config_node, str, 'min-version', default_value=None) if min_version: # Handle case of malformed min-version # try: major, minor = utils._parse_version(min_version) except UtilError as e: raise LoadError( LoadErrorReason.UNSUPPORTED_PROJECT, "This is not a BuildStream 1 project: {}".format(e) ) from e # Raise a helpful error indicating what the user should do to # use this project. # raise LoadError( LoadErrorReason.UNSUPPORTED_PROJECT, "Tried to load a BuildStream {} project with BuildStream 1".format(major), # TODO: Include a link to the appropriate documentation for parallel # installing different BuildStream versions. # detail="Please install at least BuildStream {}.{} to use this project".format(major, minor) ) # FIXME: # # Performing this check manually in the absense # of proper support from _yaml.node_get(), this should # be removed in favor of a proper accessor function # from the _yaml module when #591 is fixed. # if self._project_conf.get('name') is None: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: project.conf does not contain expected key '{}'".format(projectfile, 'name')) # The project name, element path and option declarations # are constant and cannot be overridden by option conditional statements self.name = _yaml.node_get(pre_config_node, str, 'name') # Validate that project name is a valid symbol name _yaml.assert_symbol_name(_yaml.node_get_provenance(pre_config_node, 'name'), self.name, "project name") self.element_path = os.path.join( self.directory, _yaml.node_get_project_path(pre_config_node, 'element-path', self.directory, check_is_dir=True) ) self.config.options = OptionPool(self.element_path) self.first_pass_config.options = OptionPool(self.element_path) self.loader = Loader(self._context, self, parent=parent_loader, tempdir=tempdir) self._project_includes = Includes(self.loader, copy_tree=False) project_conf_first_pass = _yaml.node_copy(self._project_conf) self._project_includes.process(project_conf_first_pass, only_local=True, process_project_options=False) config_no_include = _yaml.node_copy(self._default_config_node) _yaml.composite(config_no_include, project_conf_first_pass) self._load_pass(config_no_include, self.first_pass_config, ignore_unknown=True) # Use separate file for storing source references self.ref_storage = _yaml.node_get(pre_config_node, str, 'ref-storage') if self.ref_storage not in [ProjectRefStorage.INLINE, ProjectRefStorage.PROJECT_REFS]: p = _yaml.node_get_provenance(pre_config_node, 'ref-storage') raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Invalid value '{}' specified for ref-storage" .format(p, self.ref_storage)) if self.ref_storage == ProjectRefStorage.PROJECT_REFS: self.junction_refs.load(self.first_pass_config.options) # _load_second_pass() # # Process the second pass of loading the project configuration. 
# def _load_second_pass(self): project_conf_second_pass = _yaml.node_copy(self._project_conf) self._project_includes.process(project_conf_second_pass, process_project_options=False) config = _yaml.node_copy(self._default_config_node) _yaml.composite(config, project_conf_second_pass) self._load_pass(config, self.config) _yaml.node_validate(config, [ 'format-version', 'element-path', 'variables', 'environment', 'environment-nocache', 'split-rules', 'elements', 'plugins', 'aliases', 'name', 'artifacts', 'options', 'fail-on-overlap', 'shell', 'fatal-warnings', 'ref-storage', 'sandbox', 'mirrors' ]) # # Now all YAML composition is done, from here on we just load # the values from our loaded configuration dictionary. # # Load artifacts pull/push configuration for this project self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config, self.directory) # Load sandbox environment variables self.base_environment = _yaml.node_get(config, Mapping, 'environment') self.base_env_nocache = _yaml.node_get(config, list, 'environment-nocache') # Load sandbox configuration self._sandbox = _yaml.node_get(config, Mapping, 'sandbox') # Load project split rules self._splits = _yaml.node_get(config, Mapping, 'split-rules') # Fatal warnings self._fatal_warnings = _yaml.node_get(config, list, 'fatal-warnings', default_value=[]) # Support backwards compatibility for fail-on-overlap fail_on_overlap = _yaml.node_get(config, bool, 'fail-on-overlap', default_value=None) if (CoreWarnings.OVERLAPS not in self._fatal_warnings) and fail_on_overlap: self._fatal_warnings.append(CoreWarnings.OVERLAPS) # Deprecation check if fail_on_overlap is not None: self._context.message( Message( None, MessageType.WARN, "Use of fail-on-overlap within project.conf " + "is deprecated. Consider using fatal-warnings instead." ) ) # Load project.refs if it exists, this may be ignored. if self.ref_storage == ProjectRefStorage.PROJECT_REFS: self.refs.load(self.options) # Parse shell options shell_options = _yaml.node_get(config, Mapping, 'shell') _yaml.node_validate(shell_options, ['command', 'environment', 'host-files']) self._shell_command = _yaml.node_get(shell_options, list, 'command') # Perform environment expansion right away shell_environment = _yaml.node_get(shell_options, Mapping, 'environment', default_value={}) for key, _ in _yaml.node_items(shell_environment): value = _yaml.node_get(shell_environment, str, key) self._shell_environment[key] = os.path.expandvars(value) # Host files is parsed as a list for convenience host_files = _yaml.node_get(shell_options, list, 'host-files', default_value=[]) for host_file in host_files: if isinstance(host_file, str): mount = HostMount(host_file) else: # Some validation index = host_files.index(host_file) host_file_desc = _yaml.node_get(shell_options, Mapping, 'host-files', indices=[index]) _yaml.node_validate(host_file_desc, ['path', 'host_path', 'optional']) # Parse the host mount path = _yaml.node_get(host_file_desc, str, 'path') host_path = _yaml.node_get(host_file_desc, str, 'host_path', default_value=None) optional = _yaml.node_get(host_file_desc, bool, 'optional', default_value=False) mount = HostMount(path, host_path, optional) self._shell_host_files.append(mount) # _load_pass(): # # Loads parts of the project configuration that are different # for first and second pass configurations. # # Args: # config (dict) - YaML node of the configuration file. # output (ProjectConfig) - ProjectConfig to load configuration onto. 
    #    ignore_unknown (bool) - Whether the option loader should ignore unknown options.
    #
    def _load_pass(self, config, output, *, ignore_unknown=False):

        self._load_plugin_factories(config, output)

        # Load project options
        options_node = _yaml.node_get(config, Mapping, 'options', default_value={})
        output.options.load(options_node)
        if self.junction:
            # load before user configuration
            output.options.load_yaml_values(self.junction.options, transform=self.junction._subst_string)

        # Collect option values specified in the user configuration
        overrides = self._context.get_overrides(self.name)
        override_options = _yaml.node_get(overrides, Mapping, 'options', default_value={})
        output.options.load_yaml_values(override_options)
        if self._cli_options:
            output.options.load_cli_values(self._cli_options, ignore_unknown=ignore_unknown)

        # We're done modifying options, now we can use them for substitutions
        output.options.resolve()

        #
        # Now resolve any conditionals in the remaining configuration;
        # any conditionals specified for project option declarations,
        # or conditionally specifying the project name, will be ignored.
        output.options.process_node(config)

        # Element and Source type configurations will be composited later onto
        # element/source types, so we delete it from here and run our final
        # assertion after.
        output.element_overrides = _yaml.node_get(config, Mapping, 'elements', default_value={})
        output.source_overrides = _yaml.node_get(config, Mapping, 'sources', default_value={})
        config.pop('elements', None)
        config.pop('sources', None)
        _yaml.node_final_assertions(config)

        # Load base variables
        output.base_variables = _yaml.node_get(config, Mapping, 'variables')

        # Add the project name as a default variable
        output.base_variables['project-name'] = self.name

        # Extend variables with automatic variables and option exports
        # Initialize it as a string as all variables are processed as strings.
        # Based on some testing (mainly on AWS), the maximum effective
        # max-jobs value seems to be around 8-10 if we have enough cores;
        # users should set values based on workload and build infrastructure.
        if self._context.build_max_jobs == 0:
            # User requested automatic max-jobs
            output.base_variables['max-jobs'] = str(min(len(os.sched_getaffinity(0)), 8))
        else:
            # User requested explicit max-jobs setting
            output.base_variables['max-jobs'] = str(self._context.build_max_jobs)

        # Export options into variables, if that was requested
        output.options.export_variables(output.base_variables)

        # Override default_mirror if not set by command-line
        output.default_mirror = self._default_mirror or _yaml.node_get(overrides, str, 'default-mirror', default_value=None)

        mirrors = _yaml.node_get(config, list, 'mirrors', default_value=[])
        for mirror in mirrors:
            allowed_mirror_fields = [ 'name', 'aliases' ]
            _yaml.node_validate(mirror, allowed_mirror_fields)
            mirror_name = _yaml.node_get(mirror, str, 'name')
            alias_mappings = {}
            for alias_mapping, uris in _yaml.node_items(mirror['aliases']):
                assert isinstance(uris, list)
                alias_mappings[alias_mapping] = list(uris)
            output.mirrors[mirror_name] = alias_mappings
            if not output.default_mirror:
                output.default_mirror = mirror_name

        # Source url aliases
        output._aliases = _yaml.node_get(config, Mapping, 'aliases', default_value={})

    # _ensure_project_dir()
    #
    # Returns the path of the project directory, if a configuration file is found
    # in the given directory or any of its parent directories.
# # Args: # directory (str) - directory from where the command was invoked # # Raises: # LoadError if project.conf is not found # def _ensure_project_dir(self, directory): directory = os.path.abspath(directory) while not os.path.isfile(os.path.join(directory, _PROJECT_CONF_FILE)): parent_dir = os.path.dirname(directory) if directory == parent_dir: raise LoadError( LoadErrorReason.MISSING_PROJECT_CONF, '{} not found in current directory or any of its parent directories' .format(_PROJECT_CONF_FILE)) directory = parent_dir return directory def _load_plugin_factories(self, config, output): plugin_source_origins = [] # Origins of custom sources plugin_element_origins = [] # Origins of custom elements # Plugin origins and versions origins = _yaml.node_get(config, list, 'plugins', default_value=[]) source_format_versions = {} element_format_versions = {} for origin in origins: allowed_origin_fields = [ 'origin', 'sources', 'elements', 'package-name', 'path', ] allowed_origins = ['core', 'local', 'pip'] _yaml.node_validate(origin, allowed_origin_fields) if origin['origin'] not in allowed_origins: raise LoadError( LoadErrorReason.INVALID_YAML, "Origin '{}' is not one of the allowed types" .format(origin['origin'])) # Store source versions for checking later source_versions = _yaml.node_get(origin, Mapping, 'sources', default_value={}) for key, _ in _yaml.node_items(source_versions): if key in source_format_versions: raise LoadError( LoadErrorReason.INVALID_YAML, "Duplicate listing of source '{}'".format(key)) source_format_versions[key] = _yaml.node_get(source_versions, int, key) # Store element versions for checking later element_versions = _yaml.node_get(origin, Mapping, 'elements', default_value={}) for key, _ in _yaml.node_items(element_versions): if key in element_format_versions: raise LoadError( LoadErrorReason.INVALID_YAML, "Duplicate listing of element '{}'".format(key)) element_format_versions[key] = _yaml.node_get(element_versions, int, key) # Store the origins if they're not 'core'. # core elements are loaded by default, so storing is unnecessary. if _yaml.node_get(origin, str, 'origin') != 'core': self._store_origin(origin, 'sources', plugin_source_origins) self._store_origin(origin, 'elements', plugin_element_origins) pluginbase = PluginBase(package='buildstream.plugins') output.element_factory = ElementFactory(pluginbase, plugin_origins=plugin_element_origins, format_versions=element_format_versions) output.source_factory = SourceFactory(pluginbase, plugin_origins=plugin_source_origins, format_versions=source_format_versions) # _store_origin() # # Helper function to store plugin origins # # Args: # origin (dict) - a dictionary indicating the origin of a group of # plugins. 
# plugin_group (str) - The name of the type of plugin that is being # loaded # destination (list) - A list of dicts to store the origins in # # Raises: # LoadError if 'origin' is an unexpected value def _store_origin(self, origin, plugin_group, destination): expected_groups = ['sources', 'elements'] if plugin_group not in expected_groups: raise LoadError(LoadErrorReason.INVALID_DATA, "Unexpected plugin group: {}, expecting {}" .format(plugin_group, expected_groups)) if plugin_group in origin: origin_dict = _yaml.node_copy(origin) plugins = _yaml.node_get(origin, Mapping, plugin_group, default_value={}) origin_dict['plugins'] = [k for k, _ in _yaml.node_items(plugins)] for group in expected_groups: if group in origin_dict: del origin_dict[group] if origin_dict['origin'] == 'local': path = _yaml.node_get_project_path(origin, 'path', self.directory, check_is_dir=True) # paths are passed in relative to the project, but must be absolute origin_dict['path'] = os.path.join(self.directory, path) destination.append(origin_dict) # _warning_is_fatal(): # # Returns true if the warning in question should be considered fatal based on # the project configuration. # # Args: # warning_str (str): The warning configuration string to check against # # Returns: # (bool): True if the warning should be considered fatal and cause an error. # def _warning_is_fatal(self, warning_str): return warning_str in self._fatal_warnings buildstream-1.6.9/buildstream/_projectrefs.py000066400000000000000000000121521437515270000214210ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os from . import _yaml from ._exceptions import LoadError, LoadErrorReason # ProjectRefStorage() # # Indicates the type of ref storage class ProjectRefStorage(): # Source references are stored inline # INLINE = 'inline' # Source references are stored in a central project.refs file # PROJECT_REFS = 'project.refs' # ProjectRefs() # # The project.refs file management # # Args: # directory (str): The project directory # base_name (str): The project.refs basename # class ProjectRefs(): def __init__(self, directory, base_name): directory = os.path.abspath(directory) self._fullpath = os.path.join(directory, base_name) self._base_name = base_name self._toplevel_node = None self._toplevel_save = None # load() # # Load the project.refs file # # Args: # options (OptionPool): To resolve conditional statements # def load(self, options): try: self._toplevel_node = _yaml.load(self._fullpath, shortname=self._base_name, copy_tree=True) provenance = _yaml.node_get_provenance(self._toplevel_node) self._toplevel_save = provenance.toplevel # Process any project options immediately options.process_node(self._toplevel_node) # Run any final assertions on the project.refs, just incase there # are list composition directives or anything left unprocessed. 
            _yaml.node_final_assertions(self._toplevel_node)

        except LoadError as e:
            if e.reason != LoadErrorReason.MISSING_FILE:
                raise

            # Ignore failure if the file doesn't exist; it'll be created and
            # for now is just assumed to be empty
            self._toplevel_node = {}
            self._toplevel_save = self._toplevel_node

        _yaml.node_validate(self._toplevel_node, ['projects'])

        # Ensure we create our toplevel entry point on the fly here
        for node in [self._toplevel_node, self._toplevel_save]:
            if 'projects' not in node:
                node['projects'] = {}

    # save()
    #
    # Save the project.refs file with any local changes
    #
    def save(self):
        _yaml.dump(self._toplevel_save, self._fullpath)

    # lookup_ref()
    #
    # Fetch the ref node for a given Source. If the ref node does not
    # exist and `write` is specified, it will be automatically created.
    #
    # Args:
    #    project (str): The project to lookup
    #    element (str): The element name to lookup
    #    source_index (int): The index of the Source in the specified element
    #    write (bool): Whether we want to read the node or write to it
    #
    # Returns:
    #    (node): The YAML dictionary where the ref is stored
    #
    def lookup_ref(self, project, element, source_index, *, write=False):

        node = self._lookup(self._toplevel_node, project, element, source_index)

        if write:

            if node is not None:
                provenance = _yaml.node_get_provenance(node)
                if provenance:
                    node = provenance.node

            # If we couldn't find the original, create a new one.
            #
            if node is None:
                node = self._lookup(self._toplevel_save, project, element, source_index, ensure=True)

        return node

    # _lookup()
    #
    # Looks up a ref node in the project.refs file, creates one if ensure is True.
    #
    def _lookup(self, toplevel, project, element, source_index, *, ensure=False):
        # Fetch the project
        try:
            project_node = toplevel['projects'][project]
        except KeyError:
            if not ensure:
                return None
            project_node = toplevel['projects'][project] = {}

        # Fetch the element
        try:
            element_list = project_node[element]
        except KeyError:
            if not ensure:
                return None
            element_list = project_node[element] = []

        # Fetch the source index
        try:
            node = element_list[source_index]
        except IndexError:
            if not ensure:
                return None
            # Pad the list with empty newly created dictionaries
            element_list.extend({} for _ in range(len(element_list), source_index + 1))
            node = element_list[source_index]

        return node
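
# Illustrative sketch (not part of this module): a standalone rendition of the
# on-demand creation that _lookup() performs when `ensure=True` and the
# requested source index is not yet present. The helper name and the example
# project/element names below are hypothetical.
def _ensure_ref_node_example(toplevel, project, element, source_index):
    # Create the nested 'projects' -> project -> element entries on demand
    project_node = toplevel.setdefault('projects', {}).setdefault(project, {})
    element_list = project_node.setdefault(element, [])
    # Pad the list with empty dicts up to and including the requested index,
    # mirroring the element_list.extend(...) call in _lookup() above
    element_list.extend({} for _ in range(len(element_list), source_index + 1))
    return element_list[source_index]

# Usage: requesting index 2 on an empty toplevel yields three empty ref nodes
#
#     refs = {}
#     node = _ensure_ref_node_example(refs, 'myproject', 'base/alpine.bst', 2)
#     assert refs['projects']['myproject']['base/alpine.bst'] == [{}, {}, node]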
buildstream-1.6.9/buildstream/_protos/000077500000000000000000000000001437515270000200465ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/__init__.py000066400000000000000000000000001437515270000221450ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/000077500000000000000000000000001437515270000211455ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/__init__.py000066400000000000000000000000001437515270000232440ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/000077500000000000000000000000001437515270000222425ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/__init__.py000066400000000000000000000000001437515270000243410ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/000077500000000000000000000000001437515270000235355ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/__init__.py000066400000000000000000000000001437515270000256340ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/000077500000000000000000000000001437515270000246545ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/__init__.py000066400000000000000000000000001437515270000267530ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/000077500000000000000000000000001437515270000252025ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/__init__.py000066400000000000000000000000001437515270000273010ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto000066400000000000000000000521061437515270000307650ustar00rootroot00000000000000// Copyright 2020 The Bazel Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package build.bazel.remote.asset.v1; import "build/bazel/remote/execution/v2/remote_execution.proto"; import "google/api/annotations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; option csharp_namespace = "Build.Bazel.Remote.Asset.v1"; option go_package = "remoteasset"; option java_multiple_files = true; option java_outer_classname = "RemoteAssetProto"; option java_package = "build.bazel.remote.asset.v1"; option objc_class_prefix = "RA"; // The Remote Asset API provides a mapping from a URI and Qualifiers to // Digests. // // Multiple URIs may be used to refer to the same content. For example, the // same tarball may exist at multiple mirrors and thus be retrievable from // multiple URLs. When URLs are used, these should refer to actual content as // Fetch service implementations may choose to fetch the content directly // from the origin. 
For example, the HEAD of a git repository's active branch // can be referred to as: // // uri: https://github.com/bazelbuild/remote-apis.git // // URNs may be used to strongly identify content, for instance by using the // uuid namespace identifier: urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6. // This is most applicable to named content that is Push'd, where the URN // serves as an agreed-upon key, but carries no other inherent meaning. // // Service implementations may choose to support only URLs, only URNs for // Push'd content, only other URIs for which the server and client agree upon // semantics of, or any mixture of the above. // Qualifiers are used to disambiguate or sub-select content that shares a URI. // This may include specifying a particular commit or branch, in the case of // URIs referencing a repository; they could also be used to specify a // particular subdirectory of a repository or tarball. Qualifiers may also be // used to ensure content matches what the client expects, even when there is // no ambiguity to be had - for example, a qualifier specifying a checksum // value. // // In cases where the semantics of the request are not immediately clear from // the URL and/or qualifiers - e.g. dictated by URL scheme - it is recommended // to use an additional qualifier to remove the ambiguity. The `resource_type` // qualifier is recommended for this purpose. // // Qualifiers may be supplied in any order. message Qualifier { // The "name" of the qualifier, for example "resource_type". // No separation is made between 'standard' and 'nonstandard' // qualifiers, in accordance with https://tools.ietf.org/html/rfc6648, // however implementers *SHOULD* take care to avoid ambiguity. string name = 1; // The "value" of the qualifier. Semantics will be dictated by the name. string value = 2; } // The Fetch service resolves or fetches assets referenced by URI and // Qualifiers, returning a Digest for the content in // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. // // As with other services in the Remote Execution API, any call may return an // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing // information about when the client should retry the request; clients SHOULD // respect the information provided. service Fetch { // Resolve or fetch referenced assets, making them available to the caller and // other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. // // Servers *MAY* fetch content that they do not already have cached, for any // URLs they support. // // Servers *SHOULD* ensure that referenced files are present in the CAS at the // time of the response, and (if supported) that they will remain available // for a reasonable period of time. The TTLs of the referenced blobs *SHOULD* // be increased if necessary and applicable. // In the event that a client receives a reference to content that is no // longer present, it *MAY* re-issue the request with // `oldest_content_accepted` set to a more recent timestamp than the original // attempt, to induce a re-fetch from origin. // // Servers *MAY* cache fetched content and reuse it for subsequent requests, // subject to `oldest_content_accepted`. // // Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push] // API and allow content to be directly inserted for use in future fetch // responses. 
// // Servers *MUST* ensure Fetch'd content matches all the specified // qualifiers except in the case of previously Push'd resources, for which // the server *MAY* trust the pushing client to have set the qualifiers // correctly, without validation. // // Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push] // API *MUST* reject requests containing qualifiers it does not support. // // Servers *MAY* transform assets as part of the fetch. For example a // tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory] // might be unpacked, or a Git repository // fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob] // might be passed through `git-archive`. // // Errors handling the requested assets will be returned as gRPC Status errors // here; errors outside the server's control will be returned inline in the // `status` field of the response (see comment there for details). // The possible RPC errors include: // * `INVALID_ARGUMENT`: One or more arguments were invalid, such as a // qualifier that is not supported by the server. // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to // perform the requested operation. The client may retry after a delay. // * `UNAVAILABLE`: Due to a transient condition the operation could not be // completed. The client should retry. // * `INTERNAL`: An internal error occurred while performing the operation. // The client should retry. // * `DEADLINE_EXCEEDED`: The fetch could not be completed within the given // RPC deadline. The client should retry for at least as long as the value // provided in `timeout` field of the request. // // In the case of unsupported qualifiers, the server *SHOULD* additionally // send a [BadRequest][google.rpc.BadRequest] error detail where, for each // unsupported qualifier, there is a `FieldViolation` with a `field` of // `qualifiers.name` and a `description` of `"{qualifier}" not supported` // indicating the name of the unsupported qualifier. rpc FetchBlob(FetchBlobRequest) returns (FetchBlobResponse) { option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchBlob" body: "*" }; } rpc FetchDirectory(FetchDirectoryRequest) returns (FetchDirectoryResponse) { option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchDirectory" body: "*" }; } } // A request message for // [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]. message FetchBlobRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The timeout for the underlying fetch, if content needs to be retrieved from // origin. // // If unset, the server *MAY* apply an implementation-defined timeout. // // If set, and the user-provided timeout exceeds the RPC deadline, the server // *SHOULD* keep the fetch going after the RPC completes, to be made // available for future Fetch calls. The server may also enforce (via clamping // and/or an INVALID_ARGUMENT error) implementation-defined minimum and // maximum timeout values. // // If this timeout is exceeded on an attempt to retrieve content from origin // the client will receive DEADLINE_EXCEEDED in [FetchBlobResponse.status]. 
google.protobuf.Duration timeout = 2; // The oldest content the client is willing to accept, as measured from the // time it was Push'd or when the underlying retrieval from origin was // started. // Upon retries of Fetch requests that cannot be completed within a single // RPC, clients *SHOULD* provide the same value for subsequent requests as the // original, to simplify combining the request with the previous attempt. // // If unset, the client *SHOULD* accept content of any age. google.protobuf.Timestamp oldest_content_accepted = 3; // The URI(s) of the content to fetch. These may be resources that the server // can directly fetch from origin, in which case multiple URIs *SHOULD* // represent the same content available at different locations (such as an // origin and secondary mirrors). These may also be URIs for content known to // the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push] // service. // // Clients *MUST* supply at least one URI. Servers *MAY* match any one of the // supplied URIs. repeated string uris = 4; // Qualifiers sub-specifying the content to fetch - see comments on // [Qualifier][build.bazel.remote.asset.v1.Qualifier]. // The same qualifiers apply to all URIs. // // Specified qualifier names *MUST* be unique. repeated Qualifier qualifiers = 5; } // A response message for // [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]. message FetchBlobResponse { // If the status has a code other than `OK`, it indicates that the operation // was unable to be completed for reasons outside the servers' control. // The possible fetch errors include: // * `DEADLINE_EXCEEDED`: The operation could not be completed within the // specified timeout. // * `NOT_FOUND`: The requested asset was not found at the specified location. // * `PERMISSION_DENIED`: The request was rejected by a remote server, or // requested an asset from a disallowed origin. // * `ABORTED`: The operation could not be completed, typically due to a // failed consistency check. google.rpc.Status status = 1; // The uri from the request that resulted in a successful retrieval, or from // which the error indicated in `status` was obtained. string uri = 2; // Any qualifiers known to the server and of interest to clients. repeated Qualifier qualifiers = 3; // A minimum timestamp the content is expected to be available through. // Servers *MAY* omit this field, if not known with confidence. google.protobuf.Timestamp expires_at = 4; // The result of the fetch, if the status had code `OK`. // The digest of the file's contents, available for download through the CAS. build.bazel.remote.execution.v2.Digest blob_digest = 5; } // A request message for // [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]. message FetchDirectoryRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The timeout for the underlying fetch, if content needs to be retrieved from // origin. This value is allowed to exceed the RPC deadline, in which case the // server *SHOULD* keep the fetch going after the RPC completes, to be made // available for future Fetch calls. 
// // If this timeout is exceeded on an attempt to retrieve content from origin // the client will receive DEADLINE_EXCEEDED in [FetchDirectoryResponse.status]. google.protobuf.Duration timeout = 2; // The oldest content the client is willing to accept, as measured from the // time it was Push'd or when the underlying retrieval from origin was // started. // Upon retries of Fetch requests that cannot be completed within a single // RPC, clients *SHOULD* provide the same value for subsequent requests as the // original, to simplify combining the request with the previous attempt. // // If unset, the client *SHOULD* accept content of any age. google.protobuf.Timestamp oldest_content_accepted = 3; // The URI(s) of the content to fetch. These may be resources that the server // can directly fetch from origin, in which case multiple URIs *SHOULD* // represent the same content available at different locations (such as an // origin and secondary mirrors). These may also be URIs for content known to // the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push] // service. // // Clients *MUST* supply at least one URI. Servers *MAY* match any one of the // supplied URIs. repeated string uris = 4; // Qualifiers sub-specifying the content to fetch - see comments on // [Qualifier][build.bazel.remote.asset.v1.Qualifier]. // The same qualifiers apply to all URIs. // // Specified qualifier names *MUST* be unique. repeated Qualifier qualifiers = 5; } // A response message for // [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]. message FetchDirectoryResponse { // If the status has a code other than `OK`, it indicates that the operation // was unable to be completed for reasons outside the servers' control. // The possible fetch errors include: // * `DEADLINE_EXCEEDED`: The operation could not be completed within the // specified timeout. // * `NOT_FOUND`: The requested asset was not found at the specified location. // * `PERMISSION_DENIED`: The request was rejected by a remote server, or // requested an asset from a disallowed origin. // * `ABORTED`: The operation could not be completed, typically due to a // failed consistency check. google.rpc.Status status = 1; // The uri from the request that resulted in a successful retrieval, or from // which the error indicated in `status` was obtained. string uri = 2; // Any qualifiers known to the server and of interest to clients. repeated Qualifier qualifiers = 3; // A minimum timestamp the content is expected to be available through. // Servers *MAY* omit this field, if not known with confidence. google.protobuf.Timestamp expires_at = 4; // The result of the fetch, if the status had code `OK`. // the root digest of a directory tree, suitable for fetching via // [ContentAddressableStorage.GetTree]. build.bazel.remote.execution.v2.Digest root_directory_digest = 5; } // The Push service is complementary to the Fetch, and allows for // associating contents of URLs to be returned in future Fetch API calls. // // As with other services in the Remote Execution API, any call may return an // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing // information about when the client should retry the request; clients SHOULD // respect the information provided. service Push { // These APIs associate the identifying information of a resource, as // indicated by URI and optionally Qualifiers, with content available in the // CAS. 
For example, associating a repository url and a commit id with a // Directory Digest. // // Servers *SHOULD* only allow trusted clients to associate content, and *MAY* // only allow certain URIs to be pushed. // // Clients *MUST* ensure associated content is available in CAS prior to // pushing. // // Clients *MUST* ensure the Qualifiers listed correctly match the contents, // and Servers *MAY* trust these values without validation. // Fetch servers *MAY* require exact match of all qualifiers when returning // content previously pushed, or allow fetching content with only a subset of // the qualifiers specified on Push. // // Clients can specify expiration information that the server *SHOULD* // respect. Subsequent requests can be used to alter the expiration time. // // A minimal compliant Fetch implementation may support only Push'd content // and return `NOT_FOUND` for any resource that was not pushed first. // Alternatively, a compliant implementation may choose to not support Push // and only return resources that can be Fetch'd from origin. // // Errors will be returned as gRPC Status errors. // The possible RPC errors include: // * `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid. // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to // perform the requested operation. The client may retry after a delay. // * `UNAVAILABLE`: Due to a transient condition the operation could not be // completed. The client should retry. // * `INTERNAL`: An internal error occurred while performing the operation. // The client should retry. rpc PushBlob(PushBlobRequest) returns (PushBlobResponse) { option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushBlob" body: "*" }; } rpc PushDirectory(PushDirectoryRequest) returns (PushDirectoryResponse) { option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushDirectory" body: "*" }; } } // A request message for // [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob]. message PushBlobRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The URI(s) of the content to associate. If multiple URIs are specified, the // pushed content will be available to fetch by specifying any of them. repeated string uris = 2; // Qualifiers sub-specifying the content that is being pushed - see comments // on [Qualifier][build.bazel.remote.asset.v1.Qualifier]. // The same qualifiers apply to all URIs. repeated Qualifier qualifiers = 3; // A time after which this content should stop being returned via [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]. // Servers *MAY* expire content early, e.g. due to storage pressure. google.protobuf.Timestamp expire_at = 4; // The blob to associate. build.bazel.remote.execution.v2.Digest blob_digest = 5; // Referenced blobs or directories that need to not expire before expiration // of this association, in addition to `blob_digest` itself. // These fields are hints - clients *MAY* omit them, and servers *SHOULD* // respect them, at the risk of increased incidents of Fetch responses // indirectly referencing unavailable blobs. 
repeated build.bazel.remote.execution.v2.Digest references_blobs = 6; repeated build.bazel.remote.execution.v2.Digest references_directories = 7; } // A response message for // [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob]. message PushBlobResponse { /* empty */ } // A request message for // [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory]. message PushDirectoryRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The URI(s) of the content to associate. If multiple URIs are specified, the // pushed content will be available to fetch by specifying any of them. repeated string uris = 2; // Qualifiers sub-specifying the content that is being pushed - see comments // on [Qualifier][build.bazel.remote.asset.v1.Qualifier]. // The same qualifiers apply to all URIs. repeated Qualifier qualifiers = 3; // A time after which this content should stop being returned via // [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]. // Servers *MAY* expire content early, e.g. due to storage pressure. google.protobuf.Timestamp expire_at = 4; // Directory to associate build.bazel.remote.execution.v2.Digest root_directory_digest = 5; // Referenced blobs or directories that need to not expire before expiration // of this association, in addition to `root_directory_digest` itself. // These fields are hints - clients *MAY* omit them, and servers *SHOULD* // respect them, at the risk of increased incidents of Fetch responses // indirectly referencing unavailable blobs. repeated build.bazel.remote.execution.v2.Digest references_blobs = 6; repeated build.bazel.remote.execution.v2.Digest references_directories = 7; } // A response message for // [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory]. message PushDirectoryResponse { /* empty */ } buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py000066400000000000000000000253371437515270000310230ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: build/bazel/remote/asset/v1/remote_asset.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2 from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xdc\x01\n\x10\x46\x65tchBlobRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x17oldest_content_accepted\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04uris\x18\x04 \x03(\t\x12:\n\nqualifiers\x18\x05 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\"\xee\x01\n\x11\x46\x65tchBlobResponse\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12.\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12<\n\x0b\x62lob_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe1\x01\n\x15\x46\x65tchDirectoryRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x17oldest_content_accepted\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04uris\x18\x04 \x03(\t\x12:\n\nqualifiers\x18\x05 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\"\xfd\x01\n\x16\x46\x65tchDirectoryResponse\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12.\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x15root_directory_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xeb\x02\n\x0fPushBlobRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04uris\x18\x02 \x03(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12-\n\texpire_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12<\n\x0b\x62lob_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x41\n\x10references_blobs\x18\x06 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12G\n\x16references_directories\x18\x07 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x12\n\x10PushBlobResponse\"\xfa\x02\n\x14PushDirectoryRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04uris\x18\x02 
\x03(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12-\n\texpire_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x15root_directory_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x41\n\x10references_blobs\x18\x06 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12G\n\x16references_directories\x18\x07 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x17\n\x15PushDirectoryResponse2\xdd\x02\n\x05\x46\x65tch\x12\x9e\x01\n\tFetchBlob\x12-.build.bazel.remote.asset.v1.FetchBlobRequest\x1a..build.bazel.remote.asset.v1.FetchBlobResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{instance_name=**}/assets:fetchBlob:\x01*\x12\xb2\x01\n\x0e\x46\x65tchDirectory\x12\x32.build.bazel.remote.asset.v1.FetchDirectoryRequest\x1a\x33.build.bazel.remote.asset.v1.FetchDirectoryResponse\"7\x82\xd3\xe4\x93\x02\x31\",/v1/{instance_name=**}/assets:fetchDirectory:\x01*2\xd4\x02\n\x04Push\x12\x9a\x01\n\x08PushBlob\x12,.build.bazel.remote.asset.v1.PushBlobRequest\x1a-.build.bazel.remote.asset.v1.PushBlobResponse\"1\x82\xd3\xe4\x93\x02+\"&/v1/{instance_name=**}/assets:pushBlob:\x01*\x12\xae\x01\n\rPushDirectory\x12\x31.build.bazel.remote.asset.v1.PushDirectoryRequest\x1a\x32.build.bazel.remote.asset.v1.PushDirectoryResponse\"6\x82\xd3\xe4\x93\x02\x30\"+/v1/{instance_name=**}/assets:pushDirectory:\x01*Ba\n\x1b\x62uild.bazel.remote.asset.v1B\x10RemoteAssetProtoP\x01Z\x0bremoteasset\xa2\x02\x02RA\xaa\x02\x1b\x42uild.Bazel.Remote.Asset.v1b\x06proto3') _QUALIFIER = DESCRIPTOR.message_types_by_name['Qualifier'] _FETCHBLOBREQUEST = DESCRIPTOR.message_types_by_name['FetchBlobRequest'] _FETCHBLOBRESPONSE = DESCRIPTOR.message_types_by_name['FetchBlobResponse'] _FETCHDIRECTORYREQUEST = DESCRIPTOR.message_types_by_name['FetchDirectoryRequest'] _FETCHDIRECTORYRESPONSE = DESCRIPTOR.message_types_by_name['FetchDirectoryResponse'] _PUSHBLOBREQUEST = DESCRIPTOR.message_types_by_name['PushBlobRequest'] _PUSHBLOBRESPONSE = DESCRIPTOR.message_types_by_name['PushBlobResponse'] _PUSHDIRECTORYREQUEST = DESCRIPTOR.message_types_by_name['PushDirectoryRequest'] _PUSHDIRECTORYRESPONSE = DESCRIPTOR.message_types_by_name['PushDirectoryResponse'] Qualifier = _reflection.GeneratedProtocolMessageType('Qualifier', (_message.Message,), { 'DESCRIPTOR' : _QUALIFIER, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.Qualifier) }) _sym_db.RegisterMessage(Qualifier) FetchBlobRequest = _reflection.GeneratedProtocolMessageType('FetchBlobRequest', (_message.Message,), { 'DESCRIPTOR' : _FETCHBLOBREQUEST, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchBlobRequest) }) _sym_db.RegisterMessage(FetchBlobRequest) FetchBlobResponse = _reflection.GeneratedProtocolMessageType('FetchBlobResponse', (_message.Message,), { 'DESCRIPTOR' : _FETCHBLOBRESPONSE, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchBlobResponse) }) _sym_db.RegisterMessage(FetchBlobResponse) FetchDirectoryRequest = _reflection.GeneratedProtocolMessageType('FetchDirectoryRequest', (_message.Message,), { 'DESCRIPTOR' : _FETCHDIRECTORYREQUEST, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchDirectoryRequest) }) _sym_db.RegisterMessage(FetchDirectoryRequest) FetchDirectoryResponse = 
_reflection.GeneratedProtocolMessageType('FetchDirectoryResponse', (_message.Message,), { 'DESCRIPTOR' : _FETCHDIRECTORYRESPONSE, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchDirectoryResponse) }) _sym_db.RegisterMessage(FetchDirectoryResponse) PushBlobRequest = _reflection.GeneratedProtocolMessageType('PushBlobRequest', (_message.Message,), { 'DESCRIPTOR' : _PUSHBLOBREQUEST, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushBlobRequest) }) _sym_db.RegisterMessage(PushBlobRequest) PushBlobResponse = _reflection.GeneratedProtocolMessageType('PushBlobResponse', (_message.Message,), { 'DESCRIPTOR' : _PUSHBLOBRESPONSE, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushBlobResponse) }) _sym_db.RegisterMessage(PushBlobResponse) PushDirectoryRequest = _reflection.GeneratedProtocolMessageType('PushDirectoryRequest', (_message.Message,), { 'DESCRIPTOR' : _PUSHDIRECTORYREQUEST, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushDirectoryRequest) }) _sym_db.RegisterMessage(PushDirectoryRequest) PushDirectoryResponse = _reflection.GeneratedProtocolMessageType('PushDirectoryResponse', (_message.Message,), { 'DESCRIPTOR' : _PUSHDIRECTORYRESPONSE, '__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushDirectoryResponse) }) _sym_db.RegisterMessage(PushDirectoryResponse) _FETCH = DESCRIPTOR.services_by_name['Fetch'] _PUSH = DESCRIPTOR.services_by_name['Push'] if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\033build.bazel.remote.asset.v1B\020RemoteAssetProtoP\001Z\013remoteasset\242\002\002RA\252\002\033Build.Bazel.Remote.Asset.v1' _FETCH.methods_by_name['FetchBlob']._options = None _FETCH.methods_by_name['FetchBlob']._serialized_options = b'\202\323\344\223\002,\"\'/v1/{instance_name=**}/assets:fetchBlob:\001*' _FETCH.methods_by_name['FetchDirectory']._options = None _FETCH.methods_by_name['FetchDirectory']._serialized_options = b'\202\323\344\223\0021\",/v1/{instance_name=**}/assets:fetchDirectory:\001*' _PUSH.methods_by_name['PushBlob']._options = None _PUSH.methods_by_name['PushBlob']._serialized_options = b'\202\323\344\223\002+\"&/v1/{instance_name=**}/assets:pushBlob:\001*' _PUSH.methods_by_name['PushDirectory']._options = None _PUSH.methods_by_name['PushDirectory']._serialized_options = b'\202\323\344\223\0020\"+/v1/{instance_name=**}/assets:pushDirectory:\001*' _QUALIFIER._serialized_start=255 _QUALIFIER._serialized_end=295 _FETCHBLOBREQUEST._serialized_start=298 _FETCHBLOBREQUEST._serialized_end=518 _FETCHBLOBRESPONSE._serialized_start=521 _FETCHBLOBRESPONSE._serialized_end=759 _FETCHDIRECTORYREQUEST._serialized_start=762 _FETCHDIRECTORYREQUEST._serialized_end=987 _FETCHDIRECTORYRESPONSE._serialized_start=990 _FETCHDIRECTORYRESPONSE._serialized_end=1243 _PUSHBLOBREQUEST._serialized_start=1246 _PUSHBLOBREQUEST._serialized_end=1609 _PUSHBLOBRESPONSE._serialized_start=1611 _PUSHBLOBRESPONSE._serialized_end=1629 _PUSHDIRECTORYREQUEST._serialized_start=1632 _PUSHDIRECTORYREQUEST._serialized_end=2010 _PUSHDIRECTORYRESPONSE._serialized_start=2012 _PUSHDIRECTORYRESPONSE._serialized_end=2035 _FETCH._serialized_start=2038 
_FETCH._serialized_end=2387 _PUSH._serialized_start=2390 _PUSH._serialized_end=2730 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py000066400000000000000000000406171437515270000320340ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2 as build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2 class FetchStub(object): """The Fetch service resolves or fetches assets referenced by URI and Qualifiers, returning a Digest for the content in [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.FetchBlob = channel.unary_unary( '/build.bazel.remote.asset.v1.Fetch/FetchBlob', request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString, ) self.FetchDirectory = channel.unary_unary( '/build.bazel.remote.asset.v1.Fetch/FetchDirectory', request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString, ) class FetchServicer(object): """The Fetch service resolves or fetches assets referenced by URI and Qualifiers, returning a Digest for the content in [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def FetchBlob(self, request, context): """Resolve or fetch referenced assets, making them available to the caller and other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. Servers *MAY* fetch content that they do not already have cached, for any URLs they support. Servers *SHOULD* ensure that referenced files are present in the CAS at the time of the response, and (if supported) that they will remain available for a reasonable period of time. The TTLs of the referenced blobs *SHOULD* be increased if necessary and applicable. In the event that a client receives a reference to content that is no longer present, it *MAY* re-issue the request with `oldest_content_accepted` set to a more recent timestamp than the original attempt, to induce a re-fetch from origin. Servers *MAY* cache fetched content and reuse it for subsequent requests, subject to `oldest_content_accepted`. Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push] API and allow content to be directly inserted for use in future fetch responses. 
Servers *MUST* ensure Fetch'd content matches all the specified qualifiers except in the case of previously Push'd resources, for which the server *MAY* trust the pushing client to have set the qualifiers correctly, without validation. Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push] API *MUST* reject requests containing qualifiers it does not support. Servers *MAY* transform assets as part of the fetch. For example a tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory] might be unpacked, or a Git repository fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob] might be passed through `git-archive`. Errors handling the requested assets will be returned as gRPC Status errors here; errors outside the server's control will be returned inline in the `status` field of the response (see comment there for details). The possible RPC errors include: * `INVALID_ARGUMENT`: One or more arguments were invalid, such as a qualifier that is not supported by the server. * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to perform the requested operation. The client may retry after a delay. * `UNAVAILABLE`: Due to a transient condition the operation could not be completed. The client should retry. * `INTERNAL`: An internal error occurred while performing the operation. The client should retry. * `DEADLINE_EXCEEDED`: The fetch could not be completed within the given RPC deadline. The client should retry for at least as long as the value provided in `timeout` field of the request. In the case of unsupported qualifiers, the server *SHOULD* additionally send a [BadRequest][google.rpc.BadRequest] error detail where, for each unsupported qualifier, there is a `FieldViolation` with a `field` of `qualifiers.name` and a `description` of `"{qualifier}" not supported` indicating the name of the unsupported qualifier. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def FetchDirectory(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_FetchServicer_to_server(servicer, server): rpc_method_handlers = { 'FetchBlob': grpc.unary_unary_rpc_method_handler( servicer.FetchBlob, request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.SerializeToString, ), 'FetchDirectory': grpc.unary_unary_rpc_method_handler( servicer.FetchDirectory, request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'build.bazel.remote.asset.v1.Fetch', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Fetch(object): """The Fetch service resolves or fetches assets referenced by URI and Qualifiers, returning a Digest for the content in [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. 
As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ @staticmethod def FetchBlob(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchBlob', build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString, build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def FetchDirectory(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchDirectory', build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString, build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class PushStub(object): """The Push service is complementary to the Fetch, and allows for associating contents of URLs to be returned in future Fetch API calls. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.PushBlob = channel.unary_unary( '/build.bazel.remote.asset.v1.Push/PushBlob', request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString, ) self.PushDirectory = channel.unary_unary( '/build.bazel.remote.asset.v1.Push/PushDirectory', request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString, ) class PushServicer(object): """The Push service is complementary to the Fetch, and allows for associating contents of URLs to be returned in future Fetch API calls. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def PushBlob(self, request, context): """These APIs associate the identifying information of a resource, as indicated by URI and optionally Qualifiers, with content available in the CAS. For example, associating a repository url and a commit id with a Directory Digest. Servers *SHOULD* only allow trusted clients to associate content, and *MAY* only allow certain URIs to be pushed. Clients *MUST* ensure associated content is available in CAS prior to pushing. 
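        A minimal client-side sketch of invoking this RPC through the
        generated stub follows. It is illustrative only: the channel address
        and URI are invented, the request field names (`uris`, `blob_digest`)
        are taken from the Remote Asset API proto, and the digest shown is
        that of an empty blob assumed to already be present in the CAS.

            import grpc

            from buildstream._protos.build.bazel.remote.asset.v1 import (
                remote_asset_pb2,
                remote_asset_pb2_grpc,
            )
            from buildstream._protos.build.bazel.remote.execution.v2 import (
                remote_execution_pb2,
            )

            channel = grpc.insecure_channel('localhost:50051')  # assumed address
            push = remote_asset_pb2_grpc.PushStub(channel)
            request = remote_asset_pb2.PushBlobRequest(
                uris=['https://example.com/hello.tar.gz'],  # invented URI
                # Digest of the (empty) blob previously uploaded to the CAS.
                blob_digest=remote_execution_pb2.Digest(
                    hash='e3b0c44298fc1c149afbf4c8996fb924'
                         '27ae41e4649b934ca495991b7852b855',
                    size_bytes=0,
                ),
            )
            push.PushBlob(request)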
Clients *MUST* ensure the Qualifiers listed correctly match the contents, and Servers *MAY* trust these values without validation. Fetch servers *MAY* require exact match of all qualifiers when returning content previously pushed, or allow fetching content with only a subset of the qualifiers specified on Push. Clients can specify expiration information that the server *SHOULD* respect. Subsequent requests can be used to alter the expiration time. A minimal compliant Fetch implementation may support only Push'd content and return `NOT_FOUND` for any resource that was not pushed first. Alternatively, a compliant implementation may choose to not support Push and only return resources that can be Fetch'd from origin. Errors will be returned as gRPC Status errors. The possible RPC errors include: * `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid. * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to perform the requested operation. The client may retry after a delay. * `UNAVAILABLE`: Due to a transient condition the operation could not be completed. The client should retry. * `INTERNAL`: An internal error occurred while performing the operation. The client should retry. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def PushDirectory(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_PushServicer_to_server(servicer, server): rpc_method_handlers = { 'PushBlob': grpc.unary_unary_rpc_method_handler( servicer.PushBlob, request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.SerializeToString, ), 'PushDirectory': grpc.unary_unary_rpc_method_handler( servicer.PushDirectory, request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'build.bazel.remote.asset.v1.Push', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Push(object): """The Push service is complementary to the Fetch, and allows for associating contents of URLs to be returned in future Fetch API calls. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. 
""" @staticmethod def PushBlob(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushBlob', build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString, build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def PushDirectory(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushDirectory', build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString, build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/000077500000000000000000000000001437515270000255405ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/__init__.py000066400000000000000000000000001437515270000276370ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/000077500000000000000000000000001437515270000260675ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py000066400000000000000000000000001437515270000301660ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto000066400000000000000000001612141437515270000325370ustar00rootroot00000000000000// Copyright 2018 The Bazel Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package build.bazel.remote.execution.v2; import "build/bazel/semver/semver.proto"; import "google/api/annotations.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/duration.proto"; import "google/protobuf/timestamp.proto"; import "google/rpc/status.proto"; option csharp_namespace = "Build.Bazel.Remote.Execution.V2"; option go_package = "remoteexecution"; option java_multiple_files = true; option java_outer_classname = "RemoteExecutionProto"; option java_package = "build.bazel.remote.execution.v2"; option objc_class_prefix = "REX"; // The Remote Execution API is used to execute an // [Action][build.bazel.remote.execution.v2.Action] on the remote // workers. 
// // As with other services in the Remote Execution API, any call may return an // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing // information about when the client should retry the request; clients SHOULD // respect the information provided. service Execution { // Execute an action remotely. // // In order to execute an action, the client must first upload all of the // inputs, the // [Command][build.bazel.remote.execution.v2.Command] to run, and the // [Action][build.bazel.remote.execution.v2.Action] into the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. // It then calls `Execute` with an `action_digest` referring to them. The // server will run the action and eventually return the result. // // The input `Action`'s fields MUST meet the various canonicalization // requirements specified in the documentation for their types so that it has // the same digest as other logically equivalent `Action`s. The server MAY // enforce the requirements and return errors if a non-canonical input is // received. It MAY also proceed without verifying some or all of the // requirements, such as for performance reasons. If the server does not // verify the requirement, then it will treat the `Action` as distinct from // another logically equivalent action if they hash differently. // // Returns a stream of // [google.longrunning.Operation][google.longrunning.Operation] messages // describing the resulting execution, with eventual `response` // [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The // `metadata` on the operation is of type // [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata]. // // If the client remains connected after the first response is returned after // the server, then updates are streamed as if the client had called // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution] // until the execution completes or the request reaches an error. The // operation can also be queried using [Operations // API][google.longrunning.Operations.GetOperation]. // // The server NEED NOT implement other methods or functionality of the // Operations API. // // Errors discovered during creation of the `Operation` will be reported // as gRPC Status errors, while errors that occurred while running the // action will be reported in the `status` field of the `ExecuteResponse`. The // server MUST NOT set the `error` field of the `Operation` proto. // The possible errors include: // * `INVALID_ARGUMENT`: One or more arguments are invalid. // * `FAILED_PRECONDITION`: One or more errors occurred in setting up the // action requested, such as a missing input or command or no worker being // available. The client may be able to fix the errors and retry. // * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run // the action. // * `UNAVAILABLE`: Due to a transient condition, such as all workers being // occupied (and the server does not support a queue), the action could not // be started. The client should retry. // * `INTERNAL`: An internal error occurred in the execution engine or the // worker. // * `DEADLINE_EXCEEDED`: The execution timed out. 
// // In the case of a missing input or command, the server SHOULD additionally // send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail // where, for each requested blob not present in the CAS, there is a // `Violation` with a `type` of `MISSING` and a `subject` of // `"blobs/{hash}/{size}"` indicating the digest of the missing blob. rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" }; } // Wait for an execution operation to complete. When the client initially // makes the request, the server immediately responds with the current status // of the execution. The server will leave the request stream open until the // operation completes, and then respond with the completed operation. The // server MAY choose to stream additional updates as execution progresses, // such as to provide an update as to the state of the execution. rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) { option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" }; } } // The action cache API is used to query whether a given action has already been // performed and, if so, retrieve its result. Unlike the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage], // which addresses blobs by their own content, the action cache addresses the // [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a // digest of the encoded [Action][build.bazel.remote.execution.v2.Action] // which produced them. // // The lifetime of entries in the action cache is implementation-specific, but // the server SHOULD assume that more recently used entries are more likely to // be used again. Additionally, action cache implementations SHOULD ensure that // any blobs referenced in the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] // are still valid when returning a result. // // As with other services in the Remote Execution API, any call may return an // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing // information about when the client should retry the request; clients SHOULD // respect the information provided. service ActionCache { // Retrieve a cached execution result. // // Errors: // * `NOT_FOUND`: The requested `ActionResult` is not in the cache. rpc GetActionResult(GetActionResultRequest) returns (ActionResult) { option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" }; } // Upload a new execution result. // // This method is intended for servers which implement the distributed cache // independently of the // [Execution][build.bazel.remote.execution.v2.Execution] API. As a // result, it is OPTIONAL for servers to implement. // // In order to allow the server to perform access control based on the type of // action, and to assist with client debugging, the client MUST first upload // the [Action][build.bazel.remote.execution.v2.Execution] that produced the // result, along with its // [Command][build.bazel.remote.execution.v2.Command], into the // `ContentAddressableStorage`. // // Errors: // * `NOT_IMPLEMENTED`: This method is not supported by the server. // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the // entry to the cache. 
rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) { option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" }; } } // The CAS (content-addressable storage) is used to store the inputs to and // outputs from the execution service. Each piece of content is addressed by the // digest of its binary data. // // Most of the binary data stored in the CAS is opaque to the execution engine, // and is only used as a communication medium. In order to build an // [Action][build.bazel.remote.execution.v2.Action], // however, the client will need to also upload the // [Command][build.bazel.remote.execution.v2.Command] and input root // [Directory][build.bazel.remote.execution.v2.Directory] for the Action. // The Command and Directory messages must be marshalled to wire format and then // uploaded under the hash as with any other piece of content. In practice, the // input root directory is likely to refer to other Directories in its // hierarchy, which must also each be uploaded on their own. // // For small file uploads the client should group them together and call // [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] // on chunks of no more than 10 MiB. For large uploads, the client must use the // [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The // `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`, // where `instance_name` is as described in the next paragraph, `uuid` is a // version 4 UUID generated by the client, and `hash` and `size` are the // [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The // `uuid` is used only to avoid collisions when multiple clients try to upload // the same file (or the same client tries to upload the file multiple times at // once on different threads), so the client MAY reuse the `uuid` for uploading // different blobs. The `resource_name` may optionally have a trailing filename // (or other metadata) for a client to use if it is storing URLs, as in // `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything // after the `size` is ignored. // // A single server MAY support multiple instances of the execution system, each // with their own workers, storage, cache, etc. The exact relationship between // instances is up to the server. If the server does, then the `instance_name` // is an identifier, possibly containing multiple path segments, used to // distinguish between the various instances on the server, in a manner defined // by the server. For servers which do not support multiple instances, then the // `instance_name` is the empty path and the leading slash is omitted, so that // the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`. // // When attempting an upload, if another client has already completed the upload // (which may occur in the middle of a single upload if another client uploads // the same blob concurrently), the request will terminate immediately with // a response whose `committed_size` is the full size of the uploaded file // (regardless of how much data was transmitted by the client). If the client // completes the upload but the // [Digest][build.bazel.remote.execution.v2.Digest] does not match, an // `INVALID_ARGUMENT` error will be returned. In either case, the client should // not attempt to retry the upload. 
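//
// As a purely illustrative example (hash shortened and UUID invented, in the
// style of the other examples in this file), a client uploading a 1024-byte
// blob with SHA-256 hash `4a73bc9d03...` to a server using the empty
// `instance_name` would issue its ByteStream `Write` against:
//
//   uploads/3fa85f64-5717-4562-b3fc-2c963f66afa6/blobs/4a73bc9d03.../1024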
// // For downloading blobs, the client must use the // [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with // a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where // `instance_name` is the instance name (see above), and `hash` and `size` are // the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. // // The lifetime of entries in the CAS is implementation specific, but it SHOULD // be long enough to allow for newly-added and recently looked-up entries to be // used in subsequent calls (e.g. to // [Execute][build.bazel.remote.execution.v2.Execution.Execute]). // // As with other services in the Remote Execution API, any call may return an // error with a [RetryInfo][google.rpc.RetryInfo] error detail providing // information about when the client should retry the request; clients SHOULD // respect the information provided. service ContentAddressableStorage { // Determine if blobs are present in the CAS. // // Clients can use this API before uploading blobs to determine which ones are // already present in the CAS and do not need to be uploaded again. // // There are no method-specific errors. rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) { option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" }; } // Upload many blobs at once. // // The server may enforce a limit of the combined total size of blobs // to be uploaded using this API. This limit may be obtained using the // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. // Requests exceeding the limit should either be split into smaller // chunks or uploaded using the // [ByteStream API][google.bytestream.ByteStream], as appropriate. // // This request is equivalent to calling a Bytestream `Write` request // on each individual blob, in parallel. The requests may succeed or fail // independently. // // Errors: // * `INVALID_ARGUMENT`: The client attempted to upload more than the // server supported limit. // // Individual requests may return the following errors, additionally: // * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob. // * `INVALID_ARGUMENT`: The // [Digest][build.bazel.remote.execution.v2.Digest] does not match the // provided data. rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) { option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" }; } // Download many blobs at once. // // The server may enforce a limit of the combined total size of blobs // to be downloaded using this API. This limit may be obtained using the // [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. // Requests exceeding the limit should either be split into smaller // chunks or downloaded using the // [ByteStream API][google.bytestream.ByteStream], as appropriate. // // This request is equivalent to calling a Bytestream `Read` request // on each individual blob, in parallel. The requests may succeed or fail // independently. // // Errors: // * `INVALID_ARGUMENT`: The client attempted to read more than the // server supported limit. // // Every error on individual read will be returned in the corresponding digest // status. rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) { option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" }; } // Fetch the entire directory tree rooted at a node. 
// // This request must be targeted at a // [Directory][build.bazel.remote.execution.v2.Directory] stored in the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] // (CAS). The server will enumerate the `Directory` tree recursively and // return every node descended from the root. // // The GetTreeRequest.page_token parameter can be used to skip ahead in // the stream (e.g. when retrying a partially completed and aborted request), // by setting it to a value taken from GetTreeResponse.next_page_token of the // last successfully processed GetTreeResponse). // // The exact traversal order is unspecified and, unless retrieving subsequent // pages from an earlier request, is not guaranteed to be stable across // multiple invocations of `GetTree`. // // If part of the tree is missing from the CAS, the server will return the // portion present and omit the rest. // // * `NOT_FOUND`: The requested tree root is not present in the CAS. rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) { option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" }; } } // The Capabilities service may be used by remote execution clients to query // various server properties, in order to self-configure or return meaningful // error messages. // // The query may include a particular `instance_name`, in which case the values // returned will pertain to that instance. service Capabilities { // GetCapabilities returns the server capabilities configuration. rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) { option (google.api.http) = { get: "/v2/{instance_name=**}/capabilities" }; } } // An `Action` captures all the information about an execution which is required // to reproduce it. // // `Action`s are the core component of the [Execution] service. A single // `Action` represents a repeatable action that can be performed by the // execution service. `Action`s can be succinctly identified by the digest of // their wire format encoding and, once an `Action` has been executed, will be // cached in the action cache. Future requests can then use the cached result // rather than needing to run afresh. // // When a server completes execution of an // [Action][build.bazel.remote.execution.v2.Action], it MAY choose to // cache the [result][build.bazel.remote.execution.v2.ActionResult] in // the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless // `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By // default, future calls to // [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same // `Action` will also serve their results from the cache. Clients must take care // to understand the caching behaviour. Ideally, all `Action`s will be // reproducible so that serving a result from cache is always desirable and // correct. message Action { // The digest of the [Command][build.bazel.remote.execution.v2.Command] // to run, which MUST be present in the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. Digest command_digest = 1; // The digest of the root // [Directory][build.bazel.remote.execution.v2.Directory] for the input // files. The files in the directory tree are available in the correct // location on the build machine before the command is executed. 
The root // directory, as well as every subdirectory and content blob referred to, MUST // be in the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. Digest input_root_digest = 2; reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command]. // A timeout after which the execution should be killed. If the timeout is // absent, then the client is specifying that the execution should continue // as long as the server will let it. The server SHOULD impose a timeout if // the client does not specify one, however, if the client does specify a // timeout that is longer than the server's maximum timeout, the server MUST // reject the request. // // The timeout is a part of the // [Action][build.bazel.remote.execution.v2.Action] message, and // therefore two `Actions` with different timeouts are different, even if they // are otherwise identical. This is because, if they were not, running an // `Action` with a lower timeout than is required might result in a cache hit // from an execution run with a longer timeout, hiding the fact that the // timeout is too short. By encoding it directly in the `Action`, a lower // timeout will result in a cache miss and the execution timeout will fail // immediately, rather than whenever the cache entry gets evicted. google.protobuf.Duration timeout = 6; // If true, then the `Action`'s result cannot be cached. bool do_not_cache = 7; } // A `Command` is the actual command executed by a worker running an // [Action][build.bazel.remote.execution.v2.Action] and specifications of its // environment. // // Except as otherwise required, the environment (such as which system // libraries or binaries are available, and what filesystems are mounted where) // is defined by and specific to the implementation of the remote execution API. message Command { // An `EnvironmentVariable` is one variable to set in the running program's // environment. message EnvironmentVariable { // The variable name. string name = 1; // The variable value. string value = 2; } // The arguments to the command. The first argument must be the path to the // executable, which must be either a relative path, in which case it is // evaluated with respect to the input root, or an absolute path. repeated string arguments = 1; // The environment variables to set when running the program. The worker may // provide its own default environment variables; these defaults can be // overridden using this field. Additional variables can also be specified. // // In order to ensure that equivalent `Command`s always hash to the same // value, the environment variables MUST be lexicographically sorted by name. // Sorting of strings is done by code point, equivalently, by the UTF-8 bytes. repeated EnvironmentVariable environment_variables = 2; // A list of the output files that the client expects to retrieve from the // action. Only the listed files, as well as directories listed in // `output_directories`, will be returned to the client as output. // Other files that may be created during command execution are discarded. // // The paths are relative to the working directory of the action execution. // The paths are specified using a single forward slash (`/`) as a path // separator, even if the execution platform natively uses a different // separator. The path MUST NOT include a trailing slash, nor a leading slash, // being a relative path. 
// // In order to ensure consistent hashing of the same Action, the output paths // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 // bytes). // // An output file cannot be duplicated, be a parent of another output file, be // a child of a listed output directory, or have the same path as any of the // listed output directories. repeated string output_files = 3; // A list of the output directories that the client expects to retrieve from // the action. Only the contents of the indicated directories (recursively // including the contents of their subdirectories) will be // returned, as well as files listed in `output_files`. Other files that may // be created during command execution are discarded. // // The paths are relative to the working directory of the action execution. // The paths are specified using a single forward slash (`/`) as a path // separator, even if the execution platform natively uses a different // separator. The path MUST NOT include a trailing slash, nor a leading slash, // being a relative path. The special value of empty string is allowed, // although not recommended, and can be used to capture the entire working // directory tree, including inputs. // // In order to ensure consistent hashing of the same Action, the output paths // MUST be sorted lexicographically by code point (or, equivalently, by UTF-8 // bytes). // // An output directory cannot be duplicated, be a parent of another output // directory, be a parent of a listed output file, or have the same path as // any of the listed output files. repeated string output_directories = 4; // The platform requirements for the execution environment. The server MAY // choose to execute the action on any worker satisfying the requirements, so // the client SHOULD ensure that running the action on any such worker will // have the same result. Platform platform = 5; // The working directory, relative to the input root, for the command to run // in. It must be a directory which exists in the input tree. If it is left // empty, then the action is run in the input root. string working_directory = 6; } // A `Platform` is a set of requirements, such as hardware, operating system, or // compiler toolchain, for an // [Action][build.bazel.remote.execution.v2.Action]'s execution // environment. A `Platform` is represented as a series of key-value pairs // representing the properties that are required of the platform. message Platform { // A single property for the environment. The server is responsible for // specifying the property `name`s that it accepts. If an unknown `name` is // provided in the requirements for an // [Action][build.bazel.remote.execution.v2.Action], the server SHOULD // reject the execution request. If permitted by the server, the same `name` // may occur multiple times. // // The server is also responsible for specifying the interpretation of // property `value`s. For instance, a property describing how much RAM must be // available may be interpreted as allowing a worker with 16GB to fulfill a // request for 8GB, while a property describing the OS environment on which // the action must be performed may require an exact match with the worker's // OS. // // The server MAY use the `value` of one or more properties to determine how // it sets up the execution environment, such as by making specific system // files available to the worker. message Property { // The property name. string name = 1; // The property value. 
string value = 2; } // The properties that make up this platform. In order to ensure that // equivalent `Platform`s always hash to the same value, the properties MUST // be lexicographically sorted by name, and then by value. Sorting of strings // is done by code point, equivalently, by the UTF-8 bytes. repeated Property properties = 1; } // A `Directory` represents a directory node in a file tree, containing zero or // more children [FileNodes][build.bazel.remote.execution.v2.FileNode], // [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and // [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode]. // Each `Node` contains its name in the directory, either the digest of its // content (either a file blob or a `Directory` proto) or a symlink target, as // well as possibly some metadata about the file or directory. // // In order to ensure that two equivalent directory trees hash to the same // value, the following restrictions MUST be obeyed when constructing a // a `Directory`: // - Every child in the directory must have a path of exactly one segment. // Multiple levels of directory hierarchy may not be collapsed. // - Each child in the directory must have a unique path segment (file name). // - The files, directories and symlinks in the directory must each be sorted // in lexicographical order by path. The path strings must be sorted by code // point, equivalently, by UTF-8 bytes. // // A `Directory` that obeys the restrictions is said to be in canonical form. // // As an example, the following could be used for a file named `bar` and a // directory named `foo` with an executable file named `baz` (hashes shortened // for readability): // // ```json // // (Directory proto) // { // files: [ // { // name: "bar", // digest: { // hash: "4a73bc9d03...", // size: 65534 // } // } // ], // directories: [ // { // name: "foo", // digest: { // hash: "4cf2eda940...", // size: 43 // } // } // ] // } // // // (Directory proto with hash "4cf2eda940..." and size 43) // { // files: [ // { // name: "baz", // digest: { // hash: "b2c941073e...", // size: 1294, // }, // is_executable: true // } // ] // } // ``` message Directory { // The files in the directory. repeated FileNode files = 1; // The subdirectories in the directory. repeated DirectoryNode directories = 2; // The symlinks in the directory. repeated SymlinkNode symlinks = 3; } // A `FileNode` represents a single file and associated metadata. message FileNode { // The name of the file. string name = 1; // The digest of the file's content. Digest digest = 2; reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`. // True if file is executable, false otherwise. bool is_executable = 4; } // A `DirectoryNode` represents a child of a // [Directory][build.bazel.remote.execution.v2.Directory] which is itself // a `Directory` and its associated metadata. message DirectoryNode { // The name of the directory. string name = 1; // The digest of the // [Directory][build.bazel.remote.execution.v2.Directory] object // represented. See [Digest][build.bazel.remote.execution.v2.Digest] // for information about how to take the digest of a proto message. Digest digest = 2; } // A `SymlinkNode` represents a symbolic link. message SymlinkNode { // The name of the symlink. string name = 1; // The target path of the symlink. The path separator is a forward slash `/`. // The target path can be relative to the parent directory of the symlink or // it can be an absolute path starting with `/`. 
Support for absolute paths // can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] // API. The canonical form forbids the substrings `/./` and `//` in the target // path. `..` components are allowed anywhere in the target path. string target = 2; } // A content digest. A digest for a given blob consists of the size of the blob // and its hash. The hash algorithm to use is defined by the server, but servers // SHOULD use SHA-256. // // The size is considered to be an integral part of the digest and cannot be // separated. That is, even if the `hash` field is correctly specified but // `size_bytes` is not, the server MUST reject the request. // // The reason for including the size in the digest is as follows: in a great // many cases, the server needs to know the size of the blob it is about to work // with prior to starting an operation with it, such as flattening Merkle tree // structures or streaming it to a worker. Technically, the server could // implement a separate metadata store, but this results in a significantly more // complicated implementation as opposed to having the client specify the size // up-front (or storing the size along with the digest in every message where // digests are embedded). This does mean that the API leaks some implementation // details of (what we consider to be) a reasonable server implementation, but // we consider this to be a worthwhile tradeoff. // // When a `Digest` is used to refer to a proto message, it always refers to the // message in binary encoded form. To ensure consistent hashing, clients and // servers MUST ensure that they serialize messages according to the following // rules, even if there are alternate valid encodings for the same message. // - Fields are serialized in tag order. // - There are no unknown fields. // - There are no duplicate fields. // - Fields are serialized according to the default semantics for their type. // // Most protocol buffer implementations will always follow these rules when // serializing, but care should be taken to avoid shortcuts. For instance, // concatenating two messages to merge them may produce duplicate fields. message Digest { // The hash. In the case of SHA-256, it will always be a lowercase hex string // exactly 64 characters long. string hash = 1; // The size of the blob, in bytes. int64 size_bytes = 2; } // ExecutedActionMetadata contains details about a completed execution. message ExecutedActionMetadata { // The name of the worker which ran the execution. string worker = 1; // When was the action added to the queue. google.protobuf.Timestamp queued_timestamp = 2; // When the worker received the action. google.protobuf.Timestamp worker_start_timestamp = 3; // When the worker completed the action, including all stages. google.protobuf.Timestamp worker_completed_timestamp = 4; // When the worker started fetching action inputs. google.protobuf.Timestamp input_fetch_start_timestamp = 5; // When the worker finished fetching action inputs. google.protobuf.Timestamp input_fetch_completed_timestamp = 6; // When the worker started executing the action command. google.protobuf.Timestamp execution_start_timestamp = 7; // When the worker completed executing the action command. google.protobuf.Timestamp execution_completed_timestamp = 8; // When the worker started uploading action outputs. google.protobuf.Timestamp output_upload_start_timestamp = 9; // When the worker finished uploading action outputs. 
google.protobuf.Timestamp output_upload_completed_timestamp = 10; } // An ActionResult represents the result of an // [Action][build.bazel.remote.execution.v2.Action] being run. message ActionResult { reserved 1; // Reserved for use as the resource name. // The output files of the action. For each output file requested in the // `output_files` field of the Action, if the corresponding file existed after // the action completed, a single entry will be present in the output list. // // If the action does not produce the requested output, or produces a // directory where a regular file is expected or vice versa, then that output // will be omitted from the list. The server is free to arrange the output // list as desired; clients MUST NOT assume that the output list is sorted. repeated OutputFile output_files = 2; // The output directories of the action. For each output directory requested // in the `output_directories` field of the Action, if the corresponding // directory existed after the action completed, a single entry will be // present in the output list, which will contain the digest of a // [Tree][build.bazel.remote.execution.v2.Tree] message containing the // directory tree, and the path equal exactly to the corresponding Action // output_directories member. // // As an example, suppose the Action had an output directory `a/b/dir` and the // execution produced the following contents in `a/b/dir`: a file named `bar` // and a directory named `foo` with an executable file named `baz`. Then, // output_directory will contain (hashes shortened for readability): // // ```json // // OutputDirectory proto: // { // path: "a/b/dir" // tree_digest: { // hash: "4a73bc9d03...", // size: 55 // } // } // // Tree proto with hash "4a73bc9d03..." and size 55: // { // root: { // files: [ // { // name: "bar", // digest: { // hash: "4a73bc9d03...", // size: 65534 // } // } // ], // directories: [ // { // name: "foo", // digest: { // hash: "4cf2eda940...", // size: 43 // } // } // ] // } // children : { // // (Directory proto with hash "4cf2eda940..." and size 43) // files: [ // { // name: "baz", // digest: { // hash: "b2c941073e...", // size: 1294, // }, // is_executable: true // } // ] // } // } // ``` repeated OutputDirectory output_directories = 3; // The exit code of the command. int32 exit_code = 4; // The standard output buffer of the action. The server will determine, based // on the size of the buffer, whether to return it in raw form or to return // a digest in `stdout_digest` that points to the buffer. If neither is set, // then the buffer is empty. The client SHOULD NOT assume it will get one of // the raw buffer or a digest on any given request and should be prepared to // handle either. bytes stdout_raw = 5; // The digest for a blob containing the standard output of the action, which // can be retrieved from the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. // See `stdout_raw` for when this will be set. Digest stdout_digest = 6; // The standard error buffer of the action. The server will determine, based // on the size of the buffer, whether to return it in raw form or to return // a digest in `stderr_digest` that points to the buffer. If neither is set, // then the buffer is empty. The client SHOULD NOT assume it will get one of // the raw buffer or a digest on any given request and should be prepared to // handle either. 
bytes stderr_raw = 7; // The digest for a blob containing the standard error of the action, which // can be retrieved from the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. // See `stderr_raw` for when this will be set. Digest stderr_digest = 8; // The details of the execution that originally produced this result. ExecutedActionMetadata execution_metadata = 9; } // An `OutputFile` is similar to a // [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an // output in an `ActionResult`. It allows a full file path rather than // only a name. // // `OutputFile` is binary-compatible with `FileNode`. message OutputFile { // The full path of the file relative to the input root, including the // filename. The path separator is a forward slash `/`. Since this is a // relative path, it MUST NOT begin with a leading forward slash. string path = 1; // The digest of the file's content. Digest digest = 2; reserved 3; // Used for a removed field in an earlier version of the API. // True if file is executable, false otherwise. bool is_executable = 4; } // A `Tree` contains all the // [Directory][build.bazel.remote.execution.v2.Directory] protos in a // single directory Merkle tree, compressed into one message. message Tree { // The root directory in the tree. Directory root = 1; // All the child directories: the directories referred to by the root and, // recursively, all its children. In order to reconstruct the directory tree, // the client must take the digests of each of the child directories and then // build up a tree starting from the `root`. repeated Directory children = 2; } // An `OutputDirectory` is the output in an `ActionResult` corresponding to a // directory's full contents rather than a single file. message OutputDirectory { // The full path of the directory relative to the working directory. The path // separator is a forward slash `/`. Since this is a relative path, it MUST // NOT begin with a leading forward slash. The empty string value is allowed, // and it denotes the entire working directory. string path = 1; reserved 2; // Used for a removed field in an earlier version of the API. // The digest of the encoded // [Tree][build.bazel.remote.execution.v2.Tree] proto containing the // directory's contents. Digest tree_digest = 3; } // An `ExecutionPolicy` can be used to control the scheduling of the action. message ExecutionPolicy { // The priority (relative importance) of this action. Generally, a lower value // means that the action should be run sooner than actions having a greater // priority value, but the interpretation of a given value is server- // dependent. A priority of 0 means the *default* priority. Priorities may be // positive or negative, and such actions should run later or sooner than // actions having the default priority, respectively. The particular semantics // of this field is up to the server. In particular, every server will have // their own supported range of priorities, and will decide how these map into // scheduling policy. int32 priority = 1; } // A `ResultsCachePolicy` is used for fine-grained control over how action // outputs are stored in the CAS and Action Cache. message ResultsCachePolicy { // The priority (relative importance) of this content in the overall cache. // Generally, a lower value means a longer retention time or other advantage, // but the interpretation of a given value is server-dependent. A priority of // 0 means a *default* value, decided by the server. 
// // The particular semantics of this field is up to the server. In particular, // every server will have their own supported range of priorities, and will // decide how these map into retention/eviction policy. int32 priority = 1; } // A request message for // [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute]. message ExecuteRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // If true, the action will be executed anew even if its result was already // present in the cache. If false, the result may be served from the // [ActionCache][build.bazel.remote.execution.v2.ActionCache]. bool skip_cache_lookup = 3; reserved 2, 4, 5; // Used for removed fields in an earlier version of the API. // The digest of the [Action][build.bazel.remote.execution.v2.Action] to // execute. Digest action_digest = 6; // An optional policy for execution of the action. // The server will have a default policy if this is not provided. ExecutionPolicy execution_policy = 7; // An optional policy for the results of this execution in the remote cache. // The server will have a default policy if this is not provided. // This may be applied to both the ActionResult and the associated blobs. ResultsCachePolicy results_cache_policy = 8; } // A `LogFile` is a log stored in the CAS. message LogFile { // The digest of the log contents. Digest digest = 1; // This is a hint as to the purpose of the log, and is set to true if the log // is human-readable text that can be usefully displayed to a user, and false // otherwise. For instance, if a command-line client wishes to print the // server logs to the terminal for a failed action, this allows it to avoid // displaying a binary file. bool human_readable = 2; } // The response message for // [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute], // which will be contained in the [response // field][google.longrunning.Operation.response] of the // [Operation][google.longrunning.Operation]. message ExecuteResponse { // The result of the action. ActionResult result = 1; // True if the result was served from cache, false if it was executed. bool cached_result = 2; // If the status has a code other than `OK`, it indicates that the action did // not finish execution. For example, if the operation times out during // execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST // use this field for errors in execution, rather than the error field on the // `Operation` object. // // If the status code is other than `OK`, then the result MUST NOT be cached. // For an error status, the `result` field is optional; the server may // populate the output-, stdout-, and stderr-related fields if it has any // information available, such as the stdout and stderr of a timed-out action. google.rpc.Status status = 3; // An optional list of additional log outputs the server wishes to provide. A // server can use this to return execution-specific logs however it wishes. // This is intended primarily to make it easier for users to debug issues that // may be outside of the actual job execution, such as by identifying the // worker executing the action or by providing logs from the worker's setup // phase. 
The keys SHOULD be human readable so that a client can display them // to a user. map server_logs = 4; } // Metadata about an ongoing // [execution][build.bazel.remote.execution.v2.Execution.Execute], which // will be contained in the [metadata // field][google.longrunning.Operation.response] of the // [Operation][google.longrunning.Operation]. message ExecuteOperationMetadata { // The current stage of execution. enum Stage { UNKNOWN = 0; // Checking the result against the cache. CACHE_CHECK = 1; // Currently idle, awaiting a free machine to execute. QUEUED = 2; // Currently being executed by a worker. EXECUTING = 3; // Finished execution. COMPLETED = 4; } Stage stage = 1; // The digest of the [Action][build.bazel.remote.execution.v2.Action] // being executed. Digest action_digest = 2; // If set, the client can use this name with // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the // standard output. string stdout_stream_name = 3; // If set, the client can use this name with // [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the // standard error. string stderr_stream_name = 4; } // A request message for // [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]. message WaitExecutionRequest { // The name of the [Operation][google.longrunning.operations.v1.Operation] // returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute]. string name = 1; } // A request message for // [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult]. message GetActionResultRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The digest of the [Action][build.bazel.remote.execution.v2.Action] // whose result is requested. Digest action_digest = 2; } // A request message for // [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult]. message UpdateActionResultRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The digest of the [Action][build.bazel.remote.execution.v2.Action] // whose result is being uploaded. Digest action_digest = 2; // The [ActionResult][build.bazel.remote.execution.v2.ActionResult] // to store in the cache. ActionResult action_result = 3; // An optional policy for the results of this execution in the remote cache. // The server will have a default policy if this is not provided. // This may be applied to both the ActionResult and the associated blobs. ResultsCachePolicy results_cache_policy = 4; } // A request message for // [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs]. message FindMissingBlobsRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). 
The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // A list of the blobs to check. repeated Digest blob_digests = 2; } // A response message for // [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs]. message FindMissingBlobsResponse { // A list of the blobs requested *not* present in the storage. repeated Digest missing_blob_digests = 2; } // A request message for // [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]. message BatchUpdateBlobsRequest { // A request corresponding to a single blob that the client wants to upload. message Request { // The digest of the blob. This MUST be the digest of `data`. Digest digest = 1; // The raw binary data. bytes data = 2; } // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The individual upload requests. repeated Request requests = 2; } // A response message for // [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]. message BatchUpdateBlobsResponse { // A response corresponding to a single blob that the client tried to upload. message Response { // The blob digest to which this response corresponds. Digest digest = 1; // The result of attempting to upload that blob. google.rpc.Status status = 2; } // The responses to the requests. repeated Response responses = 1; } // A request message for // [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs]. message BatchReadBlobsRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The individual blob digests. repeated Digest digests = 2; } // A response message for // [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs]. message BatchReadBlobsResponse { // A response corresponding to a single blob that the client tried to upload. message Response { // The digest to which this response corresponds. Digest digest = 1; // The raw binary data. bytes data = 2; // The result of attempting to download that blob. google.rpc.Status status = 3; } // The responses to the requests. repeated Response responses = 1; } // A request message for // [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree]. message GetTreeRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. 
string instance_name = 1; // The digest of the root, which must be an encoded // [Directory][build.bazel.remote.execution.v2.Directory] message // stored in the // [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. Digest root_digest = 2; // A maximum page size to request. If present, the server will request no more // than this many items. Regardless of whether a page size is specified, the // server may place its own limit on the number of items to be returned and // require the client to retrieve more items using a subsequent request. int32 page_size = 3; // A page token, which must be a value received in a previous // [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse]. // If present, the server will use it to return the following page of results. string page_token = 4; } // A response message for // [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree]. message GetTreeResponse { // The directories descended from the requested root. repeated Directory directories = 1; // If present, signifies that there are more results which the client can // retrieve by passing this as the page_token in a subsequent // [request][build.bazel.remote.execution.v2.GetTreeRequest]. // If empty, signifies that this is the last page of results. string next_page_token = 2; } // A request message for // [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. message GetCapabilitiesRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; } // A response message for // [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities]. message ServerCapabilities { // Capabilities of the remote cache system. CacheCapabilities cache_capabilities = 1; // Capabilities of the remote execution system. ExecutionCapabilities execution_capabilities = 2; // Earliest RE API version supported, including deprecated versions. build.bazel.semver.SemVer deprecated_api_version = 3; // Earliest non-deprecated RE API version supported. build.bazel.semver.SemVer low_api_version = 4; // Latest RE API version supported. build.bazel.semver.SemVer high_api_version = 5; } // The digest function used for converting values into keys for CAS and Action // Cache. enum DigestFunction { UNKNOWN = 0; SHA256 = 1; SHA1 = 2; MD5 = 3; } // Describes the server/instance capabilities for updating the action cache. message ActionCacheUpdateCapabilities { bool update_enabled = 1; } // Allowed values for priority in // [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy]. // Used for querying both cache and execution valid priority ranges. message PriorityCapabilities { // Supported range of priorities, including boundaries. message PriorityRange { int32 min_priority = 1; int32 max_priority = 2; } repeated PriorityRange priorities = 1; } // Capabilities of the remote cache system. message CacheCapabilities { // Describes how the server treats absolute symlink targets. enum SymlinkAbsolutePathStrategy { UNKNOWN = 0; // Server will return an INVALID_ARGUMENT on input symlinks with absolute targets.
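The GetTree paging contract above (page_size, page_token, next_page_token) can be driven with a small loop. A sketch, assuming a ContentAddressableStorage stub and that GetTree is a server-streaming RPC (as declared in this revision of the API), so each call yields a stream of GetTreeResponse messages:

from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

def fetch_tree(cas_stub, instance_name, root_digest):
    # Collect every Directory reachable from root_digest, following
    # next_page_token until the server signals the last page with "".
    directories = []
    page_token = ""
    while True:
        request = remote_execution_pb2.GetTreeRequest(
            instance_name=instance_name,
            root_digest=root_digest,
            page_token=page_token)
        next_token = ""
        for response in cas_stub.GetTree(request):
            directories.extend(response.directories)
            next_token = response.next_page_token
        if not next_token:
            return directories
        page_token = next_token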
// If an action tries to create an output symlink with an absolute target, a // FAILED_PRECONDITION will be returned. DISALLOWED = 1; // Server will allow symlink targets to escape the input root tree, possibly // resulting in non-hermetic builds. ALLOWED = 2; } // All the digest functions supported by the remote cache. // Remote cache may support multiple digest functions simultaneously. repeated DigestFunction digest_function = 1; // Capabilities for updating the action cache. ActionCacheUpdateCapabilities action_cache_update_capabilities = 2; // Supported cache priority range for both CAS and ActionCache. PriorityCapabilities cache_priority_capabilities = 3; // Maximum total size of blobs to be uploaded/downloaded using // batch methods. A value of 0 means no limit is set, although // in practice there will always be a message size limitation // of the protocol in use, e.g. GRPC. int64 max_batch_total_size_bytes = 4; // Whether absolute symlink targets are supported. SymlinkAbsolutePathStrategy symlink_absolute_path_strategy = 5; } // Capabilities of the remote execution system. message ExecutionCapabilities { // Remote execution may only support a single digest function. DigestFunction digest_function = 1; // Whether remote execution is enabled for the particular server/instance. bool exec_enabled = 2; // Supported execution priority range. PriorityCapabilities execution_priority_capabilities = 3; } // Details for the tool used to call the API. message ToolDetails { // Name of the tool, e.g. bazel. string tool_name = 1; // Version of the tool used for the request, e.g. 5.0.3. string tool_version = 2; } // An optional Metadata to attach to any RPC request to tell the server about an // external context of the request. The server may use this for logging or other // purposes. To use it, the client attaches the header to the call using the // canonical proto serialization: // name: build.bazel.remote.execution.v2.requestmetadata-bin // contents: the base64 encoded binary RequestMetadata message. message RequestMetadata { // The details for the tool invoking the requests. ToolDetails tool_details = 1; // An identifier that ties multiple requests to the same action. // For example, multiple requests to the CAS, Action Cache, and Execution // API are used in order to compile foo.cc. string action_id = 2; // An identifier that ties multiple actions together to a final result. // For example, multiple actions are required to build and run foo_test. string tool_invocation_id = 3; // An identifier to tie multiple tool invocations together. For example, // runs of foo_test, bar_test and baz_test on a post-submit of a given patch. string correlated_invocations_id = 4; } buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py000066400000000000000000001216101437515270000325630ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
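The RequestMetadata mechanism described above is attached to calls as a binary gRPC header. A minimal sketch, assuming the generated message classes from this tree; Python gRPC accepts raw bytes for '-bin' metadata keys and performs the on-the-wire base64 encoding itself:

from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

REQUEST_METADATA_HEADER = 'build.bazel.remote.execution.v2.requestmetadata-bin'

def request_metadata(tool_name, tool_version, action_id='', tool_invocation_id=''):
    # Serialize a RequestMetadata message; the raw bytes are passed as the
    # header value and gRPC base64-encodes them on the wire.
    message = remote_execution_pb2.RequestMetadata(
        tool_details=remote_execution_pb2.ToolDetails(
            tool_name=tool_name,
            tool_version=tool_version),
        action_id=action_id,
        tool_invocation_id=tool_invocation_id)
    return [(REQUEST_METADATA_HEADER, message.SerializeToString())]

# Hypothetical usage: pass it as the metadata argument of any stub call, e.g.
#   stub.GetCapabilities(request, metadata=request_metadata('bst', '1.6.9'))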
# source: build/bazel/remote/execution/v2/remote_execution.proto """Generated protocol buffer code.""" from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from buildstream._protos.build.bazel.semver import semver_pb2 as build_dot_bazel_dot_semver_dot_semver__pb2 from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08J\x04\x08\x03\x10\x06\"\xb7\x02\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12[\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32<.build.bazel.remote.execution.v2.Command.EnvironmentVariable\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12;\n\x08platform\x18\x05 \x01(\x0b\x32).build.bazel.remote.execution.v2.Platform\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"{\n\x08Platform\x12\x46\n\nproperties\x18\x01 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xca\x01\n\tDirectory\x12\x38\n\x05\x66iles\x18\x01 \x03(\x0b\x32).build.bazel.remote.execution.v2.FileNode\x12\x43\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32..build.bazel.remote.execution.v2.DirectoryNode\x12>\n\x08symlinks\x18\x03 \x03(\x0b\x32,.build.bazel.remote.execution.v2.SymlinkNode\"n\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"V\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xec\x04\n\x16\x45xecutedActionMetadata\x12\x0e\n\x06worker\x18\x01 \x01(\t\x12\x34\n\x10queued_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16worker_start_timestamp\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x1aworker_completed_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1binput_fetch_start_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x1finput_fetch_completed_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x65xecution_start_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1d\x65xecution_completed_timestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1doutput_upload_start_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x45\n!output_upload_completed_timestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb5\x03\n\x0c\x41\x63tionResult\x12\x41\n\x0coutput_files\x18\x02 \x03(\x0b\x32+.build.bazel.remote.execution.v2.OutputFile\x12L\n\x12output_directories\x18\x03 \x03(\x0b\x32\x30.build.bazel.remote.execution.v2.OutputDirectory\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12>\n\rstdout_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x12\n\nstderr_raw\x18\x07 \x01(\x0c\x12>\n\rstderr_digest\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12S\n\x12\x65xecution_metadata\x18\t \x01(\x0b\x32\x37.build.bazel.remote.execution.v2.ExecutedActionMetadataJ\x04\x08\x01\x10\x02\"p\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"~\n\x04Tree\x12\x38\n\x04root\x18\x01 \x01(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12<\n\x08\x63hildren\x18\x02 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\"c\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.DigestJ\x04\x08\x02\x10\x03\"#\n\x0f\x45xecutionPolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"&\n\x12ResultsCachePolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"\xb3\x02\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12>\n\raction_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12J\n\x10\x65xecution_policy\x18\x07 \x01(\x0b\x32\x30.build.bazel.remote.execution.v2.ExecutionPolicy\x12Q\n\x14results_cache_policy\x18\x08 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicyJ\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"Z\n\x07LogFile\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xbf\x02\n\x0f\x45xecuteResponse\x12=\n\x06result\x18\x01 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12U\n\x0bserver_logs\x18\x04 \x03(\x0b\x32@.build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry\x1a[\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.build.bazel.remote.execution.v2.LogFile:\x02\x38\x01\"\xb3\x02\n\x18\x45xecuteOperationMetadata\x12N\n\x05stage\x18\x01 \x01(\x0e\x32?.build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 
\x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"$\n\x14WaitExecutionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"o\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x8b\x02\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x44\n\raction_result\x18\x03 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12Q\n\x14results_cache_policy\x18\x04 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicy\"o\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"a\n\x18\x46indMissingBlobsResponse\x12\x45\n\x14missing_blob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xd6\x01\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12R\n\x08requests\x18\x02 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request\x1aP\n\x07Request\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xda\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12U\n\tresponses\x18\x01 \x03(\x0b\x32\x42.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"h\n\x15\x42\x61tchReadBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x38\n\x07\x64igests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe4\x01\n\x16\x42\x61tchReadBlobsResponse\x12S\n\tresponses\x18\x01 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response\x1au\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"\x8c\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"k\n\x0fGetTreeResponse\x12?\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\xe3\x02\n\x12ServerCapabilities\x12N\n\x12\x63\x61\x63he_capabilities\x18\x01 \x01(\x0b\x32\x32.build.bazel.remote.execution.v2.CacheCapabilities\x12V\n\x16\x65xecution_capabilities\x18\x02 \x01(\x0b\x32\x36.build.bazel.remote.execution.v2.ExecutionCapabilities\x12:\n\x16\x64\x65precated_api_version\x18\x03 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x33\n\x0flow_api_version\x18\x04 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x34\n\x10high_api_version\x18\x05 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\"7\n\x1d\x41\x63tionCacheUpdateCapabilities\x12\x16\n\x0eupdate_enabled\x18\x01 \x01(\x08\"\xac\x01\n\x14PriorityCapabilities\x12W\n\npriorities\x18\x01 \x03(\x0b\x32\x43.build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange\x1a;\n\rPriorityRange\x12\x14\n\x0cmin_priority\x18\x01 
\x01(\x05\x12\x14\n\x0cmax_priority\x18\x02 \x01(\x05\"\x88\x04\n\x11\x43\x61\x63heCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x03(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12h\n action_cache_update_capabilities\x18\x02 \x01(\x0b\x32>.build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities\x12Z\n\x1b\x63\x61\x63he_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\x12\"\n\x1amax_batch_total_size_bytes\x18\x04 \x01(\x03\x12v\n\x1esymlink_absolute_path_strategy\x18\x05 \x01(\x0e\x32N.build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy\"G\n\x1bSymlinkAbsolutePathStrategy\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nDISALLOWED\x10\x01\x12\x0b\n\x07\x41LLOWED\x10\x02\"\xd7\x01\n\x15\x45xecutionCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x01(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12\x14\n\x0c\x65xec_enabled\x18\x02 \x01(\x08\x12^\n\x1f\x65xecution_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 \x01(\t\"\xa7\x01\n\x0fRequestMetadata\x12\x42\n\x0ctool_details\x18\x01 \x01(\x0b\x32,.build.bazel.remote.execution.v2.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 \x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 \x01(\t*<\n\x0e\x44igestFunction\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SHA256\x10\x01\x12\x08\n\x04SHA1\x10\x02\x12\x07\n\x03MD5\x10\x03\x32\xb9\x02\n\tExecution\x12\x8e\x01\n\x07\x45xecute\x12/.build.bazel.remote.execution.v2.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/actions:execute:\x01*0\x01\x12\x9a\x01\n\rWaitExecution\x12\x35.build.bazel.remote.execution.v2.WaitExecutionRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{name=operations/**}:waitExecution:\x01*0\x01\x32\xd6\x03\n\x0b\x41\x63tionCache\x12\xd7\x01\n\x0fGetActionResult\x12\x37.build.bazel.remote.execution.v2.GetActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"\\\x82\xd3\xe4\x93\x02V\x12T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xec\x01\n\x12UpdateActionResult\x12:.build.bazel.remote.execution.v2.UpdateActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"k\x82\xd3\xe4\x93\x02\x65\x1aT/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\x9b\x06\n\x19\x43ontentAddressableStorage\x12\xbc\x01\n\x10\x46indMissingBlobs\x12\x38.build.bazel.remote.execution.v2.FindMissingBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.FindMissingBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:findMissing:\x01*\x12\xbc\x01\n\x10\x42\x61tchUpdateBlobs\x12\x38.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xb4\x01\n\x0e\x42\x61tchReadBlobs\x12\x36.build.bazel.remote.execution.v2.BatchReadBlobsRequest\x1a\x37.build.bazel.remote.execution.v2.BatchReadBlobsResponse\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/blobs:batchRead:\x01*\x12\xc8\x01\n\x07GetTree\x12/.build.bazel.remote.execution.v2.GetTreeRequest\x1a\x30.build.bazel.remote.execution.v2.GetTreeResponse\"X\x82\xd3\xe4\x93\x02R\x12P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.siz
e_bytes}:getTree0\x01\x32\xbd\x01\n\x0c\x43\x61pabilities\x12\xac\x01\n\x0fGetCapabilities\x12\x37.build.bazel.remote.execution.v2.GetCapabilitiesRequest\x1a\x33.build.bazel.remote.execution.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesBr\n\x1f\x62uild.bazel.remote.execution.v2B\x14RemoteExecutionProtoP\x01Z\x0fremoteexecution\xa2\x02\x03REX\xaa\x02\x1f\x42uild.Bazel.Remote.Execution.V2b\x06proto3') _DIGESTFUNCTION = DESCRIPTOR.enum_types_by_name['DigestFunction'] DigestFunction = enum_type_wrapper.EnumTypeWrapper(_DIGESTFUNCTION) UNKNOWN = 0 SHA256 = 1 SHA1 = 2 MD5 = 3 _ACTION = DESCRIPTOR.message_types_by_name['Action'] _COMMAND = DESCRIPTOR.message_types_by_name['Command'] _COMMAND_ENVIRONMENTVARIABLE = _COMMAND.nested_types_by_name['EnvironmentVariable'] _PLATFORM = DESCRIPTOR.message_types_by_name['Platform'] _PLATFORM_PROPERTY = _PLATFORM.nested_types_by_name['Property'] _DIRECTORY = DESCRIPTOR.message_types_by_name['Directory'] _FILENODE = DESCRIPTOR.message_types_by_name['FileNode'] _DIRECTORYNODE = DESCRIPTOR.message_types_by_name['DirectoryNode'] _SYMLINKNODE = DESCRIPTOR.message_types_by_name['SymlinkNode'] _DIGEST = DESCRIPTOR.message_types_by_name['Digest'] _EXECUTEDACTIONMETADATA = DESCRIPTOR.message_types_by_name['ExecutedActionMetadata'] _ACTIONRESULT = DESCRIPTOR.message_types_by_name['ActionResult'] _OUTPUTFILE = DESCRIPTOR.message_types_by_name['OutputFile'] _TREE = DESCRIPTOR.message_types_by_name['Tree'] _OUTPUTDIRECTORY = DESCRIPTOR.message_types_by_name['OutputDirectory'] _EXECUTIONPOLICY = DESCRIPTOR.message_types_by_name['ExecutionPolicy'] _RESULTSCACHEPOLICY = DESCRIPTOR.message_types_by_name['ResultsCachePolicy'] _EXECUTEREQUEST = DESCRIPTOR.message_types_by_name['ExecuteRequest'] _LOGFILE = DESCRIPTOR.message_types_by_name['LogFile'] _EXECUTERESPONSE = DESCRIPTOR.message_types_by_name['ExecuteResponse'] _EXECUTERESPONSE_SERVERLOGSENTRY = _EXECUTERESPONSE.nested_types_by_name['ServerLogsEntry'] _EXECUTEOPERATIONMETADATA = DESCRIPTOR.message_types_by_name['ExecuteOperationMetadata'] _WAITEXECUTIONREQUEST = DESCRIPTOR.message_types_by_name['WaitExecutionRequest'] _GETACTIONRESULTREQUEST = DESCRIPTOR.message_types_by_name['GetActionResultRequest'] _UPDATEACTIONRESULTREQUEST = DESCRIPTOR.message_types_by_name['UpdateActionResultRequest'] _FINDMISSINGBLOBSREQUEST = DESCRIPTOR.message_types_by_name['FindMissingBlobsRequest'] _FINDMISSINGBLOBSRESPONSE = DESCRIPTOR.message_types_by_name['FindMissingBlobsResponse'] _BATCHUPDATEBLOBSREQUEST = DESCRIPTOR.message_types_by_name['BatchUpdateBlobsRequest'] _BATCHUPDATEBLOBSREQUEST_REQUEST = _BATCHUPDATEBLOBSREQUEST.nested_types_by_name['Request'] _BATCHUPDATEBLOBSRESPONSE = DESCRIPTOR.message_types_by_name['BatchUpdateBlobsResponse'] _BATCHUPDATEBLOBSRESPONSE_RESPONSE = _BATCHUPDATEBLOBSRESPONSE.nested_types_by_name['Response'] _BATCHREADBLOBSREQUEST = DESCRIPTOR.message_types_by_name['BatchReadBlobsRequest'] _BATCHREADBLOBSRESPONSE = DESCRIPTOR.message_types_by_name['BatchReadBlobsResponse'] _BATCHREADBLOBSRESPONSE_RESPONSE = _BATCHREADBLOBSRESPONSE.nested_types_by_name['Response'] _GETTREEREQUEST = DESCRIPTOR.message_types_by_name['GetTreeRequest'] _GETTREERESPONSE = DESCRIPTOR.message_types_by_name['GetTreeResponse'] _GETCAPABILITIESREQUEST = DESCRIPTOR.message_types_by_name['GetCapabilitiesRequest'] _SERVERCAPABILITIES = DESCRIPTOR.message_types_by_name['ServerCapabilities'] _ACTIONCACHEUPDATECAPABILITIES = DESCRIPTOR.message_types_by_name['ActionCacheUpdateCapabilities'] 
_PRIORITYCAPABILITIES = DESCRIPTOR.message_types_by_name['PriorityCapabilities'] _PRIORITYCAPABILITIES_PRIORITYRANGE = _PRIORITYCAPABILITIES.nested_types_by_name['PriorityRange'] _CACHECAPABILITIES = DESCRIPTOR.message_types_by_name['CacheCapabilities'] _EXECUTIONCAPABILITIES = DESCRIPTOR.message_types_by_name['ExecutionCapabilities'] _TOOLDETAILS = DESCRIPTOR.message_types_by_name['ToolDetails'] _REQUESTMETADATA = DESCRIPTOR.message_types_by_name['RequestMetadata'] _EXECUTEOPERATIONMETADATA_STAGE = _EXECUTEOPERATIONMETADATA.enum_types_by_name['Stage'] _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY = _CACHECAPABILITIES.enum_types_by_name['SymlinkAbsolutePathStrategy'] Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), { 'DESCRIPTOR' : _ACTION, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Action) }) _sym_db.RegisterMessage(Action) Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), { 'EnvironmentVariable' : _reflection.GeneratedProtocolMessageType('EnvironmentVariable', (_message.Message,), { 'DESCRIPTOR' : _COMMAND_ENVIRONMENTVARIABLE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command.EnvironmentVariable) }) , 'DESCRIPTOR' : _COMMAND, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command) }) _sym_db.RegisterMessage(Command) _sym_db.RegisterMessage(Command.EnvironmentVariable) Platform = _reflection.GeneratedProtocolMessageType('Platform', (_message.Message,), { 'Property' : _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), { 'DESCRIPTOR' : _PLATFORM_PROPERTY, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform.Property) }) , 'DESCRIPTOR' : _PLATFORM, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform) }) _sym_db.RegisterMessage(Platform) _sym_db.RegisterMessage(Platform.Property) Directory = _reflection.GeneratedProtocolMessageType('Directory', (_message.Message,), { 'DESCRIPTOR' : _DIRECTORY, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Directory) }) _sym_db.RegisterMessage(Directory) FileNode = _reflection.GeneratedProtocolMessageType('FileNode', (_message.Message,), { 'DESCRIPTOR' : _FILENODE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FileNode) }) _sym_db.RegisterMessage(FileNode) DirectoryNode = _reflection.GeneratedProtocolMessageType('DirectoryNode', (_message.Message,), { 'DESCRIPTOR' : _DIRECTORYNODE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.DirectoryNode) }) _sym_db.RegisterMessage(DirectoryNode) SymlinkNode = _reflection.GeneratedProtocolMessageType('SymlinkNode', (_message.Message,), { 'DESCRIPTOR' : _SYMLINKNODE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.SymlinkNode) }) _sym_db.RegisterMessage(SymlinkNode) Digest = 
_reflection.GeneratedProtocolMessageType('Digest', (_message.Message,), { 'DESCRIPTOR' : _DIGEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Digest) }) _sym_db.RegisterMessage(Digest) ExecutedActionMetadata = _reflection.GeneratedProtocolMessageType('ExecutedActionMetadata', (_message.Message,), { 'DESCRIPTOR' : _EXECUTEDACTIONMETADATA, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutedActionMetadata) }) _sym_db.RegisterMessage(ExecutedActionMetadata) ActionResult = _reflection.GeneratedProtocolMessageType('ActionResult', (_message.Message,), { 'DESCRIPTOR' : _ACTIONRESULT, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionResult) }) _sym_db.RegisterMessage(ActionResult) OutputFile = _reflection.GeneratedProtocolMessageType('OutputFile', (_message.Message,), { 'DESCRIPTOR' : _OUTPUTFILE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputFile) }) _sym_db.RegisterMessage(OutputFile) Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), { 'DESCRIPTOR' : _TREE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Tree) }) _sym_db.RegisterMessage(Tree) OutputDirectory = _reflection.GeneratedProtocolMessageType('OutputDirectory', (_message.Message,), { 'DESCRIPTOR' : _OUTPUTDIRECTORY, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputDirectory) }) _sym_db.RegisterMessage(OutputDirectory) ExecutionPolicy = _reflection.GeneratedProtocolMessageType('ExecutionPolicy', (_message.Message,), { 'DESCRIPTOR' : _EXECUTIONPOLICY, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionPolicy) }) _sym_db.RegisterMessage(ExecutionPolicy) ResultsCachePolicy = _reflection.GeneratedProtocolMessageType('ResultsCachePolicy', (_message.Message,), { 'DESCRIPTOR' : _RESULTSCACHEPOLICY, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ResultsCachePolicy) }) _sym_db.RegisterMessage(ResultsCachePolicy) ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), { 'DESCRIPTOR' : _EXECUTEREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteRequest) }) _sym_db.RegisterMessage(ExecuteRequest) LogFile = _reflection.GeneratedProtocolMessageType('LogFile', (_message.Message,), { 'DESCRIPTOR' : _LOGFILE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.LogFile) }) _sym_db.RegisterMessage(LogFile) ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), { 'ServerLogsEntry' : _reflection.GeneratedProtocolMessageType('ServerLogsEntry', (_message.Message,), { 'DESCRIPTOR' : _EXECUTERESPONSE_SERVERLOGSENTRY, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # 
@@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry) }) , 'DESCRIPTOR' : _EXECUTERESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse) }) _sym_db.RegisterMessage(ExecuteResponse) _sym_db.RegisterMessage(ExecuteResponse.ServerLogsEntry) ExecuteOperationMetadata = _reflection.GeneratedProtocolMessageType('ExecuteOperationMetadata', (_message.Message,), { 'DESCRIPTOR' : _EXECUTEOPERATIONMETADATA, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteOperationMetadata) }) _sym_db.RegisterMessage(ExecuteOperationMetadata) WaitExecutionRequest = _reflection.GeneratedProtocolMessageType('WaitExecutionRequest', (_message.Message,), { 'DESCRIPTOR' : _WAITEXECUTIONREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.WaitExecutionRequest) }) _sym_db.RegisterMessage(WaitExecutionRequest) GetActionResultRequest = _reflection.GeneratedProtocolMessageType('GetActionResultRequest', (_message.Message,), { 'DESCRIPTOR' : _GETACTIONRESULTREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetActionResultRequest) }) _sym_db.RegisterMessage(GetActionResultRequest) UpdateActionResultRequest = _reflection.GeneratedProtocolMessageType('UpdateActionResultRequest', (_message.Message,), { 'DESCRIPTOR' : _UPDATEACTIONRESULTREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.UpdateActionResultRequest) }) _sym_db.RegisterMessage(UpdateActionResultRequest) FindMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FindMissingBlobsRequest', (_message.Message,), { 'DESCRIPTOR' : _FINDMISSINGBLOBSREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsRequest) }) _sym_db.RegisterMessage(FindMissingBlobsRequest) FindMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FindMissingBlobsResponse', (_message.Message,), { 'DESCRIPTOR' : _FINDMISSINGBLOBSRESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsResponse) }) _sym_db.RegisterMessage(FindMissingBlobsResponse) BatchUpdateBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsRequest', (_message.Message,), { 'Request' : _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), { 'DESCRIPTOR' : _BATCHUPDATEBLOBSREQUEST_REQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request) }) , 'DESCRIPTOR' : _BATCHUPDATEBLOBSREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest) }) _sym_db.RegisterMessage(BatchUpdateBlobsRequest) _sym_db.RegisterMessage(BatchUpdateBlobsRequest.Request) BatchUpdateBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsResponse', (_message.Message,), { 'Response' : 
_reflection.GeneratedProtocolMessageType('Response', (_message.Message,), { 'DESCRIPTOR' : _BATCHUPDATEBLOBSRESPONSE_RESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response) }) , 'DESCRIPTOR' : _BATCHUPDATEBLOBSRESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse) }) _sym_db.RegisterMessage(BatchUpdateBlobsResponse) _sym_db.RegisterMessage(BatchUpdateBlobsResponse.Response) BatchReadBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchReadBlobsRequest', (_message.Message,), { 'DESCRIPTOR' : _BATCHREADBLOBSREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsRequest) }) _sym_db.RegisterMessage(BatchReadBlobsRequest) BatchReadBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchReadBlobsResponse', (_message.Message,), { 'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), { 'DESCRIPTOR' : _BATCHREADBLOBSRESPONSE_RESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response) }) , 'DESCRIPTOR' : _BATCHREADBLOBSRESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsResponse) }) _sym_db.RegisterMessage(BatchReadBlobsResponse) _sym_db.RegisterMessage(BatchReadBlobsResponse.Response) GetTreeRequest = _reflection.GeneratedProtocolMessageType('GetTreeRequest', (_message.Message,), { 'DESCRIPTOR' : _GETTREEREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeRequest) }) _sym_db.RegisterMessage(GetTreeRequest) GetTreeResponse = _reflection.GeneratedProtocolMessageType('GetTreeResponse', (_message.Message,), { 'DESCRIPTOR' : _GETTREERESPONSE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeResponse) }) _sym_db.RegisterMessage(GetTreeResponse) GetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('GetCapabilitiesRequest', (_message.Message,), { 'DESCRIPTOR' : _GETCAPABILITIESREQUEST, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetCapabilitiesRequest) }) _sym_db.RegisterMessage(GetCapabilitiesRequest) ServerCapabilities = _reflection.GeneratedProtocolMessageType('ServerCapabilities', (_message.Message,), { 'DESCRIPTOR' : _SERVERCAPABILITIES, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ServerCapabilities) }) _sym_db.RegisterMessage(ServerCapabilities) ActionCacheUpdateCapabilities = _reflection.GeneratedProtocolMessageType('ActionCacheUpdateCapabilities', (_message.Message,), { 'DESCRIPTOR' : _ACTIONCACHEUPDATECAPABILITIES, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities) }) _sym_db.RegisterMessage(ActionCacheUpdateCapabilities) 
PriorityCapabilities = _reflection.GeneratedProtocolMessageType('PriorityCapabilities', (_message.Message,), { 'PriorityRange' : _reflection.GeneratedProtocolMessageType('PriorityRange', (_message.Message,), { 'DESCRIPTOR' : _PRIORITYCAPABILITIES_PRIORITYRANGE, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange) }) , 'DESCRIPTOR' : _PRIORITYCAPABILITIES, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities) }) _sym_db.RegisterMessage(PriorityCapabilities) _sym_db.RegisterMessage(PriorityCapabilities.PriorityRange) CacheCapabilities = _reflection.GeneratedProtocolMessageType('CacheCapabilities', (_message.Message,), { 'DESCRIPTOR' : _CACHECAPABILITIES, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.CacheCapabilities) }) _sym_db.RegisterMessage(CacheCapabilities) ExecutionCapabilities = _reflection.GeneratedProtocolMessageType('ExecutionCapabilities', (_message.Message,), { 'DESCRIPTOR' : _EXECUTIONCAPABILITIES, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionCapabilities) }) _sym_db.RegisterMessage(ExecutionCapabilities) ToolDetails = _reflection.GeneratedProtocolMessageType('ToolDetails', (_message.Message,), { 'DESCRIPTOR' : _TOOLDETAILS, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ToolDetails) }) _sym_db.RegisterMessage(ToolDetails) RequestMetadata = _reflection.GeneratedProtocolMessageType('RequestMetadata', (_message.Message,), { 'DESCRIPTOR' : _REQUESTMETADATA, '__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2' # @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.RequestMetadata) }) _sym_db.RegisterMessage(RequestMetadata) _EXECUTION = DESCRIPTOR.services_by_name['Execution'] _ACTIONCACHE = DESCRIPTOR.services_by_name['ActionCache'] _CONTENTADDRESSABLESTORAGE = DESCRIPTOR.services_by_name['ContentAddressableStorage'] _CAPABILITIES = DESCRIPTOR.services_by_name['Capabilities'] if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2' _EXECUTERESPONSE_SERVERLOGSENTRY._options = None _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_options = b'8\001' _EXECUTION.methods_by_name['Execute']._options = None _EXECUTION.methods_by_name['Execute']._serialized_options = b'\202\323\344\223\002+\"&/v2/{instance_name=**}/actions:execute:\001*' _EXECUTION.methods_by_name['WaitExecution']._options = None _EXECUTION.methods_by_name['WaitExecution']._serialized_options = b'\202\323\344\223\002+\"&/v2/{name=operations/**}:waitExecution:\001*' _ACTIONCACHE.methods_by_name['GetActionResult']._options = None _ACTIONCACHE.methods_by_name['GetActionResult']._serialized_options = b'\202\323\344\223\002V\022T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}' _ACTIONCACHE.methods_by_name['UpdateActionResult']._options = None _ACTIONCACHE.methods_by_name['UpdateActionResult']._serialized_options = 
b'\202\323\344\223\002e\032T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result' _CONTENTADDRESSABLESTORAGE.methods_by_name['FindMissingBlobs']._options = None _CONTENTADDRESSABLESTORAGE.methods_by_name['FindMissingBlobs']._serialized_options = b'\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:findMissing:\001*' _CONTENTADDRESSABLESTORAGE.methods_by_name['BatchUpdateBlobs']._options = None _CONTENTADDRESSABLESTORAGE.methods_by_name['BatchUpdateBlobs']._serialized_options = b'\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:batchUpdate:\001*' _CONTENTADDRESSABLESTORAGE.methods_by_name['BatchReadBlobs']._options = None _CONTENTADDRESSABLESTORAGE.methods_by_name['BatchReadBlobs']._serialized_options = b'\202\323\344\223\002+\"&/v2/{instance_name=**}/blobs:batchRead:\001*' _CONTENTADDRESSABLESTORAGE.methods_by_name['GetTree']._options = None _CONTENTADDRESSABLESTORAGE.methods_by_name['GetTree']._serialized_options = b'\202\323\344\223\002R\022P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree' _CAPABILITIES.methods_by_name['GetCapabilities']._options = None _CAPABILITIES.methods_by_name['GetCapabilities']._serialized_options = b'\202\323\344\223\002%\022#/v2/{instance_name=**}/capabilities' _DIGESTFUNCTION._serialized_start=7213 _DIGESTFUNCTION._serialized_end=7273 _ACTION._serialized_start=282 _ACTION._serialized_end=495 _COMMAND._serialized_start=498 _COMMAND._serialized_end=809 _COMMAND_ENVIRONMENTVARIABLE._serialized_start=759 _COMMAND_ENVIRONMENTVARIABLE._serialized_end=809 _PLATFORM._serialized_start=811 _PLATFORM._serialized_end=934 _PLATFORM_PROPERTY._serialized_start=895 _PLATFORM_PROPERTY._serialized_end=934 _DIRECTORY._serialized_start=937 _DIRECTORY._serialized_end=1139 _FILENODE._serialized_start=1141 _FILENODE._serialized_end=1251 _DIRECTORYNODE._serialized_start=1253 _DIRECTORYNODE._serialized_end=1339 _SYMLINKNODE._serialized_start=1341 _SYMLINKNODE._serialized_end=1384 _DIGEST._serialized_start=1386 _DIGEST._serialized_end=1428 _EXECUTEDACTIONMETADATA._serialized_start=1431 _EXECUTEDACTIONMETADATA._serialized_end=2051 _ACTIONRESULT._serialized_start=2054 _ACTIONRESULT._serialized_end=2491 _OUTPUTFILE._serialized_start=2493 _OUTPUTFILE._serialized_end=2605 _TREE._serialized_start=2607 _TREE._serialized_end=2733 _OUTPUTDIRECTORY._serialized_start=2735 _OUTPUTDIRECTORY._serialized_end=2834 _EXECUTIONPOLICY._serialized_start=2836 _EXECUTIONPOLICY._serialized_end=2871 _RESULTSCACHEPOLICY._serialized_start=2873 _RESULTSCACHEPOLICY._serialized_end=2911 _EXECUTEREQUEST._serialized_start=2914 _EXECUTEREQUEST._serialized_end=3221 _LOGFILE._serialized_start=3223 _LOGFILE._serialized_end=3313 _EXECUTERESPONSE._serialized_start=3316 _EXECUTERESPONSE._serialized_end=3635 _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_start=3544 _EXECUTERESPONSE_SERVERLOGSENTRY._serialized_end=3635 _EXECUTEOPERATIONMETADATA._serialized_start=3638 _EXECUTEOPERATIONMETADATA._serialized_end=3945 _EXECUTEOPERATIONMETADATA_STAGE._serialized_start=3866 _EXECUTEOPERATIONMETADATA_STAGE._serialized_end=3945 _WAITEXECUTIONREQUEST._serialized_start=3947 _WAITEXECUTIONREQUEST._serialized_end=3983 _GETACTIONRESULTREQUEST._serialized_start=3985 _GETACTIONRESULTREQUEST._serialized_end=4096 _UPDATEACTIONRESULTREQUEST._serialized_start=4099 _UPDATEACTIONRESULTREQUEST._serialized_end=4366 _FINDMISSINGBLOBSREQUEST._serialized_start=4368 _FINDMISSINGBLOBSREQUEST._serialized_end=4479 _FINDMISSINGBLOBSRESPONSE._serialized_start=4481 
_FINDMISSINGBLOBSRESPONSE._serialized_end=4578 _BATCHUPDATEBLOBSREQUEST._serialized_start=4581 _BATCHUPDATEBLOBSREQUEST._serialized_end=4795 _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_start=4715 _BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_end=4795 _BATCHUPDATEBLOBSRESPONSE._serialized_start=4798 _BATCHUPDATEBLOBSRESPONSE._serialized_end=5016 _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_start=4913 _BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_end=5016 _BATCHREADBLOBSREQUEST._serialized_start=5018 _BATCHREADBLOBSREQUEST._serialized_end=5122 _BATCHREADBLOBSRESPONSE._serialized_start=5125 _BATCHREADBLOBSRESPONSE._serialized_end=5353 _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_start=5236 _BATCHREADBLOBSRESPONSE_RESPONSE._serialized_end=5353 _GETTREEREQUEST._serialized_start=5356 _GETTREEREQUEST._serialized_end=5496 _GETTREERESPONSE._serialized_start=5498 _GETTREERESPONSE._serialized_end=5605 _GETCAPABILITIESREQUEST._serialized_start=5607 _GETCAPABILITIESREQUEST._serialized_end=5654 _SERVERCAPABILITIES._serialized_start=5657 _SERVERCAPABILITIES._serialized_end=6012 _ACTIONCACHEUPDATECAPABILITIES._serialized_start=6014 _ACTIONCACHEUPDATECAPABILITIES._serialized_end=6069 _PRIORITYCAPABILITIES._serialized_start=6072 _PRIORITYCAPABILITIES._serialized_end=6244 _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_start=6185 _PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_end=6244 _CACHECAPABILITIES._serialized_start=6247 _CACHECAPABILITIES._serialized_end=6767 _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY._serialized_start=6696 _CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY._serialized_end=6767 _EXECUTIONCAPABILITIES._serialized_start=6770 _EXECUTIONCAPABILITIES._serialized_end=6985 _TOOLDETAILS._serialized_start=6987 _TOOLDETAILS._serialized_end=7041 _REQUESTMETADATA._serialized_start=7044 _REQUESTMETADATA._serialized_end=7211 _EXECUTION._serialized_start=7276 _EXECUTION._serialized_end=7589 _ACTIONCACHE._serialized_start=7592 _ACTIONCACHE._serialized_end=8062 _CONTENTADDRESSABLESTORAGE._serialized_start=8065 _CONTENTADDRESSABLESTORAGE._serialized_end=8860 _CAPABILITIES._serialized_start=8863 _CAPABILITIES._serialized_end=9052 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py000066400000000000000000001325531437515270000336060ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2 from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 class ExecutionStub(object): """The Remote Execution API is used to execute an [Action][build.bazel.remote.execution.v2.Action] on the remote workers. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.Execute = channel.unary_stream( '/build.bazel.remote.execution.v2.Execution/Execute', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.WaitExecution = channel.unary_stream( '/build.bazel.remote.execution.v2.Execution/WaitExecution', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) class ExecutionServicer(object): """The Remote Execution API is used to execute an [Action][build.bazel.remote.execution.v2.Action] on the remote workers. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def Execute(self, request, context): """Execute an action remotely. In order to execute an action, the client must first upload all of the inputs, the [Command][build.bazel.remote.execution.v2.Command] to run, and the [Action][build.bazel.remote.execution.v2.Action] into the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]. It then calls `Execute` with an `action_digest` referring to them. The server will run the action and eventually return the result. The input `Action`'s fields MUST meet the various canonicalization requirements specified in the documentation for their types so that it has the same digest as other logically equivalent `Action`s. The server MAY enforce the requirements and return errors if a non-canonical input is received. It MAY also proceed without verifying some or all of the requirements, such as for performance reasons. If the server does not verify the requirement, then it will treat the `Action` as distinct from another logically equivalent action if they hash differently. Returns a stream of [google.longrunning.Operation][google.longrunning.Operation] messages describing the resulting execution, with eventual `response` [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The `metadata` on the operation is of type [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata]. If the client remains connected after the first response is returned after the server, then updates are streamed as if the client had called [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution] until the execution completes or the request reaches an error. The operation can also be queried using [Operations API][google.longrunning.Operations.GetOperation]. The server NEED NOT implement other methods or functionality of the Operations API. Errors discovered during creation of the `Operation` will be reported as gRPC Status errors, while errors that occurred while running the action will be reported in the `status` field of the `ExecuteResponse`. The server MUST NOT set the `error` field of the `Operation` proto. The possible errors include: * `INVALID_ARGUMENT`: One or more arguments are invalid. * `FAILED_PRECONDITION`: One or more errors occurred in setting up the action requested, such as a missing input or command or no worker being available. The client may be able to fix the errors and retry. 
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run the action. * `UNAVAILABLE`: Due to a transient condition, such as all workers being occupied (and the server does not support a queue), the action could not be started. The client should retry. * `INTERNAL`: An internal error occurred in the execution engine or the worker. * `DEADLINE_EXCEEDED`: The execution timed out. In the case of a missing input or command, the server SHOULD additionally send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail where, for each requested blob not present in the CAS, there is a `Violation` with a `type` of `MISSING` and a `subject` of `"blobs/{hash}/{size}"` indicating the digest of the missing blob. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def WaitExecution(self, request, context): """Wait for an execution operation to complete. When the client initially makes the request, the server immediately responds with the current status of the execution. The server will leave the request stream open until the operation completes, and then respond with the completed operation. The server MAY choose to stream additional updates as execution progresses, such as to provide an update as to the state of the execution. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ExecutionServicer_to_server(servicer, server): rpc_method_handlers = { 'Execute': grpc.unary_stream_rpc_method_handler( servicer.Execute, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), 'WaitExecution': grpc.unary_stream_rpc_method_handler( servicer.WaitExecution, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'build.bazel.remote.execution.v2.Execution', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Execution(object): """The Remote Execution API is used to execute an [Action][build.bazel.remote.execution.v2.Action] on the remote workers. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. 
""" @staticmethod def Execute(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.Execution/Execute', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString, google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def WaitExecution(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.Execution/WaitExecution', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString, google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class ActionCacheStub(object): """The action cache API is used to query whether a given action has already been performed and, if so, retrieve its result. Unlike the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage], which addresses blobs by their own content, the action cache addresses the [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a digest of the encoded [Action][build.bazel.remote.execution.v2.Action] which produced them. The lifetime of entries in the action cache is implementation-specific, but the server SHOULD assume that more recently used entries are more likely to be used again. Additionally, action cache implementations SHOULD ensure that any blobs referenced in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] are still valid when returning a result. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.GetActionResult = channel.unary_unary( '/build.bazel.remote.execution.v2.ActionCache/GetActionResult', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString, ) self.UpdateActionResult = channel.unary_unary( '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString, ) class ActionCacheServicer(object): """The action cache API is used to query whether a given action has already been performed and, if so, retrieve its result. 
Unlike the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage], which addresses blobs by their own content, the action cache addresses the [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a digest of the encoded [Action][build.bazel.remote.execution.v2.Action] which produced them. The lifetime of entries in the action cache is implementation-specific, but the server SHOULD assume that more recently used entries are more likely to be used again. Additionally, action cache implementations SHOULD ensure that any blobs referenced in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] are still valid when returning a result. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def GetActionResult(self, request, context): """Retrieve a cached execution result. Errors: * `NOT_FOUND`: The requested `ActionResult` is not in the cache. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateActionResult(self, request, context): """Upload a new execution result. This method is intended for servers which implement the distributed cache independently of the [Execution][build.bazel.remote.execution.v2.Execution] API. As a result, it is OPTIONAL for servers to implement. In order to allow the server to perform access control based on the type of action, and to assist with client debugging, the client MUST first upload the [Action][build.bazel.remote.execution.v2.Execution] that produced the result, along with its [Command][build.bazel.remote.execution.v2.Command], into the `ContentAddressableStorage`. Errors: * `NOT_IMPLEMENTED`: This method is not supported by the server. * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the entry to the cache. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ActionCacheServicer_to_server(servicer, server): rpc_method_handlers = { 'GetActionResult': grpc.unary_unary_rpc_method_handler( servicer.GetActionResult, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString, ), 'UpdateActionResult': grpc.unary_unary_rpc_method_handler( servicer.UpdateActionResult, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'build.bazel.remote.execution.v2.ActionCache', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class ActionCache(object): """The action cache API is used to query whether a given action has already been performed and, if so, retrieve its result. 
Unlike the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage], which addresses blobs by their own content, the action cache addresses the [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a digest of the encoded [Action][build.bazel.remote.execution.v2.Action] which produced them. The lifetime of entries in the action cache is implementation-specific, but the server SHOULD assume that more recently used entries are more likely to be used again. Additionally, action cache implementations SHOULD ensure that any blobs referenced in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] are still valid when returning a result. As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ @staticmethod def GetActionResult(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ActionCache/GetActionResult', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateActionResult(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class ContentAddressableStorageStub(object): """The CAS (content-addressable storage) is used to store the inputs to and outputs from the execution service. Each piece of content is addressed by the digest of its binary data. Most of the binary data stored in the CAS is opaque to the execution engine, and is only used as a communication medium. In order to build an [Action][build.bazel.remote.execution.v2.Action], however, the client will need to also upload the [Command][build.bazel.remote.execution.v2.Command] and input root [Directory][build.bazel.remote.execution.v2.Directory] for the Action. The Command and Directory messages must be marshalled to wire format and then uploaded under the hash as with any other piece of content. In practice, the input root directory is likely to refer to other Directories in its hierarchy, which must also each be uploaded on their own. For small file uploads the client should group them together and call [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] on chunks of no more than 10 MiB. For large uploads, the client must use the [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. 
The `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`, where `instance_name` is as described in the next paragraph, `uuid` is a version 4 UUID generated by the client, and `hash` and `size` are the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The `uuid` is used only to avoid collisions when multiple clients try to upload the same file (or the same client tries to upload the file multiple times at once on different threads), so the client MAY reuse the `uuid` for uploading different blobs. The `resource_name` may optionally have a trailing filename (or other metadata) for a client to use if it is storing URLs, as in `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything after the `size` is ignored. A single server MAY support multiple instances of the execution system, each with their own workers, storage, cache, etc. The exact relationship between instances is up to the server. If the server does, then the `instance_name` is an identifier, possibly containing multiple path segments, used to distinguish between the various instances on the server, in a manner defined by the server. For servers which do not support multiple instances, then the `instance_name` is the empty path and the leading slash is omitted, so that the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`. When attempting an upload, if another client has already completed the upload (which may occur in the middle of a single upload if another client uploads the same blob concurrently), the request will terminate immediately with a response whose `committed_size` is the full size of the uploaded file (regardless of how much data was transmitted by the client). If the client completes the upload but the [Digest][build.bazel.remote.execution.v2.Digest] does not match, an `INVALID_ARGUMENT` error will be returned. In either case, the client should not attempt to retry the upload. For downloading blobs, the client must use the [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where `instance_name` is the instance name (see above), and `hash` and `size` are the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The lifetime of entries in the CAS is implementation specific, but it SHOULD be long enough to allow for newly-added and recently looked-up entries to be used in subsequent calls (e.g. to [Execute][build.bazel.remote.execution.v2.Execution.Execute]). As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.FindMissingBlobs = channel.unary_unary( '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString, ) self.BatchUpdateBlobs = channel.unary_unary( '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString, ) self.BatchReadBlobs = channel.unary_unary( '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString, ) self.GetTree = channel.unary_stream( '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString, ) class ContentAddressableStorageServicer(object): """The CAS (content-addressable storage) is used to store the inputs to and outputs from the execution service. Each piece of content is addressed by the digest of its binary data. Most of the binary data stored in the CAS is opaque to the execution engine, and is only used as a communication medium. In order to build an [Action][build.bazel.remote.execution.v2.Action], however, the client will need to also upload the [Command][build.bazel.remote.execution.v2.Command] and input root [Directory][build.bazel.remote.execution.v2.Directory] for the Action. The Command and Directory messages must be marshalled to wire format and then uploaded under the hash as with any other piece of content. In practice, the input root directory is likely to refer to other Directories in its hierarchy, which must also each be uploaded on their own. For small file uploads the client should group them together and call [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] on chunks of no more than 10 MiB. For large uploads, the client must use the [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`, where `instance_name` is as described in the next paragraph, `uuid` is a version 4 UUID generated by the client, and `hash` and `size` are the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The `uuid` is used only to avoid collisions when multiple clients try to upload the same file (or the same client tries to upload the file multiple times at once on different threads), so the client MAY reuse the `uuid` for uploading different blobs. The `resource_name` may optionally have a trailing filename (or other metadata) for a client to use if it is storing URLs, as in `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything after the `size` is ignored. 
A single server MAY support multiple instances of the execution system, each with their own workers, storage, cache, etc. The exact relationship between instances is up to the server. If the server does, then the `instance_name` is an identifier, possibly containing multiple path segments, used to distinguish between the various instances on the server, in a manner defined by the server. For servers which do not support multiple instances, then the `instance_name` is the empty path and the leading slash is omitted, so that the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`. When attempting an upload, if another client has already completed the upload (which may occur in the middle of a single upload if another client uploads the same blob concurrently), the request will terminate immediately with a response whose `committed_size` is the full size of the uploaded file (regardless of how much data was transmitted by the client). If the client completes the upload but the [Digest][build.bazel.remote.execution.v2.Digest] does not match, an `INVALID_ARGUMENT` error will be returned. In either case, the client should not attempt to retry the upload. For downloading blobs, the client must use the [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where `instance_name` is the instance name (see above), and `hash` and `size` are the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The lifetime of entries in the CAS is implementation specific, but it SHOULD be long enough to allow for newly-added and recently looked-up entries to be used in subsequent calls (e.g. to [Execute][build.bazel.remote.execution.v2.Execution.Execute]). As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. """ def FindMissingBlobs(self, request, context): """Determine if blobs are present in the CAS. Clients can use this API before uploading blobs to determine which ones are already present in the CAS and do not need to be uploaded again. There are no method-specific errors. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BatchUpdateBlobs(self, request, context): """Upload many blobs at once. The server may enforce a limit of the combined total size of blobs to be uploaded using this API. This limit may be obtained using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. Requests exceeding the limit should either be split into smaller chunks or uploaded using the [ByteStream API][google.bytestream.ByteStream], as appropriate. This request is equivalent to calling a Bytestream `Write` request on each individual blob, in parallel. The requests may succeed or fail independently. Errors: * `INVALID_ARGUMENT`: The client attempted to upload more than the server supported limit. Individual requests may return the following errors, additionally: * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob. * `INVALID_ARGUMENT`: The [Digest][build.bazel.remote.execution.v2.Digest] does not match the provided data. 
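    For illustration only, a client-side sketch of a small batch upload; the
    endpoint and instance name are placeholder assumptions, and the digest
    function is assumed to be SHA-256:

        import hashlib

        import grpc

        from buildstream._protos.build.bazel.remote.execution.v2 import (
            remote_execution_pb2,
            remote_execution_pb2_grpc,
        )

        channel = grpc.insecure_channel('localhost:50051')  # assumed endpoint
        stub = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)

        data = b'example blob'
        # Assumes the server is configured for SHA-256 digests.
        digest = remote_execution_pb2.Digest(
            hash=hashlib.sha256(data).hexdigest(),
            size_bytes=len(data),
        )
        request = remote_execution_pb2.BatchUpdateBlobsRequest(
            instance_name='',  # assumed: single-instance server
            requests=[
                remote_execution_pb2.BatchUpdateBlobsRequest.Request(
                    digest=digest,
                    data=data,
                ),
            ],
        )
        response = stub.BatchUpdateBlobs(request)
        # Each blob gets its own per-request status in the response.
        for entry in response.responses:
            print(entry.digest.hash, entry.status.code)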
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def BatchReadBlobs(self, request, context): """Download many blobs at once. The server may enforce a limit of the combined total size of blobs to be downloaded using this API. This limit may be obtained using the [Capabilities][build.bazel.remote.execution.v2.Capabilities] API. Requests exceeding the limit should either be split into smaller chunks or downloaded using the [ByteStream API][google.bytestream.ByteStream], as appropriate. This request is equivalent to calling a Bytestream `Read` request on each individual blob, in parallel. The requests may succeed or fail independently. Errors: * `INVALID_ARGUMENT`: The client attempted to read more than the server supported limit. Every error on individual read will be returned in the corresponding digest status. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetTree(self, request, context): """Fetch the entire directory tree rooted at a node. This request must be targeted at a [Directory][build.bazel.remote.execution.v2.Directory] stored in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage] (CAS). The server will enumerate the `Directory` tree recursively and return every node descended from the root. The GetTreeRequest.page_token parameter can be used to skip ahead in the stream (e.g. when retrying a partially completed and aborted request), by setting it to a value taken from GetTreeResponse.next_page_token of the last successfully processed GetTreeResponse). The exact traversal order is unspecified and, unless retrieving subsequent pages from an earlier request, is not guaranteed to be stable across multiple invocations of `GetTree`. If part of the tree is missing from the CAS, the server will return the portion present and omit the rest. * `NOT_FOUND`: The requested tree root is not present in the CAS. 
""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ContentAddressableStorageServicer_to_server(servicer, server): rpc_method_handlers = { 'FindMissingBlobs': grpc.unary_unary_rpc_method_handler( servicer.FindMissingBlobs, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString, ), 'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler( servicer.BatchUpdateBlobs, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString, ), 'BatchReadBlobs': grpc.unary_unary_rpc_method_handler( servicer.BatchReadBlobs, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.SerializeToString, ), 'GetTree': grpc.unary_stream_rpc_method_handler( servicer.GetTree, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'build.bazel.remote.execution.v2.ContentAddressableStorage', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class ContentAddressableStorage(object): """The CAS (content-addressable storage) is used to store the inputs to and outputs from the execution service. Each piece of content is addressed by the digest of its binary data. Most of the binary data stored in the CAS is opaque to the execution engine, and is only used as a communication medium. In order to build an [Action][build.bazel.remote.execution.v2.Action], however, the client will need to also upload the [Command][build.bazel.remote.execution.v2.Command] and input root [Directory][build.bazel.remote.execution.v2.Directory] for the Action. The Command and Directory messages must be marshalled to wire format and then uploaded under the hash as with any other piece of content. In practice, the input root directory is likely to refer to other Directories in its hierarchy, which must also each be uploaded on their own. For small file uploads the client should group them together and call [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs] on chunks of no more than 10 MiB. For large uploads, the client must use the [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`, where `instance_name` is as described in the next paragraph, `uuid` is a version 4 UUID generated by the client, and `hash` and `size` are the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. 
The `uuid` is used only to avoid collisions when multiple clients try to upload the same file (or the same client tries to upload the file multiple times at once on different threads), so the client MAY reuse the `uuid` for uploading different blobs. The `resource_name` may optionally have a trailing filename (or other metadata) for a client to use if it is storing URLs, as in `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything after the `size` is ignored. A single server MAY support multiple instances of the execution system, each with their own workers, storage, cache, etc. The exact relationship between instances is up to the server. If the server does, then the `instance_name` is an identifier, possibly containing multiple path segments, used to distinguish between the various instances on the server, in a manner defined by the server. For servers which do not support multiple instances, then the `instance_name` is the empty path and the leading slash is omitted, so that the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`. When attempting an upload, if another client has already completed the upload (which may occur in the middle of a single upload if another client uploads the same blob concurrently), the request will terminate immediately with a response whose `committed_size` is the full size of the uploaded file (regardless of how much data was transmitted by the client). If the client completes the upload but the [Digest][build.bazel.remote.execution.v2.Digest] does not match, an `INVALID_ARGUMENT` error will be returned. In either case, the client should not attempt to retry the upload. For downloading blobs, the client must use the [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where `instance_name` is the instance name (see above), and `hash` and `size` are the [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The lifetime of entries in the CAS is implementation specific, but it SHOULD be long enough to allow for newly-added and recently looked-up entries to be used in subsequent calls (e.g. to [Execute][build.bazel.remote.execution.v2.Execution.Execute]). As with other services in the Remote Execution API, any call may return an error with a [RetryInfo][google.rpc.RetryInfo] error detail providing information about when the client should retry the request; clients SHOULD respect the information provided. 
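    For illustration only, a client would typically call FindMissingBlobs before
    uploading, along these lines; the endpoint and instance name are placeholder
    assumptions, and the digest function is assumed to be SHA-256:

        import hashlib

        import grpc

        from buildstream._protos.build.bazel.remote.execution.v2 import (
            remote_execution_pb2,
            remote_execution_pb2_grpc,
        )

        channel = grpc.insecure_channel('localhost:50051')  # assumed endpoint
        stub = remote_execution_pb2_grpc.ContentAddressableStorageStub(channel)

        data = b'example blob'
        # Assumes the server is configured for SHA-256 digests.
        digest = remote_execution_pb2.Digest(
            hash=hashlib.sha256(data).hexdigest(),
            size_bytes=len(data),
        )
        request = remote_execution_pb2.FindMissingBlobsRequest(
            instance_name='',
            blob_digests=[digest],
        )
        response = stub.FindMissingBlobs(request)

        # Only blobs listed here need to be uploaded.
        missing = list(response.missing_blob_digests)
        if missing:
            print('{} blob(s) need to be uploaded'.format(len(missing)))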
""" @staticmethod def FindMissingBlobs(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def BatchUpdateBlobs(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def BatchReadBlobs(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetTree(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) class CapabilitiesStub(object): """The Capabilities service may be used by remote execution clients to query various server properties, in order to self-configure or return meaningful error messages. The query may include a particular `instance_name`, in which case the values returned will pertain to that instance. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
""" self.GetCapabilities = channel.unary_unary( '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities', request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString, response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString, ) class CapabilitiesServicer(object): """The Capabilities service may be used by remote execution clients to query various server properties, in order to self-configure or return meaningful error messages. The query may include a particular `instance_name`, in which case the values returned will pertain to that instance. """ def GetCapabilities(self, request, context): """GetCapabilities returns the server capabilities configuration. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_CapabilitiesServicer_to_server(servicer, server): rpc_method_handlers = { 'GetCapabilities': grpc.unary_unary_rpc_method_handler( servicer.GetCapabilities, request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.FromString, response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'build.bazel.remote.execution.v2.Capabilities', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Capabilities(object): """The Capabilities service may be used by remote execution clients to query various server properties, in order to self-configure or return meaningful error messages. The query may include a particular `instance_name`, in which case the values returned will pertain to that instance. """ @staticmethod def GetCapabilities(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities', build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString, build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) buildstream-1.6.9/buildstream/_protos/build/bazel/semver/000077500000000000000000000000001437515270000235435ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/semver/__init__.py000066400000000000000000000000001437515270000256420ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/build/bazel/semver/semver.proto000066400000000000000000000013521437515270000261320ustar00rootroot00000000000000// Copyright 2018 The Bazel Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package build.bazel.semver; message SemVer { int32 major = 1; int32 minor = 2; int32 patch = 3; string prerelease = 4; } buildstream-1.6.9/buildstream/_protos/build/bazel/semver/semver_pb2.py000066400000000000000000000024521437515270000261640ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: build/bazel/semver/semver.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x62uild/bazel/semver/semver.proto\x12\x12\x62uild.bazel.semver\"I\n\x06SemVer\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x12\n\nprerelease\x18\x04 \x01(\tb\x06proto3') _SEMVER = DESCRIPTOR.message_types_by_name['SemVer'] SemVer = _reflection.GeneratedProtocolMessageType('SemVer', (_message.Message,), { 'DESCRIPTOR' : _SEMVER, '__module__' : 'build.bazel.semver.semver_pb2' # @@protoc_insertion_point(class_scope:build.bazel.semver.SemVer) }) _sym_db.RegisterMessage(SemVer) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _SEMVER._serialized_start=55 _SEMVER._serialized_end=128 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py000066400000000000000000000002371437515270000271760ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc buildstream-1.6.9/buildstream/_protos/buildstream/000077500000000000000000000000001437515270000223615ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/buildstream/__init__.py000066400000000000000000000000001437515270000244600ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/buildstream/v2/000077500000000000000000000000001437515270000227105ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/buildstream/v2/__init__.py000066400000000000000000000000001437515270000250070ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/buildstream/v2/buildstream.proto000066400000000000000000000065661437515270000263250ustar00rootroot00000000000000// Copyright 2018 Codethink Limited // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package buildstream.v2; import "build/bazel/remote/execution/v2/remote_execution.proto"; import "google/api/annotations.proto"; service ReferenceStorage { // Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory] // digest by name. // // Errors: // * `NOT_FOUND`: The requested reference is not in the cache. rpc GetReference(GetReferenceRequest) returns (GetReferenceResponse) { option (google.api.http) = { get: "/v2/{instance_name=**}/buildstream/refs/{key}" }; } // Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory] // digest. // // Errors: // * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the // entry to the cache. rpc UpdateReference(UpdateReferenceRequest) returns (UpdateReferenceResponse) { option (google.api.http) = { put: "/v2/{instance_name=**}/buildstream/refs/{key}" body: "digest" }; } rpc Status(StatusRequest) returns (StatusResponse) { option (google.api.http) = { put: "/v2/{instance_name=**}/buildstream/refs:status" }; } } message GetReferenceRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The name of the reference. string key = 2; } message GetReferenceResponse { // The digest of the CAS [Directory][build.bazel.remote.execution.v2.Directory]. build.bazel.remote.execution.v2.Digest digest = 1; } message UpdateReferenceRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; // The name of the reference. repeated string keys = 2; // The digest of the CAS [Directory][build.bazel.remote.execution.v2.Directory] // to store in the cache. build.bazel.remote.execution.v2.Digest digest = 3; } message UpdateReferenceResponse { } message StatusRequest { // The instance of the execution system to operate against. A server may // support multiple instances of the execution system (with their own workers, // storage, caches, etc.). The server MAY require use of this field to select // between them in an implementation-defined fashion, otherwise it can be // omitted. string instance_name = 1; } message StatusResponse { // Whether reference updates are allowed for the connected client. bool allow_updates = 1; } buildstream-1.6.9/buildstream/_protos/buildstream/v2/buildstream_pb2.py000066400000000000000000000136461437515270000263520ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: buildstream/v2/buildstream.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2 from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n buildstream/v2/buildstream.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"9\n\x13GetReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"O\n\x14GetReferenceResponse\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"v\n\x16UpdateReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x37\n\x06\x64igest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x19\n\x17UpdateReferenceResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\x32\xca\x03\n\x10ReferenceStorage\x12\x90\x01\n\x0cGetReference\x12#.buildstream.v2.GetReferenceRequest\x1a$.buildstream.v2.GetReferenceResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v2/{instance_name=**}/buildstream/refs/{key}\x12\xa1\x01\n\x0fUpdateReference\x12&.buildstream.v2.UpdateReferenceRequest\x1a\'.buildstream.v2.UpdateReferenceResponse\"=\x82\xd3\xe4\x93\x02\x37\x1a-/v2/{instance_name=**}/buildstream/refs/{key}:\x06\x64igest\x12\x7f\n\x06Status\x12\x1d.buildstream.v2.StatusRequest\x1a\x1e.buildstream.v2.StatusResponse\"6\x82\xd3\xe4\x93\x02\x30\x1a./v2/{instance_name=**}/buildstream/refs:statusb\x06proto3') _GETREFERENCEREQUEST = DESCRIPTOR.message_types_by_name['GetReferenceRequest'] _GETREFERENCERESPONSE = DESCRIPTOR.message_types_by_name['GetReferenceResponse'] _UPDATEREFERENCEREQUEST = DESCRIPTOR.message_types_by_name['UpdateReferenceRequest'] _UPDATEREFERENCERESPONSE = DESCRIPTOR.message_types_by_name['UpdateReferenceResponse'] _STATUSREQUEST = DESCRIPTOR.message_types_by_name['StatusRequest'] _STATUSRESPONSE = DESCRIPTOR.message_types_by_name['StatusResponse'] GetReferenceRequest = _reflection.GeneratedProtocolMessageType('GetReferenceRequest', (_message.Message,), { 'DESCRIPTOR' : _GETREFERENCEREQUEST, '__module__' : 'buildstream.v2.buildstream_pb2' # @@protoc_insertion_point(class_scope:buildstream.v2.GetReferenceRequest) }) _sym_db.RegisterMessage(GetReferenceRequest) GetReferenceResponse = _reflection.GeneratedProtocolMessageType('GetReferenceResponse', (_message.Message,), { 'DESCRIPTOR' : _GETREFERENCERESPONSE, '__module__' : 'buildstream.v2.buildstream_pb2' # @@protoc_insertion_point(class_scope:buildstream.v2.GetReferenceResponse) }) _sym_db.RegisterMessage(GetReferenceResponse) UpdateReferenceRequest = _reflection.GeneratedProtocolMessageType('UpdateReferenceRequest', (_message.Message,), { 'DESCRIPTOR' : _UPDATEREFERENCEREQUEST, '__module__' : 'buildstream.v2.buildstream_pb2' # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateReferenceRequest) }) _sym_db.RegisterMessage(UpdateReferenceRequest) 
UpdateReferenceResponse = _reflection.GeneratedProtocolMessageType('UpdateReferenceResponse', (_message.Message,), { 'DESCRIPTOR' : _UPDATEREFERENCERESPONSE, '__module__' : 'buildstream.v2.buildstream_pb2' # @@protoc_insertion_point(class_scope:buildstream.v2.UpdateReferenceResponse) }) _sym_db.RegisterMessage(UpdateReferenceResponse) StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), { 'DESCRIPTOR' : _STATUSREQUEST, '__module__' : 'buildstream.v2.buildstream_pb2' # @@protoc_insertion_point(class_scope:buildstream.v2.StatusRequest) }) _sym_db.RegisterMessage(StatusRequest) StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), { 'DESCRIPTOR' : _STATUSRESPONSE, '__module__' : 'buildstream.v2.buildstream_pb2' # @@protoc_insertion_point(class_scope:buildstream.v2.StatusResponse) }) _sym_db.RegisterMessage(StatusResponse) _REFERENCESTORAGE = DESCRIPTOR.services_by_name['ReferenceStorage'] if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None _REFERENCESTORAGE.methods_by_name['GetReference']._options = None _REFERENCESTORAGE.methods_by_name['GetReference']._serialized_options = b'\202\323\344\223\002/\022-/v2/{instance_name=**}/buildstream/refs/{key}' _REFERENCESTORAGE.methods_by_name['UpdateReference']._options = None _REFERENCESTORAGE.methods_by_name['UpdateReference']._serialized_options = b'\202\323\344\223\0027\032-/v2/{instance_name=**}/buildstream/refs/{key}:\006digest' _REFERENCESTORAGE.methods_by_name['Status']._options = None _REFERENCESTORAGE.methods_by_name['Status']._serialized_options = b'\202\323\344\223\0020\032./v2/{instance_name=**}/buildstream/refs:status' _GETREFERENCEREQUEST._serialized_start=138 _GETREFERENCEREQUEST._serialized_end=195 _GETREFERENCERESPONSE._serialized_start=197 _GETREFERENCERESPONSE._serialized_end=276 _UPDATEREFERENCEREQUEST._serialized_start=278 _UPDATEREFERENCEREQUEST._serialized_end=396 _UPDATEREFERENCERESPONSE._serialized_start=398 _UPDATEREFERENCERESPONSE._serialized_end=423 _STATUSREQUEST._serialized_start=425 _STATUSREQUEST._serialized_end=463 _STATUSRESPONSE._serialized_start=465 _STATUSRESPONSE._serialized_end=504 _REFERENCESTORAGE._serialized_start=507 _REFERENCESTORAGE._serialized_end=965 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py000066400000000000000000000145321437515270000273600ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from buildstream._protos.buildstream.v2 import buildstream_pb2 as buildstream_dot_v2_dot_buildstream__pb2 class ReferenceStorageStub(object): """Missing associated documentation comment in .proto file.""" def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. 
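        For illustration only, the endpoint, instance name and key below are
        placeholder assumptions:

            import grpc

            from buildstream._protos.buildstream.v2 import (
                buildstream_pb2,
                buildstream_pb2_grpc,
            )

            channel = grpc.insecure_channel('localhost:50051')  # assumed endpoint
            stub = buildstream_pb2_grpc.ReferenceStorageStub(channel)

            request = buildstream_pb2.GetReferenceRequest(
                instance_name='',                # assumed: single-instance server
                key='example/artifact/key',      # assumed reference name
            )
            response = stub.GetReference(request)

            # The response carries the digest of the referenced CAS Directory.
            print(response.digest.hash, response.digest.size_bytes)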
""" self.GetReference = channel.unary_unary( '/buildstream.v2.ReferenceStorage/GetReference', request_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString, response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString, ) self.UpdateReference = channel.unary_unary( '/buildstream.v2.ReferenceStorage/UpdateReference', request_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString, response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString, ) self.Status = channel.unary_unary( '/buildstream.v2.ReferenceStorage/Status', request_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString, response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString, ) class ReferenceStorageServicer(object): """Missing associated documentation comment in .proto file.""" def GetReference(self, request, context): """Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory] digest by name. Errors: * `NOT_FOUND`: The requested reference is not in the cache. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def UpdateReference(self, request, context): """Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory] digest. Errors: * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the entry to the cache. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Status(self, request, context): """Missing associated documentation comment in .proto file.""" context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ReferenceStorageServicer_to_server(servicer, server): rpc_method_handlers = { 'GetReference': grpc.unary_unary_rpc_method_handler( servicer.GetReference, request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.FromString, response_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.SerializeToString, ), 'UpdateReference': grpc.unary_unary_rpc_method_handler( servicer.UpdateReference, request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.FromString, response_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.SerializeToString, ), 'Status': grpc.unary_unary_rpc_method_handler( servicer.Status, request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.FromString, response_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'buildstream.v2.ReferenceStorage', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. 
class ReferenceStorage(object): """Missing associated documentation comment in .proto file.""" @staticmethod def GetReference(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/GetReference', buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString, buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def UpdateReference(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/UpdateReference', buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString, buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Status(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/Status', buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString, buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) buildstream-1.6.9/buildstream/_protos/google/000077500000000000000000000000001437515270000213225ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/__init__.py000066400000000000000000000000001437515270000234210ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/api/000077500000000000000000000000001437515270000220735ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/api/__init__.py000066400000000000000000000000001437515270000241720ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/api/annotations.proto000066400000000000000000000020331437515270000255130ustar00rootroot00000000000000// Copyright (c) 2015, Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.api; import "google/api/http.proto"; import "google/protobuf/descriptor.proto"; option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "AnnotationsProto"; option java_package = "com.google.api"; option objc_class_prefix = "GAPI"; extend google.protobuf.MethodOptions { // See `HttpRule`. 
HttpRule http = 72295728; } buildstream-1.6.9/buildstream/_protos/google/api/annotations_pb2.py000066400000000000000000000030351437515270000255460ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/api/annotations.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from buildstream._protos.google.api import http_pb2 as google_dot_api_dot_http__pb2 from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3') HTTP_FIELD_NUMBER = 72295728 http = DESCRIPTOR.extensions_by_name['http'] if _descriptor._USE_C_DESCRIPTORS == False: google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http) DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI' # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/google/api/annotations_pb2_grpc.py000066400000000000000000000002371437515270000265620ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc buildstream-1.6.9/buildstream/_protos/google/api/http.proto000066400000000000000000000271141437515270000241440ustar00rootroot00000000000000// Copyright 2018 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.api; option cc_enable_arenas = true; option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations"; option java_multiple_files = true; option java_outer_classname = "HttpProto"; option java_package = "com.google.api"; option objc_class_prefix = "GAPI"; // Defines the HTTP configuration for an API service. It contains a list of // [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method // to one or more HTTP REST API methods. message Http { // A list of HTTP configuration rules that apply to individual API methods. // // **NOTE:** All service configuration rules follow "last one wins" order. 
repeated HttpRule rules = 1; // When set to true, URL path parmeters will be fully URI-decoded except in // cases of single segment matches in reserved expansion, where "%2F" will be // left encoded. // // The default behavior is to not decode RFC 6570 reserved characters in multi // segment matches. bool fully_decode_reserved_expansion = 2; } // `HttpRule` defines the mapping of an RPC method to one or more HTTP // REST API methods. The mapping specifies how different portions of the RPC // request message are mapped to URL path, URL query parameters, and // HTTP request body. The mapping is typically specified as an // `google.api.http` annotation on the RPC method, // see "google/api/annotations.proto" for details. // // The mapping consists of a field specifying the path template and // method kind. The path template can refer to fields in the request // message, as in the example below which describes a REST GET // operation on a resource collection of messages: // // // service Messaging { // rpc GetMessage(GetMessageRequest) returns (Message) { // option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; // } // } // message GetMessageRequest { // message SubMessage { // string subfield = 1; // } // string message_id = 1; // mapped to the URL // SubMessage sub = 2; // `sub.subfield` is url-mapped // } // message Message { // string text = 1; // content of the resource // } // // The same http annotation can alternatively be expressed inside the // `GRPC API Configuration` YAML file. // // http: // rules: // - selector: .Messaging.GetMessage // get: /v1/messages/{message_id}/{sub.subfield} // // This definition enables an automatic, bidrectional mapping of HTTP // JSON to RPC. Example: // // HTTP | RPC // -----|----- // `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` // // In general, not only fields but also field paths can be referenced // from a path pattern. Fields mapped to the path pattern cannot be // repeated and must have a primitive (non-message) type. // // Any fields in the request message which are not bound by the path // pattern automatically become (optional) HTTP query // parameters. Assume the following definition of the request message: // // // service Messaging { // rpc GetMessage(GetMessageRequest) returns (Message) { // option (google.api.http).get = "/v1/messages/{message_id}"; // } // } // message GetMessageRequest { // message SubMessage { // string subfield = 1; // } // string message_id = 1; // mapped to the URL // int64 revision = 2; // becomes a parameter // SubMessage sub = 3; // `sub.subfield` becomes a parameter // } // // // This enables a HTTP JSON to RPC mapping as below: // // HTTP | RPC // -----|----- // `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` // // Note that fields which are mapped to HTTP parameters must have a // primitive type or a repeated primitive type. Message types are not // allowed. In the case of a repeated type, the parameter can be // repeated in the URL, as in `...?param=A¶m=B`. // // For HTTP method kinds which allow a request body, the `body` field // specifies the mapping. 
Consider a REST update method on the // message resource collection: // // // service Messaging { // rpc UpdateMessage(UpdateMessageRequest) returns (Message) { // option (google.api.http) = { // put: "/v1/messages/{message_id}" // body: "message" // }; // } // } // message UpdateMessageRequest { // string message_id = 1; // mapped to the URL // Message message = 2; // mapped to the body // } // // // The following HTTP JSON to RPC mapping is enabled, where the // representation of the JSON in the request body is determined by // protos JSON encoding: // // HTTP | RPC // -----|----- // `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` // // The special name `*` can be used in the body mapping to define that // every field not bound by the path template should be mapped to the // request body. This enables the following alternative definition of // the update method: // // service Messaging { // rpc UpdateMessage(Message) returns (Message) { // option (google.api.http) = { // put: "/v1/messages/{message_id}" // body: "*" // }; // } // } // message Message { // string message_id = 1; // string text = 2; // } // // // The following HTTP JSON to RPC mapping is enabled: // // HTTP | RPC // -----|----- // `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` // // Note that when using `*` in the body mapping, it is not possible to // have HTTP parameters, as all fields not bound by the path end in // the body. This makes this option more rarely used in practice of // defining REST APIs. The common usage of `*` is in custom methods // which don't use the URL at all for transferring data. // // It is possible to define multiple HTTP methods for one RPC by using // the `additional_bindings` option. Example: // // service Messaging { // rpc GetMessage(GetMessageRequest) returns (Message) { // option (google.api.http) = { // get: "/v1/messages/{message_id}" // additional_bindings { // get: "/v1/users/{user_id}/messages/{message_id}" // } // }; // } // } // message GetMessageRequest { // string message_id = 1; // string user_id = 2; // } // // // This enables the following two alternative HTTP JSON to RPC // mappings: // // HTTP | RPC // -----|----- // `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` // `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` // // # Rules for HTTP mapping // // The rules for mapping HTTP path, query parameters, and body fields // to the request message are as follows: // // 1. The `body` field specifies either `*` or a field path, or is // omitted. If omitted, it indicates there is no HTTP request body. // 2. Leaf fields (recursive expansion of nested messages in the // request) can be classified into three types: // (a) Matched in the URL template. // (b) Covered by body (if body is `*`, everything except (a) fields; // else everything under the body field) // (c) All other fields. // 3. URL query parameters found in the HTTP request are mapped to (c) fields. // 4. Any body sent with an HTTP request can contain only (b) fields. // // The syntax of the path template is as follows: // // Template = "/" Segments [ Verb ] ; // Segments = Segment { "/" Segment } ; // Segment = "*" | "**" | LITERAL | Variable ; // Variable = "{" FieldPath [ "=" Segments ] "}" ; // FieldPath = IDENT { "." IDENT } ; // Verb = ":" LITERAL ; // // The syntax `*` matches a single path segment. 
The syntax `**` matches zero // or more path segments, which must be the last part of the path except the // `Verb`. The syntax `LITERAL` matches literal text in the path. // // The syntax `Variable` matches part of the URL path as specified by its // template. A variable template must not contain other variables. If a variable // matches a single path segment, its template may be omitted, e.g. `{var}` // is equivalent to `{var=*}`. // // If a variable contains exactly one path segment, such as `"{var}"` or // `"{var=*}"`, when such a variable is expanded into a URL path, all characters // except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the // Discovery Document as `{var}`. // // If a variable contains one or more path segments, such as `"{var=foo/*}"` // or `"{var=**}"`, when such a variable is expanded into a URL path, all // characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables // show up in the Discovery Document as `{+var}`. // // NOTE: While the single segment variable matches the semantics of // [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 // Simple String Expansion, the multi segment variable **does not** match // RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion // does not expand special characters like `?` and `#`, which would lead // to invalid URLs. // // NOTE: the field paths in variables and in the `body` must not refer to // repeated fields or map fields. message HttpRule { // Selects methods to which this rule applies. // // Refer to [selector][google.api.DocumentationRule.selector] for syntax details. string selector = 1; // Determines the URL pattern is matched by this rules. This pattern can be // used with any of the {get|put|post|delete|patch} methods. A custom method // can be defined using the 'custom' field. oneof pattern { // Used for listing and getting information about resources. string get = 2; // Used for updating a resource. string put = 3; // Used for creating a resource. string post = 4; // Used for deleting a resource. string delete = 5; // Used for updating a resource. string patch = 6; // The custom pattern is used for specifying an HTTP method that is not // included in the `pattern` field, such as HEAD, or "*" to leave the // HTTP method unspecified for this rule. The wild-card rule is useful // for services that provide content to Web (HTML) clients. CustomHttpPattern custom = 8; } // The name of the request field whose value is mapped to the HTTP body, or // `*` for mapping all fields not captured by the path pattern to the HTTP // body. NOTE: the referred field must not be a repeated field and must be // present at the top-level of request message type. string body = 7; // Additional HTTP bindings for the selector. Nested bindings must // not contain an `additional_bindings` field themselves (that is, // the nesting may only be one level deep). repeated HttpRule additional_bindings = 11; } // A custom pattern is used for defining custom HTTP verb. message CustomHttpPattern { // The name of this custom HTTP verb. string kind = 1; // The path matched by this custom verb. string path = 2; } buildstream-1.6.9/buildstream/_protos/google/api/http_pb2.py000066400000000000000000000056141437515270000241750ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/api/http.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\xea\x01\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3') _HTTP = DESCRIPTOR.message_types_by_name['Http'] _HTTPRULE = DESCRIPTOR.message_types_by_name['HttpRule'] _CUSTOMHTTPPATTERN = DESCRIPTOR.message_types_by_name['CustomHttpPattern'] Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), { 'DESCRIPTOR' : _HTTP, '__module__' : 'google.api.http_pb2' # @@protoc_insertion_point(class_scope:google.api.Http) }) _sym_db.RegisterMessage(Http) HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), { 'DESCRIPTOR' : _HTTPRULE, '__module__' : 'google.api.http_pb2' # @@protoc_insertion_point(class_scope:google.api.HttpRule) }) _sym_db.RegisterMessage(HttpRule) CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), { 'DESCRIPTOR' : _CUSTOMHTTPPATTERN, '__module__' : 'google.api.http_pb2' # @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern) }) _sym_db.RegisterMessage(CustomHttpPattern) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI' _HTTP._serialized_start=37 _HTTP._serialized_end=121 _HTTPRULE._serialized_start=124 _HTTPRULE._serialized_end=358 _CUSTOMHTTPPATTERN._serialized_start=360 _CUSTOMHTTPPATTERN._serialized_end=407 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/google/api/http_pb2_grpc.py000066400000000000000000000002371437515270000252040ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
"""Client and server classes corresponding to protobuf-defined services.""" import grpc buildstream-1.6.9/buildstream/_protos/google/bytestream/000077500000000000000000000000001437515270000235015ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/bytestream/__init__.py000066400000000000000000000000001437515270000256000ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/bytestream/bytestream.proto000066400000000000000000000166611437515270000267570ustar00rootroot00000000000000// Copyright 2016 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.bytestream; import "google/api/annotations.proto"; import "google/protobuf/wrappers.proto"; option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream"; option java_outer_classname = "ByteStreamProto"; option java_package = "com.google.bytestream"; // #### Introduction // // The Byte Stream API enables a client to read and write a stream of bytes to // and from a resource. Resources have names, and these names are supplied in // the API calls below to identify the resource that is being read from or // written to. // // All implementations of the Byte Stream API export the interface defined here: // // * `Read()`: Reads the contents of a resource. // // * `Write()`: Writes the contents of a resource. The client can call `Write()` // multiple times with the same resource and can check the status of the write // by calling `QueryWriteStatus()`. // // #### Service parameters and metadata // // The ByteStream API provides no direct way to access/modify any metadata // associated with the resource. // // #### Errors // // The errors returned by the service are in the Google canonical error space. service ByteStream { // `Read()` is used to retrieve the contents of a resource as a sequence // of bytes. The bytes are returned in a sequence of responses, and the // responses are delivered as the results of a server-side streaming RPC. rpc Read(ReadRequest) returns (stream ReadResponse); // `Write()` is used to send the contents of a resource as a sequence of // bytes. The bytes are sent in a sequence of request protos of a client-side // streaming RPC. // // A `Write()` action is resumable. If there is an error or the connection is // broken during the `Write()`, the client should check the status of the // `Write()` by calling `QueryWriteStatus()` and continue writing from the // returned `committed_size`. This may be less than the amount of data the // client previously sent. // // Calling `Write()` on a resource name that was previously written and // finalized could cause an error, depending on whether the underlying service // allows over-writing of previously written resources. // // When the client closes the request channel, the service will respond with // a `WriteResponse`. The service will not view the resource as `complete` // until the client has sent a `WriteRequest` with `finish_write` set to // `true`. 
Sending any requests on a stream after sending a request with // `finish_write` set to `true` will cause an error. The client **should** // check the `WriteResponse` it receives to determine how much data the // service was able to commit and whether the service views the resource as // `complete` or not. rpc Write(stream WriteRequest) returns (WriteResponse); // `QueryWriteStatus()` is used to find the `committed_size` for a resource // that is being written, which can then be used as the `write_offset` for // the next `Write()` call. // // If the resource does not exist (i.e., the resource has been deleted, or the // first `Write()` has not yet reached the service), this method returns the // error `NOT_FOUND`. // // The client **may** call `QueryWriteStatus()` at any time to determine how // much data has been processed for this resource. This is useful if the // client is buffering data and needs to know which data can be safely // evicted. For any sequence of `QueryWriteStatus()` calls for a given // resource name, the sequence of returned `committed_size` values will be // non-decreasing. rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse); } // Request object for ByteStream.Read. message ReadRequest { // The name of the resource to read. string resource_name = 1; // The offset for the first byte to return in the read, relative to the start // of the resource. // // A `read_offset` that is negative or greater than the size of the resource // will cause an `OUT_OF_RANGE` error. int64 read_offset = 2; // The maximum number of `data` bytes the server is allowed to return in the // sum of all `ReadResponse` messages. A `read_limit` of zero indicates that // there is no limit, and a negative `read_limit` will cause an error. // // If the stream returns fewer bytes than allowed by the `read_limit` and no // error occurred, the stream includes all data from the `read_offset` to the // end of the resource. int64 read_limit = 3; } // Response object for ByteStream.Read. message ReadResponse { // A portion of the data for the resource. The service **may** leave `data` // empty for any given `ReadResponse`. This enables the service to inform the // client that the request is still live while it is running an operation to // generate more data. bytes data = 10; } // Request object for ByteStream.Write. message WriteRequest { // The name of the resource to write. This **must** be set on the first // `WriteRequest` of each `Write()` action. If it is set on subsequent calls, // it **must** match the value of the first request. string resource_name = 1; // The offset from the beginning of the resource at which the data should be // written. It is required on all `WriteRequest`s. // // In the first `WriteRequest` of a `Write()` action, it indicates // the initial offset for the `Write()` call. The value **must** be equal to // the `committed_size` that a call to `QueryWriteStatus()` would return. // // On subsequent calls, this value **must** be set and **must** be equal to // the sum of the first `write_offset` and the sizes of all `data` bundles // sent previously on this stream. // // An incorrect value will cause an error. int64 write_offset = 2; // If `true`, this indicates that the write is complete. Sending any // `WriteRequest`s subsequent to one in which `finish_write` is `true` will // cause an error. bool finish_write = 3; // A portion of the data for the resource. The client **may** leave `data` // empty for any given `WriteRequest`. 
This enables the client to inform the // service that the request is still live while it is running an operation to // generate more data. bytes data = 10; } // Response object for ByteStream.Write. message WriteResponse { // The number of bytes that have been processed for the given resource. int64 committed_size = 1; } // Request object for ByteStream.QueryWriteStatus. message QueryWriteStatusRequest { // The name of the resource whose write status is being requested. string resource_name = 1; } // Response object for ByteStream.QueryWriteStatus. message QueryWriteStatusResponse { // The number of bytes that have been processed for the given resource. int64 committed_size = 1; // `complete` is `true` only if the client has sent a `WriteRequest` with // `finish_write` set to true, and the server has processed that request. bool complete = 2; } buildstream-1.6.9/buildstream/_protos/google/bytestream/bytestream_pb2.py000066400000000000000000000122061437515270000267760ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/bytestream/bytestream.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"google/bytestream/bytestream.proto\x12\x11google.bytestream\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/wrappers.proto\"M\n\x0bReadRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x13\n\x0bread_offset\x18\x02 \x01(\x03\x12\x12\n\nread_limit\x18\x03 \x01(\x03\"\x1c\n\x0cReadResponse\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"_\n\x0cWriteRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x14\n\x0cwrite_offset\x18\x02 \x01(\x03\x12\x14\n\x0c\x66inish_write\x18\x03 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"\'\n\rWriteResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\"0\n\x17QueryWriteStatusRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"D\n\x18QueryWriteStatusResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x32\x92\x02\n\nByteStream\x12I\n\x04Read\x12\x1e.google.bytestream.ReadRequest\x1a\x1f.google.bytestream.ReadResponse0\x01\x12L\n\x05Write\x12\x1f.google.bytestream.WriteRequest\x1a .google.bytestream.WriteResponse(\x01\x12k\n\x10QueryWriteStatus\x12*.google.bytestream.QueryWriteStatusRequest\x1a+.google.bytestream.QueryWriteStatusResponseBe\n\x15\x63om.google.bytestreamB\x0f\x42yteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestreamb\x06proto3') _READREQUEST = DESCRIPTOR.message_types_by_name['ReadRequest'] _READRESPONSE = DESCRIPTOR.message_types_by_name['ReadResponse'] _WRITEREQUEST = DESCRIPTOR.message_types_by_name['WriteRequest'] _WRITERESPONSE = DESCRIPTOR.message_types_by_name['WriteResponse'] _QUERYWRITESTATUSREQUEST = DESCRIPTOR.message_types_by_name['QueryWriteStatusRequest'] _QUERYWRITESTATUSRESPONSE = DESCRIPTOR.message_types_by_name['QueryWriteStatusResponse'] ReadRequest = 
_reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), { 'DESCRIPTOR' : _READREQUEST, '__module__' : 'google.bytestream.bytestream_pb2' # @@protoc_insertion_point(class_scope:google.bytestream.ReadRequest) }) _sym_db.RegisterMessage(ReadRequest) ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), { 'DESCRIPTOR' : _READRESPONSE, '__module__' : 'google.bytestream.bytestream_pb2' # @@protoc_insertion_point(class_scope:google.bytestream.ReadResponse) }) _sym_db.RegisterMessage(ReadResponse) WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), { 'DESCRIPTOR' : _WRITEREQUEST, '__module__' : 'google.bytestream.bytestream_pb2' # @@protoc_insertion_point(class_scope:google.bytestream.WriteRequest) }) _sym_db.RegisterMessage(WriteRequest) WriteResponse = _reflection.GeneratedProtocolMessageType('WriteResponse', (_message.Message,), { 'DESCRIPTOR' : _WRITERESPONSE, '__module__' : 'google.bytestream.bytestream_pb2' # @@protoc_insertion_point(class_scope:google.bytestream.WriteResponse) }) _sym_db.RegisterMessage(WriteResponse) QueryWriteStatusRequest = _reflection.GeneratedProtocolMessageType('QueryWriteStatusRequest', (_message.Message,), { 'DESCRIPTOR' : _QUERYWRITESTATUSREQUEST, '__module__' : 'google.bytestream.bytestream_pb2' # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusRequest) }) _sym_db.RegisterMessage(QueryWriteStatusRequest) QueryWriteStatusResponse = _reflection.GeneratedProtocolMessageType('QueryWriteStatusResponse', (_message.Message,), { 'DESCRIPTOR' : _QUERYWRITESTATUSRESPONSE, '__module__' : 'google.bytestream.bytestream_pb2' # @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusResponse) }) _sym_db.RegisterMessage(QueryWriteStatusResponse) _BYTESTREAM = DESCRIPTOR.services_by_name['ByteStream'] if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\025com.google.bytestreamB\017ByteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestream' _READREQUEST._serialized_start=119 _READREQUEST._serialized_end=196 _READRESPONSE._serialized_start=198 _READRESPONSE._serialized_end=226 _WRITEREQUEST._serialized_start=228 _WRITEREQUEST._serialized_end=323 _WRITERESPONSE._serialized_start=325 _WRITERESPONSE._serialized_end=364 _QUERYWRITESTATUSREQUEST._serialized_start=366 _QUERYWRITESTATUSREQUEST._serialized_end=414 _QUERYWRITESTATUSRESPONSE._serialized_start=416 _QUERYWRITESTATUSRESPONSE._serialized_end=484 _BYTESTREAM._serialized_start=487 _BYTESTREAM._serialized_end=761 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py000066400000000000000000000247561437515270000300260ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from buildstream._protos.google.bytestream import bytestream_pb2 as google_dot_bytestream_dot_bytestream__pb2 class ByteStreamStub(object): """#### Introduction The Byte Stream API enables a client to read and write a stream of bytes to and from a resource. Resources have names, and these names are supplied in the API calls below to identify the resource that is being read from or written to. All implementations of the Byte Stream API export the interface defined here: * `Read()`: Reads the contents of a resource. * `Write()`: Writes the contents of a resource. 
The client can call `Write()` multiple times with the same resource and can check the status of the write by calling `QueryWriteStatus()`. #### Service parameters and metadata The ByteStream API provides no direct way to access/modify any metadata associated with the resource. #### Errors The errors returned by the service are in the Google canonical error space. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.Read = channel.unary_stream( '/google.bytestream.ByteStream/Read', request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString, response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString, ) self.Write = channel.stream_unary( '/google.bytestream.ByteStream/Write', request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString, response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString, ) self.QueryWriteStatus = channel.unary_unary( '/google.bytestream.ByteStream/QueryWriteStatus', request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString, response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString, ) class ByteStreamServicer(object): """#### Introduction The Byte Stream API enables a client to read and write a stream of bytes to and from a resource. Resources have names, and these names are supplied in the API calls below to identify the resource that is being read from or written to. All implementations of the Byte Stream API export the interface defined here: * `Read()`: Reads the contents of a resource. * `Write()`: Writes the contents of a resource. The client can call `Write()` multiple times with the same resource and can check the status of the write by calling `QueryWriteStatus()`. #### Service parameters and metadata The ByteStream API provides no direct way to access/modify any metadata associated with the resource. #### Errors The errors returned by the service are in the Google canonical error space. """ def Read(self, request, context): """`Read()` is used to retrieve the contents of a resource as a sequence of bytes. The bytes are returned in a sequence of responses, and the responses are delivered as the results of a server-side streaming RPC. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def Write(self, request_iterator, context): """`Write()` is used to send the contents of a resource as a sequence of bytes. The bytes are sent in a sequence of request protos of a client-side streaming RPC. A `Write()` action is resumable. If there is an error or the connection is broken during the `Write()`, the client should check the status of the `Write()` by calling `QueryWriteStatus()` and continue writing from the returned `committed_size`. This may be less than the amount of data the client previously sent. Calling `Write()` on a resource name that was previously written and finalized could cause an error, depending on whether the underlying service allows over-writing of previously written resources. When the client closes the request channel, the service will respond with a `WriteResponse`. The service will not view the resource as `complete` until the client has sent a `WriteRequest` with `finish_write` set to `true`. Sending any requests on a stream after sending a request with `finish_write` set to `true` will cause an error. 
The client **should** check the `WriteResponse` it receives to determine how much data the service was able to commit and whether the service views the resource as `complete` or not. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def QueryWriteStatus(self, request, context): """`QueryWriteStatus()` is used to find the `committed_size` for a resource that is being written, which can then be used as the `write_offset` for the next `Write()` call. If the resource does not exist (i.e., the resource has been deleted, or the first `Write()` has not yet reached the service), this method returns the error `NOT_FOUND`. The client **may** call `QueryWriteStatus()` at any time to determine how much data has been processed for this resource. This is useful if the client is buffering data and needs to know which data can be safely evicted. For any sequence of `QueryWriteStatus()` calls for a given resource name, the sequence of returned `committed_size` values will be non-decreasing. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_ByteStreamServicer_to_server(servicer, server): rpc_method_handlers = { 'Read': grpc.unary_stream_rpc_method_handler( servicer.Read, request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString, response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString, ), 'Write': grpc.stream_unary_rpc_method_handler( servicer.Write, request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString, response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString, ), 'QueryWriteStatus': grpc.unary_unary_rpc_method_handler( servicer.QueryWriteStatus, request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString, response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.bytestream.ByteStream', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class ByteStream(object): """#### Introduction The Byte Stream API enables a client to read and write a stream of bytes to and from a resource. Resources have names, and these names are supplied in the API calls below to identify the resource that is being read from or written to. All implementations of the Byte Stream API export the interface defined here: * `Read()`: Reads the contents of a resource. * `Write()`: Writes the contents of a resource. The client can call `Write()` multiple times with the same resource and can check the status of the write by calling `QueryWriteStatus()`. #### Service parameters and metadata The ByteStream API provides no direct way to access/modify any metadata associated with the resource. #### Errors The errors returned by the service are in the Google canonical error space. 
""" @staticmethod def Read(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_stream(request, target, '/google.bytestream.ByteStream/Read', google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString, google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def Write(request_iterator, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.stream_unary(request_iterator, target, '/google.bytestream.ByteStream/Write', google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString, google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def QueryWriteStatus(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/google.bytestream.ByteStream/QueryWriteStatus', google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString, google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) buildstream-1.6.9/buildstream/_protos/google/longrunning/000077500000000000000000000000001437515270000236625ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/longrunning/__init__.py000066400000000000000000000000001437515270000257610ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/longrunning/operations.proto000066400000000000000000000154571437515270000271460ustar00rootroot00000000000000// Copyright 2016 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. syntax = "proto3"; package google.longrunning; import "google/api/annotations.proto"; import "google/protobuf/any.proto"; import "google/protobuf/empty.proto"; import "google/rpc/status.proto"; option csharp_namespace = "Google.LongRunning"; option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning"; option java_multiple_files = true; option java_outer_classname = "OperationsProto"; option java_package = "com.google.longrunning"; option php_namespace = "Google\\LongRunning"; // Manages long-running operations with an API service. 
// // When an API method normally takes long time to complete, it can be designed // to return [Operation][google.longrunning.Operation] to the client, and the client can use this // interface to receive the real response asynchronously by polling the // operation resource, or pass the operation resource to another API (such as // Google Cloud Pub/Sub API) to receive the response. Any API service that // returns long-running operations should implement the `Operations` interface // so developers can have a consistent client experience. service Operations { // Lists operations that match the specified filter in the request. If the // server doesn't support this method, it returns `UNIMPLEMENTED`. // // NOTE: the `name` binding below allows API services to override the binding // to use different resource name schemes, such as `users/*/operations`. rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) { option (google.api.http) = { get: "/v1/{name=operations}" }; } // Gets the latest state of a long-running operation. Clients can use this // method to poll the operation result at intervals as recommended by the API // service. rpc GetOperation(GetOperationRequest) returns (Operation) { option (google.api.http) = { get: "/v1/{name=operations/**}" }; } // Deletes a long-running operation. This method indicates that the client is // no longer interested in the operation result. It does not cancel the // operation. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1/{name=operations/**}" }; } // Starts asynchronous cancellation on a long-running operation. The server // makes a best effort to cancel the operation, but success is not // guaranteed. If the server doesn't support this method, it returns // `google.rpc.Code.UNIMPLEMENTED`. Clients can use // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or // other methods to check whether the cancellation succeeded or whether the // operation completed despite cancellation. On successful cancellation, // the operation is not deleted; instead, it becomes an operation with // an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, // corresponding to `Code.CANCELLED`. rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) { option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" }; } } // This resource represents a long-running operation that is the result of a // network API call. message Operation { // The server-assigned name, which is only unique within the same service that // originally returns it. If you use the default HTTP mapping, the // `name` should have the format of `operations/some/unique/name`. string name = 1; // Service-specific metadata associated with the operation. It typically // contains progress information and common metadata such as create time. // Some services might not provide such metadata. Any method that returns a // long-running operation should document the metadata type, if any. google.protobuf.Any metadata = 2; // If the value is `false`, it means the operation is still in progress. // If true, the operation is completed, and either `error` or `response` is // available. bool done = 3; // The operation result, which can be either an `error` or a valid `response`. 
// If `done` == `false`, neither `error` nor `response` is set. // If `done` == `true`, exactly one of `error` or `response` is set. oneof result { // The error result of the operation in case of failure or cancellation. google.rpc.Status error = 4; // The normal response of the operation in case of success. If the original // method returns no data on success, such as `Delete`, the response is // `google.protobuf.Empty`. If the original method is standard // `Get`/`Create`/`Update`, the response should be the resource. For other // methods, the response should have the type `XxxResponse`, where `Xxx` // is the original method name. For example, if the original method name // is `TakeSnapshot()`, the inferred response type is // `TakeSnapshotResponse`. google.protobuf.Any response = 5; } } // The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation]. message GetOperationRequest { // The name of the operation resource. string name = 1; } // The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. message ListOperationsRequest { // The name of the operation collection. string name = 4; // The standard list filter. string filter = 1; // The standard list page size. int32 page_size = 2; // The standard list page token. string page_token = 3; } // The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations]. message ListOperationsResponse { // A list of operations that matches the specified filter in the request. repeated Operation operations = 1; // The standard List next-page token. string next_page_token = 2; } // The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation]. message CancelOperationRequest { // The name of the operation resource to be cancelled. string name = 1; } // The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation]. message DeleteOperationRequest { // The name of the operation resource to be deleted. string name = 1; } buildstream-1.6.9/buildstream/_protos/google/longrunning/operations_pb2.py000066400000000000000000000157551437515270000271770ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: google/longrunning/operations.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#google/longrunning/operations.proto\x12\x12google.longrunning\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/protobuf/any.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x17google/rpc/status.proto\"\xa8\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12#\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"#\n\x13GetOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\\\n\x15ListOperationsRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x16ListOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"&\n\x16\x43\x61ncelOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16\x44\x65leteOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t2\x8c\x04\n\nOperations\x12\x86\x01\n\x0eListOperations\x12).google.longrunning.ListOperationsRequest\x1a*.google.longrunning.ListOperationsResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/v1/{name=operations}\x12x\n\x0cGetOperation\x12\'.google.longrunning.GetOperationRequest\x1a\x1d.google.longrunning.Operation\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/v1/{name=operations/**}\x12w\n\x0f\x44\x65leteOperation\x12*.google.longrunning.DeleteOperationRequest\x1a\x16.google.protobuf.Empty\" \x82\xd3\xe4\x93\x02\x1a*\x18/v1/{name=operations/**}\x12\x81\x01\n\x0f\x43\x61ncelOperation\x12*.google.longrunning.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"*\x82\xd3\xe4\x93\x02$\"\x1f/v1/{name=operations/**}:cancel:\x01*B\x94\x01\n\x16\x63om.google.longrunningB\x0fOperationsProtoP\x01Z=google.golang.org/genproto/googleapis/longrunning;longrunning\xaa\x02\x12Google.LongRunning\xca\x02\x12Google\\LongRunningb\x06proto3') _OPERATION = DESCRIPTOR.message_types_by_name['Operation'] _GETOPERATIONREQUEST = DESCRIPTOR.message_types_by_name['GetOperationRequest'] _LISTOPERATIONSREQUEST = DESCRIPTOR.message_types_by_name['ListOperationsRequest'] _LISTOPERATIONSRESPONSE = DESCRIPTOR.message_types_by_name['ListOperationsResponse'] _CANCELOPERATIONREQUEST = DESCRIPTOR.message_types_by_name['CancelOperationRequest'] _DELETEOPERATIONREQUEST = DESCRIPTOR.message_types_by_name['DeleteOperationRequest'] Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), { 'DESCRIPTOR' : _OPERATION, '__module__' : 'google.longrunning.operations_pb2' # @@protoc_insertion_point(class_scope:google.longrunning.Operation) }) 
_sym_db.RegisterMessage(Operation) GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), { 'DESCRIPTOR' : _GETOPERATIONREQUEST, '__module__' : 'google.longrunning.operations_pb2' # @@protoc_insertion_point(class_scope:google.longrunning.GetOperationRequest) }) _sym_db.RegisterMessage(GetOperationRequest) ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), { 'DESCRIPTOR' : _LISTOPERATIONSREQUEST, '__module__' : 'google.longrunning.operations_pb2' # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsRequest) }) _sym_db.RegisterMessage(ListOperationsRequest) ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), { 'DESCRIPTOR' : _LISTOPERATIONSRESPONSE, '__module__' : 'google.longrunning.operations_pb2' # @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsResponse) }) _sym_db.RegisterMessage(ListOperationsResponse) CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), { 'DESCRIPTOR' : _CANCELOPERATIONREQUEST, '__module__' : 'google.longrunning.operations_pb2' # @@protoc_insertion_point(class_scope:google.longrunning.CancelOperationRequest) }) _sym_db.RegisterMessage(CancelOperationRequest) DeleteOperationRequest = _reflection.GeneratedProtocolMessageType('DeleteOperationRequest', (_message.Message,), { 'DESCRIPTOR' : _DELETEOPERATIONREQUEST, '__module__' : 'google.longrunning.operations_pb2' # @@protoc_insertion_point(class_scope:google.longrunning.DeleteOperationRequest) }) _sym_db.RegisterMessage(DeleteOperationRequest) _OPERATIONS = DESCRIPTOR.services_by_name['Operations'] if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\026com.google.longrunningB\017OperationsProtoP\001Z=google.golang.org/genproto/googleapis/longrunning;longrunning\252\002\022Google.LongRunning\312\002\022Google\\LongRunning' _OPERATIONS.methods_by_name['ListOperations']._options = None _OPERATIONS.methods_by_name['ListOperations']._serialized_options = b'\202\323\344\223\002\027\022\025/v1/{name=operations}' _OPERATIONS.methods_by_name['GetOperation']._options = None _OPERATIONS.methods_by_name['GetOperation']._serialized_options = b'\202\323\344\223\002\032\022\030/v1/{name=operations/**}' _OPERATIONS.methods_by_name['DeleteOperation']._options = None _OPERATIONS.methods_by_name['DeleteOperation']._serialized_options = b'\202\323\344\223\002\032*\030/v1/{name=operations/**}' _OPERATIONS.methods_by_name['CancelOperation']._options = None _OPERATIONS.methods_by_name['CancelOperation']._serialized_options = b'\202\323\344\223\002$\"\037/v1/{name=operations/**}:cancel:\001*' _OPERATION._serialized_start=171 _OPERATION._serialized_end=339 _GETOPERATIONREQUEST._serialized_start=341 _GETOPERATIONREQUEST._serialized_end=376 _LISTOPERATIONSREQUEST._serialized_start=378 _LISTOPERATIONSREQUEST._serialized_end=470 _LISTOPERATIONSRESPONSE._serialized_start=472 _LISTOPERATIONSRESPONSE._serialized_end=572 _CANCELOPERATIONREQUEST._serialized_start=574 _CANCELOPERATIONREQUEST._serialized_end=612 _DELETEOPERATIONREQUEST._serialized_start=614 _DELETEOPERATIONREQUEST._serialized_end=652 _OPERATIONS._serialized_start=655 _OPERATIONS._serialized_end=1179 # @@protoc_insertion_point(module_scope) 
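# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the generated modules):
# a minimal, standalone client-side example of how the generated ByteStream
# and Operations stubs bundled in this package might be driven. The endpoint
# address and the resource/operation names are hypothetical placeholders,
# error handling is omitted, and this belongs in a client module rather than
# in any of the "DO NOT EDIT" generated files above.
# ---------------------------------------------------------------------------
import grpc

from buildstream._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
from buildstream._protos.google.longrunning import operations_pb2, operations_pb2_grpc


def read_resource(endpoint, resource_name):
    """Stream the full contents of a resource via ByteStream.Read()."""
    with grpc.insecure_channel(endpoint) as channel:
        bytestream = bytestream_pb2_grpc.ByteStreamStub(channel)
        request = bytestream_pb2.ReadRequest(
            resource_name=resource_name,
            read_offset=0,  # start at the beginning of the resource
            read_limit=0,   # zero means "no limit" per the .proto documentation
        )
        # Read() is a server-streaming RPC; concatenate the returned chunks.
        return b"".join(response.data for response in bytestream.Read(request))


def operation_is_done(endpoint, operation_name):
    """Poll a long-running operation once and report whether it has completed."""
    with grpc.insecure_channel(endpoint) as channel:
        operations = operations_pb2_grpc.OperationsStub(channel)
        request = operations_pb2.GetOperationRequest(name=operation_name)
        return operations.GetOperation(request).done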
buildstream-1.6.9/buildstream/_protos/google/longrunning/operations_pb2_grpc.py000066400000000000000000000253101437515270000301760ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2 from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 class OperationsStub(object): """Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience. """ def __init__(self, channel): """Constructor. Args: channel: A grpc.Channel. """ self.ListOperations = channel.unary_unary( '/google.longrunning.Operations/ListOperations', request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString, ) self.GetOperation = channel.unary_unary( '/google.longrunning.Operations/GetOperation', request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString, response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, ) self.DeleteOperation = channel.unary_unary( '/google.longrunning.Operations/DeleteOperation', request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) self.CancelOperation = channel.unary_unary( '/google.longrunning.Operations/CancelOperation', request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString, response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, ) class OperationsServicer(object): """Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience. """ def ListOperations(self, request, context): """Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding below allows API services to override the binding to use different resource name schemes, such as `users/*/operations`. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def GetOperation(self, request, context): """Gets the latest state of a long-running operation. 
Clients can use this method to poll the operation result at intervals as recommended by the API service. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def DeleteOperation(self, request, context): """Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def CancelOperation(self, request, context): """Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use [Operations.GetOperation][google.longrunning.Operations.GetOperation] or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') def add_OperationsServicer_to_server(servicer, server): rpc_method_handlers = { 'ListOperations': grpc.unary_unary_rpc_method_handler( servicer.ListOperations, request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString, ), 'GetOperation': grpc.unary_unary_rpc_method_handler( servicer.GetOperation, request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString, response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, ), 'DeleteOperation': grpc.unary_unary_rpc_method_handler( servicer.DeleteOperation, request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), 'CancelOperation': grpc.unary_unary_rpc_method_handler( servicer.CancelOperation, request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString, response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, ), } generic_handler = grpc.method_handlers_generic_handler( 'google.longrunning.Operations', rpc_method_handlers) server.add_generic_rpc_handlers((generic_handler,)) # This class is part of an EXPERIMENTAL API. class Operations(object): """Manages long-running operations with an API service. When an API method normally takes long time to complete, it can be designed to return [Operation][google.longrunning.Operation] to the client, and the client can use this interface to receive the real response asynchronously by polling the operation resource, or pass the operation resource to another API (such as Google Cloud Pub/Sub API) to receive the response. 
Any API service that returns long-running operations should implement the `Operations` interface so developers can have a consistent client experience. """ @staticmethod def ListOperations(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/ListOperations', google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString, google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def GetOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/GetOperation', google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString, google_dot_longrunning_dot_operations__pb2.Operation.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def DeleteOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/DeleteOperation', google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) @staticmethod def CancelOperation(request, target, options=(), channel_credentials=None, call_credentials=None, insecure=False, compression=None, wait_for_ready=None, timeout=None, metadata=None): return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/CancelOperation', google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString, google_dot_protobuf_dot_empty__pb2.Empty.FromString, options, channel_credentials, insecure, call_credentials, compression, wait_for_ready, timeout, metadata) buildstream-1.6.9/buildstream/_protos/google/rpc/000077500000000000000000000000001437515270000221065ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/rpc/__init__.py000066400000000000000000000000001437515270000242050ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_protos/google/rpc/status.proto000066400000000000000000000077171437515270000245320ustar00rootroot00000000000000// Copyright 2017 Google Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
syntax = "proto3"; package google.rpc; import "google/protobuf/any.proto"; option go_package = "google.golang.org/genproto/googleapis/rpc/status;status"; option java_multiple_files = true; option java_outer_classname = "StatusProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; // The `Status` type defines a logical error model that is suitable for different // programming environments, including REST APIs and RPC APIs. It is used by // [gRPC](https://github.com/grpc). The error model is designed to be: // // - Simple to use and understand for most users // - Flexible enough to meet unexpected needs // // # Overview // // The `Status` message contains three pieces of data: error code, error message, // and error details. The error code should be an enum value of // [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The // error message should be a developer-facing English message that helps // developers *understand* and *resolve* the error. If a localized user-facing // error message is needed, put the localized message in the error details or // localize it in the client. The optional error details may contain arbitrary // information about the error. There is a predefined set of error detail types // in the package `google.rpc` that can be used for common error conditions. // // # Language mapping // // The `Status` message is the logical representation of the error model, but it // is not necessarily the actual wire format. When the `Status` message is // exposed in different client libraries and different wire protocols, it can be // mapped differently. For example, it will likely be mapped to some exceptions // in Java, but more likely mapped to some error codes in C. // // # Other uses // // The error model and the `Status` message can be used in a variety of // environments, either with or without APIs, to provide a // consistent developer experience across different environments. // // Example uses of this error model include: // // - Partial errors. If a service needs to return partial errors to the client, // it may embed the `Status` in the normal response to indicate the partial // errors. // // - Workflow errors. A typical workflow has multiple steps. Each step may // have a `Status` message for error reporting. // // - Batch operations. If a client uses batch request and batch response, the // `Status` message should be used directly inside batch response, one for // each error sub-response. // // - Asynchronous operations. If an API call embeds asynchronous operation // results in its response, the status of those operations should be // represented directly using the `Status` message. // // - Logging. If some API errors are stored in logs, the message `Status` could // be used directly after any stripping needed for security/privacy reasons. message Status { // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. int32 code = 1; // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. string message = 2; // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
repeated google.protobuf.Any details = 3; } buildstream-1.6.9/buildstream/_protos/google/rpc/status_pb2.py000066400000000000000000000031641437515270000245520ustar00rootroot00000000000000# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! # source: google/rpc/status.proto """Generated protocol buffer code.""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyB^\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xa2\x02\x03RPCb\x06proto3') _STATUS = DESCRIPTOR.message_types_by_name['Status'] Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), { 'DESCRIPTOR' : _STATUS, '__module__' : 'google.rpc.status_pb2' # @@protoc_insertion_point(class_scope:google.rpc.Status) }) _sym_db.RegisterMessage(Status) if _descriptor._USE_C_DESCRIPTORS == False: DESCRIPTOR._options = None DESCRIPTOR._serialized_options = b'\n\016com.google.rpcB\013StatusProtoP\001Z7google.golang.org/genproto/googleapis/rpc/status;status\242\002\003RPC' _STATUS._serialized_start=66 _STATUS._serialized_end=144 # @@protoc_insertion_point(module_scope) buildstream-1.6.9/buildstream/_protos/google/rpc/status_pb2_grpc.py000066400000000000000000000002371437515270000255630ustar00rootroot00000000000000# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services.""" import grpc buildstream-1.6.9/buildstream/_scheduler/000077500000000000000000000000001437515270000204765ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_scheduler/__init__.py000066400000000000000000000021401437515270000226040ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom from .queues import Queue, QueueStatus from .queues.fetchqueue import FetchQueue from .queues.trackqueue import TrackQueue from .queues.buildqueue import BuildQueue from .queues.pushqueue import PushQueue from .queues.pullqueue import PullQueue from .scheduler import Scheduler, SchedStatus from .jobs import ElementJob, JobStatus buildstream-1.6.9/buildstream/_scheduler/_multiprocessing.py000066400000000000000000000061331437515270000244410ustar00rootroot00000000000000# # Copyright (C) 2019 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # TLDR: # ALWAYS use `.AsyncioSafeProcess` when you have an asyncio event loop running and need a `multiprocessing.Process` # # # The upstream asyncio library doesn't play well with forking subprocesses while an event loop is running. # # The main problem that affects us is that the parent and the child will share some file handlers. # The most important one for us is the sig_handler_fd, which the loop uses to buffer signals received # by the app so that the asyncio loop can treat them afterwards. # # This sharing means that when we send a signal to the child, the sighandler in the child will write # it back to the parent sig_handler_fd, making the parent have to treat it too. # This is a problem for example when we sigterm the process. The scheduler will send sigterms to all its children, # which in turn will make the scheduler receive N SIGTERMs (one per child). Which in turn will send sigterms to # the children... # # We therefore provide a `AsyncioSafeProcess` derived from multiprocessing.Process that automatically # tries to cleanup the loop and never calls `waitpid` on the child process, which breaks our child watchers. # # # Relevant issues: # - Asyncio: support fork (https://bugs.python.org/issue21998) # - Asyncio: support multiprocessing (support fork) (https://bugs.python.org/issue22087) # - Signal delivered to a subprocess triggers parent's handler (https://bugs.python.org/issue31489) # # import multiprocessing import signal import sys from asyncio import set_event_loop_policy # _AsyncioSafeForkAwareProcess() # # Process class that doesn't call waitpid on its own. # This prevents conflicts with the asyncio child watcher. 
# # Also automatically close any running asyncio loop before calling # the actual run target # class _AsyncioSafeForkAwareProcess(multiprocessing.Process): # pylint: disable=attribute-defined-outside-init def start(self): self._popen = self._Popen(self) self._sentinel = self._popen.sentinel def run(self): signal.set_wakeup_fd(-1) set_event_loop_policy(None) super().run() if sys.platform != "win32": # Set the default event loop policy to automatically close our asyncio loop in child processes AsyncioSafeProcess = _AsyncioSafeForkAwareProcess else: # Windows doesn't support ChildWatcher that way anyways, we'll need another # implementation if we want it AsyncioSafeProcess = multiprocessing.Process buildstream-1.6.9/buildstream/_scheduler/jobs/000077500000000000000000000000001437515270000214335ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_scheduler/jobs/__init__.py000066400000000000000000000002101437515270000235350ustar00rootroot00000000000000from .elementjob import ElementJob from .cachesizejob import CacheSizeJob from .cleanupjob import CleanupJob from .job import JobStatus buildstream-1.6.9/buildstream/_scheduler/jobs/cachesizejob.py000066400000000000000000000026061437515270000244420ustar00rootroot00000000000000# Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Author: # Tristan Daniël Maat # from .job import Job, JobStatus class CacheSizeJob(Job): def __init__(self, *args, complete_cb, **kwargs): super().__init__(*args, **kwargs) self._complete_cb = complete_cb context = self._scheduler.context self._artifacts = context.artifactcache def child_process(self): return self._artifacts.compute_cache_size() def parent_complete(self, status, result): if status == JobStatus.OK: self._artifacts.set_cache_size(result) if self._complete_cb: self._complete_cb(status, result) def child_process_data(self): return {} buildstream-1.6.9/buildstream/_scheduler/jobs/cleanupjob.py000066400000000000000000000034171437515270000241340ustar00rootroot00000000000000# Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Author: # Tristan Daniël Maat # from .job import Job, JobStatus class CleanupJob(Job): def __init__(self, *args, complete_cb, **kwargs): super().__init__(*args, **kwargs) self._complete_cb = complete_cb context = self._scheduler.context self._artifacts = context.artifactcache def child_process(self): def progress(): self.send_message('update-cache-size', self._artifacts.get_cache_size()) return self._artifacts.clean(progress) def handle_message(self, message_type, message): # Update the cache size in the main process as we go, # this provides better feedback in the UI. if message_type == 'update-cache-size': self._artifacts.set_cache_size(message) return True return False def parent_complete(self, status, result): if status == JobStatus.OK: self._artifacts.set_cache_size(result) if self._complete_cb: self._complete_cb(status, result) buildstream-1.6.9/buildstream/_scheduler/jobs/elementjob.py000066400000000000000000000077431437515270000241440ustar00rootroot00000000000000# Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Author: # Tristan Daniël Maat # from ruamel import yaml from ..._message import Message, MessageType from .job import Job # ElementJob() # # A job to run an element's commands. When this job is spawned # `action_cb` will be called, and when it completes `complete_cb` will # be called. # # Args: # scheduler (Scheduler): The scheduler # action_name (str): The queue action name # max_retries (int): The maximum number of retries # action_cb (callable): The function to execute on the child # complete_cb (callable): The function to execute when the job completes # element (Element): The element to work on # kwargs: Remaining Job() constructor arguments # # Here is the calling signature of the action_cb: # # action_cb(): # # This function will be called in the child task # # Args: # element (Element): The element passed to the Job() constructor # # Returns: # (object): Any abstract simple python object, including a string, int, # bool, list or dict, this must be a simple serializable object. 
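#
# As a rough illustration (not the only possibility), the action_cb that
# the build queue passes in is its process() method, which essentially does:
#
#     def process(self, element):
#         return element._assemble()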
# # Here is the calling signature of the complete_cb: # # complete_cb(): # # This function will be called when the child task completes # # Args: # job (Job): The job object which completed # element (Element): The element passed to the Job() constructor # status (JobStatus): The status of whether the workload raised an exception # result (object): The deserialized object returned by the `action_cb`, or None # if `success` is False # class ElementJob(Job): def __init__(self, *args, element, queue, action_cb, complete_cb, **kwargs): super().__init__(*args, **kwargs) self.queue = queue self._element = element self._action_cb = action_cb # The action callable function self._complete_cb = complete_cb # The complete callable function # Set the task wide ID for logging purposes self.set_task_id(element._unique_id) @property def element(self): return self._element def child_process(self): # Print the element's environment at the beginning of any element's log file. # # This should probably be omitted for non-build tasks but it's harmless here elt_env = self._element.get_environment() env_dump = yaml.round_trip_dump(elt_env, default_flow_style=False, allow_unicode=True) self.message(MessageType.LOG, "Build environment for element {}".format(self._element.name), detail=env_dump) # Run the action return self._action_cb(self._element) def parent_complete(self, status, result): self._complete_cb(self, self._element, status, self._result) def message(self, message_type, message, **kwargs): args = dict(kwargs) args['scheduler'] = True self._scheduler.context.message( Message(self._element._unique_id, message_type, message, **args)) def child_process_data(self): data = {} workspace = self._element._get_workspace() if workspace is not None: data['workspace'] = workspace.to_dict() return data buildstream-1.6.9/buildstream/_scheduler/jobs/job.py000066400000000000000000000545431437515270000225720ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # Copyright (C) 2019 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # Tristan Maat # System imports import os import sys import signal import datetime import traceback import asyncio import multiprocessing # BuildStream toplevel imports from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob from ..._message import Message, MessageType, unconditional_messages from ... import _signals, utils from .. import _multiprocessing # Return code values shutdown of job handling child processes # RC_OK = 0 RC_FAIL = 1 RC_PERM_FAIL = 2 RC_SKIPPED = 3 # JobStatus: # # The job completion status, passed back through the # complete callbacks. 
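#
# Callers normally just branch on this value; the queues, for example,
# do roughly the following with it (illustrative sketch only):
#
#     if status == JobStatus.SKIPPED:
#         ...  # record the element as skipped
#     elif status == JobStatus.OK:
#         ...  # record the element as processed
#     else:
#         ...  # record the element as failed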
# class JobStatus(): # Job succeeded OK = 0 # A temporary BstError was raised FAIL = 1 # A SkipJob was raised SKIPPED = 3 # Used to distinguish between status messages and return values class _Envelope(): def __init__(self, message_type, message): self.message_type = message_type self.message = message # Job() # # The Job object represents a parallel task, when calling Job.spawn(), # the given `Job.child_process()` will be called in parallel to the # calling process, and `Job.parent_complete()` will be called with the # action result in the calling process when the job completes. # # Args: # scheduler (Scheduler): The scheduler # action_name (str): The queue action name # logfile (str): A template string that points to the logfile # that should be used - should contain {pid}. # max_retries (int): The maximum number of retries # class Job(): def __init__(self, scheduler, action_name, logfile, *, max_retries=0): # # Public members # self.action_name = action_name # The action name for the Queue self.child_data = None # Data to be sent to the main process # # Private members # self._scheduler = scheduler # The scheduler self._queue = multiprocessing.Queue() # A message passing queue self._process = None # The Process object self._watcher = None # Child process watcher self._listening = False # Whether the parent is currently listening self._suspended = False # Whether this job is currently suspended self._max_retries = max_retries # Maximum number of automatic retries self._result = None # Return value of child action in the parent self._tries = 0 # Try count, for retryable jobs self._terminated = False # Whether this job has been explicitly terminated # If False, a retry will not be attempted regardless of whether _tries is less than _max_retries. # self._retry_flag = True self._logfile = logfile self._task_id = None # spawn() # # Spawns the job. # def spawn(self): self._tries += 1 self._parent_start_listening() # Spawn the process self._process = _multiprocessing.AsyncioSafeProcess(target=self._child_action, args=[self._queue]) # Block signals which are handled in the main process such that # the child process does not inherit the parent's state, but the main # process will be notified of any signal after we launch the child. # with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False): with asyncio.get_child_watcher() as watcher: self._process.start() # Register the process to call `_parent_child_completed` once it is done # Here we delay the call to the next loop tick. This is in order to be running # in the main thread, as the callback itself must be thread safe. def on_completion(pid, returncode): asyncio.get_event_loop().call_soon(self._parent_child_completed, pid, returncode) watcher.add_child_handler(self._process.pid, on_completion) # terminate() # # Politely request that an ongoing job terminate soon. # # This will send a SIGTERM signal to the Job process. # def terminate(self): # First resume the job if it's suspended self.resume(silent=True) self.message(MessageType.STATUS, "{} terminating".format(self.action_name)) # Make sure there is no garbage on the queue self._parent_stop_listening() # Terminate the process using multiprocessing API pathway self._process.terminate() self._terminated = True # get_terminated() # # Check if a job has been terminated. # # Returns: # (bool): True in the main process if Job.terminate() was called. # def get_terminated(self): return self._terminated # kill() # # Forcefully kill the process, and any children it might have. 
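# (Unlike terminate(), this does not go through the multiprocessing API;
# it kills the child's whole process tree.)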
# def kill(self): # Force kill self.message(MessageType.WARN, "{} did not terminate gracefully, killing".format(self.action_name)) utils._kill_process_tree(self._process.pid) # suspend() # # Suspend this job. # def suspend(self): if not self._suspended: self.message(MessageType.STATUS, "{} suspending".format(self.action_name)) try: # Use SIGTSTP so that child processes may handle and propagate # it to processes they spawn that become session leaders os.kill(self._process.pid, signal.SIGTSTP) # For some reason we receive exactly one suspend event for every # SIGTSTP we send to the child fork(), even though the child forks # are setsid(). We keep a count of these so we can ignore them # in our event loop suspend_event() self._scheduler.internal_stops += 1 self._suspended = True except ProcessLookupError: # ignore, process has already exited pass # resume() # # Resume this suspended job. # def resume(self, silent=False): if self._suspended: if not silent and not self._scheduler.terminated: self.message(MessageType.STATUS, "{} resuming".format(self.action_name)) os.kill(self._process.pid, signal.SIGCONT) self._suspended = False # set_task_id() # # This is called by Job subclasses to set a plugin ID # associated with the task at large (if any element is related # to the task). # # The task ID helps keep messages in the frontend coherent # in the case that multiple plugins log in the context of # a single task (e.g. running integration commands should appear # in the frontend for the element being built, not the element # running the integration commands). # # Args: # task_id (int): The plugin identifier for this task # def set_task_id(self, task_id): self._task_id = task_id # send_message() # # To be called from inside Job.child_process() implementations # to send messages to the main process during processing. # # These messages will be processed by the class's Job.handle_message() # implementation. # def send_message(self, message_type, message): self._queue.put(_Envelope(message_type, message)) ####################################################### # Abstract Methods # ####################################################### # handle_message() # # Handle a custom message. This will be called in the main process in # response to any messages sent to the main proces using the # Job.send_message() API from inside a Job.child_process() implementation # # Args: # message_type (str): A string to identify the message type # message (any): A simple serializable object # # Returns: # (bool): Should return a truthy value if message_type is handled. # def handle_message(self, message_type, message): return False # parent_complete() # # This will be executed after the job finishes, and is expected to # pass the result to the main thread. # # Args: # status (JobStatus): The job exit status # result (any): The result returned by child_process(). # def parent_complete(self, status, result): raise ImplError("Job '{kind}' does not implement parent_complete()" .format(kind=type(self).__name__)) # child_process() # # This will be executed after fork(), and is intended to perform # the job's task. # # Returns: # (any): A (simple!) object to be returned to the main thread # as the result. # def child_process(self): raise ImplError("Job '{kind}' does not implement child_process()" .format(kind=type(self).__name__)) # message(): # # Logs a message, this will be logged in the task's logfile and # conditionally also be sent to the frontend. 
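# The message is tagged as a scheduler message and dispatched through
# the scheduler's context.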
# # Args: # message_type (MessageType): The type of message to send # message (str): The message # kwargs: Remaining Message() constructor arguments # def message(self, message_type, message, **kwargs): args = dict(kwargs) args['scheduler'] = True self._scheduler.context.message(Message(None, message_type, message, **args)) # child_process_data() # # Abstract method to retrieve additional data that should be # returned to the parent process. Note that the job result is # retrieved independently. # # Values can later be retrieved in Job.child_data. # # Returns: # (dict) A dict containing values to be reported to the main process # def child_process_data(self): return {} ####################################################### # Local Private Methods # ####################################################### # # Methods prefixed with the word 'child' take place in the child process # # Methods prefixed with the word 'parent' take place in the parent process # # Other methods can be called in both child or parent processes # ####################################################### # _child_action() # # Perform the action in the child process, this calls the action_cb. # # Args: # queue (multiprocessing.Queue): The message queue for IPC # def _child_action(self, queue): # This avoids some SIGTSTP signals from grandchildren # getting propagated up to the master process os.setsid() # First set back to the default signal handlers for the signals # we handle, and then clear their blocked state. # signal_list = [signal.SIGTSTP, signal.SIGTERM] for sig in signal_list: signal.signal(sig, signal.SIG_DFL) signal.pthread_sigmask(signal.SIG_UNBLOCK, signal_list) # Assign the queue we passed across the process boundaries # # Set the global message handler in this child # process to forward messages to the parent process self._queue = queue self._scheduler.context.set_message_handler(self._child_message_handler) starttime = datetime.datetime.now() stopped_time = None def stop_time(): nonlocal stopped_time stopped_time = datetime.datetime.now() def resume_time(): nonlocal stopped_time nonlocal starttime starttime += (datetime.datetime.now() - stopped_time) # Time, log and and run the action function # with _signals.suspendable(stop_time, resume_time), \ self._scheduler.context.recorded_messages(self._logfile) as filename: self.message(MessageType.START, self.action_name, logfile=filename) try: # Try the task action result = self.child_process() except SkipJob as e: elapsed = datetime.datetime.now() - starttime self.message(MessageType.SKIPPED, str(e), elapsed=elapsed, logfile=filename) # Alert parent of skip by return code self._child_shutdown(RC_SKIPPED) except BstError as e: elapsed = datetime.datetime.now() - starttime self._retry_flag = e.temporary if self._retry_flag and (self._tries <= self._max_retries): self.message(MessageType.FAIL, "Try #{} failed, retrying".format(self._tries), elapsed=elapsed, logfile=filename) else: self.message(MessageType.FAIL, str(e), elapsed=elapsed, detail=e.detail, logfile=filename, sandbox=e.sandbox) self._queue.put(_Envelope('child_data', self.child_process_data())) # Report the exception to the parent (for internal testing purposes) self._child_send_error(e) # Set return code based on whether or not the error was temporary. 
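# (RC_FAIL allows the parent to retry the job, RC_PERM_FAIL does not.)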
# self._child_shutdown(RC_FAIL if self._retry_flag else RC_PERM_FAIL) except Exception as e: # pylint: disable=broad-except # If an unhandled (not normalized to BstError) occurs, that's a bug, # send the traceback and formatted exception back to the frontend # and print it to the log file. # elapsed = datetime.datetime.now() - starttime detail = "An unhandled exception occured:\n\n{}".format(traceback.format_exc()) self.message(MessageType.BUG, self.action_name, elapsed=elapsed, detail=detail, logfile=filename) # Unhandled exceptions should permenantly fail self._child_shutdown(RC_PERM_FAIL) else: # No exception occurred in the action self._queue.put(_Envelope('child_data', self.child_process_data())) self._child_send_result(result) elapsed = datetime.datetime.now() - starttime self.message(MessageType.SUCCESS, self.action_name, elapsed=elapsed, logfile=filename) # Shutdown needs to stay outside of the above context manager, # make sure we dont try to handle SIGTERM while the process # is already busy in sys.exit() self._child_shutdown(RC_OK) # _child_send_error() # # Sends an error to the main process through the message queue # # Args: # e (Exception): The error to send # def _child_send_error(self, e): domain = None reason = None if isinstance(e, BstError): domain = e.domain reason = e.reason envelope = _Envelope('error', { 'domain': domain, 'reason': reason }) self._queue.put(envelope) # _child_send_result() # # Sends the serialized result to the main process through the message queue # # Args: # result (object): A simple serializable object, or None # # Note: If None is passed here, nothing needs to be sent, the # result member in the parent process will simply remain None. # def _child_send_result(self, result): if result is not None: envelope = _Envelope('result', result) self._queue.put(envelope) # _child_shutdown() # # Shuts down the child process by cleaning up and exiting the process # # Args: # exit_code (int): The exit code to exit with # def _child_shutdown(self, exit_code): self._queue.close() sys.exit(exit_code) # _child_message_handler() # # A Context delegate for handling messages, this replaces the # frontend's main message handler in the context of a child task # and performs local logging to the local log file before sending # the message back to the parent process for further propagation. # # Args: # message (Message): The message to log # context (Context): The context object delegating this message # def _child_message_handler(self, message, context): message.action_name = self.action_name message.task_id = self._task_id # Send to frontend if appropriate if context.silent_messages() and (message.message_type not in unconditional_messages): return if message.message_type == MessageType.LOG: return self._queue.put(_Envelope('message', message)) # _parent_shutdown() # # Shuts down the Job on the parent side by reading any remaining # messages on the message queue and cleaning up any resources. # def _parent_shutdown(self): # Make sure we've read everything we need and then stop listening self._parent_process_queue() self._parent_stop_listening() # _parent_child_completed() # # Called in the main process courtesy of asyncio's ChildWatcher.add_child_handler() # # Args: # pid (int): The PID of the child which completed # returncode (int): The return code of the child process # def _parent_child_completed(self, pid, returncode): self._parent_shutdown() # We don't want to retry if we got OK or a permanent fail. 
# This is set in _child_action but must also be set for the parent. # self._retry_flag = returncode == RC_FAIL if self._retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated: self.spawn() return # Resolve the outward facing overall job completion status # if returncode == RC_OK: status = JobStatus.OK elif returncode == RC_SKIPPED: status = JobStatus.SKIPPED elif returncode in (RC_FAIL, RC_PERM_FAIL): status = JobStatus.FAIL else: status = JobStatus.FAIL self.parent_complete(status, self._result) self._scheduler.job_completed(self, status) # _parent_process_envelope() # # Processes a message Envelope deserialized form the message queue. # # this will have the side effect of assigning some local state # on the Job in the parent process for later inspection when the # child process completes. # # Args: # envelope (Envelope): The message envelope # def _parent_process_envelope(self, envelope): if not self._listening: return if envelope.message_type == 'message': # Propagate received messages from children # back through the context. self._scheduler.context.message(envelope.message) elif envelope.message_type == 'error': # For regression tests only, save the last error domain / reason # reported from a child task in the main process, this global state # is currently managed in _exceptions.py set_last_task_error(envelope.message['domain'], envelope.message['reason']) elif envelope.message_type == 'result': assert self._result is None self._result = envelope.message elif envelope.message_type == 'child_data': # If we retry a job, we assign a new value to this self.child_data = envelope.message # Try Job subclass specific messages now elif not self.handle_message(envelope.message_type, envelope.message): assert 0, "Unhandled message type '{}': {}" \ .format(envelope.message_type, envelope.message) # _parent_process_queue() # # Reads back message envelopes from the message queue # in the parent process. # def _parent_process_queue(self): while not self._queue.empty(): envelope = self._queue.get_nowait() self._parent_process_envelope(envelope) # _parent_recv() # # A callback to handle I/O events from the message # queue file descriptor in the main process message loop # def _parent_recv(self, *args): self._parent_process_queue() # _parent_start_listening() # # Starts listening on the message queue # def _parent_start_listening(self): # Warning: Platform specific code up ahead # # The multiprocessing.Queue object does not tell us how # to receive io events in the receiving process, so we # need to sneak in and get its file descriptor. 
# # The _reader member of the Queue is currently private # but well known, perhaps it will become public: # # http://bugs.python.org/issue3831 # if not self._listening: self._scheduler.loop.add_reader( self._queue._reader.fileno(), self._parent_recv) self._listening = True # _parent_stop_listening() # # Stops listening on the message queue # def _parent_stop_listening(self): if self._listening: self._scheduler.loop.remove_reader(self._queue._reader.fileno()) self._listening = False buildstream-1.6.9/buildstream/_scheduler/queues/000077500000000000000000000000001437515270000220055ustar00rootroot00000000000000buildstream-1.6.9/buildstream/_scheduler/queues/__init__.py000066400000000000000000000000461437515270000241160ustar00rootroot00000000000000from .queue import Queue, QueueStatus buildstream-1.6.9/buildstream/_scheduler/queues/buildqueue.py000066400000000000000000000050101437515270000245170ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter from . import Queue, QueueStatus from ..jobs import JobStatus from ..resources import ResourceType # A queue which assembles elements # class BuildQueue(Queue): action_name = "Build" complete_name = "Built" resources = [ResourceType.PROCESS, ResourceType.CACHE] def process(self, element): return element._assemble() def status(self, element): if not element._is_required(): # Artifact is not currently required but it may be requested later. # Keep it in the queue. return QueueStatus.WAIT if element._cached(): return QueueStatus.SKIP if not element._buildable(): return QueueStatus.WAIT return QueueStatus.READY def _check_cache_size(self, job, element, artifact_size): # After completing a build job, add the artifact size # as returned from Element._assemble() to the estimated # artifact cache size # context = self._scheduler.context artifacts = context.artifactcache artifacts.add_artifact_size(artifact_size) # If the estimated size outgrows the quota, ask the scheduler # to queue a job to actually check the real cache size. # if artifacts.has_quota_exceeded(): self._scheduler.check_cache_size() def done(self, job, element, result, status): if status == JobStatus.OK: # Inform element in main process that assembly is done element._assemble_done() # This has to be done after _assemble_done, such that the # element may register its cache key as required self._check_cache_size(job, element, result) buildstream-1.6.9/buildstream/_scheduler/queues/fetchqueue.py000066400000000000000000000046401437515270000245210ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. 
# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # BuildStream toplevel imports from ... import Consistency # Local imports from . import Queue, QueueStatus from ..resources import ResourceType from ..jobs import JobStatus # A queue which fetches element sources # class FetchQueue(Queue): action_name = "Fetch" complete_name = "Fetched" resources = [ResourceType.DOWNLOAD] def __init__(self, scheduler, skip_cached=False): super().__init__(scheduler) self._skip_cached = skip_cached def process(self, element): previous_sources = [] for source in element.sources(): source._fetch(previous_sources) previous_sources.append(source) def status(self, element): if not element._is_required(): # Artifact is not currently required but it may be requested later. # Keep it in the queue. return QueueStatus.WAIT # Optionally skip elements that are already in the artifact cache if self._skip_cached: if not element._can_query_cache(): return QueueStatus.WAIT if element._cached(): return QueueStatus.SKIP # This will automatically skip elements which # have no sources. if element._get_consistency() == Consistency.CACHED: return QueueStatus.SKIP return QueueStatus.READY def done(self, _, element, result, status): if status == JobStatus.FAIL: return element._fetch_done() # Successful fetch, we must be CACHED now assert element._get_consistency() == Consistency.CACHED buildstream-1.6.9/buildstream/_scheduler/queues/pullqueue.py000066400000000000000000000041711437515270000244030ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # Local imports from . import Queue, QueueStatus from ..resources import ResourceType from ..jobs import JobStatus from ..._exceptions import SkipJob # A queue which pulls element artifacts # class PullQueue(Queue): action_name = "Pull" complete_name = "Pulled" resources = [ResourceType.DOWNLOAD, ResourceType.CACHE] def process(self, element): # returns whether an artifact was downloaded or not if not element._pull(): raise SkipJob(self.action_name) def status(self, element): if not element._is_required(): # Artifact is not currently required but it may be requested later. # Keep it in the queue. return QueueStatus.WAIT if not element._can_query_cache(): return QueueStatus.WAIT if element._pull_pending(): return QueueStatus.READY else: return QueueStatus.SKIP def done(self, _, element, result, status): if status == JobStatus.FAIL: return element._pull_done() # Build jobs will check the "approximate" size first. 
Since we # do not get an artifact size from pull jobs, we have to # actually check the cache size. if status == JobStatus.OK: self._scheduler.check_cache_size() buildstream-1.6.9/buildstream/_scheduler/queues/pushqueue.py000066400000000000000000000026251437515270000244100ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # Local imports from . import Queue, QueueStatus from ..resources import ResourceType from ..._exceptions import SkipJob # A queue which pushes element artifacts # class PushQueue(Queue): action_name = "Push" complete_name = "Pushed" resources = [ResourceType.UPLOAD] def process(self, element): # returns whether an artifact was uploaded or not if not element._push(): raise SkipJob(self.action_name) def status(self, element): if element._skip_push(): return QueueStatus.SKIP return QueueStatus.READY buildstream-1.6.9/buildstream/_scheduler/queues/queue.py000066400000000000000000000254021437515270000235060ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # System imports import os from collections import deque from enum import Enum import traceback # Local imports from ..jobs import ElementJob, JobStatus from ..resources import ResourceType # BuildStream toplevel imports from ..._exceptions import BstError, set_last_task_error from ..._message import Message, MessageType # Queue status for a given element # # class QueueStatus(Enum): # The element is waiting for dependencies. WAIT = 1 # The element can skip this queue. SKIP = 2 # The element is ready for processing in this queue. 
READY = 3 # Queue() # # Args: # scheduler (Scheduler): The Scheduler # class Queue(): # These should be overridden on class data of of concrete Queue implementations action_name = None complete_name = None resources = [] # Resources this queues' jobs want def __init__(self, scheduler): # # Public members # self.failed_elements = [] # List of failed elements, for the frontend self.processed_elements = [] # List of processed elements, for the frontend self.skipped_elements = [] # List of skipped elements, for the frontend # # Private members # self._scheduler = scheduler self._resources = scheduler.resources # Shared resource pool self._wait_queue = deque() # Ready / Waiting elements self._done_queue = deque() # Processed / Skipped elements self._max_retries = 0 # Assert the subclass has setup class data assert self.action_name is not None assert self.complete_name is not None if ResourceType.UPLOAD in self.resources or ResourceType.DOWNLOAD in self.resources: self._max_retries = scheduler.context.sched_network_retries ##################################################### # Abstract Methods for Queue implementations # ##################################################### # process() # # Abstract method for processing an element # # Args: # element (Element): An element to process # # Returns: # (any): An optional something to be returned # for every element successfully processed # # def process(self, element): pass # status() # # Abstract method for reporting the status of an element. # # Args: # element (Element): An element to process # # Returns: # (QueueStatus): The element status # def status(self, element): return QueueStatus.READY # done() # # Abstract method for handling a successful job completion. # # Args: # job (Job): The job which completed processing # element (Element): The element which completed processing # result (any): The return value of the process() implementation # status (JobStatus): The return status of the Job # def done(self, job, element, result, status): pass ##################################################### # Scheduler / Pipeline facing APIs # ##################################################### # enqueue() # # Enqueues some elements # # Args: # elts (list): A list of Elements # def enqueue(self, elts): if not elts: return # Place skipped elements on the done queue right away. # # The remaining ready and waiting elements must remain in the # same queue, and ready status must be determined at the moment # which the scheduler is asking for the next job. # skip = [elt for elt in elts if self.status(elt) == QueueStatus.SKIP] wait = [elt for elt in elts if elt not in skip] self.skipped_elements.extend(skip) # Public record of skipped elements self._done_queue.extend(skip) # Elements to be processed self._wait_queue.extend(wait) # Elements eligible to be dequeued # dequeue() # # A generator which dequeues the elements which # are ready to exit the queue. # # Yields: # (Element): Elements being dequeued # def dequeue(self): while self._done_queue: yield self._done_queue.popleft() # dequeue_ready() # # Reports whether any elements can be promoted to other queues # # Returns: # (bool): Whether there are elements ready # def dequeue_ready(self): return any(self._done_queue) # harvest_jobs() # # Process elements in the queue, moving elements which were enqueued # into the dequeue pool, and creating as many jobs for which resources # can be reserved. 
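#
# The scheduler is expected to drive this from its scheduling loop,
# along these lines (illustrative sketch, not the exact scheduler code):
#
#     for queue in queues:
#         for job in queue.harvest_jobs():
#             job.spawn()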
# # Returns: # ([Job]): A list of jobs which can be run now # def harvest_jobs(self): unready = [] ready = [] while self._wait_queue: if not self._resources.reserve(self.resources, peek=True): break element = self._wait_queue.popleft() status = self.status(element) if status == QueueStatus.WAIT: unready.append(element) elif status == QueueStatus.SKIP: self._done_queue.append(element) self.skipped_elements.append(element) else: reserved = self._resources.reserve(self.resources) assert reserved ready.append(element) self._wait_queue.extendleft(unready) return [ ElementJob(self._scheduler, self.action_name, self._element_log_path(element), element=element, queue=self, action_cb=self.process, complete_cb=self._job_done, max_retries=self._max_retries) for element in ready ] ##################################################### # Private Methods # ##################################################### # _update_workspaces() # # Updates and possibly saves the workspaces in the # main data model in the main process after a job completes. # # Args: # element (Element): The element which completed # job (Job): The job which completed # def _update_workspaces(self, element, job): workspace_dict = None if job.child_data: workspace_dict = job.child_data.get('workspace', None) # Handle any workspace modifications now # if workspace_dict: context = element._get_context() workspaces = context.get_workspaces() if workspaces.update_workspace(element._get_full_name(), workspace_dict): try: workspaces.save_config() except BstError as e: self._message(element, MessageType.ERROR, "Error saving workspaces", detail=str(e)) except Exception: # pylint: disable=broad-except self._message(element, MessageType.BUG, "Unhandled exception while saving workspaces", detail=traceback.format_exc()) # _job_done() # # A callback reported by the Job() when a job completes # # This will call the Queue implementation specific Queue.done() # implementation and trigger the scheduler to reschedule. # # See the Job object for an explanation of the call signature # def _job_done(self, job, element, status, result): # Now release the resources we reserved # self._resources.release(self.resources) # Update values that need to be synchronized in the main task # before calling any queue implementation self._update_workspaces(element, job) # Give the result of the job to the Queue implementor, # and determine if it should be considered as processed # or skipped. try: self.done(job, element, result, status) except BstError as e: # Report error and mark as failed # self._message(element, MessageType.ERROR, "Post processing error", detail=str(e)) self.failed_elements.append(element) # Treat this as a task error as it's related to a task # even though it did not occur in the task context # # This just allows us stronger testing capability # set_last_task_error(e.domain, e.reason) except Exception: # pylint: disable=broad-except # Report unhandled exceptions and mark as failed # self._message(element, MessageType.BUG, "Unhandled exception in post processing", detail=traceback.format_exc()) self.failed_elements.append(element) else: # All elements get placed on the done queue for later processing. self._done_queue.append(element) # These lists are for bookkeeping purposes for the UI and logging. 
if status == JobStatus.SKIPPED: self.skipped_elements.append(element) elif status == JobStatus.OK: self.processed_elements.append(element) else: self.failed_elements.append(element) # Convenience wrapper for Queue implementations to send # a message for the element they are processing def _message(self, element, message_type, brief, **kwargs): context = element._get_context() message = Message(element._unique_id, message_type, brief, **kwargs) context.message(message) def _element_log_path(self, element): project = element._get_project() key = element._get_display_key()[1] action = self.action_name.lower() logfile = "{key}-{action}".format(key=key, action=action) return os.path.join(project.name, element.normal_name, logfile) buildstream-1.6.9/buildstream/_scheduler/queues/trackqueue.py000066400000000000000000000036221437515270000245330ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # BuildStream toplevel imports from ...plugin import Plugin # Local imports from . import Queue, QueueStatus from ..resources import ResourceType from ..jobs import JobStatus # A queue which tracks sources # class TrackQueue(Queue): action_name = "Track" complete_name = "Tracked" resources = [ResourceType.DOWNLOAD] def process(self, element): return element._track() def status(self, element): # We can skip elements entirely if they have no sources. if not list(element.sources()): # But we still have to mark them as tracked element._tracking_done() return QueueStatus.SKIP return QueueStatus.READY def done(self, _, element, result, status): if status == JobStatus.FAIL: return # Set the new refs in the main process one by one as they complete, # writing to bst files this time for unique_id, new_ref in result: source = Plugin._lookup(unique_id) source._set_ref(new_ref, save=True) element._tracking_done() buildstream-1.6.9/buildstream/_scheduler/resources.py000066400000000000000000000135421437515270000230670ustar00rootroot00000000000000class ResourceType(): CACHE = 0 DOWNLOAD = 1 PROCESS = 2 UPLOAD = 3 class Resources(): def __init__(self, num_builders, num_fetchers, num_pushers): self._max_resources = { ResourceType.CACHE: 0, ResourceType.DOWNLOAD: num_fetchers, ResourceType.PROCESS: num_builders, ResourceType.UPLOAD: num_pushers } # Resources jobs are currently using. self._used_resources = { ResourceType.CACHE: 0, ResourceType.DOWNLOAD: 0, ResourceType.PROCESS: 0, ResourceType.UPLOAD: 0 } # Resources jobs currently want exclusive access to. The set # of jobs that have asked for exclusive access is the value - # this is so that we can avoid scheduling any other jobs until # *all* exclusive jobs that "register interest" have finished # - which avoids starving them of scheduling time. 
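#
# A job wanting exclusive access follows roughly this pattern
# (illustrative sketch with hypothetical names):
#
#     resources.register_exclusive_interest([ResourceType.CACHE], source=job)
#     if resources.reserve([ResourceType.CACHE], exclusive=[ResourceType.CACHE]):
#         ...  # run the job, then:
#         resources.release([ResourceType.CACHE])
#         resources.unregister_exclusive_interest([ResourceType.CACHE], source=job)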
self._exclusive_resources = { ResourceType.CACHE: set(), ResourceType.DOWNLOAD: set(), ResourceType.PROCESS: set(), ResourceType.UPLOAD: set() } # reserve() # # Reserves a set of resources # # Args: # resources (set): A set of ResourceTypes # exclusive (set): Another set of ResourceTypes # peek (bool): Whether to only peek at whether the resource is available # # Returns: # (bool): True if the resources could be reserved # def reserve(self, resources, exclusive=None, *, peek=False): if exclusive is None: exclusive = set() resources = set(resources) exclusive = set(exclusive) # First, we check if the job wants to access a resource that # another job wants exclusive access to. If so, it cannot be # scheduled. # # Note that if *both* jobs want this exclusively, we don't # fail yet. # # FIXME: I *think* we can deadlock if two jobs want disjoint # sets of exclusive and non-exclusive resources. This # is currently not possible, but may be worth thinking # about. # for resource in resources - exclusive: # If our job wants this resource exclusively, we never # check this, so we can get away with not (temporarily) # removing it from the set. if self._exclusive_resources[resource]: return False # Now we check if anything is currently using any resources # this job wants exclusively. If so, the job cannot be # scheduled. # # Since jobs that use a resource exclusively are also using # it, this means only one exclusive job can ever be scheduled # at a time, despite being allowed to be part of the exclusive # set. # for resource in exclusive: if self._used_resources[resource] != 0: return False # Finally, we check if we have enough of each resource # available. If we don't have enough, the job cannot be # scheduled. for resource in resources: if (self._max_resources[resource] > 0 and self._used_resources[resource] >= self._max_resources[resource]): return False # Now we register the fact that our job is using the resources # it asked for, and tell the scheduler that it is allowed to # continue. if not peek: for resource in resources: self._used_resources[resource] += 1 return True # release() # # Release resources previously reserved with Resources.reserve() # # Args: # resources (set): A set of resources to release # def release(self, resources): for resource in resources: assert self._used_resources[resource] > 0, "Scheduler resource imbalance" self._used_resources[resource] -= 1 # register_exclusive_interest() # # Inform the resources pool that `source` has an interest in # reserving this resource exclusively. # # The source parameter is used to identify the caller, it # must be ensured to be unique for the time that the # interest is registered. # # This function may be called multiple times, and subsequent # calls will simply have no effect until clear_exclusive_interest() # is used to clear the interest. # # This must be called in advance of reserve() # # Args: # resources (set): Set of resources to reserve exclusively # source (any): Source identifier, to be used again when unregistering # the interest. # def register_exclusive_interest(self, resources, source): # The very first thing we do is to register any exclusive # resources this job may want. Even if the job is not yet # allowed to run (because another job is holding the resource # it wants), we can still set this - it just means that any # job *currently* using these resources has to finish first, # and no new jobs wanting these can be launched (except other # exclusive-access jobs). 
# for resource in resources: self._exclusive_resources[resource].add(source) # unregister_exclusive_interest() # # Clear the exclusive interest in these resources. # # This should be called by the given source which registered # an exclusive interest. # # Args: # resources (set): Set of resources to reserve exclusively # source (str): Source identifier, to be used again when unregistering # the interest. # def unregister_exclusive_interest(self, resources, source): for resource in resources: self._exclusive_resources[resource].remove(source) buildstream-1.6.9/buildstream/_scheduler/scheduler.py000066400000000000000000000434331437515270000230350ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # System imports import os import asyncio from itertools import chain import signal import datetime from contextlib import contextmanager # Local imports from .resources import Resources, ResourceType from .jobs import JobStatus, CacheSizeJob, CleanupJob # A decent return code for Scheduler.run() class SchedStatus(): SUCCESS = 0 ERROR = -1 TERMINATED = 1 # Some action names for the internal jobs we launch # _ACTION_NAME_CLEANUP = 'cleanup' _ACTION_NAME_CACHE_SIZE = 'cache_size' # Scheduler() # # The scheduler operates on a list queues, each of which is meant to accomplish # a specific task. Elements enter the first queue when Scheduler.run() is called # and into the next queue when complete. Scheduler.run() returns when all of the # elements have been traversed or when an occurs. # # Using the scheduler is a matter of: # a.) Deriving the Queue class and implementing its abstract methods # b.) Instantiating a Scheduler with one or more queues # c.) Calling Scheduler.run(elements) with a list of elements # d.) Fetching results from your queues # # Args: # context: The Context in the parent scheduling process # start_time: The time at which the session started # interrupt_callback: A callback to handle ^C # ticker_callback: A callback call once per second # job_start_callback: A callback call when each job starts # job_complete_callback: A callback call when each job completes # class Scheduler(): def __init__(self, context, start_time, interrupt_callback=None, ticker_callback=None, job_start_callback=None, job_complete_callback=None): # # Public members # self.queues = None # Exposed for the frontend to print summaries self.context = context # The Context object shared with Queues self.terminated = False # Whether the scheduler was asked to terminate or has terminated self.suspended = False # Whether the scheduler is currently suspended # These are shared with the Job, but should probably be removed or made private in some way. 
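        # For instance, Job.suspend() introduces SIGTSTP signals which are
        # delivered back to this process; internal_stops below counts those
        # self-inflicted stops so that _suspend_event() can ignore them
        # rather than suspending the whole session again.
        #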
self.loop = None # Shared for Job access to observe the message queue self.internal_stops = 0 # Amount of SIGSTP signals we've introduced, this is shared with job.py # # Private members # self._active_jobs = [] # Jobs currently being run in the scheduler self._starttime = start_time # Initial application start time self._suspendtime = None # Session time compensation for suspended state self._queue_jobs = True # Whether we should continue to queue jobs # State of cache management related jobs self._cache_size_scheduled = False # Whether we have a cache size job scheduled self._cache_size_running = None # A running CacheSizeJob, or None self._cleanup_scheduled = False # Whether we have a cleanup job scheduled self._cleanup_running = None # A running CleanupJob, or None # Callbacks to report back to the Scheduler owner self._interrupt_callback = interrupt_callback self._ticker_callback = ticker_callback self._job_start_callback = job_start_callback self._job_complete_callback = job_complete_callback # Whether our exclusive jobs, like 'cleanup' are currently already # waiting or active. # # This is just a bit quicker than scanning the wait queue and active # queue and comparing job action names. # self._exclusive_waiting = set() self._exclusive_active = set() self.resources = Resources(context.sched_builders, context.sched_fetchers, context.sched_pushers) # run() # # Args: # queues (list): A list of Queue objects # # Returns: # (SchedStatus): How the scheduling terminated # # Elements in the 'plan' will be processed by each # queue in order. Processing will complete when all # elements have been processed by each queue or when # an error arises # def run(self, queues): # Hold on to the queues to process self.queues = queues # NOTE: Enforce use of `SafeChildWatcher` as we generally don't want # background threads. # In Python 3.8+, `ThreadedChildWatcher` is the default watcher, and # not `SafeChildWatcher`. asyncio.set_child_watcher(asyncio.SafeChildWatcher()) # Ensure that we have a fresh new event loop, in case we want # to run another test in this thread. self.loop = asyncio.new_event_loop() asyncio.set_event_loop(self.loop) # Add timeouts if self._ticker_callback: self.loop.call_later(1, self._tick) # Handle unix signals while running self._connect_signals() # Run the queues self._sched() self.loop.run_forever() self.loop.close() # Stop handling unix signals self._disconnect_signals() failed = any(any(queue.failed_elements) for queue in self.queues) self.loop = None if failed: status = SchedStatus.ERROR elif self.terminated: status = SchedStatus.TERMINATED else: status = SchedStatus.SUCCESS return self.elapsed_time(), status # terminate_jobs() # # Forcefully terminates all ongoing jobs. # # For this to be effective, one needs to return to # the scheduler loop first and allow the scheduler # to complete gracefully. # # NOTE: This will block SIGINT so that graceful process # termination is not interrupted, and SIGINT will # remain blocked after Scheduler.run() returns. # def terminate_jobs(self): # Set this right away, the frontend will check this # attribute to decide whether or not to print status info # etc and the following code block will trigger some callbacks. self.terminated = True self.loop.call_soon(self._terminate_jobs_real) # Block this until we're finished terminating jobs, # this will remain blocked forever. 
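        # As a rough sketch of the termination flow (frontend details vary):
        #
        #   SIGINT                 ->  _interrupt_event()
        #   _interrupt_callback()  ->  frontend decides to call terminate_jobs()
        #   _terminate_jobs_real() ->  job.terminate(), with kill_jobs() after 20 sec
        #   run()                  ->  returns with SchedStatus.TERMINATED
        #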
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT]) # jobs_suspended() # # A context manager for running with jobs suspended # @contextmanager def jobs_suspended(self): self._disconnect_signals() self._suspend_jobs() yield self._resume_jobs() self._connect_signals() # stop_queueing() # # Stop queueing additional jobs, causes Scheduler.run() # to return once all currently processing jobs are finished. # def stop_queueing(self): self._queue_jobs = False # elapsed_time() # # Fetches the current session elapsed time # # Returns: # (datetime): The amount of time since the start of the session, # discounting any time spent while jobs were suspended. # def elapsed_time(self): timenow = datetime.datetime.now() starttime = self._starttime if not starttime: starttime = timenow return timenow - starttime # job_completed(): # # Called when a Job completes # # Args: # queue (Queue): The Queue holding a complete job # job (Job): The completed Job # status (JobStatus): The status of the completed job # def job_completed(self, job, status): # Remove from the active jobs list self._active_jobs.remove(job) # Scheduler owner facing callback self._job_complete_callback(job, status) # Now check for more jobs self._sched() # check_cache_size(): # # Queues a cache size calculation job, after the cache # size is calculated, a cleanup job will be run automatically # if needed. # def check_cache_size(self): # Here we assume we are called in response to a job # completion callback, or before entering the scheduler. # # As such there is no need to call `_sched()` from here, # and we prefer to run it once at the last moment. # self._cache_size_scheduled = True ####################################################### # Local Private Methods # ####################################################### # _spawn_job() # # Spanws a job # # Args: # job (Job): The job to spawn # def _spawn_job(self, job): job.spawn() self._active_jobs.append(job) if self._job_start_callback: self._job_start_callback(job) # Callback for the cache size job def _cache_size_job_complete(self, status, cache_size): context = self.context artifacts = context.artifactcache # Deallocate cache size job resources self._cache_size_running = None self.resources.release([ResourceType.CACHE, ResourceType.PROCESS]) # Schedule a cleanup job if we've hit the threshold if status != JobStatus.OK: return if artifacts.has_quota_exceeded(): self._cleanup_scheduled = True # Callback for the cleanup job def _cleanup_job_complete(self, status, cache_size): # Deallocate cleanup job resources self._cleanup_running = None self.resources.release([ResourceType.CACHE, ResourceType.PROCESS]) # Unregister the exclusive interest when we're done with it if not self._cleanup_scheduled: self.resources.unregister_exclusive_interest( [ResourceType.CACHE], 'cache-cleanup' ) # _sched_cleanup_job() # # Runs a cleanup job if one is scheduled to run now and # sufficient recources are available. 
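    # The cache management flow, as wired up in this file, is roughly:
    #
    #   check_cache_size()          ->  _cache_size_scheduled = True
    #   _sched_cache_size_job()     ->  spawns a CacheSizeJob
    #   _cache_size_job_complete()  ->  sets _cleanup_scheduled if the quota is exceeded
    #   _sched_cleanup_job()        ->  spawns a CleanupJob with exclusive CACHE access
    #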
# def _sched_cleanup_job(self): if self._cleanup_scheduled and self._cleanup_running is None: # Ensure we have an exclusive interest in the resources self.resources.register_exclusive_interest( [ResourceType.CACHE], 'cache-cleanup' ) if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS], [ResourceType.CACHE]): # Update state and launch self._cleanup_scheduled = False self._cleanup_running = \ CleanupJob(self, _ACTION_NAME_CLEANUP, 'cleanup/cleanup', complete_cb=self._cleanup_job_complete) self._spawn_job(self._cleanup_running) # _sched_cache_size_job() # # Runs a cache size job if one is scheduled to run now and # sufficient recources are available. # def _sched_cache_size_job(self): if self._cache_size_scheduled and not self._cache_size_running: if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS]): self._cache_size_scheduled = False self._cache_size_running = \ CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE, 'cache_size/cache_size', complete_cb=self._cache_size_job_complete) self._spawn_job(self._cache_size_running) # _sched_queue_jobs() # # Ask the queues what jobs they want to schedule and schedule # them. This is done here so we can ask for new jobs when jobs # from previous queues become available. # # This will process the Queues, pull elements through the Queues # and process anything that is ready. # def _sched_queue_jobs(self): ready = [] process_queues = True while self._queue_jobs and process_queues: # Pull elements forward through queues elements = [] for queue in self.queues: queue.enqueue(elements) elements = list(queue.dequeue()) # Kickoff whatever processes can be processed at this time # # We start by queuing from the last queue first, because # we want to give priority to queues later in the # scheduling process in the case that multiple queues # share the same token type. # # This avoids starvation situations where we dont move on # to fetch tasks for elements which failed to pull, and # thus need all the pulls to complete before ever starting # a build ready.extend(chain.from_iterable( q.harvest_jobs() for q in reversed(self.queues) )) # harvest_jobs() may have decided to skip some jobs, making # them eligible for promotion to the next queue as a side effect. # # If that happens, do another round. process_queues = any(q.dequeue_ready() for q in self.queues) # Spawn the jobs # for job in ready: self._spawn_job(job) # _sched() # # Run any jobs which are ready to run, or quit the main loop # when nothing is running or is ready to run. # # This is the main driving function of the scheduler, it is called # initially when we enter Scheduler.run(), and at the end of whenever # any job completes, after any bussiness logic has occurred and before # going back to sleep. # def _sched(self): if not self.terminated: # # Try the cache management jobs # self._sched_cleanup_job() self._sched_cache_size_job() # # Run as many jobs as the queues can handle for the # available resources # self._sched_queue_jobs() # # If nothing is ticking then bail out # if not self._active_jobs: self.loop.stop() # _suspend_jobs() # # Suspend all ongoing jobs. # def _suspend_jobs(self): if not self.suspended: self._suspendtime = datetime.datetime.now() self.suspended = True for job in self._active_jobs: job.suspend() # _resume_jobs() # # Resume suspended jobs. 
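    # Note that the session start time is shifted forward by the time spent
    # suspended. For example, a session started at 10:00:00, suspended at
    # 10:01:00 and resumed at 10:03:00 has its _starttime moved forward by
    # two minutes, so that elapsed_time() still reports one minute of
    # active session time.
    #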
# def _resume_jobs(self): if self.suspended: for job in self._active_jobs: job.resume() self.suspended = False self._starttime += (datetime.datetime.now() - self._suspendtime) self._suspendtime = None # _interrupt_event(): # # A loop registered event callback for keyboard interrupts # def _interrupt_event(self): # FIXME: This should not be needed, but for some reason we receive an # additional SIGINT event when the user hits ^C a second time # to inform us that they really intend to terminate; even though # we have disconnected our handlers at this time. # if self.terminated: return # Leave this to the frontend to decide, if no # interrrupt callback was specified, then just terminate. if self._interrupt_callback: self._interrupt_callback() else: # Default without a frontend is just terminate self.terminate_jobs() # _terminate_event(): # # A loop registered event callback for SIGTERM # def _terminate_event(self): self.terminate_jobs() # _suspend_event(): # # A loop registered event callback for SIGTSTP # def _suspend_event(self): # Ignore the feedback signals from Job.suspend() if self.internal_stops: self.internal_stops -= 1 return # No need to care if jobs were suspended or not, we _only_ handle this # while we know jobs are not suspended. self._suspend_jobs() os.kill(os.getpid(), signal.SIGSTOP) self._resume_jobs() # _connect_signals(): # # Connects our signal handler event callbacks to the mainloop # def _connect_signals(self): self.loop.add_signal_handler(signal.SIGINT, self._interrupt_event) self.loop.add_signal_handler(signal.SIGTERM, self._terminate_event) self.loop.add_signal_handler(signal.SIGTSTP, self._suspend_event) def _disconnect_signals(self): self.loop.remove_signal_handler(signal.SIGINT) self.loop.remove_signal_handler(signal.SIGTSTP) self.loop.remove_signal_handler(signal.SIGTERM) def _terminate_jobs_real(self): def kill_jobs(): for job_ in self._active_jobs: job_.kill() # Schedule all jobs to be killed if they have not exited in 20 sec self.loop.call_later(20, kill_jobs) for job in self._active_jobs: job.terminate() # Regular timeout for driving status in the UI def _tick(self): elapsed = self.elapsed_time() self._ticker_callback(elapsed) self.loop.call_later(1, self._tick) buildstream-1.6.9/buildstream/_signals.py000066400000000000000000000143651437515270000205430ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom import os import signal import sys import threading import traceback from contextlib import contextmanager, ExitStack from collections import deque # Global per process state for handling of sigterm/sigtstp/sigcont, # note that it is expected that this only ever be used by processes # the scheduler forks off, not the main process terminator_stack = deque() suspendable_stack = deque() # Per process SIGTERM handler def terminator_handler(signal_, frame): while terminator_stack: terminator_ = terminator_stack.pop() try: terminator_() except: # pylint: disable=bare-except # Ensure we print something if there's an exception raised when # processing the handlers. Note that the default exception # handler won't be called because we os._exit next, so we must # catch all possible exceptions with the unqualified 'except' # clause. traceback.print_exc(file=sys.stderr) print('Error encountered in BuildStream while processing custom SIGTERM handler:', terminator_, file=sys.stderr) # Use special exit here, terminate immediately, recommended # for precisely this situation where child forks are teminated. os._exit(-1) # terminator() # # A context manager for interruptable tasks, this guarantees # that while the code block is running, the supplied function # will be called upon process termination. # # Note that after handlers are called, the termination will be handled by # terminating immediately with os._exit(). This means that SystemExit will not # be raised and 'finally' clauses will not be executed. # # Args: # terminate_func (callable): A function to call when aborting # the nested code block. # @contextmanager def terminator(terminate_func): # Signal handling only works in the main thread if threading.current_thread() != threading.main_thread(): yield return outermost = not terminator_stack terminator_stack.append(terminate_func) if outermost: original_handler = signal.signal(signal.SIGTERM, terminator_handler) try: yield finally: if outermost: signal.signal(signal.SIGTERM, original_handler) terminator_stack.pop() # Just a simple object for holding on to two callbacks class Suspender(): def __init__(self, suspend_callback, resume_callback): self.suspend = suspend_callback self.resume = resume_callback # Per process SIGTSTP handler def suspend_handler(sig, frame): # Suspend callbacks from innermost frame first for suspender in reversed(suspendable_stack): suspender.suspend() # Use SIGSTOP directly now on self, dont introduce more SIGTSTP # # Here the process sleeps until SIGCONT, which we simply # dont handle. We know we'll pickup execution right here # when we wake up. os.kill(os.getpid(), signal.SIGSTOP) # Resume callbacks from outermost frame inwards for suspender in suspendable_stack: suspender.resume() # suspendable() # # A context manager for handling process suspending and resumeing # # Args: # suspend_callback (callable): A function to call as process suspend time. # resume_callback (callable): A function to call as process resume time. # # This must be used in code blocks which spawn processes that become # their own session leader. In these cases, SIGSTOP and SIGCONT need # to be propagated to the child process group. # # This context manager can also be used recursively, so multiple # things can happen at suspend/resume time (such as tracking timers # and ensuring durations do not count suspended time). 
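# A usage sketch (the `child` handle below is purely illustrative, not an
# API defined in this module):
#
#   with suspendable(child.suspend, child.resume), terminator(child.kill):
#       child.wait()
#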
# @contextmanager def suspendable(suspend_callback, resume_callback): outermost = not suspendable_stack suspender = Suspender(suspend_callback, resume_callback) suspendable_stack.append(suspender) if outermost: original_stop = signal.signal(signal.SIGTSTP, suspend_handler) try: yield finally: if outermost: signal.signal(signal.SIGTSTP, original_stop) suspendable_stack.pop() # blocked() # # A context manager for running a code block with blocked signals # # Args: # signals (list): A list of unix signals to block # ignore (bool): Whether to ignore entirely the signals which were # received and pending while the process had blocked them # @contextmanager def blocked(signal_list, ignore=True): with ExitStack() as stack: # Optionally add the ignored() context manager to this context if ignore: stack.enter_context(ignored(signal_list)) # Set and save the sigprocmask blocked_signals = signal.pthread_sigmask(signal.SIG_BLOCK, signal_list) try: yield finally: # If we have discarded the signals completely, this line will cause # the discard_handler() to trigger for each signal in the list signal.pthread_sigmask(signal.SIG_SETMASK, blocked_signals) # ignored() # # A context manager for running a code block with ignored signals # # Args: # signals (list): A list of unix signals to ignore # @contextmanager def ignored(signal_list): orig_handlers = {} for sig in signal_list: orig_handlers[sig] = signal.signal(sig, signal.SIG_IGN) try: yield finally: for sig in signal_list: signal.signal(sig, orig_handlers[sig]) buildstream-1.6.9/buildstream/_site.py000066400000000000000000000056461437515270000200510ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom import os import shutil import subprocess # # Private module declaring some info about where the buildstream # is installed so we can lookup package relative resources easily # # The package root, wherever we are running the package from root = os.path.dirname(os.path.abspath(__file__)) # The Element plugin directory element_plugins = os.path.join(root, 'plugins', 'elements') # The Source plugin directory source_plugins = os.path.join(root, 'plugins', 'sources') # Default user configuration default_user_config = os.path.join(root, 'data', 'userconfig.yaml') # Default project configuration default_project_config = os.path.join(root, 'data', 'projectconfig.yaml') # Script template to call module building scripts build_all_template = os.path.join(root, 'data', 'build-all.sh.in') # Module building script template build_module_template = os.path.join(root, 'data', 'build-module.sh.in') # Cached bwrap version _bwrap_major = None _bwrap_minor = None _bwrap_patch = None # check_bwrap_version() # # Checks the version of installed bwrap against the requested version # # Args: # major (int): The required major version # minor (int): The required minor version # patch (int): The required patch level # # Returns: # (bool): Whether installed bwrap meets the requirements # def check_bwrap_version(major, minor, patch): # pylint: disable=global-statement global _bwrap_major global _bwrap_minor global _bwrap_patch # Parse bwrap version and save into cache, if not already cached if _bwrap_major is None: bwrap_path = shutil.which('bwrap') if not bwrap_path: return False cmd = [bwrap_path, "--version"] version = str(subprocess.check_output(cmd).split()[1], "utf-8") _bwrap_major, _bwrap_minor, _bwrap_patch = map(int, version.split(".")) # Check whether the installed version meets the requirements if _bwrap_major > major: return True elif _bwrap_major < major: return False else: if _bwrap_minor > minor: return True elif _bwrap_minor < minor: return False else: return _bwrap_patch >= patch buildstream-1.6.9/buildstream/_sourcefactory.py000066400000000000000000000044531437515270000217700ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom from . 
import _site from ._plugincontext import PluginContext from .source import Source # A SourceFactory creates Source instances # in the context of a given factory # # Args: # plugin_base (PluginBase): The main PluginBase object to work with # plugin_origins (list): Data used to search for external Source plugins # class SourceFactory(PluginContext): def __init__(self, plugin_base, *, format_versions=None, plugin_origins=None): if format_versions is None: format_versions = {} super().__init__(plugin_base, Source, [_site.source_plugins], format_versions=format_versions, plugin_origins=plugin_origins) # create(): # # Create a Source object, the pipeline uses this to create Source # objects on demand for a given pipeline. # # Args: # context (object): The Context object for processing # project (object): The project object # meta (object): The loaded MetaSource # # Returns: # A newly created Source object of the appropriate kind # # Raises: # PluginError (if the kind lookup failed) # LoadError (if the source itself took issue with the config) # def create(self, context, project, meta): source_type, _ = self.lookup(meta.kind) source = source_type(context, project, meta) version = self._format_versions.get(meta.kind, 0) self._assert_plugin_format(source, version) return source buildstream-1.6.9/buildstream/_stream.py000066400000000000000000001270451437515270000203760ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jürg Billeter # Tristan Maat import os import sys import stat import shlex import shutil import tarfile from contextlib import contextmanager from tempfile import TemporaryDirectory from ._exceptions import StreamError, ImplError, BstError from ._message import Message, MessageType from ._scheduler import Scheduler, SchedStatus, TrackQueue, FetchQueue, BuildQueue, PullQueue, PushQueue from ._pipeline import Pipeline, PipelineSelection from . import utils, _yaml, _site from . import Scope, Consistency # Stream() # # This is the main, toplevel calling interface in BuildStream core. 
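# A minimal usage sketch (the element name is illustrative; a real frontend
# also supplies the various callbacks documented below):
#
#   stream = Stream(context, project, session_start)
#   stream.build(['hello.bst'])
#   stream.cleanup()
#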
# # Args: # context (Context): The Context object # project (Project): The Project object # session_start (datetime): The time when the session started # session_start_callback (callable): A callback to invoke when the session starts # interrupt_callback (callable): A callback to invoke when we get interrupted # ticker_callback (callable): Invoked every second while running the scheduler # job_start_callback (callable): Called when a job starts # job_complete_callback (callable): Called when a job completes # class Stream(): def __init__(self, context, project, session_start, *, session_start_callback=None, interrupt_callback=None, ticker_callback=None, job_start_callback=None, job_complete_callback=None): # # Public members # self.targets = [] # Resolved target elements self.session_elements = [] # List of elements being processed this session self.total_elements = [] # Total list of elements based on targets self.queues = [] # Queue objects # # Private members # self._artifacts = context.artifactcache self._context = context self._project = project self._pipeline = Pipeline(context, project, self._artifacts) self._scheduler = Scheduler(context, session_start, interrupt_callback=interrupt_callback, ticker_callback=ticker_callback, job_start_callback=job_start_callback, job_complete_callback=job_complete_callback) self._first_non_track_queue = None self._session_start_callback = session_start_callback # cleanup() # # Cleans up application state # def cleanup(self): if self._project: self._project.cleanup() # load_selection() # # An all purpose method for loading a selection of elements, this # is primarily useful for the frontend to implement `bst show` # and `bst shell`. # # Args: # targets (list of str): Targets to pull # selection (PipelineSelection): The selection mode for the specified targets # except_targets (list of str): Specified targets to except from fetching # # Returns: # (list of Element): The selected elements def load_selection(self, targets, *, selection=PipelineSelection.NONE, except_targets=()): elements, _ = self._load(targets, (), selection=selection, except_targets=except_targets, fetch_subprojects=False) return elements # shell() # # Run a shell # # Args: # element (Element): An Element object to run the shell for # scope (Scope): The scope for the shell (Scope.BUILD or Scope.RUN) # prompt (str): The prompt to display in the shell # directory (str): A directory where an existing prestaged sysroot is expected, or None # mounts (list of HostMount): Additional directories to mount into the sandbox # isolate (bool): Whether to isolate the environment like we do in builds # command (list): An argv to launch in the sandbox, or None # # Returns: # (int): The exit code of the launched shell # def shell(self, element, scope, prompt, *, directory=None, mounts=None, isolate=False, command=None): # Assert we have everything we need built, unless the directory is specified # in which case we just blindly trust the directory, using the element # definitions to control the execution environment only. if directory is None: missing_deps = [ dep._get_full_name() for dep in self._pipeline.dependencies([element], scope) if not dep._cached() ] if missing_deps: raise StreamError("Elements need to be built or downloaded before staging a shell environment", detail="\n".join(missing_deps)) return element._shell(scope, directory, mounts=mounts, isolate=isolate, prompt=prompt, command=command) # build() # # Builds (assembles) elements in the pipeline. 
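    # Depending on configuration, the queues constructed below are, in order:
    #
    #   TrackQueue -> PullQueue -> FetchQueue -> BuildQueue -> PushQueue
    #
    # where the Track, Pull and Push queues are only added when tracking was
    # requested or the corresponding remotes are configured.
    #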
# # Args: # targets (list of str): Targets to build # track_targets (list of str): Specified targets for tracking # track_except (list of str): Specified targets to except from tracking # track_cross_junctions (bool): Whether tracking should cross junction boundaries # build_all (bool): Whether to build all elements, or only those # which are required to build the target. # def build(self, targets, *, track_targets=None, track_except=None, track_cross_junctions=False, build_all=False): if build_all: selection = PipelineSelection.ALL else: selection = PipelineSelection.PLAN elements, track_elements = \ self._load(targets, track_targets, selection=selection, track_selection=PipelineSelection.ALL, track_except_targets=track_except, track_cross_junctions=track_cross_junctions, use_artifact_config=True, fetch_subprojects=True, dynamic_plan=True) # Remove the tracking elements from the main targets elements = self._pipeline.subtract_elements(elements, track_elements) # Assert that the elements we're not going to track are consistent self._pipeline.assert_consistent(elements) # Now construct the queues # track_queue = None if track_elements: track_queue = TrackQueue(self._scheduler) self._add_queue(track_queue, track=True) if self._artifacts.has_fetch_remotes(): self._add_queue(PullQueue(self._scheduler)) self._add_queue(FetchQueue(self._scheduler, skip_cached=True)) self._add_queue(BuildQueue(self._scheduler)) if self._artifacts.has_push_remotes(): self._add_queue(PushQueue(self._scheduler)) # Enqueue elements # if track_elements: self._enqueue_plan(track_elements, queue=track_queue) self._enqueue_plan(elements) self._run() # fetch() # # Fetches sources on the pipeline. # # Args: # targets (list of str): Targets to fetch # selection (PipelineSelection): The selection mode for the specified targets # except_targets (list of str): Specified targets to except from fetching # track_targets (bool): Whether to track selected targets in addition to fetching # track_cross_junctions (bool): Whether tracking should cross junction boundaries # def fetch(self, targets, *, selection=PipelineSelection.PLAN, except_targets=None, track_targets=False, track_cross_junctions=False): if track_targets: track_targets = targets track_selection = selection track_except_targets = except_targets else: track_targets = () track_selection = PipelineSelection.NONE track_except_targets = () elements, track_elements = \ self._load(targets, track_targets, selection=selection, track_selection=track_selection, except_targets=except_targets, track_except_targets=track_except_targets, track_cross_junctions=track_cross_junctions, fetch_subprojects=True) # Delegated to a shared fetch method self._fetch(elements, track_elements=track_elements) # track() # # Tracks all the sources of the selected elements. # # Args: # targets (list of str): Targets to track # selection (PipelineSelection): The selection mode for the specified targets # except_targets (list of str): Specified targets to except from tracking # cross_junctions (bool): Whether tracking should cross junction boundaries # # If no error is encountered while tracking, then the project files # are rewritten inline. # def track(self, targets, *, selection=PipelineSelection.REDIRECT, except_targets=None, cross_junctions=False): # We pass no target to build. Only to track. Passing build targets # would fully load project configuration which might not be # possible before tracking is done. 
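        # _load() returns the (build selection, tracking selection) pair;
        # since we pass no build targets, the first list is discarded and
        # `elements` below holds the tracking selection only.
        #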
_, elements = \ self._load([], targets, selection=selection, track_selection=selection, except_targets=except_targets, track_except_targets=except_targets, track_cross_junctions=cross_junctions, fetch_subprojects=True) track_queue = TrackQueue(self._scheduler) self._add_queue(track_queue, track=True) self._enqueue_plan(elements, queue=track_queue) self._run() # pull() # # Pulls artifacts from remote artifact server(s) # # Args: # targets (list of str): Targets to pull # selection (PipelineSelection): The selection mode for the specified targets # remote (str): The URL of a specific remote server to pull from, or None # # If `remote` specified as None, then regular configuration will be used # to determine where to pull artifacts from. # def pull(self, targets, *, selection=PipelineSelection.NONE, remote=None): use_config = True if remote: use_config = False elements, _ = self._load(targets, (), selection=selection, use_artifact_config=use_config, artifact_remote_url=remote, fetch_subprojects=True) if not self._artifacts.has_fetch_remotes(): raise StreamError("No artifact caches available for pulling artifacts") self._pipeline.assert_consistent(elements) self._add_queue(PullQueue(self._scheduler)) self._enqueue_plan(elements) self._run() # push() # # Pulls artifacts to remote artifact server(s) # # Args: # targets (list of str): Targets to push # selection (PipelineSelection): The selection mode for the specified targets # remote (str): The URL of a specific remote server to push to, or None # # If `remote` specified as None, then regular configuration will be used # to determine where to push artifacts to. # def push(self, targets, *, selection=PipelineSelection.NONE, remote=None): use_config = True if remote: use_config = False elements, _ = self._load(targets, (), selection=selection, use_artifact_config=use_config, artifact_remote_url=remote, fetch_subprojects=True) if not self._artifacts.has_push_remotes(): raise StreamError("No artifact caches available for pushing artifacts") # Mark all dependencies of all selected elements as "pulled" before # trying to push. # # In non-strict mode, elements which are cached by their weak keys # will attempt to pull a remote artifact by it's strict key and prefer # a strict key artifact, however pull does not occur when running # a `bst push` session. # # Marking the elements as pulled is a workaround which ensures that # the cache keys are resolved before pushing. # for element in elements: element._pull_done() self._pipeline.assert_consistent(elements) self._add_queue(PushQueue(self._scheduler)) self._enqueue_plan(elements) self._run() # checkout() # # Checkout target artifact to the specified location # # Args: # target (str): Target to checkout # location (str): Location to checkout the artifact to # force (bool): Whether files can be overwritten if necessary # deps (str): The dependencies to checkout # integrate (bool): Whether to run integration commands # hardlinks (bool): Whether checking out files hardlinked to # their artifacts is acceptable # tar (bool): If true, a tarball from the artifact contents will # be created, otherwise the file tree of the artifact # will be placed at the given location. If true and # location is '-', the tarball will be dumped on the # standard output. 
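    # For example (target name and paths are illustrative):
    #
    #   stream.checkout('hello.bst', location='checkout-dir', hardlinks=True)
    #
    # or, to stream a tarball to stdout:
    #
    #   stream.checkout('hello.bst', location='-', tar=True)
    #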
# def checkout(self, target, *, location=None, force=False, deps='run', integrate=True, hardlinks=False, tar=False): # We only have one target in a checkout command elements, _ = self._load((target,), (), fetch_subprojects=True) target = elements[0] if not tar: try: os.makedirs(location, exist_ok=True) except OSError as e: raise StreamError("Failed to create checkout directory: '{}'" .format(e)) from e if not tar: if not os.access(location, os.W_OK): raise StreamError("Checkout directory '{}' not writable" .format(location)) if not force and os.listdir(location): raise StreamError("Checkout directory '{}' not empty" .format(location)) elif os.path.exists(location) and location != '-': if not os.access(location, os.W_OK): raise StreamError("Output file '{}' not writable" .format(location)) if not force and os.path.exists(location): raise StreamError("Output file '{}' already exists" .format(location)) # Stage deps into a temporary sandbox first try: with target._prepare_sandbox(Scope.RUN, None, deps=deps, integrate=integrate) as sandbox: # Copy or move the sandbox to the target directory sandbox_root = sandbox.get_directory() if not tar: with target.timed_activity("Checking out files in '{}'" .format(location)): try: if hardlinks: self._checkout_hardlinks(sandbox_root, location) else: utils.copy_files(sandbox_root, location) except OSError as e: raise StreamError("Failed to checkout files: '{}'" .format(e)) from e else: if location == '-': with target.timed_activity("Creating tarball"): with os.fdopen(sys.stdout.fileno(), 'wb') as fo: with tarfile.open(fileobj=fo, mode="w|") as tf: Stream._add_directory_to_tarfile( tf, sandbox_root, '.') else: with target.timed_activity("Creating tarball '{}'" .format(location)): with tarfile.open(location, "w:") as tf: Stream._add_directory_to_tarfile( tf, sandbox_root, '.') except BstError as e: raise StreamError("Error while staging dependencies into a sandbox" ": '{}'".format(e), detail=e.detail, reason=e.reason) from e # workspace_open # # Open a project workspace # # Args: # target (str): The target element to open the workspace for # directory (str): The directory to stage the source in # no_checkout (bool): Whether to skip checking out the source # track_first (bool): Whether to track and fetch first # force (bool): Whether to ignore contents in an existing directory # def workspace_open(self, target, directory, *, no_checkout, track_first, force): if track_first: track_targets = (target,) else: track_targets = () elements, track_elements = self._load((target,), track_targets, selection=PipelineSelection.REDIRECT, track_selection=PipelineSelection.REDIRECT) target = elements[0] directory = os.path.abspath(directory) if not list(target.sources()): build_depends = [x.name for x in target.dependencies(Scope.BUILD, recurse=False)] if not build_depends: raise StreamError("The given element has no sources") detail = "Try opening a workspace on one of its dependencies instead:\n" detail += " \n".join(build_depends) raise StreamError("The given element has no sources", detail=detail) workspaces = self._context.get_workspaces() # Check for workspace config workspace = workspaces.get_workspace(target._get_full_name()) if workspace and not force: raise StreamError("Workspace '{}' is already defined at: {}" .format(target.name, workspace.get_absolute_path())) # If we're going to checkout, we need at least a fetch, # if we were asked to track first, we're going to fetch anyway. 
# if not no_checkout or track_first: track_elements = [] if track_first: track_elements = elements self._fetch(elements, track_elements=track_elements) if not no_checkout and target._get_consistency() != Consistency.CACHED: raise StreamError("Could not stage uncached source. " + "Use `--track` to track and " + "fetch the latest version of the " + "source.") if workspace: workspaces.delete_workspace(target._get_full_name()) workspaces.save_config() shutil.rmtree(directory) try: os.makedirs(directory, exist_ok=True) except OSError as e: raise StreamError("Failed to create workspace directory: {}".format(e)) from e workspaces.create_workspace(target._get_full_name(), directory) if not no_checkout: with target.timed_activity("Staging sources to {}".format(directory)): target._open_workspace() workspaces.save_config() self._message(MessageType.INFO, "Saved workspace configuration") # workspace_close # # Close a project workspace # # Args: # element_name (str): The element name to close the workspace for # remove_dir (bool): Whether to remove the associated directory # def workspace_close(self, element_name, *, remove_dir): workspaces = self._context.get_workspaces() workspace = workspaces.get_workspace(element_name) # Remove workspace directory if prompted if remove_dir: with self._context.timed_activity("Removing workspace directory {}" .format(workspace.get_absolute_path())): try: shutil.rmtree(workspace.get_absolute_path()) except OSError as e: raise StreamError("Could not remove '{}': {}" .format(workspace.get_absolute_path(), e)) from e # Delete the workspace and save the configuration workspaces.delete_workspace(element_name) workspaces.save_config() self._message(MessageType.INFO, "Closed workspace for {}".format(element_name)) # workspace_reset # # Reset a workspace to its original state, discarding any user # changes. 
# # Args: # targets (list of str): The target elements to reset the workspace for # soft (bool): Only reset workspace state # track_first (bool): Whether to also track the sources first # def workspace_reset(self, targets, *, soft, track_first): if track_first: track_targets = targets else: track_targets = () elements, track_elements = self._load(targets, track_targets, selection=PipelineSelection.REDIRECT, track_selection=PipelineSelection.REDIRECT) nonexisting = [] for element in elements: if not self.workspace_exists(element.name): nonexisting.append(element.name) if nonexisting: raise StreamError("Workspace does not exist", detail="\n".join(nonexisting)) # Do the tracking first if track_first: self._fetch(elements, track_elements=track_elements) workspaces = self._context.get_workspaces() for element in elements: workspace = workspaces.get_workspace(element._get_full_name()) workspace_path = workspace.get_absolute_path() if soft: workspace.prepared = False self._message(MessageType.INFO, "Reset workspace state for {} at: {}" .format(element.name, workspace_path)) continue with element.timed_activity("Removing workspace directory {}" .format(workspace_path)): try: shutil.rmtree(workspace_path) except OSError as e: raise StreamError("Could not remove '{}': {}" .format(workspace_path, e)) from e workspaces.delete_workspace(element._get_full_name()) workspaces.create_workspace(element._get_full_name(), workspace_path) with element.timed_activity("Staging sources to {}".format(workspace_path)): element._open_workspace() self._message(MessageType.INFO, "Reset workspace for {} at: {}".format(element.name, workspace_path)) workspaces.save_config() # workspace_exists # # Check if a workspace exists # # Args: # element_name (str): The element name to close the workspace for, or None # # Returns: # (bool): True if the workspace exists # # If None is specified for `element_name`, then this will return # True if there are any existing workspaces. # def workspace_exists(self, element_name=None): workspaces = self._context.get_workspaces() if element_name: workspace = workspaces.get_workspace(element_name) if workspace: return True elif any(workspaces.list()): return True return False # workspace_list # # Serializes the workspaces and dumps them in YAML to stdout. # def workspace_list(self): workspaces = [] for element_name, workspace_ in self._context.get_workspaces().list(): workspace_detail = { 'element': element_name, 'directory': workspace_.get_absolute_path(), } workspaces.append(workspace_detail) _yaml.dump({ 'workspaces': workspaces }) # source_bundle() # # Create a host buildable tarball bundle for the given target. 
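    # The resulting tarball roughly contains (exact script names depend on
    # each element's _write_script() implementation):
    #
    #   <target>/build.sh              - master build script
    #   <target>/source/<element>/...  - staged sources, one tree per element
    #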
# # Args: # target (str): The target element to bundle # directory (str): The directory to output the tarball # track_first (bool): Track new source references before bundling # compression (str): The compression type to use # force (bool): Overwrite an existing tarball # def source_bundle(self, target, directory, *, track_first=False, force=False, compression="gz", except_targets=()): if track_first: track_targets = (target,) else: track_targets = () elements, track_elements = self._load((target,), track_targets, selection=PipelineSelection.ALL, except_targets=except_targets, track_selection=PipelineSelection.ALL, fetch_subprojects=True) # source-bundle only supports one target target = self.targets[0] self._message(MessageType.INFO, "Bundling sources for target {}".format(target.name)) # Find the correct filename for the compression algorithm tar_location = os.path.join(directory, target.normal_name + ".tar") if compression != "none": tar_location += "." + compression # Attempt writing a file to generate a good error message # early # # FIXME: A bit hackish try: with open(tar_location, mode="x") as _: # pylint: disable=unspecified-encoding pass os.remove(tar_location) except IOError as e: raise StreamError("Cannot write to {0}: {1}" .format(tar_location, e)) from e # Fetch and possibly track first # self._fetch(elements, track_elements=track_elements) # We don't use the scheduler for this as it is almost entirely IO # bound. # Create a temporary directory to build the source tree in builddir = self._context.builddir prefix = "{}-".format(target.normal_name) with TemporaryDirectory(prefix=prefix, dir=builddir) as tempdir: source_directory = os.path.join(tempdir, 'source') try: os.makedirs(source_directory) except OSError as e: raise StreamError("Failed to create directory: {}" .format(e)) from e # Any elements that don't implement _write_script # should not be included in the later stages. elements = [ element for element in elements if self._write_element_script(source_directory, element) ] self._write_element_sources(tempdir, elements) self._write_build_script(tempdir, elements) self._collect_sources(tempdir, tar_location, target.normal_name, compression) # redirect_element_names() # # Takes a list of element names and returns a list where elements have been # redirected to their source elements if the element file exists, and just # the name, if not. 
# # Args: # elements (list of str): The element names to redirect # # Returns: # (list of str): The element names after redirecting # def redirect_element_names(self, elements): element_dir = self._project.element_path load_elements = [] output_elements = set() for e in elements: element_path = os.path.join(element_dir, e) if os.path.exists(element_path): load_elements.append(e) else: output_elements.add(e) if load_elements: loaded_elements, _ = self._load(load_elements, (), selection=PipelineSelection.REDIRECT, track_selection=PipelineSelection.REDIRECT) for e in loaded_elements: output_elements.add(e.name) return list(output_elements) ############################################################# # Scheduler API forwarding # ############################################################# # running # # Whether the scheduler is running # @property def running(self): return self._scheduler.loop is not None # suspended # # Whether the scheduler is currently suspended # @property def suspended(self): return self._scheduler.suspended # terminated # # Whether the scheduler is currently terminated # @property def terminated(self): return self._scheduler.terminated # elapsed_time # # Elapsed time since the session start # @property def elapsed_time(self): return self._scheduler.elapsed_time() # terminate() # # Terminate jobs # def terminate(self): self._scheduler.terminate_jobs() # quit() # # Quit the session, this will continue with any ongoing # jobs, use Stream.terminate() instead for cancellation # of ongoing jobs # def quit(self): self._scheduler.stop_queueing() # suspend() # # Context manager to suspend ongoing jobs # @contextmanager def suspend(self): with self._scheduler.jobs_suspended(): yield ############################################################# # Private Methods # ############################################################# # _load() # # A convenience method for loading element lists # # If `targets` is not empty used project configuration will be # fully loaded. If `targets` is empty, tracking will still be # resolved for elements in `track_targets`, but no build pipeline # will be resolved. This is behavior is import for track() to # not trigger full loading of project configuration. 
# # Args: # targets (list of str): Main targets to load # track_targets (list of str): Tracking targets # selection (PipelineSelection): The selection mode for the specified targets # track_selection (PipelineSelection): The selection mode for the specified tracking targets # except_targets (list of str): Specified targets to except from fetching # track_except_targets (list of str): Specified targets to except from fetching # track_cross_junctions (bool): Whether tracking should cross junction boundaries # use_artifact_config (bool): Whether to initialize artifacts with the config # artifact_remote_url (bool): A remote url for initializing the artifacts # fetch_subprojects (bool): Whether to fetch subprojects while loading # # Returns: # (list of Element): The primary element selection # (list of Element): The tracking element selection # def _load(self, targets, track_targets, *, selection=PipelineSelection.NONE, track_selection=PipelineSelection.NONE, except_targets=(), track_except_targets=(), track_cross_junctions=False, use_artifact_config=False, artifact_remote_url=None, fetch_subprojects=False, dynamic_plan=False): # Load rewritable if we have any tracking selection to make rewritable = False if track_targets: rewritable = True # Load all targets elements, except_elements, track_elements, track_except_elements = \ self._pipeline.load([targets, except_targets, track_targets, track_except_targets], rewritable=rewritable, fetch_subprojects=fetch_subprojects) # Hold on to the targets self.targets = elements # Here we should raise an error if the track_elements targets # are not dependencies of the primary targets, this is not # supported. # # This can happen with `bst build --track` # if targets and not self._pipeline.targets_include(elements, track_elements): raise StreamError("Specified tracking targets that are not " "within the scope of primary targets") # First take care of marking tracking elements, this must be # done before resolving element states. # assert track_selection != PipelineSelection.PLAN # Tracked elements are split by owner projects in order to # filter cross junctions tracking dependencies on their # respective project. track_projects = {} for element in track_elements: project = element._get_project() if project not in track_projects: track_projects[project] = [element] else: track_projects[project].append(element) track_selected = [] for project, project_elements in track_projects.items(): selected = self._pipeline.get_selection(project_elements, track_selection) selected = self._pipeline.track_cross_junction_filter(project, selected, track_cross_junctions) track_selected.extend(selected) track_selected = self._pipeline.except_elements(track_elements, track_selected, track_except_elements) for element in track_selected: element._schedule_tracking() if not targets: self._pipeline.resolve_elements(track_selected) return [], track_selected # ArtifactCache.setup_remotes expects all projects to be fully loaded for project in self._context.get_projects(): project.ensure_fully_loaded() # Connect to remote caches, this needs to be done before resolving element state self._artifacts.setup_remotes(use_config=use_artifact_config, remote_url=artifact_remote_url) # Now move on to loading primary selection. 
# self._pipeline.resolve_elements(elements) selected = self._pipeline.get_selection(elements, selection, silent=False) selected = self._pipeline.except_elements(elements, selected, except_elements) # Set the "required" artifacts that should not be removed # while this pipeline is active # # It must include all the artifacts which are required by the # final product. Note that this is a superset of the build plan. # self._artifacts.mark_required_elements(self._pipeline.dependencies(elements, Scope.ALL)) if selection == PipelineSelection.PLAN and dynamic_plan: # We use a dynamic build plan, only request artifacts of top-level targets, # others are requested dynamically as needed. # This avoids pulling, fetching, or building unneeded build-only dependencies. for element in elements: element._set_required() else: for element in selected: element._set_required() return selected, track_selected # _message() # # Local message propagator # def _message(self, message_type, message, **kwargs): args = dict(kwargs) self._context.message( Message(None, message_type, message, **args)) # _add_queue() # # Adds a queue to the stream # # Args: # queue (Queue): Queue to add to the pipeline # track (bool): Whether this is the tracking queue # def _add_queue(self, queue, *, track=False): self.queues.append(queue) if not (track or self._first_non_track_queue): self._first_non_track_queue = queue # _enqueue_plan() # # Enqueues planned elements to the specified queue. # # Args: # plan (list of Element): The list of elements to be enqueued # queue (Queue): The target queue, defaults to the first non-track queue # def _enqueue_plan(self, plan, *, queue=None): queue = queue or self._first_non_track_queue queue.enqueue(plan) self.session_elements += plan # _run() # # Common function for running the scheduler # def _run(self): # Inform the frontend of the full list of elements # and the list of elements which will be processed in this run # self.total_elements = list(self._pipeline.dependencies(self.targets, Scope.ALL)) if self._session_start_callback is not None: self._session_start_callback() _, status = self._scheduler.run(self.queues) if status == SchedStatus.ERROR: raise StreamError() if status == SchedStatus.TERMINATED: raise StreamError(terminated=True) # _fetch() # # Performs the fetch job, the body of this function is here because # it is shared between a few internals. # # Args: # elements (list of Element): Elements to fetch # track_elements (list of Element): Elements to track # def _fetch(self, elements, *, track_elements=None): if track_elements is None: track_elements = [] # Subtract the track elements from the fetch elements, they will be added separately fetch_plan = self._pipeline.subtract_elements(elements, track_elements) # Assert consistency for the fetch elements self._pipeline.assert_consistent(fetch_plan) # Filter out elements with cached sources, only from the fetch plan # let the track plan resolve new refs. 
cached = [elt for elt in fetch_plan if elt._get_consistency() == Consistency.CACHED] fetch_plan = self._pipeline.subtract_elements(fetch_plan, cached) # Construct queues, enqueue and run # track_queue = None if track_elements: track_queue = TrackQueue(self._scheduler) self._add_queue(track_queue, track=True) self._add_queue(FetchQueue(self._scheduler)) if track_elements: self._enqueue_plan(track_elements, queue=track_queue) self._enqueue_plan(fetch_plan) self._run() # Helper function for checkout() # def _checkout_hardlinks(self, sandbox_root, directory): try: removed = utils.safe_remove(directory) except OSError as e: raise StreamError("Failed to remove checkout directory: {}".format(e)) from e if removed: # Try a simple rename of the sandbox root; if that # doesnt cut it, then do the regular link files code path try: os.rename(sandbox_root, directory) except OSError: os.makedirs(directory, exist_ok=True) utils.link_files(sandbox_root, directory) else: utils.link_files(sandbox_root, directory) # Add a directory entry deterministically to a tar file # # This function takes extra steps to ensure the output is deterministic. # First, it sorts the results of os.listdir() to ensure the ordering of # the files in the archive is the same. Second, it sets a fixed # timestamp for each entry. See also https://bugs.python.org/issue24465. @staticmethod def _add_directory_to_tarfile(tf, dir_name, dir_arcname, mtime=0): for filename in sorted(os.listdir(dir_name)): name = os.path.join(dir_name, filename) arcname = os.path.join(dir_arcname, filename) tarinfo = tf.gettarinfo(name, arcname) tarinfo.mtime = mtime if tarinfo.isreg(): with open(name, "rb") as f: tf.addfile(tarinfo, f) elif tarinfo.isdir(): tf.addfile(tarinfo) Stream._add_directory_to_tarfile(tf, name, arcname, mtime) else: tf.addfile(tarinfo) # Write the element build script to the given directory def _write_element_script(self, directory, element): try: element._write_script(directory) except ImplError: return False return True # Write all source elements to the given directory def _write_element_sources(self, directory, elements): for element in elements: source_dir = os.path.join(directory, "source") element_source_dir = os.path.join(source_dir, element.normal_name) element._stage_sources_at(element_source_dir) # Write a master build script to the sandbox def _write_build_script(self, directory, elements): module_string = "" for element in elements: module_string += shlex.quote(element.normal_name) + " " script_path = os.path.join(directory, "build.sh") with open(_site.build_all_template, "r", encoding="utf-8") as f: script_template = f.read() with utils.save_file_atomic(script_path, "w") as script: script.write(script_template.format(modules=module_string)) os.chmod(script_path, stat.S_IEXEC | stat.S_IREAD) # Collect the sources in the given sandbox into a tarfile def _collect_sources(self, directory, tar_name, element_name, compression): with self._context.timed_activity("Creating tarball {}".format(tar_name)): if compression == "none": permissions = "w:" else: permissions = "w:" + compression with tarfile.open(tar_name, permissions) as tar: tar.add(directory, arcname=element_name) buildstream-1.6.9/buildstream/_variables.py000066400000000000000000000545761437515270000210630ustar00rootroot00000000000000# # Copyright (C) 2020 Codethink Limited # Copyright (C) 2019 Bloomberg L.P. 
# # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Daniel Silverstone # Benjamin Schubert import re import sys from ._exceptions import LoadError, LoadErrorReason from . import _yaml ######################################################## # Understanding Value Expressions # ######################################################## # # This code uses the term "value expression" a lot to refer to `str` objects # which have references to variables in them, and also to `list` objects which # are effectively broken down strings. # # Ideally we would have a ValueExpression type in order to make this more # comprehensive, but this would unfortunately introduce unnecessary overhead, # making the code measurably slower. # # Value Expression Strings # ------------------------ # Strings which contain variables in them, such as: # # "My name is %{username}, good day." # # # Parsed Value Expression Lists # ----------------------------- # Using `re.split()` from python's regular expression implementation, we # parse the list using our locally defined VALUE_EXPRESSION_REGEX, which # breaks down the string into a list of "literal" and "variable" components. # # The "literal" components are literal portions of the string which need # no substitution, while the "variable" components represent variable names # which need to be substituted with their corresponding resolved values. # # The parsed variable expressions have the following properties: # # * They are sparse, some of the "literal" values contain zero length # strings which can be ignored. # # * Literal values are found only at even indices of the parsed # variable expression # # * Variable names are found only at odd indices # # The above example "My name is %{username}, good day." is broken down # into a parsed value expression as follows: # # [ # "My name is ", # <- Index 0, literal value # "username", # <- Index 1, variable name, '%{ ... }' discarded # ", good day." # <- Index 2, literal value # ] # # Maximum recursion depth using the fast (recursive) variable resolution # algorithm. # MAX_RECURSION_DEPTH = 200 # Regular expression used to parse %{variables} in value expressions # # Note that variables are allowed to have dashes # VALUE_EXPRESSION_REGEX = re.compile(r"\%\{([a-zA-Z][a-zA-Z0-9_-]*)\}") # Cache for the parsed expansion strings. # VALUE_EXPRESSION_CACHE = { # Prime the cache with the empty string since otherwise that can # cause issues with the parser, complications to which cause slowdown "": [""], } # Variables() # # The Variables object resolves the variable references in the given MappingNode, # expecting that any dictionary values which contain variable references can be # resolved from the same dictionary. # # Each Element creates its own Variables instance to track the configured # variable settings for the element. 
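#
# An illustrative sketch (hypothetical variable names, not part of the
# original documentation): given an element declaring
#
#   variables:
#     prefix: "/usr"
#     bindir: "%{prefix}/bin"
#
# looking up variables['bindir'] on the resulting Variables instance
# yields "/usr/bin".
#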
# # Notably, this object is delegated the responsibility of expanding # variables in yaml Node hierarchies and substituting variables in strings # in the context of a given Element's variable configuration. # # Args: # node (dict): A node loaded and composited with yaml tools # # Raises: # LoadError, if unresolved variables, or cycles in resolution, occur. # class Variables: ################################################################# # Dunder Methods # ################################################################# def __init__(self, node): # The original MappingNode, we need to keep this # around for proper error reporting. # self._original = node # The value map, this dictionary contains either unresolved # value expressions, or resolved values. # # Each mapping value is a list, in the case that the value # is resolved, then the list is only 1 element long. # self._values = self._init_values(node) # __getitem__() # # Fetches a resolved variable by it's name, allows # addressing the Variables instance like a dictionary. # # Args: # name (str): The name of the variable # # Returns: # (str): The resolved variable value # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def __getitem__(self, name): if name not in self._values: raise KeyError(name) return self._expand_var(name) # __contains__() # # Checks whether a given variable exists, allows # supporting `if 'foo' in variables` expressions. # # Args: # name (str): The name of the variable to check for # # Returns: # (bool): True if `name` is a valid variable # def __contains__(self, name): return name in self._values # __iter__() # # Provide an iterator for all variables effective values # # Returns: # (Iterator[Tuple[str, str]]) # def __iter__(self): return _VariablesIterator(self) ################################################################# # Public API # ################################################################# # check() # # Assert that all variables declared on this Variables # instance have been resolved properly, and reports errors # for undefined references and circular references. # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def check(self): # Just resolve all variables. for key in self._values: self._expand_var(key) # get() # # Expand definition of variable by name. If the variable is not # defined, it will return None instead of failing. # # Args: # name (str): Name of the variable to expand # # Returns: # (str|None): The expanded value for the variable or None variable was not defined. # def get(self, name): if name not in self._values: return None return self[name] # subst(): # # Substitutes any variables in 'string' and returns the result. # # Args: # string (str): The string to substitute # provenance (Provenance): The provenance of the string # # Returns: # (str): The new string with any substitutions made # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def subst(self, string, provenance): value_expression = _parse_value_expression(string) return self._expand_value_expression(value_expression, provenance) ################################################################# # Private API # ################################################################# # _init_values() # # Initialize the table of values. # # The value table is a dictionary keyed by the variable names where # the values are value expressions (lists) which are initially unresolved. 
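#
# For illustration (an assumption about the concrete shape, continuing the
# hypothetical example above): immediately after _init_values() the table
# would contain
#
#   { "prefix": ["/usr"], "bindir": ["", "prefix", "/bin"] }
#
# and once 'bindir' has been expanded its entry collapses to the single
# element list ["/usr/bin"].
#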
# # Value expressions are later resolved on demand and replaced in this # table with single element lists. # # Args: # node (dict): The original variables mapping node # # Returns: # (dict): A dictionary of value expressions (lists) # def _init_values(self, node): # Special case, if notparallel is specified in the variables for this # element, then override max-jobs to be 1. # Initialize it as a string as all variables are processed as strings. # if _yaml.node_get(node, bool, 'notparallel', default_value=False): node['max-jobs'] = str(1) ret = {} for key in node.keys(): value = _yaml.node_get(node, str, key) ret[sys.intern(key)] = _parse_value_expression(value) return ret # _expand_var() # # Expand and cache a variable definition. # # This will try the fast, recursive path first and fallback to # the slower iterative codepath. # # Args: # name (str): Name of the variable to expand # # Returns: # (str): The expanded value of variable # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def _expand_var(self, name): try: return self._fast_expand_var(name) except (KeyError, RecursionError): return self._slow_expand_var(name) # _expand_value_expression() # # Expands a value expression # # This will try the fast, recursive path first and fallback to # the slower iterative codepath. # # Args: # value_expression (list): The parsed value expression to be expanded # provenance (Provenance): The provenance of the value expression # # Returns: # (str): The expanded value expression # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def _expand_value_expression(self, value_expression, provenance): try: return self._fast_expand_value_expression(value_expression) except (KeyError, RecursionError): return self._slow_expand_value_expression(None, value_expression, provenance) ################################################################# # Resolution algorithm: fast path # ################################################################# # _fast_expand_var() # # Fast, recursive path for variable expansion # # Args: # name (str): Name of the variable to expand # counter (int): Number of recursion cycles (used only in recursion) # # Returns: # (str): The expanded value of variable # # Raises: # (KeyError): If a reference to an undefined variable is encountered # (RecursionError): If MAX_RECURSION_DEPTH recursion cycles is reached # def _fast_expand_var(self, name, counter=0): value_expression = self._values[name] if len(value_expression) > 1: sub = self._fast_expand_value_expression(value_expression, counter) value_expression = [sys.intern(sub)] self._values[name] = value_expression return value_expression[0] # _fast_expand_value_expression() # # Fast, recursive path for value expression expansion. 
# # Args: # value_expression (list): The parsed value expression to be expanded # counter (int): Number of recursion cycles (used only in recursion) # # Returns: # (str): The expanded value expression # # Raises: # (KeyError): If a reference to an undefined variable is encountered # (RecursionError): If MAX_RECURSION_DEPTH recursion cycles is reached # def _fast_expand_value_expression(self, value_expression, counter=0): if counter > MAX_RECURSION_DEPTH: raise RecursionError() acc = [] for idx, value in enumerate(value_expression): if (idx % 2) == 0: acc.append(value) else: acc.append(self._fast_expand_var(value, counter + 1)) return "".join(acc) ################################################################# # Resolution algorithm: slow path # ################################################################# # _slow_expand_var() # # Slow, iterative path for variable expansion with full error reporting # # Args: # name (str): Name of the variable to expand # # Returns: # (str): The expanded value of variable # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def _slow_expand_var(self, name): value_expression = self._get_checked_value_expression(name, None, None) if len(value_expression) > 1: expanded = self._slow_expand_value_expression(name, value_expression, None) value_expression = [sys.intern(expanded)] self._values[name] = value_expression return value_expression[0] # _slow_expand_value_expression() # # Slow, iterative path for value expression expansion with full error reporting # # Note that either `varname` or `node` must be provided, these are used to # identify the provenance of this value expression (which might be the value # of a variable, or a value expression found elswhere in project YAML which # needs to be substituted). # # Args: # varname (str|None): The variable name associated with this value expression, if any # value_expression (list): The parsed value expression to be expanded # provenance (Provenance): The provenance who is asking for an expansion # # Returns: # (str): The expanded value expression # # Raises: # (LoadError): In the case of an undefined variable or # a cyclic variable reference # def _slow_expand_value_expression(self, varname, value_expression, provenance): idx = 0 resolved_value = None # We will collect the varnames and value expressions which need # to be resolved in the loop, sorted by dependency, and then # finally reverse through them resolving them one at a time # resolved_varnames = [] resolved_values = [] step = ResolutionStep(varname, value_expression, None) while step: # Keep a hold of the current overall step this_step = step step = step.prev # Check for circular dependencies this_step.check_circular(self._original) for idx, value in enumerate(this_step.value_expression): # Skip literal parts of the value expression if (idx % 2) == 0: continue iter_value_expression = self._get_checked_value_expression(value, this_step.referee, provenance) # Queue up this value. # # Even if the value was already resolved, we need it in context to resolve # previously enqueued variables resolved_values.append(iter_value_expression) resolved_varnames.append(value) # Queue up the values dependencies. 
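#
# Illustrative note (added commentary): the ResolutionStep objects linked
# through `prev` act as an explicit work stack, so deeply nested variable
# chains are walked iteratively here. This is the codepath taken after the
# fast path has bailed out with RecursionError on exceeding
# MAX_RECURSION_DEPTH, or with KeyError on an undefined variable.
#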
# if len(iter_value_expression) > 1: new_step = ResolutionStep(value, iter_value_expression, this_step) # Link it to the end of the stack new_step.prev = step step = new_step # We've now constructed the dependencies queue such that # later dependencies are on the right, we can now safely peddle # backwards and the last (leftmost) resolved value is the one # we want to return. # for iter_value_expression, resolved_varname in zip(reversed(resolved_values), reversed(resolved_varnames)): # Resolve as needed # if len(iter_value_expression) > 1: resolved_value = self._resolve_value_expression(iter_value_expression) iter_value_expression = [resolved_value] if resolved_varname is not None: self._values[resolved_varname] = iter_value_expression return resolved_value # _get_checked_value_expression() # # Fetches a value expression from the value table and raises a user # facing error if the value is undefined. # # Args: # varname (str): The variable name to fetch # referee (str): The variable name referring to `varname`, or None # provenance (Provenance): The provenance for which we need to resolve `name` # # Returns: # (list): The value expression for varname # # Raises: # (LoadError): An appropriate error in case of undefined variables # def _get_checked_value_expression(self, varname, referee=None, provenance=None): # # Fetch the value and detect undefined references # try: return self._values[varname] except KeyError as e: # Either the provenance is the toplevel calling provenance, # or it is the provenance of the direct referee if referee: p = _yaml.node_get_provenance(self._original, referee) else: p = provenance error_message = "Reference to undefined variable '{}'".format(varname) if p: error_message = "{}: {}".format(p, error_message) raise LoadError(LoadErrorReason.UNRESOLVED_VARIABLE, error_message) from e # _resolve_value_expression() # # Resolves a value expression with the expectation that all # variables within this value expression have already been # resolved and updated in the Variables._values table. # # This is used as a part of the iterative resolution codepath, # where value expressions are first sorted by dependency before # being resolved in one go. # # Args: # value_expression (list): The value expression to resolve # # Returns: # (str): The resolved value expression # def _resolve_value_expression(self, value_expression): acc = [] for idx, value in enumerate(value_expression): if (idx % 2) == 0: acc.append(value) else: acc.append(self._values[value][0]) return "".join(acc) # ResolutionStep() # # The context for a single iteration in variable resolution. # # Args: # referee (str): The name of the referring variable # value_expression (list): The parsed value expression to be expanded # parent (ResolutionStep): The parent ResolutionStep # class ResolutionStep: def __init__(self, referee, value_expression, parent): self.referee = referee self.value_expression = value_expression self.parent = parent self.prev = None # check_circular() # # Check for circular references in this step. # # Args: # original_values (MappingNode): The original MappingNode for the Variables # # Raises: # (LoadError): Will raise a user facing LoadError with # LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE in case # circular references were encountered. 
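#
# Illustrative sketch (hypothetical variable names, not original code): a
# definition such as
#
#   variables:
#     foo: "%{bar}"
#     bar: "%{foo}"
#
# causes check_circular() to encounter 'foo' again while walking the parent
# chain, resulting in a LoadError with
# LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE.
#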
# def check_circular(self, original_values): step = self.parent while step: if self.referee is step.referee: self._raise_circular_reference_error(step, original_values) step = step.parent # _raise_circular_reference_error() # # Helper function to construct a full report and raise the LoadError # with LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE. # # Args: # conflict (ResolutionStep): The resolution step which conflicts with this step # original_values (MappingNode): The original node to extract provenances from # # Raises: # (LoadError): Unconditionally # def _raise_circular_reference_error(self, conflict, original_values): error_lines = [] step = self while step is not conflict: if step.parent: referee = step.parent.referee else: referee = self.referee provenance = _yaml.node_get_provenance(original_values, referee) error_lines.append("{}: Variable '{}' refers to variable '{}'".format(provenance, referee, step.referee)) step = step.parent raise LoadError(LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE, "Circular dependency detected on variable '{}'".format(self.referee), detail="\n".join(reversed(error_lines))) # _parse_value_expression() # # Tries to fetch the parsed value expression from the cache, parsing and # caching value expressions on demand and returns the parsed value expression. # # Args: # value_expression (str): The value expression in string form to parse # # Returns: # (list): The parsed value expression in list form. # def _parse_value_expression(value_expression): try: return VALUE_EXPRESSION_CACHE[value_expression] except KeyError: # This use of the regex turns a string like "foo %{bar} baz" into # a list ["foo ", "bar", " baz"] # # The result is a parsed value expression, where even indicies # contain literal parts of the value and odd indices contain # variable names which need to be replaced by resolved variables. # splits = VALUE_EXPRESSION_REGEX.split(value_expression) # Optimize later routines by discarding any unnecessary trailing # empty strings. # if splits[-1] == '': del splits[-1] # We intern the string parts to try and reduce the memory impact # of the cache. # ret = [sys.intern(s) for s in splits] # Cache and return the value expression # VALUE_EXPRESSION_CACHE[value_expression] = ret return ret # Iterator for all flatten variables. # Used by Variables.__iter__ class _VariablesIterator: def __init__(self, variables): self._variables = variables self._iter = iter(variables._values) def __iter__(self): return self def __next__(self): name = next(self._iter) return name, self._variables._expand_var(name) buildstream-1.6.9/buildstream/_version.py000066400000000000000000000442471437515270000205720ustar00rootroot00000000000000# pylint: skip-file # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. 
_version.py will just call # get_keywords(). git_refnames = " (tag: 1.6.9, bst-1)" git_full = "4abd1f3e1b5e5d128bc24e45ec9a37d61723be87" git_date = "2023-02-21 23:31:28 +0900" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "pep440" cfg.tag_prefix = "" cfg.tag_regex = "*.*.*" cfg.parentdir_prefix = "BuildStream-" cfg.versionfile_source = "buildstream/_version.py" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
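#
# Illustrative note (added commentary): the loop below scans the file text
# for assignments such as
#
#   git_refnames = " (tag: 1.6.9, bst-1)"
#
# and records the quoted portion, so the version keywords can be read from
# an sdist without importing the module.
#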
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, tag_regex, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s%s" % (tag_prefix, tag_regex)], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. 
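#
# Illustrative note (added commentary): the lookups below are attempted in
# order -- expanded git-archive keywords first, then `git describe` on a
# checked out tree, then the parent directory name (so a tree unpacked as
# "BuildStream-1.6.9" still reports "1.6.9") -- and the first method that
# does not raise NotThisMethod wins; otherwise "0+unknown" is reported.
#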
cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, cfg.tag_regex, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} buildstream-1.6.9/buildstream/_versions.py000066400000000000000000000024171437515270000207460ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # The base BuildStream format version # # This version is bumped whenever enhancements are made # to the `project.conf` format or the core element format. # BST_FORMAT_VERSION = 18 # The base BuildStream artifact version # # The artifact version changes whenever the cache key # calculation algorithm changes in an incompatible way # or if buildstream was changed in a way which can cause # the same cache key to produce something that is no longer # the same. BST_CORE_ARTIFACT_VERSION = ('bst-1.2', 5) buildstream-1.6.9/buildstream/_workspaces.py000066400000000000000000000330161437515270000212560ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat import os from . import utils from . import _yaml from ._exceptions import LoadError, LoadErrorReason BST_WORKSPACE_FORMAT_VERSION = 3 # Workspace() # # An object to contain various helper functions and data required for # workspaces. # # last_successful, path and running_files are intended to be public # properties, but may be best accessed using this classes' helper # methods. # # Args: # toplevel_project (Project): Top project. 
Will be used for resolving relative workspace paths. # path (str): The path that should host this workspace # last_successful (str): The key of the last successful build of this workspace # running_files (dict): A dict mapping dependency elements to files # changed between failed builds. Should be # made obsolete with failed build artifacts. # class Workspace(): def __init__(self, toplevel_project, *, last_successful=None, path=None, prepared=False, running_files=None): self.prepared = prepared self.last_successful = last_successful self._path = path self.running_files = running_files if running_files is not None else {} self._toplevel_project = toplevel_project self._key = None # to_dict() # # Convert a list of members which get serialized to a dict for serialization purposes # # Returns: # (dict) A dict representation of the workspace # def to_dict(self): ret = { 'prepared': self.prepared, 'path': self._path, 'running_files': self.running_files } if self.last_successful is not None: ret["last_successful"] = self.last_successful return ret # from_dict(): # # Loads a new workspace from a simple dictionary, the dictionary # is expected to be generated from Workspace.to_dict(), or manually # when loading from a YAML file. # # Args: # toplevel_project (Project): Top project. Will be used for resolving relative workspace paths. # dictionary: A simple dictionary object # # Returns: # (Workspace): A newly instantiated Workspace # @classmethod def from_dict(cls, toplevel_project, dictionary): # Just pass the dictionary as kwargs return cls(toplevel_project, **dictionary) # differs() # # Checks if two workspaces are different in any way. # # Args: # other (Workspace): Another workspace instance # # Returns: # True if the workspace differs from 'other', otherwise False # def differs(self, other): return self.to_dict() != other.to_dict() # invalidate_key() # # Invalidate the workspace key, forcing a recalculation next time # it is accessed. # def invalidate_key(self): self._key = None # stage() # # Stage the workspace to the given directory. # # Args: # directory (str) - The directory into which to stage this workspace # def stage(self, directory): fullpath = self.get_absolute_path() if os.path.isdir(fullpath): utils.copy_files(fullpath, directory) else: destfile = os.path.join(directory, os.path.basename(self.get_absolute_path())) utils.safe_copy(fullpath, destfile) # add_running_files() # # Append a list of files to the running_files for the given # dependency. Duplicate files will be ignored. # # Args: # dep_name (str) - The dependency name whose files to append to # files (str) - A list of files to append # def add_running_files(self, dep_name, files): if dep_name in self.running_files: # ruamel.py cannot serialize sets in python3.4 to_add = set(files) - set(self.running_files[dep_name]) self.running_files[dep_name].extend(to_add) else: self.running_files[dep_name] = list(files) # clear_running_files() # # Clear all running files associated with this workspace. # def clear_running_files(self): self.running_files = {} # get_key() # # Get a unique key for this workspace. 
# # Args: # recalculate (bool) - Whether to recalculate the key # # Returns: # (str) A unique key for this workspace # def get_key(self, recalculate=False): def unique_key(filename): try: stat = os.lstat(filename) except OSError as e: raise LoadError(LoadErrorReason.MISSING_FILE, "Failed to stat file in workspace: {}".format(e)) from e # Use the mtime of any file with sub second precision return stat.st_mtime_ns if recalculate or self._key is None: fullpath = self.get_absolute_path() # Get a list of tuples of the the project relative paths and fullpaths if os.path.isdir(fullpath): filelist = utils.list_relative_paths(fullpath) filelist = [(relpath, os.path.join(fullpath, relpath)) for relpath in filelist] else: filelist = [(self.get_absolute_path(), fullpath)] self._key = [(relpath, unique_key(fullpath)) for relpath, fullpath in filelist] return self._key # get_absolute_path(): # # Returns: The absolute path of the element's workspace. # def get_absolute_path(self): return os.path.join(self._toplevel_project.directory, self._path) # Workspaces() # # A class to manage Workspaces for multiple elements. # # Args: # toplevel_project (Project): Top project used to resolve paths. # class Workspaces(): def __init__(self, toplevel_project): self._toplevel_project = toplevel_project self._bst_directory = os.path.join(toplevel_project.directory, ".bst") self._workspaces = self._load_config() # list() # # Generator function to enumerate workspaces. # # Yields: # A tuple in the following format: (str, Workspace), where the # first element is the name of the workspaced element. def list(self): for element, _ in _yaml.node_items(self._workspaces): yield (element, self._workspaces[element]) # create_workspace() # # Create a workspace in the given path for the given element. # # Args: # element_name (str) - The element name to create a workspace for # path (str) - The path in which the workspace should be kept # def create_workspace(self, element_name, path): if path.startswith(self._toplevel_project.directory): path = os.path.relpath(path, self._toplevel_project.directory) self._workspaces[element_name] = Workspace(self._toplevel_project, path=path) return self._workspaces[element_name] # get_workspace() # # Get the path of the workspace source associated with the given # element's source at the given index # # Args: # element_name (str) - The element name whose workspace to return # # Returns: # (None|Workspace) # def get_workspace(self, element_name): if element_name not in self._workspaces: return None return self._workspaces[element_name] # update_workspace() # # Update the datamodel with a new Workspace instance # # Args: # element_name (str): The name of the element to update a workspace for # workspace_dict (Workspace): A serialized workspace dictionary # # Returns: # (bool): Whether the workspace has changed as a result # def update_workspace(self, element_name, workspace_dict): assert element_name in self._workspaces workspace = Workspace.from_dict(self._toplevel_project, workspace_dict) if self._workspaces[element_name].differs(workspace): self._workspaces[element_name] = workspace return True return False # delete_workspace() # # Remove the workspace from the workspace element. Note that this # does *not* remove the workspace from the stored yaml # configuration, call save_config() afterwards. 
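#
# Illustrative usage sketch (hypothetical element name, not part of the
# original documentation):
#
#   workspaces.delete_workspace("hello.bst")
#   workspaces.save_config()
#
# The deletion only becomes persistent once save_config() rewrites the
# workspaces.yml file.
#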
# # Args: # element_name (str) - The element name whose workspace to delete # def delete_workspace(self, element_name): del self._workspaces[element_name] # save_config() # # Dump the current workspace element to the project configuration # file. This makes any changes performed with delete_workspace or # create_workspace permanent # def save_config(self): assert utils._is_main_process() config = { 'format-version': BST_WORKSPACE_FORMAT_VERSION, 'workspaces': { element: workspace.to_dict() for element, workspace in _yaml.node_items(self._workspaces) } } os.makedirs(self._bst_directory, exist_ok=True) _yaml.dump(_yaml.node_sanitize(config), self._get_filename()) # _load_config() # # Loads and parses the workspace configuration # # Returns: # (dict) The extracted workspaces # # Raises: LoadError if there was a problem with the workspace config # def _load_config(self): workspace_file = self._get_filename() try: node = _yaml.load(workspace_file) except LoadError as e: if e.reason == LoadErrorReason.MISSING_FILE: # Return an empty dict if there was no workspace file return {} raise return self._parse_workspace_config(node) # _parse_workspace_config_format() # # If workspace config is in old-style format, i.e. it is using # source-specific workspaces, try to convert it to element-specific # workspaces. # # Args: # workspaces (dict): current workspace config, usually output of _load_workspace_config() # # Returns: # (dict) The extracted workspaces # # Raises: LoadError if there was a problem with the workspace config # def _parse_workspace_config(self, workspaces): version = _yaml.node_get(workspaces, int, "format-version", default_value=0) if version == 0: # Pre-versioning format can be of two forms for element, config in _yaml.node_items(workspaces): if isinstance(config, str): pass elif isinstance(config, dict): sources = list(_yaml.node_items(config)) if len(sources) > 1: detail = "There are multiple workspaces open for '{}'.\n" + \ "This is not supported anymore.\n" + \ "Please remove this element from '{}'." raise LoadError(LoadErrorReason.INVALID_DATA, detail.format(element, self._get_filename())) workspaces[element] = sources[0][1] else: raise LoadError(LoadErrorReason.INVALID_DATA, "Workspace config is in unexpected format.") res = { element: Workspace(self._toplevel_project, path=config) for element, config in _yaml.node_items(workspaces) } elif version >= 1 and version <= BST_WORKSPACE_FORMAT_VERSION: workspaces = _yaml.node_get(workspaces, dict, "workspaces", default_value={}) res = {element: self._load_workspace(node) for element, node in _yaml.node_items(workspaces)} else: raise LoadError(LoadErrorReason.INVALID_DATA, "Workspace configuration format version {} not supported." "Your version of buildstream may be too old. Max supported version: {}" .format(version, BST_WORKSPACE_FORMAT_VERSION)) return res # _load_workspace(): # # Loads a new workspace from a YAML node # # Args: # node: A YAML Node # # Returns: # (Workspace): A newly instantiated Workspace # def _load_workspace(self, node): dictionary = { 'prepared': _yaml.node_get(node, bool, 'prepared', default_value=False), 'path': _yaml.node_get(node, str, 'path'), 'last_successful': _yaml.node_get(node, str, 'last_successful', default_value=None), 'running_files': _yaml.node_get(node, dict, 'running_files', default_value=None), } return Workspace.from_dict(self._toplevel_project, dictionary) # _get_filename(): # # Get the workspaces.yml file path. # # Returns: # (str): The path to workspaces.yml file. 
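#
# Illustrative sketch of the structure handed to _yaml.dump() by
# save_config(); the element name and path here are hypothetical:
#
#   {
#     'format-version': 3,
#     'workspaces': {
#       'hello.bst': {'prepared': False, 'path': 'workspace_hello', 'running_files': {}}
#     }
#   }
#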
def _get_filename(self): return os.path.join(self._bst_directory, "workspaces.yml") buildstream-1.6.9/buildstream/_yaml.py000066400000000000000000001223221437515270000200360ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import sys import collections import string from io import StringIO from copy import deepcopy from contextlib import ExitStack from pathlib import Path from ruamel import yaml from ruamel.yaml.representer import SafeRepresenter, RoundTripRepresenter from ruamel.yaml.constructor import RoundTripConstructor from ._exceptions import LoadError, LoadErrorReason # SanitizedDict is an OrderedDict that is dumped as unordered mapping. # This provides deterministic output for unordered mappings. # class SanitizedDict(collections.OrderedDict): pass # This overrides the ruamel constructor to treat everything as a string RoundTripConstructor.add_constructor('tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_str) RoundTripConstructor.add_constructor('tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_str) RoundTripConstructor.add_constructor('tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_str) # Represent simple types as strings def represent_as_str(self, value): return self.represent_str(str(value)) RoundTripRepresenter.add_representer(SanitizedDict, SafeRepresenter.represent_dict) RoundTripRepresenter.add_representer(type(None), represent_as_str) RoundTripRepresenter.add_representer(int, represent_as_str) RoundTripRepresenter.add_representer(float, represent_as_str) # We store information in the loaded yaml on a DictProvenance # stored in all dictionaries under this key PROVENANCE_KEY = '__bst_provenance_info' # Provides information about file for provenance # # Args: # name (str): Full path to the file # shortname (str): Relative path to the file # project (Project): Project where the shortname is relative from class ProvenanceFile(): def __init__(self, name, shortname, project): self.name = name self.shortname = shortname self.project = project # Provenance tracks the origin of a given node in the parsed dictionary. 
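#
# Illustrative note (added commentary): when rendered in error messages a
# Provenance prints as, for example, "element.bst [line 4 column 2]", which
# is how loader errors point back at the offending location in the YAML.
#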
# # Args: # node (dict, list, value): A binding to the originally parsed value # filename (string): The filename the node was loaded from # toplevel (dict): The toplevel of the loaded file, suitable for later dumps # line (int): The line number where node was parsed # col (int): The column number where node was parsed # class Provenance(): def __init__(self, filename, node, toplevel, line=0, col=0): self.filename = filename self.node = node self.toplevel = toplevel self.line = line self.col = col # Convert a Provenance to a string for error reporting def __str__(self): filename = self.filename.shortname if self.filename.project and self.filename.project.junction: filename = "{}:{}".format(self.filename.project.junction.name, self.filename.shortname) return "{} [line {:d} column {:d}]".format(filename, self.line, self.col) # Abstract method def clone(self): pass # pragma: nocover # A Provenance for dictionaries, these are stored in the copy of the # loaded YAML tree and track the provenance of all members # class DictProvenance(Provenance): def __init__(self, filename, node, toplevel, line=None, col=None): if line is None or col is None: # Special case for loading an empty dict if hasattr(node, 'lc'): line = node.lc.line + 1 col = node.lc.col else: line = 1 col = 0 super().__init__(filename, node, toplevel, line=line, col=col) self.members = {} def clone(self): provenance = DictProvenance(self.filename, self.node, self.toplevel, line=self.line, col=self.col) provenance.members = { member_name: member.clone() for member_name, member in self.members.items() } return provenance # A Provenance for dict members # class MemberProvenance(Provenance): def __init__(self, filename, parent_dict, member_name, toplevel, node=None, line=None, col=None): if parent_dict is not None: node = parent_dict[member_name] line, col = parent_dict.lc.value(member_name) line += 1 super().__init__(filename, node, toplevel, line=line, col=col) # Only used if member is a list self.elements = [] def clone(self): provenance = MemberProvenance(self.filename, None, None, self.toplevel, node=self.node, line=self.line, col=self.col) provenance.elements = [e.clone() for e in self.elements] return provenance # A Provenance for list elements # class ElementProvenance(Provenance): def __init__(self, filename, parent_list, index, toplevel, node=None, line=None, col=None): if parent_list is not None: node = parent_list[index] line, col = parent_list.lc.item(index) line += 1 super().__init__(filename, node, toplevel, line=line, col=col) # Only used if element is a list self.elements = [] def clone(self): provenance = ElementProvenance(self.filename, None, None, self.toplevel, node=self.node, line=self.line, col=self.col) provenance.elements = [e.clone for e in self.elements] return provenance # These exceptions are intended to be caught entirely within # the BuildStream framework, hence they do not reside in the # public exceptions.py class CompositeError(Exception): def __init__(self, path, message): super().__init__(message) self.path = path class CompositeTypeError(CompositeError): def __init__(self, path, expected_type, actual_type): super().__init__( path, "Error compositing dictionary key '{}', expected source type '{}' " "but received type '{}'" .format(path, expected_type.__name__, actual_type.__name__)) self.expected_type = expected_type self.actual_type = actual_type # Loads a dictionary from some YAML # # Args: # filename (str): The YAML file to load # shortname (str): The filename in shorthand for error reporting (or 
None) # copy_tree (bool): Whether to make a copy, preserving the original toplevels # for later serialization # # Returns (dict): A loaded copy of the YAML file with provenance information # # Raises: LoadError # def load(filename, shortname=None, copy_tree=False, *, project=None): if not shortname: shortname = filename file = ProvenanceFile(filename, shortname, project) try: with open(filename, encoding="utf-8") as f: return load_data(f, file, copy_tree=copy_tree) except FileNotFoundError as e: raise LoadError(LoadErrorReason.MISSING_FILE, "Could not find file at {}".format(filename)) from e except IsADirectoryError as e: raise LoadError(LoadErrorReason.LOADING_DIRECTORY, "{} is a directory. bst command expects a .bst file." .format(filename)) from e # A function to get the roundtrip yaml handle # # Args: # write (bool): Whether we intend to write # def prepare_roundtrip_yaml(write=False): yml = yaml.YAML() yml.preserve_quotes=True # For each of YAML 1.1 and 1.2, force everything to be a plain string for version in [(1, 1), (1, 2), None]: yml.resolver.add_version_implicit_resolver( version, 'tag:yaml.org,2002:str', yaml.util.RegExp(r'.*'), None) # When writing, we want to represent boolean as strings if write: yml.representer.add_representer(bool, represent_as_str) return yml # Like load(), but doesnt require the data to be in a file # def load_data(data, file=None, copy_tree=False): yml = prepare_roundtrip_yaml() try: contents = yml.load(data) except (yaml.scanner.ScannerError, yaml.composer.ComposerError, yaml.parser.ParserError) as e: raise LoadError(LoadErrorReason.INVALID_YAML, "Malformed YAML:\n\n{}\n\n{}\n".format(e.problem, e.problem_mark)) from e if not isinstance(contents, dict): # Special case allowance for None, when the loaded file has only comments in it. if contents is None: contents = {} else: raise LoadError(LoadErrorReason.INVALID_YAML, "YAML file has content of type '{}' instead of expected type 'dict': {}" .format(type(contents).__name__, file.name)) return node_decorated_copy(file, contents, copy_tree=copy_tree) # Dumps a previously loaded YAML node to a file handle # def dump_file_handle(node, fh): yml = prepare_roundtrip_yaml(write=True) yml.dump(node, fh) # Dumps a previously loaded YAML node to a file # # Args: # node (dict): A node previously loaded with _yaml.load() above # # Returns: # (str): The generated string # def dump_string(node): with StringIO() as f: dump_file_handle(node, f) return f.getvalue() # Dumps a previously loaded YAML node to a file # # Args: # node (dict): A node previously loaded with _yaml.load() above # filename (str): The YAML file to load # def dump(node, filename=None): with ExitStack() as stack: if filename: from . 
import utils # pylint: disable=import-outside-toplevel f = stack.enter_context(utils.save_file_atomic(filename, 'w')) else: f = sys.stdout dump_file_handle(node, f) # node_decorated_copy() # # Create a copy of a loaded dict tree decorated with Provenance # information, used directly after loading yaml # # Args: # filename (str): The filename # toplevel (node): The toplevel dictionary node # copy_tree (bool): Whether to load a copy and preserve the original # # Returns: A copy of the toplevel decorated with Provinance # def node_decorated_copy(filename, toplevel, copy_tree=False): if copy_tree: result = deepcopy(toplevel) else: result = toplevel node_decorate_dict(filename, result, toplevel, toplevel) return result def node_decorate_dict(filename, target, source, toplevel): provenance = DictProvenance(filename, source, toplevel) target[PROVENANCE_KEY] = provenance for key, value in node_items(source): member = MemberProvenance(filename, source, key, toplevel) provenance.members[key] = member target_value = target.get(key) if isinstance(value, collections.abc.Mapping): node_decorate_dict(filename, target_value, value, toplevel) elif isinstance(value, list): member.elements = node_decorate_list(filename, target_value, value, toplevel) def node_decorate_list(filename, target, source, toplevel): elements = [] for item in source: idx = source.index(item) target_item = target[idx] element = ElementProvenance(filename, source, idx, toplevel) if isinstance(item, collections.abc.Mapping): node_decorate_dict(filename, target_item, item, toplevel) elif isinstance(item, list): element.elements = node_decorate_list(filename, target_item, item, toplevel) elements.append(element) return elements # node_get_provenance() # # Gets the provenance for a node # # Args: # node (dict): a dictionary # key (str): key in the dictionary # indices (list of indexes): Index path, in the case of list values # # Returns: The Provenance of the dict, member or list element # def node_get_provenance(node, key=None, indices=None): provenance = node.get(PROVENANCE_KEY) if provenance and key: provenance = provenance.members.get(key) if provenance and indices is not None: for index in indices: provenance = provenance.elements[index] return provenance # Helper to use utils.sentinel without unconditional utils import, # which causes issues for completion. # # Local private, but defined here because sphinx appears to break if # it's not defined before any functions calling it in default kwarg # values. # def _get_sentinel(): from .utils import _sentinel # pylint: disable=import-outside-toplevel return _sentinel # node_get() # # Fetches a value from a dictionary node and checks it for # an expected value. Use default_value when parsing a value # which is only optionally supplied. 
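#
# Illustrative usage sketch (hypothetical keys, not part of the original
# documentation):
#
#   depth = node_get(node, int, 'depth', default_value=0)
#   fetch = node_get(node, bool, 'fetch-source', default_value=True)
#
# A YAML value written as "16" or "true" is loaded as a string (see the
# constructor overrides above) and coerced here to the expected type, while
# an absent key falls back to default_value.
#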
# # Args: # node (dict): The dictionary node # expected_type (type): The expected type for the value being searched # key (str): The key to get a value for in node # indices (list of ints): Optionally decend into lists of lists # # Returns: # The value if found in node, otherwise default_value is returned # # Raises: # LoadError, when the value found is not of the expected type # # Note: # Returned strings are stripped of leading and trailing whitespace # def node_get(node, expected_type, key, indices=None, default_value=_get_sentinel()): value = node.get(key, default_value) provenance = node_get_provenance(node) if value is _get_sentinel(): raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Dictionary did not contain expected key '{}'".format(provenance, key)) path = key if indices is not None: # Implied type check of the element itself value = node_get(node, list, key) for index in indices: value = value[index] path += '[{:d}]'.format(index) # We want to allow None as a valid value for any type if value is None: return None if not isinstance(value, expected_type): # Attempt basic conversions if possible, typically we want to # be able to specify numeric values and convert them to strings, # but we dont want to try converting dicts/lists try: if (expected_type == bool and isinstance(value, str)): # Dont coerce booleans to string, this makes "False" strings evaluate to True if value in ('true', 'True'): value = True elif value in ('false', 'False'): value = False else: raise ValueError() elif not (expected_type == list or expected_type == dict or isinstance(value, (list, dict))): value = expected_type(value) else: raise ValueError() except (ValueError, TypeError) as e: provenance = node_get_provenance(node, key=key, indices=indices) raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Value of '{}' is not of the expected type '{}'" .format(provenance, path, expected_type.__name__)) from e # Trim it at the bud, let all loaded strings from yaml be stripped of whitespace if isinstance(value, str): value = value.strip() return value # node_get_project_path() # # Fetches a project path from a dictionary node and validates it # # Paths are asserted to never lead to a directory outside of the project # directory. In addition, paths can not point to symbolic links, fifos, # sockets and block/character devices. # # The `check_is_file` and `check_is_dir` parameters can be used to # perform additional validations on the path. Note that an exception # will always be raised if both parameters are set to ``True``. # # Args: # node (dict): A dictionary loaded from YAML # key (str): The key whose value contains a path to validate # project_dir (str): The project directory # check_is_file (bool): If ``True`` an error will also be raised # if path does not point to a regular file. # Defaults to ``False`` # check_is_dir (bool): If ``True`` an error will be also raised # if path does not point to a directory. 
# Defaults to ``False`` # Returns: # (str): The project path # # Raises: # (LoadError): In case that the project path is not valid or does not # exist # def node_get_project_path(node, key, project_dir, *, check_is_file=False, check_is_dir=False): path_str = node_get(node, str, key) path = Path(path_str) project_dir_path = Path(project_dir) provenance = node_get_provenance(node, key=key) if (project_dir_path / path).is_symlink(): raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND, "{}: Specified path '{}' must not point to " "symbolic links " .format(provenance, path_str)) if path.parts and path.parts[0] == '..': raise LoadError(LoadErrorReason.PROJ_PATH_INVALID, "{}: Specified path '{}' first component must " "not be '..'" .format(provenance, path_str)) try: if sys.version_info[0] == 3 and sys.version_info[1] < 6: full_resolved_path = (project_dir_path / path).resolve() else: full_resolved_path = (project_dir_path / path).resolve(strict=True) except FileNotFoundError as e: raise LoadError(LoadErrorReason.MISSING_FILE, "{}: Specified path '{}' does not exist" .format(provenance, path_str)) from e is_inside = project_dir_path.resolve() in full_resolved_path.parents or ( full_resolved_path == project_dir_path) if path.is_absolute() or not is_inside: raise LoadError(LoadErrorReason.PROJ_PATH_INVALID, "{}: Specified path '{}' must not lead outside of the " "project directory" .format(provenance, path_str)) if full_resolved_path.is_socket() or ( full_resolved_path.is_fifo() or full_resolved_path.is_block_device()): raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND, "{}: Specified path '{}' points to an unsupported " "file kind" .format(provenance, path_str)) if check_is_file and not full_resolved_path.is_file(): raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND, "{}: Specified path '{}' is not a regular file" .format(provenance, path_str)) if check_is_dir and not full_resolved_path.is_dir(): raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND, "{}: Specified path '{}' is not a directory" .format(provenance, path_str)) return path_str # node_items() # # A convenience generator for iterating over loaded key/value # tuples in a dictionary loaded from project YAML. # # Args: # node (dict): The dictionary node # # Yields: # (str): The key name # (anything): The value for the key # def node_items(node): for key, value in node.items(): if key == PROVENANCE_KEY: continue yield (key, value) # Gives a node a dummy provenance, in case of compositing dictionaries # where the target is an empty {} def ensure_provenance(node): provenance = node.get(PROVENANCE_KEY) if not provenance: provenance = DictProvenance(ProvenanceFile('', '', None), node, node) node[PROVENANCE_KEY] = provenance return provenance # is_ruamel_str(): # # Args: # value: A value loaded from ruamel # # This returns if the value is "stringish", since ruamel # has some complex types to represent strings, this is needed # to avoid compositing exceptions in order to allow various # string types to be interchangable and acceptable # def is_ruamel_str(value): if isinstance(value, str): return True elif isinstance(value, yaml.scalarstring.ScalarString): return True return False # is_composite_list # # Checks if the given node is a Mapping with array composition # directives. 
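# For illustration (hypothetical data), a node carrying such directives
# looks roughly like this once loaded:
#
#   {'(>)': ['make check'], '(<)': ['./configure']}
#
# where '(>)' appends to and '(<)' prepends to the underlying list.
#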
# # Args: # node (value): Any node # # Returns: # (bool): True if node was a Mapping containing only # list composition directives # # Raises: # (LoadError): If node was a mapping and contained a mix of # list composition directives and other keys # def is_composite_list(node): if isinstance(node, collections.abc.Mapping): has_directives = False has_keys = False for key, _ in node_items(node): if key in ['(>)', '(<)', '(=)']: # pylint: disable=simplifiable-if-statement has_directives = True else: has_keys = True if has_keys and has_directives: provenance = node_get_provenance(node) raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Dictionary contains array composition directives and arbitrary keys" .format(provenance)) return has_directives return False # composite_list_prepend # # Internal helper for list composition # # Args: # target_node (dict): A simple dictionary # target_key (dict): The key indicating a literal array to prepend to # source_node (dict): Another simple dictionary # source_key (str): The key indicating an array to prepend to the target # # Returns: # (bool): True if a source list was found and compositing occurred # def composite_list_prepend(target_node, target_key, source_node, source_key): source_list = node_get(source_node, list, source_key, default_value=[]) if not source_list: return False target_provenance = node_get_provenance(target_node) source_provenance = node_get_provenance(source_node) if target_node.get(target_key) is None: target_node[target_key] = [] source_list = list_chain_copy(source_list) target_list = target_node[target_key] for element in reversed(source_list): target_list.insert(0, element) if not target_provenance.members.get(target_key): target_provenance.members[target_key] = source_provenance.members[source_key].clone() else: for p in reversed(source_provenance.members[source_key].elements): target_provenance.members[target_key].elements.insert(0, p.clone()) return True # composite_list_append # # Internal helper for list composition # # Args: # target_node (dict): A simple dictionary # target_key (dict): The key indicating a literal array to append to # source_node (dict): Another simple dictionary # source_key (str): The key indicating an array to append to the target # # Returns: # (bool): True if a source list was found and compositing occurred # def composite_list_append(target_node, target_key, source_node, source_key): source_list = node_get(source_node, list, source_key, default_value=[]) if not source_list: return False target_provenance = node_get_provenance(target_node) source_provenance = node_get_provenance(source_node) if target_node.get(target_key) is None: target_node[target_key] = [] source_list = list_chain_copy(source_list) target_list = target_node[target_key] target_list.extend(source_list) if not target_provenance.members.get(target_key): target_provenance.members[target_key] = source_provenance.members[source_key].clone() else: target_provenance.members[target_key].elements.extend([ p.clone() for p in source_provenance.members[source_key].elements ]) return True # composite_list_overwrite # # Internal helper for list composition # # Args: # target_node (dict): A simple dictionary # target_key (dict): The key indicating a literal array to overwrite # source_node (dict): Another simple dictionary # source_key (str): The key indicating an array to overwrite the target with # # Returns: # (bool): True if a source list was found and compositing occurred # def composite_list_overwrite(target_node, target_key, source_node, 
source_key): # We need to handle the legitimate case of overwriting a list with an empty # list, hence the slightly odd default_value of [None] rather than []. source_list = node_get(source_node, list, source_key, default_value=[None]) if source_list == [None]: return False target_provenance = node_get_provenance(target_node) source_provenance = node_get_provenance(source_node) target_node[target_key] = list_chain_copy(source_list) target_provenance.members[target_key] = source_provenance.members[source_key].clone() return True # composite_list(): # # Composite the source value onto the target value, if either # sides are lists, or dictionaries containing list compositing directives # # Args: # target_node (dict): A simple dictionary # source_node (dict): Another simple dictionary # key (str): The key to compose on # # Returns: # (bool): True if both sides were logical lists # # Raises: # (LoadError): If one side was a logical list and the other was not # def composite_list(target_node, source_node, key): target_value = target_node.get(key) source_value = source_node[key] target_key_provenance = node_get_provenance(target_node, key) source_key_provenance = node_get_provenance(source_node, key) # Whenever a literal list is encountered in the source, it # overwrites the target values and provenance completely. # if isinstance(source_value, list): source_provenance = node_get_provenance(source_node) target_provenance = node_get_provenance(target_node) # Assert target type if not (target_value is None or isinstance(target_value, list) or is_composite_list(target_value)): raise LoadError(LoadErrorReason.INVALID_DATA, "{}: List cannot overwrite value at: {}" .format(source_key_provenance, target_key_provenance)) # Special case: The `project.conf` in some cases needs to composite # include files before having resolved options, so there can be # conditionals that need to be merged at this point. # # This unconditionally appends conditional statements to a matching # conditional in the target so as to preserve them. The precedence # of include files is preserved regardless due to the order in which # included dictionaries are composited. # if key == "(?)": composite_list_append(target_node, key, source_node, key) else: composite_list_overwrite(target_node, key, source_node, key) return True # When a composite list is encountered in the source, then # multiple outcomes can occur... # elif is_composite_list(source_value): # If there is nothing there, then the composite list # is copied in it's entirety as is, and preserved # for later composition # if target_value is None: source_provenance = node_get_provenance(source_node) target_provenance = node_get_provenance(target_node) target_node[key] = node_chain_copy(source_value) target_provenance.members[key] = source_provenance.members[key].clone() # If the target is a literal list, then composition # occurs directly onto that target, leaving the target # as a literal list to overwrite anything in later composition # elif isinstance(target_value, list): composite_list_overwrite(target_node, key, source_value, '(=)') composite_list_prepend(target_node, key, source_value, '(<)') composite_list_append(target_node, key, source_value, '(>)') # If the target is a composite list, then composition # occurs in the target composite list, and the composite # target list is preserved in dictionary form for further # composition. 
# elif is_composite_list(target_value): if composite_list_overwrite(target_value, '(=)', source_value, '(=)'): # When overwriting a target with composition directives, remove any # existing prepend/append directives in the target before adding our own target_provenance = node_get_provenance(target_value) for directive in ['(<)', '(>)']: try: del target_value[directive] del target_provenance.members[directive] except KeyError: # Ignore errors from deletion of non-existing keys pass # Prepend to the target prepend array, and append to the append array composite_list_prepend(target_value, '(<)', source_value, '(<)') composite_list_append(target_value, '(>)', source_value, '(>)') else: raise LoadError(LoadErrorReason.INVALID_DATA, "{}: List cannot overwrite value at: {}" .format(source_key_provenance, target_key_provenance)) # We handled list composition in some way return True # Source value was not a logical list return False # composite_dict(): # # Composites values in target with values from source # # Args: # target (dict): A simple dictionary # source (dict): Another simple dictionary # # Raises: CompositeError # # Unlike the dictionary update() method, nested values in source # will not obsolete entire subdictionaries in target, instead both # dictionaries will be recursed and a composition of both will result # # This is useful for overriding configuration files and element # configurations. # def composite_dict(target, source, path=None): target_provenance = ensure_provenance(target) source_provenance = ensure_provenance(source) for key, source_value in node_items(source): # Track the full path of keys, only for raising CompositeError if path: thispath = path + '.' + key else: thispath = key # Handle list composition separately if composite_list(target, source, key): continue target_value = target.get(key) if isinstance(source_value, collections.abc.Mapping): # Handle creating new dicts on target side if target_value is None: target_value = {} target[key] = target_value # Give the new dict provenance value_provenance = source_value.get(PROVENANCE_KEY) if value_provenance: target_value[PROVENANCE_KEY] = value_provenance.clone() # Add a new provenance member element to the containing dict target_provenance.members[key] = source_provenance.members[key] if not isinstance(target_value, collections.abc.Mapping): raise CompositeTypeError(thispath, type(target_value), type(source_value)) # Recurse into matching dictionary composite_dict(target_value, source_value, path=thispath) else: if target_value is not None: # Exception here: depending on how strings were declared ruamel may # use a different type, but for our purposes, any stringish type will do. 
if not (is_ruamel_str(source_value) and is_ruamel_str(target_value)) \ and not isinstance(source_value, type(target_value)): raise CompositeTypeError(thispath, type(target_value), type(source_value)) # Overwrite simple values, lists and mappings have already been handled target_provenance.members[key] = source_provenance.members[key].clone() target[key] = source_value # Like composite_dict(), but raises an all purpose LoadError for convenience # def composite(target, source): assert hasattr(source, 'get') source_provenance = node_get_provenance(source) try: composite_dict(target, source) except CompositeTypeError as e: error_prefix = "" if source_provenance: error_prefix = "{}: ".format(source_provenance) raise LoadError(LoadErrorReason.ILLEGAL_COMPOSITE, "{}Expected '{}' type for configuration '{}', instead received '{}'" .format(error_prefix, e.expected_type.__name__, e.path, e.actual_type.__name__)) from e # node_sanitize() # # Returnes an alphabetically ordered recursive copy # of the source node with internal provenance information stripped. # # Only dicts are ordered, list elements are left in order. # def node_sanitize(node): if isinstance(node, collections.abc.Mapping): result = SanitizedDict() key_list = [key for key, _ in node_items(node)] for key in sorted(key_list): result[key] = node_sanitize(node[key]) return result elif isinstance(node, list): return [node_sanitize(elt) for elt in node] return node # node_validate() # # Validate the node so as to ensure the user has not specified # any keys which are unrecognized by buildstream (usually this # means a typo which would otherwise not trigger an error). # # Args: # node (dict): A dictionary loaded from YAML # valid_keys (list): A list of valid keys for the specified node # # Raises: # LoadError: In the case that the specified node contained # one or more invalid keys # def node_validate(node, valid_keys): # Probably the fastest way to do this: https://stackoverflow.com/a/23062482 valid_keys = set(valid_keys) valid_keys.add(PROVENANCE_KEY) invalid = next((key for key in node if key not in valid_keys), None) if invalid: provenance = node_get_provenance(node, key=invalid) raise LoadError(LoadErrorReason.INVALID_DATA, "{}: Unexpected key: {}".format(provenance, invalid)) # ChainMap # # This is a derivative of collections.ChainMap(), but supports # explicit deletions of keys. # # The purpose of this is to create a virtual copy-on-write # copy of a dictionary, so that mutating it in any way does # not effect the underlying dictionaries. # # collections.ChainMap covers this already mostly, but fails # to record internal state so as to hide keys which have been # explicitly deleted. 
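# A brief illustration of the intended copy-on-write semantics
# (hypothetical values, shown only for clarity):
#
#   underlying = {'name': 'base.bst', 'kind': 'import'}
#   virtual = ChainMap({}, underlying)
#   del virtual['kind']      # the deletion is recorded locally
#   'kind' in virtual        # -> False
#   'kind' in underlying     # -> True, the original mapping is untouched
#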
# class ChainMap(collections.ChainMap): def __init__(self, *maps): super().__init__(*maps) self.__deletions = set() def __getitem__(self, key): # Honor deletion state of 'key' if key in self.__deletions: return self.__missing__(key) return super().__getitem__(key) def __len__(self): return len(set().union(*self.maps) - self.__deletions) def __iter__(self): return iter(set().union(*self.maps) - self.__deletions) def __contains__(self, key): if key in self.__deletions: return False return any(key in m for m in self.maps) def __bool__(self): # Attempt to preserve 'any' optimization any_keys = any(self.maps) # Something existed, try again with deletions subtracted if any_keys: return any(set().union(*self.maps) - self.__deletions) return False def __setitem__(self, key, value): self.__deletions.discard(key) super().__setitem__(key, value) def __delitem__(self, key): if key in self.__deletions: raise KeyError('Key was already deleted from this mapping: {!r}'.format(key)) # Ignore KeyError if it's not in the first map, just save the deletion state try: super().__delitem__(key) except KeyError: pass # Store deleted state self.__deletions.add(key) def popitem(self): poppable = set().union(*self.maps) - self.__deletions for key in poppable: return self.pop(key) raise KeyError('No keys found.') __marker = object() def pop(self, key, default=__marker): # Reimplement MutableMapping's behavior here try: value = self[key] except KeyError: if default is self.__marker: raise return default else: del self[key] return value def clear(self): clearable = set().union(*self.maps) - self.__deletions for key in clearable: del self[key] def node_chain_copy(source): copy = ChainMap({}, source) for key, value in source.items(): if isinstance(value, collections.abc.Mapping): copy[key] = node_chain_copy(value) elif isinstance(value, list): copy[key] = list_chain_copy(value) elif isinstance(value, Provenance): copy[key] = value.clone() return copy def list_chain_copy(source): copy = [] for item in source: if isinstance(item, collections.abc.Mapping): copy.append(node_chain_copy(item)) elif isinstance(item, list): copy.append(list_chain_copy(item)) elif isinstance(item, Provenance): copy.append(item.clone()) else: copy.append(item) return copy def node_copy(source): copy = {} for key, value in source.items(): if isinstance(value, collections.abc.Mapping): copy[key] = node_copy(value) elif isinstance(value, list): copy[key] = list_copy(value) elif isinstance(value, Provenance): copy[key] = value.clone() else: copy[key] = value ensure_provenance(copy) return copy def list_copy(source): copy = [] for item in source: if isinstance(item, collections.abc.Mapping): copy.append(node_copy(item)) elif isinstance(item, list): copy.append(list_copy(item)) elif isinstance(item, Provenance): copy.append(item.clone()) else: copy.append(item) return copy # node_final_assertions() # # This must be called on a fully loaded and composited node, # after all composition has completed. 
# # Args: # node (Mapping): The final composited node # # Raises: # (LoadError): If any assertions fail # def node_final_assertions(node): for key, value in node_items(node): # Assert that list composition directives dont remain, this # indicates that the user intended to override a list which # never existed in the underlying data # if key in ['(>)', '(<)', '(=)']: provenance = node_get_provenance(node, key) raise LoadError(LoadErrorReason.TRAILING_LIST_DIRECTIVE, "{}: Attempt to override non-existing list".format(provenance)) if isinstance(value, collections.abc.Mapping): node_final_assertions(value) elif isinstance(value, list): list_final_assertions(value) def list_final_assertions(values): for value in values: if isinstance(value, collections.abc.Mapping): node_final_assertions(value) elif isinstance(value, list): list_final_assertions(value) # assert_symbol_name() # # A helper function to check if a loaded string is a valid symbol # name and to raise a consistent LoadError if not. For strings which # are required to be symbols. # # Args: # provenance (Provenance): The provenance of the loaded symbol, or None # symbol_name (str): The loaded symbol name # purpose (str): The purpose of the string, for an error message # allow_dashes (bool): Whether dashes are allowed for this symbol # # Raises: # LoadError: If the symbol_name is invalid # # Note that dashes are generally preferred for variable names and # usage in YAML, but things such as option names which will be # evaluated with jinja2 cannot use dashes. # def assert_symbol_name(provenance, symbol_name, purpose, *, allow_dashes=True): valid_chars = string.digits + string.ascii_letters + '_' if allow_dashes: valid_chars += '-' valid = True if not symbol_name: valid = False elif any(x not in valid_chars for x in symbol_name): valid = False elif symbol_name[0] in string.digits: valid = False if not valid: detail = "Symbol names must contain only alphanumeric characters, " + \ "may not start with a digit, and may contain underscores" if allow_dashes: detail += " or dashes" message = "Invalid symbol name for {}: '{}'".format(purpose, symbol_name) if provenance is not None: message = "{}: {}".format(provenance, message) raise LoadError(LoadErrorReason.INVALID_SYMBOL_NAME, message, detail=detail) buildstream-1.6.9/buildstream/buildelement.py000066400000000000000000000240021437515270000214020ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # Copyright (C) 2018 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ BuildElement - Abstract class for build elements ================================================ The BuildElement class is a convenience element one can derive from for implementing the most common case of element. 
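A minimal derived plugin (an illustrative sketch only, not one of the
element kinds shipped with BuildStream) can simply inherit the behavior
described below:

.. code:: python

   from buildstream import BuildElement


   # A hypothetical 'mybuild' element kind
   class MyBuildElement(BuildElement):
       pass


   # Plugin entry point
   def setup():
       return MyBuildElement
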
Abstract method implementations ------------------------------- Element.configure_sandbox() ~~~~~~~~~~~~~~~~~~~~~~~~~~~ In :func:`Element.configure_sandbox() `, the BuildElement will ensure that the sandbox locations described by the ``%{build-root}`` and ``%{install-root}`` variables are marked and will be mounted read-write for the :func:`assemble phase`. The working directory for the sandbox will be configured to be the ``%{build-root}``, unless the ``%{command-subdir}`` variable is specified for the element in question, in which case the working directory will be configured as ``%{build-root}/%{command-subdir}``. Element.stage() ~~~~~~~~~~~~~~~ In :func:`Element.stage() `, the BuildElement will do the following operations: * Stage all the dependencies in the :func:`Scope.BUILD ` scope into the sandbox root. * Run the integration commands for all staged dependencies using :func:`Element.integrate() ` * Stage any Source on the given element to the ``%{build-root}`` location inside the sandbox, using :func:`Element.stage_sources() ` Element.prepare() ~~~~~~~~~~~~~~~~~ In :func:`Element.prepare() `, the BuildElement will run ``configure-commands``, which are used to run one-off preparations that should not be repeated for a single build directory. Element.assemble() ~~~~~~~~~~~~~~~~~~ In :func:`Element.assemble() `, the BuildElement will proceed to run sandboxed commands which are expected to be found in the element configuration. Commands are run in the following order: * ``configure-commands``: Commands to configure the element before building * ``build-commands``: Commands to build the element * ``install-commands``: Commands to install the results into ``%{install-root}`` * ``strip-commands``: Commands to strip debugging symbols installed binaries The result of the build is expected to end up in ``%{install-root}``, and as such; Element.assemble() method will return the ``%{install-root}`` for artifact collection purposes. In addition to the command lists specified above, build elements support specifying the ``create-dev-shm`` boolean parameter. If configured, this parameter causes the sandbox to mount a tmpfs filesystem at ``/dev/shm``. **Example of create-dev-shm**: .. code:: yaml kind: manual config: # Enable /dev/shm create-dev-shm: true """ import os from . import Element, Scope, ElementError from . import SandboxFlags # This list is preserved because of an unfortunate situation, we # need to remove these older commands which were secret and never # documented, but without breaking the cache keys. 
_legacy_command_steps = ['bootstrap-commands', 'configure-commands', 'build-commands', 'test-commands', 'install-commands', 'strip-commands'] _command_steps = ['configure-commands', 'build-commands', 'install-commands', 'strip-commands'] class BuildElement(Element): # pylint: disable=attribute-defined-outside-init ############################################################# # Abstract Method Implementations # ############################################################# def configure(self, node): self.__commands = {} self.__create_dev_shm = False # FIXME: Currently this forcefully validates configurations # for all BuildElement subclasses so they are unable to # extend the configuration self.node_validate(node, _command_steps + ["create-dev-shm"]) self.__create_dev_shm = self.node_get_member(node, bool, "create-dev-shm", False) for command_name in _legacy_command_steps: if command_name in _command_steps: self.__commands[command_name] = self.__get_commands(node, command_name) else: self.__commands[command_name] = [] def preflight(self): pass def get_unique_key(self): dictionary = {} for command_name, command_list in self.__commands.items(): dictionary[command_name] = command_list # Specifying notparallel for a given element effects the # cache key, while having the side effect of setting max-jobs to 1, # which is normally automatically resolved and does not effect # the cache key. if self.get_variable('notparallel'): dictionary['notparallel'] = True return dictionary def configure_sandbox(self, sandbox): build_root = self.get_variable('build-root') install_root = self.get_variable('install-root') # Tell the sandbox to mount the build root and install root sandbox.mark_directory(build_root) sandbox.mark_directory(install_root) # Allow running all commands in a specified subdirectory command_subdir = self.get_variable('command-subdir') if command_subdir: command_dir = os.path.join(build_root, command_subdir) else: command_dir = build_root sandbox.set_work_directory(command_dir) # Setup environment sandbox.set_environment(self.get_environment()) def stage(self, sandbox): # Stage deps in the sandbox root with self.timed_activity("Staging dependencies", silent_nested=True): self.stage_dependency_artifacts(sandbox, Scope.BUILD) # Run any integration commands provided by the dependencies # once they are all staged and ready with self.timed_activity("Integrating sandbox"): for dep in self.dependencies(Scope.BUILD): dep.integrate(sandbox) # Stage sources in the build root self.stage_sources(sandbox, self.get_variable('build-root')) def assemble(self, sandbox): # Run commands for command_name in _command_steps: commands = self.__commands[command_name] if not commands or command_name == 'configure-commands': continue with self.timed_activity("Running {}".format(command_name)): for cmd in commands: self.__run_command(sandbox, cmd, command_name) # %{install-root}/%{build-root} should normally not be written # to - if an element later attempts to stage to a location # that is not empty, we abort the build - in this case this # will almost certainly happen. 
staged_build = os.path.join(self.get_variable('install-root'), self.get_variable('build-root')) if os.path.isdir(staged_build) and os.listdir(staged_build): self.warn("Writing to %{install-root}/%{build-root}.", detail="Writing to this directory will almost " + "certainly cause an error, since later elements " + "will not be allowed to stage to %{build-root}.") # Return the payload, this is configurable but is generally # always the /buildstream-install directory return self.get_variable('install-root') def prepare(self, sandbox): commands = self.__commands['configure-commands'] if commands: with self.timed_activity("Running configure-commands"): for cmd in commands: self.__run_command(sandbox, cmd, 'configure-commands') def generate_script(self): script = "" for command_name in _command_steps: commands = self.__commands[command_name] for cmd in commands: script += "(set -ex; {}\n) || exit 1\n".format(cmd) return script ############################################################# # Private Local Methods # ############################################################# def __get_commands(self, node, name): list_node = self.node_get_member(node, list, name, []) commands = [] for i in range(len(list_node)): command = self.node_subst_list_element(node, name, [i]) commands.append(command) return commands def __run_command(self, sandbox, cmd, cmd_name): self.status("Running {}".format(cmd_name), detail=cmd) if self.__create_dev_shm: flags = SandboxFlags.ROOT_READ_ONLY | SandboxFlags.CREATE_DEV_SHM else: flags = SandboxFlags.ROOT_READ_ONLY # Note the -e switch to 'sh' means to exit with an error # if any untested command fails. # exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'], flags) if exitcode != 0: raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode)) buildstream-1.6.9/buildstream/data/000077500000000000000000000000001437515270000172725ustar00rootroot00000000000000buildstream-1.6.9/buildstream/data/bst000066400000000000000000000011671437515270000200120ustar00rootroot00000000000000# BuildStream bash completion scriptlet. # # On systems which use the bash-completion module for # completion discovery with bash, this can be installed at: # # pkg-config --variable=completionsdir bash-completion # # If BuildStream is not installed system wide, you can # simply source this script to enable completions or append # this script to your ~/.bash_completion file. # _bst_completion() { local IFS=$' ' COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ COMP_CWORD=$COMP_CWORD \ _BST_COMPLETION=complete $1 ) ) return 0 } complete -F _bst_completion -o nospace bst; buildstream-1.6.9/buildstream/data/build-all.sh.in000066400000000000000000000014011437515270000220740ustar00rootroot00000000000000#!/bin/sh # # DO NOT EDIT THIS FILE # # This is a build script generated by # [BuildStream](https://wiki.gnome.org/Projects/BuildStream/). # # Builds all given modules using their respective scripts. set -eu echo "Buildstream native bootstrap script" export PATH='/usr/bin:/usr/sbin/:/sbin:/bin:/tools/bin:/tools/sbin' export SRCDIR='./source' SUCCESS=false CURRENT_MODULE='None' echo 'Setting up build environment...' except() {{ if [ "$SUCCESS" = true ]; then echo "Done!" else echo "Error building module ${{CURRENT_MODULE}}." 
fi }} trap "except" EXIT for module in {modules}; do CURRENT_MODULE="$module" "$SRCDIR/build-$module" if [ -e /sbin/ldconfig ]; then /sbin/ldconfig || true; fi done SUCCESS=true buildstream-1.6.9/buildstream/data/build-module.sh.in000066400000000000000000000013051437515270000226140ustar00rootroot00000000000000#!/bin/sh # # DO NOT EDIT THIS FILE # # This is a build script generated by # [BuildStream](https://wiki.gnome.org/Projects/BuildStream/). # # Builds the module {name}. set -e # Prepare the build environment echo 'Building {name}' if [ -d '{build_root}' ]; then rm -rf '{build_root}' fi if [ -d '{install_root}' ]; then rm -rf '{install_root}' fi mkdir -p '{build_root}' mkdir -p '{install_root}' if [ -d "$SRCDIR/{name}/" ]; then cp -a "$SRCDIR/{name}/." '{build_root}' fi cd '{build_root}' export PREFIX='{install_root}' export {variables} # Build the module {commands} rm -rf '{build_root}' # Install the module echo 'Installing {name}' (cd '{install_root}'; find . | cpio -umdp /) buildstream-1.6.9/buildstream/data/projectconfig.yaml000066400000000000000000000104641437515270000230170ustar00rootroot00000000000000# Default BuildStream project configuration. # General configuration defaults # # Require format version 0 format-version: 0 # Elements are found at the project root element-path: . # Store source references in element files ref-storage: inline # Variable Configuration # variables: # Path configuration, to be used in build instructions. prefix: "/usr" exec_prefix: "%{prefix}" bindir: "%{exec_prefix}/bin" sbindir: "%{exec_prefix}/sbin" libexecdir: "%{exec_prefix}/libexec" datadir: "%{prefix}/share" sysconfdir: "/etc" sharedstatedir: "%{prefix}/com" localstatedir: "/var" lib: "lib" libdir: "%{prefix}/%{lib}" debugdir: "%{libdir}/debug" includedir: "%{prefix}/include" docdir: "%{datadir}/doc" infodir: "%{datadir}/info" mandir: "%{datadir}/man" # Indicates the default build directory where input is # normally staged build-root: /buildstream/%{project-name}/%{element-name} # Indicates the build installation directory in the sandbox install-root: /buildstream-install # Arguments for tooling used when stripping debug symbols objcopy-link-args: --add-gnu-debuglink objcopy-extract-args: | --only-keep-debug --compress-debug-sections strip-args: | --remove-section=.comment --remove-section=.note --strip-unneeded # Generic implementation for stripping debugging symbols strip-binaries: | cd "%{install-root}" && find -type f \ '(' -perm -111 -o -name '*.so*' \ -o -name '*.cmxs' -o -name '*.node' ')' \ -exec sh -ec \ 'read -n4 hdr <"$1" # check for elf header if [ "$hdr" != "$(printf \\x7fELF)" ]; then exit 0 fi debugfile="%{install-root}%{debugdir}/$1" mkdir -p "$(dirname "$debugfile")" objcopy %{objcopy-extract-args} "$1" "$debugfile" chmod 644 "$debugfile" strip %{strip-args} "$1" objcopy %{objcopy-link-args} "$debugfile" "$1"' - {} ';' # Generic implementation for reproducible python builds fix-pyc-timestamps: | find "%{install-root}" -name '*.pyc' -exec \ dd if=/dev/zero of={} bs=1 count=4 seek=4 conv=notrunc ';' # Base sandbox environment, can be overridden by plugins environment: PATH: /usr/bin:/bin:/usr/sbin:/sbin SHELL: /bin/sh TERM: dumb USER: tomjon USERNAME: tomjon LOGNAME: tomjon LC_ALL: C HOME: /tmp TZ: UTC # For reproducible builds we use 2011-11-11 as a constant SOURCE_DATE_EPOCH: 1320937200 # List of environment variables which should not be taken into # account when calculating a cache key for a given element. 
# environment-nocache: [] # Configuration for the sandbox other than environment variables # should go in 'sandbox'. This just contains the UID and GID that # the user in the sandbox will have. Not all sandboxes will support # changing the values. sandbox: build-uid: 0 build-gid: 0 # Defaults for the 'split-rules' public data found on elements # in the 'bst' domain. # split-rules: # The runtime domain includes whatever is needed for the # built element to run, this includes stripped executables # and shared libraries by default. runtime: - | %{bindir}/* - | %{sbindir}/* - | %{libexecdir}/* - | %{libdir}/lib*.so* # The devel domain includes additional things which # you may need for development. # # By default this includes header files, static libraries # and other metadata such as pkgconfig files, m4 macros and # libtool archives. devel: - | %{includedir} - | %{includedir}/** - | %{libdir}/lib*.a - | %{libdir}/lib*.la - | %{libdir}/pkgconfig/*.pc - | %{datadir}/pkgconfig/*.pc - | %{datadir}/aclocal/*.m4 # The debug domain includes debugging information stripped # away from libraries and executables debug: - | %{debugdir} - | %{debugdir}/** # The doc domain includes documentation doc: - | %{docdir} - | %{docdir}/** - | %{infodir} - | %{infodir}/** - | %{mandir} - | %{mandir}/** # The locale domain includes translations etc locale: - | %{datadir}/locale - | %{datadir}/locale/** - | %{datadir}/i18n - | %{datadir}/i18n/** - | %{datadir}/zoneinfo - | %{datadir}/zoneinfo/** # Default behavior for `bst shell` # shell: # Command to run when `bst shell` does not provide a command # command: [ 'sh', '-i' ] buildstream-1.6.9/buildstream/data/userconfig.yaml000066400000000000000000000052341437515270000223260ustar00rootroot00000000000000# Default BuildStream user configuration. # # Work Directories # # # Note that BuildStream forces the XDG Base Directory names # into the environment if they are not already set, and allows # expansion of '~' and environment variables when specifying # paths. # # Location to store sources sourcedir: ${XDG_CACHE_HOME}/buildstream/sources # Location to perform builds builddir: ${XDG_CACHE_HOME}/buildstream/build # Location to store local binary artifacts artifactdir: ${XDG_CACHE_HOME}/buildstream/artifacts # Location to store build logs logdir: ${XDG_CACHE_HOME}/buildstream/logs # # Cache # cache: # Size of the artifact cache in bytes - BuildStream will attempt to keep the # artifact cache within this size. # If the value is suffixed with K, M, G or T, the specified memory size is # parsed as Kilobytes, Megabytes, Gigabytes, or Terabytes (with the base # 1024), respectively. # Alternatively, a percentage value may be specified, which is taken relative # to the isize of the file system containing the cache. quota: infinity # # Scheduler # scheduler: # Maximum number of simultaneous downloading tasks. fetchers: 10 # Maximum number of simultaneous build tasks. builders: 4 # Maximum number of simultaneous uploading tasks. pushers: 4 # Maximum number of retries for network tasks. network-retries: 2 # What to do when an element fails, if not running in # interactive mode: # # continue - Continue queueing jobs as much as possible # quit - Exit after all ongoing jobs complete # terminate - Terminate any ongoing jobs and exit # on-error: quit # # Build related configuration # build: # # Maximum number of jobs to run per build task. # # The default behavior when this is set to 0, is to use the # maximum number of threads available, with a maximum of 8. 
# max-jobs: 0 # # Logging # logging: # The abbreviated cache key length to display in the UI key-length: 8 # Whether to show extra detailed messages verbose: True # Maximum number of lines to print from the # end of a failing build log error-lines: 20 # Maximum number of lines to print in a detailed # message on the console or in the master log (the full # messages are always recorded in the individual build # logs) message-lines: 20 # Whether to enable debugging messages debug: False # Format string for printing the pipeline at startup, this # also determines the default display format for `bst show` element-format: | %{state: >12} %{full-key} %{name} %{workspace-dirs} # Format string for all log messages. message-format: | [%{elapsed}][%{key}][%{element}] %{action} %{message} buildstream-1.6.9/buildstream/element.py000066400000000000000000003041241437515270000203700ustar00rootroot00000000000000# # Copyright (C) 2016-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ Element - Base element class ============================ .. _core_element_abstract_methods: Abstract Methods ---------------- For loading and configuration purposes, Elements must implement the :ref:`Plugin base class abstract methods `. .. _core_element_build_phase: Build Phase ~~~~~~~~~~~ The following methods are the foundation of the element's *build phase*, they must be implemented by all Element classes, unless explicitly stated otherwise. * :func:`Element.configure_sandbox() ` Configures the :class:`.Sandbox`. This is called before anything else * :func:`Element.stage() ` Stage dependencies and :class:`Sources ` into the sandbox. * :func:`Element.prepare() ` Call preparation methods that should only be performed once in the lifetime of a build directory (e.g. autotools' ./configure). **Optional**: If left unimplemented, this step will be skipped. * :func:`Element.assemble() ` Perform the actual assembly of the element Miscellaneous ~~~~~~~~~~~~~ Miscellaneous abstract methods also exist: * :func:`Element.generate_script() ` For the purpose of ``bst source bundle``, an Element may optionally implement this. Class Reference --------------- """ import os import re import stat import copy from collections import OrderedDict from collections.abc import Mapping from contextlib import contextmanager from enum import Enum import tempfile import time import shutil from . import _yaml from ._variables import Variables from ._versions import BST_CORE_ARTIFACT_VERSION from ._exceptions import BstError, LoadError, LoadErrorReason, ImplError, ErrorDomain from .utils import UtilError from .types import _UniquePriorityQueue from . import Plugin, Consistency from . import SandboxFlags from . import utils from . import _cachekey from . import _signals from . 
import _site from ._platform import Platform from .sandbox._config import SandboxConfig from .types import _KeyStrength, CoreWarnings class Scope(Enum): """Types of scope for a given element""" ALL = 1 """All elements which the given element depends on, following all elements required for building. Including the element itself. """ BUILD = 2 """All elements required for building the element, including their respective run dependencies. Not including the given element itself. """ RUN = 3 """All elements required for running the element. Including the element itself. """ class ElementError(BstError): """This exception should be raised by :class:`.Element` implementations to report errors to the user. Args: message (str): The error message to report to the user detail (str): A possibly multiline, more detailed error message reason (str): An optional machine readable reason string, used for test cases temporary (bool): An indicator to whether the error may occur if the operation was run again. (*Since: 1.2*) """ def __init__(self, message, *, detail=None, reason=None, temporary=False): super().__init__(message, detail=detail, domain=ErrorDomain.ELEMENT, reason=reason, temporary=temporary) class Element(Plugin): """Element() Base Element class. All elements derive from this class, this interface defines how the core will be interacting with Elements. """ __defaults = {} # The defaults from the yaml file and project __defaults_set = False # Flag, in case there are no defaults at all __instantiated_elements = {} # A hash of Element by MetaElement __redundant_source_refs = [] # A list of (source, ref) tuples which were redundantly specified BST_ARTIFACT_VERSION = 0 """The element plugin's artifact version Elements must first set this to 1 if they change their unique key structure in a way that would produce a different key for the same input, or introduce a change in the build output for the same unique key. Further changes of this nature require bumping the artifact version. """ BST_STRICT_REBUILD = False """Whether to rebuild this element in non strict mode if any of the dependencies have changed. """ BST_FORBID_RDEPENDS = False """Whether to raise exceptions if an element has runtime dependencies. *Since: 1.2* """ BST_FORBID_BDEPENDS = False """Whether to raise exceptions if an element has build dependencies. *Since: 1.2* """ BST_FORBID_SOURCES = False """Whether to raise exceptions if an element has sources. *Since: 1.2* """ def __init__(self, context, project, meta, plugin_conf): self.__cache_key_dict = None # Dict for cache key calculation self.__cache_key = None # Our cached cache key super().__init__(meta.name, context, project, meta.provenance, "element") self.__is_junction = meta.kind == "junction" if not self.__is_junction: project.ensure_fully_loaded() self.normal_name = os.path.splitext(self.name.replace(os.sep, '-'))[0] """A normalized element name This is the original element without path separators or the extension, it's used mainly for composing log file names and creating directory names and such. 
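For example (an illustrative case), an element named
``base/alpine.bst`` has the normal name ``base-alpine``.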
""" self.__runtime_dependencies = [] # Direct runtime dependency Elements self.__build_dependencies = [] # Direct build dependency Elements self.__strict_dependencies = [] # Direct build dependency subset which require strict rebuilds self.__reverse_dependencies = set() # Direct reverse dependency Elements self.__ready_for_runtime = False # Wether the element has all its dependencies ready and has a cache key self.__sources = [] # List of Sources self.__weak_cache_key = None # Our cached weak cache key self.__strict_cache_key = None # Our cached cache key for strict builds self.__artifacts = context.artifactcache # Artifact cache self.__consistency = Consistency.INCONSISTENT # Cached overall consistency state self.__cached = None # Whether we have a cached artifact self.__strong_cached = None # Whether we have a cached artifact self.__assemble_scheduled = False # Element is scheduled to be assembled self.__assemble_done = False # Element is assembled self.__tracking_scheduled = False # Sources are scheduled to be tracked self.__tracking_done = False # Sources have been tracked self.__pull_done = False # Whether pull was attempted self.__splits = None # Resolved regex objects for computing split domains self.__whitelist_regex = None # Resolved regex object to check if file is allowed to overlap self.__staged_sources_directory = None # Location where Element.stage_sources() was called self.__tainted = None # Whether the artifact is tainted and should not be shared self.__required = False # Whether the artifact is required in the current session # hash tables of loaded artifact metadata, hashed by key self.__metadata_keys = {} # Strong and weak keys for this key self.__metadata_dependencies = {} # Dictionary of dependency strong keys self.__metadata_workspaced = {} # Boolean of whether it's workspaced self.__metadata_workspaced_dependencies = {} # List of which dependencies are workspaced # Ensure we have loaded this class's defaults self.__init_defaults(plugin_conf) # Collect the composited variables and resolve them variables = self.__extract_variables(meta) variables['element-name'] = self.name self.__variables = Variables(variables) if not self.__is_junction: self.__variables.check() # Collect the composited environment now that we have variables env = self.__extract_environment(meta) self.__environment = env # Collect the environment nocache blacklist list nocache = self.__extract_env_nocache(meta) self.__env_nocache = nocache # Grab public domain data declared for this instance self.__public = self.__extract_public(meta) self.__dynamic_public = None # Collect the composited element configuration and # ask the element to configure itself. self.__config = self.__extract_config(meta) self._configure(self.__config) # Extract Sandbox config self.__sandbox_config = self.__extract_sandbox_config(meta) # Extract Sandbox config self.__sandbox_config = self.__extract_sandbox_config(meta) self.__sandbox_config_supported = True platform = Platform.get_platform() if not platform.check_sandbox_config(self.__sandbox_config): # Local sandbox does not fully support specified sandbox config. # This will taint the artifact, disable pushing. 
self.__sandbox_config_supported = False def __lt__(self, other): return self.name < other.name ############################################################# # Abstract Methods # ############################################################# def configure_sandbox(self, sandbox): """Configures the the sandbox for execution Args: sandbox (:class:`.Sandbox`): The build sandbox Raises: (:class:`.ElementError`): When the element raises an error Elements must implement this method to configure the sandbox object for execution. """ raise ImplError("element plugin '{kind}' does not implement configure_sandbox()".format( kind=self.get_kind())) def stage(self, sandbox): """Stage inputs into the sandbox directories Args: sandbox (:class:`.Sandbox`): The build sandbox Raises: (:class:`.ElementError`): When the element raises an error Elements must implement this method to populate the sandbox directory with data. This is done either by staging :class:`.Source` objects, by staging the artifacts of the elements this element depends on, or both. """ raise ImplError("element plugin '{kind}' does not implement stage()".format( kind=self.get_kind())) def prepare(self, sandbox): """Run one-off preparation commands. This is run before assemble(), but is guaranteed to run only the first time if we build incrementally - this makes it possible to run configure-like commands without causing the entire element to rebuild. Args: sandbox (:class:`.Sandbox`): The build sandbox Raises: (:class:`.ElementError`): When the element raises an error By default, this method does nothing, but may be overriden to allow configure-like commands. *Since: 1.2* """ def assemble(self, sandbox): """Assemble the output artifact Args: sandbox (:class:`.Sandbox`): The build sandbox Returns: (str): An absolute path within the sandbox to collect the artifact from Raises: (:class:`.ElementError`): When the element raises an error Elements must implement this method to create an output artifact from its sources and dependencies. """ raise ImplError("element plugin '{kind}' does not implement assemble()".format( kind=self.get_kind())) def generate_script(self): """Generate a build (sh) script to build this element Returns: (str): A string containing the shell commands required to build the element BuildStream guarantees the following environment when the generated script is run: - All element variables have been exported. - The cwd is `self.get_variable('build_root')/self.normal_name`. - $PREFIX is set to `self.get_variable('install_root')`. - The directory indicated by $PREFIX is an empty directory. Files are expected to be installed to $PREFIX. If the script fails, it is expected to return with an exit code != 0. """ raise ImplError("element plugin '{kind}' does not implement write_script()".format( kind=self.get_kind())) ############################################################# # Public Methods # ############################################################# def sources(self): """A generator function to enumerate the element sources Yields: (:class:`.Source`): The sources of this element """ for source in self.__sources: yield source def dependencies(self, scope, *, recurse=True, visited=None, recursed=False): """dependencies(scope, *, recurse=True) A generator function which yields the dependencies of the given element. If `recurse` is specified (the default), the full dependencies will be listed in deterministic staging order, starting with the basemost elements in the given `scope`. 
Otherwise, if `recurse` is not specified then only the direct dependencies in the given `scope` will be traversed, and the element itself will be omitted. Args: scope (:class:`.Scope`): The scope to iterate in recurse (bool): Whether to recurse Yields: (:class:`.Element`): The dependencies in `scope`, in deterministic staging order """ if visited is None: visited = {} full_name = self._get_full_name() scope_set = set((Scope.BUILD, Scope.RUN)) if scope == Scope.ALL else set((scope,)) if full_name in visited and scope_set.issubset(visited[full_name]): return should_yield = False if full_name not in visited: visited[full_name] = scope_set should_yield = True else: visited[full_name] |= scope_set if recurse or not recursed: if scope == Scope.ALL: for dep in self.__build_dependencies: yield from dep.dependencies(Scope.ALL, recurse=recurse, visited=visited, recursed=True) for dep in self.__runtime_dependencies: if dep not in self.__build_dependencies: yield from dep.dependencies(Scope.ALL, recurse=recurse, visited=visited, recursed=True) elif scope == Scope.BUILD: for dep in self.__build_dependencies: yield from dep.dependencies(Scope.RUN, recurse=recurse, visited=visited, recursed=True) elif scope == Scope.RUN: for dep in self.__runtime_dependencies: yield from dep.dependencies(Scope.RUN, recurse=recurse, visited=visited, recursed=True) # Yeild self only at the end, after anything needed has been traversed if should_yield and (recurse or recursed) and scope != Scope.BUILD: yield self def search(self, scope, name): """Search for a dependency by name Args: scope (:class:`.Scope`): The scope to search name (str): The dependency to search for Returns: (:class:`.Element`): The dependency element, or None if not found. """ for dep in self.dependencies(scope): if dep.name == name: return dep return None def node_subst_member(self, node, member_name, default=utils._sentinel): """Fetch the value of a string node member, substituting any variables in the loaded value with the element contextual variables. Args: node (dict): A dictionary loaded from YAML member_name (str): The name of the member to fetch default (str): A value to return when *member_name* is not specified in *node* Returns: The value of *member_name* in *node*, otherwise *default* Raises: :class:`.LoadError`: When *member_name* is not found and no *default* was provided This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_member` except that it assumes the expected type is a string and will also perform variable substitutions. **Example:** .. code:: python # Expect a string 'name' in 'node', substituting any # variables in the returned string name = self.node_subst_member(node, 'name') """ value = self.node_get_member(node, str, member_name, default) provenance = _yaml.node_get_provenance(node, key=member_name) try: return self.__variables.subst(value, provenance) except LoadError as e: raise LoadError(e.reason, '{}: {}'.format(provenance, str(e))) from e def node_subst_list(self, node, member_name): """Fetch a list from a node member, substituting any variables in the list Args: node (dict): A dictionary loaded from YAML member_name (str): The name of the member to fetch (a list) Returns: The list in *member_name* Raises: :class:`.LoadError` This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_member` except that it assumes the expected type is a list of strings and will also perform variable substitutions. 
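**Example:**

.. code:: python

   # An illustrative sketch: fetch the list of commands declared for
   # this element, substituting variables such as %{install-root} in
   # each entry ('install-commands' is assumed to be present in node).
   commands = self.node_subst_list(node, 'install-commands')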
""" value = self.node_get_member(node, list, member_name) ret = [] for index, x in enumerate(value): provenance = _yaml.node_get_provenance(node, key=member_name, indices=[index]) try: ret.append(self.__variables.subst(x, provenance)) except LoadError as e: raise LoadError(e.reason, '{}: {}'.format(provenance, str(e))) from e return ret def node_subst_list_element(self, node, member_name, indices): """Fetch the value of a list element from a node member, substituting any variables in the loaded value with the element contextual variables. Args: node (dict): A dictionary loaded from YAML member_name (str): The name of the member to fetch indices (list of int): List of indices to search, in case of nested lists Returns: The value of the list element in *member_name* at the specified *indices* Raises: :class:`.LoadError` This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_list_element` except that it assumes the expected type is a string and will also perform variable substitutions. **Example:** .. code:: python # Fetch the list itself strings = self.node_get_member(node, list, 'strings') # Iterate over the list indices for i in range(len(strings)): # Fetch the strings in this list, substituting content # with our element's variables if needed string = self.node_subst_list_element( node, 'strings', [ i ]) """ value = self.node_get_list_element(node, str, member_name, indices) provenance = _yaml.node_get_provenance(node, key=member_name, indices=indices) try: return self.__variables.subst(value, provenance) except LoadError as e: raise LoadError(e.reason, '{}: {}'.format(provenance, str(e))) from e def compute_manifest(self, *, include=None, exclude=None, orphans=True): """Compute and return this element's selective manifest The manifest consists on the list of file paths in the artifact. The files in the manifest are selected according to `include`, `exclude` and `orphans` parameters. If `include` is not specified then all files spoken for by any domain are included unless explicitly excluded with an `exclude` domain. Args: include (list): An optional list of domains to include files from exclude (list): An optional list of domains to exclude files from orphans (bool): Whether to include files not spoken for by split domains Yields: (str): The paths of the files in manifest """ self.__assert_cached() return self.__compute_splits(include, exclude, orphans) def stage_artifact(self, sandbox, *, path=None, include=None, exclude=None, orphans=True, update_mtimes=None): """Stage this element's output artifact in the sandbox This will stage the files from the artifact to the sandbox at specified location. The files are selected for staging according to the `include`, `exclude` and `orphans` parameters; if `include` is not specified then all files spoken for by any domain are included unless explicitly excluded with an `exclude` domain. Args: sandbox (:class:`.Sandbox`): The build sandbox path (str): An optional sandbox relative path include (list): An optional list of domains to include files from exclude (list): An optional list of domains to exclude files from orphans (bool): Whether to include files not spoken for by split domains update_mtimes (list): An optional list of files whose mtimes to set to the current time. Raises: (:class:`.ElementError`): If the element has not yet produced an artifact. Returns: (:class:`~.utils.FileListResult`): The result describing what happened while staging .. 
note:: Directories in `dest` are replaced with files from `src`, unless the existing directory in `dest` is not empty in which case the path will be reported in the return value. **Example:** .. code:: python # Stage the dependencies for a build of 'self' for dep in self.dependencies(Scope.BUILD): dep.stage_artifact(sandbox) """ if not self._cached(): detail = "No artifacts have been cached yet for that element\n" + \ "Try building the element first with `bst build`\n" raise ElementError("No artifacts to stage", detail=detail, reason="uncached-checkout-attempt") if update_mtimes is None: update_mtimes = [] # Time to use the artifact, check once more that it's there self.__assert_cached() with self.timed_activity("Staging {}/{}".format(self.name, self._get_brief_display_key())): # Get the extracted artifact artifact_base, _ = self.__extract() artifact = os.path.join(artifact_base, 'files') # Hard link it into the staging area # basedir = sandbox.get_directory() stagedir = basedir \ if path is None \ else os.path.join(basedir, path.lstrip(os.sep)) files = list(self.__compute_splits(include, exclude, orphans)) # We must not hardlink files whose mtimes we want to update if update_mtimes: link_files = [f for f in files if f not in update_mtimes] copy_files = [f for f in files if f in update_mtimes] else: link_files = files copy_files = [] link_result = utils.link_files(artifact, stagedir, files=link_files, report_written=True) copy_result = utils.copy_files(artifact, stagedir, files=copy_files, report_written=True) cur_time = time.time() for f in copy_result.files_written: os.utime(os.path.join(stagedir, f), times=(cur_time, cur_time)) return link_result.combine(copy_result) def stage_dependency_artifacts(self, sandbox, scope, *, path=None, include=None, exclude=None, orphans=True): """Stage element dependencies in scope This is primarily a convenience wrapper around :func:`Element.stage_artifact() ` which takes care of staging all the dependencies in `scope` and issueing the appropriate warnings. Args: sandbox (:class:`.Sandbox`): The build sandbox scope (:class:`.Scope`): The scope to stage dependencies in path (str): An optional sandbox relative path include (list): An optional list of domains to include files from exclude (list): An optional list of domains to exclude files from orphans (bool): Whether to include files not spoken for by split domains Raises: (:class:`.ElementError`): If any of the dependencies in `scope` have not yet produced artifacts, or if forbidden overlaps occur. """ ignored = {} overlaps = OrderedDict() files_written = {} old_dep_keys = {} workspace = self._get_workspace() context = self._get_context() if self.__can_build_incrementally() and workspace.last_successful: # Try to perform an incremental build if the last successful # build is still in the artifact cache # if self.__artifacts.contains(self, workspace.last_successful): old_dep_keys = self.__get_artifact_metadata_dependencies(workspace.last_successful) else: # Last successful build is no longer in the artifact cache, # so let's reset it and perform a full build now. 
workspace.prepared = False workspace.last_successful = None self.info("Resetting workspace state, last successful build is no longer in the cache") # In case we are staging in the main process if utils._is_main_process(): context.get_workspaces().save_config() for dep in self.dependencies(scope): # If we are workspaced, and we therefore perform an # incremental build, we must ensure that we update the mtimes # of any files created by our dependencies since the last # successful build. to_update = None if workspace and old_dep_keys: dep.__assert_cached() if dep.name in old_dep_keys: key_new = dep._get_cache_key() key_old = old_dep_keys[dep.name] # We only need to worry about modified and added # files, since removed files will be picked up by # build systems anyway. to_update, _, added = self.__artifacts.diff(dep, key_old, key_new, subdir='files') workspace.add_running_files(dep.name, to_update + added) to_update.extend(workspace.running_files[dep.name]) # In case we are running `bst shell`, this happens in the # main process and we need to update the workspace config if utils._is_main_process(): context.get_workspaces().save_config() result = dep.stage_artifact(sandbox, path=path, include=include, exclude=exclude, orphans=orphans, update_mtimes=to_update) if result.overwritten: for overwrite in result.overwritten: # Completely new overwrite if overwrite not in overlaps: # Find the overwritten element by checking where we've # written the element before for elm, contents in files_written.items(): if overwrite in contents: overlaps[overwrite] = [elm, dep.name] else: overlaps[overwrite].append(dep.name) files_written[dep.name] = result.files_written if result.ignored: ignored[dep.name] = result.ignored if overlaps: overlap_warning = False warning_detail = "Staged files overwrite existing files in staging area:\n" for f, elements in overlaps.items(): overlap_warning_elements = [] # The bottom item overlaps nothing overlapping_elements = elements[1:] for elm in overlapping_elements: element = self.search(scope, elm) if not element.__file_is_whitelisted(f): overlap_warning_elements.append(elm) overlap_warning = True warning_detail += _overlap_error_detail(f, overlap_warning_elements, elements) if overlap_warning: self.warn("Non-whitelisted overlaps detected", detail=warning_detail, warning_token=CoreWarnings.OVERLAPS) if ignored: detail = "Not staging files which would replace non-empty directories:\n" for key, value in ignored.items(): detail += "\nFrom {}:\n".format(key) detail += " " + " ".join(["/" + f + "\n" for f in value]) self.warn("Ignored files", detail=detail) def integrate(self, sandbox): """Integrate currently staged filesystem against this artifact. Args: sandbox (:class:`.Sandbox`): The build sandbox This modifies the sysroot staged inside the sandbox so that the sysroot is *integrated*. Only an *integrated* sandbox may be trusted for running the software therein, as the integration commands will create and update important system cache files required for running the installed software (such as the ld.so.cache). 
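        **Example:**

        .. code:: python

           # A sketch of integrating all staged runtime dependencies,
           # mirroring what the sandbox preparation code in this module
           # does (assumes the dependencies have already been staged)
           for dep in self.dependencies(Scope.RUN):
               dep.integrate(sandbox)
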
""" bstdata = self.get_public_data('bst') environment = self.get_environment() if bstdata is not None: commands = self.node_get_member(bstdata, list, 'integration-commands', []) for i in range(len(commands)): cmd = self.node_subst_list_element(bstdata, 'integration-commands', [i]) self.status("Running integration command", detail=cmd) exitcode = sandbox.run(['sh', '-e', '-c', cmd], 0, env=environment, cwd='/') if exitcode != 0: raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode)) def stage_sources(self, sandbox, directory): """Stage this element's sources to a directory in the sandbox Args: sandbox (:class:`.Sandbox`): The build sandbox directory (str): An absolute path within the sandbox to stage the sources at """ # Hold on to the location where a plugin decided to stage sources, # this will be used to reconstruct the failed sysroot properly # after a failed build. # assert self.__staged_sources_directory is None self.__staged_sources_directory = directory self._stage_sources_in_sandbox(sandbox, directory) def get_public_data(self, domain): """Fetch public data on this element Args: domain (str): A public domain name to fetch data for Returns: (dict): The public data dictionary for the given domain .. note:: This can only be called the abstract methods which are called as a part of the :ref:`build phase ` and never before. """ if self.__dynamic_public is None: self.__load_public_data() data = self.__dynamic_public.get(domain) if data is not None: data = _yaml.node_copy(data) return data def set_public_data(self, domain, data): """Set public data on this element Args: domain (str): A public domain name to fetch data for data (dict): The public data dictionary for the given domain This allows an element to dynamically mutate public data of elements or add new domains as the result of success completion of the :func:`Element.assemble() ` method. """ if self.__dynamic_public is None: self.__load_public_data() if data is not None: data = _yaml.node_copy(data) self.__dynamic_public[domain] = data def get_environment(self): """Fetch the environment suitable for running in the sandbox Returns: (dict): A dictionary of string key/values suitable for passing to :func:`Sandbox.run() ` """ return _yaml.node_sanitize(self.__environment) def get_variable(self, varname): """Fetch the value of a variable resolved for this element. Args: varname (str): The name of the variable to fetch Returns: (str): The resolved value for *varname*, or None if no variable was declared with the given name. """ return self.__variables.get(varname) ############################################################# # Private Methods used in BuildStream # ############################################################# # _new_from_meta(): # # Recursively instantiate a new Element instance, it's sources # and it's dependencies from a meta element. 
# # Args: # meta (MetaElement): The meta element # # Returns: # (Element): A newly created Element instance # @classmethod def _new_from_meta(cls, meta): if not meta.first_pass: meta.project.ensure_fully_loaded() if meta in cls.__instantiated_elements: return cls.__instantiated_elements[meta] element = meta.project.create_element(meta, first_pass=meta.first_pass) cls.__instantiated_elements[meta] = element # Instantiate sources for meta_source in meta.sources: meta_source.first_pass = meta.kind == "junction" source = meta.project.create_source(meta_source, first_pass=meta.first_pass) redundant_ref = source._load_ref() element.__sources.append(source) # Collect redundant refs which occurred at load time if redundant_ref is not None: cls.__redundant_source_refs.append((source, redundant_ref)) # Instantiate dependencies for meta_dep in meta.dependencies: dependency = Element._new_from_meta(meta_dep) element.__runtime_dependencies.append(dependency) dependency.__reverse_dependencies.add(element) for meta_dep in meta.build_dependencies: dependency = Element._new_from_meta(meta_dep) element.__build_dependencies.append(dependency) dependency.__reverse_dependencies.add(element) if meta_dep in meta.strict_dependencies: element.__strict_dependencies.append(dependency) return element # _get_redundant_source_refs() # # Fetches a list of (Source, ref) tuples of all the Sources # which were loaded with a ref specified in the element declaration # for projects which use project.refs ref-storage. # # This is used to produce a warning @classmethod def _get_redundant_source_refs(cls): return cls.__redundant_source_refs # _reset_load_state() # # This is called by Pipeline.cleanup() and is used to # reset the loader state between multiple sessions. # @classmethod def _reset_load_state(cls): cls.__instantiated_elements = {} cls.__redundant_source_refs = [] # _get_consistency() # # Returns cached consistency state # def _get_consistency(self): return self.__consistency # _cached(): # # Returns: # (bool): Whether this element is already present in # the artifact cache # def _cached(self): return self.__cached # _buildable(): # # Returns: # (bool): Whether this element can currently be built # def _buildable(self): if self._get_consistency() != Consistency.CACHED: return False for dependency in self.dependencies(Scope.BUILD): # In non-strict mode an element's strong cache key may not be available yet # even though an artifact is available in the local cache. This can happen # if the pull job is still pending as the remote cache may have an artifact # that matches the strict cache key, which is preferred over a locally # cached artifact with a weak cache key match. if not dependency._cached() or not dependency._get_cache_key(strength=_KeyStrength.STRONG): return False if not self.__assemble_scheduled: return False return True # _get_cache_key(): # # Returns the cache key # # Args: # strength (_KeyStrength): Either STRONG or WEAK key strength # # Returns: # (str): A hex digest cache key for this Element, or None # # None is returned if information for the cache key is missing. # def _get_cache_key(self, strength=_KeyStrength.STRONG): if strength == _KeyStrength.STRONG: return self.__cache_key else: return self.__weak_cache_key # _can_query_cache(): # # Returns whether the cache key required for cache queries is available. 
# # Returns: # (bool): True if cache can be queried # def _can_query_cache(self): # If build has already been scheduled, we know that the element is # not cached and thus can allow cache query even if the strict cache key # is not available yet. # This special case is required for workspaced elements to prevent # them from getting blocked in the pull queue. if self.__assemble_scheduled: return True # cache cannot be queried until strict cache key is available return self.__strict_cache_key is not None # _update_state() # # Keep track of element state. Calculate cache keys if possible and # check whether artifacts are cached. # # This must be called whenever the state of an element may have changed. # def _update_state(self): context = self._get_context() # Compute and determine consistency of sources self.__update_source_state() if self._get_consistency() == Consistency.INCONSISTENT: # Tracking may still be pending return if self._get_workspace() and self.__assemble_scheduled: # If we have an active workspace and are going to build, then # discard current cache key values as their correct values can only # be calculated once the build is complete self.__cache_key_dict = None self.__cache_key = None self.__weak_cache_key = None self.__strict_cache_key = None self.__strong_cached = None return if self.__weak_cache_key is None: # Calculate weak cache key # Weak cache key includes names of direct build dependencies # so as to only trigger rebuilds when the shape of the # dependencies change. # # Some conditions cause dependencies to be strict, such # that this element will be rebuilt anyway if the dependency # changes even in non strict mode, for these cases we just # encode the dependency's weak cache key instead of it's name. # dependencies = [ e._get_cache_key(strength=_KeyStrength.WEAK) if self.BST_STRICT_REBUILD or e in self.__strict_dependencies else e.name for e in self.dependencies(Scope.BUILD) ] self.__weak_cache_key = self.__calculate_cache_key(dependencies) if self.__weak_cache_key is None: # Weak cache key could not be calculated yet return if not context.get_strict(): # Full cache query in non-strict mode requires both the weak and # strict cache keys. However, we need to determine as early as # possible whether a build is pending to discard unstable cache keys # for workspaced elements. For this cache check the weak cache keys # are sufficient. However, don't update the `cached` attributes # until the full cache query below. 
cached = self.__artifacts.contains(self, self.__weak_cache_key) if (not self.__assemble_scheduled and not self.__assemble_done and not cached and not self._pull_pending()): # For uncached workspaced elements, assemble is required # even if we only need the cache key if self._is_required() or self._get_workspace(): self._schedule_assemble() return if self.__strict_cache_key is None: dependencies = [ e.__strict_cache_key for e in self.dependencies(Scope.BUILD) ] self.__strict_cache_key = self.__calculate_cache_key(dependencies) if self.__strict_cache_key is None: # Strict cache key could not be calculated yet return # Query caches now that the weak and strict cache keys are available key_for_cache_lookup = self.__strict_cache_key if context.get_strict() else self.__weak_cache_key if not self.__cached: self.__cached = self.__artifacts.contains(self, key_for_cache_lookup) if not self.__strong_cached: self.__strong_cached = self.__artifacts.contains(self, self.__strict_cache_key) if (not self.__assemble_scheduled and not self.__assemble_done and not self.__cached and not self._pull_pending()): # Workspaced sources are considered unstable if a build is pending # as the build will modify the contents of the workspace. # Determine as early as possible if a build is pending to discard # unstable cache keys. # For uncached workspaced elements, assemble is required # even if we only need the cache key if self._is_required() or self._get_workspace(): self._schedule_assemble() return if self.__cache_key is None: # Calculate strong cache key if context.get_strict(): self.__cache_key = self.__strict_cache_key elif self._pull_pending(): # Effective strong cache key is unknown until after the pull pass elif self._cached(): # Load the strong cache key from the artifact strong_key, _ = self.__get_artifact_metadata_keys() self.__cache_key = strong_key elif self.__assemble_scheduled or self.__assemble_done: # Artifact will or has been built, not downloaded dependencies = [ e._get_cache_key() for e in self.dependencies(Scope.BUILD) ] self.__cache_key = self.__calculate_cache_key(dependencies) if self.__cache_key is None: # Strong cache key could not be calculated yet return if not self.__ready_for_runtime and self.__cache_key is not None: self.__ready_for_runtime = all( dep.__ready_for_runtime for dep in self.__runtime_dependencies) # _get_display_key(): # # Returns cache keys for display purposes # # Returns: # (str): A full hex digest cache key for this Element # (str): An abbreviated hex digest cache key for this Element # (bool): True if key should be shown as dim, False otherwise # # Question marks are returned if information for the cache key is missing. # def _get_display_key(self): context = self._get_context() dim_key = True cache_key = self._get_cache_key() if not cache_key: cache_key = "{:?<64}".format('') elif self._get_cache_key() == self.__strict_cache_key: # Strong cache key used in this session matches cache key # that would be used in strict build mode dim_key = False length = min(len(cache_key), context.log_key_length) return (cache_key, cache_key[0:length], dim_key) # _get_brief_display_key() # # Returns an abbreviated cache key for display purposes # # Returns: # (str): An abbreviated hex digest cache key for this Element # # Question marks are returned if information for the cache key is missing. 
# def _get_brief_display_key(self): _, display_key, _ = self._get_display_key() return display_key # _preflight(): # # A wrapper for calling the abstract preflight() method on # the element and it's sources. # def _preflight(self): if self.BST_FORBID_RDEPENDS and self.BST_FORBID_BDEPENDS: if any(self.dependencies(Scope.RUN, recurse=False)) or any(self.dependencies(Scope.BUILD, recurse=False)): raise ElementError("{}: Dependencies are forbidden for '{}' elements" .format(self, self.get_kind()), reason="element-forbidden-depends") if self.BST_FORBID_RDEPENDS: if any(self.dependencies(Scope.RUN, recurse=False)): raise ElementError("{}: Runtime dependencies are forbidden for '{}' elements" .format(self, self.get_kind()), reason="element-forbidden-rdepends") if self.BST_FORBID_BDEPENDS: if any(self.dependencies(Scope.BUILD, recurse=False)): raise ElementError("{}: Build dependencies are forbidden for '{}' elements" .format(self, self.get_kind()), reason="element-forbidden-bdepends") if self.BST_FORBID_SOURCES: if any(self.sources()): raise ElementError("{}: Sources are forbidden for '{}' elements" .format(self, self.get_kind()), reason="element-forbidden-sources") try: self.preflight() except BstError as e: # Prepend provenance to the error raise ElementError("{}: {}".format(self, e), reason=e.reason) from e # Ensure that the first source does not need access to previous soruces if self.__sources and self.__sources[0]._requires_previous_sources(): raise ElementError("{}: {} cannot be the first source of an element " "as it requires access to previous sources" .format(self, self.__sources[0])) # Preflight the sources for source in self.sources(): source._preflight() # _schedule_tracking(): # # Force an element state to be inconsistent. Any sources appear to be # inconsistent. # # This is used across the pipeline in sessions where the # elements in question are going to be tracked, causing the # pipeline to rebuild safely by ensuring cache key recalculation # and reinterrogation of element state after tracking of elements # succeeds. # def _schedule_tracking(self): self.__tracking_scheduled = True self._update_state() # _tracking_done(): # # This is called in the main process after the element has been tracked # def _tracking_done(self): assert self.__tracking_scheduled self.__tracking_scheduled = False self.__tracking_done = True self.__update_state_recursively() # _track(): # # Calls track() on the Element sources # # Raises: # SourceError: If one of the element sources has an error # # Returns: # (list): A list of Source object ids and their new references # def _track(self): refs = [] for index, source in enumerate(self.__sources): old_ref = source.get_ref() new_ref = source._track(self.__sources[0:index]) refs.append((source._unique_id, new_ref)) # Complimentary warning that the new ref will be unused. if old_ref != new_ref and self._get_workspace(): detail = "This source has an open workspace.\n" \ + "To start using the new reference, please close the existing workspace." source.warn("Updated reference will be ignored as source has open workspace", detail=detail) return refs # _prepare_sandbox(): # # This stages things for either _shell() (below) or also # is used to stage things by the `bst checkout` codepath # @contextmanager def _prepare_sandbox(self, scope, directory, deps='run', integrate=True): with self.__sandbox(directory, config=self.__sandbox_config) as sandbox: # Configure always comes first, and we need it. 
self.configure_sandbox(sandbox) # Stage something if we need it if not directory: if scope == Scope.BUILD: self.stage(sandbox) elif scope == Scope.RUN: if deps == 'run': dependency_scope = Scope.RUN else: dependency_scope = None # Stage deps in the sandbox root with self.timed_activity("Staging dependencies", silent_nested=True): self.stage_dependency_artifacts(sandbox, dependency_scope) # Run any integration commands provided by the dependencies # once they are all staged and ready if integrate: with self.timed_activity("Integrating sandbox"): for dep in self.dependencies(dependency_scope): dep.integrate(sandbox) yield sandbox # _stage_sources_in_sandbox(): # # Stage this element's sources to a directory inside sandbox # # Args: # sandbox (:class:`.Sandbox`): The build sandbox # directory (str): An absolute path to stage the sources at # mount_workspaces (bool): mount workspaces if True, copy otherwise # def _stage_sources_in_sandbox(self, sandbox, directory, mount_workspaces=True): # Only artifact caches that implement diff() are allowed to # perform incremental builds. if mount_workspaces and self.__can_build_incrementally(): workspace = self._get_workspace() sandbox.mark_directory(directory) sandbox._set_mount_source(directory, workspace.get_absolute_path()) # Stage all sources that need to be copied sandbox_root = sandbox.get_directory() host_directory = os.path.join(sandbox_root, directory.lstrip(os.sep)) self._stage_sources_at(host_directory, mount_workspaces=mount_workspaces) # _stage_sources_at(): # # Stage this element's sources to a directory # # Args: # directory (str): An absolute path to stage the sources at # mount_workspaces (bool): mount workspaces if True, copy otherwise # def _stage_sources_at(self, directory, mount_workspaces=True): with self.timed_activity("Staging sources", silent_nested=True): if os.path.isdir(directory) and os.listdir(directory): raise ElementError("Staging directory '{}' is not empty".format(directory)) workspace = self._get_workspace() if workspace: # If mount_workspaces is set and we're doing incremental builds, # the workspace is already mounted into the sandbox. if not (mount_workspaces and self.__can_build_incrementally()): with self.timed_activity("Staging local files at {}" .format(workspace.get_absolute_path())): workspace.stage(directory) else: # No workspace, stage directly for source in self.sources(): source._stage(directory) # Ensure deterministic mtime of sources at build time utils._set_deterministic_mtime(directory) # Ensure deterministic owners of sources at build time utils._set_deterministic_user(directory) # _set_required(): # # Mark this element and its runtime dependencies as required. # This unblocks pull/fetch/build. # def _set_required(self): if self.__required: # Already done return self.__required = True # Request artifacts of runtime dependencies for dep in self.dependencies(Scope.RUN, recurse=False): dep._set_required() self._update_state() # _is_required(): # # Returns whether this element has been marked as required. # def _is_required(self): return self.__required # _schedule_assemble(): # # This is called in the main process before the element is assembled # in a subprocess. 
# def _schedule_assemble(self): assert not self.__assemble_scheduled self.__assemble_scheduled = True # Requests artifacts of build dependencies for dep in self.dependencies(Scope.BUILD, recurse=False): dep._set_required() self._set_required() # Invalidate workspace key as the build modifies the workspace directory workspace = self._get_workspace() if workspace: workspace.invalidate_key() self._update_state() # _assemble_done(): # # This is called in the main process after the element has been assembled # and in the a subprocess after assembly completes. # # This will result in updating the element state. # def _assemble_done(self): assert self.__assemble_scheduled self.__assemble_scheduled = False self.__assemble_done = True self.__update_state_recursively() if self._get_workspace() and self._cached(): # # Note that this block can only happen in the # main process, since `self._cached()` cannot # be true when assembly is completed in the task. # # For this reason, it is safe to update and # save the workspaces configuration # key = self._get_cache_key() workspace = self._get_workspace() workspace.last_successful = key workspace.clear_running_files() self._get_context().get_workspaces().save_config() # This element will have already been marked as # required, but we bump the atime again, in case # we did not know the cache key until now. # # FIXME: This is not exactly correct, we should be # doing this at the time which we have discovered # a new cache key, this just happens to be the # last place where that can happen. # # Ultimately, we should be refactoring # Element._update_state() such that we know # when a cache key is actually discovered. # self.__artifacts.mark_required_elements([self]) # _assemble(): # # Internal method for running the entire build phase. # # This will: # - Prepare a sandbox for the build # - Call the public abstract methods for the build phase # - Cache the resulting artifact # # Returns: # (int): The size of the newly cached artifact # def _assemble(self): # Assert call ordering assert not self._cached() context = self._get_context() with self._output_file() as output_file: if not self.__sandbox_config_supported: self.warn("Sandbox configuration is not supported by the platform.", detail="Falling back to UID {} GID {}. Artifact will not be pushed." .format(self.__sandbox_config.build_uid, self.__sandbox_config.build_gid)) # Explicitly clean it up, keep the build dir around if exceptions are raised os.makedirs(context.builddir, exist_ok=True) rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir) # Cleanup the build directory on explicit SIGTERM def cleanup_rootdir(): utils._force_rmtree(rootdir) with _signals.terminator(cleanup_rootdir), \ self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox: # nopep8 sandbox_root = sandbox.get_directory() # By default, the dynamic public data is the same as the static public data. # The plugin's assemble() method may modify this, though. 
self.__dynamic_public = _yaml.node_copy(self.__public) # Call the abstract plugin methods try: # Step 1 - Configure self.configure_sandbox(sandbox) # Step 2 - Stage self.stage(sandbox) # Step 3 - Prepare self.__prepare(sandbox) # Step 4 - Assemble collect = self.assemble(sandbox) except BstError as e: # If an error occurred assembling an element in a sandbox, # then tack on the sandbox directory to the error e.sandbox = rootdir # If there is a workspace open on this element, it will have # been mounted for sandbox invocations instead of being staged. # # In order to preserve the correct failure state, we need to # copy over the workspace files into the appropriate directory # in the sandbox. # workspace = self._get_workspace() if workspace and self.__staged_sources_directory: sandbox_root = sandbox.get_directory() sandbox_path = os.path.join(sandbox_root, self.__staged_sources_directory.lstrip(os.sep)) try: utils.copy_files(workspace.get_absolute_path(), sandbox_path) except UtilError as err: self.warn("Failed to preserve workspace state for failed build sysroot: {}" .format(err)) raise collectdir = os.path.join(sandbox_root, collect.lstrip(os.sep)) if not os.path.exists(collectdir): raise ElementError( "Directory '{}' was not found inside the sandbox, " "unable to collect artifact contents" .format(collect)) # At this point, we expect an exception was raised leading to # an error message, or we have good output to collect. # Create artifact directory structure assembledir = os.path.join(rootdir, 'artifact') filesdir = os.path.join(assembledir, 'files') logsdir = os.path.join(assembledir, 'logs') metadir = os.path.join(assembledir, 'meta') os.mkdir(assembledir) os.mkdir(filesdir) os.mkdir(logsdir) os.mkdir(metadir) # Hard link files from collect dir to files directory utils.link_files(collectdir, filesdir) # Copy build log log_filename = context.get_log_filename() if log_filename: shutil.copyfile(log_filename, os.path.join(logsdir, 'build.log')) # Store public data _yaml.dump(_yaml.node_sanitize(self.__dynamic_public), os.path.join(metadir, 'public.yaml')) # ensure we have cache keys self._assemble_done() # Store keys.yaml _yaml.dump(_yaml.node_sanitize({ 'strong': self._get_cache_key(), 'weak': self._get_cache_key(_KeyStrength.WEAK), }), os.path.join(metadir, 'keys.yaml')) # Store dependencies.yaml _yaml.dump(_yaml.node_sanitize({ e.name: e._get_cache_key() for e in self.dependencies(Scope.BUILD) }), os.path.join(metadir, 'dependencies.yaml')) # Store workspaced.yaml _yaml.dump(_yaml.node_sanitize({ 'workspaced': bool(self._get_workspace()) }), os.path.join(metadir, 'workspaced.yaml')) # Store workspaced-dependencies.yaml _yaml.dump(_yaml.node_sanitize({ 'workspaced-dependencies': [ e.name for e in self.dependencies(Scope.BUILD) if e._get_workspace() ] }), os.path.join(metadir, 'workspaced-dependencies.yaml')) with self.timed_activity("Caching artifact"): artifact_size = utils._get_dir_size(assembledir) self.__artifacts.commit(self, assembledir, self.__get_cache_keys_for_commit()) # Finally cleanup the build dir cleanup_rootdir() return artifact_size # _fetch_done() # # Indicates that fetching the sources for this element has been done. # def _fetch_done(self): # We are not updating the state recursively here since fetching can # never end up in updating them. self._update_state() # _pull_pending() # # Check whether the artifact will be pulled. 
# # Returns: # (bool): Whether a pull operation is pending # def _pull_pending(self): if self._get_workspace(): # Workspace builds are never pushed to artifact servers return False if self.__strong_cached: # Artifact already in local cache return False # Pull is pending if artifact remote server available # and pull has not been attempted yet return self.__artifacts.has_fetch_remotes(element=self) and not self.__pull_done # _pull_done() # # Indicate that pull was attempted. # # This needs to be called in the main process after a pull # succeeds or fails so that we properly update the main # process data model # # This will result in updating the element state. # def _pull_done(self): self.__pull_done = True self.__update_state_recursively() def _pull_strong(self, *, progress=None): weak_key = self._get_cache_key(strength=_KeyStrength.WEAK) key = self.__strict_cache_key if not self.__artifacts.pull(self, key, progress=progress): return False # update weak ref by pointing it to this newly fetched artifact self.__artifacts.link_key(self, key, weak_key) return True def _pull_weak(self, *, progress=None): weak_key = self._get_cache_key(strength=_KeyStrength.WEAK) if not self.__artifacts.pull(self, weak_key, progress=progress): return False # extract strong cache key from this newly fetched artifact self._pull_done() # create tag for strong cache key key = self._get_cache_key(strength=_KeyStrength.STRONG) self.__artifacts.link_key(self, weak_key, key) return True # _pull(): # # Pull artifact from remote artifact repository into local artifact cache. # # Returns: True if the artifact has been downloaded, False otherwise # def _pull(self): context = self._get_context() def progress(percent, message): self.status(message) # Attempt to pull artifact without knowing whether it's available pulled = self._pull_strong(progress=progress) if not pulled and not self._cached() and not context.get_strict(): pulled = self._pull_weak(progress=progress) if not pulled: return False # Notify successfull download return True # _skip_push(): # # Determine whether we should create a push job for this element. # # Returns: # (bool): True if this element does not need a push job to be created # def _skip_push(self): if not self.__artifacts.has_push_remotes(element=self): # No push remotes for this element's project return True if not self._cached(): return True # Do not push tained artifact if self.__get_tainted(): return True return False # _push(): # # Push locally cached artifact to remote artifact repository. 
# # Returns: # (bool): True if the remote was updated, False if it already existed # and no updated was required # def _push(self): self.__assert_cached() if self.__get_tainted(): self.warn("Not pushing tainted artifact.") return False # Push all keys used for local commit pushed = self.__artifacts.push(self, self.__get_cache_keys_for_commit()) if not pushed: return False # Notify successful upload return True # _shell(): # # Connects the terminal with a shell running in a staged # environment # # Args: # scope (Scope): Either BUILD or RUN scopes are valid, or None # directory (str): A directory to an existing sandbox, or None # mounts (list): A list of (str, str) tuples, representing host/target paths to mount # isolate (bool): Whether to isolate the environment like we do in builds # prompt (str): A suitable prompt string for PS1 # command (list): An argv to launch in the sandbox # # Returns: Exit code # # If directory is not specified, one will be staged using scope def _shell(self, scope=None, directory=None, *, mounts=None, isolate=False, prompt=None, command=None): with self._prepare_sandbox(scope, directory) as sandbox: environment = self.get_environment() environment = copy.copy(environment) flags = SandboxFlags.INTERACTIVE | SandboxFlags.ROOT_READ_ONLY # Fetch the main toplevel project, in case this is a junctioned # subproject, we want to use the rules defined by the main one. context = self._get_context() project = context.get_toplevel_project() shell_command, shell_environment, shell_host_files = project.get_shell_config() if prompt is not None: environment['PS1'] = prompt # Special configurations for non-isolated sandboxes if not isolate: # Open the network, and reuse calling uid/gid # flags |= SandboxFlags.NETWORK_ENABLED | SandboxFlags.INHERIT_UID # Apply project defined environment vars to set for a shell for key, value in _yaml.node_items(shell_environment): environment[key] = value # Setup any requested bind mounts if mounts is None: mounts = [] for mount in shell_host_files + mounts: if not os.path.exists(mount.host_path): if not mount.optional: self.warn("Not mounting non-existing host file: {}".format(mount.host_path)) else: sandbox.mark_directory(mount.path) sandbox._set_mount_source(mount.path, mount.host_path) if command: argv = list(command) else: argv = shell_command self.status("Running command", detail=" ".join(argv)) # Run shells with network enabled and readonly root. return sandbox.run(argv, flags, env=environment) # _open_workspace(): # # "Open" a workspace for this element # # This requires that a workspace already be created in # the workspaces metadata first. # def _open_workspace(self): context = self._get_context() workspace = self._get_workspace() assert workspace is not None # First lets get a temp dir in our build directory # and stage there, then link the files over to the desired # path. # # We do this so that force opening workspaces which overwrites # files in the target directory actually works without any # additional support from Source implementations. # os.makedirs(context.builddir, exist_ok=True) with utils._tempdir(dir=context.builddir, prefix='workspace-{}' .format(self.normal_name)) as temp: for source in self.sources(): source._init_workspace(temp) # Now hardlink the files into the workspace target. 
utils.link_files(temp, workspace.get_absolute_path()) # _get_workspace(): # # Returns: # (Workspace|None): A workspace associated with this element # def _get_workspace(self): workspaces = self._get_context().get_workspaces() return workspaces.get_workspace(self._get_full_name()) # _write_script(): # # Writes a script to the given directory. def _write_script(self, directory): with open(_site.build_module_template, "r", encoding="utf-8") as f: script_template = f.read() variable_string = "" for var, val in self.get_environment().items(): variable_string += "{0}={1} ".format(var, val) script = script_template.format( name=self.normal_name, build_root=self.get_variable('build-root'), install_root=self.get_variable('install-root'), variables=variable_string, commands=self.generate_script() ) os.makedirs(directory, exist_ok=True) script_path = os.path.join(directory, "build-" + self.normal_name) with self.timed_activity("Writing build script", silent_nested=True): with utils.save_file_atomic(script_path, "w") as script_file: script_file.write(script) os.chmod(script_path, stat.S_IEXEC | stat.S_IREAD) # _subst_string() # # Substitue a string, this is an internal function related # to how junctions are loaded and needs to be more generic # than the public node_subst_member() # # Args: # value (str): A string value # # Returns: # (str): The string after substitutions have occurred # def _subst_string(self, value): return self.__variables.subst(value, None) # Returns the element whose sources this element is ultimately derived from. # # This is intended for being used to redirect commands that operate on an # element to the element whose sources it is ultimately derived from. # # For example, element A is a build element depending on source foo, # element B is a filter element that depends on element A. The source # element of B is A, since B depends on A, and A has sources. # def _get_source_element(self): return self ############################################################# # Private Local Methods # ############################################################# # __update_source_state() # # Updates source consistency state # def __update_source_state(self): # Cannot resolve source state until tracked if self.__tracking_scheduled: return self.__consistency = Consistency.CACHED workspace = self._get_workspace() # Special case for workspaces if workspace: # A workspace is considered inconsistent in the case # that it's directory went missing # fullpath = workspace.get_absolute_path() if not os.path.exists(fullpath): self.__consistency = Consistency.INCONSISTENT else: # Determine overall consistency of the element for source in self.__sources: source._update_state() source_consistency = source._get_consistency() self.__consistency = min(self.__consistency, source_consistency) # __calculate_cache_key(): # # Calculates the cache key # # Returns: # (str): A hex digest cache key for this Element, or None # # None is returned if information for the cache key is missing. 
# def __calculate_cache_key(self, dependencies): # No cache keys for dependencies which have no cache keys if None in dependencies: return None # Generate dict that is used as base for all cache keys if self.__cache_key_dict is None: # Filter out nocache variables from the element's environment cache_env = { key: value for key, value in self.node_items(self.__environment) if key not in self.__env_nocache } context = self._get_context() project = self._get_project() workspace = self._get_workspace() self.__cache_key_dict = { 'artifact-version': "{}.{}".format(BST_CORE_ARTIFACT_VERSION, self.BST_ARTIFACT_VERSION), 'context': context.get_cache_key(), 'project': project.get_cache_key(), 'element': self.get_unique_key(), 'execution-environment': self.__sandbox_config.get_unique_key(), 'environment': cache_env, 'sources': [s._get_unique_key(workspace is None) for s in self.__sources], 'workspace': '' if workspace is None else workspace.get_key(self._get_project()), 'public': self.__public, 'cache': 'CASCache' } self.__cache_key_dict['fatal-warnings'] = sorted(project._fatal_warnings) cache_key_dict = self.__cache_key_dict.copy() cache_key_dict['dependencies'] = dependencies return _cachekey.generate_key(cache_key_dict) # __can_build_incrementally() # # Check if the element can be built incrementally, this # is used to decide how to stage things # # Returns: # (bool): Whether this element can be built incrementally # def __can_build_incrementally(self): return bool(self._get_workspace()) # __prepare(): # # Internal method for calling public abstract prepare() method. # def __prepare(self, sandbox): workspace = self._get_workspace() # We need to ensure that the prepare() method is only called # once in workspaces, because the changes will persist across # incremental builds - not desirable, for example, in the case # of autotools' `./configure`. if not (workspace and workspace.prepared): self.prepare(sandbox) if workspace: workspace.prepared = True # __assert_cached() # # Raises an error if the artifact is not cached. # def __assert_cached(self): assert self._cached(), "{}: Missing artifact {}".format(self, self._get_brief_display_key()) # __get_tainted(): # # Checkes whether this artifact should be pushed to an artifact cache. # # Args: # recalculate (bool) - Whether to force recalculation # # Returns: # (bool) False if this artifact should be excluded from pushing. # # Note: # This method should only be called after the element's # artifact is present in the local artifact cache. # def __get_tainted(self, recalculate=False): if recalculate or self.__tainted is None: # Whether this artifact has a workspace workspaced = self.__get_artifact_metadata_workspaced() # Whether this artifact's dependencies have workspaces workspaced_dependencies = self.__get_artifact_metadata_workspaced_dependencies() # Other conditions should be or-ed self.__tainted = (workspaced or workspaced_dependencies or not self.__sandbox_config_supported) return self.__tainted # __sandbox(): # # A context manager to prepare a Sandbox object at the specified directory, # if the directory is None, then a directory will be chosen automatically # in the configured build directory. 
# # Args: # directory (str): The local directory where the sandbox will live, or None # stdout (fileobject): The stream for stdout for the sandbox # stderr (fileobject): The stream for stderr for the sandbox # config (SandboxConfig): The SandboxConfig object # # Yields: # (Sandbox): A usable sandbox # @contextmanager def __sandbox(self, directory, stdout=None, stderr=None, config=None): context = self._get_context() project = self._get_project() platform = Platform.get_platform() if directory is not None and os.path.exists(directory): sandbox = platform.create_sandbox(context, project, directory, stdout=stdout, stderr=stderr, config=config) yield sandbox else: os.makedirs(context.builddir, exist_ok=True) rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir) # Recursive contextmanager... with self.__sandbox(rootdir, stdout=stdout, stderr=stderr, config=config) as sandbox: yield sandbox # Cleanup the build dir utils._force_rmtree(rootdir) def __compose_default_splits(self, defaults): project = self._get_project() element_public = _yaml.node_get(defaults, Mapping, 'public', default_value={}) element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={}) element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={}) if self.__is_junction: splits = _yaml.node_chain_copy(element_splits) else: assert project._splits is not None splits = _yaml.node_chain_copy(project._splits) # Extend project wide split rules with any split rules defined by the element _yaml.composite(splits, element_splits) element_bst['split-rules'] = splits element_public['bst'] = element_bst defaults['public'] = element_public def __init_defaults(self, plugin_conf): # Defaults are loaded once per class and then reused # if not self.__defaults_set: # Load the plugin's accompanying .yaml file if one was provided defaults = {} try: defaults = _yaml.load(plugin_conf, os.path.basename(plugin_conf)) except LoadError as e: if e.reason != LoadErrorReason.MISSING_FILE: raise e # Special case; compose any element-wide split-rules declarations self.__compose_default_splits(defaults) # Override the element's defaults with element specific # overrides from the project.conf project = self._get_project() if self.__is_junction: elements = project.first_pass_config.element_overrides else: elements = project.element_overrides overrides = elements.get(self.get_kind()) if overrides: _yaml.composite(defaults, overrides) # Set the data class wide type(self).__defaults = defaults type(self).__defaults_set = True # This will resolve the final environment to be used when # creating sandboxes for this element # def __extract_environment(self, meta): default_env = _yaml.node_get(self.__defaults, Mapping, 'environment', default_value={}) if self.__is_junction: environment = {} else: project = self._get_project() environment = _yaml.node_chain_copy(project.base_environment) _yaml.composite(environment, default_env) _yaml.composite(environment, meta.environment) _yaml.node_final_assertions(environment) # Resolve variables in environment value strings final_env = {} for key, _ in self.node_items(environment): final_env[key] = self.node_subst_member(environment, key) return final_env def __extract_env_nocache(self, meta): if self.__is_junction: project_nocache = [] else: project = self._get_project() project.ensure_fully_loaded() project_nocache = project.base_env_nocache default_nocache = _yaml.node_get(self.__defaults, list, 'environment-nocache', default_value=[]) element_nocache = 
meta.env_nocache # Accumulate values from the element default, the project and the element # itself to form a complete list of nocache env vars. env_nocache = set(project_nocache + default_nocache + element_nocache) # Convert back to list now we know they're unique return list(env_nocache) # This will resolve the final variables to be used when # substituting command strings to be run in the sandbox # def __extract_variables(self, meta): default_vars = _yaml.node_get(self.__defaults, Mapping, 'variables', default_value={}) project = self._get_project() if self.__is_junction: variables = _yaml.node_chain_copy(project.first_pass_config.base_variables) else: project.ensure_fully_loaded() variables = _yaml.node_chain_copy(project.base_variables) _yaml.composite(variables, default_vars) _yaml.composite(variables, meta.variables) _yaml.node_final_assertions(variables) for var in ('project-name', 'element-name', 'max-jobs'): provenance = _yaml.node_get_provenance(variables, var) if provenance and provenance.filename != '': raise LoadError(LoadErrorReason.PROTECTED_VARIABLE_REDEFINED, "{}: invalid redefinition of protected variable '{}'" .format(provenance, var)) return variables # This will resolve the final configuration to be handed # off to element.configure() # def __extract_config(self, meta): # The default config is already composited with the project overrides config = _yaml.node_get(self.__defaults, Mapping, 'config', default_value={}) config = _yaml.node_chain_copy(config) _yaml.composite(config, meta.config) _yaml.node_final_assertions(config) return config # Sandbox-specific configuration data, to be passed to the sandbox's constructor. # def __extract_sandbox_config(self, meta): if self.__is_junction: sandbox_config = {'build-uid': 0, 'build-gid': 0} else: project = self._get_project() project.ensure_fully_loaded() sandbox_config = _yaml.node_chain_copy(project._sandbox) host_os, _, _, _, host_arch = os.uname() # The default config is already composited with the project overrides sandbox_defaults = _yaml.node_get(self.__defaults, Mapping, 'sandbox', default_value={}) sandbox_defaults = _yaml.node_chain_copy(sandbox_defaults) _yaml.composite(sandbox_config, sandbox_defaults) _yaml.composite(sandbox_config, meta.sandbox) _yaml.node_final_assertions(sandbox_config) # Sandbox config, unlike others, has fixed members so we should validate them _yaml.node_validate(sandbox_config, ['build-uid', 'build-gid', 'build-os', 'build-arch']) return SandboxConfig( int(self.node_subst_member(sandbox_config, 'build-uid')), int(self.node_subst_member(sandbox_config, 'build-gid')), self.node_subst_member(sandbox_config, 'build-os', default=host_os), self.node_subst_member(sandbox_config, 'build-arch', default=host_arch)) # This makes a special exception for the split rules, which # elements may extend but whos defaults are defined in the project. 
# def __extract_public(self, meta): base_public = _yaml.node_get(self.__defaults, Mapping, 'public', default_value={}) base_public = _yaml.node_chain_copy(base_public) base_bst = _yaml.node_get(base_public, Mapping, 'bst', default_value={}) base_splits = _yaml.node_get(base_bst, Mapping, 'split-rules', default_value={}) element_public = _yaml.node_chain_copy(meta.public) element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={}) element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={}) # Allow elements to extend the default splits defined in their project or # element specific defaults _yaml.composite(base_splits, element_splits) element_bst['split-rules'] = base_splits element_public['bst'] = element_bst _yaml.node_final_assertions(element_public) # Also, resolve any variables in the public split rules directly for domain, splits in self.node_items(base_splits): new_splits = [] for index, split in enumerate(splits): provenance = _yaml.node_get_provenance(base_splits, key=domain, indices=[index]) new_splits.append( self.__variables.subst(split.strip(), provenance) ) base_splits[domain] = new_splits return element_public def __init_splits(self): bstdata = self.get_public_data('bst') splits = bstdata.get('split-rules') self.__splits = { domain: re.compile( "^(?:" + "|".join([utils._glob2re(r) for r in rules]) + ")$", re.MULTILINE | re.DOTALL ) for domain, rules in self.node_items(splits) } def __compute_splits(self, include=None, exclude=None, orphans=True): artifact_base, _ = self.__extract() basedir = os.path.join(artifact_base, 'files') # No splitting requested, just report complete artifact if orphans and not (include or exclude): for filename in utils.list_relative_paths(basedir): yield filename return if not self.__splits: self.__init_splits() element_domains = list(self.__splits.keys()) if not include: include = element_domains if not exclude: exclude = [] # Ignore domains that dont apply to this element # include = [domain for domain in include if domain in element_domains] exclude = [domain for domain in exclude if domain in element_domains] # FIXME: Instead of listing the paths in an extracted artifact, # we should be using a manifest loaded from the artifact # metadata. # element_files = [ os.path.join(os.sep, filename) for filename in utils.list_relative_paths(basedir) ] for filename in element_files: include_file = False exclude_file = False claimed_file = False for domain in element_domains: if self.__splits[domain].match(filename): claimed_file = True if domain in include: include_file = True if domain in exclude: exclude_file = True if orphans and not claimed_file: include_file = True if include_file and not exclude_file: yield filename.lstrip(os.sep) def __file_is_whitelisted(self, path): # Considered storing the whitelist regex for re-use, but public data # can be altered mid-build. # Public data is not guaranteed to stay the same for the duration of # the build, but I can think of no reason to change it mid-build. # If this ever changes, things will go wrong unexpectedly. 
if not self.__whitelist_regex: bstdata = self.get_public_data('bst') whitelist = _yaml.node_get(bstdata, list, 'overlap-whitelist', default_value=[]) whitelist_expressions = [ utils._glob2re( self.__variables.subst( exp.strip(), _yaml.node_get_provenance(bstdata, key='overlap-whitelist', indices=[index]) ) ) for index, exp in enumerate(whitelist) ] expression = ('^(?:' + '|'.join(whitelist_expressions) + ')$') self.__whitelist_regex = re.compile(expression, re.MULTILINE | re.DOTALL) return self.__whitelist_regex.match(path) or self.__whitelist_regex.match(os.path.join(os.sep, path)) # __extract(): # # Extract an artifact and return the directory # # Args: # key (str): The key for the artifact to extract, # or None for the default key # # Returns: # (str): The path to the extracted artifact # (str): The chosen key # def __extract(self, key=None): if key is None: context = self._get_context() key = self.__strict_cache_key # Use weak cache key, if artifact is missing for strong cache key # and the context allows use of weak cache keys if not context.get_strict() and not self.__artifacts.contains(self, key): key = self._get_cache_key(strength=_KeyStrength.WEAK) return (self.__artifacts.extract(self, key), key) # __get_artifact_metadata_keys(): # # Retrieve the strong and weak keys from the given artifact. # # Args: # key (str): The artifact key, or None for the default key # # Returns: # (str): The strong key # (str): The weak key # def __get_artifact_metadata_keys(self, key=None): # Now extract it and possibly derive the key artifact_base, key = self.__extract(key) # Now try the cache, once we're sure about the key if key in self.__metadata_keys: return (self.__metadata_keys[key]['strong'], self.__metadata_keys[key]['weak']) # Parse the expensive yaml now and cache the result meta_file = os.path.join(artifact_base, 'meta', 'keys.yaml') meta = _yaml.load(meta_file) strong_key = meta['strong'] weak_key = meta['weak'] assert key in (strong_key, weak_key) self.__metadata_keys[strong_key] = meta self.__metadata_keys[weak_key] = meta return (strong_key, weak_key) # __get_artifact_metadata_dependencies(): # # Retrieve the hash of dependency strong keys from the given artifact. # # Args: # key (str): The artifact key, or None for the default key # # Returns: # (dict): A dictionary of element names and their strong keys # def __get_artifact_metadata_dependencies(self, key=None): # Extract it and possibly derive the key artifact_base, key = self.__extract(key) # Now try the cache, once we're sure about the key if key in self.__metadata_dependencies: return self.__metadata_dependencies[key] # Parse the expensive yaml now and cache the result meta_file = os.path.join(artifact_base, 'meta', 'dependencies.yaml') meta = _yaml.load(meta_file) # Cache it under both strong and weak keys strong_key, weak_key = self.__get_artifact_metadata_keys(key) self.__metadata_dependencies[strong_key] = meta self.__metadata_dependencies[weak_key] = meta return meta # __get_artifact_metadata_workspaced(): # # Retrieve the hash of dependency strong keys from the given artifact. 
# # Args: # key (str): The artifact key, or None for the default key # # Returns: # (bool): Whether the given artifact was workspaced # def __get_artifact_metadata_workspaced(self, key=None): # Extract it and possibly derive the key artifact_base, key = self.__extract(key) # Now try the cache, once we're sure about the key if key in self.__metadata_workspaced: return self.__metadata_workspaced[key] # Parse the expensive yaml now and cache the result meta_file = os.path.join(artifact_base, 'meta', 'workspaced.yaml') meta = _yaml.load(meta_file) workspaced = _yaml.node_get(meta, bool, 'workspaced') # Cache it under both strong and weak keys strong_key, weak_key = self.__get_artifact_metadata_keys(key) self.__metadata_workspaced[strong_key] = workspaced self.__metadata_workspaced[weak_key] = workspaced return workspaced # __get_artifact_metadata_workspaced_dependencies(): # # Retrieve the hash of dependency strong keys from the given artifact. # # Args: # key (str): The artifact key, or None for the default key # # Returns: # (list): List of which dependencies are workspaced # def __get_artifact_metadata_workspaced_dependencies(self, key=None): # Extract it and possibly derive the key artifact_base, key = self.__extract(key) # Now try the cache, once we're sure about the key if key in self.__metadata_workspaced_dependencies: return self.__metadata_workspaced_dependencies[key] # Parse the expensive yaml now and cache the result meta_file = os.path.join(artifact_base, 'meta', 'workspaced-dependencies.yaml') meta = _yaml.load(meta_file) workspaced = _yaml.node_get(meta, list, 'workspaced-dependencies') # Cache it under both strong and weak keys strong_key, weak_key = self.__get_artifact_metadata_keys(key) self.__metadata_workspaced_dependencies[strong_key] = workspaced self.__metadata_workspaced_dependencies[weak_key] = workspaced return workspaced # __load_public_data(): # # Loads the public data from the cached artifact # def __load_public_data(self): self.__assert_cached() assert self.__dynamic_public is None # Load the public data from the artifact artifact_base, _ = self.__extract() metadir = os.path.join(artifact_base, 'meta') self.__dynamic_public = _yaml.load(os.path.join(metadir, 'public.yaml')) def __get_cache_keys_for_commit(self): keys = [] # tag with strong cache key based on dependency versions used for the build keys.append(self._get_cache_key(strength=_KeyStrength.STRONG)) # also store under weak cache key keys.append(self._get_cache_key(strength=_KeyStrength.WEAK)) return utils._deduplicate(keys) # __update_state_recursively() # # Update the state of all reverse dependencies, recursively. 
# def __update_state_recursively(self): queue = _UniquePriorityQueue() queue.push(self._unique_id, self) while queue: element = queue.pop() old_ready_for_runtime = element.__ready_for_runtime old_strict_cache_key = element.__strict_cache_key element._update_state() if element.__ready_for_runtime != old_ready_for_runtime or \ element.__strict_cache_key != old_strict_cache_key: for rdep in element.__reverse_dependencies: queue.push(rdep._unique_id, rdep) def _overlap_error_detail(f, forbidden_overlap_elements, elements): if forbidden_overlap_elements: return ("/{}: {} {} not permitted to overlap other elements, order {} \n" .format(f, " and ".join(forbidden_overlap_elements), "is" if len(forbidden_overlap_elements) == 1 else "are", " above ".join(reversed(elements)))) else: return "" buildstream-1.6.9/buildstream/plugin.py000066400000000000000000000747541437515270000202520ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ Plugin - Base plugin class ========================== BuildStream supports third party plugins to define additional kinds of :mod:`Elements ` and :mod:`Sources `. The common API is documented here, along with some information on how external plugin packages are structured. .. _core_plugin_abstract_methods: Abstract Methods ---------------- For both :mod:`Elements ` and :mod:`Sources `, it is mandatory to implement the following abstract methods: * :func:`Plugin.configure() ` Loads the user provided configuration YAML for the given source or element * :func:`Plugin.preflight() ` Early preflight checks allow plugins to bail out early with an error in the case that it can predict that failure is inevitable. * :func:`Plugin.get_unique_key() ` Once all configuration has been loaded and preflight checks have passed, this method is used to inform the core of a plugin's unique configuration. Configurable Warnings --------------------- Warnings raised through calling :func:`Plugin.warn() ` can provide an optional parameter ``warning_token``, this will raise a :class:`PluginError` if the warning is configured as fatal within the project configuration. Configurable warnings will be prefixed with :func:`Plugin.get_kind() ` within buildstream and must be prefixed as such in project configurations. For more detail on project configuration see :ref:`Configurable Warnings `. It is important to document these warnings in your plugin documentation to allow users to make full use of them while configuring their projects. Example ~~~~~~~ If the :class:`git ` plugin uses the warning ``"inconsistent-submodule"`` then it could be referenced in project configuration as ``"git:inconsistent-submodule"``. Plugin Structure ---------------- A plugin should consist of a `setuptools package `_ that advertises contained plugins using `entry points `_. 
A plugin entry point must be a module that extends a class in the :ref:`core_framework` to be discovered by BuildStream. A YAML file defining plugin default settings with the same name as the module can also be defined in the same directory as the plugin module. .. note:: BuildStream does not support function/class entry points. A sample plugin could be structured as such: .. code-block:: text . ├── elements │   ├── autotools.py │   ├── autotools.yaml │   └── __init__.py ├── MANIFEST.in └── setup.py The setuptools configuration should then contain at least: setup.py: .. literalinclude:: ../source/sample_plugin/setup.py :language: python MANIFEST.in: .. literalinclude:: ../source/sample_plugin/MANIFEST.in :language: text Class Reference --------------- """ import itertools import os import subprocess from contextlib import contextmanager from weakref import WeakValueDictionary from . import _yaml from . import utils from ._exceptions import PluginError, ImplError from ._message import Message, MessageType from .types import CoreWarnings class Plugin(): """Plugin() Base Plugin class. Some common features to both Sources and Elements are found in this class. .. note:: Derivation of plugins is not supported. Plugins may only derive from the base :mod:`Source ` and :mod:`Element ` types, and any convenience subclasses (like :mod:`BuildElement `) which are included in the buildstream namespace. """ BST_REQUIRED_VERSION_MAJOR = 0 """Minimum required major version""" BST_REQUIRED_VERSION_MINOR = 0 """Minimum required minor version""" BST_FORMAT_VERSION = 0 """The plugin's YAML format version This should be set to ``1`` the first time any new configuration is understood by your :func:`Plugin.configure() ` implementation and subsequently bumped every time your configuration is enhanced. .. note:: Plugins are expected to maintain backward compatibility in the format and configurations they expose. The versioning is intended to track availability of new features only. For convenience, the format version for plugins maintained and distributed with BuildStream are revisioned with BuildStream's core format version :ref:`core format version `. """ # Unique id generator for Plugins # # Each plugin gets a unique id at creation. # # Ids are a monotically increasing integer which # starts as 1 (a falsy plugin ID is considered unset # in various parts of the codebase). # __id_generator = itertools.count(1) # Hold on to a lookup table by counter of all instantiated plugins. # We use this to send the id back from child processes so we can lookup # corresponding element/source in the master process. # # Use WeakValueDictionary() so the map we use to lookup objects does not # keep the plugins alive after pipeline destruction. # # Note that Plugins can only be instantiated in the main process before # scheduling tasks. __TABLE = WeakValueDictionary() def __init__(self, name, context, project, provenance, type_tag, unique_id=None): self.name = name """The plugin name For elements, this is the project relative bst filename, for sources this is the owning element's name with a suffix indicating it's index on the owning element. For sources this is for display purposes only. """ # Unique ID # # This id allows to uniquely identify a plugin. # # /!\ the unique id must be an increasing value /!\ # This is because we are depending on it in buildstream.element.Element # to give us a topological sort over all elements. # Modifying how we handle ids here will modify the behavior of the # Element's state handling. 
if unique_id is None: # Register ourself in the table containing all existing plugins self._unique_id = next(self.__id_generator) self.__TABLE[self._unique_id] = self else: # If the unique ID is passed in the constructor, then it is a cloned # plugin in a subprocess and should use the same ID. self._unique_id = unique_id self.__context = context # The Context object self.__project = project # The Project object self.__provenance = provenance # The Provenance information self.__type_tag = type_tag # The type of plugin (element or source) self.__configuring = False # Whether we are currently configuring # Infer the kind identifier modulename = type(self).__module__ self.__kind = modulename.rsplit('.', maxsplit=1)[-1] self.debug("Created: {}".format(self)) def __del__(self): # Dont send anything through the Message() pipeline at destruction time, # any subsequent lookup of plugin by unique id would raise KeyError. if self.__context.log_debug: print("DEBUG: Destroyed: {}".format(self)) def __str__(self): return "{kind} {typetag} at {provenance}".format( kind=self.__kind, typetag=self.__type_tag, provenance=self.__provenance) ############################################################# # Abstract Methods # ############################################################# def configure(self, node): """Configure the Plugin from loaded configuration data Args: node (dict): The loaded configuration dictionary Raises: :class:`.SourceError`: If its a :class:`.Source` implementation :class:`.ElementError`: If its an :class:`.Element` implementation Plugin implementors should implement this method to read configuration data and store it. Plugins should use the :func:`Plugin.node_get_member() ` and :func:`Plugin.node_get_list_element() ` methods to fetch values from the passed `node`. This will ensure that a nice human readable error message will be raised if the expected configuration is not found, indicating the filename, line and column numbers. Further the :func:`Plugin.node_validate() ` method should be used to ensure that the user has not specified keys in `node` which are unsupported by the plugin. .. note:: For Elements, when variable substitution is desirable, the :func:`Element.node_subst_member() ` and :func:`Element.node_subst_list_element() ` methods can be used. """ raise ImplError("{tag} plugin '{kind}' does not implement configure()".format( tag=self.__type_tag, kind=self.get_kind())) def preflight(self): """Preflight Check Raises: :class:`.SourceError`: If its a :class:`.Source` implementation :class:`.ElementError`: If its an :class:`.Element` implementation This method is run after :func:`Plugin.configure() ` and after the pipeline is fully constructed. Implementors should simply raise :class:`.SourceError` or :class:`.ElementError` with an informative message in the case that the host environment is unsuitable for operation. Plugins which require host tools (only sources usually) should obtain them with :func:`utils.get_host_tool() ` which will raise an error automatically informing the user that a host tool is needed. """ raise ImplError("{tag} plugin '{kind}' does not implement preflight()".format( tag=self.__type_tag, kind=self.get_kind())) def get_unique_key(self): """Return something which uniquely identifies the plugin input Returns: A string, list or dictionary which uniquely identifies the input This is used to construct unique cache keys for elements and sources, sources should return something which uniquely identifies the payload, such as an sha256 sum of a tarball content. 
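For example, a hypothetical source plugin which stages a tarball might simply key on its exact *ref* and *url* (a minimal sketch, not taken from any real plugin; it assumes ``self.url`` and ``self.ref`` were stored by :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`):

.. code:: python

   def get_unique_key(self):
       # The ref is an sha256 sum of the tarball payload, so together
       # with the url it uniquely identifies what will be staged
       return {
           'url': self.url,
           'ref': self.ref
       }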
Elements and Sources should implement this by collecting any configurations which could possibly effect the output and return a dictionary of these settings. For Sources, this is guaranteed to only be called if :func:`Source.get_consistency() ` has not returned :func:`Consistency.INCONSISTENT ` which is to say that the Source is expected to have an exact *ref* indicating exactly what source is going to be staged. """ raise ImplError("{tag} plugin '{kind}' does not implement get_unique_key()".format( tag=self.__type_tag, kind=self.get_kind())) ############################################################# # Public Methods # ############################################################# def get_kind(self): """Fetches the kind of this plugin Returns: (str): The kind of this plugin """ return self.__kind def node_items(self, node): """Iterate over a dictionary loaded from YAML Args: node (dict): The YAML loaded dictionary object Returns: list: List of key/value tuples to iterate over BuildStream holds some private data in dictionaries loaded from the YAML in order to preserve information to report in errors. This convenience function should be used instead of the dict.items() builtin function provided by python. """ yield from _yaml.node_items(node) def node_provenance(self, node, member_name=None): """Gets the provenance for `node` and `member_name` This reports a string with file, line and column information suitable for reporting an error or warning. Args: node (dict): The YAML loaded dictionary object member_name (str): The name of the member to check, or None for the node itself Returns: (str): A string describing the provenance of the node and member """ provenance = _yaml.node_get_provenance(node, key=member_name) return str(provenance) def node_get_member(self, node, expected_type, member_name, default=utils._sentinel): """Fetch the value of a node member, raising an error if the value is missing or incorrectly typed. Args: node (dict): A dictionary loaded from YAML expected_type (type): The expected type of the node member member_name (str): The name of the member to fetch default (expected_type): A value to return when *member_name* is not specified in *node* Returns: The value of *member_name* in *node*, otherwise *default* Raises: :class:`.LoadError`: When *member_name* is not found and no *default* was provided Note: Returned strings are stripped of leading and trailing whitespace **Example:** .. code:: python # Expect a string 'name' in 'node' name = self.node_get_member(node, str, 'name') # Fetch an optional integer level = self.node_get_member(node, int, 'level', -1) """ return _yaml.node_get(node, expected_type, member_name, default_value=default) def node_get_project_path(self, node, key, *, check_is_file=False, check_is_dir=False): """Fetches a project path from a dictionary node and validates it Paths are asserted to never lead to a directory outside of the project directory. In addition, paths can not point to symbolic links, fifos, sockets and block/character devices. The `check_is_file` and `check_is_dir` parameters can be used to perform additional validations on the path. Note that an exception will always be raised if both parameters are set to ``True``. Args: node (dict): A dictionary loaded from YAML key (str): The key whose value contains a path to validate check_is_file (bool): If ``True`` an error will also be raised if path does not point to a regular file. 
Defaults to ``False`` check_is_dir (bool): If ``True`` an error will also be raised if path does not point to a directory. Defaults to ``False`` Returns: (str): The project path Raises: :class:`.LoadError`: In the case that the project path is not valid or does not exist *Since: 1.2* **Example:** .. code:: python path = self.node_get_project_path(node, 'path') """ return _yaml.node_get_project_path(node, key, self.__project.directory, check_is_file=check_is_file, check_is_dir=check_is_dir) def node_validate(self, node, valid_keys): """This should be used in :func:`~buildstream.plugin.Plugin.configure` implementations to assert that users have only entered valid configuration keys. Args: node (dict): A dictionary loaded from YAML valid_keys (iterable): A list of valid keys for the node Raises: :class:`.LoadError`: When an invalid key is found **Example:** .. code:: python # Ensure our node only contains valid autotools config keys self.node_validate(node, [ 'configure-commands', 'build-commands', 'install-commands', 'strip-commands' ]) """ _yaml.node_validate(node, valid_keys) def node_get_list_element(self, node, expected_type, member_name, indices): """Fetch the value of a list element from a node member, raising an error if the value is incorrectly typed. Args: node (dict): A dictionary loaded from YAML expected_type (type): The expected type of the node member member_name (str): The name of the member to fetch indices (list of int): List of indices to search, in case of nested lists Returns: The value of the list element in *member_name* at the specified *indices* Raises: :class:`.LoadError` Note: Returned strings are stripped of leading and trailing whitespace **Example:** .. code:: python # Fetch the list itself things = self.node_get_member(node, list, 'things') # Iterate over the list indices for i in range(len(things)): # Fetch dict things thing = self.node_get_list_element( node, dict, 'things', [ i ]) """ return _yaml.node_get(node, expected_type, member_name, indices=indices) def debug(self, brief, *, detail=None): """Print a debugging message Args: brief (str): The brief message detail (str): An optional detailed message, can be multiline output """ if self.__context.log_debug: self.__message(MessageType.DEBUG, brief, detail=detail) def status(self, brief, *, detail=None): """Print a status message Args: brief (str): The brief message detail (str): An optional detailed message, can be multiline output Note: Status messages tell about what a plugin is currently doing """ self.__message(MessageType.STATUS, brief, detail=detail) def info(self, brief, *, detail=None): """Print an informative message Args: brief (str): The brief message detail (str): An optional detailed message, can be multiline output Note: Informative messages tell the user something they might want to know, like if refreshing an element caused it to change. """ self.__message(MessageType.INFO, brief, detail=detail) def warn(self, brief, *, detail=None, warning_token=None): """Print a warning message, checks warning_token against project configuration Args: brief (str): The brief message detail (str): An optional detailed message, can be multiline output warning_token (str): An optional configurable warning assosciated with this warning, this will cause PluginError to be raised if this warning is configured as fatal. 
(*Since 1.4*) Raises: (:class:`.PluginError`): When warning_token is considered fatal by the project configuration """ if warning_token: warning_token = _prefix_warning(self, warning_token) brief = "[{}]: {}".format(warning_token, brief) project = self._get_project() if project._warning_is_fatal(warning_token): detail = detail if detail else "" raise PluginError(message="{}\n{}".format(brief, detail), reason=warning_token) self.__message(MessageType.WARN, brief=brief, detail=detail) def log(self, brief, *, detail=None): """Log a message into the plugin's log file The message will not be shown in the master log at all (so it will not be displayed to the user on the console). Args: brief (str): The brief message detail (str): An optional detailed message, can be multiline output """ self.__message(MessageType.LOG, brief, detail=detail) @contextmanager def timed_activity(self, activity_name, *, detail=None, silent_nested=False): """Context manager for performing timed activities in plugins Args: activity_name (str): The name of the activity detail (str): An optional detailed message, can be multiline output silent_nested (bool): If specified, nested messages will be silenced This function lets you perform timed tasks in your plugin, the core will take care of timing the duration of your task and printing start / fail / success messages. **Example** .. code:: python # Activity will be logged and timed with self.timed_activity("Mirroring {}".format(self.url)): # This will raise SourceError on its own self.call(... command which takes time ...) """ with self.__context.timed_activity(activity_name, unique_id=self._unique_id, detail=detail, silent_nested=silent_nested): yield def call(self, *popenargs, fail=None, fail_temporarily=False, **kwargs): """A wrapper for subprocess.call() Args: popenargs (list): Popen() arguments fail (str): A message to display if the process returns a non zero exit code fail_temporarily (bool): Whether any exceptions should be raised as temporary. (*Since: 1.2*) rest_of_args (kwargs): Remaining arguments to subprocess.call() Returns: (int): The process exit code. Raises: (:class:`.PluginError`): If a non-zero return code is received and *fail* is specified Note: If *fail* is not specified, then the return value of subprocess.call() is returned even on error, and no exception is automatically raised. **Example** .. code:: python # Call some host tool self.tool = utils.get_host_tool('toolname') self.call( [self.tool, '--download-ponies', self.mirror_directory], "Failed to download ponies from {}".format( self.mirror_directory)) """ exit_code, _ = self.__call(*popenargs, fail=fail, fail_temporarily=fail_temporarily, **kwargs) return exit_code def check_output(self, *popenargs, fail=None, fail_temporarily=False, **kwargs): """A wrapper for subprocess.check_output() Args: popenargs (list): Popen() arguments fail (str): A message to display if the process returns a non zero exit code fail_temporarily (bool): Whether any exceptions should be raised as temporary. (*Since: 1.2*) rest_of_args (kwargs): Remaining arguments to subprocess.call() Returns: (int): The process exit code (str): The process standard output Raises: (:class:`.PluginError`): If a non-zero return code is received and *fail* is specified Note: If *fail* is not specified, then the return value of subprocess.check_output() is returned even on error, and no exception is automatically raised. **Example** .. 
code:: python # Get the tool at preflight time self.tool = utils.get_host_tool('toolname') # Call the tool, automatically raise an error _, output = self.check_output( [self.tool, '--print-ponies'], "Failed to print the ponies in {}".format( self.mirror_directory), cwd=self.mirror_directory) # Call the tool, inspect exit code exit_code, output = self.check_output( [self.tool, 'get-ref', tracking], cwd=self.mirror_directory) if exit_code == 128: return elif exit_code != 0: fmt = "{plugin}: Failed to get ref for tracking: {track}" raise SourceError( fmt.format(plugin=self, track=tracking)) from e """ return self.__call(*popenargs, collect_stdout=True, fail=fail, fail_temporarily=fail_temporarily, **kwargs) ############################################################# # Private Methods used in BuildStream # ############################################################# # _lookup(): # # Fetch a plugin in the current process by its # unique identifier # # Args: # unique_id: The unique identifier as returned by # plugin._unique_id # # Returns: # (Plugin): The plugin for the given ID, or None # @classmethod def _lookup(cls, unique_id): assert unique_id != 0, "Looking up invalid plugin ID 0, ID counter starts at 1" assert unique_id in cls.__TABLE, "Could not find plugin with ID {}".format(unique_id) return cls.__TABLE[unique_id] # _get_context() # # Fetches the invocation context # def _get_context(self): return self.__context # _get_project() # # Fetches the project object associated with this plugin # def _get_project(self): return self.__project # _get_provenance(): # # Fetch bst file, line and column of the entity # def _get_provenance(self): return self.__provenance # Context manager for getting the open file handle to this # plugin's log. Used in the child context to add stuff to # a log. # @contextmanager def _output_file(self): log = self.__context.get_log_handle() if log is None: with open(os.devnull, "w", encoding="utf-8") as output: yield output else: yield log # _configure(): # # Calls configure() for the plugin, this must be called by # the core instead of configure() directly, so that the # _get_configuring() state is up to date. 
# # Args: # node (dict): The loaded configuration dictionary # def _configure(self, node): self.__configuring = True self.configure(node) self.__configuring = False # _get_configuring(): # # Checks whether the plugin is in the middle of having # its Plugin.configure() method called # # Returns: # (bool): Whether we are currently configuring def _get_configuring(self): return self.__configuring # _preflight(): # # Calls preflight() for the plugin, and allows generic preflight # checks to be added # # Raises: # SourceError: If it's a Source implementation # ElementError: If it's an Element implementation # ProgramNotFoundError: If a required host tool is not found # def _preflight(self): self.preflight() ############################################################# # Local Private Methods # ############################################################# # Internal subprocess implementation for the call() and check_output() APIs # def __call(self, *popenargs, collect_stdout=False, fail=None, fail_temporarily=False, **kwargs): with self._output_file() as output_file: if 'stdout' not in kwargs: kwargs['stdout'] = output_file if 'stderr' not in kwargs: kwargs['stderr'] = output_file if collect_stdout: kwargs['stdout'] = subprocess.PIPE self.__note_command(output_file, *popenargs, **kwargs) exit_code, output = utils._call(*popenargs, **kwargs) if fail and exit_code: raise PluginError("{plugin}: {message}".format(plugin=self, message=fail), temporary=fail_temporarily) return (exit_code, output) def __message(self, message_type, brief, **kwargs): message = Message(self._unique_id, message_type, brief, **kwargs) self.__context.message(message) def __note_command(self, output, *popenargs, **kwargs): workdir = os.getcwd() workdir = kwargs.get('cwd', workdir) command = " ".join(popenargs[0]) output.write('Running host command {}: {}\n'.format(workdir, command)) output.flush() self.status('Running host command', detail=command) def _get_full_name(self): project = self.__project if project.junction: return '{}:{}'.format(project.junction.name, self.name) else: return self.name # A local table for _prefix_warning() # __CORE_WARNINGS = [ value for name, value in CoreWarnings.__dict__.items() if not name.startswith("__") ] # _prefix_warning(): # # Prefix a warning with the plugin kind. CoreWarnings are not prefixed. # # Args: # plugin (Plugin): The plugin which raised the warning # warning (str): The warning to prefix # # Returns: # (str): A prefixed warning # def _prefix_warning(plugin, warning): if any((warning is core_warning for core_warning in __CORE_WARNINGS)): return warning return "{}:{}".format(plugin.get_kind(), warning) buildstream-1.6.9/buildstream/plugins/000077500000000000000000000000001437515270000200425ustar00rootroot00000000000000buildstream-1.6.9/buildstream/plugins/elements/000077500000000000000000000000001437515270000216565ustar00rootroot00000000000000buildstream-1.6.9/buildstream/plugins/elements/__init__.py000066400000000000000000000000001437515270000237550ustar00rootroot00000000000000buildstream-1.6.9/buildstream/plugins/elements/autotools.py000066400000000000000000000037271437515270000242720ustar00rootroot00000000000000# # Copyright (C) 2016, 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. 
# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ autotools - Autotools build element =================================== This is a :mod:`BuildElement ` implementation for using Autotools build scripts (also known as the `GNU Build System `_). You will often want to pass additional arguments to ``configure``. This should be done on a per-element basis by setting the ``conf-local`` variable. Here is an example: .. code:: yaml variables: conf-local: | --disable-foo --enable-bar If you want to pass extra options to ``configure`` for every element in your project, set the ``conf-global`` variable in your project.conf file. Here is an example of that: .. code:: yaml elements: autotools: variables: conf-global: | --disable-gtk-doc --disable-static Here is the default configuration for the ``autotools`` element in full: .. literalinclude:: ../../../buildstream/plugins/elements/autotools.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'autotools' kind. class AutotoolsElement(BuildElement): pass # Plugin entry point def setup(): return AutotoolsElement buildstream-1.6.9/buildstream/plugins/elements/autotools.yaml000066400000000000000000000036141437515270000245770ustar00rootroot00000000000000# Autotools default configurations variables: autogen: | export NOCONFIGURE=1; if [ -x %{conf-cmd} ]; then true; elif [ -x autogen ]; then ./autogen; elif [ -x autogen.sh ]; then ./autogen.sh; elif [ -x bootstrap ]; then ./bootstrap; elif [ -x bootstrap.sh ]; then ./bootstrap.sh; else autoreconf -ivf; fi # Project-wide extra arguments to be passed to `configure` conf-global: '' # Element-specific extra arguments to be passed to `configure`. conf-local: '' # For backwards compatibility only, do not use. conf-extra: '' conf-cmd: ./configure conf-args: | --prefix=%{prefix} \ --exec-prefix=%{exec_prefix} \ --bindir=%{bindir} \ --sbindir=%{sbindir} \ --sysconfdir=%{sysconfdir} \ --datadir=%{datadir} \ --includedir=%{includedir} \ --libdir=%{libdir} \ --libexecdir=%{libexecdir} \ --localstatedir=%{localstatedir} \ --sharedstatedir=%{sharedstatedir} \ --mandir=%{mandir} \ --infodir=%{infodir} %{conf-extra} %{conf-global} %{conf-local} configure: | %{conf-cmd} %{conf-args} make: make make-install: make -j1 DESTDIR="%{install-root}" install # Set this if the sources cannot handle parallelization. # # notparallel: True config: # Commands for configuring the software # configure-commands: - | %{autogen} - | %{configure} # Commands for building the software # build-commands: - | %{make} # Commands for installing the software into a # destination folder # install-commands: - | %{make-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} # Use max-jobs CPUs for building and enable verbosity environment: MAKEFLAGS: -j%{max-jobs} V: 1 # And dont consider MAKEFLAGS or V as something which may # effect build output. 
environment-nocache: - MAKEFLAGS - V buildstream-1.6.9/buildstream/plugins/elements/cmake.py000066400000000000000000000036161437515270000233160ustar00rootroot00000000000000# # Copyright (C) 2016, 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ cmake - CMake build element =========================== This is a :mod:`BuildElement ` implementation for using the `CMake `_ build system. You will often want to pass additional arguments to the ``cmake`` program for specific configuration options. This should be done on a per-element basis by setting the ``cmake-local`` variable. Here is an example: .. code:: yaml variables: cmake-local: | -DCMAKE_BUILD_TYPE=Debug If you want to pass extra options to ``cmake`` for every element in your project, set the ``cmake-global`` variable in your project.conf file. Here is an example of that: .. code:: yaml elements: cmake: variables: cmake-global: | -DCMAKE_BUILD_TYPE=Release Here is the default configuration for the ``cmake`` element in full: .. literalinclude:: ../../../buildstream/plugins/elements/cmake.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'cmake' kind. class CMakeElement(BuildElement): pass # Plugin entry point def setup(): return CMakeElement buildstream-1.6.9/buildstream/plugins/elements/cmake.yaml000066400000000000000000000026331437515270000236260ustar00rootroot00000000000000# CMake default configuration variables: build-dir: _builddir # Project-wide extra arguments to be passed to `cmake` cmake-global: '' # Element-specific extra arguments to be passed to `cmake`. cmake-local: '' # For backwards compatibility only, do not use. cmake-extra: '' # The cmake generator to use generator: Unix Makefiles cmake-args: | -DCMAKE_INSTALL_PREFIX:PATH="%{prefix}" \ -DCMAKE_INSTALL_LIBDIR:PATH="%{lib}" %{cmake-extra} %{cmake-global} %{cmake-local} cmake: | cmake -B%{build-dir} -H. -G"%{generator}" %{cmake-args} make: cmake --build %{build-dir} -- ${JOBS} make-install: env DESTDIR="%{install-root}" cmake --build %{build-dir} --target install # Set this if the sources cannot handle parallelization. # # notparallel: True config: # Commands for configuring the software # configure-commands: - | %{cmake} # Commands for building the software # build-commands: - | %{make} # Commands for installing the software into a # destination folder # install-commands: - | %{make-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} # Use max-jobs CPUs for building and enable verbosity environment: JOBS: -j%{max-jobs} V: 1 # And dont consider JOBS or V as something which may # effect build output. 
environment-nocache: - JOBS - V buildstream-1.6.9/buildstream/plugins/elements/compose.py000066400000000000000000000173041437515270000237020ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ compose - Compose the output of multiple elements ================================================= This element creates a selective composition of its dependencies. This is normally used at near the end of a pipeline to prepare something for later deployment. Since this element's output includes its dependencies, it may only depend on elements as `build` type dependencies. The default configuration and possible options are as such: .. literalinclude:: ../../../buildstream/plugins/elements/compose.yaml :language: yaml """ import os from buildstream import utils from buildstream import Element, Scope # Element implementation for the 'compose' kind. class ComposeElement(Element): # pylint: disable=attribute-defined-outside-init # The compose element's output is it's dependencies, so # we must rebuild if the dependencies change even when # not in strict build plans. # BST_STRICT_REBUILD = True # Compose artifacts must never have indirect dependencies, # so runtime dependencies are forbidden. BST_FORBID_RDEPENDS = True # This element ignores sources, so we should forbid them from being # added, to reduce the potential for confusion BST_FORBID_SOURCES = True def configure(self, node): self.node_validate(node, [ 'integrate', 'include', 'exclude', 'include-orphans' ]) # We name this variable 'integration' only to avoid # collision with the Element.integrate() method. 
self.integration = self.node_get_member(node, bool, 'integrate') self.include = self.node_get_member(node, list, 'include') self.exclude = self.node_get_member(node, list, 'exclude') self.include_orphans = self.node_get_member(node, bool, 'include-orphans') def preflight(self): pass def get_unique_key(self): key = {'integrate': self.integration, 'include': sorted(self.include), 'orphans': self.include_orphans} if self.exclude: key['exclude'] = sorted(self.exclude) return key def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): require_split = self.include or self.exclude or not self.include_orphans # Stage deps in the sandbox root with self.timed_activity("Staging dependencies", silent_nested=True): self.stage_dependency_artifacts(sandbox, Scope.BUILD) manifest = set() if require_split: with self.timed_activity("Computing split", silent_nested=True): for dep in self.dependencies(Scope.BUILD): files = dep.compute_manifest(include=self.include, exclude=self.exclude, orphans=self.include_orphans) manifest.update(files) basedir = sandbox.get_directory() modified_files = set() removed_files = set() added_files = set() # Run any integration commands provided by the dependencies # once they are all staged and ready if self.integration: with self.timed_activity("Integrating sandbox"): if require_split: # Make a snapshot of all the files before integration-commands are run. snapshot = { f: getmtime(os.path.join(basedir, f)) for f in utils.list_relative_paths(basedir) } for dep in self.dependencies(Scope.BUILD): dep.integrate(sandbox) if require_split: # Calculate added, modified and removed files basedir_contents = set(utils.list_relative_paths(basedir)) for path in manifest: if path in basedir_contents: if path in snapshot: preintegration_mtime = snapshot[path] if preintegration_mtime != getmtime(os.path.join(basedir, path)): modified_files.add(path) else: # If the path appears in the manifest but not the initial snapshot, # it may be a file staged inside a directory symlink. In this case # the path we got from the manifest won't show up in the snapshot # because utils.list_relative_paths() doesn't recurse into symlink # directories. pass elif path in snapshot: removed_files.add(path) for path in basedir_contents: if path not in snapshot: added_files.add(path) self.info("Integration modified {}, added {} and removed {} files" .format(len(modified_files), len(added_files), len(removed_files))) # The remainder of this is expensive, make an early exit if # we're not being selective about what is to be included. if not require_split: return '/' # Do we want to force include files which were modified by # the integration commands, even if they were not added ? # manifest.update(added_files) manifest.difference_update(removed_files) # XXX We should be moving things outside of the build sandbox # instead of into a subdir. The element assemble() method should # support this in some way. # installdir = os.path.join(basedir, 'buildstream', 'install') os.makedirs(installdir, exist_ok=True) # We already saved the manifest for created files in the integration phase, # now collect the rest of the manifest. 
# lines = [] if self.include: lines.append("Including files from domains: " + ", ".join(self.include)) else: lines.append("Including files from all domains") if self.exclude: lines.append("Excluding files from domains: " + ", ".join(self.exclude)) if self.include_orphans: lines.append("Including orphaned files") else: lines.append("Excluding orphaned files") detail = "\n".join(lines) with self.timed_activity("Creating composition", detail=detail, silent_nested=True): self.info("Composing {} files".format(len(manifest))) utils.link_files(basedir, installdir, files=manifest) # And we're done return os.path.join(os.sep, 'buildstream', 'install') # Like os.path.getmtime(), but doesnt explode on symlinks # def getmtime(path): stat = os.lstat(path) return stat.st_mtime # Plugin entry point def setup(): return ComposeElement buildstream-1.6.9/buildstream/plugins/elements/compose.yaml000066400000000000000000000016701437515270000242130ustar00rootroot00000000000000 # Compose element configuration config: # Whether to run the integration commands for the # staged dependencies. # integrate: True # A list of domains to include from each artifact, as # they were defined in the element's 'split-rules'. # # Since domains can be added, it is not an error to # specify domains which may not exist for all of the # elements in this composition. # # The default empty list indicates that all domains # from each dependency should be included. # include: [] # A list of domains to exclude from each artifact, as # they were defined in the element's 'split-rules'. # # In the case that a file is spoken for by a domain # in the 'include' list and another in the 'exclude' # list, then the file will be excluded. exclude: [] # Whether to include orphan files which are not # included by any of the 'split-rules' present on # a given element. # include-orphans: True buildstream-1.6.9/buildstream/plugins/elements/distutils.py000066400000000000000000000024271437515270000242610ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ distutils - Python distutils element ==================================== A :mod:`BuildElement ` implementation for using python distutils The distutils default configuration: .. literalinclude:: ../../../buildstream/plugins/elements/distutils.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the python 'distutils' kind. 
class DistutilsElement(BuildElement): pass # Plugin entry point def setup(): return DistutilsElement buildstream-1.6.9/buildstream/plugins/elements/distutils.yaml000066400000000000000000000014571437515270000245750ustar00rootroot00000000000000# Default python distutils configuration variables: # When building for python2 distutils, simply # override this in the element declaration python: python3 python-build: | %{python} setup.py build install-args: | --prefix "%{prefix}" \ --root "%{install-root}" python-install: | %{python} setup.py install %{install-args} config: # Commands for configuring the software # configure-commands: [] # Commands for building the software # build-commands: - | %{python-build} # Commands for installing the software into a # destination folder # install-commands: - | %{python-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} - | %{fix-pyc-timestamps} buildstream-1.6.9/buildstream/plugins/elements/filter.py000066400000000000000000000113251437515270000235170ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Jonathan Maw """ filter - Extract a subset of files from another element ======================================================= This filters another element by producing an output that is a subset of the filtered element. To specify the element to filter, specify it as the one and only build dependency to filter. See :ref:`Dependencies ` for what dependencies are and how to specify them. Dependencies aside from the filtered element may be specified, but they must be runtime dependencies only. This can be useful to propagate runtime dependencies forward from this filter element onto its reverse dependencies. When workspaces are opened, closed or reset on this element, or this element is tracked, instead of erroring due to a lack of sources, this element will transparently pass on the command to its sole build-dependency. The default configuration and possible options are as such: .. literalinclude:: ../../../buildstream/plugins/elements/filter.yaml :language: yaml """ from buildstream import Element, ElementError, Scope class FilterElement(Element): # pylint: disable=attribute-defined-outside-init BST_ARTIFACT_VERSION = 1 # The filter element's output is its dependencies, so # we must rebuild if the dependencies change even when # not in strict build plans. 
BST_STRICT_REBUILD = True # This element ignores sources, so we should forbid them from being # added, to reduce the potential for confusion BST_FORBID_SOURCES = True def configure(self, node): self.node_validate(node, [ 'include', 'exclude', 'include-orphans' ]) self.include = self.node_get_member(node, list, 'include') self.exclude = self.node_get_member(node, list, 'exclude') self.include_orphans = self.node_get_member(node, bool, 'include-orphans') def preflight(self): # Exactly one build-depend is permitted build_deps = list(self.dependencies(Scope.BUILD, recurse=False)) if len(build_deps) != 1: detail = "Full list of build-depends:\n" deps_list = " \n".join([x.name for x in build_deps]) detail += deps_list raise ElementError("{}: {} element must have exactly 1 build-dependency, actually have {}" .format(self, type(self).__name__, len(build_deps)), detail=detail, reason="filter-bdepend-wrong-count") # That build-depend must not also be a runtime-depend runtime_deps = list(self.dependencies(Scope.RUN, recurse=False)) if build_deps[0] in runtime_deps: detail = "Full list of runtime depends:\n" deps_list = " \n".join([x.name for x in runtime_deps]) detail += deps_list raise ElementError("{}: {} element's build dependency must not also be a runtime dependency" .format(self, type(self).__name__), detail=detail, reason="filter-bdepend-also-rdepend") def get_unique_key(self): key = { 'include': sorted(self.include), 'exclude': sorted(self.exclude), 'orphans': self.include_orphans, } return key def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): with self.timed_activity("Staging artifact", silent_nested=True): for dep in self.dependencies(Scope.BUILD, recurse=False): dep.stage_artifact(sandbox, include=self.include, exclude=self.exclude, orphans=self.include_orphans) return "" def _get_source_element(self): # Filter elements act as proxies for their sole build-dependency build_deps = list(self.dependencies(Scope.BUILD, recurse=False)) assert len(build_deps) == 1 output_elm = build_deps[0]._get_source_element() return output_elm def setup(): return FilterElement buildstream-1.6.9/buildstream/plugins/elements/filter.yaml000066400000000000000000000015241437515270000240310ustar00rootroot00000000000000 # Filter element configuration config: # A list of domains to include from each artifact, as # they were defined in the element's 'split-rules'. # # Since domains can be added, it is not an error to # specify domains which may not exist for all of the # elements in this composition. # # The default empty list indicates that all domains # from each dependency should be included. # include: [] # A list of domains to exclude from each artifact, as # they were defined in the element's 'split-rules'. # # In the case that a file is spoken for by a domain # in the 'include' list and another in the 'exclude' # list, then the file will be excluded. exclude: [] # Whether to include orphan files which are not # included by any of the 'split-rules' present on # a given element. # include-orphans: False buildstream-1.6.9/buildstream/plugins/elements/import.py000066400000000000000000000103021437515270000235360ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. 
# # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ import - Import sources directly ================================ Import elements produce artifacts directly from its sources without any kind of processing. These are typically used to import an SDK to build on top of or to overlay your build with some configuration data. The empty configuration is as such: .. literalinclude:: ../../../buildstream/plugins/elements/import.yaml :language: yaml """ import os import shutil from buildstream import Element, BuildElement, ElementError # Element implementation for the 'import' kind. class ImportElement(BuildElement): # pylint: disable=attribute-defined-outside-init def configure(self, node): self.source = self.node_subst_member(node, 'source') self.target = self.node_subst_member(node, 'target') def preflight(self): # Assert that we have at least one source to fetch. sources = list(self.sources()) if not sources: raise ElementError("{}: An import element must have at least one source.".format(self)) def get_unique_key(self): return { 'source': self.source, 'target': self.target } def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): # Stage sources into the input directory # Do not mount workspaces as the files are copied from outside the sandbox self._stage_sources_in_sandbox(sandbox, 'input', mount_workspaces=False) rootdir = sandbox.get_directory() inputdir = os.path.join(rootdir, 'input') outputdir = os.path.join(rootdir, 'output') # The directory to grab inputdir = os.path.join(inputdir, self.source.lstrip(os.sep)) inputdir = inputdir.rstrip(os.sep) # The output target directory outputdir = os.path.join(outputdir, self.target.lstrip(os.sep)) outputdir = outputdir.rstrip(os.sep) # Ensure target directory parent os.makedirs(os.path.dirname(outputdir), exist_ok=True) if not os.path.exists(inputdir): raise ElementError("{}: No files were found inside directory '{}'" .format(self, self.source)) # Move it over shutil.move(inputdir, outputdir) # And we're done return '/output' def prepare(self, sandbox): # We inherit a non-default prepare from BuildElement. Element.prepare(self, sandbox) def generate_script(self): build_root = self.get_variable('build-root') install_root = self.get_variable('install-root') commands = [] # The directory to grab inputdir = os.path.join(build_root, self.normal_name, self.source.lstrip(os.sep)) inputdir = inputdir.rstrip(os.sep) # The output target directory outputdir = os.path.join(install_root, self.target.lstrip(os.sep)) outputdir = outputdir.rstrip(os.sep) # Ensure target directory parent exists but target directory doesn't commands.append("mkdir -p {}".format(os.path.dirname(outputdir))) commands.append("[ ! 
-e {} ] || rmdir {}".format(outputdir, outputdir)) # Move it over commands.append("mv {} {}".format(inputdir, outputdir)) script = "" for cmd in commands: script += "(set -ex; {}\n) || exit 1\n".format(cmd) return script # Plugin entry point def setup(): return ImportElement buildstream-1.6.9/buildstream/plugins/elements/import.yaml000066400000000000000000000006711437515270000240600ustar00rootroot00000000000000# The import element simply stages the given sources # directly to the root of the sandbox and then collects # the output to create an output artifact. # config: # By default we collect everything staged, specify a # directory here to output only a subset of the staged # input sources. source: / # Prefix the output with an optional directory, by default # the input is found at the root of the produced artifact. target: / buildstream-1.6.9/buildstream/plugins/elements/junction.py000066400000000000000000000125661437515270000240730ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Jürg Billeter """ junction - Integrate subprojects ================================ This element is a link to another BuildStream project. It allows integration of multiple projects into a single pipeline. Overview -------- .. code:: yaml kind: junction # Specify the BuildStream project source sources: - kind: git url: upstream:projectname.git track: master ref: d0b38561afb8122a3fc6bafc5a733ec502fcaed6 # Specify the junction configuration config: # Override project options options: machine_arch: "%{machine_arch}" debug: True # Optionally look in a subpath of the source repository for the project path: projects/hello .. note:: Junction elements may not specify any dependencies as they are simply links to other projects and are not in the dependency graph on their own. With a junction element in place, local elements can depend on elements in the other BuildStream project using the additional ``junction`` attribute in the dependency dictionary: .. code:: yaml depends: - junction: toolchain.bst filename: gcc.bst type: build While junctions are elements, only a limited set of element operations is supported. They can be tracked and fetched like other elements. However, junction elements do not produce any artifacts, which means that they cannot be built or staged. It also means that another element cannot depend on a junction element itself. .. note:: BuildStream does not implicitly track junction elements. This means that if we were to invoke: `bst build --track-all ELEMENT` on an element which uses a junction element, the ref of the junction element will not automatically be updated if a more recent version exists. Therefore, if you require the most up-to-date version of a subproject, you must explicitly track the junction element by invoking: `bst track JUNCTION_ELEMENT`. Furthermore, elements within the subproject are also not tracked by default. 
For this, we must specify the `--track-cross-junctions` option. This option must be preceeded by `--track ELEMENT` or `--track-all`. Sources ------- ``bst show`` does not implicitly fetch junction sources if they haven't been cached yet. However, they can be fetched explicitly: .. code:: bst fetch junction.bst Other commands such as ``bst build`` implicitly fetch junction sources. Options ------- .. code:: yaml options: machine_arch: "%{machine_arch}" debug: True Junctions can configure options of the linked project. Options are never implicitly inherited across junctions, however, variables can be used to explicitly assign the same value to a subproject option. .. _core_junction_nested: Nested Junctions ---------------- Junctions can be nested. That is, subprojects are allowed to have junctions on their own. Nested junctions in different subprojects may point to the same project, however, in most use cases the same project should be loaded only once. BuildStream uses the junction element name as key to determine which junctions to merge. It is recommended that the name of a junction is set to the same as the name of the linked project. As the junctions may differ in source version and options, BuildStream cannot simply use one junction and ignore the others. Due to this, BuildStream requires the user to resolve possibly conflicting nested junctions by creating a junction with the same name in the top-level project, which then takes precedence. """ from collections.abc import Mapping from buildstream import Element from buildstream._pipeline import PipelineError # Element implementation for the 'junction' kind. class JunctionElement(Element): # pylint: disable=attribute-defined-outside-init # Junctions are not allowed any dependencies BST_FORBID_BDEPENDS = True BST_FORBID_RDEPENDS = True def configure(self, node): self.path = self.node_get_member(node, str, 'path', default='') self.options = self.node_get_member(node, Mapping, 'options', default={}) def preflight(self): pass def get_unique_key(self): # Junctions do not produce artifacts. get_unique_key() implementation # is still required for `bst fetch`. return 1 def configure_sandbox(self, sandbox): raise PipelineError("Cannot build junction elements") def stage(self, sandbox): raise PipelineError("Cannot stage junction elements") def generate_script(self): raise PipelineError("Cannot build junction elements") def assemble(self, sandbox): raise PipelineError("Cannot build junction elements") # Plugin entry point def setup(): return JunctionElement buildstream-1.6.9/buildstream/plugins/elements/make.py000066400000000000000000000025431437515270000231510ustar00rootroot00000000000000# # Copyright Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Ed Baunton """ make - Make build element ========================= This is a :mod:`BuildElement ` implementation for using GNU make based build. .. 
note:: The ``make`` element is available since :ref:`format version 9 ` Here is the default configuration for the ``make`` element in full: .. literalinclude:: ../../../buildstream/plugins/elements/make.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'make' kind. class MakeElement(BuildElement): pass # Plugin entry point def setup(): return MakeElement buildstream-1.6.9/buildstream/plugins/elements/make.yaml000066400000000000000000000015641437515270000234650ustar00rootroot00000000000000# make default configurations variables: make-args: >- PREFIX="%{prefix}" make-install-args: >- %{make-args} DESTDIR="%{install-root}" install make: make %{make-args} make-install: make -j1 %{make-install-args} # Set this if the sources cannot handle parallelization. # # notparallel: True config: # Commands for building the software # build-commands: - | %{make} # Commands for installing the software into a # destination folder # install-commands: - | %{make-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} # Use max-jobs CPUs for building and enable verbosity environment: MAKEFLAGS: -j%{max-jobs} V: 1 # And dont consider MAKEFLAGS or V as something which may # effect build output. environment-nocache: - MAKEFLAGS - V buildstream-1.6.9/buildstream/plugins/elements/makemaker.py000066400000000000000000000024601437515270000241670ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ makemaker - Perl MakeMaker build element ======================================== A :mod:`BuildElement ` implementation for using the Perl ExtUtil::MakeMaker build system The MakeMaker default configuration: .. literalinclude:: ../../../buildstream/plugins/elements/makemaker.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'makemaker' kind. class MakeMakerElement(BuildElement): pass # Plugin entry point def setup(): return MakeMakerElement buildstream-1.6.9/buildstream/plugins/elements/makemaker.yaml000066400000000000000000000020511437515270000244750ustar00rootroot00000000000000# Default configuration for the Perl ExtUtil::MakeMaker # build system variables: # To install perl distributions into the correct location # in our chroot we need to set PREFIX to / # in the configure-commands. # # The mapping between PREFIX and the final installation # directories is complex and depends upon the configuration # of perl see, # https://metacpan.org/pod/distribution/perl/INSTALL#Installation-Directories # and ExtUtil::MakeMaker's documentation for more details. 
configure: | perl Makefile.PL PREFIX=%{install-root}%{prefix} make: make make-install: make install config: # Commands for configuring the software # configure-commands: - | %{configure} # Commands for building the software # build-commands: - | %{make} # Commands for installing the software into a # destination folder # install-commands: - | %{make-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} buildstream-1.6.9/buildstream/plugins/elements/manual.py000066400000000000000000000025001437515270000235020ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ manual - Manual build element ============================= The most basic build element does nothing but allows users to add custom build commands to the array understood by the :mod:`BuildElement ` The empty configuration is as such: .. literalinclude:: ../../../buildstream/plugins/elements/manual.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'manual' kind. class ManualElement(BuildElement): pass # Plugin entry point def setup(): return ManualElement buildstream-1.6.9/buildstream/plugins/elements/manual.yaml000066400000000000000000000014541437515270000240230ustar00rootroot00000000000000# No variables added for the manual element by default, set # this if you plan to use make, and the sources cannot handle # parallelization. # # variables: # # notparallel: True # Manual build element does not provide any default # build commands config: # Commands for configuring the software # configure-commands: [] # Commands for building the software # build-commands: [] # Commands for installing the software into a # destination folder # install-commands: [] # Commands for stripping installed binaries # strip-commands: - | %{strip-binaries} # Use max-jobs CPUs for building and enable verbosity environment: MAKEFLAGS: -j%{max-jobs} V: 1 # And dont consider MAKEFLAGS or V as something which may # effect build output. environment-nocache: - MAKEFLAGS - V buildstream-1.6.9/buildstream/plugins/elements/meson.py000066400000000000000000000034301437515270000233510ustar00rootroot00000000000000# Copyright (C) 2017 Patrick Griffis # Copyright (C) 2018 Codethink Ltd. # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. 
If not, see . """ meson - Meson build element =========================== This is a :mod:`BuildElement ` implementation for using `Meson `_ build scripts. You will often want to pass additional arguments to ``meson``. This should be done on a per-element basis by setting the ``meson-local`` variable. Here is an example: .. code:: yaml variables: meson-local: | -Dmonkeys=yes If you want to pass extra options to ``meson`` for every element in your project, set the ``meson-global`` variable in your project.conf file. Here is an example of that: .. code:: yaml elements: meson: variables: meson-global: | -Dmonkeys=always Here is the default configuration for the ``meson`` element in full: .. literalinclude:: ../../../buildstream/plugins/elements/meson.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'meson' kind. class MesonElement(BuildElement): pass # Plugin entry point def setup(): return MesonElement buildstream-1.6.9/buildstream/plugins/elements/meson.yaml000066400000000000000000000031011437515270000236560ustar00rootroot00000000000000# Meson default configuration variables: build-dir: _builddir # Project-wide extra arguments to be passed to `meson` meson-global: '' # Element-specific extra arguments to be passed to `meson`. meson-local: '' # For backwards compatibility only, do not use. meson-extra: '' meson-args: | --prefix=%{prefix} \ --bindir=%{bindir} \ --sbindir=%{sbindir} \ --sysconfdir=%{sysconfdir} \ --datadir=%{datadir} \ --includedir=%{includedir} \ --libdir=%{libdir} \ --libexecdir=%{libexecdir} \ --localstatedir=%{localstatedir} \ --sharedstatedir=%{sharedstatedir} \ --mandir=%{mandir} \ --infodir=%{infodir} %{meson-extra} %{meson-global} %{meson-local} meson: meson %{build-dir} %{meson-args} ninja: | ninja -j ${NINJAJOBS} -C %{build-dir} ninja-install: | env DESTDIR="%{install-root}" ninja -C %{build-dir} install # Set this if the sources cannot handle parallelization. # # notparallel: True config: # Commands for configuring the software # configure-commands: - | %{meson} # Commands for building the software # build-commands: - | %{ninja} # Commands for installing the software into a # destination folder # install-commands: - | %{ninja-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} # Use max-jobs CPUs for building environment: NINJAJOBS: | %{max-jobs} # And dont consider NINJAJOBS as something which may # effect build output. environment-nocache: - NINJAJOBS buildstream-1.6.9/buildstream/plugins/elements/modulebuild.py000066400000000000000000000025011437515270000245330ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Van Berkom """ modulebuild - Perl Module::Build build element ============================================== A :mod:`BuildElement ` implementation for using the Perl Module::Build build system The modulebuild default configuration: .. literalinclude:: ../../../buildstream/plugins/elements/modulebuild.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'modulebuild' kind. class ModuleBuildElement(BuildElement): pass # Plugin entry point def setup(): return ModuleBuildElement buildstream-1.6.9/buildstream/plugins/elements/modulebuild.yaml000066400000000000000000000020671437515270000250540ustar00rootroot00000000000000# Default configuration for the Perl Module::Build # build system. variables: # To install perl distributions into the correct location # in our chroot we need to set PREFIX to / # in the configure-commands. # # The mapping between PREFIX and the final installation # directories is complex and depends upon the configuration # of perl see, # https://metacpan.org/pod/distribution/perl/INSTALL#Installation-Directories # and ExtUtil::MakeMaker's documentation for more details. configure: | perl Build.PL --prefix "%{install-root}%{prefix}" perl-build: ./Build perl-install: ./Build install config: # Commands for configuring the software # configure-commands: - | %{configure} # Commands for building the software # build-commands: - | %{perl-build} # Commands for installing the software into a # destination folder # install-commands: - | %{perl-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} buildstream-1.6.9/buildstream/plugins/elements/pip.py000066400000000000000000000023201437515270000230150ustar00rootroot00000000000000# # Copyright (C) 2017 Mathieu Bridon # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Mathieu Bridon """ pip - Pip build element ======================= A :mod:`BuildElement ` implementation for installing Python modules with pip The pip default configuration: .. literalinclude:: ../../../buildstream/plugins/elements/pip.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'pip' kind. class PipElement(BuildElement): pass # Plugin entry point def setup(): return PipElement buildstream-1.6.9/buildstream/plugins/elements/pip.yaml000066400000000000000000000006761437515270000233430ustar00rootroot00000000000000# Pip default configurations variables: pip: pip config: configure-commands: [] build-commands: [] # Commands for installing the software into a # destination folder # install-commands: - | %{pip} install --no-deps --root=%{install-root} --prefix=%{prefix} . 
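  # The trailing '.' above tells pip to build and install the package found
  # in the current (build) directory, rather than resolving a requirement
  # name from an index.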
# Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} - | %{fix-pyc-timestamps} buildstream-1.6.9/buildstream/plugins/elements/qmake.py000066400000000000000000000023601437515270000233270ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ qmake - QMake build element =========================== A :mod:`BuildElement ` implementation for using the qmake build system The qmake default configuration: .. literalinclude:: ../../../buildstream/plugins/elements/qmake.yaml :language: yaml """ from buildstream import BuildElement # Element implementation for the 'qmake' kind. class QMakeElement(BuildElement): pass # Plugin entry point def setup(): return QMakeElement buildstream-1.6.9/buildstream/plugins/elements/qmake.yaml000066400000000000000000000015611437515270000236430ustar00rootroot00000000000000# QMake default configuration variables: qmake: qmake -makefile make: make make-install: make -j1 INSTALL_ROOT="%{install-root}" install # Set this if the sources cannot handle parallelization. # # notparallel: True config: # Commands for configuring the software # configure-commands: - | %{qmake} # Commands for building the software # build-commands: - | %{make} # Commands for installing the software into a # destination folder # install-commands: - | %{make-install} # Commands for stripping debugging information out of # installed binaries # strip-commands: - | %{strip-binaries} # Use max-jobs CPUs for building and enable verbosity environment: MAKEFLAGS: -j%{max-jobs} V: 1 # And dont consider MAKEFLAGS or V as something which may # effect build output. environment-nocache: - MAKEFLAGS - V buildstream-1.6.9/buildstream/plugins/elements/script.py000066400000000000000000000045321437515270000235400ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jonathan Maw """ script - Run scripts to create output ===================================== This element allows one to run some commands to mutate the input and create some output. .. note:: Script elements may only specify build dependencies. See :ref:`the format documentation ` for more detail on specifying dependencies. 
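As a minimal sketch (the element name ``base.bst`` and the commands below are
illustrative placeholders, not taken from this project), a script element
stages its build dependencies into the sandbox, runs its commands, and
collects whatever they leave under ``%{install-root}``:

.. code:: yaml

   kind: script

   # Script elements may only specify build dependencies
   depends:
   - filename: base.bst
     type: build

   config:
     commands:
     - mkdir -p %{install-root}
     - echo hello > %{install-root}/hello.txt
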
The default configuration and possible options are as such: .. literalinclude:: ../../../buildstream/plugins/elements/script.yaml :language: yaml """ import buildstream # Element implementation for the 'script' kind. class ScriptElement(buildstream.ScriptElement): # pylint: disable=attribute-defined-outside-init def configure(self, node): for n in self.node_get_member(node, list, 'layout', []): dst = self.node_subst_member(n, 'destination') elm = self.node_subst_member(n, 'element', None) self.layout_add(elm, dst) self.node_validate(node, [ 'commands', 'root-read-only', 'layout', 'create-dev-shm' ]) cmds = self.node_subst_list(node, "commands") self.add_commands("commands", cmds) self.set_work_dir() self.set_install_root() self.set_root_read_only(self.node_get_member(node, bool, 'root-read-only', False)) self.set_create_dev_shm(self.node_get_member(node, bool, 'create-dev-shm', False)) # Plugin entry point def setup(): return ScriptElement buildstream-1.6.9/buildstream/plugins/elements/script.yaml000066400000000000000000000015221437515270000240460ustar00rootroot00000000000000# Common script element variables variables: # Defines the directory commands will be run from. cwd: / # Script element configuration config: # Defines whether to run the sandbox with '/' read-only. # It is recommended to set root as read-only wherever possible. root-read-only: False # Defines whether we should mount a tmpfs filesystem at /dev/shm # create-dev-shm: False # Defines where to stage elements which are direct or indirect dependencies. # By default, all direct dependencies are staged to '/'. # This is also commonly used to take one element as an environment # containing the tools used to operate on the other element. # layout: # - element: foo-tools.bst # destination: / # - element: foo-system.bst # destination: %{build-root} # List of commands to run in the sandbox. commands: [] buildstream-1.6.9/buildstream/plugins/elements/stack.py000066400000000000000000000044361437515270000233440ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ stack - Symbolic Element for dependency grouping ================================================ Stack elements are simply a symbolic element used for representing a logical group of elements. """ import os from buildstream import Element # Element implementation for the 'stack' kind. class StackElement(Element): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): # We do not add anything to the build, only our dependencies # do, so our unique key is just a constant. return 1 def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): # Just create a dummy empty artifact, its existence is a statement # that all this stack's dependencies are built. 
rootdir = sandbox.get_directory() # XXX FIXME: This is currently needed because the artifact # cache wont let us commit an empty artifact. # # We need to fix the artifact cache so that it stores # the actual artifact data in a subdirectory, then we # will be able to store some additional state in the # artifact cache, and we can also remove this hack. outputdir = os.path.join(rootdir, 'output', 'bst') # Ensure target directory parent os.makedirs(os.path.dirname(outputdir), exist_ok=True) # And we're done return '/output' # Plugin entry point def setup(): return StackElement buildstream-1.6.9/buildstream/plugins/sources/000077500000000000000000000000001437515270000215255ustar00rootroot00000000000000buildstream-1.6.9/buildstream/plugins/sources/__init__.py000066400000000000000000000000001437515270000236240ustar00rootroot00000000000000buildstream-1.6.9/buildstream/plugins/sources/_downloadablefilesource.py000066400000000000000000000223041437515270000267530ustar00rootroot00000000000000"""A base abstract class for source implementations which download a file""" import os import urllib.request import urllib.error import contextlib import shutil import netrc from buildstream import Source, SourceError, Consistency from buildstream import utils class _NetrcFTPOpener(urllib.request.FTPHandler): def __init__(self, netrc_config): self.netrc = netrc_config def _split(self, netloc): userpass, hostport = urllib.parse.splituser(netloc) host, port = urllib.parse.splitport(hostport) if userpass: user, passwd = urllib.parse.splitpasswd(userpass) else: user = None passwd = None return host, port, user, passwd def _unsplit(self, host, port, user, passwd): if port: host = '{}:{}'.format(host, port) if user: if passwd: user = '{}:{}'.format(user, passwd) host = '{}@{}'.format(user, host) return host def ftp_open(self, req): host, port, user, passwd = self._split(req.host) if user is None and self.netrc: entry = self.netrc.authenticators(host) if entry: user, _, passwd = entry req.host = self._unsplit(host, port, user, passwd) return super().ftp_open(req) class _NetrcPasswordManager: def __init__(self, netrc_config): self.netrc = netrc_config def add_password(self, realm, uri, user, passwd): pass def find_user_password(self, realm, authuri): if not self.netrc: return None, None parts = urllib.parse.urlsplit(authuri) entry = self.netrc.authenticators(parts.hostname) if not entry: return None, None else: login, _, password = entry return login, password class DownloadableFileSource(Source): # pylint: disable=attribute-defined-outside-init COMMON_CONFIG_KEYS = Source.COMMON_CONFIG_KEYS + ['url', 'ref', 'etag'] __urlopener = None def configure(self, node): self.original_url = self.node_get_member(node, str, 'url') self.ref = self.node_get_member(node, str, 'ref', None) self.url = self.translate_url(self.original_url) self._warn_deprecated_etag(node) def preflight(self): return def get_unique_key(self): return [self.original_url, self.ref] def get_consistency(self): if self.ref is None: return Consistency.INCONSISTENT if os.path.isfile(self._get_mirror_file()): return Consistency.CACHED else: return Consistency.RESOLVED def load_ref(self, node): self.ref = self.node_get_member(node, str, 'ref', None) self._warn_deprecated_etag(node) def get_ref(self): return self.ref def set_ref(self, ref, node): node['ref'] = self.ref = ref def track(self): # there is no 'track' field in the source to determine what/whether # or not to update refs, because tracking a ref is always a conscious # decision by the user. 
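        # Tracking simply downloads the file again and uses its sha256sum as
        # the new ref, warning below if it differs from the currently
        # configured ref.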
with self.timed_activity("Tracking {}".format(self.url), silent_nested=True): new_ref = self._ensure_mirror() if self.ref and self.ref != new_ref: detail = "When tracking, new ref differs from current ref:\n" \ + " Tracked URL: {}\n".format(self.url) \ + " Current ref: {}\n".format(self.ref) \ + " New ref: {}\n".format(new_ref) self.warn("Potential man-in-the-middle attack!", detail=detail) return new_ref def fetch(self): # Just a defensive check, it is impossible for the # file to be already cached because Source.fetch() will # not be called if the source is already Consistency.CACHED. # if os.path.isfile(self._get_mirror_file()): return # pragma: nocover # Download the file, raise hell if the sha256sums don't match, # and mirror the file otherwise. with self.timed_activity("Fetching {}".format(self.url), silent_nested=True): sha256 = self._ensure_mirror() if sha256 != self.ref: raise SourceError("File downloaded from {} has sha256sum '{}', not '{}'!" .format(self.url, sha256, self.ref)) def _warn_deprecated_etag(self, node): etag = self.node_get_member(node, str, 'etag', None) if etag: provenance = self.node_provenance(node, member_name='etag') self.warn('{} "etag" is deprecated and ignored.'.format(provenance)) def _get_etag(self, ref): etagfilename = os.path.join(self._get_mirror_dir(), '{}.etag'.format(ref)) if os.path.exists(etagfilename): with open(etagfilename, 'r') as etagfile: return etagfile.read() return None def _store_etag(self, ref, etag): etagfilename = os.path.join(self._get_mirror_dir(), '{}.etag'.format(ref)) with utils.save_file_atomic(etagfilename) as etagfile: etagfile.write(etag) def _ensure_mirror(self): # Downloads from the url and caches it according to its sha256sum. try: with self.tempdir() as td: default_name = os.path.basename(self.url) request = urllib.request.Request(self.url) request.add_header('Accept', '*/*') request.add_header('User-Agent', 'BuildStream/1') # We do not use etag in case what we have in cache is # not matching ref in order to be able to recover from # corrupted download. if self.ref: etag = self._get_etag(self.ref) # Do not re-download the file if the ETag matches. if etag and self.get_consistency() == Consistency.CACHED: request.add_header('If-None-Match', etag) opener = self.__get_urlopener() with contextlib.closing(opener.open(request)) as response: info = response.info() # some servers don't honor the 'If-None-Match' header if self.ref and etag and info["ETag"] == etag: return self.ref etag = info["ETag"] filename = info.get_filename(default_name) filename = os.path.basename(filename) local_file = os.path.join(td, filename) with open(local_file, 'wb') as dest: shutil.copyfileobj(response, dest) # Make sure url-specific mirror dir exists. if not os.path.isdir(self._get_mirror_dir()): os.makedirs(self._get_mirror_dir()) # Store by sha256sum sha256 = utils.sha256sum(local_file) # Even if the file already exists, move the new file over. # In case the old file was corrupted somehow. os.rename(local_file, self._get_mirror_file(sha256)) if etag: self._store_etag(sha256, etag) return sha256 except urllib.error.HTTPError as e: if e.code == 304: # 304 Not Modified. # Because we use etag only for matching ref, currently specified ref is what # we would have downloaded. 
return self.ref raise SourceError("{}: Error mirroring {}: {}" .format(self, self.url, e), temporary=True) from e except (urllib.error.URLError, urllib.error.ContentTooShortError, OSError) as e: raise SourceError("{}: Error mirroring {}: {}" .format(self, self.url, e), temporary=True) from e def _get_mirror_dir(self): return os.path.join(self.get_mirror_directory(), utils.url_directory_name(self.original_url)) def _get_mirror_file(self, sha=None): return os.path.join(self._get_mirror_dir(), sha or self.ref) def __get_urlopener(self): if not DownloadableFileSource.__urlopener: try: netrc_config = netrc.netrc() except OSError: # If the .netrc file was not found, FileNotFoundError will be # raised, but OSError will be raised directly by the netrc package # in the case that $HOME is not set. # # This will catch both cases. # DownloadableFileSource.__urlopener = urllib.request.build_opener() except netrc.NetrcParseError as e: self.warn('{}: While reading .netrc: {}'.format(self, e)) return urllib.request.build_opener() else: netrc_pw_mgr = _NetrcPasswordManager(netrc_config) http_auth = urllib.request.HTTPBasicAuthHandler(netrc_pw_mgr) ftp_handler = _NetrcFTPOpener(netrc_config) DownloadableFileSource.__urlopener = urllib.request.build_opener(http_auth, ftp_handler) return DownloadableFileSource.__urlopener buildstream-1.6.9/buildstream/plugins/sources/bzr.py000066400000000000000000000174741437515270000227110ustar00rootroot00000000000000# Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Jonathan Maw """ bzr - stage files from a bazaar repository ========================================== **Host dependencies:** * bzr **Usage:** .. code:: yaml # Specify the bzr source kind kind: bzr # Optionally specify a relative staging directory # directory: path/to/stage # Specify the bzr url. Bazaar URLs come in many forms, see # `bzr help urlspec` for more information. Using an alias defined # in your project configuration is encouraged. url: https://launchpad.net/bzr # Specify the tracking branch. This is mandatory, as bzr cannot identify # an individual revision outside its branch. bzr URLs that omit the branch # name implicitly specify the trunk branch, but bst requires this to be # explicit. track: trunk # Specify the ref. This is a revision number. This is usually a decimal, # but revisions on a branch are of the form # .. # e.g. 6622.1.6. # The ref must be specified to build, and 'bst track' will update the # revision number to the one on the tip of the branch specified in 'track'. 
ref: 6622 """ import os import shutil import fcntl from contextlib import contextmanager from buildstream import Source, SourceError, Consistency from buildstream import utils class BzrSource(Source): # pylint: disable=attribute-defined-outside-init def configure(self, node): self.node_validate(node, ['url', 'track', 'ref'] + Source.COMMON_CONFIG_KEYS) self.original_url = self.node_get_member(node, str, 'url') self.tracking = self.node_get_member(node, str, 'track') self.ref = self.node_get_member(node, str, 'ref', None) self.url = self.translate_url(self.original_url) def preflight(self): # Check if bzr is installed, get the binary at the same time. self.host_bzr = utils.get_host_tool('bzr') def get_unique_key(self): return [self.original_url, self.tracking, self.ref] def get_consistency(self): if self.ref is None or self.tracking is None: return Consistency.INCONSISTENT # Lock for the _check_ref() with self._locked(): if self._check_ref(): return Consistency.CACHED else: return Consistency.RESOLVED def load_ref(self, node): self.ref = self.node_get_member(node, str, 'ref', None) def get_ref(self): return self.ref def set_ref(self, ref, node): node['ref'] = self.ref = ref def track(self): with self.timed_activity("Tracking {}".format(self.url), silent_nested=True), self._locked(): self._ensure_mirror(skip_ref_check=True) ret, out = self.check_output([self.host_bzr, "version-info", "--custom", "--template={revno}", self._get_branch_dir()], fail="Failed to read the revision number at '{}'" .format(self._get_branch_dir())) if ret != 0: raise SourceError("{}: Failed to get ref for tracking {}".format(self, self.tracking)) return out def fetch(self): with self.timed_activity("Fetching {}".format(self.url), silent_nested=True), self._locked(): self._ensure_mirror() def stage(self, directory): self.call([self.host_bzr, "checkout", "--lightweight", "--revision=revno:{}".format(self.ref), self._get_branch_dir(), directory], fail="Failed to checkout revision {} from branch {} to {}" .format(self.ref, self._get_branch_dir(), directory)) def init_workspace(self, directory): url = os.path.join(self.url, self.tracking) with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True): # Checkout from the cache self.call([self.host_bzr, "branch", "--use-existing-dir", "--revision=revno:{}".format(self.ref), self._get_branch_dir(), directory], fail="Failed to branch revision {} from branch {} to {}" .format(self.ref, self._get_branch_dir(), directory)) # Switch the parent branch to the source's origin self.call([self.host_bzr, "switch", "--directory={}".format(directory), url], fail="Failed to switch workspace's parent branch to {}".format(url)) # _locked() # # This context manager ensures exclusive access to the # bzr repository. 
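    # The lock is an advisory fcntl lock on a per-URL lock file, so that
    # concurrent BuildStream processes sharing the same mirror directory
    # serialize their bzr operations.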
# @contextmanager def _locked(self): lockdir = os.path.join(self.get_mirror_directory(), 'locks') lockfile = os.path.join( lockdir, utils.url_directory_name(self.original_url) + '.lock' ) os.makedirs(lockdir, exist_ok=True) with open(lockfile, 'w') as lock: fcntl.flock(lock, fcntl.LOCK_EX) try: yield finally: fcntl.flock(lock, fcntl.LOCK_UN) def _check_ref(self): # If the mirror doesnt exist yet, then we dont have the ref if not os.path.exists(self._get_branch_dir()): return False return self.call([self.host_bzr, "revno", "--revision=revno:{}".format(self.ref), self._get_branch_dir()]) == 0 def _get_branch_dir(self): return os.path.join(self._get_mirror_dir(), self.tracking) def _get_mirror_dir(self): return os.path.join(self.get_mirror_directory(), utils.url_directory_name(self.original_url)) def _ensure_mirror(self, skip_ref_check=False): mirror_dir = self._get_mirror_dir() bzr_metadata_dir = os.path.join(mirror_dir, ".bzr") if not os.path.exists(bzr_metadata_dir): self.call([self.host_bzr, "init-repo", "--no-trees", mirror_dir], fail="Failed to initialize bzr repository") branch_dir = os.path.join(mirror_dir, self.tracking) branch_url = self.url + "/" + self.tracking if not os.path.exists(branch_dir): # `bzr branch` the branch if it doesn't exist # to get the upstream code self.call([self.host_bzr, "branch", branch_url, branch_dir], fail="Failed to branch from {} to {}".format(branch_url, branch_dir)) else: # `bzr pull` the branch if it does exist # to get any changes to the upstream code self.call([self.host_bzr, "pull", "--directory={}".format(branch_dir), branch_url], fail="Failed to pull new changes for {}".format(branch_dir)) if not skip_ref_check and not self._check_ref(): raise SourceError("Failed to ensure ref '{}' was mirrored".format(self.ref), reason="ref-not-mirrored") def setup(): return BzrSource buildstream-1.6.9/buildstream/plugins/sources/deb.py000066400000000000000000000053321437515270000226340ustar00rootroot00000000000000# Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Phillip Smyth # Jonathan Maw # Richard Maw """ deb - stage files from .deb packages ==================================== **Host dependencies:** * arpy (python package) **Usage:** .. code:: yaml # Specify the deb source kind kind: deb # Optionally specify a relative staging directory # directory: path/to/stage # Specify the deb url. Using an alias defined in your project # configuration is encouraged. 'bst track' will update the # sha256sum in 'ref' to the downloaded file's sha256sum. url: upstream:foo.deb # Specify the ref. It's a sha256sum of the file you download. 
ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b # Specify the basedir to return only the specified dir and it's children base-dir: '' """ import tarfile from contextlib import contextmanager, ExitStack import arpy # pylint: disable=import-error from .tar import TarSource class DebSource(TarSource): # pylint: disable=attribute-defined-outside-init def configure(self, node): super().configure(node) self.base_dir = self.node_get_member(node, str, 'base-dir', None) def preflight(self): return @contextmanager def _get_tar(self): with ExitStack() as context: deb_file = context.enter_context(open(self._get_mirror_file(), 'rb')) arpy_archive = arpy.Archive(fileobj=deb_file) arpy_archive.read_all_headers() data_tar_arpy = [v for k, v in arpy_archive.archived_files.items() if b"data.tar" in k][0] # ArchiveFileData is not enough like a file object for tarfile to use. # Monkey-patching a seekable method makes it close enough for TarFile to open. data_tar_arpy.seekable = lambda *args: True tar = tarfile.open(fileobj=data_tar_arpy, mode="r:*") yield tar def setup(): return DebSource buildstream-1.6.9/buildstream/plugins/sources/git.py000066400000000000000000000650321437515270000226700ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ git - stage files from a git repository ======================================= **Host dependencies:** * git **Usage:** .. code:: yaml # Specify the git source kind kind: git # Optionally specify a relative staging directory # directory: path/to/stage # Specify the repository url, using an alias defined # in your project configuration is recommended. url: upstream:foo.git # Optionally specify a symbolic tracking branch or tag, this # will be used to update the 'ref' when refreshing the pipeline. track: master # Optionally specify the ref format used for tracking. # The default is 'sha1' for the raw commit hash. # If you specify 'git-describe', the commit hash will be prefixed # with the closest tag. ref-format: sha1 # Specify the commit ref, this must be specified in order to # checkout sources and build, but can be automatically updated # if the 'track' attribute was specified. ref: d63cbb6fdc0bbdadc4a1b92284826a6d63a7ebcd # Optionally specify whether submodules should be checked-out. # If not set, this will default to 'True' checkout-submodules: True # If your repository has submodules, explicitly specifying the # url from which they are to be fetched allows you to easily # rebuild the same sources from a different location. This is # especially handy when used with project defined aliases which # can be redefined at a later time. # You may also explicitly specify whether to check out this # submodule. If 'checkout' is set, it will override # 'checkout-submodules' with the value set below. 
submodules: plugins/bar: url: upstream:bar.git checkout: True plugins/baz: url: upstream:baz.git checkout: False **Configurable Warnings:** This plugin provides the following :ref:`configurable warnings `: - ``git:inconsistent-submodule`` - A submodule present in the git repository's .gitmodules was never added with `git submodule add`. - ``git:unlisted-submodule`` - A submodule is present in the git repository but was not specified in the source configuration and was not disabled for checkout. .. note:: The ``git:unlisted-submodule`` warning is available since :ref:`format version 20 ` - ``git:invalid-submodule`` - A submodule is specified in the source configuration but does not exist in the repository. .. note:: The ``git:invalid-submodule`` warning is available since :ref:`format version 20 ` This plugin also utilises the following configurable :class:`core warnings `: - :attr:`ref-not-in-track ` - The provided ref was not found in the provided track in the element's git repository. """ import os import errno import re from collections.abc import Mapping from io import StringIO from configparser import RawConfigParser from buildstream import Source, SourceError, Consistency, SourceFetcher, CoreWarnings from buildstream import utils GIT_MODULES = '.gitmodules' # Warnings WARN_INCONSISTENT_SUBMODULE = "inconsistent-submodule" WARN_UNLISTED_SUBMODULE = "unlisted-submodule" WARN_INVALID_SUBMODULE = "invalid-submodule" # Because of handling of submodules, we maintain a GitMirror # for the primary git source and also for each submodule it # might have at a given time # class GitMirror(SourceFetcher): def __init__(self, source, path, url, ref, *, primary=False): super().__init__() self.source = source self.path = path self.url = url self.ref = ref self.primary = primary self.mirror = os.path.join(source.get_mirror_directory(), utils.url_directory_name(url)) self.mark_download_url(url) # Ensures that the mirror exists def ensure(self, alias_override=None): # Unfortunately, git does not know how to only clone just a specific ref, # so we have to download all of those gigs even if we only need a couple # of bytes. if not os.path.exists(self.mirror): # Do the initial clone in a tmpdir just because we want an atomic move # after a long standing clone which could fail overtime, for now do # this directly in our git directory, eliminating the chances that the # system configured tmpdir is not on the same partition. 
# with self.source.tempdir() as tmpdir: url = self.source.translate_url(self.url, alias_override=alias_override, primary=self.primary) self.source.call([self.source.host_git, 'clone', '--mirror', '-n', url, tmpdir], fail="Failed to clone git repository {}".format(url), fail_temporarily=True) # Attempt atomic rename into destination, this will fail if # another process beat us to the punch try: os.rename(tmpdir, self.mirror) except OSError as e: # When renaming and the destination repo already exists, os.rename() # will fail with ENOTEMPTY or EEXIST, since an empty directory will # be silently replaced if e.errno in (errno.ENOTEMPTY, errno.EEXIST): self.source.status("{}: Discarding duplicate clone of {}" .format(self.source, url)) else: raise SourceError("{}: Failed to move cloned git repository {} from '{}' to '{}': {}" .format(self.source, url, tmpdir, self.mirror, e)) from e def _fetch(self, alias_override=None): url = self.source.translate_url(self.url, alias_override=alias_override, primary=self.primary) if alias_override: remote_name = utils.url_directory_name(alias_override) _, remotes = self.source.check_output( [self.source.host_git, 'remote'], fail="Failed to retrieve list of remotes in {}".format(self.mirror), cwd=self.mirror ) if remote_name not in remotes.strip().split(): self.source.call( [self.source.host_git, 'remote', 'add', remote_name, url], fail="Failed to add remote {} with url {}".format(remote_name, url), cwd=self.mirror ) else: remote_name = "origin" # In git < 1.9.0, we have to call `git fetch` twice, once for the tags # if self.source.git_fetch_tags_exclusive: self.source.call([self.source.host_git, 'fetch', remote_name, '--prune', '--force'], fail="Failed to fetch from remote git repository: {}".format(url), fail_temporarily=True, cwd=self.mirror) self.source.call([self.source.host_git, 'fetch', remote_name, '--prune', '--force', '--tags'], fail="Failed to fetch from remote git repository: {}".format(url), fail_temporarily=True, cwd=self.mirror) def fetch(self, alias_override=None): # Resolve the URL for the message resolved_url = self.source.translate_url(self.url, alias_override=alias_override, primary=self.primary) with self.source.timed_activity("Fetching from {}" .format(resolved_url), silent_nested=True): self.ensure(alias_override) if not self.has_ref(): self._fetch(alias_override) self.assert_ref() def has_ref(self): if not self.ref: return False # If the mirror doesnt exist, we also dont have the ref if not os.path.exists(self.mirror): return False # Check if the ref is really there rc = self.source.call([self.source.host_git, 'cat-file', '-t', self.ref], cwd=self.mirror) return rc == 0 def assert_ref(self): if not self.has_ref(): raise SourceError("{}: expected ref '{}' was not found in git repository: '{}'" .format(self.source, self.ref, self.url)) def latest_commit(self, tracking): _, output = self.source.check_output( [self.source.host_git, 'rev-parse', tracking], fail="Unable to find commit for specified branch name '{}'".format(tracking), cwd=self.mirror) ref = output.rstrip('\n') if self.source.ref_format == 'git-describe': # Prefix the ref with the closest tag, if available, # to make the ref human readable exit_code, output = self.source.check_output( [self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref], cwd=self.mirror) if exit_code == 0: ref = output.rstrip('\n') return ref def stage(self, directory): fullpath = os.path.join(directory, self.path) # We need to pass '--no-hardlinks' because there's nothing to # stop the 
build from overwriting the files in the .git directory # inside the sandbox. self.source.call([self.source.host_git, 'clone', '--no-checkout', '--no-hardlinks', self.mirror, fullpath], fail="Failed to create git mirror {} in directory: {}".format(self.mirror, fullpath), fail_temporarily=True) self.source.call([self.source.host_git, 'checkout', '--force', self.ref], fail="Failed to checkout git ref {}".format(self.ref), cwd=fullpath) def init_workspace(self, directory): fullpath = os.path.join(directory, self.path) url = self.source.translate_url(self.url) self.source.call([self.source.host_git, 'clone', '--no-checkout', self.mirror, fullpath], fail="Failed to clone git mirror {} in directory: {}".format(self.mirror, fullpath), fail_temporarily=True) self.source.call([self.source.host_git, 'remote', 'set-url', 'origin', url], fail='Failed to add remote origin "{}"'.format(url), cwd=fullpath) self.source.call([self.source.host_git, 'checkout', '--force', self.ref], fail="Failed to checkout git ref {}".format(self.ref), cwd=fullpath) # List the submodules (path/url tuples) present at the given ref of this repo def submodule_list(self): modules = "{}:{}".format(self.ref, GIT_MODULES) exit_code, output = self.source.check_output( [self.source.host_git, 'show', modules], cwd=self.mirror) # If git show reports error code 128 here, we take it to mean there is # no .gitmodules file to display for the given revision. if exit_code == 128: return elif exit_code != 0: raise SourceError( "{plugin}: Failed to show gitmodules at ref {ref}".format( plugin=self, ref=self.ref)) content = '\n'.join([l.strip() for l in output.splitlines()]) io = StringIO(content) parser = RawConfigParser() parser.read_file(io) for section in parser.sections(): # validate section name against the 'submodule "foo"' pattern if re.match(r'submodule "(.*)"', section): path = parser.get(section, 'path') url = parser.get(section, 'url') yield (path, url) # Fetch the ref which this mirror requires its submodule to have, # at the given ref of this mirror. def submodule_ref(self, submodule, ref=None): if not ref: ref = self.ref # list objects in the parent repo tree to find the commit # object that corresponds to the submodule _, output = self.source.check_output([self.source.host_git, 'ls-tree', ref, submodule], fail="ls-tree failed for commit {} and submodule: {}".format( ref, submodule), cwd=self.mirror) # read the commit hash from the output fields = output.split() if len(fields) >= 2 and fields[1] == 'commit': submodule_commit = output.split()[2] # fail if the commit hash is invalid if len(submodule_commit) != 40: raise SourceError("{}: Error reading commit information for submodule '{}'" .format(self.source, submodule)) return submodule_commit else: detail = "The submodule '{}' is defined either in the BuildStream source\n".format(submodule) + \ "definition, or in a .gitmodules file. But the submodule was never added to the\n" + \ "underlying git repository with `git submodule add`." self.source.warn("{}: Ignoring inconsistent submodule '{}'" .format(self.source, submodule), detail=detail, warning_token=WARN_INCONSISTENT_SUBMODULE) return None class GitSource(Source): # pylint: disable=attribute-defined-outside-init # # The --tags option before git 1.9.0 used to mean to fetch tags exclusively, # since git 1.9.0 the --tags option means to additionally fetch tags. 
# # https://github.com/git/git/blob/master/Documentation/RelNotes/1.9.0.txt # git_fetch_tags_exclusive = None def configure(self, node): ref = self.node_get_member(node, str, 'ref', None) config_keys = ['url', 'track', 'ref', 'submodules', 'checkout-submodules', 'ref-format'] self.node_validate(node, config_keys + Source.COMMON_CONFIG_KEYS) self.original_url = self.node_get_member(node, str, 'url') self.mirror = GitMirror(self, '', self.original_url, ref, primary=True) self.tracking = self.node_get_member(node, str, 'track', None) self.ref_format = self.node_get_member(node, str, 'ref-format', 'sha1') if self.ref_format not in ['sha1', 'git-describe']: provenance = self.node_provenance(node, member_name='ref-format') raise SourceError("{}: Unexpected value for ref-format: {}".format(provenance, self.ref_format)) # At this point we now know if the source has a ref and/or a track. # If it is missing both then we will be unable to track or build. if self.mirror.ref is None and self.tracking is None: raise SourceError("{}: Git sources require a ref and/or track".format(self), reason="missing-track-and-ref") self.checkout_submodules = self.node_get_member(node, bool, 'checkout-submodules', True) self.submodules = [] # Parse a dict of submodule overrides, stored in the submodule_overrides # and submodule_checkout_overrides dictionaries. self.submodule_overrides = {} self.submodule_checkout_overrides = {} modules = self.node_get_member(node, Mapping, 'submodules', {}) for path, _ in self.node_items(modules): submodule = self.node_get_member(modules, Mapping, path) url = self.node_get_member(submodule, str, 'url', None) # Make sure to mark all URLs that are specified in the configuration if url: self.mark_download_url(url, primary=False) self.submodule_overrides[path] = url if 'checkout' in submodule: checkout = self.node_get_member(submodule, bool, 'checkout') self.submodule_checkout_overrides[path] = checkout self.mark_download_url(self.original_url) def preflight(self): # Check if git is installed, get the binary at the same time self.host_git = utils.get_host_tool('git') # Resolve what `--tags` means when calling `git fetch` self.init_fetch_tags_mode() def get_unique_key(self): # Here we want to encode the local name of the repository and # the ref, if the user changes the alias to fetch the same sources # from another location, it should not effect the cache key. key = [self.original_url, self.mirror.ref] # Only modify the cache key with checkout_submodules if it's something # other than the default behaviour. if self.checkout_submodules is False: key.append({"checkout_submodules": self.checkout_submodules}) # We want the cache key to change if the source was # configured differently, and submodules count. 
if self.submodule_overrides: key.append(self.submodule_overrides) if self.submodule_checkout_overrides: key.append({"submodule_checkout_overrides": self.submodule_checkout_overrides}) return key def get_consistency(self): if self.have_all_refs(): return Consistency.CACHED elif self.mirror.ref is not None: return Consistency.RESOLVED return Consistency.INCONSISTENT def load_ref(self, node): self.mirror.ref = self.node_get_member(node, str, 'ref', None) def get_ref(self): return self.mirror.ref def set_ref(self, ref, node): node['ref'] = self.mirror.ref = ref def track(self): # If self.tracking is not specified it's not an error, just silently return if not self.tracking: return None # Resolve the URL for the message resolved_url = self.translate_url(self.mirror.url) with self.timed_activity("Tracking {} from {}" .format(self.tracking, resolved_url), silent_nested=True): self.mirror.ensure() self.mirror._fetch() # Update self.mirror.ref and node.ref from the self.tracking branch ret = self.mirror.latest_commit(self.tracking) return ret def init_workspace(self, directory): # XXX: may wish to refactor this as some code dupe with stage() self.refresh_submodules() with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True): self.mirror.init_workspace(directory) for mirror in self.submodules: mirror.init_workspace(directory) def stage(self, directory): # Need to refresh submodule list here again, because # it's possible that we did not load in the main process # with submodules present (source needed fetching) and # we may not know about the submodule yet come time to build. # self.refresh_submodules() # Stage the main repo in the specified directory # with self.timed_activity("Staging {}".format(self.mirror.url), silent_nested=True): self.mirror.stage(directory) for mirror in self.submodules: mirror.stage(directory) def get_source_fetchers(self): yield self.mirror self.refresh_submodules() for submodule in self.submodules: yield submodule def validate_cache(self): discovered_submodules = {} unlisted_submodules = [] invalid_submodules = [] for path, url in self.mirror.submodule_list(): discovered_submodules[path] = url if self.ignore_submodule(path): continue override_url = self.submodule_overrides.get(path) if not override_url: unlisted_submodules.append((path, url)) # Warn about submodules which are explicitly configured but do not exist for path, url in self.submodule_overrides.items(): if path not in discovered_submodules: invalid_submodules.append((path, url)) if invalid_submodules: detail = [] for path, url in invalid_submodules: detail.append(" Submodule URL '{}' at path '{}'".format(url, path)) self.warn("{}: Invalid submodules specified".format(self), warning_token=WARN_INVALID_SUBMODULE, detail="The following submodules are specified in the source " "description but do not exist according to the repository\n\n" + "\n".join(detail)) # Warn about submodules which exist but have not been explicitly configured if unlisted_submodules: detail = [] for path, url in unlisted_submodules: detail.append(" Submodule URL '{}' at path '{}'".format(url, path)) self.warn("{}: Unlisted submodules exist".format(self), warning_token=WARN_UNLISTED_SUBMODULE, detail="The following submodules exist but are not specified " + "in the source description\n\n" + "\n".join(detail)) # Assert that the ref exists in the track tag/branch, if track has been specified. 
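        # The ref counts as being "in track" if either `git branch --contains`
        # or, failing that, `git tag --contains` lists the tracking name.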
        ref_in_track = False
        if self.tracking:
            _, branch = self.check_output([self.host_git, 'branch', '--list', self.tracking,
                                           '--contains', self.mirror.ref],
                                          cwd=self.mirror.mirror)
            if branch:
                ref_in_track = True
            else:
                _, tag = self.check_output([self.host_git, 'tag', '--list', self.tracking,
                                            '--contains', self.mirror.ref],
                                           cwd=self.mirror.mirror)
                if tag:
                    ref_in_track = True

        if not ref_in_track:
            detail = "The ref provided for the element does not exist locally " + \
                     "in the provided track branch / tag '{}'.\n".format(self.tracking) + \
                     "You may wish to track the element to update the ref from '{}' ".format(self.tracking) + \
                     "with `bst track`,\n" + \
                     "or examine the upstream at '{}' for the specific ref.".format(self.mirror.url)

            self.warn("{}: expected ref '{}' was not found in given track '{}' for staged repository: '{}'\n"
                      .format(self, self.mirror.ref, self.tracking, self.mirror.url),
                      detail=detail, warning_token=CoreWarnings.REF_NOT_IN_TRACK)

    ###########################################################
    #                     Local Functions                     #
    ###########################################################
    def have_all_refs(self):
        if not self.mirror.has_ref():
            return False

        self.refresh_submodules()
        for mirror in self.submodules:
            if not os.path.exists(mirror.mirror):
                return False
            if not mirror.has_ref():
                return False

        return True

    # Refreshes the GitMirror objects for submodules
    #
    # Assumes that we have our mirror and we have the ref which we point to
    #
    def refresh_submodules(self):
        self.mirror.ensure()
        submodules = []

        for path, url in self.mirror.submodule_list():

            # Completely ignore submodules which are disabled for checkout
            if self.ignore_submodule(path):
                continue

            # Allow configuration to override the upstream
            # location of the submodules.
            override_url = self.submodule_overrides.get(path)
            if override_url:
                url = override_url

            ref = self.mirror.submodule_ref(path)
            if ref is not None:
                mirror = GitMirror(self, path, url, ref)
                submodules.append(mirror)

        self.submodules = submodules

    # Checks whether the plugin configuration has explicitly
    # configured this submodule to be ignored
    def ignore_submodule(self, path):
        try:
            checkout = self.submodule_checkout_overrides[path]
        except KeyError:
            checkout = self.checkout_submodules

        return not checkout

    # Resolve GitSource.git_fetch_tags_exclusive
    def init_fetch_tags_mode(self):
        if self.git_fetch_tags_exclusive is None:
            _, version_output = self.check_output([self.host_git, '--version'])
            version_output = version_output.strip()

            # Extract the version from "git version {version}" string
            git_version = version_output.rsplit(maxsplit=1)[-1]

            # Parse out the minor and major versions
            git_version_split = git_version.split(".")
            if len(git_version_split) < 3:
                raise SourceError("{}: Failed to parse git version: {}".format(self, version_output))
            git_version_major = int(git_version_split[0])
            git_version_minor = int(git_version_split[1])

            # Resolve whether `git fetch --tags` means to fetch tags exclusively
            if git_version_major == 1 and git_version_minor < 9:
                type(self).git_fetch_tags_exclusive = True
            else:
                type(self).git_fetch_tags_exclusive = False


# Plugin entry point
def setup():
    return GitSource
buildstream-1.6.9/buildstream/plugins/sources/local.py000066400000000000000000000120241437515270000231700ustar00rootroot00000000000000#
#  Copyright (C)
2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Tiago Gomes """ local - stage local files and directories ========================================= **Usage:** .. code:: yaml # Specify the local source kind kind: local # Optionally specify a relative staging directory # directory: path/to/stage # Specify the project relative path to a file or directory path: files/somefile.txt """ import os import stat from buildstream import Source, Consistency from buildstream import utils class LocalSource(Source): # pylint: disable=attribute-defined-outside-init def __init__(self, context, project, meta): super().__init__(context, project, meta) # Cached unique key to avoid multiple file system traversal if the unique key is requested multiple times. self.__unique_key = None def configure(self, node): self.node_validate(node, ['path'] + Source.COMMON_CONFIG_KEYS) self.path = self.node_get_project_path(node, 'path') self.fullpath = os.path.join(self.get_project_directory(), self.path) def preflight(self): pass def get_unique_key(self): if self.__unique_key is None: # Get a list of tuples of the the project relative paths and fullpaths if os.path.isdir(self.fullpath): filelist = utils.list_relative_paths(self.fullpath) filelist = [(relpath, os.path.join(self.fullpath, relpath)) for relpath in filelist] else: filelist = [(self.path, self.fullpath)] # Return a list of (relative filename, sha256 digest) tuples, a sorted list # has already been returned by list_relative_paths() self.__unique_key = [(relpath, unique_key(fullpath)) for relpath, fullpath in filelist] return self.__unique_key def get_consistency(self): return Consistency.CACHED # We dont have a ref, we're a local file... def load_ref(self, node): pass def get_ref(self): return None # pragma: nocover def set_ref(self, ref, node): pass # pragma: nocover def fetch(self): # Nothing to do here for a local source pass # pragma: nocover def stage(self, directory): # Dont use hardlinks to stage sources, they are not write protected # in the sandbox. 
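        # Rough summary of the staging below: files are copied (not hardlinked)
        # and permissions are then normalised, so directories end up 0755 and
        # non-executable files 0644, preventing the sandbox from modifying the
        # original project files in place.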
with self.timed_activity("Staging local files at {}".format(self.path)): if os.path.isdir(self.fullpath): files = list(utils.list_relative_paths(self.fullpath, list_dirs=True)) utils.copy_files(self.fullpath, directory, files=files) else: destfile = os.path.join(directory, os.path.basename(self.path)) files = [os.path.basename(self.path)] utils.safe_copy(self.fullpath, destfile) for f in files: # Non empty directories are not listed by list_relative_paths dirs = f.split(os.sep) for i in range(1, len(dirs)): d = os.path.join(directory, *(dirs[:i])) assert os.path.isdir(d) and not os.path.islink(d) os.chmod(d, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) path = os.path.join(directory, f) if os.path.islink(path): pass elif os.path.isdir(path): os.chmod(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) else: st = os.stat(path) if st.st_mode & stat.S_IXUSR: os.chmod(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) else: os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) # Create a unique key for a file def unique_key(filename): # Return some hard coded things for files which # have no content to calculate a key for if os.path.isdir(filename): return "0" elif os.path.islink(filename): # For a symbolic link, use the link target as it's unique identifier return os.readlink(filename) return utils.sha256sum(filename) # Plugin entry point def setup(): return LocalSource buildstream-1.6.9/buildstream/plugins/sources/ostree.py000066400000000000000000000216051437515270000234040ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Andrew Leeming # Tiago Gomes """ ostree - stage files from an OSTree repository ============================================== **Usage:** .. code:: yaml # Specify the ostree source kind kind: ostree # Optionally specify a relative staging directory # directory: path/to/stage # Specify the repository url, using an alias defined # in your project configuration is recommended. url: upstream:runtime # Optionally specify a symbolic tracking branch or tag, this # will be used to update the 'ref' when refreshing the pipeline. track: runtime/x86_64/stable # Specify the commit checksum, this must be specified in order # to checkout sources and build, but can be automatically # updated if the 'track' attribute was specified. ref: d63cbb6fdc0bbdadc4a1b92284826a6d63a7ebcd # For signed ostree repositories, specify a local project relative # path to the public verifying GPG key for this remote. 
gpg-key: keys/runtime.gpg """ import os import shutil from buildstream import Source, SourceError, Consistency from buildstream import utils class OSTreeSource(Source): # pylint: disable=attribute-defined-outside-init def configure(self, node): self.node_validate(node, ['url', 'ref', 'track', 'gpg-key'] + Source.COMMON_CONFIG_KEYS) self.ostree = None self.original_url = self.node_get_member(node, str, 'url') self.url = self.translate_url(self.original_url) self.ref = self.node_get_member(node, str, 'ref', None) self.tracking = self.node_get_member(node, str, 'track', None) self.mirror = os.path.join(self.get_mirror_directory(), utils.url_directory_name(self.original_url)) # At this point we now know if the source has a ref and/or a track. # If it is missing both then we will be unable to track or build. if self.ref is None and self.tracking is None: raise SourceError("{}: OSTree sources require a ref and/or track".format(self), reason="missing-track-and-ref") # (optional) Not all repos are signed. But if they are, get the gpg key self.gpg_key_path = None if self.node_get_member(node, str, 'gpg-key', None): self.gpg_key = self.node_get_project_path(node, 'gpg-key', check_is_file=True) self.gpg_key_path = os.path.join(self.get_project_directory(), self.gpg_key) # Our OSTree repo handle self.repo = None def preflight(self): # Check if ostree is installed, get the binary at the same time self.ostree = utils.get_host_tool("ostree") def get_unique_key(self): return [self.original_url, self.ref] def load_ref(self, node): self.ref = self.node_get_member(node, str, 'ref', None) def get_ref(self): return self.ref def set_ref(self, ref, node): node['ref'] = self.ref = ref def track(self): # If self.tracking is not specified it's not an error, just silently return if not self.tracking: return None self.ensure() remote_name = self.ensure_remote(self.url) with self.timed_activity( "Fetching tracking ref '{}' from origin: {}".format( self.tracking, self.url ) ): self.call( [ self.ostree, "pull", "--repo", self.mirror, remote_name, self.tracking, ], fail="Failed to fetch tracking ref '{}' from origin {}".format( self.tracking, self.url ), ) return self.check_output( [self.ostree, "rev-parse", "--repo", self.mirror, self.tracking], fail="Failed to compute checksum of '{}' on '{}'".format( self.tracking, self.mirror ), )[1].strip() def fetch(self): self.ensure() remote_name = self.ensure_remote(self.url) with self.timed_activity( "Fetching remote ref: {} from origin: {}".format( self.ref, self.url ) ): self.call( [ self.ostree, "pull", "--repo", self.mirror, remote_name, self.ref, ], fail="Failed to fetch ref '{}' from origin: {}".format( self.ref, remote_name ), ) def stage(self, directory): self.ensure() # Checkout self.ref into the specified directory with self.tempdir() as tmpdir: checkoutdir = os.path.join(tmpdir, "checkout") with self.timed_activity( "Staging ref: {} from origin: {}".format(self.ref, self.url) ): self.call( [ self.ostree, "checkout", "--repo", self.mirror, "--user-mode", self.ref, checkoutdir, ], fail="Failed to checkout ref '{}' from origin: {}".format( self.ref, self.url ), ) # The target directory is guaranteed to exist, here we must move the # content of out checkout into the existing target directory. # # We may not be able to create the target directory as its parent # may be readonly, and the directory itself is often a mount point. 
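            # In shell terms the loop below behaves roughly like
            # `mv <checkoutdir>/* <directory>/` (illustrative only): each top
            # level entry of the temporary checkout is moved into the already
            # existing target directory rather than moving the checkout itself.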
# try: for entry in os.listdir(checkoutdir): source_path = os.path.join(checkoutdir, entry) shutil.move(source_path, directory) except (shutil.Error, OSError) as e: raise SourceError( "{}: Failed to move ostree checkout {} from '{}' to '{}'\n\n{}".format( self, self.url, tmpdir, directory, e ) ) from e def get_consistency(self): if self.ref is None: return Consistency.INCONSISTENT elif os.path.exists(self.mirror): if self.call([self.ostree, "show", "--repo", self.mirror, self.ref]) == 0: return Consistency.CACHED return Consistency.RESOLVED # # Local helpers # def ensure(self): if not os.path.exists(self.mirror): self.status("Creating local mirror for {}".format(self.url)) self.call( [ self.ostree, "init", "--repo", self.mirror, "--mode", "archive-z2", ], fail="Unable to create local mirror for repository", ) self.call( [ self.ostree, "config", "--repo", self.mirror, "set", "core.min-free-space-percent", "0", ], fail="Unable to disable minimum disk space checks", ) def ensure_remote(self, url): if self.original_url == self.url: remote_name = "origin" else: remote_name = utils.url_directory_name(url) command = [ self.ostree, "remote", "add", "--if-not-exists", "--repo", self.mirror, remote_name, url, ] if self.gpg_key_path: command.extend(["--gpg-import", self.gpg_key_path]) else: command.extend(["--no-gpg-verify"]) self.call(command, fail="Failed to configure origin {}".format(url)) return remote_name # Plugin entry point def setup(): return OSTreeSource buildstream-1.6.9/buildstream/plugins/sources/patch.py000066400000000000000000000060141437515270000231770ustar00rootroot00000000000000# # Copyright Bloomberg Finance LP # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Chandan Singh # Tiago Gomes """ patch - apply locally stored patches ==================================== **Host dependencies:** * patch **Usage:** .. 
code:: yaml # Specify the local source kind kind: patch # Specify the project relative path to a patch file path: files/somefile.diff # Optionally specify the root directory for the patch # directory: path/to/stage # Optionally specify the strip level, defaults to 1 strip-level: 1 """ import os from buildstream import Source, SourceError, Consistency from buildstream import utils class PatchSource(Source): # pylint: disable=attribute-defined-outside-init def configure(self, node): self.path = self.node_get_project_path(node, 'path', check_is_file=True) self.strip_level = self.node_get_member(node, int, "strip-level", 1) self.fullpath = os.path.join(self.get_project_directory(), self.path) def preflight(self): # Check if patch is installed, get the binary at the same time self.host_patch = utils.get_host_tool("patch") def get_unique_key(self): return [self.path, utils.sha256sum(self.fullpath), self.strip_level] def get_consistency(self): return Consistency.CACHED def load_ref(self, node): pass def get_ref(self): return None # pragma: nocover def set_ref(self, ref, node): pass # pragma: nocover def fetch(self): # Nothing to do here for a local source pass # pragma: nocover def stage(self, directory): with self.timed_activity("Applying local patch: {}".format(self.path)): # Bail out with a comprehensive message if the target directory is empty if not os.listdir(directory): raise SourceError("Nothing to patch in directory '{}'".format(directory), reason="patch-no-files") strip_level_option = "-p{}".format(self.strip_level) self.call([self.host_patch, strip_level_option, "-i", self.fullpath, "-d", directory], fail="Failed to apply patch {}".format(self.path)) # Plugin entry point def setup(): return PatchSource buildstream-1.6.9/buildstream/plugins/sources/pip.py000066400000000000000000000210051437515270000226650ustar00rootroot00000000000000# # Copyright 2018 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Chandan Singh """ pip - stage python packages using pip ===================================== **Host depndencies:** * ``pip`` python module This plugin will download source distributions for specified packages using ``pip`` but will not install them. It is expected that the elements using this source will install the downloaded packages. Downloaded tarballs will be stored in a directory called ".bst_pip_downloads". **Usage:** .. 
code:: yaml # Specify the pip source kind kind: pip # Optionally specify index url, defaults to PyPi # This url is used to discover new versions of packages and download them # Projects intending to mirror their sources to a permanent location should # use an aliased url, and declare the alias in the project configuration url: https://mypypi.example.com/simple # Optionally specify the path to requirements files # Note that either 'requirements-files' or 'packages' must be defined requirements-files: - requirements.txt # Optionally specify a list of additional packages # Note that either 'requirements-files' or 'packages' must be defined packages: - flake8 # Optionally specify a relative staging directory directory: path/to/stage # Specify the ref. It is a list of strings of format # "==", separated by "\\n". # Usually this will be contents of a requirements.txt file where all # package versions have been frozen. ref: "flake8==3.5.0\\nmccabe==0.6.1\\npkg-resources==0.0.0\\npycodestyle==2.3.1\\npyflakes==1.6.0" .. note:: The ``pip`` plugin is available since :ref:`format version 16 ` """ import errno import hashlib import os import re from buildstream import Consistency, Source, SourceError, utils _OUTPUT_DIRNAME = '.bst_pip_downloads' _PYPI_INDEX_URL = 'https://pypi.org/simple/' # Used only for finding pip command _PYTHON_VERSIONS = [ 'python2.7', 'python3.0', 'python3.1', 'python3.2', 'python3.3', 'python3.4', 'python3.5', 'python3.6', 'python3.7', 'python3.8', 'python3.9', 'python3.10', 'python3.11', ] # List of allowed extensions taken from # https://docs.python.org/3/distutils/sourcedist.html. # Names of source distribution archives must be of the form # '%{package-name}-%{version}.%{extension}'. _SDIST_RE = re.compile( r'^([a-zA-Z0-9]+?)-(.+).(?:tar|tar.bz2|tar.gz|tar.xz|tar.Z|zip)$', re.IGNORECASE) class PipSource(Source): # pylint: disable=attribute-defined-outside-init # We need access to previous sources at track time to use requirements.txt # but not at fetch time as self.ref should contain sufficient information # for this plugin BST_REQUIRES_PREVIOUS_SOURCES_TRACK = True def configure(self, node): self.node_validate(node, ['url', 'packages', 'ref', 'requirements-files'] + Source.COMMON_CONFIG_KEYS) self.ref = self.node_get_member(node, str, 'ref', None) self.original_url = self.node_get_member(node, str, 'url', _PYPI_INDEX_URL) self.index_url = self.translate_url(self.original_url) self.packages = self.node_get_member(node, list, 'packages', []) self.requirements_files = self.node_get_member(node, list, 'requirements-files', []) if not (self.packages or self.requirements_files): raise SourceError("{}: Either 'packages' or 'requirements-files' must be specified". 
format(self)) def preflight(self): # Try to find a pip version that supports download command self.host_pip = None for python in reversed(_PYTHON_VERSIONS): try: host_python = utils.get_host_tool(python) rc = self.call([host_python, '-m', 'pip', 'download', '--help']) if rc == 0: self.host_pip = [host_python, '-m', 'pip'] break except utils.ProgramNotFoundError: pass if self.host_pip is None: raise SourceError("{}: Unable to find a suitable pip command".format(self)) def get_unique_key(self): return [self.original_url, self.ref] def get_consistency(self): if not self.ref: return Consistency.INCONSISTENT if os.path.exists(self._mirror) and os.listdir(self._mirror): return Consistency.CACHED return Consistency.RESOLVED def get_ref(self): return self.ref def load_ref(self, node): self.ref = self.node_get_member(node, str, 'ref', None) def set_ref(self, ref, node): node['ref'] = self.ref = ref def track(self, previous_sources_dir): # XXX pip does not offer any public API other than the CLI tool so it # is not feasible to correctly parse the requirements file or to check # which package versions pip is going to install. # See https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program # for details. # As a result, we have to wastefully install the packages during track. with self.tempdir() as tmpdir: install_args = self.host_pip + ['download', '--no-binary', ':all:', '--index-url', self.index_url, '--dest', tmpdir] for requirement_file in self.requirements_files: fpath = os.path.join(previous_sources_dir, requirement_file) install_args += ['-r', fpath] install_args += self.packages self.call(install_args, fail="Failed to install python packages") reqs = self._parse_sdist_names(tmpdir) return '\n'.join(["{}=={}".format(pkg, ver) for pkg, ver in reqs]) def fetch(self): with self.tempdir() as tmpdir: packages = self.ref.strip().split('\n') package_dir = os.path.join(tmpdir, 'packages') os.makedirs(package_dir) self.call(self.host_pip + ['download', '--no-binary', ':all:', '--index-url', self.index_url, '--dest', package_dir] + packages, fail="Failed to install python packages: {}".format(packages)) # If the mirror directory already exists, assume that some other # process has fetched the sources before us and ensure that we do # not raise an error in that case. 
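            # The block below follows a "download to a temporary directory,
            # then publish with a rename" pattern: if another process has
            # already populated self._mirror, the FileExistsError / ENOTEMPTY
            # cases are treated as success rather than as an error.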
try: os.makedirs(self._mirror) os.rename(package_dir, self._mirror) except FileExistsError: return except OSError as e: if e.errno != errno.ENOTEMPTY: raise def stage(self, directory): with self.timed_activity("Staging Python packages", silent_nested=True): utils.copy_files(self._mirror, os.path.join(directory, _OUTPUT_DIRNAME)) # Directory where this source should stage its files # @property def _mirror(self): if not self.ref: return None return os.path.join(self.get_mirror_directory(), utils.url_directory_name(self.original_url), hashlib.sha256(self.ref.encode()).hexdigest()) # Parse names of downloaded source distributions # # Args: # basedir (str): Directory containing source distribution archives # # Returns: # (list): List of (package_name, version) tuples in sorted order # def _parse_sdist_names(self, basedir): reqs = [] for f in os.listdir(basedir): pkg_match = _SDIST_RE.match(f) if pkg_match: reqs.append(pkg_match.groups()) return sorted(reqs) def setup(): return PipSource buildstream-1.6.9/buildstream/plugins/sources/remote.py000066400000000000000000000054411437515270000233760ustar00rootroot00000000000000# # Copyright Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Ed Baunton """ remote - stage files from remote urls ===================================== **Usage:** .. code:: yaml # Specify the remote source kind kind: remote # Optionally specify a relative staging directory # directory: path/to/stage # Optionally specify a relative staging filename. # If not specified, the basename of the url will be used. # filename: customfilename # Specify the url. Using an alias defined in your project # configuration is encouraged. 'bst track' will update the # sha256sum in 'ref' to the downloaded file's sha256sum. url: upstream:foo # Specify the ref. It's a sha256sum of the file you download. ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b .. note:: The ``remote`` plugin is available since :ref:`format version 10 ` """ import os import stat from buildstream import SourceError, utils from ._downloadablefilesource import DownloadableFileSource class RemoteSource(DownloadableFileSource): # pylint: disable=attribute-defined-outside-init def configure(self, node): super().configure(node) self.filename = self.node_get_member(node, str, 'filename', os.path.basename(self.url)) if os.sep in self.filename: raise SourceError('{}: filename parameter cannot contain directories'.format(self), reason="filename-contains-directory") self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['filename']) def get_unique_key(self): return super().get_unique_key() + [self.filename] def stage(self, directory): # Same as in local plugin, don't use hardlinks to stage sources, they # are not write protected in the sandbox. 
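        # Rough sketch of what follows: the downloaded file is copied to
        # <directory>/<filename> and then chmod'ed to a 0644-style mode,
        # mirroring the permission handling of the local plugin.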
dest = os.path.join(directory, self.filename) with self.timed_activity("Staging remote file to {}".format(dest)): utils.safe_copy(self._get_mirror_file(), dest) os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH) def setup(): return RemoteSource buildstream-1.6.9/buildstream/plugins/sources/tar.py000066400000000000000000000160661437515270000226760ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Jonathan Maw """ tar - stage files from tar archives =================================== **Host dependencies:** * lzip (for .tar.lz files) **Usage:** .. code:: yaml # Specify the tar source kind kind: tar # Optionally specify a relative staging directory # directory: path/to/stage # Specify the tar url. Using an alias defined in your project # configuration is encouraged. 'bst track' will update the # sha256sum in 'ref' to the downloaded file's sha256sum. url: upstream:foo.tar # Specify the ref. It's a sha256sum of the file you download. ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b # Specify a glob pattern to indicate the base directory to extract # from the tarball. The first matching directory will be used. # # Note that this is '*' by default since most standard release # tarballs contain a self named subdirectory at the root which # contains the files one normally wants to extract to build. # # To extract the root of the tarball directly, this can be set # to an empty string. 
base-dir: '*' """ import os import tarfile from contextlib import contextmanager, ExitStack from tempfile import TemporaryFile from buildstream import SourceError from buildstream import utils from ._downloadablefilesource import DownloadableFileSource class TarSource(DownloadableFileSource): # pylint: disable=attribute-defined-outside-init def configure(self, node): super().configure(node) self.base_dir = self.node_get_member(node, str, 'base-dir', '*') or None self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['base-dir']) def preflight(self): self.host_lzip = None if self.url.endswith('.lz'): self.host_lzip = utils.get_host_tool('lzip') def get_unique_key(self): return super().get_unique_key() + [self.base_dir] @contextmanager def _run_lzip(self): assert self.host_lzip with TemporaryFile() as lzip_stdout: with ExitStack() as context: lzip_file = context.enter_context(open(self._get_mirror_file(), 'r')) self.call([self.host_lzip, '-d'], stdin=lzip_file, stdout=lzip_stdout) lzip_stdout.seek(0, 0) yield lzip_stdout @contextmanager def _get_tar(self): if self.url.endswith('.lz'): with self._run_lzip() as lzip_dec: with tarfile.open(fileobj=lzip_dec, mode='r:') as tar: yield tar else: with tarfile.open(self._get_mirror_file()) as tar: yield tar def stage(self, directory): try: with self._get_tar() as tar: base_dir = None if self.base_dir: base_dir = self._find_base_dir(tar, self.base_dir) if base_dir: tar.extractall(path=directory, members=self._extract_members(tar, base_dir)) else: tar.extractall(path=directory) except (tarfile.TarError, OSError) as e: raise SourceError("{}: Error staging source: {}".format(self, e)) from e # Override and translate which filenames to extract def _extract_members(self, tar, base_dir): if not base_dir.endswith(os.sep): base_dir = base_dir + os.sep l = len(base_dir) for member in tar.getmembers(): # First, ensure that a member never starts with `./` if member.path.startswith('./'): member.path = member.path[2:] # Now extract only the paths which match the normalized path if member.path.startswith(base_dir): # If it's got a link name, give it the same treatment, we # need the link targets to match up with what we are staging # # NOTE: Its possible this is not perfect, we may need to # consider links which point outside of the chosen # base directory. # if member.type == tarfile.LNKTYPE: member.linkname = member.linkname[l:] member.path = member.path[l:] yield member # We want to iterate over all paths of a tarball, but getmembers() # is not enough because some tarballs simply do not contain the leading # directory paths for the archived files. def _list_tar_paths(self, tar): visited = {} for member in tar.getmembers(): # Remove any possible leading './', offer more consistent behavior # across tarballs encoded with or without a leading '.' member_name = member.name.lstrip('./') if not member.isdir(): # Loop over the components of a path, for a path of a/b/c/d # we will first visit 'a', then 'a/b' and then 'a/b/c', excluding # the final component components = member_name.split('/') for i in range(len(components) - 1): dir_component = '/'.join([components[j] for j in range(i + 1)]) if dir_component not in visited: visited[dir_component] = True try: # Dont yield directory members which actually do # exist in the archive _ = tar.getmember(dir_component) except KeyError: if dir_component != '.': yield dir_component continue # Avoid considering the '.' 
directory, if any is included in the archive # this is to avoid the default 'base-dir: *' value behaving differently # depending on whether the tarball was encoded with a leading '.' or not elif member_name == '.': continue yield member_name def _find_base_dir(self, tar, pattern): paths = self._list_tar_paths(tar) matches = sorted(list(utils.glob(paths, pattern))) if not matches: raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern)) return matches[0] def setup(): return TarSource buildstream-1.6.9/buildstream/plugins/sources/zip.py000066400000000000000000000150441437515270000227050ustar00rootroot00000000000000# # Copyright (C) 2017 Mathieu Bridon # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Mathieu Bridon """ zip - stage files from zip archives =================================== **Usage:** .. code:: yaml # Specify the zip source kind kind: zip # Optionally specify a relative staging directory # directory: path/to/stage # Specify the zip url. Using an alias defined in your project # configuration is encouraged. 'bst track' will update the # sha256sum in 'ref' to the downloaded file's sha256sum. url: upstream:foo.zip # Specify the ref. It's a sha256sum of the file you download. ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b # Specify a glob pattern to indicate the base directory to extract # from the archive. The first matching directory will be used. # # Note that this is '*' by default since most standard release # archives contain a self named subdirectory at the root which # contains the files one normally wants to extract to build. # # To extract the root of the archive directly, this can be set # to an empty string. base-dir: '*' .. attention:: File permissions are not preserved. All extracted directories have permissions 0755 and all extracted files have permissions 0644. 
""" import os import zipfile import stat from buildstream import SourceError from buildstream import utils from ._downloadablefilesource import DownloadableFileSource class ZipSource(DownloadableFileSource): # pylint: disable=attribute-defined-outside-init def configure(self, node): super().configure(node) self.base_dir = self.node_get_member(node, str, 'base-dir', '*') or None self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['base-dir']) def get_unique_key(self): return super().get_unique_key() + [self.base_dir] def stage(self, directory): exec_rights = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) & ~(stat.S_IWGRP | stat.S_IWOTH) noexec_rights = exec_rights & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) try: with zipfile.ZipFile(self._get_mirror_file()) as archive: base_dir = None if self.base_dir: base_dir = self._find_base_dir(archive, self.base_dir) if base_dir: members = self._extract_members(archive, base_dir) else: members = archive.namelist() for member in members: written = archive.extract(member, path=directory) # zipfile.extract might create missing directories rel = os.path.relpath(written, start=directory) assert not os.path.isabs(rel) rel = os.path.dirname(rel) while rel: os.chmod(os.path.join(directory, rel), exec_rights) rel = os.path.dirname(rel) if os.path.islink(written): pass elif os.path.isdir(written): os.chmod(written, exec_rights) else: os.chmod(written, noexec_rights) except (zipfile.BadZipFile, zipfile.LargeZipFile, OSError) as e: raise SourceError("{}: Error staging source: {}".format(self, e)) from e # Override and translate which filenames to extract def _extract_members(self, archive, base_dir): if not base_dir.endswith(os.sep): base_dir = base_dir + os.sep l = len(base_dir) for member in archive.infolist(): if member.filename == base_dir: continue if member.filename.startswith(base_dir): member.filename = member.filename[l:] yield member # We want to iterate over all paths of an archive, but namelist() # is not enough because some archives simply do not contain the leading # directory paths for the archived files. def _list_archive_paths(self, archive): visited = {} for member in archive.infolist(): # ZipInfo.is_dir() is only available in python >= 3.6, but all # it does is check for a trailing '/' in the name # if not member.filename.endswith('/'): # Loop over the components of a path, for a path of a/b/c/d # we will first visit 'a', then 'a/b' and then 'a/b/c', excluding # the final component components = member.filename.split('/') for i in range(len(components) - 1): dir_component = '/'.join([components[j] for j in range(i + 1)]) if dir_component not in visited: visited[dir_component] = True try: # Dont yield directory members which actually do # exist in the archive _ = archive.getinfo(dir_component) except KeyError: if dir_component != '.': yield dir_component continue # Avoid considering the '.' directory, if any is included in the archive # this is to avoid the default 'base-dir: *' value behaving differently # depending on whether the archive was encoded with a leading '.' or not elif member.filename == '.' 
or member.filename == './': continue yield member.filename def _find_base_dir(self, archive, pattern): paths = self._list_archive_paths(archive) matches = sorted(list(utils.glob(paths, pattern))) if not matches: raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern)) return matches[0] def setup(): return ZipSource buildstream-1.6.9/buildstream/sandbox/000077500000000000000000000000001437515270000200175ustar00rootroot00000000000000buildstream-1.6.9/buildstream/sandbox/__init__.py000066400000000000000000000016551437515270000221370ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat from .sandbox import Sandbox, SandboxFlags from ._sandboxchroot import SandboxChroot from ._sandboxbwrap import SandboxBwrap from ._sandboxdummy import SandboxDummy buildstream-1.6.9/buildstream/sandbox/_config.py000066400000000000000000000041151437515270000217760ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Jim MacArthur # SandboxConfig # # A container for sandbox configuration data. We want the internals # of this to be opaque, hence putting it in its own private file. class SandboxConfig(): def __init__(self, build_uid, build_gid, build_os=None, build_arch=None): self.build_uid = build_uid self.build_gid = build_gid self.build_os = build_os self.build_arch = build_arch # get_unique_key(): # # This returns the SandboxConfig's contribution # to an element's cache key. # # Returns: # (dict): A dictionary to add to an element's cache key # def get_unique_key(self): # Currently operating system and machine architecture # are not configurable and we have no sandbox implementation # which can conform to such configurations. # # However this should be the right place to support # such configurations in the future. 
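        # For illustration only (hypothetical values): on a typical x86_64
        # Linux host the dictionary built below would look roughly like
        # {'os': 'Linux', 'arch': 'x86_64'}, gaining 'build-uid' / 'build-gid'
        # entries only when a non-root uid/gid is configured, which keeps
        # existing cache keys stable for the default case.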
# unique_key = { 'os': self.build_os, 'arch': self.build_arch } # Avoid breaking cache key calculation with # the addition of configurabuild build uid/gid if self.build_uid != 0: unique_key['build-uid'] = self.build_uid if self.build_gid != 0: unique_key['build-gid'] = self.build_gid return unique_key buildstream-1.6.9/buildstream/sandbox/_mount.py000066400000000000000000000123311437515270000216720ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os from collections import OrderedDict from contextlib import contextmanager, ExitStack from .. import utils from .._fuse import SafeHardlinks # Mount() # # Helper data object representing a single mount point in the mount map # class Mount(): def __init__(self, sandbox, mount_point, safe_hardlinks): scratch_directory = sandbox._get_scratch_directory() root_directory = sandbox.get_directory() self.mount_point = mount_point self.safe_hardlinks = safe_hardlinks # FIXME: When the criteria for mounting something and it's parent # mount is identical, then there is no need to mount an additional # fuse layer (i.e. if the root is read-write and there is a directory # marked for staged artifacts directly within the rootfs, they can # safely share the same fuse layer). # # In these cases it would be saner to redirect the sub-mount to # a regular mount point within the parent's redirected mount. # if self.safe_hardlinks: # Redirected mount self.mount_origin = os.path.join(root_directory, mount_point.lstrip(os.sep)) self.mount_base = os.path.join(scratch_directory, utils.url_directory_name(mount_point)) self.mount_source = os.path.join(self.mount_base, 'mount') self.mount_tempdir = os.path.join(self.mount_base, 'temp') os.makedirs(self.mount_origin, exist_ok=True) os.makedirs(self.mount_tempdir, exist_ok=True) else: # No redirection needed self.mount_source = os.path.join(root_directory, mount_point.lstrip(os.sep)) external_mount_sources = sandbox._get_mount_sources() external_mount_source = external_mount_sources.get(mount_point) if external_mount_source is None: os.makedirs(self.mount_source, exist_ok=True) else: if os.path.isdir(external_mount_source): os.makedirs(self.mount_source, exist_ok=True) else: # When mounting a regular file, ensure the parent # directory exists in the sandbox; and that an empty # file is created at the mount location. 
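                    # In shell terms this is roughly `mkdir -p "$(dirname path)"`
                    # followed by `touch path` (illustrative only), so that the
                    # bind mount of a regular file has an existing target inside
                    # the sandbox root.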
parent_dir = os.path.dirname(self.mount_source.rstrip('/')) os.makedirs(parent_dir, exist_ok=True) if not os.path.exists(self.mount_source): with open(self.mount_source, 'w', encoding='utf-8'): pass @contextmanager def mounted(self, sandbox): if self.safe_hardlinks: mount = SafeHardlinks(self.mount_origin, self.mount_tempdir) with mount.mounted(self.mount_source): yield else: # Nothing to mount here yield # MountMap() # # Helper object for mapping of the sandbox mountpoints # # Args: # sandbox (Sandbox): The sandbox object # root_readonly (bool): Whether the sandbox root is readonly # class MountMap(): def __init__(self, sandbox, root_readonly): # We will be doing the mounts in the order in which they were declared. self.mounts = OrderedDict() # We want safe hardlinks on rootfs whenever root is not readonly self.mounts['/'] = Mount(sandbox, '/', not root_readonly) for mark in sandbox._get_marked_directories(): directory = mark['directory'] artifact = mark['artifact'] # We want safe hardlinks for any non-root directory where # artifacts will be staged to self.mounts[directory] = Mount(sandbox, directory, artifact) # get_mount_source() # # Gets the host directory where the mountpoint in the # sandbox should be bind mounted from # # Args: # mountpoint (str): The absolute mountpoint path inside the sandbox # # Returns: # The host path to be mounted at the mount point # def get_mount_source(self, mountpoint): return self.mounts[mountpoint].mount_source # mounted() # # A context manager which ensures all the mount sources # were mounted with any fuse layers which may have been needed. # # Args: # sandbox (Sandbox): The sandbox # @contextmanager def mounted(self, sandbox): with ExitStack() as stack: for _, mount in self.mounts.items(): stack.enter_context(mount.mounted(sandbox)) yield buildstream-1.6.9/buildstream/sandbox/_mounter.py000066400000000000000000000111041437515270000222160ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat import sys from contextlib import contextmanager from .._exceptions import SandboxError from .. 
import utils, _signals # A class to wrap the `mount` and `umount` system commands class Mounter: @classmethod def _mount(cls, dest, src=None, mount_type=None, stdout=sys.stdout, stderr=sys.stderr, options=None, flags=None): argv = [utils.get_host_tool('mount')] if mount_type: argv.extend(['-t', mount_type]) if options: argv.extend(['-o', options]) if flags: argv.extend(flags) if src is not None: argv += [src] argv += [dest] status, _ = utils._call( argv, terminate=True, stdout=stdout, stderr=stderr ) if status != 0: raise SandboxError('`{}` failed with exit code {}' .format(' '.join(argv), status)) return dest @classmethod def _umount(cls, path, stdout=sys.stdout, stderr=sys.stderr): cmd = [utils.get_host_tool('umount'), '-R', path] status, _ = utils._call( cmd, terminate=True, stdout=stdout, stderr=stderr ) if status != 0: raise SandboxError('`{}` failed with exit code {}' .format(' '.join(cmd), status)) # mount() # # A wrapper for the `mount` command. The device is unmounted when # the context is left. # # Args: # dest (str) - The directory to mount to # src (str) - The directory to mount # stdout (file) - stdout # stderr (file) - stderr # mount_type (str|None) - The mount type (can be omitted or None) # kwargs - Arguments to pass to the mount command, such as `ro=True` # # Yields: # (str) The path to the destination # @classmethod @contextmanager def mount(cls, dest, src=None, stdout=sys.stdout, stderr=sys.stderr, mount_type=None, **kwargs): def kill_proc(): cls._umount(dest, stdout, stderr) options = ','.join([key for key, val in kwargs.items() if val]) path = cls._mount(dest, src, mount_type, stdout=stdout, stderr=stderr, options=options) try: with _signals.terminator(kill_proc): yield path finally: cls._umount(dest, stdout, stderr) # bind_mount() # # Mount a directory to a different location (a hardlink for all # intents and purposes). The directory is unmounted when the # context is left. # # Args: # dest (str) - The directory to mount to # src (str) - The directory to mount # stdout (file) - stdout # stderr (file) - stderr # kwargs - Arguments to pass to the mount command, such as `ro=True` # # Yields: # (str) The path to the destination # # While this is equivalent to `mount --rbind`, this option may not # exist and can be dangerous, requiring careful cleanupIt is # recommended to use this function over a manual mount invocation. # @classmethod @contextmanager def bind_mount(cls, dest, src=None, stdout=sys.stdout, stderr=sys.stderr, **kwargs): def kill_proc(): cls._umount(dest, stdout, stderr) kwargs['rbind'] = True options = ','.join([key for key, val in kwargs.items() if val]) path = cls._mount(dest, src, None, stdout, stderr, options) try: with _signals.terminator(kill_proc): # Make the rbind a slave to avoid unmounting vital devices in # /proc cls._mount(dest, flags=['--make-rslave']) yield path finally: cls._umount(dest, stdout, stderr) buildstream-1.6.9/buildstream/sandbox/_sandboxbwrap.py000066400000000000000000000416031437515270000232260ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Andrew Leeming # Tristan Van Berkom import os import sys import time import errno import signal import subprocess import shutil from contextlib import ExitStack import psutil from .._exceptions import SandboxError from .. import utils, _signals from ._mount import MountMap from . import Sandbox, SandboxFlags # SandboxBwrap() # # Default bubblewrap based sandbox implementation. # class SandboxBwrap(Sandbox): # Minimal set of devices for the sandbox DEVICES = [ '/dev/full', '/dev/null', '/dev/urandom', '/dev/random', '/dev/zero' ] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.user_ns_available = kwargs['user_ns_available'] self.die_with_parent_available = kwargs['die_with_parent_available'] self._linux32 = kwargs['linux32'] def run(self, command, flags, *, cwd=None, env=None): stdout, stderr = self._get_output() root_directory = self.get_directory() # Fallback to the sandbox default settings for # the cwd and env. # if cwd is None: cwd = self._get_work_directory() if env is None: env = self._get_environment() if not self._has_command(command[0], env): raise SandboxError("Staged artifacts do not provide command " "'{}'".format(command[0]), reason='missing-command') # We want command args as a list of strings if isinstance(command, str): command = [command] # Create the mount map, this will tell us where # each mount point needs to be mounted from and to mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY) root_mount_source = mount_map.get_mount_source('/') if cwd is None: cwd = '/' # start command with linux32 if needed if self._linux32: bwrap_command = [utils.get_host_tool('linux32')] else: bwrap_command = [] # Grab the full path of the bwrap binary bwrap_command += [utils.get_host_tool('bwrap')] for k, v in env.items(): bwrap_command += ['--setenv', k, v] for k in os.environ.keys() - env.keys(): bwrap_command += ['--unsetenv', k] # Create a new pid namespace, this also ensures that any subprocesses # are cleaned up when the bwrap process exits. bwrap_command += ['--unshare-pid'] # Ensure subprocesses are cleaned up when the bwrap parent dies. if self.die_with_parent_available: bwrap_command += ['--die-with-parent'] # Add in the root filesystem stuff first. # # The rootfs is mounted as RW initially so that further mounts can be # placed on top. If a RO root is required, after all other mounts are # complete, root is remounted as RO bwrap_command += ["--bind", root_mount_source, "/"] if not flags & SandboxFlags.NETWORK_ENABLED: bwrap_command += ['--unshare-net'] bwrap_command += ['--unshare-uts', '--hostname', 'buildstream'] bwrap_command += ['--unshare-ipc'] if cwd is not None: bwrap_command += ['--chdir', cwd] # Give it a proc and tmpfs bwrap_command += [ '--proc', '/proc', '--tmpfs', '/tmp' ] # In interactive mode, we want a complete devpts inside # the container, so there is a /dev/console and such. In # the regular non-interactive sandbox, we want to hand pick # a minimal set of devices to expose to the sandbox. 
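        # For reference, the non-interactive branch below expands to bwrap
        # arguments along the lines of (devices taken from self.DEVICES):
        #
        #   --dev-bind /dev/full /dev/full --dev-bind /dev/null /dev/null ...
        #
        # whereas the interactive branch simply requests a complete `--dev /dev`.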
# if flags & SandboxFlags.INTERACTIVE: bwrap_command += ['--dev', '/dev'] else: for device in self.DEVICES: bwrap_command += ['--dev-bind', device, device] # Create a tmpfs for /dev/shm, if we're in interactive this # is handled by `--dev /dev` # if flags & SandboxFlags.CREATE_DEV_SHM: bwrap_command += ['--tmpfs', '/dev/shm'] # Add bind mounts to any marked directories marked_directories = self._get_marked_directories() mount_source_overrides = self._get_mount_sources() for mark in marked_directories: mount_point = mark['directory'] mount_source = mount_source_overrides.get(mount_point, mount_map.get_mount_source(mount_point)) # Use --dev-bind for all mounts, this is simply a bind mount which does # not restrictive about devices. # # While it's important for users to be able to mount devices # into the sandbox for `bst shell` testing purposes, it is # harmless to do in a build environment where the directories # we mount just never contain device files. # bwrap_command += ['--dev-bind', mount_source, mount_point] if flags & SandboxFlags.ROOT_READ_ONLY: bwrap_command += ["--remount-ro", "/"] # Set UID and GUI if self.user_ns_available: bwrap_command += ['--unshare-user'] if not flags & SandboxFlags.INHERIT_UID: uid = self._get_config().build_uid gid = self._get_config().build_gid bwrap_command += ['--uid', str(uid), '--gid', str(gid)] # Add the command bwrap_command += command # bwrap might create some directories while being suid # and may give them to root gid, if it does, we'll want # to clean them up after, so record what we already had # there just in case so that we can safely cleanup the debris. # existing_basedirs = { directory: os.path.lexists(os.path.join(root_directory, directory)) for directory in ['dev/shm', 'tmp', 'dev', 'proc'] } # Use the MountMap context manager to ensure that any redirected # mounts through fuse layers are in context and ready for bwrap # to mount them from. # with ExitStack() as stack: stack.enter_context(mount_map.mounted(self)) # Ensure the cwd exists if cwd is not None: workdir = os.path.join(root_mount_source, cwd.lstrip(os.sep)) os.makedirs(workdir, exist_ok=True) # If we're interactive, we want to inherit our stdin, # otherwise redirect to /dev/null, ensuring process # disconnected from terminal. if flags & SandboxFlags.INTERACTIVE: stdin = sys.stdin else: stdin = stack.enter_context(open(os.devnull, "r")) # pylint: disable=unspecified-encoding # Run bubblewrap ! exit_code = self.run_bwrap(bwrap_command, stdin, stdout, stderr, (flags & SandboxFlags.INTERACTIVE)) # Cleanup things which bwrap might have left behind, while # everything is still mounted because bwrap can be creating # the devices on the fuse mount, so we should remove it there. if not flags & SandboxFlags.INTERACTIVE: for device in self.DEVICES: device_path = os.path.join(root_mount_source, device.lstrip('/')) # This will remove the device in a loop, allowing some # retries in case the device file leaked by bubblewrap is still busy self.try_remove_device(device_path) # Remove /tmp, this is a bwrap owned thing we want to be sure # never ends up in an artifact for basedir in ['dev/shm', 'tmp', 'dev', 'proc']: # Skip removal of directories which already existed before # launching bwrap if existing_basedirs[basedir]: continue base_directory = os.path.join(root_mount_source, basedir) if flags & SandboxFlags.INTERACTIVE: # Be more lenient in interactive mode here. 
# # In interactive mode; it's possible that the project shell # configuration has mounted some things below the base # directories, such as /dev/dri, and in this case it's less # important to consider cleanup, as we wont be collecting # this build result and creating an artifact. # # Note: Ideally; we should instead fix upstream bubblewrap to # cleanup any debris it creates at startup time, and do # the same ourselves for any directories we explicitly create. # shutil.rmtree(base_directory, ignore_errors=True) else: try: os.rmdir(base_directory) except FileNotFoundError: # ignore this, if bwrap cleaned up properly then it's not a problem. # # If the directory was not empty on the other hand, then this is clearly # a bug, bwrap mounted a tempfs here and when it exits, that better be empty. pass return exit_code def run_bwrap(self, argv, stdin, stdout, stderr, interactive): # Wrapper around subprocess.Popen() with common settings. # # This function blocks until the subprocess has terminated. # # It then returns a tuple of (exit code, stdout output, stderr output). # If stdout was not equal to subprocess.PIPE, stdout will be None. Same for # stderr. # Fetch the process actually launched inside the bwrap sandbox, or the # intermediat control bwrap processes. # # NOTE: # The main bwrap process itself is setuid root and as such we cannot # send it any signals. Since we launch bwrap with --unshare-pid, it's # direct child is another bwrap process which retains ownership of the # pid namespace. This is the right process to kill when terminating. # # The grandchild is the binary which we asked bwrap to launch on our # behalf, whatever this binary is, it is the right process to use # for suspending and resuming. In the case that this is a shell, the # shell will be group leader and all build scripts will stop/resume # with that shell. # def get_user_proc(bwrap_pid, grand_child=False): bwrap_proc = psutil.Process(bwrap_pid) bwrap_children = bwrap_proc.children() if bwrap_children: if grand_child: bwrap_grand_children = bwrap_children[0].children() if bwrap_grand_children: return bwrap_grand_children[0] else: return bwrap_children[0] return None def terminate_bwrap(): if process: user_proc = get_user_proc(process.pid) if user_proc: user_proc.kill() def suspend_bwrap(): if process: user_proc = get_user_proc(process.pid, grand_child=True) if user_proc: group_id = os.getpgid(user_proc.pid) os.killpg(group_id, signal.SIGSTOP) def resume_bwrap(): if process: user_proc = get_user_proc(process.pid, grand_child=True) if user_proc: group_id = os.getpgid(user_proc.pid) os.killpg(group_id, signal.SIGCONT) with ExitStack() as stack: # We want to launch bwrap in a new session in non-interactive # mode so that we handle the SIGTERM and SIGTSTP signals separately # from the nested bwrap process, but in interactive mode this # causes launched shells to lack job control (we dont really # know why that is). # if interactive: new_session = False else: new_session = True stack.enter_context(_signals.suspendable(suspend_bwrap, resume_bwrap)) stack.enter_context(_signals.terminator(terminate_bwrap)) process = subprocess.Popen( # pylint: disable=consider-using-with argv, # The default is to share file descriptors from the parent process # to the subprocess, which is rarely good for sandboxing. close_fds=True, stdin=stdin, stdout=stdout, stderr=stderr, start_new_session=new_session ) # Wait for the child process to finish, ensuring that # a SIGINT has exactly the effect the user probably # expects (i.e. 
let the child process handle it). try: while True: try: _, status = os.waitpid(process.pid, 0) # If the process exits due to a signal, we # brutally murder it to avoid zombies if not os.WIFEXITED(status): user_proc = get_user_proc(process.pid) if user_proc: utils._kill_process_tree(user_proc.pid) # If we receive a KeyboardInterrupt we continue # waiting for the process since we are in the same # process group and it should also have received # the SIGINT. except KeyboardInterrupt: continue break # If we can't find the process, it has already died of its # own accord, and therefore we don't need to check or kill # anything. except psutil.NoSuchProcess: pass # Return the exit code - see the documentation for # os.WEXITSTATUS to see why this is required. if os.WIFEXITED(status): exit_code = os.WEXITSTATUS(status) else: exit_code = -1 if interactive and stdin.isatty(): # Make this process the foreground process again, otherwise the # next read() on stdin will trigger SIGTTIN and stop the process. # This is required because the sandboxed process does not have # permission to do this on its own (running in separate PID namespace). # # tcsetpgrp() will trigger SIGTTOU when called from a background # process, so ignore it temporarily. handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN) os.tcsetpgrp(0, os.getpid()) signal.signal(signal.SIGTTOU, handler) return exit_code def try_remove_device(self, device_path): # Put some upper limit on the tries here max_tries = 1000 tries = 0 while True: try: os.unlink(device_path) except OSError as e: if e.errno == errno.EBUSY: # This happens on some machines, seems there is a race sometimes # after bubblewrap returns and the device files it bind-mounted did # not finish unmounting. # if tries < max_tries: tries += 1 time.sleep(1 / 100) continue # We've reached the upper limit of tries, bail out now # because something must have went wrong # raise if e.errno == errno.ENOENT: # Bubblewrap cleaned it up for us, no problem if we cant remove it break # Something unexpected, reraise this error raise else: # Successfully removed the symlink break buildstream-1.6.9/buildstream/sandbox/_sandboxchroot.py000066400000000000000000000304721437515270000234130ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Maat # Tristan Van Berkom import os import sys import stat import signal import subprocess from contextlib import contextmanager, ExitStack import psutil from .._exceptions import SandboxError from .. import utils from .. import _signals from ._mounter import Mounter from ._mount import MountMap from . 
import Sandbox, SandboxFlags class SandboxChroot(Sandbox): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) uid = self._get_config().build_uid gid = self._get_config().build_gid if uid != 0 or gid != 0: raise SandboxError("Chroot sandboxes cannot specify a non-root uid/gid " "({},{} were supplied via config)".format(uid, gid)) self.mount_map = None def run(self, command, flags, *, cwd=None, env=None): # Default settings if cwd is None: cwd = self._get_work_directory() if cwd is None: cwd = '/' if env is None: env = self._get_environment() if not self._has_command(command[0], env): raise SandboxError("Staged artifacts do not provide command " "'{}'".format(command[0]), reason='missing-command') # Command must be a list if isinstance(command, str): command = [command] stdout, stderr = self._get_output() # Create the mount map, this will tell us where # each mount point needs to be mounted from and to self.mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY) root_mount_source = self.mount_map.get_mount_source('/') # Create a sysroot and run the command inside it with ExitStack() as stack: os.makedirs('/var/run/buildstream', exist_ok=True) # FIXME: While we do not currently do anything to prevent # network access, we also don't copy /etc/resolv.conf to # the new rootfs. # # This effectively disables network access, since DNs will # never resolve, so anything a normal process wants to do # will fail. Malicious processes could gain rights to # anything anyway. # # Nonetheless a better solution could perhaps be found. rootfs = stack.enter_context(utils._tempdir(dir='/var/run/buildstream')) stack.enter_context(self.create_devices(self.get_directory(), flags)) stack.enter_context(self.mount_dirs(rootfs, flags, stdout, stderr)) if flags & SandboxFlags.INTERACTIVE: stdin = sys.stdin else: stdin = stack.enter_context(open(os.devnull, 'r')) # pylint: disable=unspecified-encoding # Ensure the cwd exists if cwd is not None: workdir = os.path.join(root_mount_source, cwd.lstrip(os.sep)) os.makedirs(workdir, exist_ok=True) status = self.chroot(rootfs, command, stdin, stdout, stderr, cwd, env, flags) return status # chroot() # # A helper function to chroot into the rootfs. 
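#
# Illustrative aside (not part of this module): the essence of the
# chroot-and-exec pattern that chroot() below implements, reduced to a few
# lines. The helper name and arguments are hypothetical; unlike the real
# implementation it performs no signal handling, mounting or cleanup, and
# it must run as root for os.chroot() to succeed.
#
import os
import subprocess


def example_run_in_chroot(rootfs, command, cwd='/', env=None):
    # preexec_fn runs in the child between fork() and exec(), so the
    # chroot()/chdir() only affect the sandboxed command, not BuildStream.
    return subprocess.call(
        command,
        env=env or {},
        close_fds=True,
        preexec_fn=lambda: (os.chroot(rootfs), os.chdir(cwd)),
    )
#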
# # Args: # rootfs (str): The path of the sysroot to chroot into # command (list): The command to execute in the chroot env # stdin (file): The stdin # stdout (file): The stdout # stderr (file): The stderr # cwd (str): The current working directory # env (dict): The environment variables to use while executing the command # flags (:class:`SandboxFlags`): The flags to enable on the sandbox # # Returns: # (int): The exit code of the executed command # def chroot(self, rootfs, command, stdin, stdout, stderr, cwd, env, flags): # pylint: disable=subprocess-popen-preexec-fn def kill_proc(): if process: # First attempt to gracefully terminate proc = psutil.Process(process.pid) proc.terminate() try: proc.wait(20) except psutil.TimeoutExpired: utils._kill_process_tree(process.pid) def suspend_proc(): group_id = os.getpgid(process.pid) os.killpg(group_id, signal.SIGSTOP) def resume_proc(): group_id = os.getpgid(process.pid) os.killpg(group_id, signal.SIGCONT) try: with _signals.suspendable(suspend_proc, resume_proc), _signals.terminator(kill_proc): process = subprocess.Popen( # pylint: disable=consider-using-with command, close_fds=True, cwd=os.path.join(rootfs, cwd.lstrip(os.sep)), env=env, stdin=stdin, stdout=stdout, stderr=stderr, # If you try to put gtk dialogs here Tristan (either) # will personally scald you preexec_fn=lambda: (os.chroot(rootfs), os.chdir(cwd)), start_new_session=flags & SandboxFlags.INTERACTIVE ) # Wait for the child process to finish, ensuring that # a SIGINT has exactly the effect the user probably # expects (i.e. let the child process handle it). try: while True: try: _, status = os.waitpid(process.pid, 0) # If the process exits due to a signal, we # brutally murder it to avoid zombies if not os.WIFEXITED(status): utils._kill_process_tree(process.pid) # Unlike in the bwrap case, here only the main # process seems to receive the SIGINT. We pass # on the signal to the child and then continue # to wait. except KeyboardInterrupt: process.send_signal(signal.SIGINT) continue break # If we can't find the process, it has already died of # its own accord, and therefore we don't need to check # or kill anything. except psutil.NoSuchProcess: pass # Return the exit code - see the documentation for # os.WEXITSTATUS to see why this is required. if os.WIFEXITED(status): code = os.WEXITSTATUS(status) else: code = -1 except subprocess.SubprocessError as e: # Exceptions in preexec_fn are simply reported as # 'Exception occurred in preexec_fn', turn these into # a more readable message. if '{}'.format(e) == 'Exception occurred in preexec_fn.': raise SandboxError('Could not chroot into {} or chdir into {}. ' 'Ensure you are root and that the relevant directory exists.' .format(rootfs, cwd)) from e raise SandboxError('Could not run command {}: {}'.format(command, e)) from e return code # create_devices() # # Create the nodes in /dev/ usually required for builds (null, # none, etc.) 
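#
# Illustrative aside (not part of this module): both sandbox
# implementations wait on the child with os.waitpid() and translate the
# raw status word into a plain exit code. A standalone restatement of that
# translation (the helper name is hypothetical; the code above simply uses
# -1 for anything that did not exit normally):
#
import os


def example_decode_wait_status(status):
    if os.WIFEXITED(status):
        # Normal exit: recover the code the process passed to exit()
        return os.WEXITSTATUS(status)
    if os.WIFSIGNALED(status):
        # Killed by a signal: report which one, shell-style (negative)
        return -os.WTERMSIG(status)
    # Stopped/continued or otherwise not a final exit
    return -1
#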
# # Args: # rootfs (str): The path of the sysroot to prepare # flags (:class:`.SandboxFlags`): The sandbox flags # @contextmanager def create_devices(self, rootfs, flags): devices = [] # When we are interactive, we'd rather mount /dev due to the # sheer number of devices if not flags & SandboxFlags.INTERACTIVE: for device in Sandbox.DEVICES: location = os.path.join(rootfs, device.lstrip(os.sep)) os.makedirs(os.path.dirname(location), exist_ok=True) try: if os.path.exists(location): os.remove(location) devices.append(self.mknod(device, location)) except OSError as e: if e.errno == 1: raise SandboxError("Permission denied while creating device node: {}.".format(e) + "BuildStream reqiures root permissions for these setttings.") from e raise yield for device in devices: os.remove(device) # mount_dirs() # # Mount paths required for the command. # # Args: # rootfs (str): The path of the sysroot to prepare # flags (:class:`.SandboxFlags`): The sandbox flags # stdout (file): The stdout # stderr (file): The stderr # @contextmanager def mount_dirs(self, rootfs, flags, stdout, stderr): # FIXME: This should probably keep track of potentially # already existing files a la _sandboxwrap.py:239 @contextmanager def mount_point(point, **kwargs): mount_source_overrides = self._get_mount_sources() mount_source = mount_source_overrides.get(point, self.mount_map.get_mount_source(point)) mount_point = os.path.join(rootfs, point.lstrip(os.sep)) with Mounter.bind_mount(mount_point, src=mount_source, stdout=stdout, stderr=stderr, **kwargs): yield @contextmanager def mount_src(src, **kwargs): mount_point = os.path.join(rootfs, src.lstrip(os.sep)) os.makedirs(mount_point, exist_ok=True) with Mounter.bind_mount(mount_point, src=src, stdout=stdout, stderr=stderr, **kwargs): yield with ExitStack() as stack: stack.enter_context(self.mount_map.mounted(self)) stack.enter_context(mount_point('/')) if flags & SandboxFlags.INTERACTIVE: stack.enter_context(mount_src('/dev')) stack.enter_context(mount_src('/tmp')) stack.enter_context(mount_src('/proc')) for mark in self._get_marked_directories(): stack.enter_context(mount_point(mark['directory'])) # Remount root RO if necessary if flags & flags & SandboxFlags.ROOT_READ_ONLY: root_mount = Mounter.mount(rootfs, stdout=stdout, stderr=stderr, remount=True, ro=True, bind=True) # Since the exit stack has already registered a mount # for this path, we do not need to register another # umount call. root_mount.__enter__() yield # mknod() # # Create a device node equivalent to the given source node # # Args: # source (str): Path of the device to mimic (e.g. '/dev/null') # target (str): Location to create the new device in # # Returns: # target (str): The location of the created node # def mknod(self, source, target): try: dev = os.stat(source) major = os.major(dev.st_rdev) minor = os.minor(dev.st_rdev) target_dev = os.makedev(major, minor) os.mknod(target, mode=stat.S_IFCHR | dev.st_mode, device=target_dev) except PermissionError as e: raise SandboxError('Could not create device {}, ensure that you have root permissions: {}') from e except OSError as e: raise SandboxError('Could not create device {}: {}' .format(target, e)) from e return target buildstream-1.6.9/buildstream/sandbox/_sandboxdummy.py000066400000000000000000000005651437515270000232500ustar00rootroot00000000000000from .._exceptions import SandboxError from . import Sandbox # SandboxDummy() # # Dummy sandbox to use on a different. 
# class SandboxDummy(Sandbox): def __init__(self, reason, *args, **kwargs): super().__init__(*args, **kwargs) self._reason = reason def run(self, command, flags, *, cwd=None, env=None): raise SandboxError(self._reason) buildstream-1.6.9/buildstream/sandbox/sandbox.py000066400000000000000000000234521437515270000220350ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Andrew Leeming # Tristan Van Berkom """ Sandbox - The build sandbox =========================== :class:`.Element` plugins which want to interface with the sandbox need only understand this interface, while it may be given a different sandbox implementation, any sandbox implementation it is given will conform to this interface. See also: :ref:`sandboxing`. """ import os from .._exceptions import ImplError class SandboxFlags(): """Flags indicating how the sandbox should be run. """ ROOT_READ_ONLY = 0x01 """The root filesystem is read only. This is normally true except when running integration commands on staged dependencies, where we have to update caches and run things such as ldconfig. """ NETWORK_ENABLED = 0x02 """Whether to expose host network. This should not be set when running builds, but can be allowed for running a shell in a sandbox. """ INTERACTIVE = 0x04 """Whether to run the sandbox interactively This determines if the sandbox should attempt to connect the terminal through to the calling process, or detach the terminal entirely. """ INHERIT_UID = 0x08 """Whether to use the user id and group id from the host environment This determines if processes in the sandbox should run with the same user id and group id as BuildStream itself. By default, processes run with user id and group id 0, protected by a user namespace where available. """ CREATE_DEV_SHM = 0x10 """Whether to create /dev/shm in the sandbox. This allows plugins to create /dev/shm in the sandbox. This flag was added to fix a bug in which /dev/shm was not added in, meaning our sandbox was not POSIX compliant. """ class Sandbox(): """Sandbox() Sandbox programming interface for :class:`.Element` plugins. 
""" # Minimal set of devices for the sandbox DEVICES = [ '/dev/urandom', '/dev/random', '/dev/zero', '/dev/null' ] def __init__(self, context, project, directory, **kwargs): self.__context = context self.__project = project self.__directories = [] self.__cwd = None self.__env = None self.__mount_sources = {} # Configuration from kwargs common to all subclasses self.__config = kwargs['config'] self.__stdout = kwargs['stdout'] self.__stderr = kwargs['stderr'] # Setup the directories self.__directory = directory self.__root = os.path.join(self.__directory, 'root') self.__scratch = os.path.join(self.__directory, 'scratch') for directory_ in [self.__root, self.__scratch]: os.makedirs(directory_, exist_ok=True) def get_directory(self): """Fetches the sandbox root directory The root directory is where artifacts for the base runtime environment should be staged. Returns: (str): The sandbox root directory """ return self.__root def set_environment(self, environment): """Sets the environment variables for the sandbox Args: directory (dict): The environment variables to use in the sandbox """ self.__env = environment def set_work_directory(self, directory): """Sets the work directory for commands run in the sandbox Args: directory (str): An absolute path within the sandbox """ self.__cwd = directory def mark_directory(self, directory, *, artifact=False): """Marks a sandbox directory and ensures it will exist Args: directory (str): An absolute path within the sandbox to mark artifact (bool): Whether the content staged at this location contains artifacts .. note:: Any marked directories will be read-write in the sandboxed environment, only the root directory is allowed to be readonly. """ self.__directories.append({ 'directory': directory, 'artifact': artifact }) def run(self, command, flags, *, cwd=None, env=None): """Run a command in the sandbox. Args: command (list): The command to run in the sandboxed environment, as a list of strings starting with the binary to run. flags (:class:`.SandboxFlags`): The flags for running this command. cwd (str): The sandbox relative working directory in which to run the command. env (dict): A dictionary of string key, value pairs to set as environment variables inside the sandbox environment. Returns: (int): The program exit code. Raises: (:class:`.ProgramNotFoundError`): If a host tool which the given sandbox implementation requires is not found. .. note:: The optional *cwd* argument will default to the value set with :func:`~buildstream.sandbox.Sandbox.set_work_directory` """ raise ImplError("Sandbox of type '{}' does not implement run()" .format(type(self).__name__)) ################################################ # Private methods # ################################################ # _get_context() # # Fetches the context BuildStream was launched with. # # Returns: # (Context): The context of this BuildStream invocation def _get_context(self): return self.__context # _get_project() # # Fetches the Project this sandbox was created to build for. # # Returns: # (Project): The project this sandbox was created for. def _get_project(self): return self.__project # _get_marked_directories() # # Fetches the marked directories in the sandbox # # Returns: # (list): A list of directory mark objects. 
# # The returned objects are dictionaries with the following attributes: # directory: The absolute path within the sandbox # artifact: Whether the path will contain artifacts or not # def _get_marked_directories(self): return self.__directories # _get_mount_source() # # Fetches the list of mount sources # # Returns: # (dict): A dictionary where keys are mount points and values are the mount sources def _get_mount_sources(self): return self.__mount_sources # _set_mount_source() # # Sets the mount source for a given mountpoint # # Args: # mountpoint (str): The absolute mountpoint path inside the sandbox # mount_source (str): the host path to be mounted at the mount point def _set_mount_source(self, mountpoint, mount_source): self.__mount_sources[mountpoint] = mount_source # _get_environment() # # Fetches the environment variables for running commands # in the sandbox. # # Returns: # (str): The sandbox work directory def _get_environment(self): return self.__env # _get_work_directory() # # Fetches the working directory for running commands # in the sandbox. # # Returns: # (str): The sandbox work directory def _get_work_directory(self): return self.__cwd # _get_scratch_directory() # # Fetches the sandbox scratch directory, this directory can # be used by the sandbox implementation to cache things or # redirect temporary fuse mounts. # # The scratch directory is guaranteed to be on the same # filesystem as the root directory. # # Returns: # (str): The sandbox scratch directory def _get_scratch_directory(self): return self.__scratch # _get_output() # # Fetches the stdout & stderr # # Returns: # (file): The stdout, or None to inherit # (file): The stderr, or None to inherit def _get_output(self): return (self.__stdout, self.__stderr) # _get_config() # # Fetches the sandbox configuration object. # # Returns: # (SandboxConfig): An object containing the configuration # data passed in during construction. def _get_config(self): return self.__config # _has_command() # # Tests whether a command exists inside the sandbox # # Args: # command (list): The command to test. # env (dict): A dictionary of string key, value pairs to set as environment # variables inside the sandbox environment. # Returns: # (bool): Whether a command exists inside the sandbox. def _has_command(self, command, env=None): if os.path.isabs(command): return os.path.exists(os.path.join( self.get_directory(), command.lstrip(os.sep))) for path in env.get('PATH').split(':'): if os.path.exists(os.path.join( self.get_directory(), path.lstrip(os.sep), command)): return True return False buildstream-1.6.9/buildstream/scriptelement.py000066400000000000000000000273611437515270000216220ustar00rootroot00000000000000# # Copyright (C) 2017 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Jonathan Maw """ ScriptElement - Abstract class for scripting elements ===================================================== The ScriptElement class is a convenience class one can derive for implementing elements that stage elements and run command-lines on them. Any derived classes must write their own configure() implementation, using the public APIs exposed in this class. Derived classes must also chain up to the parent method in their preflight() implementations. """ import os from collections import OrderedDict from . import Element, ElementError, Scope, SandboxFlags class ScriptElement(Element): __install_root = "/" __cwd = "/" __root_read_only = False __commands = None __layout = [] __create_dev_shm = False # The compose element's output is it's dependencies, so # we must rebuild if the dependencies change even when # not in strict build plans. # BST_STRICT_REBUILD = True # Script artifacts must never have indirect dependencies, # so runtime dependencies are forbidden. BST_FORBID_RDEPENDS = True # This element ignores sources, so we should forbid them from being # added, to reduce the potential for confusion BST_FORBID_SOURCES = True def set_work_dir(self, work_dir=None): """Sets the working dir The working dir (a.k.a. cwd) is the directory which commands will be called from. Args: work_dir (str): The working directory. If called without this argument set, it'll default to the value of the variable ``cwd``. """ if work_dir is None: self.__cwd = self.get_variable("cwd") or "/" else: self.__cwd = work_dir def set_install_root(self, install_root=None): """Sets the install root The install root is the directory which output will be collected from once the commands have been run. Args: install_root(str): The install root. If called without this argument set, it'll default to the value of the variable ``install-root``. """ if install_root is None: self.__install_root = self.get_variable("install-root") or "/" else: self.__install_root = install_root def set_root_read_only(self, root_read_only): """Sets root read-only When commands are run, if root_read_only is true, then the root of the filesystem will be protected. This is strongly recommended whenever possible. If this variable is not set, the default permission is read-write. Args: root_read_only (bool): Whether to mark the root filesystem as read-only. """ self.__root_read_only = root_read_only def set_create_dev_shm(self, create_dev_shm=False): """Sets whether to use shared memory device in the sandbox Args: work_dir (bool): Whether to enable creation of the shared memory device """ self.__create_dev_shm = create_dev_shm def layout_add(self, element, destination): """Adds an element-destination pair to the layout. Layout is a way of defining how dependencies should be added to the staging area for running commands. Args: element (str): The name of the element to stage, or None. This may be any element found in the dependencies, whether it is a direct or indirect dependency. destination (str): The path inside the staging area for where to stage this element. If it is not "/", then integration commands will not be run. If this function is never called, then the default behavior is to just stage the Scope.BUILD dependencies of the element in question at the sandbox root. Otherwise, the Scope.RUN dependencies of each specified element will be staged in their specified destination directories. .. note:: The order of directories in the layout is significant as they will be mounted into the sandbox. 
It is an error to specify a parent directory which will shadow a directory already present in the layout. .. note:: In the case that no element is specified, a read-write directory will be made available at the specified location. """ # # Even if this is an empty list by default, make sure that it's # instance data instead of appending stuff directly onto class data. # if not self.__layout: self.__layout = [] self.__layout.append({"element": element, "destination": destination}) def add_commands(self, group_name, command_list): """Adds a list of commands under the group-name. .. note:: Command groups will be run in the order they were added. .. note:: This does not perform substitutions automatically. They must be performed beforehand (see :func:`~buildstream.element.Element.node_subst_list`) Args: group_name (str): The name of the group of commands. command_list (list): The list of commands to be run. """ if not self.__commands: self.__commands = OrderedDict() self.__commands[group_name] = command_list def __validate_layout(self): if self.__layout: # Cannot proceeed if layout is used, but none are for "/" root_defined = any(entry['destination'] == '/' for entry in self.__layout) if not root_defined: raise ElementError("{}: Using layout, but none are staged as '/'" .format(self)) # Cannot proceed if layout specifies an element that isn't part # of the dependencies. for item in self.__layout: if item['element']: if not self.search(Scope.BUILD, item['element']): raise ElementError("{}: '{}' in layout not found in dependencies" .format(self, item['element'])) def preflight(self): # The layout, if set, must make sense. self.__validate_layout() def get_unique_key(self): return { 'commands': self.__commands, 'cwd': self.__cwd, 'install-root': self.__install_root, 'layout': self.__layout, 'root-read-only': self.__root_read_only } def configure_sandbox(self, sandbox): # Setup the environment and work directory sandbox.set_work_directory(self.__cwd) # Setup environment sandbox.set_environment(self.get_environment()) # Tell the sandbox to mount the install root directories = {self.__install_root: False} # Mark the artifact directories in the layout for item in self.__layout: destination = item['destination'] was_artifact = directories.get(destination, False) directories[destination] = item['element'] or was_artifact for directory, artifact in directories.items(): # Root does not need to be marked as it is always mounted # with artifact (unless explicitly marked non-artifact) if directory != '/': sandbox.mark_directory(directory, artifact=artifact) def stage(self, sandbox): # Stage the elements, and run integration commands where appropriate. if not self.__layout: # if no layout set, stage all dependencies into / for build_dep in self.dependencies(Scope.BUILD, recurse=False): with self.timed_activity("Staging {} at /" .format(build_dep.name), silent_nested=True): build_dep.stage_dependency_artifacts(sandbox, Scope.RUN, path="/") for build_dep in self.dependencies(Scope.BUILD, recurse=False): with self.timed_activity("Integrating {}".format(build_dep.name), silent_nested=True): for dep in build_dep.dependencies(Scope.RUN): dep.integrate(sandbox) else: # If layout, follow its rules. 
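#
# Illustrative aside (not part of this module): a hypothetical element
# kind wiring its YAML configuration into the public helpers above. This
# is not the bundled 'script' plugin, only a sketch of how a derived class
# is expected to use the API (assuming, as plugins normally do, that
# ScriptElement is importable from the buildstream package).
#
from buildstream import ScriptElement


class ExampleScriptLikeElement(ScriptElement):

    def configure(self, node):
        self.node_validate(node, ['commands'])
        # Commands run from /, output is collected from the default
        # install-root variable, and the root is kept read-only
        self.set_work_dir('/')
        self.set_install_root()
        self.set_root_read_only(True)
        # Variable substitution must be done up front, add_commands() performs none
        self.add_commands('commands', self.node_subst_list(node, 'commands'))


def setup():
    return ExampleScriptLikeElement
#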
for item in self.__layout: # Skip layout members which dont stage an element if not item['element']: continue element = self.search(Scope.BUILD, item['element']) if item['destination'] == '/': with self.timed_activity("Staging {} at /".format(element.name), silent_nested=True): element.stage_dependency_artifacts(sandbox, Scope.RUN) else: with self.timed_activity("Staging {} at {}" .format(element.name, item['destination']), silent_nested=True): real_dstdir = os.path.join(sandbox.get_directory(), item['destination'].lstrip(os.sep)) os.makedirs(os.path.dirname(real_dstdir), exist_ok=True) element.stage_dependency_artifacts(sandbox, Scope.RUN, path=item['destination']) for item in self.__layout: # Skip layout members which dont stage an element if not item['element']: continue element = self.search(Scope.BUILD, item['element']) # Integration commands can only be run for elements staged to / if item['destination'] == '/': with self.timed_activity("Integrating {}".format(element.name), silent_nested=True): for dep in element.dependencies(Scope.RUN): dep.integrate(sandbox) os.makedirs(os.path.join(sandbox.get_directory(), self.__install_root.lstrip(os.sep)), exist_ok=True) def assemble(self, sandbox): flags = 0 if self.__root_read_only: flags = flags | SandboxFlags.ROOT_READ_ONLY if self.__create_dev_shm: flags = flags | SandboxFlags.CREATE_DEV_SHM for groupname, commands in self.__commands.items(): with self.timed_activity("Running '{}'".format(groupname)): for cmd in commands: self.status("Running command", detail=cmd) # Note the -e switch to 'sh' means to exit with an error # if any untested command fails. exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'], flags) if exitcode != 0: raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode)) # Return where the result can be collected from return self.__install_root def setup(): return ScriptElement buildstream-1.6.9/buildstream/source.py000066400000000000000000001232721437515270000202420ustar00rootroot00000000000000# # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ Source - Base source class ========================== .. _core_source_abstract_methods: Abstract Methods ---------------- For loading and configuration purposes, Sources must implement the :ref:`Plugin base class abstract methods `. .. attention:: In order to ensure that all configuration data is processed at load time, it is important that all URLs have been processed during :func:`Plugin.configure() `. Source implementations *must* either call :func:`Source.translate_url() ` or :func:`Source.mark_download_url() ` for every URL that has been specified in the configuration during :func:`Plugin.configure() ` Sources expose the following abstract methods. Unless explicitly mentioned, these methods are mandatory to implement. * :func:`Source.get_consistency() ` Report the sources consistency state. 
* :func:`Source.load_ref() ` Load the ref from a specific YAML node * :func:`Source.get_ref() ` Fetch the source ref * :func:`Source.set_ref() ` Set a new ref explicitly * :func:`Source.track() ` Automatically derive a new ref from a symbolic tracking branch * :func:`Source.fetch() ` Fetch the actual payload for the currently set ref * :func:`Source.stage() ` Stage the sources for a given ref at a specified location * :func:`Source.init_workspace() ` Stage sources in a local directory for use as a workspace. **Optional**: If left unimplemented, this will default to calling :func:`Source.stage() ` * :func:`Source.get_source_fetchers() ` Get the objects that are used for fetching. **Optional**: This only needs to be implemented for sources that need to download from multiple URLs while fetching (e.g. a git repo and its submodules). For details on how to define a SourceFetcher, see :ref:`SourceFetcher `. * :func:`Source.validate_cache() ` Perform any validations which require the sources to be cached. **Optional**: This is completely optional and will do nothing if left unimplemented. Accessing previous sources -------------------------- *Since: 1.4* In the general case, all sources are fetched and tracked independently of one another. In situations where a source needs to access previous source(s) in order to perform its own track and/or fetch, following attributes can be set to request access to previous sources: * :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_TRACK` Indicate that access to previous sources is required during track * :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_FETCH` Indicate that access to previous sources is required during fetch The intended use of such plugins is to fetch external dependencies of other sources, typically using some kind of package manager, such that all the dependencies of the original source(s) are available at build time. When implementing such a plugin, implementors should adhere to the following guidelines: * Implementations must be able to store the obtained artifacts in a subdirectory. * Implementations must be able to deterministically generate a unique ref, such that two refs are different if and only if they produce different outputs. * Implementations must not introduce host contamination. .. _core_source_fetcher: SourceFetcher - Object for fetching individual URLs =================================================== Abstract Methods ---------------- SourceFetchers expose the following abstract methods. Unless explicitly mentioned, these methods are mandatory to implement. * :func:`SourceFetcher.fetch() ` Fetches the URL associated with this SourceFetcher, optionally taking an alias override. Class Reference --------------- """ import os from collections.abc import Mapping from contextlib import contextmanager from . import Plugin from . import _yaml, utils from .types import CoreWarnings from ._exceptions import BstError, ImplError, ErrorDomain from ._projectrefs import ProjectRefStorage class Consistency(): INCONSISTENT = 0 """Inconsistent Inconsistent sources have no explicit reference set. They cannot produce a cache key, be fetched or staged. They can only be tracked. """ RESOLVED = 1 """Resolved Resolved sources have a reference and can produce a cache key and be fetched, however they cannot be staged. """ CACHED = 2 """Cached Cached sources have a reference which is present in the local source cache. Only cached sources can be staged. 
""" class SourceError(BstError): """This exception should be raised by :class:`.Source` implementations to report errors to the user. Args: message (str): The breif error description to report to the user detail (str): A possibly multiline, more detailed error message reason (str): An optional machine readable reason string, used for test cases temporary (bool): An indicator to whether the error may occur if the operation was run again. (*Since: 1.2*) """ def __init__(self, message, *, detail=None, reason=None, temporary=False): super().__init__(message, detail=detail, domain=ErrorDomain.SOURCE, reason=reason, temporary=temporary) class SourceFetcher(): """SourceFetcher() This interface exists so that a source that downloads from multiple places (e.g. a git source with submodules) has a consistent interface for fetching and substituting aliases. *Since: 1.2* .. attention:: When implementing a SourceFetcher, remember to call :func:`Source.mark_download_url() ` for every URL found in the configuration data at :func:`Plugin.configure() ` time. """ def __init__(self): self.__alias = None ############################################################# # Abstract Methods # ############################################################# def fetch(self, alias_override=None, **kwargs): """Fetch remote sources and mirror them locally, ensuring at least that the specific reference is cached locally. Args: alias_override (str): The alias to use instead of the default one defined by the :ref:`aliases ` field in the project's config. Raises: :class:`.SourceError` Implementors should raise :class:`.SourceError` if the there is some network error or if the source reference could not be matched. """ raise ImplError("SourceFetcher '{}' does not implement fetch()".format(type(self))) ############################################################# # Public Methods # ############################################################# def mark_download_url(self, url): """Identifies the URL that this SourceFetcher uses to download This must be called during the fetcher's initialization Args: url (str): The url used to download. """ self.__alias = _extract_alias(url) ############################################################# # Private Methods used in BuildStream # ############################################################# # Returns the alias used by this fetcher def _get_alias(self): return self.__alias class Source(Plugin): """Source() Base Source class. All Sources derive from this class, this interface defines how the core will be interacting with Sources. 
""" __defaults = {} # The defaults from the project __defaults_set = False # Flag, in case there are not defaults at all BST_REQUIRES_PREVIOUS_SOURCES_TRACK = False """Whether access to previous sources is required during track When set to True: * all sources listed before this source in the given element will be fetched before this source is tracked * Source.track() will be called with an additional keyword argument `previous_sources_dir` where previous sources will be staged * this source can not be the first source for an element *Since: 1.4* """ BST_REQUIRES_PREVIOUS_SOURCES_FETCH = False """Whether access to previous sources is required during fetch When set to True: * all sources listed before this source in the given element will be fetched before this source is fetched * Source.fetch() will be called with an additional keyword argument `previous_sources_dir` where previous sources will be staged * this source can not be the first source for an element *Since: 1.4* """ def __init__(self, context, project, meta, *, alias_override=None, unique_id=None): provenance = _yaml.node_get_provenance(meta.config) super().__init__("{}-{}".format(meta.element_name, meta.element_index), context, project, provenance, "source", unique_id=unique_id) self.__element_name = meta.element_name # The name of the element owning this source self.__element_index = meta.element_index # The index of the source in the owning element's source list self.__element_kind = meta.element_kind # The kind of the element owning this source self.__directory = meta.directory # Staging relative directory self.__consistency = Consistency.INCONSISTENT # Cached consistency state # The alias_override is only set on a re-instantiated Source self.__alias_override = alias_override # Tuple of alias and its override to use instead self.__expected_alias = None # The primary alias self.__marked_urls = set() # Set of marked download URLs # FIXME: Reconstruct a MetaSource from a Source instead of storing it. self.__meta = meta # MetaSource stored so we can copy this source later. # Collect the composited element configuration and # ask the element to configure itself. self.__init_defaults(meta) self.__config = self.__extract_config(meta) self.__first_pass = meta.first_pass self._configure(self.__config) COMMON_CONFIG_KEYS = ['kind', 'directory'] """Common source config keys Source config keys that must not be accessed in configure(), and should be checked for using node_validate(). """ ############################################################# # Abstract Methods # ############################################################# def get_consistency(self): """Report whether the source has a resolved reference Returns: (:class:`.Consistency`): The source consistency """ raise ImplError("Source plugin '{}' does not implement get_consistency()".format(self.get_kind())) def load_ref(self, node): """Loads the *ref* for this Source from the specified *node*. Args: node (dict): The YAML node to load the ref from .. note:: The *ref* for the Source is expected to be read at :func:`Plugin.configure() ` time, this will only be used for loading refs from alternative locations than in the `element.bst` file where the given Source object has been declared. *Since: 1.2* """ raise ImplError("Source plugin '{}' does not implement load_ref()".format(self.get_kind())) def get_ref(self): """Fetch the internal ref, however it is represented Returns: (simple object): The internal source reference, or ``None`` .. 
note:: The reference is the user provided (or track resolved) value the plugin uses to represent a specific input, like a commit in a VCS or a tarball's checksum. Usually the reference is a string, but the plugin may choose to represent it with a tuple or such. Implementations *must* return a ``None`` value in the case that the ref was not loaded. E.g. a ``(None, None)`` tuple is not acceptable. """ raise ImplError("Source plugin '{}' does not implement get_ref()".format(self.get_kind())) def set_ref(self, ref, node): """Applies the internal ref, however it is represented Args: ref (simple object): The internal source reference to set, or ``None`` node (dict): The same dictionary which was previously passed to :func:`Plugin.configure() ` See :func:`Source.get_ref() ` for a discussion on the *ref* parameter. .. note:: Implementors must support the special ``None`` value here to allow clearing any existing ref. """ raise ImplError("Source plugin '{}' does not implement set_ref()".format(self.get_kind())) def track(self, **kwargs): """Resolve a new ref from the plugin's track option Args: previous_sources_dir (str): directory where previous sources are staged. Note that this keyword argument is available only when :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_TRACK` is set to True. Returns: (simple object): A new internal source reference, or None If the backend in question supports resolving references from a symbolic tracking branch or tag, then this should be implemented to perform this task on behalf of :ref:`bst track ` commands. This usually requires fetching new content from a remote origin to see if a new ref has appeared for your branch or tag. If the backend store allows one to query for a new ref from a symbolic tracking data without downloading then that is desirable. See :func:`Source.get_ref() ` for a discussion on the *ref* parameter. """ # Allow a non implementation return None def fetch(self, **kwargs): """Fetch remote sources and mirror them locally, ensuring at least that the specific reference is cached locally. Args: previous_sources_dir (str): directory where previous sources are staged. Note that this keyword argument is available only when :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_FETCH` is set to True. Raises: :class:`.SourceError` Implementors should raise :class:`.SourceError` if the there is some network error or if the source reference could not be matched. """ raise ImplError("Source plugin '{}' does not implement fetch()".format(self.get_kind())) def stage(self, directory): """Stage the sources to a directory Args: directory (str): Path to stage the source Raises: :class:`.SourceError` Implementors should assume that *directory* already exists and stage already cached sources to the passed directory. Implementors should raise :class:`.SourceError` when encountering some system error. """ raise ImplError("Source plugin '{}' does not implement stage()".format(self.get_kind())) def init_workspace(self, directory): """Initialises a new workspace Args: directory (str): Path of the workspace to init Raises: :class:`.SourceError` Default implementation is to call :func:`Source.stage() `. Implementors overriding this method should assume that *directory* already exists. Implementors should raise :class:`.SourceError` when encountering some system error. 
""" self.stage(directory) def get_source_fetchers(self): """Get the objects that are used for fetching If this source doesn't download from multiple URLs, returning None and falling back on the default behaviour is recommended. Returns: iterable: The Source's SourceFetchers, if any. .. note:: Implementors can implement this as a generator. The :func:`SourceFetcher.fetch() ` method will be called on the returned fetchers one by one, before consuming the next fetcher in the list. *Since: 1.2* """ return [] def validate_cache(self): """Implement any validations once we know the sources are cached This is guaranteed to be called only once for a given session once the sources are known to be :attr:`Consistency.CACHED `, if source tracking is enabled in the session for this source, then this will only be called if the sources become cached after tracking completes. *Since: 1.4* """ ############################################################# # Public Methods # ############################################################# def get_mirror_directory(self): """Fetches the directory where this source should store things Returns: (str): The directory belonging to this source """ # Create the directory if it doesnt exist context = self._get_context() directory = os.path.join(context.sourcedir, self.get_kind()) os.makedirs(directory, exist_ok=True) return directory def translate_url(self, url, *, alias_override=None, primary=True): """Translates the given url which may be specified with an alias into a fully qualified url. Args: url (str): A URL, which may be using an alias alias_override (str): Optionally, an URI to override the alias with. (*Since: 1.2*) primary (bool): Whether this is the primary URL for the source. (*Since: 1.2*) Returns: str: The fully qualified URL, with aliases resolved .. note:: This must be called for every URL in the configuration during :func:`Plugin.configure() ` if :func:`Source.mark_download_url() ` is not called. """ # Ensure that the download URL is also marked self.mark_download_url(url, primary=primary) # Alias overriding can happen explicitly (by command-line) or # implicitly (the Source being constructed with an __alias_override). if alias_override or self.__alias_override: url_alias, url_body = url.split(utils._ALIAS_SEPARATOR, 1) if url_alias: if alias_override: url = alias_override + url_body else: # Implicit alias overrides may only be done for one # specific alias, so that sources that fetch from multiple # URLs and use different aliases default to only overriding # one alias, rather than getting confused. override_alias = self.__alias_override[0] override_url = self.__alias_override[1] if url_alias == override_alias: url = override_url + url_body return url else: project = self._get_project() return project.translate_url(url, first_pass=self.__first_pass) def mark_download_url(self, url, *, primary=True): """Identifies the URL that this Source uses to download Args: url (str): The URL used to download primary (bool): Whether this is the primary URL for the source .. note:: This must be called for every URL in the configuration during :func:`Plugin.configure() ` if :func:`Source.translate_url() ` is not called. *Since: 1.2* """ # Only mark the Source level aliases on the main instance, not in # a reinstantiated instance in mirroring. 
if not self.__alias_override: if primary: expected_alias = _extract_alias(url) assert (self.__expected_alias is None or self.__expected_alias == expected_alias), \ "Primary URL marked twice with different URLs" self.__expected_alias = expected_alias # Enforce proper behaviour of plugins by ensuring that all # aliased URLs have been marked at Plugin.configure() time. # if self._get_configuring(): # Record marked urls while configuring # self.__marked_urls.add(url) else: # If an unknown aliased URL is seen after configuring, # this is an error. # # It is still possible that a URL that was not mentioned # in the element configuration can be marked, this is # the case for git submodules which might be automatically # discovered. # assert (url in self.__marked_urls or not _extract_alias(url)), \ "URL was not seen at configure time: {}".format(url) alias = _extract_alias(url) # Issue a (fatal-able) warning if the source used a URL without specifying an alias if not alias: self.warn( "{}: Use of unaliased source download URL: {}".format(self, url), warning_token=CoreWarnings.UNALIASED_URL, ) # If there is an alias in use, ensure that it exists in the project if alias: project = self._get_project() alias_uri = project.get_alias_uri(alias, first_pass=self.__first_pass) if alias_uri is None: raise SourceError( "{}: Invalid alias '{}' specified in URL: {}".format(self, alias, url), reason="invalid-source-alias", ) def get_project_directory(self): """Fetch the project base directory This is useful for sources which need to load resources stored somewhere inside the project. Returns: str: The project base directory """ project = self._get_project() return project.directory @contextmanager def tempdir(self): """Context manager for working in a temporary directory Yields: (str): A path to a temporary directory This should be used by source plugins directly instead of the tempfile module. This one will automatically cleanup in case of termination by catching the signal before os._exit(). It will also use the 'mirror directory' as expected for a source. """ mirrordir = self.get_mirror_directory() with utils._tempdir(dir=mirrordir) as tempdir: yield tempdir ############################################################# # Private Methods used in BuildStream # ############################################################# # Wrapper around preflight() method # def _preflight(self): try: self.preflight() except BstError as e: # Prepend provenance to the error raise SourceError("{}: {}".format(self, e), reason=e.reason) from e # Update cached consistency for a source # # This must be called whenever the state of a source may have changed. # def _update_state(self): if self.__consistency < Consistency.CACHED: # Source consistency interrogations are silent. context = self._get_context() with context.silence(): self.__consistency = self.get_consistency() # Give the Source an opportunity to validate the cached # sources as soon as the Source becomes Consistency.CACHED. 
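#
# Illustrative aside (not part of this module): the consistency mapping
# that concrete sources typically implement for get_consistency(), which
# _update_state() above interrogates. The helper and its arguments are
# hypothetical; it assumes Consistency is importable from the buildstream
# package, as plugins normally do.
#
import os

from buildstream import Consistency


def example_get_consistency(ref, cached_path):
    if ref is None:
        # No ref yet: the source can only be tracked
        return Consistency.INCONSISTENT
    if os.path.exists(cached_path):
        # The payload for this ref is already in the local source cache
        return Consistency.CACHED
    # A ref is set but its payload has not been fetched yet
    return Consistency.RESOLVED
#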
if self.__consistency == Consistency.CACHED: self.validate_cache() # Return cached consistency # def _get_consistency(self): return self.__consistency # Wrapper function around plugin provided fetch method # # Args: # previous_sources (list): List of Sources listed prior to this source # def _fetch(self, previous_sources): if self.BST_REQUIRES_PREVIOUS_SOURCES_FETCH: self.__ensure_previous_sources(previous_sources) with self.tempdir() as staging_directory: for src in previous_sources: src._stage(staging_directory) self.__do_fetch(previous_sources_dir=self.__ensure_directory(staging_directory)) else: self.__do_fetch() # Wrapper for stage() api which gives the source # plugin a fully constructed path considering the # 'directory' option # def _stage(self, directory): staging_directory = self.__ensure_directory(directory) self.stage(staging_directory) # Wrapper for init_workspace() def _init_workspace(self, directory): directory = self.__ensure_directory(directory) self.init_workspace(directory) # _get_unique_key(): # # Wrapper for get_unique_key() api # # Args: # include_source (bool): Whether to include the delegated source key # def _get_unique_key(self, include_source): key = {} key['directory'] = self.__directory if include_source: key['unique'] = self.get_unique_key() return key # _project_refs(): # # Gets the appropriate ProjectRefs object for this source, # which depends on whether the owning element is a junction # # Args: # project (Project): The project to check # def _project_refs(self, project): element_kind = self.__element_kind if element_kind == 'junction': return project.junction_refs return project.refs # _load_ref(): # # Loads the ref for the said source. # # Raises: # (SourceError): If the source does not implement load_ref() # # Returns: # (ref): A redundant ref specified inline for a project.refs using project # # This is partly a wrapper around `Source.load_ref()`, it will decide # where to load the ref from depending on which project the source belongs # to and whether that project uses a project.refs file. # # Note the return value is used to construct a summarized warning in the # case that the toplevel project uses project.refs and also lists refs # which will be ignored. # def _load_ref(self): context = self._get_context() project = self._get_project() toplevel = context.get_toplevel_project() redundant_ref = None element_name = self.__element_name element_idx = self.__element_index def do_load_ref(node): try: self.load_ref(ref_node) except ImplError as e: raise SourceError("{}: Storing refs in project.refs is not supported by '{}' sources" .format(self, self.get_kind()), reason="unsupported-load-ref") from e # If the main project overrides the ref, use the override if project is not toplevel and toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS: refs = self._project_refs(toplevel) ref_node = refs.lookup_ref(project.name, element_name, element_idx) if ref_node is not None: do_load_ref(ref_node) return redundant_ref # If the project itself uses project.refs, clear the ref which # was already loaded via Source.configure(), as this would # violate the rule of refs being either in project.refs or in # the elements themselves. 
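#
# Illustrative aside (not part of this module): a hypothetical fragment
# showing the keyword argument that _fetch() above passes when a plugin
# sets BST_REQUIRES_PREVIOUS_SOURCES_FETCH. This is not a complete plugin
# (the other abstract methods are omitted), and the lockfile name is
# invented for the example.
#
import os

from buildstream import Source, SourceError


class ExampleLockfileSource(Source):

    BST_REQUIRES_PREVIOUS_SOURCES_FETCH = True

    def fetch(self, previous_sources_dir=None, **kwargs):
        # previous_sources_dir holds the staged output of every source
        # listed before this one in the owning element
        lockfile = os.path.join(previous_sources_dir, 'requirements.txt')
        if not os.path.exists(lockfile):
            raise SourceError("{}: previous sources provide no requirements.txt".format(self))
        # ... download the dependencies pinned in the lockfile here ...
#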
# if project.ref_storage == ProjectRefStorage.PROJECT_REFS: # First warn if there is a ref already loaded, and reset it redundant_ref = self.get_ref() if redundant_ref is not None: self.set_ref(None, {}) # Try to load the ref refs = self._project_refs(project) ref_node = refs.lookup_ref(project.name, element_name, element_idx) if ref_node is not None: do_load_ref(ref_node) return redundant_ref # _set_ref() # # Persists the ref for this source. This will decide where to save the # ref, or refuse to persist it, depending on active ref-storage project # settings. # # Args: # new_ref (smth): The new reference to save # save (bool): Whether to write the new reference to file or not # # Returns: # (bool): Whether the ref has changed # # Raises: # (SourceError): In the case we encounter errors saving a file to disk # def _set_ref(self, new_ref, *, save): context = self._get_context() project = self._get_project() toplevel = context.get_toplevel_project() toplevel_refs = self._project_refs(toplevel) provenance = self._get_provenance() element_name = self.__element_name element_idx = self.__element_index # # Step 1 - Obtain the node # if project is toplevel: if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS: node = toplevel_refs.lookup_ref(project.name, element_name, element_idx, write=True) else: node = provenance.node else: if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS: node = toplevel_refs.lookup_ref(project.name, element_name, element_idx, write=True) else: node = {} # # Step 2 - Set the ref in memory, and determine changed state # current_ref = self.get_ref() # pylint: disable=assignment-from-no-return # Set the ref regardless of whether it changed, the # TrackQueue() will want to update a specific node with # the ref, regardless of whether the original has changed. self.set_ref(new_ref, node) if current_ref == new_ref or not save: # Note: We do not look for and propagate changes at this point # which might result in desync depending if something changes about # tracking in the future. For now, this is quite safe. return False def do_save_refs(refs): try: refs.save() except OSError as e: raise SourceError("{}: Error saving source reference to 'project.refs': {}" .format(self, e), reason="save-ref-error") from e # # Step 3 - Apply the change in project data # if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS: do_save_refs(toplevel_refs) else: if provenance.filename.project is toplevel: # Save the ref in the originating file # try: _yaml.dump(provenance.toplevel, provenance.filename.name) except OSError as e: raise SourceError("{}: Error saving source reference to '{}': {}" .format(self, provenance.filename.name, e), reason="save-ref-error") from e elif provenance.filename.project is project: self.warn("{}: Not persisting new reference in junctioned project".format(self)) elif provenance.filename.project is None: assert provenance.filename.name == '' assert provenance.filename.shortname == '' raise SourceError("{}: Error saving source reference to synthetic node." 
.format(self)) else: raise SourceError("{}: Cannot track source in a fragment from a junction" .format(provenance.filename.shortname), reason="tracking-junction-fragment") return True # Wrapper for track() # # Args: # previous_sources (list): List of Sources listed prior to this source # def _track(self, previous_sources): if self.BST_REQUIRES_PREVIOUS_SOURCES_TRACK: self.__ensure_previous_sources(previous_sources) with self.tempdir() as staging_directory: for src in previous_sources: src._stage(staging_directory) new_ref = self.__do_track(previous_sources_dir=self.__ensure_directory(staging_directory)) else: new_ref = self.__do_track() current_ref = self.get_ref() if new_ref is None: # No tracking, keep current ref new_ref = current_ref if current_ref != new_ref: self.info("Found new revision: {}".format(new_ref)) # Save ref in local process for subsequent sources self._set_ref(new_ref, save=False) return new_ref # _requires_previous_sources() # # If a plugin requires access to previous sources at track or fetch time, # then it cannot be the first source of an elemenet. # # Returns: # (bool): Whether this source requires access to previous sources # def _requires_previous_sources(self): return self.BST_REQUIRES_PREVIOUS_SOURCES_TRACK or self.BST_REQUIRES_PREVIOUS_SOURCES_FETCH # Returns the alias if it's defined in the project def _get_alias(self): alias = self.__expected_alias project = self._get_project() if project.get_alias_uri(alias, first_pass=self.__first_pass): # The alias must already be defined in the project's aliases # otherwise http://foo gets treated like it contains an alias return alias else: return None ############################################################# # Local Private Methods # ############################################################# # __clone_for_uri() # # Clone the source with an alternative URI setup for the alias # which this source uses. # # This is used for iteration over source mirrors. # # Args: # uri (str): The alternative URI for this source's alias # # Returns: # (Source): A new clone of this Source, with the specified URI # as the value of the alias this Source has marked as # primary with either mark_download_url() or # translate_url(). # def __clone_for_uri(self, uri): project = self._get_project() context = self._get_context() alias = self._get_alias() source_kind = type(self) clone = source_kind(context, project, self.__meta, alias_override=(alias, uri), unique_id=self._unique_id) # Do the necessary post instantiation routines here # clone._preflight() clone._load_ref() clone._update_state() return clone # Tries to call fetch for every mirror, stopping once it succeeds def __do_fetch(self, **kwargs): project = self._get_project() source_fetchers = self.get_source_fetchers() if source_fetchers: for fetcher in source_fetchers: alias = fetcher._get_alias() success = False for uri in project.get_alias_uris(alias, first_pass=self.__first_pass): try: fetcher.fetch(uri) # FIXME: Need to consider temporary vs. permanent failures, # and how this works with retries. 
except BstError as e: last_error = e continue success = True break if not success: raise last_error # pylint: disable=used-before-assignment else: alias = self._get_alias() if self.__first_pass: mirrors = project.first_pass_config.mirrors else: mirrors = project.config.mirrors if not mirrors or not alias: self.fetch(**kwargs) return for uri in project.get_alias_uris(alias, first_pass=self.__first_pass): new_source = self.__clone_for_uri(uri) try: new_source.fetch(**kwargs) # FIXME: Need to consider temporary vs. permanent failures, # and how this works with retries. except BstError as e: last_error = e continue return raise last_error # Tries to call track for every mirror, stopping once it succeeds def __do_track(self, **kwargs): project = self._get_project() alias = self._get_alias() if self.__first_pass: mirrors = project.first_pass_config.mirrors else: mirrors = project.config.mirrors # If there are no mirrors, or no aliases to replace, there's nothing to do here. if not mirrors or not alias: return self.track(**kwargs) # NOTE: We are assuming here that tracking only requires substituting the # first alias used for uri in reversed(project.get_alias_uris(alias, first_pass=self.__first_pass)): new_source = self.__clone_for_uri(uri) try: ref = new_source.track(**kwargs) # pylint: disable=assignment-from-none # FIXME: Need to consider temporary vs. permanent failures, # and how this works with retries. except BstError as e: last_error = e continue return ref raise last_error # pylint: disable=used-before-assignment # Ensures a fully constructed path and returns it def __ensure_directory(self, directory): if self.__directory is not None: directory = os.path.join(directory, self.__directory.lstrip(os.sep)) try: os.makedirs(directory, exist_ok=True) except OSError as e: raise SourceError("Failed to create staging directory: {}" .format(e), reason="ensure-stage-dir-fail") from e return directory def __init_defaults(self, meta): if not self.__defaults_set: project = self._get_project() if meta.first_pass: sources = project.first_pass_config.source_overrides else: sources = project.source_overrides type(self).__defaults = sources.get(self.get_kind(), {}) type(self).__defaults_set = True # This will resolve the final configuration to be handed # off to source.configure() # def __extract_config(self, meta): config = _yaml.node_get(self.__defaults, Mapping, 'config', default_value={}) config = _yaml.node_chain_copy(config) _yaml.composite(config, meta.config) _yaml.node_final_assertions(config) return config # Ensures that previous sources have been tracked and fetched. 
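# --- Illustrative sketch, not part of the original module -------------------
# The mirror fallback implemented by __do_fetch() and __do_track() above
# reduces to the following pattern: try each candidate URI in turn, and only
# re-raise the last error if none of them succeeded.  'candidate_uris' and
# 'fetch_one' are hypothetical stand-ins for project.get_alias_uris() and the
# plugin's fetch/track call; the sketch is kept in comments so as not to
# modify the Source class itself.
#
#     def _try_each_uri(candidate_uris, fetch_one):
#         last_error = None
#         for uri in candidate_uris:
#             try:
#                 return fetch_one(uri)
#             except Exception as error:  # BuildStream narrows this to BstError
#                 last_error = error
#         if last_error is not None:
#             raise last_error
#         raise ValueError("no URIs to try")
# -----------------------------------------------------------------------------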
# def __ensure_previous_sources(self, previous_sources): for index, src in enumerate(previous_sources): # BuildStream should track sources in the order they appear so # previous sources should never be in an inconsistent state assert src.get_consistency() != Consistency.INCONSISTENT if src.get_consistency() == Consistency.RESOLVED: src._fetch(previous_sources[0:index]) def _extract_alias(url): parts = url.split(utils._ALIAS_SEPARATOR, 1) if len(parts) > 1 and not parts[0].lower() in utils._URI_SCHEMES: return parts[0] else: return "" buildstream-1.6.9/buildstream/types.py000066400000000000000000000115271437515270000201050ustar00rootroot00000000000000# # Copyright (C) 2018 Bloomberg LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom # Jim MacArthur # Benjamin Schubert """ Foundation types ================ """ from enum import Enum import heapq class Scope(Enum): """Defines the scope of dependencies to include for a given element when iterating over the dependency graph in APIs like :func:`Element.dependencies() ` """ ALL = 1 """All elements which the given element depends on, following all elements required for building. Including the element itself. """ BUILD = 2 """All elements required for building the element, including their respective run dependencies. Not including the given element itself. """ RUN = 3 """All elements required for running the element. Including the element itself. """ NONE = 4 """Just the element itself, no dependencies. *Since: 1.4* """ class Consistency(): """Defines the various consistency states of a :class:`.Source`. """ INCONSISTENT = 0 """Inconsistent Inconsistent sources have no explicit reference set. They cannot produce a cache key, be fetched or staged. They can only be tracked. """ RESOLVED = 1 """Resolved Resolved sources have a reference and can produce a cache key and be fetched, however they cannot be staged. """ CACHED = 2 """Cached Sources have a cached unstaged copy in the source directory. """ class CoreWarnings(): """CoreWarnings() Some common warnings which are raised by core functionalities within BuildStream are found in this class. """ OVERLAPS = "overlaps" """ This warning will be produced when buildstream detects an overlap on an element which is not whitelisted. See :ref:`Overlap Whitelist ` """ REF_NOT_IN_TRACK = "ref-not-in-track" """ This warning will be produced when a source is configured with a reference which is found to be invalid based on the configured track """ BAD_ELEMENT_SUFFIX = "bad-element-suffix" """ This warning will be produced when an element whose name does not end in .bst is referenced either on the command line or by another element """ BAD_CHARACTERS_IN_NAME = "bad-characters-in-name" """ This warning will be produces when filename for a target contains invalid characters in its name. 
""" UNALIASED_URL = "unaliased-url" """ A URL used for fetching a sources was specified without specifying any :ref:`alias ` """ # _KeyStrength(): # # Strength of cache key # class _KeyStrength(Enum): # Includes strong cache keys of all build dependencies and their # runtime dependencies. STRONG = 1 # Includes names of direct build dependencies but does not include # cache keys of dependencies. WEAK = 2 # _UniquePriorityQueue(): # # Implements a priority queue that adds only each key once. # # The queue will store and priority based on a tuple (key, item). # class _UniquePriorityQueue: def __init__(self): self._items = set() self._heap = [] # push(): # # Push a new item in the queue. # # If the item is already present in the queue as identified by the key, # this is a noop. # # Args: # key (hashable, comparable): unique key to use for checking for # the object's existence and used for # ordering # item (any): item to push to the queue # def push(self, key, item): if key not in self._items: self._items.add(key) heapq.heappush(self._heap, (key, item)) # pop(): # # Pop the next item from the queue, by priority order. # # Returns: # (any): the next item # # Throw: # IndexError: when the list is empty # def pop(self): key, item = heapq.heappop(self._heap) self._items.remove(key) return item def __len__(self): return len(self._heap) buildstream-1.6.9/buildstream/utils.py000066400000000000000000001172121437515270000200770ustar00rootroot00000000000000# # Copyright (C) 2016-2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom """ Utilities ========= """ import calendar import errno import hashlib import os import re import shutil import signal import stat import string import subprocess import tempfile import itertools import functools from contextlib import contextmanager import psutil from . import _signals from ._exceptions import BstError, ErrorDomain # The separator we use for user specified aliases _ALIAS_SEPARATOR = ':' _URI_SCHEMES = ["http", "https", "ftp", "file", "git", "sftp", "ssh"] class UtilError(BstError): """Raised by utility functions when system calls fail. This will be handled internally by the BuildStream core, if you need to handle this error, then it should be reraised, or either of the :class:`.ElementError` or :class:`.SourceError` exceptions should be raised from this error. """ def __init__(self, message, reason=None): super().__init__(message, domain=ErrorDomain.UTIL, reason=reason) class ProgramNotFoundError(BstError): """Raised if a required program is not found. It is normally unneeded to handle this exception from plugin code. """ def __init__(self, message, reason=None): super().__init__(message, domain=ErrorDomain.PROG_NOT_FOUND, reason=reason) class FileListResult(): """An object which stores the result of one of the operations which run on a list of files. 
""" def __init__(self): self.overwritten = [] """List of files which were overwritten in the target directory""" self.ignored = [] """List of files which were ignored, because they would have replaced a non empty directory""" self.failed_attributes = [] """List of files for which attributes could not be copied over""" self.files_written = [] """List of files that were written.""" def combine(self, other): """Create a new FileListResult that contains the results of both. """ ret = FileListResult() ret.overwritten = self.overwritten + other.overwritten ret.ignored = self.ignored + other.ignored ret.failed_attributes = self.failed_attributes + other.failed_attributes ret.files_written = self.files_written + other.files_written return ret def list_relative_paths(directory, *, list_dirs=True): """A generator for walking directory relative paths This generator is useful for checking the full manifest of a directory. Note that directories will be yielded only if they are empty. Symbolic links will not be followed, but will be included in the manifest. Args: directory (str): The directory to list files in list_dirs (bool): Whether to list directories Yields: Relative filenames in `directory` """ for (dirpath, dirnames, filenames) in os.walk(directory): # Modifying the dirnames directly ensures that the os.walk() generator # allows us to specify the order in which they will be iterated. dirnames.sort() filenames.sort() relpath = os.path.relpath(dirpath, directory) # We don't want "./" pre-pended to all the entries in the root of # `directory`, prefer to have no prefix in that case. basepath = relpath if relpath != '.' and dirpath != directory else '' # os.walk does not decend into symlink directories, which # makes sense because otherwise we might have redundant # directories, or end up descending into directories outside # of the walk() directory. # # But symlinks to directories are still identified as # subdirectories in the walked `dirpath`, so we extract # these symlinks from `dirnames` # if list_dirs: for d in dirnames: fullpath = os.path.join(dirpath, d) if os.path.islink(fullpath): yield os.path.join(basepath, d) # We've decended into an empty directory, in this case we # want to include the directory itself, but not in any other # case. if list_dirs and not filenames: yield relpath # List the filenames in the walked directory for f in filenames: yield os.path.join(basepath, f) # pylint: disable=anomalous-backslash-in-string def glob(paths, pattern): """A generator to yield paths which match the glob pattern Args: paths (iterable): The paths to check pattern (str): A glob pattern This generator will iterate over the passed *paths* and yield only the filenames which matched the provided *pattern*. +--------+------------------------------------------------------------------+ | Meta | Description | +========+==================================================================+ | \* | Zero or more of any character, excepting path separators | +--------+------------------------------------------------------------------+ | \** | Zero or more of any character, including path separators | +--------+------------------------------------------------------------------+ | ? 
| One of any character, except for path separators | +--------+------------------------------------------------------------------+ | [abc] | One of any of the specified characters | +--------+------------------------------------------------------------------+ | [a-z] | One of the characters in the specified range | +--------+------------------------------------------------------------------+ | [!abc] | Any single character, except the specified characters | +--------+------------------------------------------------------------------+ | [!a-z] | Any single character, except those in the specified range | +--------+------------------------------------------------------------------+ .. note:: Escaping of the metacharacters is not possible """ # Ensure leading slash, just because we want patterns # to match file lists regardless of whether the patterns # or file lists had a leading slash or not. if not pattern.startswith(os.sep): pattern = os.sep + pattern expression = _glob2re(pattern) regexer = re.compile(expression, re.MULTILINE | re.DOTALL) for filename in paths: filename_try = filename if not filename_try.startswith(os.sep): filename_try = os.sep + filename_try if regexer.match(filename_try): yield filename def sha256sum(filename): """Calculate the sha256sum of a file Args: filename (str): A path to a file on disk Returns: (str): An sha256 checksum string Raises: UtilError: In the case there was an issue opening or reading `filename` """ try: h = hashlib.sha256() with open(filename, "rb") as f: for chunk in iter(lambda: f.read(4096), b""): h.update(chunk) except OSError as e: raise UtilError("Failed to get a checksum of file '{}': {}" .format(filename, e)) from e return h.hexdigest() def safe_copy(src, dest, *, result=None): """Copy a file while preserving attributes Args: src (str): The source filename dest (str): The destination filename result (:class:`~.FileListResult`): An optional collective result Raises: UtilError: In the case of unexpected system call failures This is almost the same as shutil.copy2(), except that we unlink *dest* before overwriting it if it exists, just incase *dest* is a hardlink to a different file. """ # First unlink the target if it exists try: os.unlink(dest) except OSError as e: if e.errno != errno.ENOENT: raise UtilError("Failed to remove destination file '{}': {}" .format(dest, e)) from e shutil.copyfile(src, dest) try: shutil.copystat(src, dest) except PermissionError: # If we failed to copy over some file stats, dont treat # it as an unrecoverable error, but provide some feedback # we can use for a warning. # # This has a tendency of happening when attempting to copy # over extended file attributes. if result: result.failed_attributes.append(dest) except shutil.Error as e: raise UtilError("Failed to copy '{} -> {}': {}" .format(src, dest, e)) from e def safe_link(src, dest, *, result=None): """Try to create a hardlink, but resort to copying in the case of cross device links. 
Args: src (str): The source filename dest (str): The destination filename result (:class:`~.FileListResult`): An optional collective result Raises: UtilError: In the case of unexpected system call failures """ # First unlink the target if it exists try: os.unlink(dest) except OSError as e: if e.errno != errno.ENOENT: raise UtilError("Failed to remove destination file '{}': {}" .format(dest, e)) from e # If we can't link it due to cross-device hardlink, copy try: os.link(src, dest) except OSError as e: if e.errno == errno.EXDEV: safe_copy(src, dest) else: raise UtilError("Failed to link '{} -> {}': {}" .format(src, dest, e)) from e def safe_remove(path): """Removes a file or directory This will remove a file if it exists, and will remove a directory if the directory is empty. Args: path (str): The path to remove Returns: True if `path` was removed or did not exist, False if `path` was a non empty directory. Raises: UtilError: In the case of unexpected system call failures """ if os.path.lexists(path): # Try to remove anything that is in the way, but issue # a warning instead if it removes a non empty directory try: os.unlink(path) except OSError as e: if e.errno != errno.EISDIR: raise UtilError("Failed to remove '{}': {}" .format(path, e)) from e try: os.rmdir(path) except OSError as err: if err.errno == errno.ENOTEMPTY: return False else: raise UtilError("Failed to remove '{}': {}" .format(path, err)) from err return True def copy_files(src, dest, *, files=None, ignore_missing=False, report_written=False): """Copy files from source to destination. Args: src (str): The source file or directory dest (str): The destination directory files (list): Optional list of files in `src` to copy ignore_missing (bool): Dont raise any error if a source file is missing report_written (bool): Add to the result object the full list of files written Returns: (:class:`~.FileListResult`): The result describing what happened during this file operation Raises: UtilError: In the case of unexpected system call failures .. note:: Directories in `dest` are replaced with files from `src`, unless the existing directory in `dest` is not empty in which case the path will be reported in the return value. """ presorted = False if files is None: files = list_relative_paths(src) presorted = True result = FileListResult() try: _process_list(src, dest, files, safe_copy, result, ignore_missing=ignore_missing, report_written=report_written, presorted=presorted) except OSError as e: raise UtilError("Failed to copy '{} -> {}': {}" .format(src, dest, e)) from e return result def link_files(src, dest, *, files=None, ignore_missing=False, report_written=False): """Hardlink files from source to destination. Args: src (str): The source file or directory dest (str): The destination directory files (list): Optional list of files in `src` to link ignore_missing (bool): Dont raise any error if a source file is missing report_written (bool): Add to the result object the full list of files written Returns: (:class:`~.FileListResult`): The result describing what happened during this file operation Raises: UtilError: In the case of unexpected system call failures .. note:: Directories in `dest` are replaced with files from `src`, unless the existing directory in `dest` is not empty in which case the path will be reported in the return value. .. note:: If a hardlink cannot be created due to crossing filesystems, then the file will be copied instead. 
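    **Example:** (an illustrative sketch, not from the original documentation)

    .. code:: python

       # Hardlink a subset of the tree; files which cannot be hardlinked
       # across filesystems are silently copied instead.
       result = link_files('/some/srcdir', '/some/destdir',
                           files=['usr/bin/bash', 'usr/share/doc/README'])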
""" presorted = False if files is None: files = list_relative_paths(src) presorted = True result = FileListResult() try: _process_list(src, dest, files, safe_link, result, ignore_missing=ignore_missing, report_written=report_written, presorted=presorted) except OSError as e: raise UtilError("Failed to link '{} -> {}': {}" .format(src, dest, e)) from e return result def get_host_tool(name): """Get the full path of a host tool Args: name (str): The name of the program to search for Returns: The full path to the program, if found Raises: :class:`.ProgramNotFoundError` """ search_path = os.environ.get('PATH') program_path = shutil.which(name, path=search_path) if not program_path: raise ProgramNotFoundError("Did not find '{}' in PATH: {}".format(name, search_path)) return program_path def url_directory_name(url): """Normalizes a url into a directory name Args: url (str): A url string Returns: A string which can be used as a directory name """ valid_chars = string.digits + string.ascii_letters + '%_' def transl(x): return x if x in valid_chars else '_' return ''.join([transl(x) for x in url]) def get_bst_version(): """Gets the major, minor release portion of the BuildStream version. Returns: (int): The major version (int): The minor version """ # Import this only conditionally, it's not resolved at bash complete time from . import __version__ # pylint: disable=import-outside-toplevel versions = __version__.split('.')[:2] if versions[0] == '0+untagged': raise UtilError("Your git repository has no tags - BuildStream can't " "determine its version. Please run `git fetch --tags`.") try: return (int(versions[0]), int(versions[1])) except IndexError as e: raise UtilError("Cannot detect Major and Minor parts of the version\n" "Version: {} not in XX.YY.whatever format" .format(__version__)) from e except ValueError as e: raise UtilError("Cannot convert version to integer numbers\n" "Version: {} not in Integer.Integer.whatever format" .format(__version__)) from e @contextmanager def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None, errors=None, newline=None, closefd=True, opener=None, tempdir=None): """Save a file with a temporary name and rename it into place when ready. This is a context manager which is meant for saving data to files. The data is written to a temporary file, which gets renamed to the target name when the context is closed. This avoids readers of the file from getting an incomplete file. **Example:** .. code:: python with save_file_atomic('/path/to/foo', 'w') as f: f.write(stuff) The file will be called something like ``tmpCAFEBEEF`` until the context block ends, at which point it gets renamed to ``foo``. The temporary file will be created in the same directory as the output file. The ``filename`` parameter must be an absolute path. If an exception occurs or the process is terminated, the temporary file will be deleted. 
""" # This feature has been proposed for upstream Python in the past, e.g.: # https://bugs.python.org/issue8604 assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path" if tempdir is None: tempdir = os.path.dirname(filename) fd, tempname = tempfile.mkstemp(dir=tempdir) os.close(fd) f = open(tempname, mode=mode, buffering=buffering, encoding=encoding, # pylint: disable=consider-using-with errors=errors, newline=newline, closefd=closefd, opener=opener) def cleanup_tempfile(): f.close() try: os.remove(tempname) except FileNotFoundError: pass except OSError as e: raise UtilError("Failed to cleanup temporary file {}: {}".format(tempname, e)) from e try: with _signals.terminator(cleanup_tempfile): f.real_filename = filename yield f f.close() # This operation is atomic, at least on platforms we care about: # https://bugs.python.org/issue8828 os.replace(tempname, filename) except Exception: cleanup_tempfile() raise # _get_dir_size(): # # Get the disk usage of a given directory in bytes. # # This function assumes that files do not inadvertantly # disappear while this function is running. # # Arguments: # (str) The path whose size to check. # # Returns: # (int) The size on disk in bytes. # def _get_dir_size(path): path = os.path.abspath(path) def get_size(path): total = 0 for f in os.scandir(path): total += f.stat(follow_symlinks=False).st_size if f.is_dir(follow_symlinks=False): total += get_size(f.path) return total return get_size(path) # _get_volume_size(): # # Gets the overall usage and total size of a mounted filesystem in bytes. # # Args: # path (str): The path to check # # Returns: # (int): The total number of bytes on the volume # (int): The number of available bytes on the volume # def _get_volume_size(path): try: stat_ = os.statvfs(path) except OSError as e: raise UtilError("Failed to retrieve stats on volume for path '{}': {}" .format(path, e)) from e return stat_.f_bsize * stat_.f_blocks, stat_.f_bsize * stat_.f_bavail # _parse_size(): # # Convert a string representing data size to a number of # bytes. E.g. "2K" -> 2048. # # This uses the same format as systemd's # [resource-control](https://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#). # # Arguments: # size (str) The string to parse # volume (str) A path on the volume to consider for percentage # specifications # # Returns: # (int|None) The number of bytes, or None if 'infinity' was specified. # # Raises: # UtilError if the string is not a valid data size. # def _parse_size(size, volume): if size == 'infinity': return None matches = re.fullmatch(r'([0-9]+\.?[0-9]*)([KMGT%]?)', size) if matches is None: raise UtilError("{} is not a valid data size.".format(size)) num, unit = matches.groups() if unit == '%': num = float(num) if num > 100: raise UtilError("{}% is not a valid percentage value.".format(num)) disk_size, _ = _get_volume_size(volume) return disk_size * (num / 100) units = ('', 'K', 'M', 'G', 'T') return int(num) * 1024**units.index(unit) # _pretty_size() # # Converts a number of bytes into a string representation in KB, MB, GB, TB # represented as K, M, G, T etc. # # Args: # size (int): The size to convert in bytes. # dec_places (int): The number of decimal places to output to. 
# # Returns: # (str): The string representation of the number of bytes in the largest suitable unit. def _pretty_size(size, dec_places=0): psize = size unit = 'B' for unit in ('B', 'K', 'M', 'G', 'T'): if psize < 1024: break psize /= 1024 return "{size:g}{unit}".format(size=round(psize, dec_places), unit=unit) # A sentinel to be used as a default argument for functions that need # to distinguish between a kwarg set to None and an unset kwarg. _sentinel = object() # Main process pid _main_pid = os.getpid() # _is_main_process() # # Return whether we are in the main process or not. # def _is_main_process(): assert _main_pid is not None return os.getpid() == _main_pid # Recursively remove directories, ignoring file permissions as much as # possible. def _force_rmtree(rootpath, **kwargs): for root, dirs, _ in os.walk(rootpath): for d in dirs: path = os.path.join(root, d.lstrip('/')) if os.path.exists(path) and not os.path.islink(path): try: os.chmod(path, 0o755) except OSError as e: raise UtilError("Failed to ensure write permission on file '{}': {}" .format(path, e)) from e try: shutil.rmtree(rootpath, **kwargs) except OSError as e: raise UtilError("Failed to remove cache directory '{}': {}" .format(rootpath, e)) from e # Recursively make directories in target area def _copy_directories(srcdir, destdir, target): this_dir = os.path.dirname(target) new_dir = os.path.join(destdir, this_dir) if not os.path.lexists(new_dir): if this_dir: yield from _copy_directories(srcdir, destdir, this_dir) old_dir = os.path.join(srcdir, this_dir) if os.path.lexists(old_dir): dir_stat = os.lstat(old_dir) mode = dir_stat.st_mode if stat.S_ISDIR(mode) or stat.S_ISLNK(mode): os.makedirs(new_dir) yield (new_dir, mode) else: raise UtilError('Source directory tree has file where ' 'directory expected: {}'.format(old_dir)) @functools.lru_cache(maxsize=64) def _resolve_symlinks(path): return os.path.realpath(path) def _ensure_real_directory(root, destpath): # The realpath in the sandbox may refer to a file outside of the # sandbox when any of the directory branches are a symlink to an # absolute path. # # This should not happen as we rely on relative_symlink_target() below # when staging the actual symlinks which may lead up to this path. # destpath_resolved = _resolve_symlinks(destpath) if not destpath_resolved.startswith(_resolve_symlinks(root)): raise UtilError('Destination path resolves to a path outside ' + 'of the staging area\n\n' + ' Destination path: {}\n'.format(destpath) + ' Real path: {}'.format(destpath_resolved)) # Ensure the real destination path exists before trying to get the mode # of the real destination path. # # It is acceptable that chunks create symlinks inside artifacts which # refer to non-existing directories, they will be created on demand here # at staging time. # if not os.path.exists(destpath_resolved): os.makedirs(destpath_resolved) return destpath_resolved # _process_list() # # Internal helper for copying/moving/linking file lists # # This will handle directories, symlinks and special files # internally, the `actionfunc` will only be called for regular files.
# # Args: # srcdir: The source base directory # destdir: The destination base directory # filelist: List of relative file paths # actionfunc: The function to call for regular files # result: The FileListResult # ignore_missing: Don't raise any error if a source file is missing # report_written: Whether to record the list of written files in the result # presorted: Whether the passed list is known to be presorted # # def _process_list(srcdir, destdir, filelist, actionfunc, result, ignore_missing=False, report_written=False, presorted=False): # Keep track of directory permissions, since these need to be set # *after* files have been written. permissions = [] # Sorting the list of files is necessary to ensure that we process # symbolic links which lead to directories before processing files inside # those directories. if not presorted: filelist = sorted(filelist) # Now walk the list for path in filelist: srcpath = os.path.join(srcdir, path) destpath = os.path.join(destdir, path) # Add to the results the list of files written if report_written: result.files_written.append(path) # Collect overlaps if os.path.lexists(destpath) and not os.path.isdir(destpath): result.overwritten.append(path) # The destination directory may not have been created separately permissions.extend(_copy_directories(srcdir, destdir, path)) # Ensure that broken symlinks to directories have their targets # created before attempting to stage files across broken # symlink boundaries _ensure_real_directory(destdir, os.path.dirname(destpath)) try: file_stat = os.lstat(srcpath) mode = file_stat.st_mode except FileNotFoundError as e: # Skip this missing file if ignore_missing: continue raise UtilError("Source file is missing: {}".format(srcpath)) from e if stat.S_ISDIR(mode): # Ensure directory exists in destination if not os.path.exists(destpath): _ensure_real_directory(destdir, destpath) dest_stat = os.lstat(_resolve_symlinks(destpath)) if not stat.S_ISDIR(dest_stat.st_mode): raise UtilError('Destination not a directory. source has {}' ' destination has {}'.format(srcpath, destpath)) permissions.append((destpath, os.stat(srcpath).st_mode)) elif stat.S_ISLNK(mode): if not safe_remove(destpath): result.ignored.append(path) continue target = os.readlink(srcpath) target = _relative_symlink_target(destdir, destpath, target) os.symlink(target, destpath) elif stat.S_ISREG(mode): # Process the file. if not safe_remove(destpath): result.ignored.append(path) continue actionfunc(srcpath, destpath, result=result) elif stat.S_ISCHR(mode) or stat.S_ISBLK(mode): # Block or character device. Put contents of st_dev in a mknod. if not safe_remove(destpath): result.ignored.append(path) continue if os.path.lexists(destpath): os.remove(destpath) os.mknod(destpath, file_stat.st_mode, file_stat.st_rdev) os.chmod(destpath, file_stat.st_mode) else: # Unsupported type. raise UtilError('Cannot extract {} into staging-area. Unsupported type.'.format(srcpath)) # Write directory permissions now that all files have been written for d, perms in permissions: os.chmod(d, perms) # _relative_symlink_target() # # Fetches a relative path for a symlink with an absolute target # # @root: The staging area root location # @symlink: Location of the symlink in staging area (including the root path) # @target: The symbolic link target, which may be an absolute path # # If @target is an absolute path, a relative path from the symbolic link # location will be returned, otherwise if @target is a relative path, it will # be returned unchanged. # # Using relative symlinks helps to keep the target self contained when staging # files onto the target.
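# (Worked example, illustrative rather than from the original comment: with
#  root="/stage", symlink="/stage/usr/bin/sh" and target="/bin/bash", the
#  function below returns "../../bin/bash", which resolves correctly from
#  inside the staging area.)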
# def _relative_symlink_target(root, symlink, target): if os.path.isabs(target): # First fix the input a little, the symlink itself must not have a # trailing slash, otherwise we fail to remove the symlink filename # from its directory components in os.path.split() # # The absolute target filename must have its leading separator # removed, otherwise os.path.join() will discard the prefix symlink = symlink.rstrip(os.path.sep) target = target.lstrip(os.path.sep) # We want a relative path from the directory in which symlink # is located, not from the symlink itself. symlinkdir, _ = os.path.split(_resolve_symlinks(symlink)) # Create a full path to the target, including the leading staging # directory fulltarget = os.path.join(_resolve_symlinks(root), target) # now get the relative path from the directory where the symlink # is located within the staging root, to the target within the same # staging root newtarget = os.path.relpath(fulltarget, symlinkdir) return newtarget else: return target # _set_deterministic_user() # # Set the uid/gid for every file in a directory tree to the process' # euid/egid. # # Args: # directory (str): The directory to recursively set the uid/gid on # def _set_deterministic_user(directory): user = os.geteuid() group = os.getegid() for root, dirs, files in os.walk(directory.encode("utf-8"), topdown=False): for filename in files: os.chown(os.path.join(root, filename), user, group, follow_symlinks=False) for dirname in dirs: os.chown(os.path.join(root, dirname), user, group, follow_symlinks=False) # _set_deterministic_mtime() # # Set the mtime for every file in a directory tree to the same fixed timestamp. # # Args: # directory (str): The directory to recursively set the mtime on # def _set_deterministic_mtime(directory): # The magic number for timestamps: 2011-11-11 11:11:11 magic_timestamp = calendar.timegm([2011, 11, 11, 11, 11, 11]) for dirname, _, filenames in os.walk(directory.encode("utf-8"), topdown=False): for filename in filenames: pathname = os.path.join(dirname, filename) # Python's os.utime only ever modifies the timestamp # of a symlink's target, so it is not acceptable to set the timestamp # of a symlink here, if we are staging the link target we # will also set its timestamp. # # We should however find a way to modify the actual link's # timestamp, this outdated python bug report claims that # it is impossible: # # http://bugs.python.org/issue623782 # # However, nowadays it is possible at least on gnuish systems # with the lutimes glibc function. if not os.path.islink(pathname): os.utime(pathname, (magic_timestamp, magic_timestamp)) os.utime(dirname, (magic_timestamp, magic_timestamp)) # _tempdir() # # A context manager for doing work in a temporary directory. # # Args: # dir (str): A path to a parent directory for the temporary directory # suffix (str): A suffix for the temporary directory name # prefix (str): A prefix for the temporary directory name # # Yields: # (str): The temporary directory # # In addition to the functionality provided by python's # tempfile.TemporaryDirectory() context manager, this one additionally # supports cleaning up the temp directory on SIGTERM.
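# (Illustrative usage, a sketch rather than text from the original comment:
#
#      with _tempdir(dir="/var/tmp", prefix="stage-") as td:
#          do_work_in(td)   # hypothetical helper; 'td' is removed on exit
#                           # and also cleaned up if SIGTERM arrives
# )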
# @contextmanager def _tempdir(suffix="", prefix="tmp", dir=None): # pylint: disable=redefined-builtin tempdir = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=dir) def cleanup_tempdir(): if os.path.isdir(tempdir): shutil.rmtree(tempdir) try: with _signals.terminator(cleanup_tempdir): yield tempdir finally: cleanup_tempdir() # _kill_process_tree() # # Brutally murder a process and all of it's children # # Args: # pid (int): Process ID # def _kill_process_tree(pid): proc = psutil.Process(pid) children = proc.children(recursive=True) def kill_proc(p): try: p.kill() except psutil.AccessDenied: # Ignore this error, it can happen with # some setuid bwrap processes. pass except psutil.NoSuchProcess: # It is certain that this has already been sent # SIGTERM, so there is a window where the process # could have exited already. pass # Bloody Murder for child in children: kill_proc(child) kill_proc(proc) # _call() # # A wrapper for subprocess.call() supporting suspend and resume # # Args: # popenargs (list): Popen() arguments # terminate (bool): Whether to attempt graceful termination before killing # rest_of_args (kwargs): Remaining arguments to subprocess.call() # # Returns: # (int): The process exit code. # (str): The program output. # def _call(*popenargs, terminate=False, **kwargs): kwargs['start_new_session'] = True process = None old_preexec_fn = kwargs.get('preexec_fn') if 'preexec_fn' in kwargs: del kwargs['preexec_fn'] def preexec_fn(): os.umask(stat.S_IWGRP | stat.S_IWOTH) if old_preexec_fn is not None: old_preexec_fn() # Handle termination, suspend and resume def kill_proc(): if process: # Some callers know that their subprocess can be # gracefully terminated, make an attempt first if terminate: proc = psutil.Process(process.pid) proc.terminate() try: proc.wait(20) except psutil.TimeoutExpired: # Did not terminate within the timeout: murder _kill_process_tree(process.pid) else: # FIXME: This is a brutal but reliable approach # # Other variations I've tried which try SIGTERM first # and then wait for child processes to exit gracefully # have not reliably cleaned up process trees and have # left orphaned git or ssh processes alive. # # This cleans up the subprocesses reliably but may # cause side effects such as possibly leaving stale # locks behind. Hopefully this should not be an issue # as long as any child processes only interact with # the temp directories which we control and cleanup # ourselves. # _kill_process_tree(process.pid) def suspend_proc(): if process: group_id = os.getpgid(process.pid) os.killpg(group_id, signal.SIGSTOP) def resume_proc(): if process: group_id = os.getpgid(process.pid) os.killpg(group_id, signal.SIGCONT) with _signals.suspendable(suspend_proc, resume_proc), _signals.terminator(kill_proc): process = subprocess.Popen(*popenargs, preexec_fn=preexec_fn, **kwargs) # pylint: disable=consider-using-with,subprocess-popen-preexec-fn output, _ = process.communicate() exit_code = process.poll() # Program output is returned as bytes, we want utf8 strings if output is not None: output = output.decode('UTF-8') return (exit_code, output) # _glob2re() # # Function to translate a glob style pattern into a regex # # Args: # pat (str): The glob pattern # # This is a modified version of the python standard library's # fnmatch.translate() function which supports path like globbing # a bit more correctly, and additionally supports recursive glob # patterns with double asterisk. 
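# (Illustrative examples, not from the original comment: using the public
#  glob() helper above, glob(['/usr/bin/bash', '/usr/share/doc/README'],
#  '/usr/bin/*') yields only '/usr/bin/bash', while the recursive pattern
#  '/usr/**' matches both paths.)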
# # Note that this will only support the most basic of standard # glob patterns, and additionally the recursive double asterisk. # # Support includes: # # * Match any pattern except a path separator # ** Match any pattern, including path separators # ? Match any single character # [abc] Match one of the specified characters # [A-Z] Match one of the characters in the specified range # [!abc] Match any single character, except the specified characters # [!A-Z] Match any single character, except those in the specified range # def _glob2re(pat): i, n = 0, len(pat) res = '' while i < n: c = pat[i] i = i + 1 if c == '*': # fnmatch.translate() simply uses the '.*' separator here, # we only want that for double asterisk (bash 'globstar' behavior) # if i < n and pat[i] == '*': res = res + '.*' i = i + 1 else: res = res + '[^/]*' elif c == '?': # fnmatch.translate() simply uses the '.' wildcard here, but # we dont want to match path separators here res = res + '[^/]' elif c == '[': j = i if j < n and pat[j] == '!': j = j + 1 if j < n and pat[j] == ']': j = j + 1 while j < n and pat[j] != ']': j = j + 1 if j >= n: res = res + '\\[' else: stuff = pat[i:j].replace('\\', '\\\\') i = j + 1 if stuff[0] == '!': stuff = '^' + stuff[1:] elif stuff[0] == '^': stuff = '\\' + stuff res = '{}[{}]'.format(res, stuff) else: res = res + re.escape(c) return res + r'\Z' # _deduplicate() # # Remove duplicate entries in a list or other iterable. # # Copied verbatim from the unique_everseen() example at # https://docs.python.org/3/library/itertools.html#itertools-recipes # # Args: # iterable (iterable): What to deduplicate # key (callable): Optional function to map from list entry to value # # Returns: # (generator): Generator that produces a deduplicated version of 'iterable' # def _deduplicate(iterable, key=None): seen = set() seen_add = seen.add if key is None: for element in itertools.filterfalse(seen.__contains__, iterable): seen_add(element) yield element else: for element in iterable: k = key(element) if k not in seen: seen_add(k) yield element # _parse_version(): # # Args: # version (str): The file name from which to determine compression # # Returns: # A 2-tuple of form (major_version: int, minor_version: int) # # Raises: # UtilError: In the case of a malformed version string # def _parse_version(version): try: versions = version.split(".") except AttributeError as e: raise UtilError("Malformed version string: {}".format(version),) from e try: major = int(versions[0]) minor = int(versions[1]) except (IndexError, ValueError) as e: raise UtilError("Malformed version string: {}".format(version),) from e return major, minor buildstream-1.6.9/conftest.py000077500000000000000000000051441437515270000162540ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: # Tristan Maat # import os import shutil import tempfile import pytest ################################################# # Implement pytest option # ################################################# def pytest_addoption(parser): parser.addoption('--integration', action='store_true', default=False, help='Run integration tests') def pytest_runtest_setup(item): if item.get_closest_marker('integration') and not item.config.getvalue('integration'): pytest.skip('skipping integration test') ################################################# # integration_cache fixture # ################################################# # # This is yielded by the `integration_cache` fixture # class IntegrationCache(): def __init__(self, cache): cache = os.path.abspath(cache) os.makedirs(cache, exist_ok=True) # Use the same sources every time self.sources = os.path.join(cache, 'sources') # Create a temp directory for the duration of the test for # the artifacts directory try: self.artifacts = tempfile.mkdtemp(dir=cache, prefix='artifacts-') except OSError as e: raise AssertionError("Unable to create test directory !") from e @pytest.fixture(scope='session') def integration_cache(request): # Set the cache dir to the INTEGRATION_CACHE variable, or the # default if that is not set. if 'INTEGRATION_CACHE' in os.environ: cache_dir = os.environ['INTEGRATION_CACHE'] else: cache_dir = os.path.abspath('./integration-cache') cache = IntegrationCache(cache_dir) yield cache # Clean up the artifacts after each test run - we only want to # cache sources between runs try: shutil.rmtree(cache.artifacts) except FileNotFoundError: pass buildstream-1.6.9/contrib/000077500000000000000000000000001437515270000155065ustar00rootroot00000000000000buildstream-1.6.9/contrib/bst-here000077500000000000000000000065021437515270000171500ustar00rootroot00000000000000#!/bin/bash # # Copyright 2017 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Charles Bailey # Sam Thursfield # This is a helper script for using BuildStream via Docker. See # docs/source/install.rst for documentation. usage() { cat </dev/null done BST_HERE_PS1="\[\033[01;34m\]\w\[\033[00m\]> " if [ "$#" -eq 0 ]; then command="/bin/bash -i" else command="/usr/local/bin/bst $@" fi if "$update" == true then docker pull "$bst_here_image" fi # FIXME: We run with --privileged to allow bwrap to mount system # directories, but this is overkill. We should add the correct # --cap-add calls, or seccomp settings, but we are not sure # what those are yet. 
# # Old settings: # --cap-add SYS_ADMIN # --security-opt seccomp=unconfined # exec docker run --rm -i${is_tty:+ -t} \ --privileged \ --env PS1="$BST_HERE_PS1" \ --device /dev/fuse \ --volume buildstream-cache:/root/.cache/buildstream \ --volume buildstream-config:/root/.config \ --volume "$PWD":/src \ $extra_volumes_opt \ --workdir /src \ "$bst_here_image" \ $command buildstream-1.6.9/doc/000077500000000000000000000000001437515270000146135ustar00rootroot00000000000000buildstream-1.6.9/doc/Makefile000066400000000000000000000104211437515270000162510ustar00rootroot00000000000000# Makefile for Sphinx documentation # # Note, due to a problem with python2/python3 parallel # installability of sphinx (https://github.com/sphinx-doc/sphinx/issues/4375) # we dont use the standard `sphinx-build` and `sphinx-apidoc` entry points. # # The following technique works as long as sphinx is installed for python3, # regardless of the entry point which might be in /usr/bin or PATH, but # will stop working in sphinx >= 2.0. Hopefully by then, the mentioned # bug will be fixed and we can use a standard python3 specific script to # invoke sphnix. # SPHINXOPTS = SPHINXBUILD = python3 -m sphinx SPHINXAPIDOC = python3 -m sphinx.ext.apidoc PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -W -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # Set BST_FORCE_SESSION_REBUILD to force rebuild the docs BST2HTML = $(CURDIR)/bst2html.py BST2HTMLOPTS = ifneq ($(strip $(BST_FORCE_SESSION_REBUILD)),) BST2HTMLOPTS = --force endif # Help Python find buildstream and its plugins PYTHONPATH=$(CURDIR)/..:$(CURDIR)/../buildstream/plugins .PHONY: all clean templates templates-clean sessions sessions-prep sessions-clean html devhelp # Canned recipe for generating plugin api skeletons # $1 = the plugin directory # $2 = the output docs directory # # Explanation: # # Sphinx does not have any option for skipping documentation, # we dont want to document plugin code because nobody uses that # but we do want to use module-level docstrings in plugins in # order to explain how plugins work. # # For this purpose, we replace sphinx-apidoc with a simple # makefile rule which generates a template slightly differently # from how sphinx does it, allowing us to get what we want # from plugin documentation. # define plugin-doc-skeleton @for file in $$(find ${1} -name "*.py" ! -name "_*.py"); do \ base=$$(basename $$file); \ module=${2}.$${base%.py}; \ modname=$${base%.py}; \ echo -n "Generating source/${2}/$${modname}.rst... "; \ sed -e "s|@@MODULE@@|$${module}|g" \ source/plugin.rsttemplate > \ source/${2}/$${modname}.rst.tmp && \ mv source/${2}/$${modname}.rst.tmp source/${2}/$${modname}.rst || exit 1; \ echo "Done."; \ done endef all: html devhelp clean: templates-clean sessions-clean rm -rf build # Generate rst templates for the docs using a mix of sphinx-apidoc and # our 'plugin-doc-skeleton' routine for plugin pages. 
templates: mkdir -p source/elements mkdir -p source/sources $(SPHINXAPIDOC) --force --separate --module-first --no-headings --no-toc -o source $(CURDIR)/../buildstream *_pb2*.py $(call plugin-doc-skeleton,$(CURDIR)/../buildstream/plugins/elements,elements) $(call plugin-doc-skeleton,$(CURDIR)/../buildstream/plugins/sources,sources) templates-clean: rm -rf source/elements rm -rf source/sources # Stage the stored sessions into the place where they will # be used in the build. # # This is separated so that the git tree does not become # dirty as a result of a documentation build process - which # messes up the docs version number and the version number # printed in some command line output. # sessions-prep: mkdir -p source/sessions cp source/sessions-stored/*.html source/sessions # By default, this will generate the html fragments of colorized BuildStream terminal # output only if they don't yet exist. # # Specify BST_FORCE_SESSION_REBUILD=1 to force rebuild all session html files. # sessions: sessions-prep for file in $(wildcard sessions/*.run); do \ PYTHONPATH=$(PYTHONPATH) $(BST2HTML) $(BST2HTMLOPTS) $$file; \ done sessions-clean: rm -rf source/sessions # Targets which generate docs with sphinx build # # html devhelp: templates sessions @echo "Building $@..." PYTHONPATH=$(PYTHONPATH) \ $(SPHINXBUILD) -b $@ $(ALLSPHINXOPTS) "$(BUILDDIR)/$@" \ $(wildcard source/*.rst) \ $(wildcard source/tutorial/*.rst) \ $(wildcard source/examples/*.rst) \ $(wildcard source/elements/*.rst) \ $(wildcard source/sources/*.rst) @echo @echo "Build of $@ finished, output: $(CURDIR)/$(BUILDDIR)/$@" buildstream-1.6.9/doc/bst2html.py000077500000000000000000000423521437515270000167350ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright (c) 2013 German M. Bravo (Kronuz) # Copyright (c) 2018 Codethink Limited # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
# # This file is substantially based on German's work, obtained at: # https://github.com/Kronuz/ansi2html.git # import os import sys import re import shlex import subprocess from collections.abc import Mapping from contextlib import contextmanager from tempfile import TemporaryDirectory import click from buildstream import _yaml from buildstream import utils from buildstream._exceptions import BstError _ANSI2HTML_STYLES = {} ANSI2HTML_CODES_RE = re.compile('(?:\033\\[(\d+(?:;\d+)*)?([cnRhlABCDfsurgKJipm]))') ANSI2HTML_PALETTE = { # See http://ethanschoonover.com/solarized 'solarized': ['#073642', '#D30102', '#859900', '#B58900', '#268BD2', '#D33682', '#2AA198', '#EEE8D5', '#002B36', '#CB4B16', '#586E75', '#657B83', '#839496', '#6C71C4', '#93A1A1', '#FDF6E3'], # Above mapped onto the xterm 256 color palette 'solarized-xterm': ['#262626', '#AF0000', '#5F8700', '#AF8700', '#0087FF', '#AF005F', '#00AFAF', '#E4E4E4', '#1C1C1C', '#D75F00', '#585858', '#626262', '#808080', '#5F5FAF', '#8A8A8A', '#FFFFD7'], # Gnome default: 'tango': ['#000000', '#CC0000', '#4E9A06', '#C4A000', '#3465A4', '#75507B', '#06989A', '#D3D7CF', '#555753', '#EF2929', '#8AE234', '#FCE94F', '#729FCF', '#AD7FA8', '#34E2E2', '#EEEEEC'], # xterm: 'xterm': ['#000000', '#CD0000', '#00CD00', '#CDCD00', '#0000EE', '#CD00CD', '#00CDCD', '#E5E5E5', '#7F7F7F', '#FF0000', '#00FF00', '#FFFF00', '#5C5CFF', '#FF00FF', '#00FFFF', '#FFFFFF'], 'console': ['#000000', '#AA0000', '#00AA00', '#AA5500', '#0000AA', '#AA00AA', '#00AAAA', '#AAAAAA', '#555555', '#FF5555', '#55FF55', '#FFFF55', '#5555FF', '#FF55FF', '#55FFFF', '#FFFFFF'], } def _ansi2html_get_styles(palette): if palette not in _ANSI2HTML_STYLES: p = ANSI2HTML_PALETTE.get(palette, ANSI2HTML_PALETTE['console']) regular_style = { '1': '', # bold '2': 'opacity:0.5', '4': 'text-decoration:underline', '5': 'font-weight:bold', '7': '', '8': 'display:none', } bold_style = regular_style.copy() for i in range(8): regular_style['3%s' % i] = 'color:%s' % p[i] regular_style['4%s' % i] = 'background-color:%s' % p[i] bold_style['3%s' % i] = 'color:%s' % p[i + 8] bold_style['4%s' % i] = 'background-color:%s' % p[i + 8] # The default xterm 256 colour p: indexed_style = {} for i in range(16): indexed_style['%s' % i] = p[i] for rr in range(6): for gg in range(6): for bb in range(6): i = 16 + rr * 36 + gg * 6 + bb r = (rr * 40 + 55) if rr else 0 g = (gg * 40 + 55) if gg else 0 b = (bb * 40 + 55) if bb else 0 indexed_style['%s' % i] = ''.join('%02X' % c if 0 <= c <= 255 else None for c in (r, g, b)) for g in range(24): i = g + 232 l = g * 10 + 8 indexed_style['%s' % i] = ''.join('%02X' % c if 0 <= c <= 255 else None for c in (l, l, l)) _ANSI2HTML_STYLES[palette] = (regular_style, bold_style, indexed_style) return _ANSI2HTML_STYLES[palette] def ansi2html(text, palette='solarized'): def _ansi2html(m): if m.group(2) != 'm': return '' import sys state = None sub = '' cs = m.group(1) cs = cs.strip() if cs else '' for c in cs.split(';'): c = c.strip().lstrip('0') or '0' if c == '0': while stack: sub += '' stack.pop() elif c in ('38', '48'): extra = [c] state = 'extra' elif state == 'extra': if c == '5': state = 'idx' elif c == '2': state = 'r' elif state: if state == 'idx': extra.append(c) state = None # 256 colors color = indexed_style.get(c) # TODO: convert index to RGB! 
if color is not None: sub += '' % ('color' if extra[0] == '38' else 'background-color', color) stack.append(extra) elif state in ('r', 'g', 'b'): extra.append(c) if state == 'r': state = 'g' elif state == 'g': state = 'b' else: state = None try: color = '#' + ''.join( '%02X' % c if 0 <= c <= 255 else None for x in extra for c in [int(x)] ) except (ValueError, TypeError): pass else: sub += ''.format( 'color' if extra[0] == '38' else 'background-color', color) stack.append(extra) else: if '1' in stack: style = bold_style.get(c) else: style = regular_style.get(c) if style is not None: sub += '' % style # Still needs to be added to the stack even if style is empty # (so it can check '1' in stack above, for example) stack.append(c) return sub stack = [] regular_style, bold_style, indexed_style = _ansi2html_get_styles(palette) sub = ANSI2HTML_CODES_RE.sub(_ansi2html, text) while stack: sub += '' stack.pop() return sub # workdir() # # Sets up a new temp directory with a config file # # Args: # work_directory (str): The directory where to create a tempdir first # source_cache (str): The directory of a source cache to share with, or None # # Yields: # The buildstream.conf full path # @contextmanager def workdir(source_cache=None): with TemporaryDirectory(prefix='run-bst-', dir=os.getcwd()) as tempdir: if not source_cache: source_cache = os.path.join(tempdir, 'sources') bst_config_file = os.path.join(tempdir, 'buildstream.conf') config = { 'sourcedir': source_cache, 'artifactdir': os.path.join(tempdir, 'artifacts'), 'logdir': os.path.join(tempdir, 'logs'), 'builddir': os.path.join(tempdir, 'build'), } _yaml.dump(config, bst_config_file) yield (tempdir, bst_config_file, source_cache) # run_bst_command() # # Runs a command # # Args: # config_file (str): The path to the config file to use # directory (str): The project directory # command (str): A command string # # Returns: # (str): The colorized combined stdout/stderr of BuildStream # def run_bst_command(config_file, directory, command): click.echo("Running bst command in directory '{}': bst {}".format(directory, command), err=True) argv = ['python3', '-m', 'buildstream', '--colors', '--config', config_file] + shlex.split(command) p = subprocess.Popen(argv, cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = p.communicate() return out.decode('utf-8').strip() # run_shell_command() # # Runs a command # # Args: # directory (str): The project directory # command (str): A shell command # # Returns: # (str): The combined stdout/stderr of the shell command # def run_shell_command(directory, command): click.echo("Running shell command in directory '{}': {}".format(directory, command), err=True) argv = shlex.split(command) p = subprocess.Popen(argv, cwd=directory, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out, _ = p.communicate() return out.decode('utf-8').strip() # generate_html # # Generate html based on the output # # Args: # output (str): The output of the BuildStream command # directory (str): The project directory # config_file (str): The config file # source_cache (str): The source cache # tempdir (str): The base work directory # palette (str): The rendering color style # command (str): The command # fake_output (bool): Whether the provided output is faked or not # # Returns: # (str): The html formatted output # def generate_html(output, directory, config_file, source_cache, tempdir, palette, command, fake_output): test_base_name = os.path.basename(directory) if fake_output: show_command = command else: show_command = 'bst 
' + command # Substitute some things we want normalized for the docs output = re.sub(os.environ.get('HOME'), '/home/user', output) output = re.sub(config_file, '/home/user/.config/buildstream.conf', output) output = re.sub(source_cache, '/home/user/.cache/buildstream/sources', output) output = re.sub(tempdir, '/home/user/.cache/buildstream', output) output = re.sub(directory, '/home/user/{}'.format(test_base_name), output) # Now convert to HTML and add some surrounding sugar output = ansi2html(output, palette=palette) # Finally format it nicely into a
    # <div>.
    # NOTE: the exact wrapper markup below is assumed; the original tags
    # did not survive in this copy of the file.
    final_output = '<div class="highlight" style="font-size:x-small">\n' + \
                   '<pre>\n' + \
                   'user@host:' + \
                   '~/{}$ '.format(test_base_name) + \
                   show_command + '\n'
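    # For illustration only (hypothetical element and command names): for a
    # run of "bst build hello.bst" in a project directory named "autotools",
    # the fragment built above begins roughly as
    #
    #     <div class="highlight" style="font-size:x-small">
    #     <pre>
    #     user@host:~/autotools$ bst build hello.bst
    #
    # with the colorized output appended next and the block closed below.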

    if output:
        final_output += '\n' + output + '\n'

    final_output += '</pre></div>
\n' return final_output # check_needs_build() # # Checks whether filename, specified relative to basedir, # needs to be built (based on whether it exists). # # Args: # basedir (str): The base directory to check relative of, or None for CWD # filename (str): The basedir relative path to the file # force (bool): Whether force rebuilding of existing things is enabled # # Returns: # (bool): Whether the file needs to be built # def check_needs_build(basedir, filename, force=False): if force: return True if basedir is None: basedir = os.getcwd() filename = os.path.join(basedir, filename) filename = os.path.realpath(filename) if not os.path.exists(filename): return True return False def run_session(description, tempdir, source_cache, palette, config_file, force): desc = _yaml.load(description, shortname=os.path.basename(description)) desc_dir = os.path.dirname(description) # Preflight commands and check if we can skip this session # if not force: needs_build = False commands = _yaml.node_get(desc, list, 'commands') for command in commands: output = _yaml.node_get(command, str, 'output', default_value=None) if output is not None and check_needs_build(desc_dir, output, force=False): needs_build = True break if not needs_build: click.echo("Skipping '{}' as no files need to be built".format(description), err=True) return # FIXME: Workaround a setuptools bug where the symlinks # we store in git dont get carried into a release # tarball. This workaround lets us build docs from # a source distribution tarball. # symlinks = _yaml.node_get(desc, Mapping, 'workaround-symlinks', default_value={}) for symlink, target in _yaml.node_items(symlinks): # Resolve real path to where symlink should be symlink = os.path.join(desc_dir, symlink) # Ensure dir exists symlink_dir = os.path.dirname(symlink) os.makedirs(symlink_dir, exist_ok=True) click.echo("Generating symlink at: {} (target: {})".format(symlink, target), err=True) # Generate a symlink try: os.symlink(target, symlink) except FileExistsError: # If the files exist, we're running from a git checkout and # not a source distribution, no need to complain pass remove_files = _yaml.node_get(desc, list, 'remove-files', default_value=[]) for remove_file in remove_files: remove_file = os.path.join(desc_dir, remove_file) remove_file = os.path.realpath(remove_file) if os.path.isdir(remove_file): utils._force_rmtree(remove_file) else: utils.safe_remove(remove_file) # Run commands # commands = _yaml.node_get(desc, list, 'commands') for c in commands: command = _yaml.node_get(desc, Mapping, 'commands', indices=[commands.index(c)]) # Get the directory where this command should be run directory = _yaml.node_get(command, str, 'directory') directory = os.path.join(desc_dir, directory) directory = os.path.realpath(directory) # Get the command string command_str = _yaml.node_get(command, str, 'command') # Check whether this is a shell command and not a bst command is_shell = _yaml.node_get(command, bool, 'shell', default_value=False) # Check if there is fake output command_fake_output = _yaml.node_get(command, str, 'fake-output', default_value=None) # Run the command, or just use the fake output if command_fake_output is None: if is_shell: command_out = run_shell_command(directory, command_str) else: command_out = run_bst_command(config_file, directory, command_str) else: command_out = command_fake_output # Encode and save the output if that was asked for output = _yaml.node_get(command, str, 'output', default_value=None) if output is not None: # Convert / Generate a nice
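            # <div> fragment for the captured output.
            #
            # (Illustrative sketch, inferred from the keys parsed above and
            #  not a file shipped in this tree: a session description consumed
            #  by run_session() looks roughly like
            #
            #      commands:
            #      - directory: ../examples/autotools
            #        command: build hello.bst
            #        output: ../source/sessions/autotools-build.html
            #      - directory: ../examples/autotools
            #        shell: true
            #        command: ls -la
            #
            #  with optional top-level 'workaround-symlinks' and 'remove-files'
            #  entries, and an optional per-command 'fake-output' string.)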
converted = generate_html(command_out, directory, config_file, source_cache, tempdir, palette, command_str, command_fake_output is not None) # Save it filename = os.path.join(desc_dir, output) filename = os.path.realpath(filename) output_dir = os.path.dirname(filename) os.makedirs(output_dir, exist_ok=True) with open(filename, 'wb') as f: f.write(converted.encode('utf-8')) click.echo("Saved session at '{}'".format(filename), err=True) @click.command(short_help="Run a bst command and capture stdout/stderr in html") @click.option('--directory', '-C', type=click.Path(file_okay=False, dir_okay=True), help="The project directory where to run the command") @click.option('--force', is_flag=True, default=False, help="Force rebuild, even if the file exists") @click.option('--source-cache', type=click.Path(file_okay=False, dir_okay=True), help="A shared source cache") @click.option('--palette', '-p', default='tango', type=click.Choice(['solarized', 'solarized-xterm', 'tango', 'xterm', 'console']), help="Selects a palette for the output style") @click.argument('description', type=click.Path(file_okay=True, dir_okay=False, readable=True)) def run_bst(directory, force, source_cache, description, palette): """Run a bst command and capture stdout/stderr in html This command normally takes a description yaml file, see the HACKING file for information on it's format. """ if not source_cache and os.environ.get('BST_SOURCE_CACHE'): source_cache = os.environ['BST_SOURCE_CACHE'] with workdir(source_cache=source_cache) as (tempdir, config_file, source_cache): run_session(description, tempdir, source_cache, palette, config_file, force) return 0 if __name__ == '__main__': try: run_bst() except BstError as e: click.echo("Error: {}".format(e), err=True) sys.exit(-1) buildstream-1.6.9/doc/examples/000077500000000000000000000000001437515270000164315ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/autotools/000077500000000000000000000000001437515270000204625ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/autotools/elements/000077500000000000000000000000001437515270000222765ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/autotools/elements/base.bst000066400000000000000000000001001437515270000237110ustar00rootroot00000000000000kind: stack description: Base stack depends: - base/alpine.bst buildstream-1.6.9/doc/examples/autotools/elements/base/000077500000000000000000000000001437515270000232105ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/autotools/elements/base/alpine.bst000066400000000000000000000004611437515270000251730ustar00rootroot00000000000000kind: import description: | Alpine Linux base runtime sources: - kind: tar # This is a post doctored, trimmed down system image # of the Alpine linux distribution. # url: alpine:integration-tests-base.v1.x86_64.tar.xz ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639 buildstream-1.6.9/doc/examples/autotools/elements/hello.bst000066400000000000000000000007021437515270000241120ustar00rootroot00000000000000kind: autotools description: | Hello world example from automake variables: # The hello world example lives in the doc/amhello folder. # # Set the %{command-subdir} variable to that location # and just have the autotools element run it's commands there. 
# command-subdir: doc/amhello sources: - kind: tar url: gnu:automake-1.16.tar.gz ref: 80da43bb5665596ee389e6d8b64b4f122ea4b92a685b1dbd813cd1f0e0c2d83f depends: - base.bst buildstream-1.6.9/doc/examples/autotools/project.conf000066400000000000000000000005321437515270000227770ustar00rootroot00000000000000# Unique project name name: autotools # Required BuildStream format version format-version: 9 # Subdirectory where elements are stored element-path: elements # Define some aliases for the tarballs we download aliases: alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/ gnu: http://ftpmirror.gnu.org/gnu/automake/ buildstream-1.6.9/doc/examples/first-project/000077500000000000000000000000001437515270000212245ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/first-project/elements/000077500000000000000000000000001437515270000230405ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/first-project/elements/hello.bst000066400000000000000000000003441437515270000246560ustar00rootroot00000000000000kind: import # Use a local source to stage our file sources: - kind: local path: hello.world # Configure the import element config: # Place the content staged by sources at the # root of the output artifact target: / buildstream-1.6.9/doc/examples/first-project/hello.world000066400000000000000000000000001437515270000233660ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/first-project/project.conf000066400000000000000000000002451437515270000235420ustar00rootroot00000000000000# Unique project name name: first-project # Required BuildStream format version format-version: 17 # Subdirectory where elements are stored element-path: elements buildstream-1.6.9/doc/examples/flatpak-autotools/000077500000000000000000000000001437515270000221025ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/elements/000077500000000000000000000000001437515270000237165ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/elements/base.bst000066400000000000000000000001211437515270000253340ustar00rootroot00000000000000kind: stack description: Base stack depends: - base/sdk.bst - base/usrmerge.bst buildstream-1.6.9/doc/examples/flatpak-autotools/elements/base/000077500000000000000000000000001437515270000246305ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/elements/base/sdk.bst000066400000000000000000000007401437515270000261240ustar00rootroot00000000000000kind: import description: Import the base freedesktop SDK sources: - kind: ostree url: flathub:repo/ gpg-key: keys/flathub.gpg (?): - arch == "x86_64": track: runtime/org.freedesktop.BaseSdk/x86_64/1.6 ref: 7306169ea9c563f3ce75bb57be9e94b0acf1d742edacab0aa751cf6646a4b52e - arch == "i386": track: runtime/org.freedesktop.BaseSdk/i386/1.6 ref: 63f9537eea89448ec865f907a3ec89b261493b3d999121a81603c827b6219d20 config: source: files target: usr buildstream-1.6.9/doc/examples/flatpak-autotools/elements/base/usrmerge.bst000066400000000000000000000003151437515270000271720ustar00rootroot00000000000000kind: import description: Base usr merge symlinks # Depend on the base-sdk.bst such that the # symlinks get added after staging the SDK depends: - base/sdk.bst sources: - kind: local path: files/links buildstream-1.6.9/doc/examples/flatpak-autotools/elements/hello.bst000066400000000000000000000001561437515270000255350ustar00rootroot00000000000000kind: autotools description: Autotools project depends: - base.bst sources: - kind: local path: files/src 
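# (Illustrative note, not part of the example sources: source URLs in these
#  example elements use the aliases defined in each project.conf, so a url
#  such as "gnu:automake-1.16.tar.gz" resolves to
#  "http://ftpmirror.gnu.org/gnu/automake/automake-1.16.tar.gz" at fetch time.)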
buildstream-1.6.9/doc/examples/flatpak-autotools/files/000077500000000000000000000000001437515270000232045ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/links/000077500000000000000000000000001437515270000243245ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/links/bin000077700000000000000000000000001437515270000263162usr/binustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/links/etc000077700000000000000000000000001437515270000263242usr/etcustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/links/lib000077700000000000000000000000001437515270000263122usr/libustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/000077500000000000000000000000001437515270000237735ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/Makefile.am000066400000000000000000000003411437515270000260250ustar00rootroot00000000000000# Copyright (C) 2006-2014 Free Software Foundation, Inc. # This Makefile.am is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. SUBDIRS = src dist_doc_DATA = README buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/README000066400000000000000000000001441437515270000246520ustar00rootroot00000000000000This is a demonstration package for GNU Automake. Type `info Automake' to read the Automake manual. buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/aclocal.m4000066400000000000000000001222351437515270000256400ustar00rootroot00000000000000# generated automatically by aclocal 1.15 -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, [m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) # Copyright (C) 2002-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.15' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. 
m4_if([$1], [1.15], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.15])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. 
AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. 
mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. 
AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. # # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each '.P' file that we will # need in order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. 
-*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC]) [_AM_PROG_CC_C_O ]) # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. 
AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) fi fi dnl The trailing newline in this macro's definition is deliberate, for dnl backward compatibility and to allow trailing 'dnl'-style comments dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. 
# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. 
# If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_CC_C_O # --------------- # Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC # to automatically call this. AC_DEFUN([_AM_PROG_CC_C_O], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([compile])dnl AC_LANG_PUSH([C])dnl AC_CACHE_CHECK( [whether $CC understands -c and -o together], [am_cv_prog_cc_c_o], [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i]) if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi AC_LANG_POP([C])]) # For backward compatibility. AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) 
AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). 
AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. dnl See automake bug#9928 and bug#10237. am_make=${MAKE-make} AC_CACHE_CHECK([whether $am_make supports nested variables], [am_cv_make_support_nested_variables], [if AS_ECHO([['TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi]) if test $am_cv_make_support_nested_variables = yes; then dnl Using '$V' instead of '$(V)' breaks IRIX make. AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AC_SUBST([AM_V])dnl AM_SUBST_NOTMAKE([AM_V])dnl AC_SUBST([AM_DEFAULT_V])dnl AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl AC_SUBST([AM_DEFAULT_VERBOSITY])dnl AM_BACKSLASH='\' AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor 'install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in "make install-strip", and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004-2014 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of 'v7', 'ustar', or 'pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar # AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar], [# The POSIX 1988 'ustar' format is defined with fixed-size fields. # There is notably a 21 bits limit for the UID and the GID. In fact, # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 # and bug#13588). am_max_uid=2097151 # 2^21 - 1 am_max_gid=$am_max_uid # The $UID and $GID variables are not portable, so we need to resort # to the POSIX-mandated id(1) utility. Errors in the 'id' calls # below are definitely unexpected, so allow the users to see them # (that is, avoid stderr redirection). am_uid=`id -u || echo unknown` am_gid=`id -g || echo unknown` AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) if test $am_uid -le $am_max_uid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) if test $am_gid -le $am_max_gid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi], [pax], [], [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. _am_tools=${am_cv_prog_tar_$1-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. (tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works. 
rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/compile000077500000000000000000000162451437515270000253610ustar00rootroot00000000000000#! /bin/sh # Wrapper for compilers which do not understand '-c -o'. scriptversion=2012-10-14.11; # UTC # Copyright (C) 1999-2014 Free Software Foundation, Inc. # Written by Tom Tromey . # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to or send patches to # . nl=' ' # We need space, tab and new line, in precisely that order. Quoting is # there to prevent tools from complaining about whitespace usage. IFS=" "" $nl" file_conv= # func_file_conv build_file lazy # Convert a $build file to $host form and store it in $file # Currently only supports Windows hosts. If the determined conversion # type is listed in (the comma separated) LAZY, no conversion will # take place. func_file_conv () { file=$1 case $file in / | /[!/]*) # absolute file, and not a UNC file if test -z "$file_conv"; then # lazily determine how to convert abs files case `uname -s` in MINGW*) file_conv=mingw ;; CYGWIN*) file_conv=cygwin ;; *) file_conv=wine ;; esac fi case $file_conv/,$2, in *,$file_conv,*) ;; mingw/*) file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` ;; cygwin/*) file=`cygpath -m "$file" || echo "$file"` ;; wine/*) file=`winepath -w "$file" || echo "$file"` ;; esac ;; esac } # func_cl_dashL linkdir # Make cl look for libraries in LINKDIR func_cl_dashL () { func_file_conv "$1" if test -z "$lib_path"; then lib_path=$file else lib_path="$lib_path;$file" fi linker_opts="$linker_opts -LIBPATH:$file" } # func_cl_dashl library # Do a library search-path lookup for cl func_cl_dashl () { lib=$1 found=no save_IFS=$IFS IFS=';' for dir in $lib_path $LIB do IFS=$save_IFS if $shared && test -f "$dir/$lib.dll.lib"; then found=yes lib=$dir/$lib.dll.lib break fi if test -f "$dir/$lib.lib"; then found=yes lib=$dir/$lib.lib break fi if test -f "$dir/lib$lib.a"; then found=yes lib=$dir/lib$lib.a break fi done IFS=$save_IFS if test "$found" != yes; then lib=$lib.lib fi } # func_cl_wrapper cl arg... 
# Adjust compile command to suit cl func_cl_wrapper () { # Assume a capable shell lib_path= shared=: linker_opts= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. eat=1 case $2 in *.o | *.[oO][bB][jJ]) func_file_conv "$2" set x "$@" -Fo"$file" shift ;; *) func_file_conv "$2" set x "$@" -Fe"$file" shift ;; esac ;; -I) eat=1 func_file_conv "$2" mingw set x "$@" -I"$file" shift ;; -I*) func_file_conv "${1#-I}" mingw set x "$@" -I"$file" shift ;; -l) eat=1 func_cl_dashl "$2" set x "$@" "$lib" shift ;; -l*) func_cl_dashl "${1#-l}" set x "$@" "$lib" shift ;; -L) eat=1 func_cl_dashL "$2" ;; -L*) func_cl_dashL "${1#-L}" ;; -static) shared=false ;; -Wl,*) arg=${1#-Wl,} save_ifs="$IFS"; IFS=',' for flag in $arg; do IFS="$save_ifs" linker_opts="$linker_opts $flag" done IFS="$save_ifs" ;; -Xlinker) eat=1 linker_opts="$linker_opts $2" ;; -*) set x "$@" "$1" shift ;; *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) func_file_conv "$1" set x "$@" -Tp"$file" shift ;; *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) func_file_conv "$1" mingw set x "$@" "$file" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -n "$linker_opts"; then linker_opts="-link$linker_opts" fi exec "$@" $linker_opts exit 1 } eat= case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: compile [--help] [--version] PROGRAM [ARGS] Wrapper for compilers which do not understand '-c -o'. Remove '-o dest.o' from ARGS, run PROGRAM with the remaining arguments, and rename the output as expected. If you are trying to build a whole package this is not the right script to run: please start by reading the file 'INSTALL'. Report bugs to . EOF exit $? ;; -v | --v*) echo "compile $scriptversion" exit $? ;; cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) func_cl_wrapper "$@" # Doesn't return... ;; esac ofile= cfile= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. # So we strip '-o arg' only if arg is an object. eat=1 case $2 in *.o | *.obj) ofile=$2 ;; *) set x "$@" -o "$2" shift ;; esac ;; *.c) cfile=$1 set x "$@" "$1" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -z "$ofile" || test -z "$cfile"; then # If no '-o' option was seen then we might have been invoked from a # pattern rule where we don't need one. That is ok -- this is a # normal compilation that the losing compiler can handle. If no # '.c' file was seen then we are probably linking. That is also # ok. exec "$@" fi # Name of file we expect compiler to create. cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` # Create the lock directory. # Note: use '[/\\:.-]' here to ensure that we don't use the same name # that we are using for the .o file. Also, base the name on the expected # object file name, since that is what matters with a parallel build. lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d while true; do if mkdir "$lockdir" >/dev/null 2>&1; then break fi sleep 1 done # FIXME: race condition here if user kills between mkdir and trap. trap "rmdir '$lockdir'; exit 1" 1 2 15 # Run the compile. "$@" ret=$? 
if test -f "$cofile"; then test "$cofile" = "$ofile" || mv "$cofile" "$ofile" elif test -f "${cofile}bj"; then test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" fi rmdir "$lockdir" exit $ret # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/configure.ac000066400000000000000000000006001437515270000262550ustar00rootroot00000000000000# Copyright (C) 2006-2014 Free Software Foundation, Inc. # This configure.ac script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. AC_INIT([amhello], [1.0], [bug-automake@gnu.org]) AM_INIT_AUTOMAKE([-Wall -Werror foreign]) AC_PROG_CC AC_CONFIG_HEADERS([config.h]) AC_CONFIG_FILES([ Makefile src/Makefile ]) AC_OUTPUT buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/depcomp000077500000000000000000000560161437515270000253600ustar00rootroot00000000000000#! /bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2013-05-30.07; # UTC # Copyright (C) 1999-2014 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by 'PROGRAMS ARGS'. object Object file output by 'PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputting dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac # Get the directory component of the given path, and save it in the # global variables '$dir'. Note that this directory component will # be either empty or ending with a '/' character. This is deliberate. set_dir_from () { case $1 in */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; *) dir=;; esac } # Get the suffix-stripped basename of the given path, and save it the # global variable '$base'. set_base_from () { base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` } # If no dependency file was actually created by the compiler invocation, # we still have to create a dummy depfile, to avoid errors with the # Makefile "include basename.Plo" scheme. 
make_dummy_depfile () { echo "#dummy" > "$depfile" } # Factor out some common post-processing of the generated depfile. # Requires the auxiliary global variable '$tmpdepfile' to be set. aix_post_process_depfile () { # If the compiler actually managed to produce a dependency file, # post-process it. if test -f "$tmpdepfile"; then # Each line is of the form 'foo.o: dependency.h'. # Do two passes, one to just change these to # $object: dependency.h # and one to simply output # dependency.h: # which is needed to avoid the deleted-header problem. { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" } > "$depfile" rm -f "$tmpdepfile" else make_dummy_depfile fi } # A tabulation character. tab=' ' # A newline character. nl=' ' # Character ranges might be problematic outside the C locale. # These definitions help. upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ lower=abcdefghijklmnopqrstuvwxyz digits=0123456789 alpha=${upper}${lower} if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Avoid interferences from the environment. gccflag= dashmflag= # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvisualcpp fi if test "$depmode" = msvc7msys; then # This is just like msvc7 but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvc7 fi if test "$depmode" = xlc; then # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. gccflag=-qmakedep=gcc,-MF depmode=gcc fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. ## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. ## (see the conditional assignment to $gccflag above). 
## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). Also, it might not be ## supported by the other compilers which use the 'gcc' depmode. ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The second -e expression handles DOS-style file names with drive # letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the "deleted header file" problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. ## Some versions of gcc put a space before the ':'. On the theory ## that the space means something, we add a space to the output as ## well. hp depmode also adds that space, but also prefixes the VPATH ## to the object. Take care to not repeat it in the output. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like '#:fec' to the end of the # dependency line. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ | tr "$nl" ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" ;; xlc) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts '$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. 
set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done aix_post_process_depfile ;; tcc) # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 # FIXME: That version still under development at the moment of writing. # Make that this statement remains true also for stable, released # versions. # It will wrap lines (doesn't matter whether long or short) with a # trailing '\', as in: # # foo.o : \ # foo.c \ # foo.h \ # # It will put a trailing '\' even on the last line, and will use leading # spaces rather than leading tabs (at least since its commit 0394caf7 # "Emit spaces for -MD"). "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. # We have to change lines of the first kind to '$object: \'. sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" # And for each line of the second kind, we have to emit a 'dep.h:' # dummy dependency, to avoid the deleted-header problem. sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" rm -f "$tmpdepfile" ;; ## The order of this option in the case statement is important, since the ## shell code in configure will try each of these formats in the order ## listed in this file. A plain '-MD' option would be understood by many ## compilers, so we must ensure this comes after the gcc and icc options. pgcc) # Portland's C compiler understands '-MD'. # Will always output deps to 'file.d' where file is the root name of the # source file under compilation, even if file resides in a subdirectory. # The object file name does not affect the name of the '.d' file. # pgcc 10.2 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using '\' : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... set_dir_from "$object" # Use the source, not the object, to determine the base name, since # that's sadly what pgcc will do too. set_base_from "$source" tmpdepfile=$base.d # For projects that build the same source file twice into different object # files, the pgcc approach of using the *source* file root name can cause # problems in parallel builds. Use a locking strategy to avoid stomping on # the same $tmpdepfile. lockdir=$base.d-lock trap " echo '$0: caught signal, cleaning up...' >&2 rmdir '$lockdir' exit 1 " 1 2 13 15 numtries=100 i=$numtries while test $i -gt 0; do # mkdir is a portable test-and-set. if mkdir "$lockdir" 2>/dev/null; then # This process acquired the lock. "$@" -MD stat=$? # Release the lock. rmdir "$lockdir" break else # If the lock is being held by a different process, wait # until the winning process is done or we timeout. while test -d "$lockdir" && test $i -gt 0; do sleep 1 i=`expr $i - 1` done fi i=`expr $i - 1` done trap - 1 2 13 15 if test $i -le 0; then echo "$0: failed to acquire lock after $numtries attempts" >&2 echo "$0: check lockdir '$lockdir'" >&2 exit 1 fi if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. 
# Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" # Add 'dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in 'foo.d' instead, so we check for that too. # Subdirectories are respected. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then # Libtool generates 2 separate objects for the 2 libraries. These # two compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir$base.o.d # libtool 1.5 tmpdepfile2=$dir.libs/$base.o.d # Likewise. tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d "$@" -MD fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done # Same post-processing that is required for AIX mode. aix_post_process_depfile ;; msvc7) if test "$libtool" = yes; then showIncludes=-Wc,-showIncludes else showIncludes=-showIncludes fi "$@" $showIncludes > "$tmpdepfile" stat=$? grep -v '^Note: including file: ' "$tmpdepfile" if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The first sed program below extracts the file names and escapes # backslashes for cygpath. The second sed program outputs the file # name when reading, but also accumulates all include files in the # hold buffer in order to output them again at the end. This only # works with sed implementations that can handle large buffers. 
sed < "$tmpdepfile" -n ' /^Note: including file: *\(.*\)/ { s//\1/ s/\\/\\\\/g p }' | $cygpath_u | sort -u | sed -n ' s/ /\\ /g s/\(.*\)/'"$tab"'\1 \\/p s/.\(.*\) \\/\1:/ H $ { s/.*/'"$tab"'/ G p }' >> "$depfile" echo >> "$depfile" # make sure the fragment doesn't end with a backslash rm -f "$tmpdepfile" ;; msvc7msys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for ':' # in the target name. This is to cope with DOS-style filenames: # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. "$@" $dashmflag | sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this sed invocation # correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? # Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" # makedepend may prepend the VPATH from the source file name to the object. # No need to regex-escape $object, excess matching of '.' is harmless. sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process the last invocation # correctly. Breaking it into two sed invocations is a workaround. sed '1,2d' "$tmpdepfile" \ | tr ' ' "$nl" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. 
IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E \ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" echo "$tab" >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/install-sh000077500000000000000000000345231437515270000260060ustar00rootroot00000000000000#!/bin/sh # install - install a program, script, or datafile scriptversion=2013-12-25.23; # UTC # This originates from X11R5 (mit/util/scripts/install.sh), which was # later released in X11R6 (xc/config/util/install.sh) with the # following copyright and license. # # Copyright (C) 1994 X Consortium # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN # AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- # TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # # Except as contained in this notice, the name of the X Consortium shall not # be used in advertising or otherwise to promote the sale, use or other deal- # ings in this Software without prior written authorization from the X Consor- # tium. 
# # # FSF changes to this file are in the public domain. # # Calling this script install-sh is preferred over install.sh, to prevent # 'make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. tab=' ' nl=' ' IFS=" $tab$nl" # Set DOITPROG to "echo" to test this script. doit=${DOITPROG-} doit_exec=${doit:-exec} # Put in absolute file names if you don't have them in your path; # or use environment vars. chgrpprog=${CHGRPPROG-chgrp} chmodprog=${CHMODPROG-chmod} chownprog=${CHOWNPROG-chown} cmpprog=${CMPPROG-cmp} cpprog=${CPPROG-cp} mkdirprog=${MKDIRPROG-mkdir} mvprog=${MVPROG-mv} rmprog=${RMPROG-rm} stripprog=${STRIPPROG-strip} posix_mkdir= # Desired mode of installed file. mode=0755 chgrpcmd= chmodcmd=$chmodprog chowncmd= mvcmd=$mvprog rmcmd="$rmprog -f" stripcmd= src= dst= dir_arg= dst_arg= copy_on_change=false is_target_a_directory=possibly usage="\ Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE or: $0 [OPTION]... SRCFILES... DIRECTORY or: $0 [OPTION]... -t DIRECTORY SRCFILES... or: $0 [OPTION]... -d DIRECTORIES... In the 1st form, copy SRCFILE to DSTFILE. In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. In the 4th, create DIRECTORIES. Options: --help display this help and exit. --version display version info and exit. -c (ignored) -C install only if different (preserve the last data modification time) -d create directories instead of installing files. -g GROUP $chgrpprog installed files to GROUP. -m MODE $chmodprog installed files to MODE. -o USER $chownprog installed files to USER. -s $stripprog installed files. -t DIRECTORY install into DIRECTORY. -T report an error if DSTFILE is a directory. Environment variables override the default commands: CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG RMPROG STRIPPROG " while test $# -ne 0; do case $1 in -c) ;; -C) copy_on_change=true;; -d) dir_arg=true;; -g) chgrpcmd="$chgrpprog $2" shift;; --help) echo "$usage"; exit $?;; -m) mode=$2 case $mode in *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) echo "$0: invalid mode: $mode" >&2 exit 1;; esac shift;; -o) chowncmd="$chownprog $2" shift;; -s) stripcmd=$stripprog;; -t) is_target_a_directory=always dst_arg=$2 # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac shift;; -T) is_target_a_directory=never;; --version) echo "$0 $scriptversion"; exit $?;; --) shift break;; -*) echo "$0: invalid option: $1" >&2 exit 1;; *) break;; esac shift done # We allow the use of options -d and -T together, by making -d # take the precedence; this is for compatibility with GNU install. if test -n "$dir_arg"; then if test -n "$dst_arg"; then echo "$0: target directory not allowed when installing a directory." >&2 exit 1 fi fi if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then # When -d is used, all remaining arguments are directories to create. # When -t is used, the destination is already specified. # Otherwise, the last argument is the destination. Remove it from $@. for arg do if test -n "$dst_arg"; then # $@ is not empty: it contains at least $arg. set fnord "$@" "$dst_arg" shift # fnord fi shift # arg dst_arg=$arg # Protect names problematic for 'test' and other utilities. case $dst_arg in -* | [=\(\)!]) dst_arg=./$dst_arg;; esac done fi if test $# -eq 0; then if test -z "$dir_arg"; then echo "$0: no input file specified." >&2 exit 1 fi # It's OK to call 'install-sh -d' without argument. 
# This can happen when creating conditional directories. exit 0 fi if test -z "$dir_arg"; then if test $# -gt 1 || test "$is_target_a_directory" = always; then if test ! -d "$dst_arg"; then echo "$0: $dst_arg: Is not a directory." >&2 exit 1 fi fi fi if test -z "$dir_arg"; then do_exit='(exit $ret); exit $ret' trap "ret=129; $do_exit" 1 trap "ret=130; $do_exit" 2 trap "ret=141; $do_exit" 13 trap "ret=143; $do_exit" 15 # Set umask so as not to create temps with too-generous modes. # However, 'strip' requires both read and write access to temps. case $mode in # Optimize common cases. *644) cp_umask=133;; *755) cp_umask=22;; *[0-7]) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw='% 200' fi cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; *) if test -z "$stripcmd"; then u_plus_rw= else u_plus_rw=,u+rw fi cp_umask=$mode$u_plus_rw;; esac fi for src do # Protect names problematic for 'test' and other utilities. case $src in -* | [=\(\)!]) src=./$src;; esac if test -n "$dir_arg"; then dst=$src dstdir=$dst test -d "$dstdir" dstdir_status=$? else # Waiting for this to be detected by the "$cpprog $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if test ! -f "$src" && test ! -d "$src"; then echo "$0: $src does not exist." >&2 exit 1 fi if test -z "$dst_arg"; then echo "$0: no destination specified." >&2 exit 1 fi dst=$dst_arg # If destination is a directory, append the input filename; won't work # if double slashes aren't ignored. if test -d "$dst"; then if test "$is_target_a_directory" = never; then echo "$0: $dst_arg: Is a directory" >&2 exit 1 fi dstdir=$dst dst=$dstdir/`basename "$src"` dstdir_status=0 else dstdir=`dirname "$dst"` test -d "$dstdir" dstdir_status=$? fi fi obsolete_mkdir_used=false if test $dstdir_status != 0; then case $posix_mkdir in '') # Create intermediate dirs using mode 755 as modified by the umask. # This is like FreeBSD 'install' as of 1997-10-28. umask=`umask` case $stripcmd.$umask in # Optimize common cases. *[2367][2367]) mkdir_umask=$umask;; .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; *[0-7]) mkdir_umask=`expr $umask + 22 \ - $umask % 100 % 40 + $umask % 20 \ - $umask % 10 % 4 + $umask % 2 `;; *) mkdir_umask=$umask,go-w;; esac # With -d, create the new directory with the user-specified mode. # Otherwise, rely on $mkdir_umask. if test -n "$dir_arg"; then mkdir_mode=-m$mode else mkdir_mode= fi posix_mkdir=false case $umask in *[123567][0-7][0-7]) # POSIX mkdir -p sets u+wx bits regardless of umask, which # is incompatible with FreeBSD 'install' when (umask & 300) != 0. ;; *) tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 if (umask $mkdir_umask && exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 then if test -z "$dir_arg" || { # Check for POSIX incompatibilities with -m. # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or # other-writable bit of parent directory when it shouldn't. # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. ls_ld_tmpdir=`ls -ld "$tmpdir"` case $ls_ld_tmpdir in d????-?r-*) different_mode=700;; d????-?--*) different_mode=755;; *) false;; esac && $mkdirprog -m$different_mode -p -- "$tmpdir" && { ls_ld_tmpdir_1=`ls -ld "$tmpdir"` test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" } } then posix_mkdir=: fi rmdir "$tmpdir/d" "$tmpdir" else # Remove any dirs left behind by ancient mkdir implementations. 
rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null fi trap '' 0;; esac;; esac if $posix_mkdir && ( umask $mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" ) then : else # The umask is ridiculous, or mkdir does not conform to POSIX, # or it failed possibly due to a race condition. Create the # directory the slow way, step by step, checking for races as we go. case $dstdir in /*) prefix='/';; [-=\(\)!]*) prefix='./';; *) prefix='';; esac oIFS=$IFS IFS=/ set -f set fnord $dstdir shift set +f IFS=$oIFS prefixes= for d do test X"$d" = X && continue prefix=$prefix$d if test -d "$prefix"; then prefixes= else if $posix_mkdir; then (umask=$mkdir_umask && $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break # Don't fail if two instances are running concurrently. test -d "$prefix" || exit 1 else case $prefix in *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; *) qprefix=$prefix;; esac prefixes="$prefixes '$qprefix'" fi fi prefix=$prefix/ done if test -n "$prefixes"; then # Don't fail if two instances are running concurrently. (umask $mkdir_umask && eval "\$doit_exec \$mkdirprog $prefixes") || test -d "$dstdir" || exit 1 obsolete_mkdir_used=true fi fi fi if test -n "$dir_arg"; then { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 else # Make a couple of temp file names in the proper directory. dsttmp=$dstdir/_inst.$$_ rmtmp=$dstdir/_rm.$$_ # Trap to clean up those temp files at exit. trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 # Copy the file name to the temp name. (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && # and set any options; do chmod last to preserve setuid bits. # # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $cpprog $src $dsttmp" command. # { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && # If -C, don't bother to copy if it wouldn't change the file. if $copy_on_change && old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && set -f && set X $old && old=:$2:$4:$5:$6 && set X $new && new=:$2:$4:$5:$6 && set +f && test "$old" = "$new" && $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 then rm -f "$dsttmp" else # Rename the file to the real destination. $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || # The rename failed, perhaps because mv can't rename something else # to itself, or perhaps because mv is so ancient that it does not # support -f. { # Now remove or move aside any old file at destination location. # We try this two ways since rm can't unlink itself on some # systems and the destination file might be busy for other # reasons. In this case, the final cleanup might fail but the new # file should still install successfully. { test ! -f "$dst" || $doit $rmcmd -f "$dst" 2>/dev/null || { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } } || { echo "$0: cannot unlink or rename $dst" >&2 (exit 1); exit 1 } } && # Now rename the file to the real destination. 
$doit $mvcmd "$dsttmp" "$dst" } fi || exit 1 trap '' 0 fi done # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/missing000077500000000000000000000153301437515270000253740ustar00rootroot00000000000000#! /bin/sh # Common wrapper for a few potentially missing GNU programs. scriptversion=2013-10-28.13; # UTC # Copyright (C) 1996-2014 Free Software Foundation, Inc. # Originally written by Fran,cois Pinard , 1996. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. if test $# -eq 0; then echo 1>&2 "Try '$0 --help' for more information" exit 1 fi case $1 in --is-lightweight) # Used by our autoconf macros to check whether the available missing # script is modern enough. exit 0 ;; --run) # Back-compat with the calling convention used by older automake. shift ;; -h|--h|--he|--hel|--help) echo "\ $0 [OPTION]... PROGRAM [ARGUMENT]... Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due to PROGRAM being missing or too old. Options: -h, --help display this help and exit -v, --version output version information and exit Supported PROGRAM values: aclocal autoconf autoheader autom4te automake makeinfo bison yacc flex lex help2man Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and 'g' are ignored when checking the name. Send bug reports to ." exit $? ;; -v|--v|--ve|--ver|--vers|--versi|--versio|--version) echo "missing $scriptversion (GNU Automake)" exit $? ;; -*) echo 1>&2 "$0: unknown '$1' option" echo 1>&2 "Try '$0 --help' for more information" exit 1 ;; esac # Run the given program, remember its exit status. "$@"; st=$? # If it succeeded, we are done. test $st -eq 0 && exit 0 # Also exit now if we it failed (or wasn't found), and '--version' was # passed; such an option is passed most likely to detect whether the # program is present and works. case $2 in --version|--help) exit $st;; esac # Exit code 63 means version mismatch. This often happens when the user # tries to use an ancient version of a tool on a file that requires a # minimum version. if test $st -eq 63; then msg="probably too old" elif test $st -eq 127; then # Program was missing. msg="missing on your system" else # Program was found and executed, but failed. Give up. 
exit $st fi perl_URL=http://www.perl.org/ flex_URL=http://flex.sourceforge.net/ gnu_software_URL=http://www.gnu.org/software program_details () { case $1 in aclocal|automake) echo "The '$1' program is part of the GNU Automake package:" echo "<$gnu_software_URL/automake>" echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/autoconf>" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; autoconf|autom4te|autoheader) echo "The '$1' program is part of the GNU Autoconf package:" echo "<$gnu_software_URL/autoconf/>" echo "It also requires GNU m4 and Perl in order to run:" echo "<$gnu_software_URL/m4/>" echo "<$perl_URL>" ;; esac } give_advice () { # Normalize program name to check for. normalized_program=`echo "$1" | sed ' s/^gnu-//; t s/^gnu//; t s/^g//; t'` printf '%s\n' "'$1' is $msg." configure_deps="'configure.ac' or m4 files included by 'configure.ac'" case $normalized_program in autoconf*) echo "You should only need it if you modified 'configure.ac'," echo "or m4 files included by it." program_details 'autoconf' ;; autoheader*) echo "You should only need it if you modified 'acconfig.h' or" echo "$configure_deps." program_details 'autoheader' ;; automake*) echo "You should only need it if you modified 'Makefile.am' or" echo "$configure_deps." program_details 'automake' ;; aclocal*) echo "You should only need it if you modified 'acinclude.m4' or" echo "$configure_deps." program_details 'aclocal' ;; autom4te*) echo "You might have modified some maintainer files that require" echo "the 'autom4te' program to be rebuilt." program_details 'autom4te' ;; bison*|yacc*) echo "You should only need it if you modified a '.y' file." echo "You may want to install the GNU Bison package:" echo "<$gnu_software_URL/bison/>" ;; lex*|flex*) echo "You should only need it if you modified a '.l' file." echo "You may want to install the Fast Lexical Analyzer package:" echo "<$flex_URL>" ;; help2man*) echo "You should only need it if you modified a dependency" \ "of a man page." echo "You may want to install the GNU Help2man package:" echo "<$gnu_software_URL/help2man/>" ;; makeinfo*) echo "You should only need it if you modified a '.texi' file, or" echo "any other file indirectly affecting the aspect of the manual." echo "You might want to install the Texinfo package:" echo "<$gnu_software_URL/texinfo/>" echo "The spurious makeinfo call might also be the consequence of" echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might" echo "want to install GNU make:" echo "<$gnu_software_URL/make/>" ;; *) echo "You might have modified some files without having the proper" echo "tools for further handling them. Check the 'README' file, it" echo "often tells you about the needed prerequisites for installing" echo "this package. You may also peek at any GNU archive site, in" echo "case some other package contains this missing '$1' program." ;; esac } give_advice "$1" | sed -e '1s/^/WARNING: /' \ -e '2,$s/^/ /' >&2 # Propagate the correct exit status (expected to be 127 for a program # not found, 63 for a program that failed due to version mismatch). 
exit $st # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/src/000077500000000000000000000000001437515270000245625ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/src/Makefile.am000066400000000000000000000003501437515270000266140ustar00rootroot00000000000000# Copyright (C) 2006-2014 Free Software Foundation, Inc. # This Makefile.am is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. bin_PROGRAMS = hello hello_SOURCES = main.c buildstream-1.6.9/doc/examples/flatpak-autotools/files/src/src/main.c000066400000000000000000000005101437515270000256460ustar00rootroot00000000000000/* Copyright (C) 2006-2014 Free Software Foundation, Inc. This program is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. */ #include #include int main (void) { puts ("Hello World!"); puts ("This is " PACKAGE_STRING "."); return 0; } buildstream-1.6.9/doc/examples/flatpak-autotools/keys/000077500000000000000000000000001437515270000230555ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/flatpak-autotools/keys/flathub.gpg000066400000000000000000000054341437515270000252070ustar00rootroot00000000000000 YCFT9`GCiit$M&C͵o?) _ `L6 DVꮕ8!x)c瑶YӪr_1I7M1@RF,%à[B c "DJ4}W7]AQYO$TGG#΁daEҳef` W!|ŐR%PIFF\Lg 0Ld B}'< ٧쑑' 'dR e3T>!n\ym5AMz|YC    AMz|Q%>xlZQd&`{SX@&1~4Foxpy% 9f&@nZa \E!ň2욭"eh -tg@*ֻcu)_1x@F4991F:_BkҜG!V# w k҉Ij{gJʅ):0@x\ ~^K=-8u o` _qX"QqI>E8f}=haջLpb@n 6?}mP:Hr-Yxo\1ӟB'f ѓҵdg<3hemsr'Eglc,GBt+͛t)XB&FfyQsqt /\#vw #d=<ڷ*YT{bJ / UL=R(3DQ# 2 >[q^>+( YC dg֕d0^흡ƧzN<$CXiq6Ad\=['B@uDf/`\3]A,'nyo/(rթO~E%`MrpiH{"@[9]ZHŖ+"X{ǛiN9bAU]}"_|,n./}C>NG,И1WAG||P*`gצ'bHhH}-EB"0@`-Ox 4ϣBp_;ւg{ے.ǫv *Zy3ںc]`h2^>Luƍn nwƺ!?`=`WuZAM{\%qQ‚m5aX_$3]( (潑͗JD0՚דʝH}~@{"lzѬ3ܩ\w+7!+0`רSR-KI68+FkM9r&!n\ym5AMz|YC @ AMz|t !T݉ BجV'~YC V'~GB zu76ՇRwqU{[2GB4f]xfL)m gd]DUZv䞓{滩j(A7Xg(kS  d zSar0rsnrӲ?hߣN`Ί UWz@c $>46ѸŋYt_VRx,hZnfXX'] ]egrǂ;Kے-'#4]a1Lwy`?|4nhRظnqxZ,5U-+dI6%r^/<.]3 JxUe5,wDfR RYʯ;.I:+ht p:ϭz#'U~֧7Ļw:(u:Y'FEޅzMTT^N Fg=j4[:RUƂ(/4βh:-G@T6~xhuX42 b]-`r9@f9#7*<7P<2ްH< Py91@?[;)G [(&Ǖ'/$`"Lz9bO﷡+R;7W%R@/:Gj(2LZҌ= ݿ׎UjǓ\f:WX\BxO7kOXgM|V+ [jAfpmP7!&Jb >*M:.[nlxcs2t.͉4 %fR(+Y'98>3(LԖmj ogjjawy:̑Rbuildstream-1.6.9/doc/examples/flatpak-autotools/project.conf000066400000000000000000000003151437515270000244160ustar00rootroot00000000000000name: flatpak-autotools aliases: flathub: https://dl.flathub.org/ element-path: elements options: arch: type: arch description: The machine architecture values: - x86_64 - i386 buildstream-1.6.9/doc/examples/integration-commands/000077500000000000000000000000001437515270000225535ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/integration-commands/elements/000077500000000000000000000000001437515270000243675ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/integration-commands/elements/base.bst000066400000000000000000000001001437515270000260020ustar00rootroot00000000000000kind: stack description: Base stack depends: - base/alpine.bst 
buildstream-1.6.9/doc/examples/integration-commands/elements/base/000077500000000000000000000000001437515270000253015ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/integration-commands/elements/base/alpine.bst000066400000000000000000000005151437515270000272640ustar00rootroot00000000000000kind: import description: | Alpine Linux base runtime sources: - kind: tar url: alpine:integration-tests-base.v1.x86_64.tar.xz ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639 # # Run ldconfig in the libdir before running anything # public: bst: integration-commands: - ldconfig "%{libdir}" buildstream-1.6.9/doc/examples/integration-commands/elements/hello.bst000066400000000000000000000005761437515270000262140ustar00rootroot00000000000000kind: manual description: | The hello application # Depend on the hello library depends: - libhello.bst # Stage the files/hello directory for building sources: - kind: local path: files/hello # Now configure the commands to run config: build-commands: - make PREFIX="%{prefix}" install-commands: - make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install buildstream-1.6.9/doc/examples/integration-commands/elements/libhello.bst000066400000000000000000000005751437515270000267020ustar00rootroot00000000000000kind: manual description: | The libhello library # Depend on the base system depends: - base.bst # Stage the files/libhello directory for building sources: - kind: local path: files/libhello # Now configure the commands to run config: build-commands: - make PREFIX="%{prefix}" install-commands: - make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install buildstream-1.6.9/doc/examples/integration-commands/files/000077500000000000000000000000001437515270000236555ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/integration-commands/files/hello/000077500000000000000000000000001437515270000247605ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/integration-commands/files/hello/Makefile000066400000000000000000000003121437515270000264140ustar00rootroot00000000000000# Sample makefile for hello.c # .PHONY: all install all: hello install: install -d ${DESTDIR}${PREFIX}/bin install -m 755 hello ${DESTDIR}${PREFIX}/bin hello: hello.c $(CC) $< -o $@ -Wall -lhello buildstream-1.6.9/doc/examples/integration-commands/files/hello/hello.c000066400000000000000000000004111437515270000262230ustar00rootroot00000000000000/* * hello.c - Simple hello program */ #include #include int main(int argc, char *argv[]) { const char *person = NULL; if (argc > 1) person = argv[1]; if (person) hello(person); else hello("stranger"); return 0; } buildstream-1.6.9/doc/examples/integration-commands/files/libhello/000077500000000000000000000000001437515270000254475ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/integration-commands/files/libhello/Makefile000066400000000000000000000005451437515270000271130ustar00rootroot00000000000000# Sample makefile for hello library # .PHONY: all install all: libhello.so install: install -d ${DESTDIR}${PREFIX}/lib install -d ${DESTDIR}${PREFIX}/include install -m 644 libhello.so ${DESTDIR}${PREFIX}/lib install -m 644 libhello.h ${DESTDIR}${PREFIX}/include %.o: %.c %.h $(CC) -c $< -o $@ -Wall libhello.so: libhello.o $(CC) -shared -o $@ $< buildstream-1.6.9/doc/examples/integration-commands/files/libhello/libhello.c000066400000000000000000000002001437515270000273750ustar00rootroot00000000000000/* * libhello.c - The hello library */ #include void hello(const char *person) { printf("Hello %s\n", person); } 
buildstream-1.6.9/doc/examples/integration-commands/files/libhello/libhello.h000066400000000000000000000001671437515270000274160ustar00rootroot00000000000000/* * libhello.h - The hello library */ /* * A function to say hello to @person */ void hello(const char *person); buildstream-1.6.9/doc/examples/integration-commands/project.conf000066400000000000000000000004551437515270000250740ustar00rootroot00000000000000# Unique project name name: integration-commands # Required BuildStream format version format-version: 9 # Subdirectory where elements are stored element-path: elements # Define an alias for our alpine tarball aliases: alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/ buildstream-1.6.9/doc/examples/running-commands/000077500000000000000000000000001437515270000217105ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/running-commands/elements/000077500000000000000000000000001437515270000235245ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/running-commands/elements/base.bst000066400000000000000000000001001437515270000251370ustar00rootroot00000000000000kind: stack description: Base stack depends: - base/alpine.bst buildstream-1.6.9/doc/examples/running-commands/elements/base/000077500000000000000000000000001437515270000244365ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/running-commands/elements/base/alpine.bst000066400000000000000000000004611437515270000264210ustar00rootroot00000000000000kind: import description: | Alpine Linux base runtime sources: - kind: tar # This is a post doctored, trimmed down system image # of the Alpine linux distribution. # url: alpine:integration-tests-base.v1.x86_64.tar.xz ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639 buildstream-1.6.9/doc/examples/running-commands/elements/hello.bst000066400000000000000000000005601437515270000253420ustar00rootroot00000000000000kind: manual description: | Building manually # Depend on the base system depends: - base.bst # Stage the files/src directory for building sources: - kind: local path: files/src # Now configure the commands to run config: build-commands: - make PREFIX="%{prefix}" install-commands: - make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install buildstream-1.6.9/doc/examples/running-commands/files/000077500000000000000000000000001437515270000230125ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/running-commands/files/src/000077500000000000000000000000001437515270000236015ustar00rootroot00000000000000buildstream-1.6.9/doc/examples/running-commands/files/src/Makefile000066400000000000000000000003021437515270000252340ustar00rootroot00000000000000# Sample makefile for hello.c # .PHONY: all install all: hello install: install -d ${DESTDIR}${PREFIX}/bin install -m 755 hello ${DESTDIR}${PREFIX}/bin hello: hello.c $(CC) -Wall -o $@ $< buildstream-1.6.9/doc/examples/running-commands/files/src/hello.c000066400000000000000000000002171437515270000250500ustar00rootroot00000000000000/* * hello.c - Simple hello world program */ #include int main(int argc, char *argv[]) { printf("Hello World\n"); return 0; } buildstream-1.6.9/doc/examples/running-commands/project.conf000066400000000000000000000004511437515270000242250ustar00rootroot00000000000000# Unique project name name: running-commands # Required BuildStream format version format-version: 9 # Subdirectory where elements are stored element-path: elements # Define an alias for our alpine tarball aliases: alpine: 
https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/ buildstream-1.6.9/doc/sessions/000077500000000000000000000000001437515270000164615ustar00rootroot00000000000000buildstream-1.6.9/doc/sessions/autotools.run000066400000000000000000000010731437515270000212410ustar00rootroot00000000000000 commands: # Make it fetch first - directory: ../examples/autotools command: fetch hello.bst # Capture a `bst show` of the variables - directory: ../examples/autotools output: ../source/sessions/autotools-show-variables.html command: show --deps none --format "%{vars}" hello.bst # Capture a `bst build` - directory: ../examples/autotools output: ../source/sessions/autotools-build.html command: build hello.bst # Capture a shell output - directory: ../examples/autotools output: ../source/sessions/autotools-shell.html command: shell hello.bst -- hello buildstream-1.6.9/doc/sessions/first-project.run000066400000000000000000000021271437515270000220040ustar00rootroot00000000000000# Re-create project.conf using `bst init` remove-files: - ../examples/first-project/project.conf - ../examples/first-project/here commands: # Use bst init to create the project.conf - directory: ../examples/first-project output: ../source/sessions/first-project-init.html command: init --project-name first-project # Use bst init to create the project.conf - directory: ../examples/first-project output: ../source/sessions/first-project-touch.html command: touch hello.world fake-output: '' # Capture a build output - directory: ../examples/first-project output: ../source/sessions/first-project-build.html command: build hello.bst # Capture a show output - directory: ../examples/first-project output: ../source/sessions/first-project-show.html command: show hello.bst # Checkout the output - directory: ../examples/first-project output: ../source/sessions/first-project-checkout.html command: checkout hello.bst here # Checkout the output - directory: ../examples/first-project output: ../source/sessions/first-project-ls.html command: ls ./here fake-output: hello.world buildstream-1.6.9/doc/sessions/flatpak-autotools.run000066400000000000000000000012301437515270000226540ustar00rootroot00000000000000# Workaround setuptools bug for our symlinks here # workaround-symlinks: ../examples/flatpak-autotools/files/links/bin: usr/bin ../examples/flatpak-autotools/files/links/etc: usr/etc ../examples/flatpak-autotools/files/links/lib: usr/lib commands: # Make it fetch first - directory: ../examples/flatpak-autotools command: fetch hello.bst # Capture a build output - directory: ../examples/flatpak-autotools output: ../source/sessions/flatpak-autotools-build.html command: build hello.bst # Capture a shell output - directory: ../examples/flatpak-autotools output: ../source/sessions/flatpak-autotools-shell.html command: shell hello.bst -- hello buildstream-1.6.9/doc/sessions/integration-commands.run000066400000000000000000000006701437515270000233340ustar00rootroot00000000000000 commands: # Make it fetch first - directory: ../examples/integration-commands command: fetch hello.bst # Capture a build output - directory: ../examples/integration-commands output: ../source/sessions/integration-commands-build.html command: build hello.bst # Capture a shell output - directory: ../examples/integration-commands output: ../source/sessions/integration-commands-shell.html command: shell hello.bst -- hello pony buildstream-1.6.9/doc/sessions/running-commands.run000066400000000000000000000013341437515270000224670ustar00rootroot00000000000000 commands: # Make it 
fetch first - directory: ../examples/running-commands command: fetch hello.bst # Capture a show output - directory: ../examples/running-commands output: ../source/sessions/running-commands-show-before.html command: show hello.bst # Capture a build output - directory: ../examples/running-commands output: ../source/sessions/running-commands-build.html command: build hello.bst # Capture another show output - directory: ../examples/running-commands output: ../source/sessions/running-commands-show-after.html command: show hello.bst # Capture a shell output - directory: ../examples/running-commands output: ../source/sessions/running-commands-shell.html command: shell hello.bst -- hello buildstream-1.6.9/doc/source/000077500000000000000000000000001437515270000161135ustar00rootroot00000000000000buildstream-1.6.9/doc/source/HACKING.rst000077700000000000000000000000001437515270000221302../../HACKING.rstustar00rootroot00000000000000buildstream-1.6.9/doc/source/additional_cachekeys.rst000066400000000000000000000113721437515270000230000ustar00rootroot00000000000000 .. _cachekeys: Cache keys ========== Cache keys for artifacts are generated from the inputs of the build process for the purpose of reusing artifacts in a well-defined, predictable way. Structure --------- Cache keys are SHA256 hash values generated from a pickled Python dict that includes: * Environment (e.g., project configuration and variables) * Element configuration (details depend on element kind, ``Element.get_unique_key()``) * Sources (``Source.get_unique_key()``) * Dependencies (depending on cache key type, see below) * Public data Cache key types --------------- There are two types of cache keys in BuildStream, ``strong`` and ``weak``. The purpose of a ``strong`` cache key is to capture the state of as many aspects as possible that can have an influence on the build output. The aim is that builds will be fully reproducible as long as the cache key doesn't change, with suitable module build systems that don't embed timestamps, for example. A ``strong`` cache key includes the strong cache key of each build dependency (and their runtime dependencies) of the element as changes in build dependencies (or their runtime dependencies) can result in build differences in reverse dependencies. This means that whenever the strong cache key of a dependency changes, the strong cache key of its reverse dependencies will change as well. A ``weak`` cache key has an almost identical structure, however, it includes only the names of build dependencies, not their cache keys or their runtime dependencies. A weak cache key will thus still change when the element itself or the environment changes but it will not change when a dependency is updated. For elements without build dependencies the ``strong`` cache key is identical to the ``weak`` cache key. Strict build plan ----------------- This is the default build plan that exclusively uses ``strong`` cache keys for the core functionality. An element's cache key can be calculated when the cache keys of the element's build dependencies (and their runtime dependencies) have been calculated and either tracking is not enabled or it has already completed for this element, i.e., the ``ref`` is available. This means that with tracking disabled the cache keys of all elements could be calculated right at the start of a build session. While BuildStream only uses ``strong`` cache keys with the strict build plan for the actual staging and build process, it will still calculate ``weak`` cache keys for each element. 
This allows BuildStream to store the artifact in the cache with both keys, reducing rebuilds when switching between strict and non-strict build plans. If the artifact cache already contains an artifact with the same ``weak`` cache key, it's replaced. Thus, non-strict builds always use the latest artifact available for a given ``weak`` cache key. Non-strict build plan --------------------- The non-strict build plan disables the time-consuming automatic rebuild of reverse dependencies at the cost of dropping the reproducibility benefits. It uses the ``weak`` cache keys for the core staging and build process. I.e., if an artifact is available with the calculated ``weak`` cache key, it will be reused for staging instead of being rebuilt. ``weak`` cache keys can be calculated early in the build session. After tracking, similar to when ``strong`` cache keys can be calculated with a strict build plan. Similar to how strict build plans also calculate ``weak`` cache keys, non-strict build plans also calculate ``strong`` cache keys. However, this is slightly more complex. To calculate the ``strong`` cache key of an element, BuildStream requires the ``strong`` cache keys of the build dependencies (and their runtime dependencies). The build dependencies of an element may have been updated since the artifact was built. With the non-strict build plan the artifact will still be reused. However, this means that we cannot use a ``strong`` cache key calculated purely based on the element definitions. We need a cache key that matches the environment at the time the artifact was built, not the current definitions. The only way to get the correct ``strong`` cache key is by retrieving it from the metadata stored in the artifact. As artifacts may need to be pulled from a remote artifact cache, the ``strong`` cache key is not readily available early in the build session. However, it can always be retrieved when an element is about to be built, as the dependencies are guaranteed to be in the local artifact cache at that point. ``Element._get_cache_key_from_artifact()`` extracts the ``strong`` cache key from an artifact in the local cache. ``Element._get_cache_key_for_build()`` calculates the ``strong`` cache key that is used for a particular build job. This is used for the embedded metadata and also as key to store the artifact in the cache. buildstream-1.6.9/doc/source/additional_sandboxing.rst000066400000000000000000000165751437515270000232070ustar00rootroot00000000000000 .. _sandboxing: Sandboxing ========== Introduction ------------ BuildStream assembles each element in a *sandbox*. The sandbox is a container environment which serves two purposes: giving BuildStream control over all build aspects in order to ensure reproducibility of build results, and providing safety guarantees for the host system that BuildStream is running on. The exact implementation of the sandbox varies depending on which platform you are running BuildStream. See below for backend-specific details. There are several factors that affect the build output and must therefore be under BuildStream's control: * Filesystem contents and metadata * The user and permissions model * Network access * Device access Each of these is detailed below. For safety reasons, BuildStream also controls the following things: * Access to files outside of the sandbox directory * Access to certain kernel-specific syscalls Creating a sandbox can require special priviliges. 
This is a safety concern too because bugs in the `bst` program can cause damage to a host if the program is running with extra privileges. The exact privileges that are required depend on your platform and backend. Element plugins can run arbitrary commands within the sandbox using the :mod:`sandbox API `. What elements can and can't do in the sandbox --------------------------------------------- This section specifies how BuildStream sandboxes are intended to work. A specific sandbox provider may not necessarily be able to achieve all of the requirements listed below so be sure to read the "platform notes" section as well. Filesystem access ~~~~~~~~~~~~~~~~~ The filesystem inside sandboxes should be read-only during element assembly, except for certain directories which element plugins can mark as being read/write. Most element plugins derive from :mod:`BuildElement `, which marks ``%{build-root}`` and ``%{install-root}`` as read/write. When running integration commands or `bst shell`, the sandbox should have a fully read-write filesystem. The changes made here do not need to persist beyond the lifetime of that sandbox, and **must not** affect the contents of artifacts stored in the cache. Certain top level directories should be treated specially in all sandboxes: * The ``/dev`` directory should contain device nodes, which are described in a separate section. * The ``/proc`` directory should have a UNIX 'procfs' style filesystem mounted. It should not expose any information about processes running outside of the sandbox. * The ``/tmp`` directory should be writable. Filesystem metadata ~~~~~~~~~~~~~~~~~~~ The writable areas inside a BuildStream sandbox are limited in what metadata can be written and stored. * All files must be owned by UID 0 and GID 0 * No files may have the setuid or setgid bits set * Extended file attributes (xattrs) cannot be written to or read. * Hardlinks to other files can be created, but the information about which files are hardlinked to each other will not be stored in the artifact that is created from the sandbox. These restrictions are due to technical limitations. In future we hope to support a wider range of filesystem metadata operations. See `issue #38 `_ for more details. User and permissions model ~~~~~~~~~~~~~~~~~~~~~~~~~~ All commands inside the sandbox run with user ID 0 and group ID 0. It should not be possible to become any other user ID. Network access ~~~~~~~~~~~~~~ Builds should not be able to access the network at all from the sandbox. All remote resources needed to build an element must be specified in the element's ``sources`` list so that BuildStream is able to see when they have changed. A sandbox opened by `bst shell` should allow network access. Device access ~~~~~~~~~~~~~ Builds should not be able to access any hardware devices at all. A few standard UNIX device files are needed, the whitelist is: * ``/dev/full`` * ``/dev/null`` * ``/dev/urandom`` * ``/dev/random`` * ``/dev/zero`` It may seem odd that we have sources of randomness in the sandbox, but a lot of tools do expect them to exist. We take the view that it's up to integrators to ensure that elements do not deliberately include randomness in their output. A sandbox opened by `bst shell` can make any devices available. There needs to be a console device so that it can be used interactively. Platform notes -------------- BuildStream currently only carries first-class support for modern Linux-based operating systems.
There is also a "fallback" backend which aims to make BuildStream usable on any POSIX-compatible operating system. The POSIX standard does not provide good support for creating containers so this implementation makes a number of unfortunate compromises. Linux ~~~~~ On Linux we use the following isolation and sandboxing primitives: * bind mounts * FUSE * Mount namespaces * Network namespaces * PID (process ID) namespaces * User namespaces (if available) * seccomp We access all of these features through a sandboxing tool named `Bubblewrap `_. User namespaces are not enabled by default in all Linux distributions. BuildStream still runs on such systems but will give a big warning on startup and will refuse to push any artifacts built on such a system to a remote cache. For more information, see `issue #92 `_. The Linux platform can operate as a standard user provided user namespace support is available. If user namespace support is not available you have the option of installing bubblewrap as a setuid binary to avoid needing to run the entire ``bst`` process as the ``root`` user. The artifact cache on Linux systems is implemented using `OSTree `_, which can allow us to stage artifacts using hardlinks instead of copying them. To avoid cache corruption it is vital that hardlinked files cannot be overwritten. In cases where the root filesystem inside the sandbox needs to be writable, a custom FUSE filesystem named SafeHardlinks is used which provides a copy-on-write layer. Some of the operations on filesystem metadata listed above are not prohibited by the sandbox, but will instead be silently dropped when an artifact is created. For more details see `issue #38 `_. Some details of the host machine are currently leaked by this platform backend. For more details, see `issue #262 `_. Fallback (POSIX) ~~~~~~~~~~~~~~~~ The fallback backend aims to be usable on a wide range of operating systems. Any OS that implements the POSIX specification and the ``chroot()`` syscall can be expected to work. There are no real isolation or sandboxing primitives that work across multiple operating systems, so the protection provided by this backend is minimal. It would be much safer to use a platform-specific backend. Filesystem isolation is done using the chroot() system call. This system call requires special privileges to use so ``bst`` usually needs to be run as the ``root`` user when using this backend. Network access is not blocked in the sandbox. However since there is unlikely to be a correct `/etc/resolv.conf` file, any network access that depends on name resolution will most likely fail anyway. Builds inside the sandbox execute as the ``root`` user. buildstream-1.6.9/doc/source/conf.py000066400000000000000000000235461437515270000174240ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # BuildStream documentation build configuration file, created by # sphinx-quickstart on Mon Nov 7 21:03:37 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import os import sys sys.path.insert(0, os.path.abspath('..')) from buildstream import __version__ # pylint: disable=wrong-import-position # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx_click.ext' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['.templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'BuildStream' copyright = '2017-2018, The BuildStream Contributors' # pylint: disable=redefined-builtin author = 'The BuildStream Contributors' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = __version__ # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # today = '' # # Else, today_fmt is used as the format for a strftime call. # # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = [ 'buildstream.' ] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. 
# " v documentation" by default. # # html_title = 'BuildStream v0.1' # A shorter title for the navigation bar. Default is the same as html_title. # # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # # html_additional_pages = {} # If false, no module index is generated. # # html_domain_indices = True # If false, no index is generated. # # html_use_index = True # If true, the index is split into individual pages for each letter. # # html_split_index = False # If true, links to the reST sources are added to the pages. # # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh' # # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'BuildStreamdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'BuildStream.tex', 'BuildStream Documentation', 'The BuildStream Contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # # latex_use_parts = False # If true, show page references after internal links. # # latex_show_pagerefs = False # If true, show URL addresses after external links. # # latex_show_urls = False # Documents to append as an appendix to all manuals. # # latex_appendices = [] # It false, will not define \strong, \code, itleref, \crossref ... but only # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added # packages. # # latex_keep_old_macro_names = True # If false, no module index is generated. # # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'buildstream', 'BuildStream Documentation', [author], 1) ] # If true, show URL addresses after external links. # # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'BuildStream', 'BuildStream Documentation', author, 'BuildStream', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # # texinfo_appendices = [] # If false, no module index is generated. # # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # # texinfo_no_detailmenu = False autodoc_member_order = 'bysource' buildstream-1.6.9/doc/source/core_additional.rst000066400000000000000000000001721437515270000217650ustar00rootroot00000000000000 Additional writings =================== .. toctree:: :maxdepth: 2 additional_cachekeys additional_sandboxing buildstream-1.6.9/doc/source/core_format.rst000066400000000000000000000004561437515270000211520ustar00rootroot00000000000000 Project format ============== This section details how to use the BuildStream YAML format to create your own project or modify existing projects. .. toctree:: :maxdepth: 2 :caption: Project format format_intro format_project format_declaring format_public format_project_refs buildstream-1.6.9/doc/source/core_framework.rst000066400000000000000000000007541437515270000216600ustar00rootroot00000000000000 .. _core_framework: Plugin API reference ==================== The core public APIs are of interest to anyone who wishes to implement custom :mod:`Element ` or :mod:`Source ` plugins, and can also be useful for working on BuildStream itself. .. toctree:: :maxdepth: 1 buildstream.plugin buildstream.source buildstream.element buildstream.buildelement buildstream.scriptelement buildstream.sandbox.sandbox buildstream.utils buildstream-1.6.9/doc/source/core_plugins.rst000066400000000000000000000024661437515270000213460ustar00rootroot00000000000000 .. 
_plugins: Plugin specific documentation ============================= Plugins provide their own individual plugin specific YAML configurations. The element ``.bst`` files can specify plugin specific configuration in the :ref:`config section `, while sources declared on a given element specify their plugin specific configuration directly :ref:`in their source declarations `. General elements ---------------- .. toctree:: :maxdepth: 1 elements/stack elements/import elements/compose elements/script elements/junction elements/filter .. _plugins_build_elements: Build elements -------------- .. toctree:: :maxdepth: 1 elements/manual elements/make elements/autotools elements/cmake elements/qmake elements/distutils elements/makemaker elements/modulebuild elements/meson elements/pip .. _plugins_sources: Sources ------- .. toctree:: :maxdepth: 1 sources/local sources/remote sources/tar sources/zip sources/git sources/bzr sources/ostree sources/patch sources/deb sources/pip External plugins ---------------- External plugins need to be installed separately, here is a list of BuildStream plugin projects known to us at this time: * `bst-external `_ buildstream-1.6.9/doc/source/examples/000077500000000000000000000000001437515270000177315ustar00rootroot00000000000000buildstream-1.6.9/doc/source/examples/flatpak-autotools.rst000066400000000000000000000117341437515270000241420ustar00rootroot00000000000000 .. _examples_flatpak_autotools: Building on a Flatpak SDK ========================= Here we demonstrate how to build and run software using a Flatpak SDK for the base runtime. .. note:: This example is distributed with BuildStream in the `doc/examples/flatpak-autotools `_ subdirectory. Project structure ----------------- The following is a simple :ref:`project ` definition: ``project.conf`` ~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/flatpak-autotools/project.conf :language: yaml Here we use an :ref:`arch option ` to allow conditional statements in this project to be made depending on machine architecture. For this example we only support the ``i386`` and ``x86_64`` architectures. Note that we've added a :ref:`source alias ` for the ``https://dl.flathub.org/`` repository to download the SDK from. ``elements/base/sdk.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/flatpak-autotools/elements/base/sdk.bst :language: yaml This is the :mod:`import ` element used to import the actual Flatpak SDK; it uses an :mod:`ostree ` source to download the Flatpak since these are hosted in OSTree repositories. While declaring the :mod:`ostree ` source, we specify a GPG public key to verify the OSTree download. This configuration is optional but recommended for OSTree repositories. The key is stored in the project directory at ``keys/gnome-sdk.gpg``, and can be downloaded from https://sdk.gnome.org/keys/. We also use :ref:`conditional statements ` to decide which branch to download. For the ``config`` section of this :mod:`import ` element, it's important to note two things: * **source**: We only want to extract the ``files/`` directory from the SDK. This is because Flatpak runtimes don't start at the root of the OSTree checkout, instead the actual files start in the ``files//`` subdirectory * **target**: The content we've extracted should be staged at ``/usr`` This is because Flatpak runtimes only contain the data starting at ``/usr``, and they expect to be staged at ``/usr`` at runtime, in an environment with the appropriate symlinks set up from ``/``.
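To make the relationship between these two keys a little more concrete, the ``config`` section of such an import element could look roughly like the following. This is only an illustrative sketch of the ``source`` and ``target`` keys discussed above; the authoritative values are those in the ``base/sdk.bst`` file included earlier.

.. code:: yaml

   config:
     # Only extract the SDK payload from the OSTree checkout
     source: files
     # Stage the extracted content at /usr inside the sandbox
     target: /usr
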
``elements/base/usrmerge.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/flatpak-autotools/elements/base/usrmerge.bst :language: yaml This is another :mod:`import ` element, and it uses the :mod:`local ` source type so that we can stage files literally stored in the same repository as the project. The purpose of this element is simply to add the symlinks for ``/lib -> /usr/lib``, ``/bin -> /usr/bin`` and ``/etc -> /usr/etc``, we have it depend on the ``base/sdk.bst`` element only to ensure that it is staged *after*, i.e. the symlinks are created after the SDK is staged. As suggested by the ``.bst`` file, the symlinks themselves are a part of the project and they are stored in the ``files/links`` directory. ``elements/base.bst`` ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/flatpak-autotools/elements/base.bst :language: yaml This is just a :mod:`stack ` element for convenience sake. Often times you will have a more complex base to build things on, and it is convenient to just use a :mod:`stack ` element for your elements to depend on without needing to know about the inner workings of the base system build. ``elements/hello.bst`` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/flatpak-autotools/elements/hello.bst :language: yaml Finally, we show an example of an :mod:`autotools ` element to build our sample "Hello World" program. We use another :mod:`local ` source to obtain the sample autotools project, but normally you would probably use a :mod:`git ` or other source to obtain source code from another repository. Using the project ----------------- Now that we've explained the basic layout of the project, here are just a few things you can try to do with the project. .. note:: The following examples assume that you have first changed your working directory to the `project root `_. Build the hello.bst element ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build the project, run :ref:`bst build ` in the following way: .. raw:: html :file: ../sessions/flatpak-autotools-build.html Run the hello world program ~~~~~~~~~~~~~~~~~~~~~~~~~~~ The hello world program has been built into the standard ``/usr`` prefix, and will automatically be in the default ``PATH`` for running things in a :ref:`bst shell `. To just run the program, run :ref:`bst shell ` in the following way: .. raw:: html :file: ../sessions/flatpak-autotools-shell.html buildstream-1.6.9/doc/source/examples/git-mirror.rst000066400000000000000000000102511437515270000225550ustar00rootroot00000000000000 Creating and using a git mirror ''''''''''''''''''''''''''''''' This is an example of how to create a git mirror using git's `git-http-backend `_ and `lighttpd `_. Prerequisites ============= You will need git installed, and git-http-backend must be present. It is assumed that the git-http-backend binary exists at `/usr/lib/git-core/git-http-backend`. You will need `lighttpd` installed, and at the bare minimum has the modules `mod_alias`, `mod_cgi`, and `mod_setenv`. I will be using gnome-modulesets as an example, which can be cloned from `http://gnome7.codethink.co.uk/gnome-modulesets.git`. Starting a git http server ========================== 1. Set up a directory containing mirrors ---------------------------------------- Choose a suitable directory to hold your mirrors, e.g. `/var/www/git`. Place the git repositories you want to use as mirrors in the mirror dir, e.g. ``git clone --mirror http://git.gnome.org/browse/yelp-xsl /var/www/git/yelp-xsl.git``. 2. 
Configure lighttpd --------------------- Write out a lighttpd.conf as follows: :: server.document-root = "/var/www/git/" server.port = 3000 server.modules = ( "mod_alias", "mod_cgi", "mod_setenv", ) alias.url += ( "/git" => "/usr/lib/git-core/git-http-backend" ) $HTTP["url"] =~ "^/git" { cgi.assign = ("" => "") setenv.add-environment = ( "GIT_PROJECT_ROOT" => "/var/www/git", "GIT_HTTP_EXPORT_ALL" => "" ) } .. note:: If you have your mirrors in another directory, replace /var/www/git/ with that directory. 3. Start lighttpd ----------------- lighttpd can be invoked with the command-line ``lighttpd -D -f lighttpd.conf``. 4. Test that you can fetch from it ---------------------------------- We can then clone the mirrored repo using git via http with ``git clone http://127.0.0.1:3000/git/yelp-xsl``. .. note:: If you have set server.port to something other than the default, you will need to replace the '3000' in the command-line. 5. Configure the project to use the mirror ------------------------------------------ To add this local http server as a mirror, add the following to the project.conf: .. code:: yaml mirrors: - name: local-mirror aliases: git_gnome_org: - http://127.0.0.1:3000/git/ 6. Test that the mirror works ----------------------------- We can make buildstream use the mirror by setting the alias to an invalid URL, e.g. .. code:: yaml aliases: git_gnome_org: https://www.example.com/invalid/url/ Now, if you build an element that uses the source you placed in the mirror (e.g. ``bst build core-deps/yelp-xsl.bst``), you will see that it uses your mirror. .. _lighttpd_git_tar_conf: Bonus: lighttpd conf for git and tar ==================================== For those who have also used the :ref:`tar-mirror tutorial `, a combined lighttpd.conf is below: :: server.document-root = "/var/www/" server.port = 3000 server.modules = ( "mod_alias", "mod_cgi", "mod_setenv", ) alias.url += ( "/git" => "/usr/lib/git-core/git-http-backend" ) $HTTP["url"] =~ "^/git" { cgi.assign = ("" => "") setenv.add-environment = ( "GIT_PROJECT_ROOT" => "/var/www/git", "GIT_HTTP_EXPORT_ALL" => "" ) } else $HTTP["url"] =~ "^/tar" { dir-listing.activate = "enable" } Further reading =============== If this mirror isn't being used exclusively in a secure network, it is strongly recommended you `use SSL `_. This is the bare minimum required to set up a git mirror. A large, public project would prefer to set it up using the `git protocol `_, and a security-conscious project would be configured to use `git over SSH `_. Lighttpd is documented on `its wiki `_. buildstream-1.6.9/doc/source/examples/tar-mirror.rst000066400000000000000000000052261437515270000225660ustar00rootroot00000000000000 .. _using_tar_mirror: Creating and using a tar mirror ''''''''''''''''''''''''''''''' This is an example of how to create a tar mirror using `lighttpd `_. Prerequisites ============= You will need `lighttpd` installed. I will be using gnome-modulesets as an example, which can be cloned from `http://gnome7.codethink.co.uk/gnome-modulesets.git`. Starting a tar server ===================== 1. Set up a directory containing mirrors ---------------------------------------- Choose a suitable directory to hold your mirrored tar files, e.g. `/var/www/tar`. Place the tar files you want to use as mirrors in your mirror dir, e.g. .. code:: mkdir -p /var/www/tar/gettext wget -O /var/www/tar/gettext/gettext-0.19.8.1.tar.xz https://ftp.gnu.org/gnu/gettext/gettext-0.19.8.1.tar.xz 2. 
Configure lighttpd --------------------- Write out a lighttpd.conf as follows: :: server.document-root = "/var/www/tar/" server.port = 3000 dir-listing.activate = "enable" .. note:: If you have your mirrors in another directory, replace /var/www/tar/ with that directory. .. note:: An example lighttpd.conf that works for both git and tar services is available :ref:`here ` 3. Start lighttpd ----------------- lighttpd can be invoked with the command-line ``lighttpd -D -f lighttpd.conf``. 4. Test that you can fetch from it ---------------------------------- We can then download the mirrored file with ``wget 127.0.0.1:3000/tar/gettext/gettext-0.19.8.1.tar.xz``. .. note:: If you have set server.port to something other than the default, you will need to replace the '3000' in the command-line. 5. Configure the project to use the mirror ------------------------------------------ To add this local http server as a mirror, add the following to the project.conf: .. code:: yaml mirrors: - name: local-mirror aliases: ftp_gnu_org: - http://127.0.0.1:3000/tar/ 6. Test that the mirror works ----------------------------- We can make buildstream use the mirror by setting the alias to an invalid URL, e.g. .. code:: yaml aliases: ftp_gnu_org: https://www.example.com/invalid/url/ Now, if you build an element that uses the source you placed in the mirror (e.g. ``bst build core-deps/gettext.bst``), you will see that it uses your mirror. Further reading =============== If this mirror isn't being used exclusively in a secure network, it is strongly recommended you `use SSL `_. Lighttpd is documented on `its wiki `_. buildstream-1.6.9/doc/source/format_declaring.rst000066400000000000000000000356631437515270000221620ustar00rootroot00000000000000 Declaring elements ================== .. _format_basics: Element basics -------------- Here is a rather complete example using the autotools element kind and git source kind: .. code:: yaml # Specify the kind of element this is kind: autotools # Specify some dependencies depends: - element1.bst - element2.bst # Specify the source which should be built sources: - kind: git url: upstream:modulename.git track: master ref: d0b38561afb8122a3fc6bafc5a733ec502fcaed6 # Override some variables variables: sysconfdir: "%{prefix}/etc" # Tweak the sandbox shell environment environment: LD_LIBRARY_PATH: /some/custom/path # Specify the configuration of the element config: # Override autotools element default configure-commands configure-commands: - "%{configure} --enable-fancy-feature" # Specify public domain data, visible to other elements. public: bst: integration-commands: - /usr/bin/update-fancy-feature-cache # Specify a user id and group id to use in the build sandbox. sandbox: build-uid: 0 build-gid: 0 For most use cases you would not need to specify this much detail, we've provided details here in order to have a more complete initial example. Let's break down the above and give a brief explanation of what these attributes mean. Kind ~~~~ .. code:: yaml # Specify the kind of element this is kind: autotools The ``kind`` attribute specifies which plugin will be operating on the element's input to produce its output. Plugins define element types and each of them can be referred to by name with the ``kind`` attribute. To refer to a third party plugin, prefix the plugin with its package, for example: .. code:: yaml kind: buildstream-plugins:dpkg_build .. _format_depends: Depends ~~~~~~~ .. 
code:: yaml # Specify some dependencies depends: - element1.bst - element2.bst Relationships between elements are specified with the ``depends`` attribute. Elements may depend on other elements by specifying the :ref:`element path ` relative filename to the elements they depend on here. See :ref:`format_dependencies` for more information on the dependency model. .. _format_build_depends: Build-Depends ~~~~~~~~~~~~~ .. code:: yaml # Specify some build-dependencies build-depends: - element1.bst - element2.bst Build dependencies between elements can be specified with the ``build-depends`` attribute. The above code snippet is equivalent to: .. code:: yaml # Specify some build-dependencies depends: - filename: element1.bst type: build - filename: element2.bst type: build See :ref:`format_dependencies` for more information on the dependency model. .. note:: The ``build-depends`` configuration is available since :ref:`format version 14 ` .. _format_runtime_depends: Runtime-Depends ~~~~~~~~~~~~~~~ .. code:: yaml # Specify some runtime-dependencies runtime-depends: - element1.bst - element2.bst Runtime dependencies between elements can be specified with the ``runtime-depends`` attribute. The above code snippet is equivalent to: .. code:: yaml # Specify some runtime-dependencies depends: - filename: element1.bst type: runtime - filename: element2.bst type: runtime See :ref:`format_dependencies` for more information on the dependency model. .. note:: The ``runtime-depends`` configuration is available since :ref:`format version 14 ` .. _format_sources: Sources ~~~~~~~ .. code:: yaml # Specify the source which should be built sources: - kind: git url: upstream:modulename.git track: master ref: d0b38561afb8122a3fc6bafc5a733ec502fcaed6 Here we specify some input for the element, any number of sources may be specified. By default the sources will be staged in the root of the element's build directory in the build sandbox, but sources may specify a ``directory`` attribute to control where the sources will be staged. The ``directory`` attribute may specify a build sandbox relative subdirectory. For example, one might encounter a component which requires a separate data package in order to build itself, in this case the sources might be listed as: .. code:: yaml sources: # Specify the source which should be built - kind: git url: upstream:modulename.git track: master ref: d0b38561afb8122a3fc6bafc5a733ec502fcaed6 # Specify the data package we need for build frobnication, # we need it to be unpacked in a src/frobdir - kind: tarball directory: src/frobdir url: data:frobs.tgz ref: 9d4b1147f8cf244b0002ba74bfb0b8dfb3... Like Elements, Source types are plugins which are indicated by the ``kind`` attribute. Asides from the common ``kind`` and ``directory`` attributes which may be applied to all Sources, refer to the Source specific documentation for meaningful attributes for the particular Source. Variables ~~~~~~~~~ .. code:: yaml # Override some variables variables: sysconfdir: "%{prefix}/etc" Variables can be declared or overridden from an element. Variables can also be declared and overridden in the :ref:`projectconf` See :ref:`format_variables` below for a more in depth discussion on variables in BuildStream. .. _format_environment: Environment ~~~~~~~~~~~ .. 
code:: yaml # Tweak the sandbox shell environment environment: LD_LIBRARY_PATH: /some/custom/path Environment variables can be set to literal values here, these environment variables will be effective in the :mod:`Sandbox ` where build instructions are run for this element. Environment variables can also be declared and overridden in the :ref:`projectconf` .. _format_config: Config ~~~~~~ .. code:: yaml # Specify the configuration of the element config: # Override autotools element default configure-commands configure-commands: - "%{configure} --enable-fancy-feature" Here we configure the element itself. The autotools element provides sane defaults for building sources which use autotools. Element default configurations can be overridden in the ``project.conf`` file and additionally overridden in the declaration of an element. For meaningful documentation on what can be specified in the ``config`` section for a given element ``kind``, refer to the :ref:`element specific documentation `. .. _format_public: Public ~~~~~~ .. code:: yaml # Specify public domain data, visible to other elements. public: bst: integration-commands: - /usr/bin/update-fancy-feature-cache Metadata declared in the ``public`` section of an element is visible to any other element which depends on the declaring element in a given pipeline. BuildStream itself consumes public data from the ``bst`` domain. The ``integration-commands`` demonstrated above for example, describe commands which should be run in an environment where the given element is installed but before anything should be run. An element is allowed to read domain data from any element it depends on, and users may specify additional domains to be understood and processed by their own element plugins. The public data keys which are recognized under the ``bst`` domain can be viewed in detail in the :ref:`builtin public data ` section. .. _format_sandbox: Sandbox ~~~~~~~ Configuration for the build sandbox (other than :ref:`environment variables `) can be placed in the ``sandbox`` configuration. At present, only the UID and GID used by the user in the group can be specified. .. code:: yaml # Specify a user id and group id to use in the build sandbox. sandbox: build-uid: 1003 build-gid: 1001 BuildStream normally uses uid 0 and gid 0 (root) to perform all builds. However, the behaviour of certain tools depends on user id, behaving differently when run as non-root. To support those builds, you can supply a different uid or gid for the sandbox. Only bwrap-style sandboxes support custom user IDs at the moment, and hence this will only work on Linux host platforms. .. note:: The ``sandbox`` configuration is available since :ref:`format version 6 ` .. _format_dependencies: Dependencies ------------ The dependency model in BuildStream is simplified by treating software distribution and software building as separate problem spaces. This is to say that one element can only ever depend on another element but never on a subset of the product which another element produces. In this section we'll quickly go over the few features BuildStream offers in its dependency model. Expressing dependencies ~~~~~~~~~~~~~~~~~~~~~~~ Dependencies in BuildStream are parameterizable objects, however as demonstrated in the :ref:`above example `, they can also be expressed as simple strings as a convenience shorthand in most cases, whenever the default dependency attributes are suitable. .. 
note:: Note that the order in which element dependencies are declared in the ``depends``, ``build-depends`` and ``runtime-depends`` lists is not meaningful. Dependency dictionary: .. code:: yaml # Fully specified dependency depends: - filename: foo.bst type: build junction: baseproject.bst strict: false Attributes: * ``filename`` The :ref:`element path ` relative filename of the element to depend on in the project. * ``type`` This attribute is used to express the :ref:`dependency type `. This field is not permitted in :ref:`Build-Depends ` or :ref:`Runtime-Depends `. * ``junction`` This attribute can be used to depend on elements in other projects. If a junction is specified, then it must be an :ref:`element path ` relative filename of the junction element in the project. In the case that a *junction* is specified, the ``filename`` attribute indicates an element in the *junctioned project*. See :mod:`junction `. .. note:: The ``junction`` attribute is available since :ref:`format version 1 ` * ``strict`` This attribute can be used to specify that this element should be rebuilt when the dependency changes, even when :ref:`strict mode ` has been turned off. This is appropriate whenever a dependency's output is consumed verbatim in the output of the depending element, for instance when static linking is in use. Cross-junction dependencies ~~~~~~~~~~~~~~~~~~~~~~~~~~~ As mentioned above, cross-junction dependencies can be specified using the ``junction`` attribute. They can also be expressed as simple strings as a convenience shorthand. You can refer to cross-junction elements using the syntax ``{junction-name}:{element-name}``. For example, the following is logically the same as the example above: .. code:: yaml build-depends: - baseproject.bst:foo.bst Similarly, you can also refer to cross-junction elements via the ``filename`` attribute, like so: .. code:: yaml depends: - filename: baseproject.bst:foo.bst type: build .. note:: BuildStream does not allow recursive lookups for junction elements. If a filename contains more than one ``:`` (colon) character, an error will be raised. See :ref:`nested junctions ` for more details on nested junctions. .. note:: This shorthand is available since :ref:`format version 15 ` .. _format_dependencies_types: Dependency types ~~~~~~~~~~~~~~~~ The dependency ``type`` attribute defines what the dependency is required for and is essential to how BuildStream plots a build plan. There are two types which one can specify for a dependency: * ``build`` A ``build`` dependency type states that the given element's product must be staged in order to build the depending element. Depending on an element which has ``build`` dependencies will not implicitly depend on that element's ``build`` dependencies. * ``runtime`` A ``runtime`` dependency type states that the given element's product must be present for the depending element to function. An element's ``runtime`` dependencies need not be staged in order to build the element. If ``type`` is not specified, then it is assumed that the dependency is required both at build time and runtime. .. note:: It is assumed that a dependency which is required for building an element must run while building the depending element. This means that ``build`` depending on a given element implies that that element's ``runtime`` dependencies will also be staged for the purpose of building. .. _format_variables: Using variables --------------- Variables in BuildStream are a way to make your build instructions and element configurations more dynamic.
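As a brief illustrative sketch of what the following subsections cover (the variable name and commands here are made up for this example), an element can declare a variable and then reference it with the ``%{...}`` syntax in its configuration:

.. code:: yaml

   variables:
     # A hypothetical variable declared by this element
     docdir: "%{datadir}/doc"

   config:
     install-commands:
     - install -d "%{install-root}%{docdir}"
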
Referring to variables ~~~~~~~~~~~~~~~~~~~~~~ Variables are expressed as ``%{...}``, where ``...`` must contain only alphanumeric characters and the separators ``_`` and ``-``. Further, the first letter of ``...`` must be an alphabetic character. .. code:: yaml This is release version %{version} Declaring and overriding variables ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To declare or override a variable, one need only specify a value in the relevant *variables* section: .. code:: yaml variables: hello: Hello World You can refer to another variable while declaring a variable: .. code:: yaml variables: release-text: This is release version %{version} The order in which you declare variables is arbitrary, so long as there is no cyclic dependency and that all referenced variables are declared, the following is fine: .. code:: yaml variables: release-text: This is release version %{version} version: 5.5 .. note:: It should be noted that variable resolution only happens after all :ref:`Element Composition ` has already taken place. This is to say that overriding ``%{version}`` at a higher priority will effect the final result of ``%{release-text}``. **Example:** .. code:: yaml kind: autotools # Declare variable, expect %{version} was already declared variables: release-text: This is release version %{version} config: # Customize the installation install-commands: - | %{make-install} RELEASE_TEXT="%{release-text}" Variables declared by BuildStream ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BuildStream declares a set of :ref:`builtin ` variables that may be overridden. In addition, the following read-only variables are also dynamically declared by BuildStream: * ``element-name`` The name of the element being processed (e.g base/alpine.bst). * ``project-name`` The name of project where BuildStream is being used. * ``max-jobs`` Maximum number of parallel build processes within a given build, support for this is conditional on the element type and the build system used (any element using 'make' can implement this). buildstream-1.6.9/doc/source/format_intro.rst000066400000000000000000000237661437515270000213660ustar00rootroot00000000000000 Introduction ============ At the core of BuildStream is a data model of :mod:`Elements ` which are parsed from ``.bst`` files in a project directory and configured from a few different sources. When BuildStream loads your project, various levels of composition occur, allowing configuration on various levels with different priority. This page provides an introduction to the project directory structure, explains the basic *directives* supported inherently throughout the format, and outlines how composition occurs and what configurations are considered in which order. The meaning of the various constructs expressed in the BuildStream format are covered in other sections of the documentation. .. _format_structure: Directory structure ------------------- A BuildStream project is a directory consisting of: * A project configuration file * BuildStream element files * Optional user defined plugins * An optional project.refs file A typical project structure may look like this:: myproject/project.conf myproject/project.refs myproject/elements/element1.bst myproject/elements/element2.bst myproject/elements/... myproject/plugins/customelement.py myproject/plugins/customelement.yaml myproject/plugins/... Except for the project configuration file, the user is allowed to structure their project directory in any way. 
For documentation on the format of the project configuration file, refer to the :ref:`projectconf` documentation. Simpler projects may choose to place all element definition files at the root of the project directory while more complex projects may decide to put stacks in one directory and other floating elements into other directories, perhaps placing deployment elements in another directory, this is all fine. The important part to remember is that when you declare dependency relationships, a project relative path to the element one depends on must be provided. .. _format_composition: Composition ----------- Below are the various sources of configuration which go into an element or source in the order in which they are applied. Configurations which are applied later have a higher priority and override configurations which precede them. 1. Builtin defaults ~~~~~~~~~~~~~~~~~~~ The :ref:`builtin defaults ` provide a set of builtin default default values for ``project.conf``. The project wide defaults defined in the builtin project configuration, such as the *variables* or *environment* sections, form the base configuration of all elements. 2. Project configuration ~~~~~~~~~~~~~~~~~~~~~~~~ The :ref:`project wide defaults ` specified in your ``project.conf`` are now applied on top of builtin defaults. Defaults such as the :ref:`variables ` or :ref:`environment ` which are specified in your ``project.conf`` override the builtin defaults for elements. Note that :ref:`plugin type specific configuration ` in ``project.conf`` is not applied until later. 3. Plugin defaults ~~~~~~~~~~~~~~~~~~ Elements and Sources are all implemented as plugins. Each Element plugin installs a ``.yaml`` file along side their plugin to define the default *variables*, *environment* and *config*. The *config* is element specific and as such this is the first place where defaults can be set on the *config* section. The *variables* and *environment* specified in the declaring plugin's defaults here override the project configuration defaults for the given element ``kind``. Source plugins do not have a ``.yaml`` file, and do not have *variables* or *environment*. 4. Project configuration overrides ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The ``project.conf`` now gives you :ref:`another opportunity ` to override configuration on a per plugin basis. Configurations specified in the :ref:`elements ` or :ref:`sources ` sections of the ``project.conf`` will override the given plugin's defaults. In this phase, it is possible to override any configurations of a given plugin, including configuration in element specific *config* sections. See also :ref:`project_overrides` 5. Plugin declarations ~~~~~~~~~~~~~~~~~~~~~~~ Finally, after having resolved any :ref:`conditionals ` in the parsing phase of loading element declarations; the configurations specified in a ``.bst`` file have the last word on any configuration in the data model. .. _format_directives: Directives ---------- .. _format_directives_conditional: (?) Conditionals ~~~~~~~~~~~~~~~~ The ``(?)`` directive allows expression of conditional statements which test :ref:`project option ` values. The ``(?)`` directive may appear as a key in any dictionary expressed in YAML, and its value is a list of conditional expressions. Each conditional expression must be a single key dictionary, where the key is the conditional expression itself, and the value is a dictionary to be composited into the parent dictionary containing the ``(?)`` directive if the expression evaluates to a truthy value. 
**Example:** .. code:: yaml variables: prefix: "/usr" enable-debug: False (?): - relocate == True: prefix: "/opt" - debug == True: enable-debug: True Expressions are evaluated in the specified order, and each time an expression evaluates to a truthy value, its value will be composited to the parent dictionary in advance of processing other elements, allowing for logically overriding previous decisions in the condition list. Nesting of conditional statements is also supported. **Example:** .. code:: yaml variables: enable-logging: False enable-debug: False (?): - logging == True: enable-logging: True (?): - debugging == True: enable-debug: True Conditionals are expressed in a pythonic syntax, the specifics for testing the individually supported option types are described in their :ref:`respective documentation `. Compound conditionals are also allowed. **Example:** .. code:: yaml variables: enable-debug: False (?): - (logging == True and debugging == True): enable-debug: True .. _format_directives_assertion: (!) Assertions ~~~~~~~~~~~~~~ Assertions allow the project author to abort processing and present a custom error message to the user building their project. This is only useful when used with conditionals, allowing the project author to assert some invalid configurations. **Example:** .. code:: yaml variables: (?): - (logging == False and debugging == True): (!): | Impossible to print any debugging information when logging is disabled. .. _format_directives_list_prepend: (<) List Prepend ~~~~~~~~~~~~~~~~ Indicates that the list should be prepended to the target list, instead of the default behavior which is to replace the target list. **Example:** .. code:: yaml config: configure-commands: # Before configuring, lets make sure we're using # the latest config.sub & config.guess (<): - cp %{datadir}/automake-*/config.{sub,guess} . .. _format_directives_list_append: (>) List Append ~~~~~~~~~~~~~~~ Indicates that the list should be appended to the target list, instead of the default behavior which is to replace the target list. **Example:** .. code:: yaml public: bst: split-rules: devel: # This element also adds some extra stubs which # need to be included in the devel domain (>): - "%{libdir}/*.stub" .. _format_directives_list_overwrite: (=) List Overwrite ~~~~~~~~~~~~~~~~~~ Indicates that the list should be overwritten completely. This exists mostly for completeness, and we recommend using literal lists most of the time instead of list overwrite directives when the intent is to overwrite a list. This has the same behavior as a literal list, except that an error will be triggered in the case that there is no underlying list to overwrite; whereas a literal list will simply create a new list. The added error protection can be useful when intentionally overwriting a list in an element's *public data*, which is mostly free form and not validated. **Example:** .. code:: yaml config: install-commands: # This element's `make install` is broken, replace it. (=): - cp src/program %{bindir} (@) Include ~~~~~~~~~~~ Indicates that content should be loaded from files. This include directive expects a string, or a list of strings when including multiple files. Each of these strings represent a project relative filename to include. Files can be included from subprojects by prefixing the string with the locally defined :mod:`junction element ` and colon (':'). 
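For instance, a fragment might pull in several include files, mixing a local file with one provided by a subproject. This is only a hypothetical sketch; the file and junction names are made up:

.. code:: yaml

   variables:
     (@):
     - includes/variables.yml
     - base-junction.bst:includes/variables.yml
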
The include directive can be used in any dictionary declared in the :ref:`project.conf `, in any :ref:`.bst file `, or recursively included in another include file. The including YAML fragment has priority over the files it includes, and overrides any values introduced by the includes. When including multiple files, files are included in the order they are declared in the include list, and each subsequent include file takes priority over the previous one. .. important:: Cross junction include files are not processed when loading :mod:`junction elements `. Variables, :ref:`element overrides `, :ref:`source overrides ` and :ref:`mirrors ` used in the declaration of a junction must be declared in the :ref:`project.conf ` or in included files which are local to the project declaring the junction itself. :mod:`Junction elements ` cannot use include directives. **Example:** .. code:: yaml elements: (@): junction.bst:includes/element-overrides.bst .. note:: The include directive is available since :ref:`format version 12 ` buildstream-1.6.9/doc/source/format_project.rst000066400000000000000000000514551437515270000216730ustar00rootroot00000000000000 .. _projectconf: Project configuration ===================== The project configuration file should be named ``project.conf`` and be located at the project root. It holds information such as Source aliases relevant for the sources used in the given project as well as overrides for the configuration of element types used in the project. Values specified in the project configuration override any of the default BuildStream project configuration, which is included :ref:`here ` for reference. .. _project_essentials: Essentials ---------- .. _project_format_name: Project name ~~~~~~~~~~~~ The project name is a unique symbol for your project and will be used to distinguish your project from others in user preferences, namespacing of your project's artifacts in shared artifact caches, and in any case where BuildStream needs to distinguish between multiple projects. The first thing to set up in your ``project.conf`` should be the name of your project. .. code:: yaml name: my-project-name .. note:: The project name may contain alphanumeric characters, dashes and underscores, and may not start with a leading digit. .. _project_format_version: Format version ~~~~~~~~~~~~~~ The BuildStream format is guaranteed to be backwards compatible with any earlier releases. The project's minimum required format version of BuildStream can be specified in ``project.conf`` with the ``format-version`` field, e.g.: .. code:: yaml # The minimum base BuildStream format format-version: 0 BuildStream will increment its core YAML format version at least once in any given minor point release where the format has been extended to support a new feature. .. note:: External :mod:`Element ` and :mod:`Source ` plugins also implement their own YAML configuration fragments and as such are revisioned separately from the core format. See :ref:`project_plugins` for details on specifying a minimum version of a specific plugin. Core :mod:`Elements ` and :mod:`Sources ` which are maintained and distributed as a part of BuildStream are revisioned under the same global ``format-version`` described here. .. _project_element_path: Element path ~~~~~~~~~~~~ To allow the user to structure their project nicely, BuildStream allows the user to specify a project subdirectory where element ``.bst`` files are stored. ..
code:: yaml element-path: elements Note that elements are referred to by their relative paths, whenever elements are referred to in a ``.bst`` file or on the command line. .. _project_format_ref_storage: Ref storage ~~~~~~~~~~~ By default, BuildStream expects to read and write source references directly in the :ref:`source declaration `, but this can be inconvenient and prohibitive in some workflows. Alternatively, BuildStream allows source references to be stored centrally in a :ref:`project.refs file ` in the toplevel :ref:`project directory `. This can be controlled with the ``ref-storage`` option, which is allowed to be configured with the following values: * ``inline`` Source references are stored directly in the :ref:`source declaration ` * ``project.refs`` Source references are stored in the ``project.refs`` file, and junction source references are stored in the ``junction.refs`` file. To enable storing of source references in ``project.refs``, add the following to your ``project.conf``: .. code:: yaml ref-storage: project.refs .. note:: The ``ref-storage`` configuration is available since :ref:`format version 8 ` .. _configurable_warnings: Configurable Warnings ~~~~~~~~~~~~~~~~~~~~~ Warnings can be configured as fatal using the ``fatal-warnings`` configuration item. When a warning is configured as fatal, where a warning would usually be thrown instead an error will be thrown causing the build to fail. Individual warnings can be configured as fatal by setting ``fatal-warnings`` to a list of warnings. .. code:: yaml fatal-warnings: - overlaps - ref-not-in-track - : BuildStream provides a collection of :class:`Core Warnings ` which may be raised by a variety of plugins. Other configurable warnings are plugin specific and should be noted within their individual documentation. .. note:: The ``fatal-warnings`` configuration is available since :ref:`format version 16 ` .. _project_source_aliases: Source aliases ~~~~~~~~~~~~~~ In order to abstract the download location of source code and any assets which need to be downloaded, and also as a matter of convenience, BuildStream allows one to create named aliases for URLs which are to be used in the individual ``.bst`` files. .. code:: yaml aliases: foo: git://git.foo.org/ bar: http://bar.com/downloads/ Sandbox options ~~~~~~~~~~~~~~~ Sandbox options for the whole project can be supplied in ``project.conf`` in the same way as in an element. See :ref:`element configuration ` for more detail. .. code:: yaml # Specify a user id and group id to use in the build sandbox. sandbox: build-uid: 1003 build-gid: 1001 .. note:: The ``sandbox`` configuration is available since :ref:`format version 6 ` .. _project_essentials_artifacts: Artifact server ~~~~~~~~~~~~~~~ If you have setup an :ref:`artifact server ` for your project then it is convenient to configure this in your ``project.conf`` so that users need not have any additional configuration to communicate with an artifact share. .. code:: yaml artifacts: # A url from which to download prebuilt artifacts url: https://foo.com/artifacts You can also specify a list of caches here; earlier entries in the list will have higher priority than later ones. .. _project_essentials_mirrors: Mirrors ~~~~~~~ A list of mirrors can be defined that couple a location to a mapping of aliases to a list of URIs, e.g. .. 
code:: yaml mirrors: - name: middle-earth aliases: foo: - http://www.middle-earth.com/foo/1 - http://www.middle-earth.com/foo/2 bar: - http://www.middle-earth.com/bar/1 - http://www.middle-earth.com/bar/2 - name: oz aliases: foo: - http://www.oz.com/foo bar: - http://www.oz.com/bar The order that the mirrors (and the URIs therein) are consulted is in the order they are defined when fetching, and in reverse-order when tracking. A default mirror to consult first can be defined via :ref:`user config `, or the command-line argument :ref:`--default-mirror `. .. note:: The ``mirrors`` field is available since :ref:`format version 11 ` .. _project_plugins: External plugins ---------------- If your project makes use of any custom :mod:`Element ` or :mod:`Source ` plugins, then the project must inform BuildStream of the plugins it means to make use of and the origin from which they can be loaded. Note that plugins with the same name from different origins are not permitted. Local plugins ~~~~~~~~~~~~~ Local plugins are expected to be found in a subdirectory of the actual BuildStream project. :mod:`Element ` and :mod:`Source ` plugins should be stored in separate directories to avoid namespace collisions. The versions of local plugins are largely immaterial since they are revisioned along with the project by the user, usually in a VCS like git. However, for the sake of consistency with other plugin loading origins we require that you specify a version, this can always be ``0`` for a local plugin. .. code:: yaml plugins: - origin: local path: plugins/sources # We want to use the `mysource` source plugin located in our # project's `plugins/sources` subdirectory. sources: mysource: 0 Pip plugins ~~~~~~~~~~~ Plugins loaded from the ``pip`` origin are expected to be installed separately on the host operating system using python's package management system. .. code:: yaml plugins: - origin: pip # Specify the name of the python package containing # the plugins we want to load. The name one would use # on the `pip install` command line. # package-name: potato # We again must specify a minimal format version for the # external plugin, it is allowed to be `0`. # elements: potato: 0 .. _project_options: Options ------- Options are how BuildStream projects can define parameters which can be configured by users invoking BuildStream to build your project. Options are declared in the ``project.conf`` in the main ``options`` dictionary. .. code:: yaml options: debug: type: bool description: Whether to enable debugging default: False Users can configure those options when invoking BuildStream with the ``--option`` argument:: $ bst --option debug True ... .. note:: The name of the option may contain alphanumeric characters underscores, and may not start with a leading digit. Common properties ~~~~~~~~~~~~~~~~~ All option types accept the following common attributes * ``type`` Indicates the type of option to declare * ``description`` A description of the meaning of the option * ``variable`` Optionally indicate a :ref:`variable ` name to export the option to. A string form of the selected option will be used to set the exported value. If used, this value will override any existing value for the variable declared in ``project.conf``, and will be overridden in the regular :ref:`composition order `. .. note:: The name of the variable to export may contain alphanumeric characters, dashes, underscores, and may not start with a leading digit. .. 
_project_options_bool: Boolean ~~~~~~~ The ``bool`` option type allows specifying boolean values which can be cased in conditional expressions. **Declaring** .. code:: yaml options: debug: type: bool description: Whether to enable debugging default: False **Evaluating** Boolean options can be tested in expressions with equality tests: .. code:: yaml variables: enable-debug: False (?): - debug == True: enable-debug: True Or simply treated as truthy values: .. code:: yaml variables: enable-debug: False (?): - debug: enable-debug: True **Exporting** When exporting boolean options as variables, a ``True`` option value will be exported as ``1`` and a ``False`` option as ``0`` .. _project_options_enum: Enumeration ~~~~~~~~~~~ The ``enum`` option type allows specifying a string value with a restricted set of possible values. **Declaring** .. code:: yaml options: loglevel: type: enum description: The logging level values: - debug - info - warning default: info **Evaluating** Enumeration options must be tested as strings in conditional expressions: .. code:: yaml variables: enable-debug: False (?): - loglevel == "debug": enable-debug: True **Exporting** When exporting enumeration options as variables, the value is exported as a variable directly, as it is a simple string. .. _project_options_flags: Flags ~~~~~ The ``flags`` option type allows specifying a list of string values with a restricted set of possible values. In contrast with the ``enum`` option type, the *default* value need not be specified and will default to an empty set. **Declaring** .. code:: yaml options: logmask: type: flags description: The logging mask values: - debug - info - warning default: - info **Evaluating** Options of type ``flags`` can be tested in conditional expressions using a pythonic *in* syntax to test if an element is present in a set: .. code:: yaml variables: enable-debug: False (?): - ("debug" in logmask): enable-debug: True **Exporting** When exporting flags options as variables, the value is exported as a comma separated list of selected value strings. .. _project_options_arch: Architecture ~~~~~~~~~~~~ The ``arch`` option type is special enumeration option which defaults to the result of `uname -m`, and does not support assigning any default in the project configuration. .. code:: yaml options: machine_arch: type: arch description: The machine architecture values: - arm - aarch64 - i386 - x86_64 Architecture options can be tested with the same expressions as other Enumeration options. .. _project_options_element_mask: Element mask ~~~~~~~~~~~~ The ``element-mask`` option type is a special Flags option which automatically allows only element names as values. .. code:: yaml options: debug_elements: type: element-mask description: The elements to build in debug mode This can be convenient for automatically declaring an option which might apply to any element, and can be tested with the same syntax as other Flag options. .. code:: yaml variables: enable-debug: False (?): - ("element.bst" in debug_elements): enable-debug: True .. _project_defaults: Element default configuration ----------------------------- The ``project.conf`` plays a role in defining elements by providing default values and also by overriding values declared by plugins on a plugin wide basis. See the :ref:`composition ` documentation for more detail on how elements are composed. .. _project_defaults_variables: Variables ~~~~~~~~~ The defaults for :ref:`Variables ` used in your project is defined here. .. 
code:: yaml variables: prefix: "/usr" .. _project_defaults_environment: Environment ~~~~~~~~~~~ The defaults environment for the build sandbox is defined here. .. code:: yaml environment: PATH: /usr/bin:/bin:/usr/sbin:/sbin Additionally, the special ``environment-nocache`` list which specifies which environment variables do not effect build output, and are thus not considered in the calculation of artifact keys can be defined here. .. code:: yaml environment-nocache: - MAXJOBS Note that the ``environment-nocache`` list only exists so that we can control parameters such as ``make -j ${MAXJOBS}``, allowing us to control the number of jobs for a given build without effecting the resulting cache key. Split rules ~~~~~~~~~~~ The project wide :ref:`split rules ` defaults can be specified here. .. code:: yaml split-rules: devel: - | %{includedir} - | %{includedir}/** - | %{libdir}/lib*.a - | %{libdir}/lib*.la .. _project_overrides: Overriding plugin defaults -------------------------- Base attributes declared by element and source plugins can be overridden on a project wide basis. This section explains how to make project wide statements which augment the configuration of an element or source plugin. .. _project_element_overrides: Element overrides ~~~~~~~~~~~~~~~~~ The elements dictionary can be used to override variables, environments or plugin specific configuration data as shown below. .. code:: yaml elements: # Override default values for all autotools elements autotools: variables: bindir: "%{prefix}/bin" config: configure-commands: ... environment: PKG_CONFIG_PATH=%{libdir}/pkgconfig .. _project_source_overrides: Source overrides ~~~~~~~~~~~~~~~~ The sources dictionary can be used to override source plugin specific configuration data as shown below. .. code:: yaml sources: # Override default values for all git sources git: config: checkout-submodules: False .. note:: The ``sources`` override is available since :ref:`format version 1 ` .. _project_shell: Customizing the shell --------------------- Since BuildStream cannot know intimate details about your host or about the nature of the runtime and software that you are building, the shell environment for debugging and testing applications may need some help. The ``shell`` section allows some customization of the shell environment. .. note:: The ``shell`` section is available since :ref:`format version 1 ` Interactive shell command ~~~~~~~~~~~~~~~~~~~~~~~~~ By default, BuildStream will use ``sh -i`` when running an interactive shell, unless a specific command is given to the ``bst shell`` command. BuildStream will automatically set a convenient prompt via the ``PS1`` environment variable for interactive shells; which might be overwritten depending on the shell you use in your runtime. If you are using ``bash``, we recommend the following configuration to ensure that the customized prompt is not overwritten: .. code:: yaml shell: # Specify the command to run by default for interactive shells command: [ 'bash', '--noprofile', '--norc', '-i' ] Environment assignments ~~~~~~~~~~~~~~~~~~~~~~~ In order to cooperate with your host environment, a debugging shell sometimes needs to be configured with some extra knowledge inheriting from your host environment. This can be achieved by setting up the shell ``environment`` configuration, which is expressed as a dictionary very similar to the :ref:`default environment `, except that it supports host side environment variable expansion in values. .. 
note:: The ``environment`` configuration is available since :ref:`format version 4 ` For example, to share your host ``DISPLAY`` and ``DBUS_SESSION_BUS_ADDRESS`` environments with debugging shells for your project, specify the following: .. code:: yaml shell: # Share some environment variables from the host environment environment: DISPLAY: '$DISPLAY' DBUS_SESSION_BUS_ADDRESS: '$DBUS_SESSION_BUS_ADDRESS' Or, a more complex example is how one might share the host pulseaudio server with a ``bst shell`` environment: .. code:: yaml shell: # Set some environment variables explicitly environment: PULSE_SERVER: 'unix:${XDG_RUNTIME_DIR}/pulse/native' Host files ~~~~~~~~~~ It can be useful to share some files on the host with a shell so that it can integrate better with the host environment. The ``host-files`` configuration allows one to specify files and directories on the host to be bind mounted into the sandbox. .. note:: The ``host-files`` configuration is available since :ref:`format version 4 ` .. warning:: One should never mount directories where one expects to find data and files which belong to the user, such as ``/home`` on POSIX platforms. This is because the unsuspecting user may corrupt their own files accidentally as a result. Instead users can use the ``--mount`` option of ``bst shell`` to mount data into the shell. The ``host-files`` configuration is an ordered list of *mount specifications*. Members of the list can be *fully specified* as a dictionary, or a simple string can be used if only the defaults are required. The fully specified dictionary has the following members: * ``path`` The path inside the sandbox. This is the only mandatory member of the mount specification. * ``host_path`` The host path to mount at ``path`` in the sandbox. This will default to ``path`` if left unspecified. * ``optional`` Whether the mount should be considered optional. This is ``False`` by default. Here is an example of a *fully specified mount specification*: .. code:: yaml shell: # Mount an arbitrary resolv.conf from the host to # /etc/resolv.conf in the sandbox, and avoid any # warnings if the host resolv.conf doesnt exist. host-files: - host_path: '/usr/local/work/etc/resolv.conf' path: '/etc/resolv.conf' optional: True Here is an example of using *shorthand mount specifications*: .. code:: yaml shell: # Specify a list of files to mount in the sandbox # directory from the host. # # If these do not exist on the host, a warning will # be issued but the shell will still be launched. host-files: - '/etc/passwd' - '/etc/group' - '/etc/resolv.conf' Host side environment variable expansion is also supported: .. code:: yaml shell: # Mount a host side pulseaudio server socket into # the shell environment at the same location. host-files: - '${XDG_RUNTIME_DIR}/pulse/native' .. _project_builtin_defaults: Builtin defaults ---------------- BuildStream defines some default values for convenience, the default values overridden by your project's ``project.conf`` are presented here: .. literalinclude:: ../../buildstream/data/projectconfig.yaml :language: yaml buildstream-1.6.9/doc/source/format_project_refs.rst000066400000000000000000000052431437515270000227060ustar00rootroot00000000000000 .. _projectrefs: The project.refs file ===================== If one has elected to store source references in a single ``project.refs`` file, then it will be stored at the toplevel of your project directory adjacent to ``project.conf``. 
This can be configured in your project using the :ref:`ref-storage configuration ` Sources for :mod:`junction ` elements are stored separately in an adjacent ``junction.refs`` file of the same format. .. _projectrefs_basics: Basic behavior -------------- When a ``project.refs`` file is in use, any source references found in the :ref:`inline source declarations ` are considered invalid and will be ignored, and a warning will be emitted for them. When ``bst track`` is run for your project, the ``project.refs`` file will be updated instead of the inline source declarations. In the absence of a ``project.refs`` file, ``bst track`` will create one automatically with the tracking results. An interesting property of ``project.refs`` is that it allows for *cross junction tracking*. This is to say that it is possible to override the *ref* of a given source in a project that your project depends on via a :mod:`junction `, without actually modifying the junctioned project. .. _projectrefs_format: Format ------ The ``project.refs`` uses the same YAML format used throughout BuildStream, and supports the same :ref:`directives ` which apply to ``project.conf`` and element declaration files (i.e. *element.bst* files). The ``project.refs`` file format itself is very simple, it contains a single ``projects`` key at the toplevel, which is a dictionary of :ref:`project names `. Each *project name* is a dictionary of *element names*, and each *element name* holds a list of dictionaries corresponding to the element's :ref:`sources `. **Example** .. code:: yaml # Main toplevel "projects" key projects: # The local project's name is "core" core: # A dictionary of element names base/automake.bst: # A list of sources corresponding to the element # in the same order in which they were declared. # # The values of this list are dictionaries of the # symbolic "ref" portion understood by the given # source plugin implementation. # - ref: af6ba39142220687c500f79b4aa2f181d9b24e4... # The "core" project depends on the "bootstrap" project, # here we are allowed to override the refs for the projects # we depend on through junctions. bootstrap: zlib.bst: - ref: 4ff941449631ace0d4d203e3483be9dbc9da4540... buildstream-1.6.9/doc/source/format_public.rst000066400000000000000000000056221437515270000215000ustar00rootroot00000000000000 .. _public_builtin: Builtin public data =================== Elements can provide public data which can be read by other elements later in the pipeline, the format for exposing public data on a given element is :ref:`described here `. Any element may use public data for whatever purpose it wants, but BuildStream has some built-in expectations of public data, which resides completely in the ``bst`` domain. In this section we will describe the public data in the ``bst`` domain. .. _public_integration: Integration commands -------------------- .. code:: yaml # Specify some integration commands public: bst: integration-commands: - /usr/bin/update-fancy-feature-cache The built-in ``integration-commands`` list indicates that depending elements should run this set of commands before expecting the staged runtime environment to be functional. Typical cases for this include running ``ldconfig`` at the base of a pipeline, or running commands to update various system caches. Integration commands of a given element are automatically run by the :func:`Element.integrate() ` method and are used by various plugins. 
Notably the :mod:`BuildElement ` derived classes will always integrate the build dependencies after staging and before running any build commands. .. _public_split_rules: Split rules ----------- .. code:: yaml # Specify some split rules public: bst: split-rules: runtime: - | %{bindir}/* - | %{sbindir}/* - | %{libexecdir}/* - | %{libdir}/lib*.so* Split rules indicate how the output of an element can be categorized into *domains*. The ``split-rules`` domains are used by the :func:`Element.stage_artifact() ` method when deciding what domains of an artifact should be staged. The strings listed in each domain are first substituted with the :ref:`variables ` in context of the given element, and then applied as a glob style match, as understood by :func:`utils.glob() ` This is used for creating compositions with the :mod:`compose ` element and can be used by other deployment related elements for the purpose of splitting element artifacts into separate packages. .. _public_overlap_whitelist: Overlap whitelist ----------------- The overlap whitelist indicates which files this element is allowed to overlap over other elements when staged together with other elements. Each item in the overlap whitelist has substitutions applied from :ref:`variables `, and is then applied as a glob-style match (i.e. :func:`utils.glob() `). .. code:: yaml public: bst: overlap-whitelist: - | %{sysconfdir}/* - | /etc/fontcache buildstream-1.6.9/doc/source/index.rst000066400000000000000000000016771437515270000177670ustar00rootroot00000000000000.. BuildStream documentation master file, created by sphinx-quickstart on Mon Nov 7 21:03:37 2016. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. BuildStream documentation ========================= These docs cover everything you need to build and integrate software stacks using BuildStream. They begin with a basic introduction to BuildStream, background information on basic concepts, and a guide to the BuildStream command line interface. Later sections provide detailed information on BuildStream internals. .. toctree:: :maxdepth: 1 main_about main_install main_using main_core HACKING Resources --------- * Github repository: https://github.com/apache/buildstream * Bug Tracking: https://github.com/apache/buildstream/issues * Mailing list: https://lists.apache.org/list.html?dev@buildstream.apache.org * IRC Channel: irc://irc.gnome.org/#buildstream buildstream-1.6.9/doc/source/install_artifacts.rst000066400000000000000000000134641437515270000223630ustar00rootroot00000000000000 .. _artifacts: Installing an artifact server ============================= BuildStream caches the results of builds in a local artifact cache, and will avoid building an element if there is a suitable build already present in the local artifact cache. In addition to the local artifact cache, you can configure one or more remote artifact caches and BuildStream will then try to pull a suitable build from one of the remotes, falling back to a local build if needed. Configuring BuildStream to use remote caches -------------------------------------------- A project will often set up continuous build infrastructure that pushes built artifacts to a shared cache, so developers working on the project can make use of these pre-built artifacts instead of having to each build the whole project locally. The project can declare this cache in its :ref:`project configuration file `. Users can declare additional remote caches in the :ref:`user configuration `. 
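As a rough sketch (assuming the BuildStream 1.x user configuration format; the project name and cache URL below are hypothetical), an additional pull-only cache could be declared as a project-specific override in the user configuration:

.. code:: yaml

   # In the user configuration (buildstream.conf), override the
   # artifact cache consulted for one particular project.
   projects:
     my-project-name:
       artifacts:
         url: https://cache.example.com:11001
         push: false
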
There are several use cases for this: your project may not define its own cache, it may be useful to have a local mirror of its cache, or you may have a reason to share artifacts privately. Remote artifact caches are identified by their URL. There are currently two supported protocols: * ``http``: Pull and push access, without transport-layer security * ``https``: Pull and push access, with transport-layer security BuildStream allows you to configure as many caches as you like, and will query them in a specific order: 1. Project-specific overrides in the user config 2. Project configuration 3. User configuration When an artifact is built locally, BuildStream will try to push it to all the caches which have the ``push: true`` flag set. You can also manually push artifacts to a specific cache using the :ref:`bst push command `. Artifacts are identified using the element's :ref:`cache key ` so the builds provided by a cache should be interchangeable with those provided by any other cache. Setting up a remote artifact cache ---------------------------------- The rest of this page outlines how to set up a shared artifact cache. Setting up the user ~~~~~~~~~~~~~~~~~~~ A specific user is not needed; however, a dedicated user to own the artifact cache is recommended. .. code:: bash useradd artifacts The recommended approach is to run two instances on different ports. One instance has push disabled and doesn't require client authentication. The other instance has push enabled and requires client authentication. Alternatively, you can set up a reverse proxy and handle authentication and authorization there. Installing the server ~~~~~~~~~~~~~~~~~~~~~ You will also need to install BuildStream on the artifact server in order to receive uploaded artifacts over ssh. Follow the instructions for installing BuildStream :ref:`here `. When installing BuildStream on the artifact server, it must be installed in a system wide location, with ``pip3 install .`` in the BuildStream checkout directory. Otherwise, some tinkering is required to ensure BuildStream is available in ``PATH`` when its companion ``bst-artifact-server`` program is run remotely. You can install only the artifact server companion program without requiring BuildStream's more exigent dependencies by setting the ``BST_ARTIFACTS_ONLY`` environment variable at install time, like so: .. code:: BST_ARTIFACTS_ONLY=1 pip3 install . Command reference ~~~~~~~~~~~~~~~~~ .. click:: buildstream._artifactcache.casserver:server_main :prog: bst-artifact-server Key pair for the server ~~~~~~~~~~~~~~~~~~~~~~~ For TLS you need a key pair for the server. The following example creates a self-signed key, which requires clients to have a copy of the server certificate (e.g., in the project directory). You can also use a key pair obtained from a trusted certificate authority instead. .. code:: bash openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -subj "/CN=artifacts.com" -out server.crt -keyout server.key Authenticating users ~~~~~~~~~~~~~~~~~~~~ In order to give permission to a given user to upload artifacts, create a TLS key pair on the client. .. code:: bash openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -subj "/CN=client" -out client.crt -keyout client.key Copy the public client certificate ``client.crt`` to the server and then add it to the authorized keys, like so: .. code:: bash cat client.crt >> /home/artifacts/authorized.crt Serve the cache over https ~~~~~~~~~~~~~~~~~~~~~~~~~~ Public instance without push: ..
code:: bash bst-artifact-server --port 11001 --server-key server.key --server-cert server.crt /home/artifacts/artifacts Instance with push and requiring client authentication: .. code:: bash bst-artifact-server --port 11002 --server-key server.key --server-cert server.crt --client-certs authorized.crt --enable-push /home/artifacts/artifacts User configuration ~~~~~~~~~~~~~~~~~~ The user configuration for artifacts is documented with the rest of the :ref:`user configuration documentation `. Assuming you have the same setup used in this document, and that your host is reachable on the internet as ``artifacts.com`` (for example), then a user can use the following user configuration: Pull-only: .. code:: yaml # # Artifacts # artifacts: url: https://artifacts.com:11001 # Optional server certificate if not trusted by system root certificates server-cert: server.crt Pull and push: .. code:: yaml # # Artifacts # artifacts: url: https://artifacts.com:11002 # Optional server certificate if not trusted by system root certificates server-cert: server.crt # Optional client key pair for authentication client-key: client.key client-cert: client.crt push: true buildstream-1.6.9/doc/source/install_docker.rst000066400000000000000000000035071437515270000216470ustar00rootroot00000000000000 .. _docker: BuildStream inside Docker ------------------------- If your system cannot provide the base system requirements for BuildStream, then it is possible to run buildstream within a Docker image. The BuildStream project provides `Docker images `_ containing BuildStream and its dependencies. This gives you an easy way to get started using BuildStream on any Unix-like platform where Docker is available, including Mac OS X. We recommend using the `bst-here wrapper script `_ which automates the necessary container setup. You can download it and make it executable like this: .. code:: bash mkdir -p ~/.local/bin curl --get https://gitlab.com/BuildStream/buildstream/raw/master/contrib/bst-here > ~/.local/bin/bst-here chmod +x ~/.local/bin/bst-here Check if ``~/.local/bin`` appears in your PATH environment variable -- if it doesn't, you should `edit your ~/.profile so that it does `_. Once the script is available in your PATH, you can run ``bst-here`` to open a shell session inside a new container based off the latest version of the buildstream Docker image. The current working directory will be mounted inside the container at ``/src``. You can also run individual BuildStream commands as ``bst-here COMMAND``. For example: ``bst-here show systems/my-system.bst``. Note that BuildStream won't be able to integrate with Bash tab-completion if you invoke it in this way. Two Docker volumes are set up by the ``bst-here`` script: * buildstream-cache -- mounted at ``~/.cache/buildstream`` * buildstream-config -- mounted at ``~/.config/`` These are necessary so that your BuildStream cache and configuration files persist between invocations of ``bst-here``. buildstream-1.6.9/doc/source/install_linux_distro.rst000066400000000000000000000133561437515270000231260ustar00rootroot00000000000000 .. _install: Installing BuildStream on a Linux distro ======================================== Installing from distro packages ------------------------------- Arch Linux ~~~~~~~~~~ Packages for Arch exist in `AUR `_. 
Two different package versions are available: - BuildStream latest release: `buildstream `_ - BuildStream latest development snapshot: `buildstream-git `_ The external plugins are available as well: - BuildStream-external plugins latest release: `bst-external` https://aur.archlinux.org/packages/bst-external>`_ Fedora ~~~~~~ BuildStream is in the official Fedora repositories:: sudo dnf install buildstream Optionally, install the `buildstream-docs` package to have the BuildStream documentation in Devhelp or GNOME Builder. Installing from source ---------------------- Until BuildStream is available in your distro, you will need to install it yourself from the `git repository `_ using python's ``pip`` package manager. BuildStream requires the following base system requirements: * python3 >= 3.5 * libostree >= v2017.8 with introspection data * bubblewrap >= 0.1.2 * fuse2 * psutil python library (so you don't have to install GCC and python-devel to build it yourself) BuildStream also depends on the host tools for the :mod:`Source ` plugins. Refer to the respective :ref:`source plugin ` documentation for host tool requirements of specific plugins. If you intend to push built artifacts to a remote artifact server, which requires special permissions, you will also need: * ssh For the purpose of installing BuildStream while there are no distro packages, you will additionally need: * pip for python3 (only required for setup) * Python 3 development libraries and headers * git (to checkout buildstream) Installing dependencies ~~~~~~~~~~~~~~~~~~~~~~~ Arch Linux ++++++++++ Install the dependencies with:: sudo pacman -S fuse2 ostree bubblewrap git \ python python-pip python-gobject python-psutil lzip Debian ++++++ Stretch ^^^^^^^ With stretch, you first need to ensure that you have the backports repository setup as described `here `__ By adding the following line to your sources.list:: deb http://ftp.debian.org/debian stretch-backports main And then running:: sudo apt-get update At this point you should be able to get the system requirements with:: sudo apt-get install \ fuse ostree gir1.2-ostree-1.0 bubblewrap git \ python3 python3-pip python3-gi python3-psutil lzip sudo apt-get install -t stretch-backports \ gir1.2-ostree-1.0 ostree Buster and newer ^^^^^^^^^^^^^^^^ The following line should be enough to get the base system requirements installed:: sudo apt-get install \ fuse ostree gir1.2-ostree-1.0 bubblewrap git \ python3 python3-pip python3-gi python3-psutil lzip Fedora ++++++ For recent fedora systems, the following line should get you the system requirements you need:: dnf install -y fuse ostree bubblewrap git \ python3 python3-pip python3-gobject python3-psutil lzip Installing ~~~~~~~~~~ Once you have the base system dependencies, you can clone the BuildStream git repository and install it as a regular user:: git clone https://github.com/apache/buildstream.git cd buildstream git checkout pip3 install --user -e . This will install buildstream's pure python dependencies into your user's homedir in ``~/.local`` and will run BuildStream directly from the git checkout directory. Keep following the instructions below to ensure that the ``bst`` command is in your ``PATH`` and to enable bash completions for it. .. note:: We recommend the ``-e`` option because you can upgrade your installation by simply updating the checked out git repository. If you want a full installation that is not linked to your git checkout, just omit the ``-e`` option from the above commands. 
You can view available version tags `here `__ for example to install version 1.6.6 ``git checkout 1.6.6`` You may require ``bst-external`` the install instructions can be found on the `bst-external gitlab `__ Adjust PATH ~~~~~~~~~~~ Since BuildStream is now installed under your local user's install directories, you need to ensure that ``PATH`` is adjusted. A regular way to do this is to add the following line to the end of your ``~/.bashrc``:: export PATH="${PATH}:${HOME}/.local/bin" .. note:: You will have to restart your terminal in order for these changes to take effect. Bash completions ~~~~~~~~~~~~~~~~ Bash completions are supported by sourcing the ``buildstream/data/bst`` script found in the BuildStream repository. On many systems this script can be installed into a completions directory but when installing BuildStream without a package manager this is not an option. To enable completions for an installation of BuildStream you installed yourself from git, just append the script verbatim to your ``~/.bash_completion``: .. literalinclude:: ../../buildstream/data/bst :language: yaml Upgrading BuildStream ~~~~~~~~~~~~~~~~~~~~~ Assuming you have followed the default instructions above, all you need to do to upgrade BuildStream is to update your local git checkout:: cd /path/to/buildstream git pull --rebase If you did not specify the ``-e`` option at install time, you will need to cleanly reinstall BuildStream:: pip3 uninstall buildstream cd /path/to/buildstream git pull --rebase pip3 install --user . buildstream-1.6.9/doc/source/main_about.rst000066400000000000000000000000361437515270000207620ustar00rootroot00000000000000.. include:: ../../README.rst buildstream-1.6.9/doc/source/main_core.rst000066400000000000000000000007051437515270000206030ustar00rootroot00000000000000 Reference ========= This section details the core API reference along with other more elaborate details about BuildStream internals. .. toctree:: :maxdepth: 2 core_format core_plugins core_framework core_additional .. This is a hidden toctree so that the autogenerated modules index is not orphaned, sort of a cheat because we would rather present a manual toctree for this part. .. toctree:: :hidden: buildstream buildstream-1.6.9/doc/source/main_install.rst000066400000000000000000000004141437515270000213160ustar00rootroot00000000000000Install ======= This section covers how to install BuildStream onto your machine, how to run BuildStream inside a docker image and also how to configure an artifact server. .. toctree:: :maxdepth: 2 install_linux_distro install_docker install_artifacts buildstream-1.6.9/doc/source/main_using.rst000066400000000000000000000003671437515270000210040ustar00rootroot00000000000000 Using ===== This section includes user facing documentation including tutorials, guides and information on user preferences and configuration. .. toctree:: :maxdepth: 2 using_tutorial using_examples using_config using_commands buildstream-1.6.9/doc/source/plugin.rsttemplate000066400000000000000000000000331437515270000216730ustar00rootroot00000000000000.. 
automodule:: @@MODULE@@ buildstream-1.6.9/doc/source/sample_plugin/000077500000000000000000000000001437515270000207525ustar00rootroot00000000000000buildstream-1.6.9/doc/source/sample_plugin/MANIFEST.in000066400000000000000000000000261437515270000225060ustar00rootroot00000000000000global-include *.yaml buildstream-1.6.9/doc/source/sample_plugin/setup.py000066400000000000000000000006341437515270000224670ustar00rootroot00000000000000from setuptools import setup, find_packages setup(name='BuildStream Autotools', version="0.1", description="A better autotools element for BuildStream", packages=find_packages(), install_requires=[ 'setuptools' ], include_package_data=True, entry_points={ 'buildstream.plugins': [ 'autotools = elements.autotools' ] }) buildstream-1.6.9/doc/source/sessions-stored/000077500000000000000000000000001437515270000212575ustar00rootroot00000000000000buildstream-1.6.9/doc/source/sessions-stored/autotools-build.html000066400000000000000000001306431437515270000253020ustar00rootroot00000000000000
user@host:~/autotools$ bst build hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Build
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][][] START   Checking sources
[00:00:00][][] SUCCESS Checking sources

BuildStream Version 1.4.0
  Session Start: Monday, 02-09-2019 at 16:06:58
  Project:       autotools (/home/user/repos/buildstream/doc/examples/autotools)
  Targets:       hello.bst
  Cache Usage:   12K / infinity (0%)

User Configuration
  Configuration File:      /home/user/repos/buildstream/doc/run-bst-b3rd90i_/buildstream.conf
  Log Files:               /home/user/repos/buildstream/doc/run-bst-b3rd90i_/logs
  Source Mirrors:          /home/user/.cache/buildstream/sources
  Build Area:              /home/user/repos/buildstream/doc/run-bst-b3rd90i_/build
  Artifact Cache:          /home/user/repos/buildstream/doc/run-bst-b3rd90i_/artifacts
  Strict Build Plan:       Yes
  Maximum Fetch Tasks:     10
  Maximum Build Tasks:     4
  Maximum Push Tasks:      4
  Maximum Network Retries: 2

Pipeline
   buildable d5472eb60ceb8a45b0ed5907912b5d89ce91d8a4e9b454ea2663f052c6cf4dff base/alpine.bst 
     waiting 50110d08831106135148d991c33f15794b530938712216e84593eeff7c1ef8ca base.bst 
     waiting 0713461b97fa9c6c4de17c3481d2b5f64fc51af435b0ce55e604283e913aa950 hello.bst 
===============================================================================
[--:--:--][d5472eb6][build:base/alpine.bst               ] START   autotools/base-alpine/d5472eb6-build.12223.log
[--:--:--][d5472eb6][build:base/alpine.bst               ] START   Staging sources
[00:00:05][d5472eb6][build:base/alpine.bst               ] SUCCESS Staging sources
[--:--:--][d5472eb6][build:base/alpine.bst               ] START   Caching artifact
[00:00:02][d5472eb6][build:base/alpine.bst               ] SUCCESS Caching artifact
[00:00:08][d5472eb6][build:base/alpine.bst               ] SUCCESS autotools/base-alpine/d5472eb6-build.12223.log
[--:--:--][50110d08][build:base.bst                      ] START   autotools/base/50110d08-build.12229.log
[--:--:--][50110d08][build:base.bst                      ] START   Caching artifact
[00:00:00][50110d08][build:base.bst                      ] SUCCESS Caching artifact
[00:00:00][50110d08][build:base.bst                      ] SUCCESS autotools/base/50110d08-build.12229.log
[--:--:--][0713461b][build:hello.bst                     ] START   autotools/hello/0713461b-build.12231.log
[--:--:--][0713461b][build:hello.bst                     ] START   Staging dependencies
[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][0713461b][build:hello.bst                     ] START   Integrating sandbox
[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][0713461b][build:hello.bst                     ] START   Staging sources
[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Staging sources
[--:--:--][0713461b][build:hello.bst                     ] START   Running configure-commands
[--:--:--][0713461b][build:hello.bst                     ] STATUS  Running configure-commands

    export NOCONFIGURE=1;
    
    if [ -x ./configure ]; then true;
    elif [ -x autogen ]; then ./autogen;
    elif [ -x autogen.sh ]; then ./autogen.sh;
    elif [ -x bootstrap ]; then ./bootstrap;
    elif [ -x bootstrap.sh ]; then ./bootstrap.sh;
    else autoreconf -ivf;
    fi

[--:--:--][0713461b][build:hello.bst                     ] STATUS  Running configure-commands

    ./configure --prefix=/usr \
    --exec-prefix=/usr \
    --bindir=/usr/bin \
    --sbindir=/usr/sbin \
    --sysconfdir=/etc \
    --datadir=/usr/share \
    --includedir=/usr/include \
    --libdir=/usr/lib \
    --libexecdir=/usr/libexec \
    --localstatedir=/var \
    --sharedstatedir=/usr/com \
    --mandir=/usr/share/man \
    --infodir=/usr/share/info

[00:00:03][0713461b][build:hello.bst                     ] SUCCESS Running configure-commands
[--:--:--][0713461b][build:hello.bst                     ] START   Running build-commands
[--:--:--][0713461b][build:hello.bst                     ] STATUS  Running build-commands

    make

[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Running build-commands
[--:--:--][0713461b][build:hello.bst                     ] START   Running install-commands
[--:--:--][0713461b][build:hello.bst                     ] STATUS  Running install-commands

    make -j1 DESTDIR="/buildstream-install" install

[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Running install-commands
[--:--:--][0713461b][build:hello.bst                     ] START   Running strip-commands
[--:--:--][0713461b][build:hello.bst                     ] STATUS  Running strip-commands

    cd "/buildstream-install" && find -type f \
      '(' -perm -111 -o -name '*.so*' \
          -o -name '*.cmxs' -o -name '*.node' ')' \
      -exec sh -ec \
      'read -n4 hdr <"$1" # check for elf header
       if [ "$hdr" != "$(printf \\x7fELF)" ]; then
           exit 0
       fi
       debugfile="/buildstream-install/usr/lib/debug/$1"
       mkdir -p "$(dirname "$debugfile")"
       objcopy --only-keep-debug --compress-debug-sections "$1" "$debugfile"
       chmod 644 "$debugfile"
       strip --remove-section=.comment --remove-section=.note --strip-unneeded "$1"
       objcopy --add-gnu-debuglink "$debugfile" "$1"' - {} ';'

[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Running strip-commands
[--:--:--][0713461b][build:hello.bst                     ] START   Caching artifact
[00:00:00][0713461b][build:hello.bst                     ] SUCCESS Caching artifact
[00:00:04][0713461b][build:hello.bst                     ] SUCCESS autotools/hello/0713461b-build.12231.log
[00:00:12][][] SUCCESS Build

Pipeline Summary
  Total:       3
  Session:     3
  Fetch Queue: processed 0, skipped 3, failed 0 
  Build Queue: processed 3, skipped 0, failed 0
buildstream-1.6.9/doc/source/sessions-stored/autotools-shell.html000066400000000000000000000221261437515270000253060ustar00rootroot00000000000000
user@host:~/autotools$ bst shell hello.bst -- hello

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][0713461b][ main:hello.bst                     ] START   Staging dependencies
[00:00:00][0713461b][ main:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][0713461b][ main:hello.bst                     ] START   Integrating sandbox
[00:00:00][0713461b][ main:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][0713461b][ main:hello.bst                     ] STATUS  Running command

    hello

Hello World!
This is amhello 1.0.
buildstream-1.6.9/doc/source/sessions-stored/autotools-show-variables.html000066400000000000000000000174411437515270000271310ustar00rootroot00000000000000
user@host:~/autotools$ bst show --deps none --format "%{vars}" hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
autogen: "export NOCONFIGURE=1;\n\nif [ -x ./configure ]; then true;\nelif [ -x autogen\
  \ ]; then ./autogen;\nelif [ -x autogen.sh ]; then ./autogen.sh;\nelif [ -x bootstrap\
  \ ]; then ./bootstrap;\nelif [ -x bootstrap.sh ]; then ./bootstrap.sh;\nelse autoreconf\
  \ -ivf;\nfi"
bindir: /usr/bin
build-root: /buildstream/autotools/hello.bst
command-subdir: doc/amhello
conf-args: "--prefix=/usr \\\n--exec-prefix=/usr \\\n--bindir=/usr/bin \\\n--sbindir=/usr/sbin\
  \ \\\n--sysconfdir=/etc \\\n--datadir=/usr/share \\\n--includedir=/usr/include \\\
  \n--libdir=/usr/lib \\\n--libexecdir=/usr/libexec \\\n--localstatedir=/var \\\n\
  --sharedstatedir=/usr/com \\\n--mandir=/usr/share/man \\\n--infodir=/usr/share/info"
conf-cmd: ./configure
conf-extra: ''
conf-global: ''
conf-local: ''
configure: "./configure --prefix=/usr \\\n--exec-prefix=/usr \\\n--bindir=/usr/bin\
  \ \\\n--sbindir=/usr/sbin \\\n--sysconfdir=/etc \\\n--datadir=/usr/share \\\n--includedir=/usr/include\
  \ \\\n--libdir=/usr/lib \\\n--libexecdir=/usr/libexec \\\n--localstatedir=/var \\\
  \n--sharedstatedir=/usr/com \\\n--mandir=/usr/share/man \\\n--infodir=/usr/share/info"
datadir: /usr/share
debugdir: /usr/lib/debug
docdir: /usr/share/doc
element-name: hello.bst
exec_prefix: /usr
fix-pyc-timestamps: "find \"/buildstream-install\" -name '*.pyc' -exec \\\n  dd if=/dev/zero\
  \ of={} bs=1 count=4 seek=4 conv=notrunc ';'"
includedir: /usr/include
infodir: /usr/share/info
install-root: /buildstream-install
lib: lib
libdir: /usr/lib
libexecdir: /usr/libexec
localstatedir: /var
make: make
make-install: make -j1 DESTDIR="/buildstream-install" install
mandir: /usr/share/man
max-jobs: '8'
objcopy-extract-args: --only-keep-debug --compress-debug-sections
objcopy-link-args: --add-gnu-debuglink
prefix: /usr
project-name: autotools
sbindir: /usr/sbin
sharedstatedir: /usr/com
strip-args: --remove-section=.comment --remove-section=.note --strip-unneeded
strip-binaries: "cd \"/buildstream-install\" && find -type f \\\n  '(' -perm -111\
  \ -o -name '*.so*' \\\n      -o -name '*.cmxs' -o -name '*.node' ')' \\\n  -exec\
  \ sh -ec \\\n  'read -n4 hdr <\"$1\" # check for elf header\n   if [ \"$hdr\" !=\
  \ \"$(printf \\\\x7fELF)\" ]; then\n       exit 0\n   fi\n   debugfile=\"/buildstream-install/usr/lib/debug/$1\"\
  \n   mkdir -p \"$(dirname \"$debugfile\")\"\n   objcopy --only-keep-debug --compress-debug-sections\
  \ \"$1\" \"$debugfile\"\n   chmod 644 \"$debugfile\"\n   strip --remove-section=.comment\
  \ --remove-section=.note --strip-unneeded \"$1\"\n   objcopy --add-gnu-debuglink\
  \ \"$debugfile\" \"$1\"' - {} ';'"
sysconfdir: /etc

buildstream-1.6.9/doc/source/sessions-stored/first-project-build.html000066400000000000000000000430771437515270000260500ustar00rootroot00000000000000
user@host:~/first-project$ bst build hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Build
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][][] START   Checking sources
[00:00:00][][] SUCCESS Checking sources

BuildStream Version 1.4.0
  Session Start: Monday, 02-09-2019 at 16:06:15
  Project:       first-project (/home/user/repos/buildstream/doc/examples/first-project)
  Targets:       hello.bst
  Cache Usage:   12K / infinity (0%)

User Configuration
  Configuration File:      /home/user/repos/buildstream/doc/run-bst-t0ct0ui2/buildstream.conf
  Log Files:               /home/user/repos/buildstream/doc/run-bst-t0ct0ui2/logs
  Source Mirrors:          /home/user/.cache/buildstream/sources
  Build Area:              /home/user/repos/buildstream/doc/run-bst-t0ct0ui2/build
  Artifact Cache:          /home/user/repos/buildstream/doc/run-bst-t0ct0ui2/artifacts
  Strict Build Plan:       Yes
  Maximum Fetch Tasks:     10
  Maximum Build Tasks:     4
  Maximum Push Tasks:      4
  Maximum Network Retries: 2

Pipeline
   buildable 7c73774896fecfd2be0710a3c9dbcdf5fb79f892091d951ef00f09e95f2988c2 hello.bst 
===============================================================================
[--:--:--][7c737748][build:hello.bst                     ] START   first-project/hello/7c737748-build.11044.log
[--:--:--][7c737748][build:hello.bst                     ] START   Staging sources
[00:00:00][7c737748][build:hello.bst                     ] SUCCESS Staging sources
[--:--:--][7c737748][build:hello.bst                     ] START   Caching artifact
[00:00:00][7c737748][build:hello.bst                     ] SUCCESS Caching artifact
[00:00:00][7c737748][build:hello.bst                     ] SUCCESS first-project/hello/7c737748-build.11044.log
[00:00:00][][] SUCCESS Build

Pipeline Summary
  Total:       1
  Session:     1
  Fetch Queue: processed 0, skipped 1, failed 0 
  Build Queue: processed 1, skipped 0, failed 0
buildstream-1.6.9/doc/source/sessions-stored/first-project-checkout.html000066400000000000000000000237031437515270000265500ustar00rootroot00000000000000
user@host:~/first-project$ bst checkout hello.bst here

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][7c737748][ main:hello.bst                     ] START   Staging dependencies
[00:00:00][7c737748][ main:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][7c737748][ main:hello.bst                     ] START   Integrating sandbox
[00:00:00][7c737748][ main:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][7c737748][ main:hello.bst                     ] START   Checking out files in 'here'
[00:00:00][7c737748][ main:hello.bst                     ] SUCCESS Checking out files in 'here'
buildstream-1.6.9/doc/source/sessions-stored/first-project-init.html000066400000000000000000000006131437515270000257010ustar00rootroot00000000000000
user@host:~/first-project$ bst init --project-name first-project

Created project.conf at: /home/user/repos/buildstream/doc/examples/first-project/project.conf
buildstream-1.6.9/doc/source/sessions-stored/first-project-ls.html000066400000000000000000000004351437515270000253560ustar00rootroot00000000000000
user@host:~/first-project$ ls ./here

hello.world
buildstream-1.6.9/doc/source/sessions-stored/first-project-show.html000066400000000000000000000112541437515270000257210ustar00rootroot00000000000000
user@host:~/first-project$ bst show hello.bst

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
      cached 7c73774896fecfd2be0710a3c9dbcdf5fb79f892091d951ef00f09e95f2988c2 hello.bst 
buildstream-1.6.9/doc/source/sessions-stored/first-project-touch.html000066400000000000000000000004301437515270000260550ustar00rootroot00000000000000
user@host:~/first-project$ touch hello.world
buildstream-1.6.9/doc/source/sessions-stored/flatpak-autotools-build.html000066400000000000000000001450301437515270000267160ustar00rootroot00000000000000
user@host:~/flatpak-autotools$ bst build hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Build
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][][] START   Checking sources
[00:00:00][][] SUCCESS Checking sources

BuildStream Version 1.4.0
  Session Start: Monday, 02-09-2019 at 16:06:17
  Project:       flatpak-autotools (/home/user/repos/buildstream/doc/examples/flatpak-autotools)
  Targets:       hello.bst
  Cache Usage:   12K / infinity (0%)

User Configuration
  Configuration File:      /home/user/repos/buildstream/doc/run-bst-wal0tcrg/buildstream.conf
  Log Files:               /home/user/repos/buildstream/doc/run-bst-wal0tcrg/logs
  Source Mirrors:          /home/user/.cache/buildstream/sources
  Build Area:              /home/user/repos/buildstream/doc/run-bst-wal0tcrg/build
  Artifact Cache:          /home/user/repos/buildstream/doc/run-bst-wal0tcrg/artifacts
  Strict Build Plan:       Yes
  Maximum Fetch Tasks:     10
  Maximum Build Tasks:     4
  Maximum Push Tasks:      4
  Maximum Network Retries: 2

Project Options
  arch: x86_64

Pipeline
   buildable 1fb677ea63be533ff4391db8c764030a80500c8f1feba0959eb747f720fd3748 base/sdk.bst 
     waiting 6fbcd70444b9b3bb74e48bf930fd93a0715e854bfe370c4a43b7e9d2d81caee7 base/usrmerge.bst 
     waiting 60ef22ffacf88b1fb99d3bf72752456b55c83ec86672310bcf7917f55ed7d485 base.bst 
     waiting 8c609e7d39e81c68c2f8162a19440d8a947f1b95535dfb18d43b7fbef611866f hello.bst 
===============================================================================
[--:--:--][1fb677ea][build:base/sdk.bst                  ] START   flatpak-autotools/base-sdk/1fb677ea-build.11113.log
[--:--:--][1fb677ea][build:base/sdk.bst                  ] START   Staging sources
[00:00:08][1fb677ea][build:base/sdk.bst                  ] SUCCESS Staging sources
[--:--:--][1fb677ea][build:base/sdk.bst                  ] START   Caching artifact
[00:00:17][1fb677ea][build:base/sdk.bst                  ] SUCCESS Caching artifact
[00:00:29][1fb677ea][build:base/sdk.bst                  ] SUCCESS flatpak-autotools/base-sdk/1fb677ea-build.11113.log
[--:--:--][6fbcd704][build:base/usrmerge.bst             ] START   flatpak-autotools/base-usrmerge/6fbcd704-build.11122.log
[--:--:--][6fbcd704][build:base/usrmerge.bst             ] START   Staging sources
[00:00:00][6fbcd704][build:base/usrmerge.bst             ] SUCCESS Staging sources
[--:--:--][6fbcd704][build:base/usrmerge.bst             ] START   Caching artifact
[00:00:00][6fbcd704][build:base/usrmerge.bst             ] SUCCESS Caching artifact
[00:00:00][6fbcd704][build:base/usrmerge.bst             ] SUCCESS flatpak-autotools/base-usrmerge/6fbcd704-build.11122.log
[--:--:--][60ef22ff][build:base.bst                      ] START   flatpak-autotools/base/60ef22ff-build.11124.log
[--:--:--][60ef22ff][build:base.bst                      ] START   Caching artifact
[00:00:00][60ef22ff][build:base.bst                      ] SUCCESS Caching artifact
[00:00:00][60ef22ff][build:base.bst                      ] SUCCESS flatpak-autotools/base/60ef22ff-build.11124.log
[--:--:--][8c609e7d][build:hello.bst                     ] START   flatpak-autotools/hello/8c609e7d-build.11126.log
[--:--:--][8c609e7d][build:hello.bst                     ] START   Staging dependencies
[00:00:02][8c609e7d][build:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][8c609e7d][build:hello.bst                     ] START   Integrating sandbox
[00:00:00][8c609e7d][build:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][8c609e7d][build:hello.bst                     ] START   Staging sources
[00:00:00][8c609e7d][build:hello.bst                     ] SUCCESS Staging sources
[--:--:--][8c609e7d][build:hello.bst                     ] START   Running configure-commands
[--:--:--][8c609e7d][build:hello.bst                     ] STATUS  Running configure-commands

    export NOCONFIGURE=1;
    
    if [ -x ./configure ]; then true;
    elif [ -x autogen ]; then ./autogen;
    elif [ -x autogen.sh ]; then ./autogen.sh;
    elif [ -x bootstrap ]; then ./bootstrap;
    elif [ -x bootstrap.sh ]; then ./bootstrap.sh;
    else autoreconf -ivf;
    fi

[--:--:--][8c609e7d][build:hello.bst                     ] STATUS  Running configure-commands

    ./configure --prefix=/usr \
    --exec-prefix=/usr \
    --bindir=/usr/bin \
    --sbindir=/usr/sbin \
    --sysconfdir=/etc \
    --datadir=/usr/share \
    --includedir=/usr/include \
    --libdir=/usr/lib \
    --libexecdir=/usr/libexec \
    --localstatedir=/var \
    --sharedstatedir=/usr/com \
    --mandir=/usr/share/man \
    --infodir=/usr/share/info

[00:00:03][8c609e7d][build:hello.bst                     ] SUCCESS Running configure-commands
[--:--:--][8c609e7d][build:hello.bst                     ] START   Running build-commands
[--:--:--][8c609e7d][build:hello.bst                     ] STATUS  Running build-commands

    make

[00:00:00][8c609e7d][build:hello.bst                     ] SUCCESS Running build-commands
[--:--:--][8c609e7d][build:hello.bst                     ] START   Running install-commands
[--:--:--][8c609e7d][build:hello.bst                     ] STATUS  Running install-commands

    make -j1 DESTDIR="/buildstream-install" install

[00:00:00][8c609e7d][build:hello.bst                     ] SUCCESS Running install-commands
[--:--:--][8c609e7d][build:hello.bst                     ] START   Running strip-commands
[--:--:--][8c609e7d][build:hello.bst                     ] STATUS  Running strip-commands

    cd "/buildstream-install" && find -type f \
      '(' -perm -111 -o -name '*.so*' \
          -o -name '*.cmxs' -o -name '*.node' ')' \
      -exec sh -ec \
      'read -n4 hdr <"$1" # check for elf header
       if [ "$hdr" != "$(printf \\x7fELF)" ]; then
           exit 0
       fi
       debugfile="/buildstream-install/usr/lib/debug/$1"
       mkdir -p "$(dirname "$debugfile")"
       objcopy --only-keep-debug --compress-debug-sections "$1" "$debugfile"
       chmod 644 "$debugfile"
       strip --remove-section=.comment --remove-section=.note --strip-unneeded "$1"
       objcopy --add-gnu-debuglink "$debugfile" "$1"' - {} ';'

[00:00:00][8c609e7d][build:hello.bst                     ] SUCCESS Running strip-commands
[--:--:--][8c609e7d][build:hello.bst                     ] START   Caching artifact
[00:00:00][8c609e7d][build:hello.bst                     ] SUCCESS Caching artifact
[00:00:07][8c609e7d][build:hello.bst                     ] SUCCESS flatpak-autotools/hello/8c609e7d-build.11126.log
[00:00:37][][] SUCCESS Build

Pipeline Summary
  Total:       4
  Session:     4
  Fetch Queue: processed 0, skipped 4, failed 0 
  Build Queue: processed 4, skipped 0, failed 0
buildstream-1.6.9/doc/source/sessions-stored/flatpak-autotools-shell.html000066400000000000000000000221361437515270000267270ustar00rootroot00000000000000
user@host:~/flatpak-autotools$ bst shell hello.bst -- hello

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][8c609e7d][ main:hello.bst                     ] START   Staging dependencies
[00:00:01][8c609e7d][ main:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][8c609e7d][ main:hello.bst                     ] START   Integrating sandbox
[00:00:00][8c609e7d][ main:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][8c609e7d][ main:hello.bst                     ] STATUS  Running command

    hello

Hello World!
This is amhello 1.0.
buildstream-1.6.9/doc/source/sessions-stored/integration-commands-build.html000066400000000000000000001712201437515270000273670ustar00rootroot00000000000000
user@host:~/integration-commands$ bst build hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Build
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][][] START   Checking sources
[00:00:00][][] SUCCESS Checking sources

BuildStream Version 1.4.0
  Session Start: Monday, 02-09-2019 at 16:07:13
  Project:       integration-commands (/home/user/repos/buildstream/doc/examples/integration-commands)
  Targets:       hello.bst
  Cache Usage:   12K / infinity (0%)

User Configuration
  Configuration File:      /home/user/repos/buildstream/doc/run-bst-5a2ho6_u/buildstream.conf
  Log Files:               /home/user/repos/buildstream/doc/run-bst-5a2ho6_u/logs
  Source Mirrors:          /home/user/.cache/buildstream/sources
  Build Area:              /home/user/repos/buildstream/doc/run-bst-5a2ho6_u/build
  Artifact Cache:          /home/user/repos/buildstream/doc/run-bst-5a2ho6_u/artifacts
  Strict Build Plan:       Yes
  Maximum Fetch Tasks:     10
  Maximum Build Tasks:     4
  Maximum Push Tasks:      4
  Maximum Network Retries: 2

Pipeline
   buildable 8175172a6d0ef8e2aaaec63b6545b5e6571977c68d44b6c8325d2b31f91ee2fa base/alpine.bst 
     waiting 499981895f448544e7bf88260c3567a75b9a963bce16ad3082644b1ca8986b9b base.bst 
     waiting 3cc7855da0df0c55b74a4c39d854c15baca63b629c6e911277a3714bffe29654 libhello.bst 
     waiting 8e37238eaf58697f9d49cfe777023d197225eebb76c0660490349cb87530c667 hello.bst 
===============================================================================
[--:--:--][8175172a][build:base/alpine.bst               ] START   integration-commands/base-alpine/8175172a-build.13344.log
[--:--:--][8175172a][build:base/alpine.bst               ] START   Staging sources
[00:00:04][8175172a][build:base/alpine.bst               ] SUCCESS Staging sources
[--:--:--][8175172a][build:base/alpine.bst               ] START   Caching artifact
[00:00:02][8175172a][build:base/alpine.bst               ] SUCCESS Caching artifact
[00:00:07][8175172a][build:base/alpine.bst               ] SUCCESS integration-commands/base-alpine/8175172a-build.13344.log
[--:--:--][49998189][build:base.bst                      ] START   integration-commands/base/49998189-build.13347.log
[--:--:--][49998189][build:base.bst                      ] START   Caching artifact
[00:00:00][49998189][build:base.bst                      ] SUCCESS Caching artifact
[00:00:00][49998189][build:base.bst                      ] SUCCESS integration-commands/base/49998189-build.13347.log
[--:--:--][3cc7855d][build:libhello.bst                  ] START   integration-commands/libhello/3cc7855d-build.13349.log
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Staging dependencies
[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Staging dependencies
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Integrating sandbox
[--:--:--][3cc7855d][build:libhello.bst                  ] STATUS  Running integration command

    ldconfig "/usr/lib"

[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Integrating sandbox
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Staging sources
[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Staging sources
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Running build-commands
[--:--:--][3cc7855d][build:libhello.bst                  ] STATUS  Running build-commands

    make PREFIX="/usr"

[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Running build-commands
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Running install-commands
[--:--:--][3cc7855d][build:libhello.bst                  ] STATUS  Running install-commands

    make -j1 PREFIX="/usr" DESTDIR="/buildstream-install" install

[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Running install-commands
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Running strip-commands
[--:--:--][3cc7855d][build:libhello.bst                  ] STATUS  Running strip-commands

    cd "/buildstream-install" && find -type f \
      '(' -perm -111 -o -name '*.so*' \
          -o -name '*.cmxs' -o -name '*.node' ')' \
      -exec sh -ec \
      'read -n4 hdr <"$1" # check for elf header
       if [ "$hdr" != "$(printf \\x7fELF)" ]; then
           exit 0
       fi
       debugfile="/buildstream-install/usr/lib/debug/$1"
       mkdir -p "$(dirname "$debugfile")"
       objcopy --only-keep-debug --compress-debug-sections "$1" "$debugfile"
       chmod 644 "$debugfile"
       strip --remove-section=.comment --remove-section=.note --strip-unneeded "$1"
       objcopy --add-gnu-debuglink "$debugfile" "$1"' - {} ';'

[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Running strip-commands
[--:--:--][3cc7855d][build:libhello.bst                  ] START   Caching artifact
[00:00:00][3cc7855d][build:libhello.bst                  ] SUCCESS Caching artifact
[00:00:01][3cc7855d][build:libhello.bst                  ] SUCCESS integration-commands/libhello/3cc7855d-build.13349.log
[--:--:--][8e37238e][build:hello.bst                     ] START   integration-commands/hello/8e37238e-build.13398.log
[--:--:--][8e37238e][build:hello.bst                     ] START   Staging dependencies
[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][8e37238e][build:hello.bst                     ] START   Integrating sandbox
[--:--:--][8e37238e][build:hello.bst                     ] STATUS  Running integration command

    ldconfig "/usr/lib"

[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][8e37238e][build:hello.bst                     ] START   Staging sources
[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Staging sources
[--:--:--][8e37238e][build:hello.bst                     ] START   Running build-commands
[--:--:--][8e37238e][build:hello.bst                     ] STATUS  Running build-commands

    make PREFIX="/usr"

[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Running build-commands
[--:--:--][8e37238e][build:hello.bst                     ] START   Running install-commands
[--:--:--][8e37238e][build:hello.bst                     ] STATUS  Running install-commands

    make -j1 PREFIX="/usr" DESTDIR="/buildstream-install" install

[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Running install-commands
[--:--:--][8e37238e][build:hello.bst                     ] START   Running strip-commands
[--:--:--][8e37238e][build:hello.bst                     ] STATUS  Running strip-commands

    cd "/buildstream-install" && find -type f \
      '(' -perm -111 -o -name '*.so*' \
          -o -name '*.cmxs' -o -name '*.node' ')' \
      -exec sh -ec \
      'read -n4 hdr <"$1" # check for elf header
       if [ "$hdr" != "$(printf \\x7fELF)" ]; then
           exit 0
       fi
       debugfile="/buildstream-install/usr/lib/debug/$1"
       mkdir -p "$(dirname "$debugfile")"
       objcopy --only-keep-debug --compress-debug-sections "$1" "$debugfile"
       chmod 644 "$debugfile"
       strip --remove-section=.comment --remove-section=.note --strip-unneeded "$1"
       objcopy --add-gnu-debuglink "$debugfile" "$1"' - {} ';'

[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Running strip-commands
[--:--:--][8e37238e][build:hello.bst                     ] START   Caching artifact
[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS Caching artifact
[00:00:00][8e37238e][build:hello.bst                     ] SUCCESS integration-commands/hello/8e37238e-build.13398.log
[00:00:10][][] SUCCESS Build

Pipeline Summary
  Total:       4
  Session:     4
  Fetch Queue: processed 0, skipped 4, failed 0 
  Build Queue: processed 4, skipped 0, failed 0
buildstream-1.6.9/doc/source/sessions-stored/integration-commands-shell.html000066400000000000000000000240721437515270000274010ustar00rootroot00000000000000
user@host:~/integration-commands$ bst shell hello.bst -- hello pony

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][8e37238e][ main:hello.bst                     ] START   Staging dependencies
[00:00:00][8e37238e][ main:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][8e37238e][ main:hello.bst                     ] START   Integrating sandbox
[--:--:--][8175172a][ main:base/alpine.bst               ] STATUS  Running integration command

    ldconfig "/usr/lib"

[00:00:00][8e37238e][ main:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][8e37238e][ main:hello.bst                     ] STATUS  Running command

    hello pony

Hello pony
buildstream-1.6.9/doc/source/sessions-stored/running-commands-build.html000066400000000000000000001202051437515270000265210ustar00rootroot00000000000000
user@host:~/running-commands$ bst build hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Build
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][][] START   Checking sources
[00:00:00][][] SUCCESS Checking sources

BuildStream Version 1.4.0
  Session Start: Monday, 02-09-2019 at 16:06:05
  Project:       running-commands (/home/user/repos/buildstream/doc/examples/running-commands)
  Targets:       hello.bst
  Cache Usage:   12K / infinity (0%)

User Configuration
  Configuration File:      /home/user/repos/buildstream/doc/run-bst-wz45hsv4/buildstream.conf
  Log Files:               /home/user/repos/buildstream/doc/run-bst-wz45hsv4/logs
  Source Mirrors:          /home/user/.cache/buildstream/sources
  Build Area:              /home/user/repos/buildstream/doc/run-bst-wz45hsv4/build
  Artifact Cache:          /home/user/repos/buildstream/doc/run-bst-wz45hsv4/artifacts
  Strict Build Plan:       Yes
  Maximum Fetch Tasks:     10
  Maximum Build Tasks:     4
  Maximum Push Tasks:      4
  Maximum Network Retries: 2

Pipeline
   buildable d5472eb60ceb8a45b0ed5907912b5d89ce91d8a4e9b454ea2663f052c6cf4dff base/alpine.bst 
     waiting 50110d08831106135148d991c33f15794b530938712216e84593eeff7c1ef8ca base.bst 
     waiting 2bee306eafed3b07aa9dae4c28185429984ca7b246585901709e10ba7ca52c8e hello.bst 
===============================================================================
[--:--:--][d5472eb6][build:base/alpine.bst               ] START   running-commands/base-alpine/d5472eb6-build.10941.log
[--:--:--][d5472eb6][build:base/alpine.bst               ] START   Staging sources
[00:00:04][d5472eb6][build:base/alpine.bst               ] SUCCESS Staging sources
[--:--:--][d5472eb6][build:base/alpine.bst               ] START   Caching artifact
[00:00:02][d5472eb6][build:base/alpine.bst               ] SUCCESS Caching artifact
[00:00:08][d5472eb6][build:base/alpine.bst               ] SUCCESS running-commands/base-alpine/d5472eb6-build.10941.log
[--:--:--][50110d08][build:base.bst                      ] START   running-commands/base/50110d08-build.10946.log
[--:--:--][50110d08][build:base.bst                      ] START   Caching artifact
[00:00:00][50110d08][build:base.bst                      ] SUCCESS Caching artifact
[00:00:00][50110d08][build:base.bst                      ] SUCCESS running-commands/base/50110d08-build.10946.log
[--:--:--][2bee306e][build:hello.bst                     ] START   running-commands/hello/2bee306e-build.10948.log
[--:--:--][2bee306e][build:hello.bst                     ] START   Staging dependencies
[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][2bee306e][build:hello.bst                     ] START   Integrating sandbox
[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][2bee306e][build:hello.bst                     ] START   Staging sources
[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Staging sources
[--:--:--][2bee306e][build:hello.bst                     ] START   Running build-commands
[--:--:--][2bee306e][build:hello.bst                     ] STATUS  Running build-commands

    make PREFIX="/usr"

[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Running build-commands
[--:--:--][2bee306e][build:hello.bst                     ] START   Running install-commands
[--:--:--][2bee306e][build:hello.bst                     ] STATUS  Running install-commands

    make -j1 PREFIX="/usr" DESTDIR="/buildstream-install" install

[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Running install-commands
[--:--:--][2bee306e][build:hello.bst                     ] START   Running strip-commands
[--:--:--][2bee306e][build:hello.bst                     ] STATUS  Running strip-commands

    cd "/buildstream-install" && find -type f \
      '(' -perm -111 -o -name '*.so*' \
          -o -name '*.cmxs' -o -name '*.node' ')' \
      -exec sh -ec \
      'read -n4 hdr <"$1" # check for elf header
       if [ "$hdr" != "$(printf \\x7fELF)" ]; then
           exit 0
       fi
       debugfile="/buildstream-install/usr/lib/debug/$1"
       mkdir -p "$(dirname "$debugfile")"
       objcopy --only-keep-debug --compress-debug-sections "$1" "$debugfile"
       chmod 644 "$debugfile"
       strip --remove-section=.comment --remove-section=.note --strip-unneeded "$1"
       objcopy --add-gnu-debuglink "$debugfile" "$1"' - {} ';'

[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Running strip-commands
[--:--:--][2bee306e][build:hello.bst                     ] START   Caching artifact
[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS Caching artifact
[00:00:00][2bee306e][build:hello.bst                     ] SUCCESS running-commands/hello/2bee306e-build.10948.log
[00:00:08][][] SUCCESS Build

Pipeline Summary
  Total:       3
  Session:     3
  Fetch Queue: processed 0, skipped 3, failed 0 
  Build Queue: processed 3, skipped 0, failed 0
buildstream-1.6.9/doc/source/sessions-stored/running-commands-shell.html000066400000000000000000000221071437515270000265330ustar00rootroot00000000000000
user@host:~/running-commands$ bst shell hello.bst -- hello

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
[--:--:--][2bee306e][ main:hello.bst                     ] START   Staging dependencies
[00:00:00][2bee306e][ main:hello.bst                     ] SUCCESS Staging dependencies
[--:--:--][2bee306e][ main:hello.bst                     ] START   Integrating sandbox
[00:00:00][2bee306e][ main:hello.bst                     ] SUCCESS Integrating sandbox
[--:--:--][2bee306e][ main:hello.bst                     ] STATUS  Running command

    hello

Hello World
buildstream-1.6.9/doc/source/sessions-stored/running-commands-show-after.html000066400000000000000000000121441437515270000275030ustar00rootroot00000000000000
user@host:~/running-commands$ bst show hello.bst

[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
      cached d5472eb60ceb8a45b0ed5907912b5d89ce91d8a4e9b454ea2663f052c6cf4dff base/alpine.bst 
      cached 50110d08831106135148d991c33f15794b530938712216e84593eeff7c1ef8ca base.bst 
      cached 2bee306eafed3b07aa9dae4c28185429984ca7b246585901709e10ba7ca52c8e hello.bst 
buildstream-1.6.9/doc/source/sessions-stored/running-commands-show-before.html000066400000000000000000000135061437515270000276470ustar00rootroot00000000000000
user@host:~/running-commands$ bst show hello.bst

[--:--:--][][] STATUS  Cache usage recomputed: 12K / infinity (0%)
[--:--:--][][] START   Loading elements
[00:00:00][][] SUCCESS Loading elements
[--:--:--][][] START   Resolving elements
[00:00:00][][] SUCCESS Resolving elements
[--:--:--][][] START   Resolving cached state
[00:00:00][][] SUCCESS Resolving cached state
   buildable d5472eb60ceb8a45b0ed5907912b5d89ce91d8a4e9b454ea2663f052c6cf4dff base/alpine.bst 
     waiting 50110d08831106135148d991c33f15794b530938712216e84593eeff7c1ef8ca base.bst 
     waiting 2bee306eafed3b07aa9dae4c28185429984ca7b246585901709e10ba7ca52c8e hello.bst 
buildstream-1.6.9/doc/source/tutorial/000077500000000000000000000000001437515270000177565ustar00rootroot00000000000000buildstream-1.6.9/doc/source/tutorial/autotools.rst000066400000000000000000000121341437515270000225420ustar00rootroot00000000000000 Using the autotools element =========================== In :ref:`the last chapter ` we observed how the :mod:`manual ` element works, allowing one to specify and run commands manually in the process of constructing an *artifact*. In this chapter, we'll go over a mostly automated build of a similar hello world example. We will observe how our configurations of the :mod:`autotools ` element translate to configurations on the :mod:`manual ` element, and observe how :ref:`variable substitution ` works. .. note:: This example is distributed with BuildStream in the `doc/examples/autotools `_ subdirectory. Overview -------- Instead of using the :mod:`local ` source as we have been using in the previous examples, we're going to use a :mod:`tar ` source this time to obtain the ``automake`` release tarball directly from the upstream hosting. In this example we're going to build the example program included in the upstream ``automake`` tarball itself, and we're going to use the automated :mod:`autotools ` build element to do so. Project structure ----------------- ``project.conf`` ~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/autotools/project.conf :language: yaml Like the :ref:`last project.conf `, we've added another :ref:`source alias ` for ``gnu``, the location from which we're going to download the ``automake`` tarball. ``elements/base/alpine.bst`` and ``elements/base.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The alpine base and base stack element are defined in the same way as in the last chapter: :ref:`tutorial_running_commands`. ``elements/hello.bst`` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/autotools/elements/hello.bst :language: yaml In this case, we haven't touched the element's ``config`` section at all, instead we just slightly override the bahavior of the :mod:`autotools ` build element by overriding the :ref:`command-subdir variable ` Looking at variables '''''''''''''''''''' Let's take a moment and observe how :ref:`element composition ` works with variables. As :ref:`the documentation ` mentions: * The initial settings of the ``project.conf`` variables are setup using BuildStream's :ref:`builtin defaults `. * After this, your local ``project.conf`` may override some variables on a project wide basis. Those will in turn be overridden by any defaults provided by element classes, such as the variables set in the documentation of the :mod:`autotools ` build element. The variables you set in your final ```` *element declarations*, will have the final say on the value of a particular variable. * Finally, the variables, which may be composed of other variables, are resolved after all composition has taken place. The variable we needed to override was ``command-subdir``, which is an automatic variable provided by the :mod:`BuildElement ` abstract class. This variable simply instructs the :mod:`BuildElement ` in which subdirectory of the ``%{build-root}`` to run it's commands in. One can always display the resolved set of variables for a given element's configuration using :ref:`bst show `: .. raw:: html :file: ../sessions/autotools-show-variables.html As an exercise, we suggest that you modify the ``hello.bst`` element to set the prefix like so: .. 
code:: yaml variables: prefix: "/opt" And rerun the above :ref:`bst show ` command to observe how this changes the output. Observe where the variables are declared in the :ref:`builtin defaults ` and :mod:`autotools ` element documentation, and how overriding these affects the resolved set of variables. Using the project ----------------- Build the hello.bst element ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build the project, run :ref:`bst build ` in the following way: .. raw:: html :file: ../sessions/autotools-build.html Run the hello world program ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We probably know by now what's going to happen, but let's run the program we've compiled anyway using :ref:`bst shell `: .. raw:: html :file: ../sessions/autotools-shell.html Summary ------- Now we've used a builtin :ref:`build element `, and we've taken a look into :ref:`how variables work `. When browsing the :ref:`build elements ` in the documentation, we are now equipped with a good idea of what an element is going to do, based on their default YAML configuration and any configurations we have in our project. We can also now observe what variables are in effect for the build of a given element, using :ref:`bst show `. buildstream-1.6.9/doc/source/tutorial/first-project.rst000066400000000000000000000071511437515270000233070ustar00rootroot00000000000000 .. _tutorial_first_project: Your first project ================== To get a feel for the basics, we'll start with the most basic BuildStream project we could think of. .. note:: This example is distributed with BuildStream in the `doc/examples/first-project `_ subdirectory. Creating the project -------------------- First, lets create the project itself using the convenience :ref:`bst init ` command to create a little project structure: .. raw:: html :file: ../sessions/first-project-init.html This will give you a :ref:`project.conf ` which will look like this: ``project.conf`` ~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/first-project/project.conf :language: yaml The :ref:`project.conf ` is a central point of configuration for your BuildStream project. Add some content ---------------- BuildStream processes directory trees as input and output, so let's just create a ``hello.world`` file for the project to have. .. raw:: html :file: ../sessions/first-project-touch.html Declare the element ------------------- Here we're going to declare a simple :mod:`import ` element which will import the ``hello.world`` file we've created in the previous step. Create ``elements/hello.bst`` with the following content: ``elements/hello.bst`` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/first-project/elements/hello.bst :language: yaml The source ~~~~~~~~~~ The :mod:`local ` source used by the ``hello.bst`` element, can be used to access files or directories which are stored in the same repository as your BuildStream project. The ``hello.bst`` element uses the :mod:`local ` source to stage our local ``hello.world`` file. The element ~~~~~~~~~~~ The :mod:`import ` element can be used to simply add content directly to the output artifacts. In this case, it simply takes the ``hello.world`` file provided by it's source and stages it directly to the artifact output root. .. tip:: In this example so far we've used two plugins, the :mod:`local ` source and the :mod:`import ` element. You can always browse the documentation for all plugins in the :ref:`plugins section ` of the manual. 
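For reference, an element which imports a single local file, like the ``hello.bst`` declared above, generally boils down to a few lines of YAML. The following is only an illustrative sketch; the authoritative content is the ``elements/hello.bst`` file included above:

.. code:: yaml

   kind: import

   # Stage the local hello.world file at the root of the artifact
   sources:
   - kind: local
     path: hello.world
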
Build the element ----------------- In order to carry out the activities of the :mod:`import ` element we've declared, we're going to have to ask BuildStream to *build*. This process will collect all of the sources required for the specified ``hello.bst`` and get the backing :mod:`import ` element to generate an *artifact* for us. .. raw:: html :file: ../sessions/first-project-build.html Now the artifact is ready. Using :ref:`bst show `, we can observe that the artifact's state, which was reported as ``buildable`` in the :ref:`bst build ` command above, has now changed to ``cached``: .. raw:: html :file: ../sessions/first-project-show.html Observe the output ------------------ Now that we've finished building, we can checkout the output of the artifact we've created using :ref:`bst checkout ` .. raw:: html :file: ../sessions/first-project-checkout.html And observe that the file we expect is there: .. raw:: html :file: ../sessions/first-project-ls.html Summary ------- In this section we've created our first BuildStream project from scratch, but it doesnt do much. We've observed the general structure of a BuildStream project, and we've run our first build. buildstream-1.6.9/doc/source/tutorial/integration-commands.rst000066400000000000000000000111331437515270000246310ustar00rootroot00000000000000 Integration commands ==================== Sometimes a software requires more configuration or processing than what is performed at installation time, otherwise it will not run properly. This is especially true in cases where a daemon or library interoperates with third party extensions and needs to maintain a system wide cache whenever it's extensions are installed or removed; system wide font caches are an example of this. In these cases we use :ref:`integration commands ` to ensure that a runtime is ready to run after all of it's components have been *staged*. .. note:: This example is distributed with BuildStream in the `doc/examples/integration-commands `_ subdirectory. Overview -------- In this chapter, we'll be exploring :ref:`integration commands `, which will be our first look at :ref:`public data `. Project structure ----------------- ``project.conf`` and ``elements/base.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The project.conf and base stack :mod:`stack ` element are configured in the same way as in the previous chapter: :ref:`tutorial_running_commands`. ``elements/base/alpine.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/integration-commands/elements/base/alpine.bst :language: yaml This is the same ``base/alpine.bst`` we've seen in previous chapters, except that we've added an :ref:`integration command `. This informs BuildStream that whenever the output of this element is expected to *run*, this command should be run first. In this case we are simply running ``ldconfig`` as a precautionary measure, to ensure that the runtime linker is ready to find any shared libraries we may have added to ``%{libdir}``. Looking at public data '''''''''''''''''''''' The :ref:`integration commands ` used here is the first time we've used any :ref:`builtin public data `. Public data is a free form portion of an element's configuration and is not necessarily understood by the element on which it is declared, public data is intended to be read by it's reverse dependency elements. This allows annotations on some elements to inform elements later in the dependency chain about details of it's artifact, or to suggest how it should be processed. 
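As a rough sketch, an integration command is declared under the builtin ``bst`` domain of an element's public data. The snippet below is illustrative only; the actual declaration used in this example lives in ``elements/base/alpine.bst``, shown above:

.. code:: yaml

   public:
     bst:
       # Commands to run before anything staged on top of this
       # element is expected to be executed
       integration-commands:
       - ldconfig "%{libdir}"
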
``elements/libhello.bst`` and ``elements/hello.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These are basically manual elements very similar to the ones we've seen in the previous chapter: :ref:`tutorial_running_commands`. These produce a library and a hello program which uses the library, we will consider these irrelevant to the topic and leave examination of `their sources `_ as an exercise for the reader. Using the project ----------------- Build the hello.bst element ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build the project, run :ref:`bst build ` in the following way: .. raw:: html :file: ../sessions/integration-commands-build.html Observe in the build process above, the integration command declared on the ``base/alpine.bst`` element is run after staging the dependency artifacts into the build sandbox and before running any of the build commands, for both of the ``libhello.bst`` and ``hello.bst`` elements. BuildStream assumes that commands which are to be run in the build sandbox need to be run in an *integrated* sandbox. .. tip:: Integration commands can be taxing on your overall build process, because they need to run at the beginning of every build which :ref:`runtime depends ` on the element declaring them. For this reason, it is better to leave out more onerous tasks if they are not needed at software build time, and handle those specific tasks differently later in the pipeline, before deployment. Run the hello world program ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Unlike the previous chapters, this hello world program takes an argument, we can invoke the program using :ref:`bst shell `: .. raw:: html :file: ../sessions/integration-commands-shell.html Here we see again, the integration commands are also used when preparing the shell to launch a command. Summary ------- In this chapter we've observed how :ref:`integration commands ` work, and we now know about :ref:`public data `, which plugins can read from their dependencies in order to influence their build process. buildstream-1.6.9/doc/source/tutorial/running-commands.rst000066400000000000000000000174021437515270000237730ustar00rootroot00000000000000 .. _tutorial_running_commands: Running commands ================ In :ref:`the first chapter ` we only imported a file to create an artifact, this time lets run some commands inside the :ref:`isolated build sandbox `. .. note:: This example is distributed with BuildStream in the `doc/examples/running-commands `_ subdirectory. Overview -------- In this chapter, we'll be running commands inside the sandboxed execution environment and producing build output. We'll be compiling the following simple C file: ``files/src/hello.c`` ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/running-commands/files/src/hello.c :language: c And we're going to build it using ``make``, using the following Makefile: ``files/src/Makefile`` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/running-commands/files/src/Makefile :language: Makefile We'll be using the most fundamental :ref:`build element `, the :mod:`manual ` build element. The :mod:`manual ` element is the backbone on which all the other build elements are built, so understanding how it works at this level is helpful. Project structure ----------------- In this project we have a ``project.conf``, a directory with some source code, and 3 element declarations. Let's first take a peek at what we need to build using :ref:`bst show `: .. 
raw:: html :file: ../sessions/running-commands-show-before.html This time we have loaded a pipeline with 3 elements, let's go over what they do in detail. .. _tutorial_running_commands_project_conf: ``project.conf`` ~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/running-commands/project.conf :language: yaml Our ``project.conf`` is very much like the last one, except that we have defined a :ref:`source alias ` for ``alpine``. .. tip:: Using :ref:`source aliases ` for groups of sources which are generally hosted together is encouraged. This allows one to globally change the access scheme or URL for a group of repositories which belong together. ``elements/base/alpine.bst`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/running-commands/elements/base/alpine.bst :language: yaml This :mod:`import ` element uses a :mod:`tar ` source to download our Alpine Linux tarball to create our base runtime. This tarball is a sysroot which provides the C runtime libraries and some programs - this is what will be providing the programs we're going to run in this example. ``elements/base.bst`` ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/running-commands/elements/base.bst :language: yaml This is just a symbolic :mod:`stack ` element which declares that anything which depends on it, will implicitly depend on ``base/alpine.bst``. It is typical to use stack elements in places where the implementing logical software stack could change, but you rather not have your higher level components carry knowledge about those changing components. Any element which :ref:`runtime depends ` on the ``base.bst`` will now be able to execute programs provided by the imported ``base/alpine.bst`` runtime. ``elements/hello.bst`` ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ../../examples/running-commands/elements/hello.bst :language: yaml Finally we have the element which executes commands. Looking at the :mod:`manual ` element's documentation, we can see that the element configuration exposes four command lists: * ``configure-commands`` Commands which are run in preparation of a build. This is where you would normally call any configure stage build tools to configure the build how you like and generate some files needed for the build. * ``build-commands`` Commands to run the build, usually a build system will invoke the compiler for you here. * ``install-commands`` Commands to install the build results. Commands to install the build results into the target system, these should install files somewhere under ``%{install-root}``. * ``strip-commands`` Commands to doctor the build results after the install. Typically this involves stripping binaries of debugging symbols or stripping timestamps from build results to ensure reproducibility. .. tip:: All other :ref:`build elements ` implement exactly the same command lists too, except that they provide default commands specific to invoke the build systems they support. The :mod:`manual ` element however is the most basic and does not provide any default commands, so we have instructed it to use ``make`` to build and install our program. Using the project ----------------- Build the hello.bst element ~~~~~~~~~~~~~~~~~~~~~~~~~~~ To build the project, run :ref:`bst build ` in the following way: .. raw:: html :file: ../sessions/running-commands-build.html Now we've built our hello world program, using ``make`` and the C compiler provided by the Alpine Linux image. 
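To recap the four command lists described earlier, a manual element driving a ``make`` based project can be declared roughly as follows. This is an illustrative sketch assuming the layout of this example (a ``base.bst`` stack and sources under ``files/src``); the authoritative declaration is the ``elements/hello.bst`` file included above:

.. code:: yaml

   kind: manual

   # Depend on the base runtime which provides make and a C compiler
   depends:
   - filename: base.bst
     type: build

   # Stage the Makefile and hello.c shown above
   sources:
   - kind: local
     path: files/src

   config:
     build-commands:
     - make PREFIX="%{prefix}"

     install-commands:
     - make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install
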
In the :ref:`first chapter ` we observed that the inputs and output of an element are *directory trees*. In this example, the directory tree generated by ``base/alpine.bst`` is consumed by ``hello.bst`` due to the :ref:`implicit runtime dependency ` introduced by ``base.bst``. .. tip:: All of the :ref:`dependencies ` which are required to run for the sake of a build, are staged at the root of the build sandbox. These comprise the runtime environment in which the depending element will run commands. The result is that the ``make`` program and C compiler provided by ``base/alpine.bst`` were already in ``$PATH`` and ready to run when the commands were needed by ``hello.bst``. Now observe that all of the elements in the loaded pipeline are ``cached``, the element is *built*: .. raw:: html :file: ../sessions/running-commands-show-after.html Run the hello world program ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Now that we've built everything, we can indulge ourselves in running the hello world program using :ref:`bst shell `: .. raw:: html :file: ../sessions/running-commands-shell.html Here, :ref:`bst shell ` created a runtime environment for running the ``hello.bst`` element. This was done by staging all of the dependencies of ``hello.bst`` including the ``hello.bst`` output itself into a directory. Once a directory with all of the dependencies was staged and ready, we ran the ``hello`` command from within the build sandbox environment. .. tip:: When specifying a command for :ref:`bst shell ` to run, we always specify ``--`` first. This is a commonly understood shell syntax to indicate that the remaining arguments are to be treated literally. Specifying ``--`` is optional and disambiguates BuildStream's arguments and options from those of the program being run by :ref:`bst shell `. Summary ------- In this chapter we've explored how to use the :mod:`manual ` element, which forms the basis of all build elements. We've also observed how the directory tree from the output *artifact* of one element is later *staged* at the root of the sandbox, as input for use by any build elements which :ref:`depend ` on that element. .. tip:: The way that elements consume their dependency input can vary across the different *kinds* of elements. This chapter describes how it works for :mod:`build elements ` implementations, which are the most commonly used element type. buildstream-1.6.9/doc/source/using_commands.rst000066400000000000000000000037421437515270000216610ustar00rootroot00000000000000 .. _commands: Commands ======== This page contains documentation for each BuildStream command, along with their possible options and arguments. Each command can be invoked on the command line, where, in most cases, this will be from the project's main directory. ---- .. The bst options e.g. bst --version, or bst --verbose etc. .. _invoking_bst: .. click:: buildstream._frontend:cli :prog: bst .. Further description of the command goes here ---- .. the `bst init` command .. _invoking_init: .. click:: buildstream._frontend.cli:init :prog: bst init ---- .. the `bst build` command .. _invoking_build: .. click:: buildstream._frontend.cli:build :prog: bst build ---- .. _invoking_fetch: .. click:: buildstream._frontend.cli:fetch :prog: bst fetch ---- .. _invoking_track: .. click:: buildstream._frontend.cli:track :prog: bst track ---- .. _invoking_pull: .. click:: buildstream._frontend.cli:pull :prog: bst pull ---- .. _invoking_push: .. click:: buildstream._frontend.cli:push :prog: bst push ---- .. _invoking_show: .. 
click:: buildstream._frontend.cli:show :prog: bst show ---- .. _invoking_shell: .. click:: buildstream._frontend.cli:shell :prog: bst shell ---- .. _invoking_checkout: .. click:: buildstream._frontend.cli:checkout :prog: bst checkout ---- .. _invoking_source_bundle: .. click:: buildstream._frontend.cli:source_bundle :prog: bst source bundle ---- .. _invoking_workspace: .. click:: buildstream._frontend.cli:workspace :prog: bst workspace ---- .. _invoking_workspace_open: .. click:: buildstream._frontend.cli:workspace_open :prog: bst workspace open ---- .. _invoking_workspace_close: .. click:: buildstream._frontend.cli:workspace_close :prog: bst workspace close ---- .. _invoking_workspace_reset: .. click:: buildstream._frontend.cli:workspace_reset :prog: bst workspace reset ---- .. _invoking_workspace_list: .. click:: buildstream._frontend.cli:workspace_list :prog: bst workspace list buildstream-1.6.9/doc/source/using_config.rst000066400000000000000000000067651437515270000213350ustar00rootroot00000000000000 .. _user_config: User configuration ================== User configuration and preferences can be specified in a user provided configuration file, and usually also on the command line. Values specified in a user provided configuration file override the defaults, while command line options take precedence over any other specified configurations. Configuration file ------------------ Users can provide a configuration file to override parameters in the default configuration. Unless a configuration file is explicitly specified on the command line when invoking ``bst``, an attempt is made to load user specific configuration from ``$XDG_CONFIG_HOME/buildstream.conf``. On most Linux based systems, the location will be ``~/.config/buildstream.conf`` .. note:: If you have have multiple major versions of BuildStream installed, you can have separate configuration files in your ``${XDG_CONFIG_HOME}``. You can do this by naming them according to the major versions of BuildStream you have installed. BuildStream 1 will load it's configuration from ``$XDG_CONFIG_HOME/buildstream1.conf`` and BuildStream 2 will load it's configuration from ``$XDG_CONFIG_HOME/buildstream2.conf``, while any version will fallback to ``$XDG_CONFIG_HOME/buildstream.conf``. Project specific value ---------------------- The ``projects`` key can be used to specify project specific configurations, the supported configurations on a project wide basis are listed here. .. _config_artifacts: Artifact server ~~~~~~~~~~~~~~~ The project you build will often specify a :ref:`remote artifact cache ` already, but you may want to specify extra caches. There are two ways to do this. You can add one or more global caches: **Example** .. code:: yaml artifacts: url: https://artifacts.com/artifacts Caches listed there will be considered lower priority than those specified by the project configuration. You can also add project-specific caches: **Example** .. code:: yaml projects: project-name: artifacts: - url: https://artifacts.com/artifacts1 - url: ssh://user@artifacts.com/artifacts2 push: true Caches listed here will be considered higher priority than those specified by the project. If you give a list of URLs, earlier entries in the list will have higher priority than later ones. .. _user_config_strict_mode: Strict build plan ~~~~~~~~~~~~~~~~~ The strict build plan option decides whether you want elements to rebuild when their dependencies have changed. 
This is enabled by default, but recommended to turn off in developer scenarios where you might want to build a large system and test it quickly after modifying some low level component. **Example** .. code:: yaml projects: project-name: strict: False .. note:: It is always possible to override this at invocation time using the ``--strict`` and ``--no-strict`` command line options. .. _config_default_mirror: Default Mirror ~~~~~~~~~~~~~~ When using :ref:`mirrors `, a default mirror can be defined to be fetched first. The default mirror is defined by its name, e.g. .. code:: yaml projects: project-name: default-mirror: oz .. note:: It is possible to override this at invocation time using the ``--default-mirror`` command-line option. Default configuration --------------------- The default BuildStream configuration is specified here for reference: .. literalinclude:: ../../buildstream/data/userconfig.yaml :language: yaml buildstream-1.6.9/doc/source/using_examples.rst000066400000000000000000000004601437515270000216700ustar00rootroot00000000000000 Examples ======== This page contains documentation for real examples of BuildStream projects, described step by step. All run under CI, so you can trust they are maintained and work as expected. .. toctree:: :maxdepth: 1 examples/flatpak-autotools examples/tar-mirror examples/git-mirror buildstream-1.6.9/doc/source/using_tutorial.rst000066400000000000000000000004741437515270000217220ustar00rootroot00000000000000 Tutorial ======== This is a step by step walkthrough meant help the user quickly get familiar with the fundamentals of creating and using BuildStream projects. .. toctree:: :numbered: :maxdepth: 1 tutorial/first-project tutorial/running-commands tutorial/autotools tutorial/integration-commands buildstream-1.6.9/man/000077500000000000000000000000001437515270000146215ustar00rootroot00000000000000buildstream-1.6.9/man/bst-build.1000066400000000000000000000012711437515270000165710ustar00rootroot00000000000000.TH "BST BUILD" "1" "14-Jul-2019" "" "bst build Manual" .SH NAME bst\-build \- Build elements in a pipeline .SH SYNOPSIS .B bst build [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Build elements in a pipeline .SH OPTIONS .TP \fB\-\-all\fP Build elements that would not be needed for the current build plan .TP \fB\-\-track\fP PATH Specify elements to track during the build. Can be used repeatedly to specify multiple elements .TP \fB\-\-track\-all\fP Track all elements in the pipeline .TP \fB\-\-track\-except\fP PATH Except certain dependencies from tracking .TP \fB\-J,\fP \-\-track\-cross\-junctions Allow tracking to cross junction boundaries .TP \fB\-\-track\-save\fP Deprecated: This is ignored buildstream-1.6.9/man/bst-checkout.1000066400000000000000000000013021437515270000172720ustar00rootroot00000000000000.TH "BST CHECKOUT" "1" "14-Jul-2019" "" "bst checkout Manual" .SH NAME bst\-checkout \- Checkout a built artifact .SH SYNOPSIS .B bst checkout [OPTIONS] ELEMENT LOCATION .SH DESCRIPTION Checkout a built artifact to the specified location .SH OPTIONS .TP \fB\-f,\fP \-\-force Allow files to be overwritten .TP \fB\-d,\fP \-\-deps [run|none] The dependencies to checkout (default: run) .TP \fB\-\-integrate\fP / \-\-no\-integrate Whether to run integration commands .TP \fB\-\-hardlinks\fP Checkout hardlinks instead of copies (handle with care) .TP \fB\-\-tar\fP Create a tarball from the artifact contents instead of a file tree. If LOCATION is '-', the tarball will be dumped to the standard output. 
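Tying together the user configuration options documented in ``using_config.rst`` above (artifact caches, the strict build plan and the default mirror), a hypothetical ``~/.config/buildstream.conf`` could combine them roughly like this sketch; the keys are those described in that document, while the URLs and project name are placeholders:

.. code:: yaml

   # A global artifact cache, considered lower priority than
   # any project specific caches
   artifacts:
     url: https://artifacts.example.com/artifacts

   projects:
     project-name:
       # Allow quick rebuilds without rebuilding reverse dependencies
       strict: False
       # Prefer the mirror named 'oz' when fetching sources
       default-mirror: oz
       # Project specific caches, earlier entries have higher priority
       artifacts:
       - url: https://artifacts.example.com/artifacts1
       - url: ssh://user@artifacts.example.com/artifacts2
         push: true
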
buildstream-1.6.9/man/bst-fetch.1000066400000000000000000000016631437515270000165700ustar00rootroot00000000000000.TH "BST FETCH" "1" "14-Jul-2019" "" "bst fetch Manual" .SH NAME bst\-fetch \- Fetch sources in a pipeline .SH SYNOPSIS .B bst fetch [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Fetch sources required to build the pipeline .PP By default this will only try to fetch sources which are required for the build plan of the specified target element, omitting sources for any elements which are already built and available in the artifact cache. .PP Specify `--deps` to control which sources to fetch: .PP  none: No dependencies, just the element itself plan: Only dependencies required for the build plan all: All dependencies .SH OPTIONS .TP \fB\-\-except\fP PATH Except certain dependencies from fetching .TP \fB\-d,\fP \-\-deps [none|plan|all] The dependencies to fetch (default: plan) .TP \fB\-\-track\fP Track new source references before fetching .TP \fB\-J,\fP \-\-track\-cross\-junctions Allow tracking to cross junction boundaries buildstream-1.6.9/man/bst-help.1000066400000000000000000000003221437515270000164160ustar00rootroot00000000000000.TH "BST HELP" "1" "14-Jul-2019" "" "bst help Manual" .SH NAME bst\-help \- Print usage information .SH SYNOPSIS .B bst help [OPTIONS] COMMAND .SH DESCRIPTION Print usage information about a given command buildstream-1.6.9/man/bst-init.1000066400000000000000000000012211437515270000164300ustar00rootroot00000000000000.TH "BST INIT" "1" "14-Jul-2019" "" "bst init Manual" .SH NAME bst\-init \- Initialize a new BuildStream project .SH SYNOPSIS .B bst init [OPTIONS] .SH DESCRIPTION Initialize a new BuildStream project .PP Creates a new BuildStream project.conf in the project directory. .PP Unless `--project-name` is specified, this will be an interactive session. .SH OPTIONS .TP \fB\-\-project\-name\fP TEXT The project name to use .TP \fB\-\-format\-version\fP INTEGER The required format version (default: 12) .TP \fB\-\-element\-path\fP PATH The subdirectory to store elements in (default: elements) .TP \fB\-f,\fP \-\-force Allow overwriting an existing project.conf buildstream-1.6.9/man/bst-pull.1000066400000000000000000000013731437515270000164510ustar00rootroot00000000000000.TH "BST PULL" "1" "14-Jul-2019" "" "bst pull Manual" .SH NAME bst\-pull \- Pull a built artifact .SH SYNOPSIS .B bst pull [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Pull a built artifact from the configured remote artifact cache. .PP By default the artifact will be pulled one of the configured caches if possible, following the usual priority order. If the `--remote` flag is given, only the specified cache will be queried. .PP Specify `--deps` to control which artifacts to pull: .PP  none: No dependencies, just the element itself all: All dependencies .SH OPTIONS .TP \fB\-d,\fP \-\-deps [none|all] The dependency artifacts to pull (default: none) .TP \fB\-r,\fP \-\-remote TEXT The URL of the remote cache (defaults to the first configured cache) buildstream-1.6.9/man/bst-push.1000066400000000000000000000012671437515270000164560ustar00rootroot00000000000000.TH "BST PUSH" "1" "14-Jul-2019" "" "bst push Manual" .SH NAME bst\-push \- Push a built artifact .SH SYNOPSIS .B bst push [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Push a built artifact to a remote artifact cache. .PP The default destination is the highest priority configured cache. You can override this by passing a different cache URL with the `--remote` flag. 
.PP Specify `--deps` to control which artifacts to push: .PP  none: No dependencies, just the element itself all: All dependencies .SH OPTIONS .TP \fB\-d,\fP \-\-deps [none|all] The dependencies to push (default: none) .TP \fB\-r,\fP \-\-remote TEXT The URL of the remote cache (defaults to the first configured cache) buildstream-1.6.9/man/bst-shell.1000066400000000000000000000017511437515270000166040ustar00rootroot00000000000000.TH "BST SHELL" "1" "14-Jul-2019" "" "bst shell Manual" .SH NAME bst\-shell \- Shell into an element's sandbox environment .SH SYNOPSIS .B bst shell [OPTIONS] ELEMENT [COMMAND]... .SH DESCRIPTION Run a command in the target element's sandbox environment .PP This will stage a temporary sysroot for running the target element, assuming it has already been built and all required artifacts are in the local cache. .PP Use the --build option to create a temporary sysroot for building the element instead. .PP Use the --sysroot option with an existing failed build directory or with a checkout of the given target, in order to use a specific sysroot. .PP If no COMMAND is specified, the default is to attempt to run an interactive shell. .SH OPTIONS .TP \fB\-b,\fP \-\-build Stage dependencies and sources to build .TP \fB\-s,\fP \-\-sysroot DIRECTORY An existing sysroot .TP \fB\-\-mount\fP HOSTPATH PATH Mount a file or directory into the sandbox .TP \fB\-\-isolate\fP Create an isolated build sandbox buildstream-1.6.9/man/bst-show.1000066400000000000000000000037601437515270000164570ustar00rootroot00000000000000.TH "BST SHOW" "1" "14-Jul-2019" "" "bst show Manual" .SH NAME bst\-show \- Show elements in the pipeline .SH SYNOPSIS .B bst show [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Show elements in the pipeline .PP By default this will show all of the dependencies of the specified target element. 
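.PP
A typical invocation might look like the following (the element name is
illustrative); see the FORMAT section below for richer output:
.PP
 bst show --deps run target.bst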
.PP
Specify `--deps` to control which elements to show:
.PP
 none: No dependencies, just the element itself
 plan: Dependencies required for a build plan
 run: Runtime dependencies, including the element itself
 build: Build time dependencies, excluding the element itself
 all: All dependencies
.PP
 FORMAT
 ~~~~~~
The --format option controls what should be printed for each element,
the following symbols can be used in the format string:
.PP
 %{name} The element name
 %{key} The abbreviated cache key (if all sources are consistent)
 %{full-key} The full cache key (if all sources are consistent)
 %{state} cached, buildable, waiting or inconsistent
 %{config} The element configuration
 %{vars} Variable configuration
 %{env} Environment settings
 %{public} Public domain data
 %{workspaced} If the element is workspaced
 %{workspace-dirs} A list of workspace directories
.PP
The value of the %{symbol} without the leading '%' character is understood
as a pythonic formatting string, so Python formatting features apply,
for example:
.PP
 bst show target.bst --format \
 'Name: %{name: ^20} Key: %{key: ^8} State: %{state}'
.PP
If you want to use a newline in a format string in bash, use the '$' modifier:
.PP
 bst show target.bst --format \
 $'---------- %{name} ----------\n%{vars}'
.SH OPTIONS
.TP
\fB\-\-except\fP PATH
Except certain dependencies
.TP
\fB\-d,\fP \-\-deps [none|plan|run|build|all]
The dependencies to show (default: all)
.TP
\fB\-\-order\fP [stage|alpha]
Staging or alphabetic ordering of dependencies
.TP
\fB\-f,\fP \-\-format FORMAT
Format string for each element

buildstream-1.6.9/man/bst-source-bundle.1000066400000000000000000000011571437515270000202440ustar00rootroot00000000000000
.TH "BST SOURCE-BUNDLE" "1" "14-Jul-2019" "" "bst source-bundle Manual"
.SH NAME
bst\-source-bundle \- Produce a source bundle to be manually executed
.SH SYNOPSIS
.B bst source-bundle [OPTIONS] ELEMENT
.SH DESCRIPTION
Produce a source bundle to be manually executed
.SH OPTIONS
.TP
\fB\-\-except\fP PATH
Elements to except from the tarball
.TP
\fB\-\-compression\fP [none|gz|bz2|xz]
Compress the tar file using the given algorithm.
.TP
\fB\-\-track\fP
Track new source references before bundling
.TP
\fB\-f,\fP \-\-force
Overwrite an existing tarball
.TP
\fB\-\-directory\fP TEXT
The directory to write the tarball to

buildstream-1.6.9/man/bst-track.1000066400000000000000000000015021437515270000165730ustar00rootroot00000000000000
.TH "BST TRACK" "1" "14-Jul-2019" "" "bst track Manual"
.SH NAME
bst\-track \- Track new source references
.SH SYNOPSIS
.B bst track [OPTIONS] [ELEMENTS]...
.SH DESCRIPTION
Consults the specified tracking branches for new versions available
to build and updates the project with any newly available references.
.PP
By default this will track just the specified element, but you can also
update a whole tree of dependencies in one go.
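.PP
For example, one might track an element together with all of its dependencies
(the element name is illustrative):
.PP
 bst track --deps all target.bst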
.PP Specify `--deps` to control which sources to track: .PP  none: No dependencies, just the specified elements all: All dependencies of all specified elements .SH OPTIONS .TP \fB\-\-except\fP PATH Except certain dependencies from tracking .TP \fB\-d,\fP \-\-deps [none|all] The dependencies to track (default: none) .TP \fB\-J,\fP \-\-cross\-junctions Allow crossing junction boundaries buildstream-1.6.9/man/bst-workspace-close.1000066400000000000000000000005471437515270000206000ustar00rootroot00000000000000.TH "BST WORKSPACE CLOSE" "1" "14-Jul-2019" "" "bst workspace close Manual" .SH NAME bst\-workspace\-close \- Close workspaces .SH SYNOPSIS .B bst workspace close [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Close a workspace .SH OPTIONS .TP \fB\-\-remove\-dir\fP Remove the path that contains the closed workspace .TP \fB\-a,\fP \-\-all Close all open workspaces buildstream-1.6.9/man/bst-workspace-list.1000066400000000000000000000003221437515270000204350ustar00rootroot00000000000000.TH "BST WORKSPACE LIST" "1" "14-Jul-2019" "" "bst workspace list Manual" .SH NAME bst\-workspace\-list \- List open workspaces .SH SYNOPSIS .B bst workspace list [OPTIONS] .SH DESCRIPTION List open workspaces buildstream-1.6.9/man/bst-workspace-open.1000066400000000000000000000010071437515270000204240ustar00rootroot00000000000000.TH "BST WORKSPACE OPEN" "1" "14-Jul-2019" "" "bst workspace open Manual" .SH NAME bst\-workspace\-open \- Open a new workspace .SH SYNOPSIS .B bst workspace open [OPTIONS] ELEMENT DIRECTORY .SH DESCRIPTION Open a workspace for manual source modification .SH OPTIONS .TP \fB\-\-no\-checkout\fP Do not checkout the source, only link to the given directory .TP \fB\-f,\fP \-\-force Overwrite files existing in checkout directory .TP \fB\-\-track\fP Track and fetch new source references before checking out the workspace buildstream-1.6.9/man/bst-workspace-reset.1000066400000000000000000000007261437515270000206140ustar00rootroot00000000000000.TH "BST WORKSPACE RESET" "1" "14-Jul-2019" "" "bst workspace reset Manual" .SH NAME bst\-workspace\-reset \- Reset a workspace to its original state .SH SYNOPSIS .B bst workspace reset [OPTIONS] [ELEMENTS]... .SH DESCRIPTION Reset a workspace to its original state .SH OPTIONS .TP \fB\-\-soft\fP Reset workspace state without affecting its contents .TP \fB\-\-track\fP Track and fetch the latest source before resetting .TP \fB\-a,\fP \-\-all Reset all open workspaces buildstream-1.6.9/man/bst-workspace.1000066400000000000000000000013571437515270000174750ustar00rootroot00000000000000.TH "BST WORKSPACE" "1" "14-Jul-2019" "" "bst workspace Manual" .SH NAME bst\-workspace \- Manipulate developer workspaces .SH SYNOPSIS .B bst workspace [OPTIONS] COMMAND [ARGS]... .SH DESCRIPTION Manipulate developer workspaces .SH COMMANDS .PP \fBopen\fP Open a new workspace See \fBbst workspace-open(1)\fP for full documentation on the \fBopen\fP command. .PP \fBclose\fP Close workspaces See \fBbst workspace-close(1)\fP for full documentation on the \fBclose\fP command. .PP \fBreset\fP Reset a workspace to its original state See \fBbst workspace-reset(1)\fP for full documentation on the \fBreset\fP command. .PP \fBlist\fP List open workspaces See \fBbst workspace-list(1)\fP for full documentation on the \fBlist\fP command. buildstream-1.6.9/man/bst.1000066400000000000000000000063721437515270000155030ustar00rootroot00000000000000.TH "BST" "1" "14-Jul-2019" "" "bst Manual" .SH NAME bst \- Build and manipulate BuildStream projects... .SH SYNOPSIS .B bst [OPTIONS] COMMAND [ARGS]... 
.SH DESCRIPTION Build and manipulate BuildStream projects .PP Most of the main options override options in the user preferences configuration file. .SH OPTIONS .TP \fB\-\-version\fP .PP .TP \fB\-c,\fP \-\-config FILE Configuration file to use .TP \fB\-C,\fP \-\-directory DIRECTORY Project directory (default: current directory) .TP \fB\-\-on\-error\fP [continue|quit|terminate] What to do when an error is encountered .TP \fB\-\-fetchers\fP INTEGER Maximum simultaneous download tasks .TP \fB\-\-builders\fP INTEGER Maximum simultaneous build tasks .TP \fB\-\-pushers\fP INTEGER Maximum simultaneous upload tasks .TP \fB\-\-max\-jobs\fP INTEGER Number of parallel jobs allowed for a given build task .TP \fB\-\-network\-retries\fP INTEGER Maximum retries for network tasks .TP \fB\-\-no\-interactive\fP Force non interactive mode, otherwise this is automatically decided .TP \fB\-\-verbose\fP / \-\-no\-verbose Be extra verbose .TP \fB\-\-debug\fP / \-\-no\-debug Print debugging output .TP \fB\-\-error\-lines\fP INTEGER Maximum number of lines to show from a task log .TP \fB\-\-message\-lines\fP INTEGER Maximum number of lines to show in a detailed message .TP \fB\-\-log\-file\fP FILENAME A file to store the main log (allows storing the main log while in interactive mode) .TP \fB\-\-colors\fP / \-\-no\-colors Force enable/disable ANSI color codes in output .TP \fB\-\-strict\fP / \-\-no\-strict Elements must be rebuilt when their dependencies have changed .TP \fB\-o,\fP \-\-option OPTION VALUE Specify a project option .TP \fB\-\-default\-mirror\fP TEXT The mirror to fetch from first, before attempting other mirrors .SH COMMANDS .PP \fBhelp\fP Print usage information See \fBbst-help(1)\fP for full documentation on the \fBhelp\fP command. .PP \fBinit\fP Initialize a new BuildStream project See \fBbst-init(1)\fP for full documentation on the \fBinit\fP command. .PP \fBbuild\fP Build elements in a pipeline See \fBbst-build(1)\fP for full documentation on the \fBbuild\fP command. .PP \fBfetch\fP Fetch sources in a pipeline See \fBbst-fetch(1)\fP for full documentation on the \fBfetch\fP command. .PP \fBtrack\fP Track new source references See \fBbst-track(1)\fP for full documentation on the \fBtrack\fP command. .PP \fBpull\fP Pull a built artifact See \fBbst-pull(1)\fP for full documentation on the \fBpull\fP command. .PP \fBpush\fP Push a built artifact See \fBbst-push(1)\fP for full documentation on the \fBpush\fP command. .PP \fBshow\fP Show elements in the pipeline See \fBbst-show(1)\fP for full documentation on the \fBshow\fP command. .PP \fBshell\fP Shell into an element's sandbox environment See \fBbst-shell(1)\fP for full documentation on the \fBshell\fP command. .PP \fBcheckout\fP Checkout a built artifact See \fBbst-checkout(1)\fP for full documentation on the \fBcheckout\fP command. .PP \fBworkspace\fP Manipulate developer workspaces See \fBbst-workspace(1)\fP for full documentation on the \fBworkspace\fP command. .PP \fBsource-bundle\fP Produce a build bundle to be manually executed See \fBbst-source-bundle(1)\fP for full documentation on the \fBsource-bundle\fP command. buildstream-1.6.9/requirements/000077500000000000000000000000001437515270000165715ustar00rootroot00000000000000buildstream-1.6.9/requirements/Makefile000066400000000000000000000007731437515270000202400ustar00rootroot00000000000000# Makefile for updating BuildStream's requirements files. 
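# Typical usage (assuming python3 and the venv module are available on the
# host) is simply to run `make` from this directory; each *.txt file is then
# regenerated from its corresponding *.in file inside a throwaway virtualenv,
# as implemented by the rules below.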
# REQUIREMENTS_IN := $(wildcard *.in) REQUIREMENTS_TXT := $(REQUIREMENTS_IN:.in=.txt) PYTHON := python3 VENV := $(PYTHON) -m venv VENV_PIP = $(VENVDIR)/bin/pip .PHONY: all FORCE all: $(REQUIREMENTS_TXT) %.txt: %.in FORCE $(eval VENVDIR := $(shell mktemp -d $(CURDIR)/.bst-venv.XXXXXX)) $(VENV) $(VENVDIR) $(VENV_PIP) install wheel $(VENV_PIP) install -r $< $(VENV_PIP) freeze -r $< | grep -v pkg-resources > $@ rm -rf $(VENVDIR) FORCE: buildstream-1.6.9/requirements/cov-requirements.in000066400000000000000000000001531437515270000224300ustar00rootroot00000000000000coverage == 4.4.0 ; python_version < '3.8' coverage == 4.5.4 ; python_version >= '3.8' pytest-cov >= 2.5.0 buildstream-1.6.9/requirements/cov-requirements.txt000066400000000000000000000003171437515270000226430ustar00rootroot00000000000000coverage==4.5.4 pytest-cov==2.10.1 ## The following requirements were added by pip freeze: attrs==22.1.0 iniconfig==1.1.1 packaging==21.3 pluggy==1.0.0 py==1.11.0 pyparsing==3.0.9 pytest==7.1.2 tomli==2.0.1 buildstream-1.6.9/requirements/dev-requirements.in000066400000000000000000000001461437515270000224210ustar00rootroot00000000000000pep8 pylint >= 2.10.0 pytest >= 3.7 pytest-datafiles pytest-env pytest-xdist pytest-timeout pyftpdlib buildstream-1.6.9/requirements/dev-requirements.txt000066400000000000000000000007561437515270000226410ustar00rootroot00000000000000pep8==1.7.1 pylint==2.14.5 pytest==7.1.2 pytest-datafiles==2.0.1 pytest-env==0.6.2 pytest-xdist==2.5.0 pytest-timeout==2.1.0 pyftpdlib==1.5.6 ## The following requirements were added by pip freeze: astroid==2.11.7 attrs==22.1.0 dill==0.3.5.1 execnet==1.9.0 iniconfig==1.1.1 isort==5.10.1 lazy-object-proxy==1.7.1 mccabe==0.7.0 packaging==21.3 platformdirs==2.5.2 pluggy==1.0.0 py==1.11.0 pyparsing==3.0.9 pytest-forked==1.4.0 tomli==2.0.1 tomlkit==0.11.1 typing-extensions==4.3.0 wrapt==1.14.1 buildstream-1.6.9/requirements/plugin-requirements.in000066400000000000000000000000051437515270000231330ustar00rootroot00000000000000arpy buildstream-1.6.9/requirements/plugin-requirements.txt000066400000000000000000000001041437515270000233440ustar00rootroot00000000000000arpy==2.3.0 ## The following requirements were added by pip freeze: buildstream-1.6.9/requirements/requirements.in000066400000000000000000000001441437515270000216430ustar00rootroot00000000000000Click grpcio >= 1.30 jinja2 >= 2.10 pluginbase protobuf >= 3.19 psutil ruamel.yaml setuptools ujson buildstream-1.6.9/requirements/requirements.txt000066400000000000000000000003761437515270000220630ustar00rootroot00000000000000click==8.1.3 grpcio==1.51.1 Jinja2==3.1.2 pluginbase==1.0.1 protobuf==4.21.4 psutil==5.9.1 ruamel.yaml==0.17.21 setuptools==44.1.1 ujson==5.4.0 ## The following requirements were added by pip freeze: MarkupSafe==2.1.1 ruamel.yaml.clib==0.2.7 six==1.16.0 buildstream-1.6.9/setup.cfg000066400000000000000000000011521437515270000156660ustar00rootroot00000000000000[versioneer] VCS = git style = pep440 versionfile_source = buildstream/_version.py versionfile_build = buildstream/_version.py tag_prefix = tag_regex = *.*.* parentdir_prefix = BuildStream- [aliases] test=pytest [tool:pytest] addopts = --verbose --basetemp ./tmp --durations=20 --timeout=1800 norecursedirs = tests/integration/project tests/plugins/bst2 integration-cache tmp __pycache__ .eggs python_files = tests/*/*.py env = D:BST_TEST_SUITE=True filterwarnings = ignore::DeprecationWarning markers = datafiles: data files for tests integration: run test only if --integration option is specified 
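# Note: the --integration option referenced by the marker above is added by the
# test suite itself (it is not a stock pytest flag); a full test run would then
# look something like `tox -- --integration`.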
buildstream-1.6.9/setup.py000077500000000000000000000261511437515270000155700ustar00rootroot00000000000000#!/usr/bin/env python3 # # Copyright (C) 2016 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tristan Van Berkom import os import re import shutil import subprocess import sys import versioneer ################################################################## # Python requirements ################################################################## REQUIRED_PYTHON_MAJOR = 3 REQUIRED_PYTHON_MINOR = 5 if sys.version_info[0] != REQUIRED_PYTHON_MAJOR or sys.version_info[1] < REQUIRED_PYTHON_MINOR: print("BuildStream requires Python >= 3.5") sys.exit(1) try: from setuptools import setup, find_packages, Command # pylint: disable=import-outside-toplevel from setuptools.command.easy_install import ScriptWriter # pylint: disable=import-outside-toplevel except ImportError: print("BuildStream requires setuptools in order to build. Install it using" " your package manager (usually python3-setuptools) or via pip (pip3" " install setuptools).") sys.exit(1) ################################################################## # Bubblewrap requirements ################################################################## REQUIRED_BWRAP_MAJOR = 0 REQUIRED_BWRAP_MINOR = 1 REQUIRED_BWRAP_PATCH = 2 def exit_bwrap(reason): print(reason + "\nBuildStream requires Bubblewrap (bwrap) for" " sandboxing the build environment. Install it using your package manager" " (usually bwrap or bubblewrap)") sys.exit(1) def bwrap_too_old(major, minor, patch): if major < REQUIRED_BWRAP_MAJOR: return True elif major == REQUIRED_BWRAP_MAJOR: if minor < REQUIRED_BWRAP_MINOR: return True elif minor == REQUIRED_BWRAP_MINOR: return patch < REQUIRED_BWRAP_PATCH else: return False else: return False def assert_bwrap(): platform = os.environ.get('BST_FORCE_BACKEND', '') or sys.platform if platform.startswith('linux'): bwrap_path = shutil.which('bwrap') if not bwrap_path: exit_bwrap("Bubblewrap not found") version_bytes = subprocess.check_output([bwrap_path, "--version"]).split()[1] version_string = str(version_bytes, "utf-8") major, minor, patch = map(int, version_string.split(".")) if bwrap_too_old(major, minor, patch): exit_bwrap("Bubblewrap too old") ########################################### # List the pre-built man pages to install # ########################################### # # Man pages are automatically generated however it was too difficult # to integrate with setuptools as a step of the build (FIXME !). # # To update the man pages in tree before a release, you need to # ensure you have the 'click_man' package installed, and run: # # python3 setup.py --command-packages=click_man.commands man_pages # # Then commit the result. 
# def list_man_pages(): bst_dir = os.path.dirname(os.path.abspath(__file__)) man_dir = os.path.join(bst_dir, 'man') man_pages = os.listdir(man_dir) return [os.path.join('man', page) for page in man_pages] ##################################################### # Conditional Checks # ##################################################### # # Because setuptools... there is no way to pass an option to # the setup.py explicitly at install time. # # So screw it, lets just use an env var. bst_install_entry_points = { 'console_scripts': [ 'bst-artifact-server = buildstream._artifactcache.casserver:server_main' ], } if not os.environ.get('BST_ARTIFACTS_ONLY', ''): assert_bwrap() bst_install_entry_points['console_scripts'] += [ 'bst = buildstream._frontend:cli' ] ##################################################### # Monkey-patching setuptools for performance # ##################################################### # # The template of easy_install.ScriptWriter is inefficient in our case as it # imports pkg_resources. Patching the template only doesn't work because of the # old string formatting used (%). This forces us to overwrite the class function # as well. # # The patch was inspired from https://github.com/ninjaaron/fast-entry_points # which we believe was also inspired from the code from `setuptools` project. TEMPLATE = '''\ # -*- coding: utf-8 -*- import sys from {0} import {1} if __name__ == '__main__': sys.exit({2}())''' # Modify the get_args() function of the ScriptWriter class # Note: the pylint no-member warning has been disabled as the functions: get_header(), # ensure_safe_name() and _get_script_args() are all members of this class. # pylint: disable=no-member @classmethod def get_args(cls, dist, header=None): if header is None: header = cls.get_header() for name, ep in dist.get_entry_map('console_scripts').items(): cls._ensure_safe_name(name) script_text = TEMPLATE.format(ep.module_name, ep.attrs[0], '.'.join(ep.attrs)) args = cls._get_script_args('console', name, header, script_text) for res in args: yield res ScriptWriter.get_args = get_args ##################################################### # gRPC command for code generation # ##################################################### class BuildGRPC(Command): """Command to generate project *_pb2.py modules from proto files.""" description = 'build gRPC protobuf modules' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): try: import grpc_tools.command # pylint: disable=import-outside-toplevel except ImportError: print("BuildStream requires grpc_tools in order to build gRPC modules.\n" "Install it via pip (pip3 install grpcio-tools).") sys.exit(1) protos_root = 'buildstream/_protos' grpc_tools.command.build_package_protos(protos_root) # Postprocess imports in generated code for root, _, files in os.walk(protos_root): for filename in files: if filename.endswith('.py'): path = os.path.join(root, filename) with open(path, 'r', encoding='utf-8') as f: code = f.read() # All protos are in buildstream._protos code = re.sub(r'^from ', r'from buildstream._protos.', code, flags=re.MULTILINE) # Except for the core google.protobuf protos code = re.sub(r'^from buildstream._protos.google.protobuf', r'from google.protobuf', code, flags=re.MULTILINE) with open(path, 'w', encoding='utf-8') as f: f.write(code) def get_cmdclass(): cmdclass = { 'build_grpc': BuildGRPC, } cmdclass.update(versioneer.get_cmdclass()) return cmdclass ##################################################### # Gather requirements # 
##################################################### with open('requirements/dev-requirements.in', encoding='utf-8') as dev_reqs: dev_requires = dev_reqs.read().splitlines() with open('requirements/requirements.in', encoding='utf-8') as install_reqs: install_requires = install_reqs.read().splitlines() ##################################################### # Prepare package description from README # ##################################################### with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'README.rst'), encoding='utf-8') as readme: long_description = readme.read() ##################################################### # Main setup() Invocation # ##################################################### setup(name='BuildStream', # Use versioneer version=versioneer.get_version(), cmdclass=get_cmdclass(), author='BuildStream Developers', author_email='dev@buildstream.apache.org', classifiers=[ 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)', 'Operating System :: POSIX', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Topic :: Software Development :: Build Tools' ], description='A framework for modelling build pipelines in YAML', license='LGPL', long_description=long_description, long_description_content_type='text/x-rst; charset=UTF-8', url='https://gitlab.com/BuildStream/buildstream', project_urls={ 'Documentation': 'https://buildstream.gitlab.io/buildstream/', 'Tracker': 'https://gitlab.com/BuildStream/buildstream/issues', 'Mailing List': 'https://lists.apache.org/list.html?dev@buildstream.apache.org' }, python_requires='~={}.{}'.format(REQUIRED_PYTHON_MAJOR, REQUIRED_PYTHON_MINOR), packages=find_packages(exclude=('tests', 'tests.*')), package_data={'buildstream': ['plugins/*/*.py', 'plugins/*/*.yaml', 'data/*.yaml', 'data/*.sh.in']}, data_files=[ # This is a weak attempt to integrate with the user nicely, # installing things outside of the python package itself with pip is # not recommended, but there seems to be no standard structure for # addressing this; so just installing this here. # # These do not get installed in developer mode (`pip install --user -e .`) # # The completions are ignored by bash unless it happens to be installed # in the right directory; this is more like a weak statement that we # attempt to install bash completion scriptlet. 
# ('share/man/man1', list_man_pages()), ('share/bash-completion/completions', [ os.path.join('buildstream', 'data', 'bst') ]) ], install_requires=install_requires, entry_points=bst_install_entry_points, setup_requires=['pytest-runner'], tests_require=dev_requires, zip_safe=False) buildstream-1.6.9/tests/000077500000000000000000000000001437515270000152105ustar00rootroot00000000000000buildstream-1.6.9/tests/__init__.py000066400000000000000000000000001437515270000173070ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/000077500000000000000000000000001437515270000177715ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/cache_size.py000066400000000000000000000051421437515270000224420ustar00rootroot00000000000000import os import pytest from unittest import mock from buildstream import _yaml from buildstream._artifactcache import CACHE_SIZE_FILE from buildstream._exceptions import ErrorDomain from tests.testutils import cli, create_element_size # XXX: Currently lacking: # * A way to check whether it's faster to read cache size on # successive invocations. # * A way to check whether the cache size file has been read. def create_project(project_dir): project_file = os.path.join(project_dir, "project.conf") project_conf = { "name": "test" } _yaml.dump(project_conf, project_file) element_name = "test.bst" create_element_size(element_name, project_dir, ".", [], 1024) def test_cache_size_roundtrip(cli, tmpdir): # Builds (to put files in the cache), then invokes buildstream again # to check nothing breaks # Create project project_dir = str(tmpdir) create_project(project_dir) # Build, to populate the cache res = cli.run(project=project_dir, args=["build", "test.bst"]) res.assert_success() # Show, to check that nothing breaks while reading cache size res = cli.run(project=project_dir, args=["show", "test.bst"]) res.assert_success() def test_cache_size_write(cli, tmpdir): # Builds (to put files in the cache), then checks a number is # written to the cache size file. 
project_dir = str(tmpdir) create_project(project_dir) # Artifact cache must be in a known place artifactdir = os.path.join(project_dir, "artifacts") cli.configure({"artifactdir": artifactdir}) # Build, to populate the cache res = cli.run(project=project_dir, args=["build", "test.bst"]) res.assert_success() # Inspect the artifact cache sizefile = os.path.join(artifactdir, CACHE_SIZE_FILE) assert os.path.isfile(sizefile) with open(sizefile, "r") as f: size_data = f.read() size = int(size_data) def test_quota_over_1024T(cli, tmpdir): KiB = 1024 MiB = (KiB * 1024) GiB = (MiB * 1024) TiB = (GiB * 1024) cli.configure({ 'cache': { 'quota': 2048 * TiB } }) project = tmpdir.join("main") os.makedirs(str(project)) _yaml.dump({'name': 'main'}, str(project.join("project.conf"))) volume_space_patch = mock.patch( "buildstream._artifactcache.ArtifactCache._get_cache_volume_size", autospec=True, return_value=(1025 * TiB, 1025 * TiB) ) with volume_space_patch: result = cli.run(project, args=["build", "file.bst"]) result.assert_main_error(ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota') buildstream-1.6.9/tests/artifactcache/config.py000066400000000000000000000126001437515270000216070ustar00rootroot00000000000000import pytest import itertools import os from buildstream._artifactcache import ArtifactCacheSpec from buildstream._artifactcache.artifactcache import _configured_remote_artifact_cache_specs from buildstream._context import Context from buildstream._project import Project from buildstream.utils import _deduplicate from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli DATA_DIR = os.path.dirname(os.path.realpath(__file__)) cache1 = ArtifactCacheSpec(url='https://example.com/cache1', push=True) cache2 = ArtifactCacheSpec(url='https://example.com/cache2', push=False) cache3 = ArtifactCacheSpec(url='https://example.com/cache3', push=False) cache4 = ArtifactCacheSpec(url='https://example.com/cache4', push=False) cache5 = ArtifactCacheSpec(url='https://example.com/cache5', push=False) cache6 = ArtifactCacheSpec(url='https://example.com/cache6', push=True) # Generate cache configuration fragments for the user config and project config files. # def configure_remote_caches(override_caches, project_caches=[], user_caches=[]): user_config = {} if len(user_caches) == 1: user_config['artifacts'] = { 'url': user_caches[0].url, 'push': user_caches[0].push, } elif len(user_caches) > 1: user_config['artifacts'] = [ {'url': cache.url, 'push': cache.push} for cache in user_caches ] if len(override_caches) == 1: user_config['projects'] = { 'test': { 'artifacts': { 'url': override_caches[0].url, 'push': override_caches[0].push, } } } elif len(override_caches) > 1: user_config['projects'] = { 'test': { 'artifacts': [ {'url': cache.url, 'push': cache.push} for cache in override_caches ] } } project_config = {} if len(project_caches) > 0: if len(project_caches) == 1: project_config.update({ 'artifacts': { 'url': project_caches[0].url, 'push': project_caches[0].push, } }) elif len(project_caches) > 1: project_config.update({ 'artifacts': [ {'url': cache.url, 'push': cache.push} for cache in project_caches ] }) return user_config, project_config # Test that parsing the remote artifact cache locations produces the # expected results. @pytest.mark.parametrize( 'override_caches, project_caches, user_caches', [ # The leftmost cache is the highest priority one in all cases here. 
pytest.param([], [], [], id='empty-config'), pytest.param([], [], [cache1, cache2], id='user-config'), pytest.param([], [cache1, cache2], [cache3], id='project-config'), pytest.param([cache1], [cache2], [cache3], id='project-override-in-user-config'), pytest.param([cache1, cache2], [cache3, cache4], [cache5, cache6], id='list-order'), pytest.param([cache1, cache2, cache1], [cache2], [cache2, cache1], id='duplicates'), ]) def test_artifact_cache_precedence(tmpdir, override_caches, project_caches, user_caches): # Produce a fake user and project config with the cache configuration. user_config, project_config = configure_remote_caches(override_caches, project_caches, user_caches) project_config['name'] = 'test' user_config_file = str(tmpdir.join('buildstream.conf')) _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file) project_dir = tmpdir.mkdir('project') project_config_file = str(project_dir.join('project.conf')) _yaml.dump(_yaml.node_sanitize(project_config), filename=project_config_file) context = Context() context.load(config=user_config_file) project = Project(str(project_dir), context) project.ensure_fully_loaded() # Use the helper from the artifactcache module to parse our configuration. parsed_cache_specs = _configured_remote_artifact_cache_specs(context, project) # Verify that it was correctly read. expected_cache_specs = list(_deduplicate(itertools.chain(override_caches, project_caches, user_caches))) assert parsed_cache_specs == expected_cache_specs # Assert that if either the client key or client cert is specified # without specifying it's counterpart, we get a comprehensive LoadError # instead of an unhandled exception. @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize('config_key, config_value', [ ('client-cert', 'client.crt'), ('client-key', 'client.key') ]) def test_missing_certs(cli, datafiles, config_key, config_value): project = os.path.join(datafiles.dirname, datafiles.basename, 'missing-certs') project_conf = { 'name': 'test', 'artifacts': { 'url': 'https://cache.example.com:12345', 'push': 'true', config_key: config_value } } project_conf_file = os.path.join(project, 'project.conf') _yaml.dump(project_conf, project_conf_file) # Use `pull` here to ensure we try to initialize the remotes, triggering the error # # This does not happen for a simple `bst show`. result = cli.run(project=project, args=['pull', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/artifactcache/expiry.py000066400000000000000000000344371437515270000216760ustar00rootroot00000000000000import os import pytest from unittest import mock from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils import cli, create_element_size, update_element_size DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "expiry" ) # Ensure that the cache successfully removes an old artifact if we do # not have enough space left. 
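# For context, the cli.configure({'cache': {'quota': ...}}) calls used
# throughout these tests correspond, roughly, to a user configuration
# (buildstream.conf) fragment along these lines (a sketch only; see
# userconfig.yaml for the authoritative schema):
#
#     cache:
#       quota: 10000000
#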
@pytest.mark.datafiles(DATA_DIR) def test_artifact_expires(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree') checkout = os.path.join(project, 'checkout') cli.configure({ 'cache': { 'quota': 10000000, } }) # Create an element that uses almost the entire cache (an empty # ostree cache starts at about ~10KiB, so we need a bit of a # buffer) create_element_size('target.bst', project, element_path, [], 6000000) res = cli.run(project=project, args=['build', 'target.bst']) res.assert_success() assert cli.get_element_state(project, 'target.bst') == 'cached' # Our cache should now be almost full. Let's create another # artifact and see if we can cause buildstream to delete the old # one. create_element_size('target2.bst', project, element_path, [], 6000000) res = cli.run(project=project, args=['build', 'target2.bst']) res.assert_success() # Check that the correct element remains in the cache assert cli.get_element_state(project, 'target.bst') != 'cached' assert cli.get_element_state(project, 'target2.bst') == 'cached' # Ensure that we don't end up deleting the whole cache (or worse) if # we try to store an artifact that is too large to fit in the quota. @pytest.mark.parametrize('size', [ # Test an artifact that is obviously too large (500000), # Test an artifact that might be too large due to slight overhead # of storing stuff in ostree (399999) ]) @pytest.mark.datafiles(DATA_DIR) def test_artifact_too_large(cli, datafiles, tmpdir, size): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cli.configure({ 'cache': { 'quota': 400000 } }) # Create an element whose artifact is too large create_element_size('target.bst', project, element_path, [], size) res = cli.run(project=project, args=['build', 'target.bst']) res.assert_main_error(ErrorDomain.STREAM, None) res.assert_task_error(ErrorDomain.ARTIFACT, 'cache-too-full') @pytest.mark.skip @pytest.mark.datafiles(DATA_DIR) def test_expiry_order(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree') checkout = os.path.join(project, 'workspace') cli.configure({ 'cache': { 'quota': 9000000 } }) # Create an artifact create_element_size('dep.bst', project, element_path, [], 2000000) res = cli.run(project=project, args=['build', 'dep.bst']) res.assert_success() # Create another artifact create_element_size('unrelated.bst', project, element_path, [], 2000000) res = cli.run(project=project, args=['build', 'unrelated.bst']) res.assert_success() # And build something else create_element_size('target.bst', project, element_path, [], 2000000) res = cli.run(project=project, args=['build', 'target.bst']) res.assert_success() create_element_size('target2.bst', project, element_path, [], 2000000) res = cli.run(project=project, args=['build', 'target2.bst']) res.assert_success() # Now extract dep.bst res = cli.run(project=project, args=['checkout', 'dep.bst', checkout]) res.assert_success() # Finally, build something that will cause the cache to overflow create_element_size('expire.bst', project, element_path, [], 2000000) res = cli.run(project=project, args=['build', 'expire.bst']) res.assert_success() # While dep.bst was the first element to be created, it should not # have been removed. 
# Note that buildstream will reduce the cache to 50% of the # original size - we therefore remove multiple elements. assert (tuple(cli.get_element_state(project, element) for element in ('unrelated.bst', 'target.bst', 'target2.bst', 'dep.bst', 'expire.bst')) == ('buildable', 'buildable', 'buildable', 'cached', 'cached', )) # Ensure that we don't accidentally remove an artifact from something # in the current build pipeline, because that would be embarassing, # wouldn't it? @pytest.mark.datafiles(DATA_DIR) def test_keep_dependencies(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cache_location = os.path.join(project, 'cache', 'artifacts', 'ostree') cli.configure({ 'cache': { 'quota': 10000000 } }) # Create a pretty big dependency create_element_size('dependency.bst', project, element_path, [], 5000000) res = cli.run(project=project, args=['build', 'dependency.bst']) res.assert_success() # Now create some other unrelated artifact create_element_size('unrelated.bst', project, element_path, [], 4000000) res = cli.run(project=project, args=['build', 'unrelated.bst']) res.assert_success() # Check that the correct element remains in the cache assert cli.get_element_state(project, 'dependency.bst') == 'cached' assert cli.get_element_state(project, 'unrelated.bst') == 'cached' # We try to build an element which depends on the LRU artifact, # and could therefore fail if we didn't make sure dependencies # aren't removed. # # Since some artifact caches may implement weak cache keys by # duplicating artifacts (bad!) we need to make this equal in size # or smaller than half the size of its dependencies. # create_element_size('target.bst', project, element_path, ['dependency.bst'], 2000000) res = cli.run(project=project, args=['build', 'target.bst']) res.assert_success() assert cli.get_element_state(project, 'unrelated.bst') != 'cached' assert cli.get_element_state(project, 'dependency.bst') == 'cached' assert cli.get_element_state(project, 'target.bst') == 'cached' # Assert that we never delete a dependency required for a build tree @pytest.mark.datafiles(DATA_DIR) def test_never_delete_required(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cli.configure({ 'cache': { 'quota': 10000000 }, 'scheduler': { 'builders': 1 } }) # Create a linear build tree create_element_size('dep1.bst', project, element_path, [], 8000000) create_element_size('dep2.bst', project, element_path, ['dep1.bst'], 8000000) create_element_size('dep3.bst', project, element_path, ['dep2.bst'], 8000000) create_element_size('target.bst', project, element_path, ['dep3.bst'], 8000000) # We try to build this pipeline, but it's too big for the # cache. Since all elements are required, the build should fail. res = cli.run(project=project, args=['build', 'target.bst']) res.assert_main_error(ErrorDomain.STREAM, None) res.assert_task_error(ErrorDomain.ARTIFACT, 'cache-too-full') # Only the first artifact fits in the cache, but we expect # that the first *two* artifacts will be cached. # # This is because after caching the first artifact we must # proceed to build the next artifact, and we cannot really # know how large an artifact will be until we try to cache it. # # In this case, we deem it more acceptable to not delete an # artifact which caused the cache to outgrow the quota. 
# # Note that this test only works because we have forced # the configuration to build one element at a time, in real # life there may potentially be N-builders cached artifacts # which exceed the quota # assert cli.get_element_state(project, 'dep1.bst') == 'cached' assert cli.get_element_state(project, 'dep2.bst') == 'cached' assert cli.get_element_state(project, 'dep3.bst') != 'cached' assert cli.get_element_state(project, 'target.bst') != 'cached' # Assert that we never delete a dependency required for a build tree, # even when the artifact cache was previously populated with # artifacts we do not require, and the new build is run with dynamic tracking. # @pytest.mark.datafiles(DATA_DIR) def test_never_delete_required_track(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cli.configure({ 'cache': { 'quota': 10000000 }, 'scheduler': { 'builders': 1 } }) # Create a linear build tree repo_dep1 = create_element_size('dep1.bst', project, element_path, [], 2000000) repo_dep2 = create_element_size('dep2.bst', project, element_path, ['dep1.bst'], 2000000) repo_dep3 = create_element_size('dep3.bst', project, element_path, ['dep2.bst'], 2000000) repo_target = create_element_size('target.bst', project, element_path, ['dep3.bst'], 2000000) # This should all fit into the artifact cache res = cli.run(project=project, args=['build', 'target.bst']) res.assert_success() # They should all be cached assert cli.get_element_state(project, 'dep1.bst') == 'cached' assert cli.get_element_state(project, 'dep2.bst') == 'cached' assert cli.get_element_state(project, 'dep3.bst') == 'cached' assert cli.get_element_state(project, 'target.bst') == 'cached' # Now increase the size of all the elements # update_element_size('dep1.bst', project, repo_dep1, 8000000) update_element_size('dep2.bst', project, repo_dep2, 8000000) update_element_size('dep3.bst', project, repo_dep3, 8000000) update_element_size('target.bst', project, repo_target, 8000000) # Now repeat the same test we did in test_never_delete_required(), # except this time let's add dynamic tracking # res = cli.run(project=project, args=['build', '--track-all', 'target.bst']) res.assert_main_error(ErrorDomain.STREAM, None) res.assert_task_error(ErrorDomain.ARTIFACT, 'cache-too-full') # Expect the same result that we did in test_never_delete_required() # assert cli.get_element_state(project, 'dep1.bst') == 'cached' assert cli.get_element_state(project, 'dep2.bst') == 'cached' assert cli.get_element_state(project, 'dep3.bst') != 'cached' assert cli.get_element_state(project, 'target.bst') != 'cached' # Ensure that only valid cache quotas make it through the loading # process. # # This test virtualizes the condition to assume a storage volume # has 10K total disk space, and 6K of it is already in use (not # including any space used by the artifact cache). 
# @pytest.mark.xfail(reason="unittest.mock() not supported when running tests in subprocesses") @pytest.mark.parametrize("quota,err_domain,err_reason", [ # Valid configurations ("1", 'success', None), ("1K", 'success', None), ("50%", 'success', None), ("infinity", 'success', None), ("0", 'success', None), # Invalid configurations ("-1", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA), ("pony", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA), ("200%", ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA), # Not enough space for these caches ("7K", ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota'), ("70%", ErrorDomain.ARTIFACT, 'insufficient-storage-for-quota') ]) @pytest.mark.datafiles(DATA_DIR) def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, err_domain, err_reason): project = os.path.join(datafiles.dirname, datafiles.basename) os.makedirs(os.path.join(project, 'elements')) cli.configure({ 'cache': { 'quota': quota, } }) # We patch how we get space information # Ideally we would instead create a FUSE device on which we control # everything. # If the value is a percentage, we fix the current values to take into # account the block size, since this is important in how we compute the size if quota.endswith("%"): # We set the used space at 60% of total space stats = os.statvfs(".") free_space = 0.6 * stats.f_bsize * stats.f_blocks total_space = stats.f_bsize * stats.f_blocks else: free_space = 6000 total_space = 10000 volume_space_patch = mock.patch( "buildstream._artifactcache.ArtifactCache._get_cache_volume_size", autospec=True, return_value=(total_space, free_space), ) cache_size_patch = mock.patch( "buildstream._artifactcache.ArtifactCache.get_cache_size", autospec=True, return_value=0, ) with volume_space_patch, cache_size_patch: res = cli.run(project=project, args=['workspace', 'list']) if err_domain == 'success': res.assert_success() else: res.assert_main_error(err_domain, err_reason) @pytest.mark.datafiles(DATA_DIR) def test_extract_expiry(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' cli.configure({ 'cache': { 'quota': 10000000, } }) create_element_size('target.bst', project, element_path, [], 6000000) res = cli.run(project=project, args=['build', 'target.bst']) res.assert_success() assert cli.get_element_state(project, 'target.bst') == 'cached' # Force creating extract res = cli.run(project=project, args=['checkout', 'target.bst', os.path.join(str(tmpdir), 'checkout')]) res.assert_success() extractdir = os.path.join(project, 'cache', 'artifacts', 'extract', 'test', 'target') extracts = os.listdir(extractdir) assert(len(extracts) == 1) extract = os.path.join(extractdir, extracts[0]) # Remove target.bst from artifact cache create_element_size('target2.bst', project, element_path, [], 6000000) res = cli.run(project=project, args=['build', 'target2.bst']) res.assert_success() assert cli.get_element_state(project, 'target.bst') != 'cached' # Now the extract should be removed. 
assert not os.path.exists(extract) buildstream-1.6.9/tests/artifactcache/expiry/000077500000000000000000000000001437515270000213115ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/expiry/project.conf000066400000000000000000000004111437515270000236220ustar00rootroot00000000000000# Project config for cache expiry test name: test element-path: elements aliases: project_dir: file://{project_dir} options: linux: type: bool description: Whether to expect a linux platform default: True split-rules: test: - | /tests/* buildstream-1.6.9/tests/artifactcache/junctions.py000066400000000000000000000062771437515270000223730ustar00rootroot00000000000000import os import shutil import pytest from tests.testutils import cli, create_artifact_share from buildstream import _yaml DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "junctions", ) # Assert that a given artifact is in the share # def assert_shared(cli, share, project_name, project, element_name): # NOTE: 'test' here is the name of the project # specified in the project.conf we are testing with. # cache_key = cli.get_element_key(project, element_name) if not share.has_artifact(project_name, element_name, cache_key): raise AssertionError("Artifact share at {} does not contain the expected element {}" .format(share.repo, element_name)) def project_set_artifacts(project, url): project_conf_file = os.path.join(project, 'project.conf') project_config = _yaml.load(project_conf_file) project_config.update({ 'artifacts': { 'url': url, 'push': True } }) _yaml.dump(_yaml.node_sanitize(project_config), filename=project_conf_file) @pytest.mark.datafiles(DATA_DIR) def test_push_pull(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'foo') base_project = os.path.join(str(project), 'base') with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-foo')) as share,\ create_artifact_share(os.path.join(str(tmpdir), 'artifactshare-base')) as base_share: # First build it without the artifact cache configured result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 # Assert that we are now cached locally state = cli.get_element_state(project, 'target.bst') assert state == 'cached' state = cli.get_element_state(base_project, 'target.bst') assert state == 'cached' project_set_artifacts(project, share.repo) project_set_artifacts(base_project, base_share.repo) # Now try bst push result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst']) assert result.exit_code == 0 # And finally assert that the artifacts are in the right shares assert_shared(cli, share, 'foo', project, 'target.bst') assert_shared(cli, base_share, 'base', base_project, 'target.bst') # Now we've pushed, delete the user's local artifact cache # directory and try to redownload it from the share # artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) # Assert that nothing is cached locally anymore state = cli.get_element_state(project, 'target.bst') assert state != 'cached' state = cli.get_element_state(base_project, 'target.bst') assert state != 'cached' # Now try bst pull result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst']) assert result.exit_code == 0 # And assert that they are again in the local cache, without having built state = cli.get_element_state(project, 'target.bst') assert state == 'cached' state = cli.get_element_state(base_project, 'target.bst') assert state == 'cached' 
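# For context, project_set_artifacts() above rewrites project.conf so that it
# carries an artifact cache section roughly of the following shape; the URL is
# whatever the temporary share reports and is shown here purely as an
# illustration:
#
#     artifacts:
#       url: https://share.example.com/
#       push: true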
buildstream-1.6.9/tests/artifactcache/junctions/000077500000000000000000000000001437515270000220055ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/junctions/foo/000077500000000000000000000000001437515270000225705ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/junctions/foo/.bst/000077500000000000000000000000001437515270000234365ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/junctions/foo/.bst/workspaces.yml000066400000000000000000000000001437515270000263300ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/junctions/foo/app.bst000066400000000000000000000001511437515270000240570ustar00rootroot00000000000000kind: import sources: - kind: local path: foo.txt depends: - junction: base.bst filename: target.bst buildstream-1.6.9/tests/artifactcache/junctions/foo/base.bst000066400000000000000000000000631437515270000242130ustar00rootroot00000000000000kind: junction sources: - kind: local path: base buildstream-1.6.9/tests/artifactcache/junctions/foo/base/000077500000000000000000000000001437515270000235025ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/junctions/foo/base/base.txt000066400000000000000000000000241437515270000251510ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/artifactcache/junctions/foo/base/project.conf000066400000000000000000000000131437515270000260110ustar00rootroot00000000000000name: base buildstream-1.6.9/tests/artifactcache/junctions/foo/base/target.bst000066400000000000000000000000651437515270000255030ustar00rootroot00000000000000kind: import sources: - kind: local path: base.txt buildstream-1.6.9/tests/artifactcache/junctions/foo/foo.txt000066400000000000000000000000041437515270000241060ustar00rootroot00000000000000foo buildstream-1.6.9/tests/artifactcache/junctions/foo/project.conf000066400000000000000000000000121437515270000250760ustar00rootroot00000000000000name: foo buildstream-1.6.9/tests/artifactcache/junctions/foo/target.bst000066400000000000000000000001131437515270000245630ustar00rootroot00000000000000kind: stack depends: - junction: base.bst filename: target.bst - app.bst buildstream-1.6.9/tests/artifactcache/missing-certs/000077500000000000000000000000001437515270000225605ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/missing-certs/certificates/000077500000000000000000000000001437515270000252255ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/missing-certs/certificates/client.crt000066400000000000000000000000001437515270000272030ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/missing-certs/certificates/client.key000066400000000000000000000000001437515270000272030ustar00rootroot00000000000000buildstream-1.6.9/tests/artifactcache/missing-certs/element.bst000066400000000000000000000000201437515270000247130ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/cachekey/000077500000000000000000000000001437515270000167645ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/__init__.py000066400000000000000000000000001437515270000210630ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/cachekey.py000066400000000000000000000166721437515270000211260ustar00rootroot00000000000000# Cache Key Test Instructions # # Adding Tests # ~~~~~~~~~~~~ # Cache key tests are bst element files created created in such a way # to exercise a feature which would cause the cache key for an element # or source to be calculated differently. 
# # Adding tests is a matter to adding files to the project found in the # 'project' subdirectory of this test case. Any files should be depended # on by the main `target.bst` in the toplevel of the project. # # One test is comprised of one `.bst` file and one # '.expected' file in the same directory, containing the # expected cache key. # # Running the cache key test once will reveal what the new element's # cache key should be and will also cause the depending elements to # change cache keys. # # # Updating tests # ~~~~~~~~~~~~~~ # When a test fails it will come with a summary of which cache keys # in the test project have mismatched. # # Also, in the case that the tests have changed or the artifact # versions have changed in some way and the test needs to be # updated; the expected cache keys for the given run are dumped to # '.actual' files beside the corresponding # '.expected' files they mismatched with, all inside # a temporary test directory. # # One can now easily copy over the .actual files from a failed # run over to the corresponding .expected source files and commit # the result. # from tests.testutils.runcli import cli from tests.testutils.site import HAVE_BZR, HAVE_GIT, HAVE_OSTREE, IS_LINUX from buildstream.plugin import CoreWarnings from buildstream import _yaml import os from collections import OrderedDict import pytest ############################################## # Some Helpers # ############################################## # Get whole filename in the temp project with # the option of changing the .bst suffix to something else # def element_filename(project_dir, element_name, alt_suffix=None): if alt_suffix: # Just in case... assert(element_name.endswith('.bst')) # Chop off the 'bst' in '.bst' and add the new suffix element_name = element_name[:-3] element_name = element_name + alt_suffix return os.path.join(project_dir, element_name) # Returns an OrderedDict of element names # and their cache keys # def parse_output_keys(output): actual_keys = OrderedDict() lines = output.splitlines() for line in lines: split = line.split("::") name = split[0] key = split[1] actual_keys[name] = key return actual_keys # Returns an OrderedDict of element names # and their cache keys # def load_expected_keys(project_dir, actual_keys, raise_error=True): expected_keys = OrderedDict() for element_name in actual_keys: expected = element_filename(project_dir, element_name, 'expected') try: with open(expected, 'r') as f: expected_key = f.read() expected_key = expected_key.strip() except FileNotFoundError as e: expected_key = None if raise_error: raise Exception("Cache key test needs update, " + "expected file {} not found.\n\n".format(expected) + "Use tests/cachekey/update.py to automatically " + "update this test case") expected_keys[element_name] = expected_key return expected_keys def assert_cache_keys(project_dir, output): # Read in the expected keys from the cache key test directory # and parse the actual keys from the `bst show` output # actual_keys = parse_output_keys(output) expected_keys = load_expected_keys(project_dir, actual_keys) mismatches = [] for element_name in actual_keys: if actual_keys[element_name] != expected_keys[element_name]: mismatches.append(element_name) if mismatches: info = "" for element_name in mismatches: info += " Element: {}\n".format(element_name) + \ " Expected: {}\n".format(expected_keys[element_name]) + \ " Actual: {}\n".format(actual_keys[element_name]) raise AssertionError("Cache key mismatches occurred:\n{}\n".format(info) + "Use 
tests/cachekey/update.py to automatically " + "update this test case") ############################################## # Test Entry Point # ############################################## # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) # The cache key test uses a project which exercises all plugins, # so we cant run it at all if we dont have them installed. # @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.skipif(HAVE_BZR is False, reason="bzr is not available") @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.skipif(HAVE_OSTREE is False, reason="ostree is not available") @pytest.mark.datafiles(DATA_DIR) def test_cache_key(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) # Workaround bug in recent versions of setuptools: newer # versions of setuptools fail to preserve symbolic links # when creating a source distribution, causing this test # to fail from a dist tarball. goodbye_link = os.path.join(project, 'files', 'local', 'usr', 'bin', 'goodbye') os.unlink(goodbye_link) os.symlink('hello', goodbye_link) result = cli.run(project=project, silent=True, args=[ 'show', '--format', '%{name}::%{full-key}', 'target.bst' ]) result.assert_success() assert_cache_keys(project, result.output) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("first_warnings, second_warnings, identical_keys", [ [[], [], True], [[], [CoreWarnings.REF_NOT_IN_TRACK], False], [[CoreWarnings.REF_NOT_IN_TRACK], [], False], [[CoreWarnings.REF_NOT_IN_TRACK], [CoreWarnings.REF_NOT_IN_TRACK], True], [[CoreWarnings.REF_NOT_IN_TRACK, CoreWarnings.OVERLAPS], [CoreWarnings.OVERLAPS, CoreWarnings.REF_NOT_IN_TRACK], True], ]) def test_cache_key_fatal_warnings(cli, tmpdir, first_warnings, second_warnings, identical_keys): # Builds project, Runs bst show, gathers cache keys def run_get_cache_key(project_name, warnings): config = { 'name': project_name, 'element-path': 'elements', 'fatal-warnings': warnings } project_dir = tmpdir.mkdir(project_name) project_config_file = str(project_dir.join('project.conf')) _yaml.dump(_yaml.node_sanitize(config), filename=project_config_file) elem_dir = project_dir.mkdir('elements') element_file = str(elem_dir.join('stack.bst')) _yaml.dump({'kind': 'stack'}, filename=element_file) result = cli.run(project=str(project_dir), args=[ 'show', '--format', '%{name}::%{full-key}', 'stack.bst' ]) return result.output # Returns true if all keys are identical def compare_cache_keys(first_keys, second_keys): return not any((x != y for x, y in zip(first_keys, second_keys))) first_keys = run_get_cache_key("first", first_warnings) second_keys = run_get_cache_key("second", second_warnings) assert compare_cache_keys(first_keys, second_keys) == identical_keys buildstream-1.6.9/tests/cachekey/project/000077500000000000000000000000001437515270000204325ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/.bst/000077500000000000000000000000001437515270000213005ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/.bst/workspaces.yml000066400000000000000000000000001437515270000241720ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/elements/000077500000000000000000000000001437515270000222465ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/elements/build1.bst000066400000000000000000000005401437515270000241370ustar00rootroot00000000000000# The cache key calculation algorithm is the same # for all 
build elements, better to only have one test # for it and without too much noise from changes which # would cause is to change the test case. # # Lets stick with manual, the most basic build element. kind: manual sources: - kind: local path: files/local config: build-commands: - make buildstream-1.6.9/tests/cachekey/project/elements/build1.expected000066400000000000000000000001001437515270000251400ustar00rootroot0000000000000036d193b074051c3315afc943cdf4cfb835becac3ea9f18cd1458130bb39e320cbuildstream-1.6.9/tests/cachekey/project/elements/build2.bst000066400000000000000000000003511437515270000241400ustar00rootroot00000000000000# This tests that sandbox build-uid / build-gid # contributions to the cache key do not regress. # kind: manual sources: - kind: local path: files/local config: build-commands: - make sandbox: build-uid: 20 build-gid: 20 buildstream-1.6.9/tests/cachekey/project/elements/build2.expected000066400000000000000000000001001437515270000251410ustar00rootroot000000000000007104190005529a3fe8a1ae6beb794b6a371312b915b35bec360af40a52bd3778buildstream-1.6.9/tests/cachekey/project/elements/compose1.bst000066400000000000000000000001061437515270000245030ustar00rootroot00000000000000kind: compose depends: - filename: elements/import1.bst type: build buildstream-1.6.9/tests/cachekey/project/elements/compose1.expected000066400000000000000000000001001437515270000255060ustar00rootroot000000000000002d251a155b377f20923dc66092dd7d942f1a0343a063d86341f8e761caa50bdbbuildstream-1.6.9/tests/cachekey/project/elements/compose2.bst000066400000000000000000000001421437515270000245040ustar00rootroot00000000000000kind: compose depends: - filename: elements/import1.bst type: build config: integrate: False buildstream-1.6.9/tests/cachekey/project/elements/compose2.expected000066400000000000000000000001001437515270000255070ustar00rootroot000000000000007dca9347141d00754c0cc2bbe67695062331002a6a21cd9969435fb70a45600abuildstream-1.6.9/tests/cachekey/project/elements/compose3.bst000066400000000000000000000001501437515270000245040ustar00rootroot00000000000000kind: compose depends: - filename: elements/import1.bst type: build config: include-orphans: False buildstream-1.6.9/tests/cachekey/project/elements/compose3.expected000066400000000000000000000001001437515270000255100ustar00rootroot00000000000000b7c821b8fd006069ae2b55b855be9dde6f3f3ce141dd546a2bce64b0bde9e93abuildstream-1.6.9/tests/cachekey/project/elements/compose4.bst000066400000000000000000000001601437515270000245060ustar00rootroot00000000000000kind: compose depends: - filename: elements/import1.bst type: build config: include: - runtime - devel buildstream-1.6.9/tests/cachekey/project/elements/compose4.expected000066400000000000000000000001001437515270000255110ustar00rootroot000000000000007df77bfd64828e4d3a6f08eaf5760f85526f6c1adee82b61701f26c4a3432a86buildstream-1.6.9/tests/cachekey/project/elements/compose5.bst000066400000000000000000000001441437515270000245110ustar00rootroot00000000000000kind: compose depends: - filename: elements/import1.bst type: build config: exclude: - debug buildstream-1.6.9/tests/cachekey/project/elements/compose5.expected000066400000000000000000000001001437515270000255120ustar00rootroot00000000000000504fe6417dc90a3c3dc7d809745b1f7a3cc2f3c3a1c2408408d871be027d49c7buildstream-1.6.9/tests/cachekey/project/elements/import1.bst000066400000000000000000000000701437515270000243500ustar00rootroot00000000000000kind: import sources: - kind: local path: files/local 
buildstream-1.6.9/tests/cachekey/project/elements/import1.expected000066400000000000000000000001001437515270000253530ustar00rootroot00000000000000f915b632d26b2e2ccc339677635a9783d0f00dd6e3cdc2fee08cc7b3e649a3a0buildstream-1.6.9/tests/cachekey/project/elements/import2.bst000066400000000000000000000001241437515270000243510ustar00rootroot00000000000000kind: import sources: - kind: local path: files/local config: source: /usr/bin buildstream-1.6.9/tests/cachekey/project/elements/import2.expected000066400000000000000000000001001437515270000253540ustar00rootroot0000000000000058f07139c4b6c814f04131b5d0f1dc7ebfc5c9db8669be59ee7c58e3b8fe8111buildstream-1.6.9/tests/cachekey/project/elements/import3.bst000066400000000000000000000001201437515270000243460ustar00rootroot00000000000000kind: import sources: - kind: local path: files/local config: target: /opt buildstream-1.6.9/tests/cachekey/project/elements/import3.expected000066400000000000000000000001001437515270000253550ustar00rootroot0000000000000076c6de73910086f2697ea748757a4f5e67c6e069131f28e3e54c380a7d6e8e18buildstream-1.6.9/tests/cachekey/project/elements/script1.bst000066400000000000000000000001631437515270000243450ustar00rootroot00000000000000kind: script depends: - filename: elements/import1.bst type: build config: commands: - echo "Hello World !" buildstream-1.6.9/tests/cachekey/project/elements/script1.expected000066400000000000000000000001001437515270000253450ustar00rootroot000000000000007e4e7e8ec36d9b661b357637cb4d2fb9adc37481764d645382d6b0ccb6ad7c71buildstream-1.6.9/tests/cachekey/project/files/000077500000000000000000000000001437515270000215345ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/local/000077500000000000000000000000001437515270000226265ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/local/etc/000077500000000000000000000000001437515270000234015ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/local/etc/hello.conf000066400000000000000000000000201437515270000253430ustar00rootroot00000000000000message = Hello buildstream-1.6.9/tests/cachekey/project/files/local/etc/ponystyle.conf000066400000000000000000000000051437515270000263110ustar00rootroot00000000000000pink buildstream-1.6.9/tests/cachekey/project/files/local/usr/000077500000000000000000000000001437515270000234375ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/local/usr/bin/000077500000000000000000000000001437515270000242075ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/local/usr/bin/goodbye000077700000000000000000000000001437515270000266032helloustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/local/usr/bin/hello000077500000000000000000000000341437515270000252350ustar00rootroot00000000000000#!/bin/bash echo "Hello !" buildstream-1.6.9/tests/cachekey/project/files/patches/000077500000000000000000000000001437515270000231635ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/files/patches/patch.diff000066400000000000000000000001451437515270000251140ustar00rootroot00000000000000--- a/usr/bin/hello +++ b/usr/bin/hello @@ -1,3 +1,3 @@ #!/bin/bash -echo "Hello !" +echo "Bye !" 
buildstream-1.6.9/tests/cachekey/project/project.conf000066400000000000000000000001371437515270000227500ustar00rootroot00000000000000# Project config for cache key test name: cachekey aliases: upstream: https://up.stream.org buildstream-1.6.9/tests/cachekey/project/sources/000077500000000000000000000000001437515270000221155ustar00rootroot00000000000000buildstream-1.6.9/tests/cachekey/project/sources/bzr1.bst000066400000000000000000000001361437515270000235050ustar00rootroot00000000000000kind: import sources: - kind: bzr url: https://launchpad.net/bzr track: trunk ref: 6622 buildstream-1.6.9/tests/cachekey/project/sources/bzr1.expected000066400000000000000000000001001437515270000245050ustar00rootroot00000000000000410a5606ccc28108db7bb9a57e017ede7435dbad27a8006e1c791b3253e9a56fbuildstream-1.6.9/tests/cachekey/project/sources/git1.bst000066400000000000000000000001721437515270000234730ustar00rootroot00000000000000kind: import sources: - kind: git url: https://example.com/git/repo.git ref: 6ac68af3e80b7b17c23a3c65233043550a7fa685 buildstream-1.6.9/tests/cachekey/project/sources/git1.expected000066400000000000000000000001001437515270000244730ustar00rootroot000000000000003dafe98cceea0434cf8a0317db110ce8d7715cf55148f7d01db62c87513f22a5buildstream-1.6.9/tests/cachekey/project/sources/git2.bst000066400000000000000000000002661437515270000235000ustar00rootroot00000000000000kind: import sources: - kind: git url: https://example.com/git/repo.git ref: 6ac68af3e80b7b17c23a3c65233043550a7fa685 submodules: plugins/foo: url: upstream:foo.git buildstream-1.6.9/tests/cachekey/project/sources/git2.expected000066400000000000000000000001001437515270000244740ustar00rootroot000000000000005170e2d5e00d5c82089e345b496a302ab3f718422557ef993ab6aa21ca6957f6buildstream-1.6.9/tests/cachekey/project/sources/git3.expected000066400000000000000000000001001437515270000244750ustar00rootroot00000000000000f2c3ab6ee644ba9507f3ddd33880fa78f92c80ff872bc1ad676e0a5c40ff3493buildstream-1.6.9/tests/cachekey/project/sources/local1.bst000066400000000000000000000000701437515270000237770ustar00rootroot00000000000000kind: import sources: - kind: local path: files/local buildstream-1.6.9/tests/cachekey/project/sources/local1.expected000066400000000000000000000001001437515270000250020ustar00rootroot00000000000000f915b632d26b2e2ccc339677635a9783d0f00dd6e3cdc2fee08cc7b3e649a3a0buildstream-1.6.9/tests/cachekey/project/sources/local2.bst000066400000000000000000000001111437515270000237740ustar00rootroot00000000000000kind: import sources: - kind: local path: files/local directory: opt buildstream-1.6.9/tests/cachekey/project/sources/local2.expected000066400000000000000000000001001437515270000250030ustar00rootroot0000000000000033239f183d3cbaca957e9693637fca6cb91053d1c22f5c4dea577b50d0acc23dbuildstream-1.6.9/tests/cachekey/project/sources/ostree1.bst000066400000000000000000000002451437515270000242120ustar00rootroot00000000000000kind: import sources: - kind: ostree url: https://example.com/repo ref: ccc885b96749f9d1774c7fa0c3262a9a3694e2d2643d1f8d420f5d23adf5db48 track: testing/x86_64 buildstream-1.6.9/tests/cachekey/project/sources/ostree1.expected000066400000000000000000000001001437515270000252110ustar00rootroot000000000000008bccdd60fc3a89eb9ed807dd31e868ea48c98411c564ca4aefc354b9956675debuildstream-1.6.9/tests/cachekey/project/sources/patch1.bst000066400000000000000000000001051437515270000240030ustar00rootroot00000000000000kind: import sources: - kind: patch path: files/patches/patch.diff 
buildstream-1.6.9/tests/cachekey/project/sources/patch1.expected000066400000000000000000000001001437515270000250070ustar00rootroot000000000000004408a9652884249b5b709d8defd726d3f5aa3279418fd303180bc12d084172efbuildstream-1.6.9/tests/cachekey/project/sources/patch2.bst000066400000000000000000000001531437515270000240070ustar00rootroot00000000000000kind: import sources: - kind: patch path: files/patches/patch.diff directory: usr/bin strip-level: 1 buildstream-1.6.9/tests/cachekey/project/sources/patch2.expected000066400000000000000000000001001437515270000250100ustar00rootroot00000000000000af4213e2a92ed6e447f7a932fffc36ace134c03ee6ddb2eca12f5feb194cc625buildstream-1.6.9/tests/cachekey/project/sources/patch3.bst000066400000000000000000000001531437515270000240100ustar00rootroot00000000000000kind: import sources: - kind: patch path: files/patches/patch.diff directory: usr/bin strip-level: 3 buildstream-1.6.9/tests/cachekey/project/sources/patch3.expected000066400000000000000000000001001437515270000250110ustar00rootroot00000000000000f40eb8bab43e2ff8deeb6b5d360d911506be914ef302d8a1de5a431f3d32c222buildstream-1.6.9/tests/cachekey/project/sources/pip1.bst000066400000000000000000000003701437515270000235000ustar00rootroot00000000000000kind: import sources: - kind: git url: https://example.com/foo/foobar.git ref: b99955530263172ed1beae52aed7a33885ef781f - kind: pip url: https://pypi.example.com/simple packages: - horses - ponies ref: 'horses==0.0.1\nponies==0.0.2' buildstream-1.6.9/tests/cachekey/project/sources/pip1.expected000066400000000000000000000001001437515270000245000ustar00rootroot00000000000000492115a2d3a54eb5fd4a301f1cdfd328b2d3dd918e709386cea8133f8cac53ebbuildstream-1.6.9/tests/cachekey/project/sources/remote1.expected000066400000000000000000000001001437515270000252030ustar00rootroot00000000000000009d779779361f1546d808945aaa05765e7d17b6b8b7a383d658ceaeddcff0dbbuildstream-1.6.9/tests/cachekey/project/sources/remote2.expected000066400000000000000000000001001437515270000252040ustar00rootroot00000000000000d4f791a9185a4f2c18c3046c41efb26c0a833b73fc52abb3cddd4fe3ac130196buildstream-1.6.9/tests/cachekey/project/sources/tar1.bst000066400000000000000000000002431437515270000234750ustar00rootroot00000000000000kind: import sources: - kind: tar url: https://example.com/releases/1.4/foo-1.4.5.tar.gz ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b buildstream-1.6.9/tests/cachekey/project/sources/tar1.expected000066400000000000000000000001001437515270000244760ustar00rootroot00000000000000f2360a3551f3cc5293cf9939773102bca6b6d0aa261bff78ef5f69aac3265afbbuildstream-1.6.9/tests/cachekey/project/sources/tar2.bst000066400000000000000000000002631437515270000235000ustar00rootroot00000000000000kind: import sources: - kind: tar url: https://example.com/releases/1.4/foo-1.4.5.tar.gz ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b base-dir: src buildstream-1.6.9/tests/cachekey/project/sources/tar2.expected000066400000000000000000000001001437515270000244770ustar00rootroot0000000000000015abf0e81387289c4c1635b386afcb162a3672ed2f757266cc6a6273170ac608buildstream-1.6.9/tests/cachekey/project/sources/zip1.bst000066400000000000000000000002401437515270000235060ustar00rootroot00000000000000kind: import sources: - kind: zip url: https://example.com/releases/1.4/foo-1.4.5.zip ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b 
buildstream-1.6.9/tests/cachekey/project/sources/zip1.expected000066400000000000000000000001001437515270000245120ustar00rootroot000000000000005a6ac152f8bd88add55172dca76225c1cf01c821e1037184ff54f06baf7fb028buildstream-1.6.9/tests/cachekey/project/sources/zip2.bst000066400000000000000000000002601437515270000235110ustar00rootroot00000000000000kind: import sources: - kind: zip url: https://example.com/releases/1.4/foo-1.4.5.zip ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b base-dir: src buildstream-1.6.9/tests/cachekey/project/sources/zip2.expected000066400000000000000000000001001437515270000245130ustar00rootroot000000000000000016e926397c238c574679cfa4c21b44c4c56cbd1328e92327d088fa319eb206buildstream-1.6.9/tests/cachekey/project/target.bst000066400000000000000000000012031437515270000224260ustar00rootroot00000000000000kind: stack description: | This is the main entry point including cases in the cache key test. depends: - sources/bzr1.bst - sources/git1.bst - sources/git2.bst - sources/local1.bst - sources/local2.bst - sources/ostree1.bst - sources/patch1.bst - sources/patch2.bst - sources/patch3.bst - sources/pip1.bst - sources/tar1.bst - sources/tar2.bst - sources/zip1.bst - sources/zip2.bst - elements/build1.bst - elements/build2.bst - elements/compose1.bst - elements/compose2.bst - elements/compose3.bst - elements/compose4.bst - elements/compose5.bst - elements/import1.bst - elements/import2.bst - elements/import3.bst - elements/script1.bst buildstream-1.6.9/tests/cachekey/project/target.expected000066400000000000000000000001001437515270000234320ustar00rootroot0000000000000006c4372d43e3ac92c5948af6b24b4451a9411a9d6b35b3ea8fc58ce139801c0dbuildstream-1.6.9/tests/cachekey/update.py000077500000000000000000000043201437515270000206220ustar00rootroot00000000000000#!/usr/bin/env python3 # # Automatically create or update the .expected files in the # cache key test directory. # # Simply run without any arguments, from anywhere, e.g.: # # ./tests/cachekey/update.py # # After this, add any files which were newly created and commit # the result in order to adjust the cache key test to changed # keys. # import os import tempfile from tests.testutils.runcli import Cli # This weird try / except is needed, because this will be imported differently # when pytest runner imports them vs when you run the updater directly from # this directory. 
try: from cachekey import element_filename, parse_output_keys, load_expected_keys except ImportError: from .cachekey import element_filename, parse_output_keys, load_expected_keys # Project directory PROJECT_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) def write_expected_key(element_name, actual_key): expected_file = element_filename(PROJECT_DIR, element_name, 'expected') with open(expected_file, 'w') as f: f.write(actual_key) def update_keys(): with tempfile.TemporaryDirectory(dir=PROJECT_DIR) as tmpdir: directory = os.path.join(tmpdir, 'cache') os.makedirs(directory) cli = Cli(directory, verbose=False) # Run bst show result = cli.run(project=PROJECT_DIR, silent=True, args=[ '--no-colors', 'show', '--format', '%{name}::%{full-key}', 'target.bst' ]) # Load the actual keys, and the expected ones if they exist actual_keys = parse_output_keys(result.output) expected_keys = load_expected_keys(PROJECT_DIR, actual_keys, raise_error=False) for element_name in actual_keys: expected = element_filename(PROJECT_DIR, element_name, 'expected') if actual_keys[element_name] != expected_keys[element_name]: if not expected_keys[element_name]: print("Creating new expected file: {}".format(expected)) else: print("Updating expected file: {}".format(expected)) write_expected_key(element_name, actual_keys[element_name]) if __name__ == '__main__': update_keys() buildstream-1.6.9/tests/completions/000077500000000000000000000000001437515270000175445ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/completions.py000066400000000000000000000210211437515270000224460ustar00rootroot00000000000000import os import pytest from tests.testutils import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) MAIN_COMMANDS = [ 'build ', 'checkout ', 'fetch ', 'help ', 'init ', 'pull ', 'push ', 'shell ', 'show ', 'source-bundle ', 'track ', 'workspace ' ] MAIN_OPTIONS = [ "--builders ", "-c ", "-C ", "--colors ", "--config ", "--debug ", "--default-mirror ", "--directory ", "--error-lines ", "--fetchers ", "--log-file ", "--max-jobs ", "--message-lines ", "--network-retries ", "--no-colors ", "--no-debug ", "--no-interactive ", "--no-strict ", "--no-verbose ", "-o ", "--option ", "--on-error ", "--pushers ", "--strict ", "--verbose ", "--version ", ] WORKSPACE_COMMANDS = [ 'close ', 'list ', 'open ', 'reset ' ] PROJECT_ELEMENTS = [ "compose-all.bst", "compose-exclude-dev.bst", "compose-include-bin.bst", "import-bin.bst", "import-dev.bst", "target.bst" ] def assert_completion(cli, cmd, word_idx, expected, cwd=None): result = cli.run(cwd=cwd, env={ '_BST_COMPLETION': 'complete', 'COMP_WORDS': cmd, 'COMP_CWORD': str(word_idx) }) words = [] if result.output: words = result.output.splitlines() # The order is meaningless, bash will # take the results and order it by it's # own little heuristics words = sorted(words) expected = sorted(expected) assert words == expected @pytest.mark.parametrize("cmd,word_idx,expected", [ ('bst', 0, []), ('bst ', 1, MAIN_COMMANDS), ('bst pu', 1, ['pull ', 'push ']), ('bst pul', 1, ['pull ']), ('bst w ', 1, ['workspace ']), ('bst workspace ', 2, WORKSPACE_COMMANDS), ]) def test_commands(cli, cmd, word_idx, expected): assert_completion(cli, cmd, word_idx, expected) @pytest.mark.parametrize("cmd,word_idx,expected", [ ('bst -', 1, MAIN_OPTIONS), ('bst --l', 1, ['--log-file ']), # Test that options of subcommands also complete ('bst --no-colors build -', 3, ['--all ', '--track ', '--track-all ', '--track-except ', '--track-cross-junctions 
', '-J ', '--track-save ']), # Test the behavior of completing after an option that has a # parameter that cannot be completed, vs an option that has # no parameter ('bst --fetchers ', 2, []), ('bst --no-colors ', 2, MAIN_COMMANDS), ]) def test_options(cli, cmd, word_idx, expected): assert_completion(cli, cmd, word_idx, expected) @pytest.mark.parametrize("cmd,word_idx,expected", [ ('bst --on-error ', 2, ['continue ', 'quit ', 'terminate ']), ('bst show --deps ', 3, ['all ', 'build ', 'none ', 'plan ', 'run ']), ('bst show --deps=', 2, ['all ', 'build ', 'none ', 'plan ', 'run ']), ('bst show --deps b', 3, ['build ']), ('bst show --deps=b', 2, ['build ']), ('bst show --deps r', 3, ['run ']), ('bst track --deps ', 3, ['all ', 'none ']), ]) def test_option_choice(cli, cmd, word_idx, expected): assert_completion(cli, cmd, word_idx, expected) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'project')) @pytest.mark.parametrize("cmd,word_idx,expected,subdir", [ # Note that elements/ and files/ are partial completions and # as such do not come with trailing whitespace ('bst --config ', 2, ['cache/', 'elements/', 'files/', 'project.conf '], None), ('bst --log-file ', 2, ['cache/', 'elements/', 'files/', 'project.conf '], None), ('bst --config f', 2, ['files/'], None), ('bst --log-file f', 2, ['files/'], None), ('bst --config files', 2, ['files/bin-files/', 'files/dev-files/'], None), ('bst --log-file files', 2, ['files/bin-files/', 'files/dev-files/'], None), ('bst --config files/', 2, ['files/bin-files/', 'files/dev-files/'], None), ('bst --log-file elements/', 2, [os.path.join('elements', e) + ' ' for e in PROJECT_ELEMENTS], None), ('bst --config ../', 2, ['../cache/', '../elements/', '../files/', '../project.conf '], 'files'), ('bst --config ../elements/', 2, [os.path.join('..', 'elements', e) + ' ' for e in PROJECT_ELEMENTS], 'files'), ('bst --config ../nofile', 2, [], 'files'), ('bst --config /pony/rainbow/nobodyhas/this/file', 2, [], 'files'), ]) def test_option_file(datafiles, cli, cmd, word_idx, expected, subdir): cwd = str(datafiles) if subdir: cwd = os.path.join(cwd, subdir) assert_completion(cli, cmd, word_idx, expected, cwd=cwd) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'project')) @pytest.mark.parametrize("cmd,word_idx,expected,subdir", [ # Note that regular files like project.conf are not returned when # completing for a directory ('bst --directory ', 2, ['cache/', 'elements/', 'files/'], None), ('bst --directory elements/', 2, [], None), ('bst --directory ', 2, ['dev-files/', 'bin-files/'], 'files'), ('bst --directory ../', 2, ['../cache/', '../elements/', '../files/'], 'files'), ]) def test_option_directory(datafiles, cli, cmd, word_idx, expected, subdir): cwd = str(datafiles) if subdir: cwd = os.path.join(cwd, subdir) assert_completion(cli, cmd, word_idx, expected, cwd=cwd) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("project,cmd,word_idx,expected,subdir", [ # When running in the project directory ('project', 'bst show ', 2, [e + ' ' for e in PROJECT_ELEMENTS], None), ('project', 'bst build com', 2, ['compose-all.bst ', 'compose-include-bin.bst ', 'compose-exclude-dev.bst '], None), # When running from the files subdir ('project', 'bst show ', 2, [e + ' ' for e in PROJECT_ELEMENTS], 'files'), ('project', 'bst build com', 2, ['compose-all.bst ', 'compose-include-bin.bst ', 'compose-exclude-dev.bst '], 'files'), # When passing the project directory ('project', 'bst --directory ../ show ', 4, [e + ' ' for e in PROJECT_ELEMENTS], 'files'), ('project', 'bst --directory 
../ build com', 4, ['compose-all.bst ', 'compose-include-bin.bst ', 'compose-exclude-dev.bst '], 'files'), # Also try multi arguments together ('project', 'bst --directory ../ checkout t ', 4, ['target.bst '], 'files'), ('project', 'bst --directory ../ checkout target.bst ', 5, ['bin-files/', 'dev-files/'], 'files'), # When running in the project directory ('no-element-path', 'bst show ', 2, [e + ' ' for e in (PROJECT_ELEMENTS + ['project.conf'])] + ['files/'], None), ('no-element-path', 'bst build com', 2, ['compose-all.bst ', 'compose-include-bin.bst ', 'compose-exclude-dev.bst '], None), # When running from the files subdir ('no-element-path', 'bst show ', 2, [e + ' ' for e in (PROJECT_ELEMENTS + ['project.conf'])] + ['files/'], 'files'), ('no-element-path', 'bst build com', 2, ['compose-all.bst ', 'compose-include-bin.bst ', 'compose-exclude-dev.bst '], 'files'), # When passing the project directory ('no-element-path', 'bst --directory ../ show ', 4, [e + ' ' for e in (PROJECT_ELEMENTS + ['project.conf'])] + ['files/'], 'files'), ('no-element-path', 'bst --directory ../ show f', 4, ['files/'], 'files'), ('no-element-path', 'bst --directory ../ show files/', 4, ['files/bin-files/', 'files/dev-files/'], 'files'), ('no-element-path', 'bst --directory ../ build com', 4, ['compose-all.bst ', 'compose-include-bin.bst ', 'compose-exclude-dev.bst '], 'files'), # Also try multi arguments together ('no-element-path', 'bst --directory ../ checkout t ', 4, ['target.bst '], 'files'), ('no-element-path', 'bst --directory ../ checkout target.bst ', 5, ['bin-files/', 'dev-files/'], 'files'), # When element-path have sub-folders ('sub-folders', 'bst show base', 2, ['base/wanted.bst '], None), ('sub-folders', 'bst show base/', 2, ['base/wanted.bst '], None), ]) def test_argument_element(datafiles, cli, project, cmd, word_idx, expected, subdir): cwd = os.path.join(str(datafiles), project) if subdir: cwd = os.path.join(cwd, subdir) assert_completion(cli, cmd, word_idx, expected, cwd=cwd) @pytest.mark.parametrize("cmd,word_idx,expected", [ ('bst he', 1, ['help ']), ('bst help ', 2, MAIN_COMMANDS), ('bst help fe', 2, ['fetch ']), ('bst help p', 2, ['pull ', 'push ']), ('bst help p', 2, ['pull ', 'push ']), ('bst help w', 2, ['workspace ']), ('bst help workspace ', 3, WORKSPACE_COMMANDS), ]) def test_help_commands(cli, cmd, word_idx, expected): assert_completion(cli, cmd, word_idx, expected) buildstream-1.6.9/tests/completions/no-element-path/000077500000000000000000000000001437515270000225415ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/compose-all.bst000066400000000000000000000003441437515270000254670ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. integrate: False buildstream-1.6.9/tests/completions/no-element-path/compose-exclude-dev.bst000066400000000000000000000004251437515270000271240ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. 
integrate: False # Exclude the dev domain exclude: - devel buildstream-1.6.9/tests/completions/no-element-path/compose-include-bin.bst000066400000000000000000000004301437515270000271040ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. integrate: False # Only include the runtim include: - runtime buildstream-1.6.9/tests/completions/no-element-path/files/000077500000000000000000000000001437515270000236435ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/bin-files/000077500000000000000000000000001437515270000255135ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/bin-files/usr/000077500000000000000000000000001437515270000263245ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/bin-files/usr/bin/000077500000000000000000000000001437515270000270745ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/bin-files/usr/bin/hello000077500000000000000000000000341437515270000301220ustar00rootroot00000000000000#!/bin/bash echo "Hello !" buildstream-1.6.9/tests/completions/no-element-path/files/dev-files/000077500000000000000000000000001437515270000255215ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/dev-files/usr/000077500000000000000000000000001437515270000263325ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/dev-files/usr/include/000077500000000000000000000000001437515270000277555ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/no-element-path/files/dev-files/usr/include/pony.h000066400000000000000000000003711437515270000311140ustar00rootroot00000000000000#ifndef __PONY_H__ #define __PONY_H__ #define PONY_BEGIN "Once upon a time, there was a pony." #define PONY_END "And they lived happily ever after, the end." 
#define MAKE_PONY(story) \ PONY_BEGIN \ story \ PONY_END #endif /* __PONY_H__ */ buildstream-1.6.9/tests/completions/no-element-path/import-bin.bst000066400000000000000000000000741437515270000253340ustar00rootroot00000000000000kind: import sources: - kind: local path: files/bin-files buildstream-1.6.9/tests/completions/no-element-path/import-dev.bst000066400000000000000000000000741437515270000253420ustar00rootroot00000000000000kind: import sources: - kind: local path: files/dev-files buildstream-1.6.9/tests/completions/no-element-path/project.conf000066400000000000000000000000641437515270000250560ustar00rootroot00000000000000# Project config for frontend build test name: test buildstream-1.6.9/tests/completions/no-element-path/target.bst000066400000000000000000000001641437515270000245420ustar00rootroot00000000000000kind: stack description: | Main stack target for the bst build test depends: - import-bin.bst - compose-all.bst buildstream-1.6.9/tests/completions/project/000077500000000000000000000000001437515270000212125ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/elements/000077500000000000000000000000001437515270000230265ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/elements/compose-all.bst000066400000000000000000000003441437515270000257540ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. integrate: False buildstream-1.6.9/tests/completions/project/elements/compose-exclude-dev.bst000066400000000000000000000004251437515270000274110ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. integrate: False # Exclude the dev domain exclude: - devel buildstream-1.6.9/tests/completions/project/elements/compose-include-bin.bst000066400000000000000000000004301437515270000273710ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. 
integrate: False # Only include the runtim include: - runtime buildstream-1.6.9/tests/completions/project/elements/import-bin.bst000066400000000000000000000000741437515270000256210ustar00rootroot00000000000000kind: import sources: - kind: local path: files/bin-files buildstream-1.6.9/tests/completions/project/elements/import-dev.bst000066400000000000000000000000741437515270000256270ustar00rootroot00000000000000kind: import sources: - kind: local path: files/dev-files buildstream-1.6.9/tests/completions/project/elements/target.bst000066400000000000000000000001641437515270000250270ustar00rootroot00000000000000kind: stack description: | Main stack target for the bst build test depends: - import-bin.bst - compose-all.bst buildstream-1.6.9/tests/completions/project/files/000077500000000000000000000000001437515270000223145ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/bin-files/000077500000000000000000000000001437515270000241645ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/bin-files/usr/000077500000000000000000000000001437515270000247755ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/bin-files/usr/bin/000077500000000000000000000000001437515270000255455ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/bin-files/usr/bin/hello000077500000000000000000000000341437515270000265730ustar00rootroot00000000000000#!/bin/bash echo "Hello !" buildstream-1.6.9/tests/completions/project/files/dev-files/000077500000000000000000000000001437515270000241725ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/dev-files/usr/000077500000000000000000000000001437515270000250035ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/dev-files/usr/include/000077500000000000000000000000001437515270000264265ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/project/files/dev-files/usr/include/pony.h000066400000000000000000000003711437515270000275650ustar00rootroot00000000000000#ifndef __PONY_H__ #define __PONY_H__ #define PONY_BEGIN "Once upon a time, there was a pony." #define PONY_END "And they lived happily ever after, the end." 
#define MAKE_PONY(story) \ PONY_BEGIN \ story \ PONY_END #endif /* __PONY_H__ */ buildstream-1.6.9/tests/completions/project/project.conf000066400000000000000000000001141437515270000235230ustar00rootroot00000000000000# Project config for frontend build test name: test element-path: elements buildstream-1.6.9/tests/completions/sub-folders/000077500000000000000000000000001437515270000217715ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/sub-folders/base/000077500000000000000000000000001437515270000227035ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/sub-folders/base/unwanted.bst000066400000000000000000000000771437515270000252460ustar00rootroot00000000000000kind: autotools description: | Not auto-completed element buildstream-1.6.9/tests/completions/sub-folders/elements/000077500000000000000000000000001437515270000236055ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/sub-folders/elements/base.bst000066400000000000000000000001001437515270000252200ustar00rootroot00000000000000kind: stack description: Base stack depends: - base/wanted.bst buildstream-1.6.9/tests/completions/sub-folders/elements/base/000077500000000000000000000000001437515270000245175ustar00rootroot00000000000000buildstream-1.6.9/tests/completions/sub-folders/elements/base/wanted.bst000066400000000000000000000000731437515270000265130ustar00rootroot00000000000000kind: autotools description: | Auto-completed element buildstream-1.6.9/tests/completions/sub-folders/elements/hello.bst000066400000000000000000000000601437515270000254160ustar00rootroot00000000000000kind: autotools description: | Hello world buildstream-1.6.9/tests/completions/sub-folders/project.conf000066400000000000000000000001141437515270000243020ustar00rootroot00000000000000# Project config for frontend build test name: test element-path: elements buildstream-1.6.9/tests/context/000077500000000000000000000000001437515270000166745ustar00rootroot00000000000000buildstream-1.6.9/tests/context/__init__.py000066400000000000000000000000001437515270000207730ustar00rootroot00000000000000buildstream-1.6.9/tests/context/context.py000066400000000000000000000124571437515270000207430ustar00rootroot00000000000000import os import pytest from buildstream._context import Context from buildstream._exceptions import LoadError, LoadErrorReason from buildstream import _yaml DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'data', ) # Simple fixture to create a Context object. 
@pytest.fixture() def context_fixture(): if os.environ.get('XDG_CACHE_HOME'): cache_home = os.environ['XDG_CACHE_HOME'] else: cache_home = os.path.expanduser('~/.cache') return { 'xdg-cache': cache_home, 'context': Context() } ####################################### # Test instantiation # ####################################### def test_context_create(context_fixture): context = context_fixture['context'] assert(isinstance(context, Context)) ####################################### # Test configuration loading # ####################################### def test_context_load(context_fixture): context = context_fixture['context'] cache_home = context_fixture['xdg-cache'] assert(isinstance(context, Context)) context.load(config=os.devnull) assert(context.sourcedir == os.path.join(cache_home, 'buildstream', 'sources')) assert(context.builddir == os.path.join(cache_home, 'buildstream', 'build')) assert(context.artifactdir == os.path.join(cache_home, 'buildstream', 'artifacts')) assert(context.logdir == os.path.join(cache_home, 'buildstream', 'logs')) # Assert that a changed XDG_CACHE_HOME doesn't cause issues def test_context_load_envvar(context_fixture): os.environ['XDG_CACHE_HOME'] = '/some/path/' context = context_fixture['context'] assert(isinstance(context, Context)) context.load(config=os.devnull) assert(context.sourcedir == os.path.join('/', 'some', 'path', 'buildstream', 'sources')) assert(context.builddir == os.path.join('/', 'some', 'path', 'buildstream', 'build')) assert(context.artifactdir == os.path.join('/', 'some', 'path', 'buildstream', 'artifacts')) assert(context.logdir == os.path.join('/', 'some', 'path', 'buildstream', 'logs')) # Reset the environment variable del os.environ['XDG_CACHE_HOME'] # Test that values in a user specified config file # override the defaults @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_context_load_user_config(context_fixture, datafiles): context = context_fixture['context'] cache_home = context_fixture['xdg-cache'] assert(isinstance(context, Context)) conf_file = os.path.join(datafiles.dirname, datafiles.basename, 'userconf.yaml') context.load(conf_file) assert(context.sourcedir == os.path.expanduser('~/pony')) assert(context.builddir == os.path.join(cache_home, 'buildstream', 'build')) assert(context.artifactdir == os.path.join(cache_home, 'buildstream', 'artifacts')) assert(context.logdir == os.path.join(cache_home, 'buildstream', 'logs')) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_context_priority(datafiles): confdir = os.path.join(str(datafiles), "config") os.makedirs(confdir) # The fallback (usual) config file bst_conf_path = os.path.join(confdir, "buildstream.conf") bst_conf = {"sourcedir": "/sources"} _yaml.dump(bst_conf, bst_conf_path) # The version specific config file bst_conf_path = os.path.join(confdir, "buildstream1.conf") bst_conf = {"sourcedir": "/other_sources"} _yaml.dump(bst_conf, bst_conf_path) # Load the Context() object and assert that we've chosen # the version specific one. 
# os.environ["XDG_CONFIG_HOME"] = confdir context = Context() context.load() assert context.sourcedir == "/other_sources" del os.environ["XDG_CONFIG_HOME"] ####################################### # Test failure modes # ####################################### @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_context_load_missing_config(context_fixture, datafiles): context = context_fixture['context'] assert(isinstance(context, Context)) conf_file = os.path.join(datafiles.dirname, datafiles.basename, 'nonexistant.yaml') with pytest.raises(LoadError) as exc: context.load(conf_file) assert (exc.value.reason == LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_context_load_malformed_config(context_fixture, datafiles): context = context_fixture['context'] assert(isinstance(context, Context)) conf_file = os.path.join(datafiles.dirname, datafiles.basename, 'malformed.yaml') with pytest.raises(LoadError) as exc: context.load(conf_file) assert (exc.value.reason == LoadErrorReason.INVALID_YAML) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_context_load_notdict_config(context_fixture, datafiles): context = context_fixture['context'] assert(isinstance(context, Context)) conf_file = os.path.join(datafiles.dirname, datafiles.basename, 'notdict.yaml') with pytest.raises(LoadError) as exc: context.load(conf_file) # XXX Should this be a different LoadErrorReason ? assert (exc.value.reason == LoadErrorReason.INVALID_YAML) buildstream-1.6.9/tests/context/data/000077500000000000000000000000001437515270000176055ustar00rootroot00000000000000buildstream-1.6.9/tests/context/data/malformed.yaml000066400000000000000000000000421437515270000224330ustar00rootroot00000000000000- | this is malformed yaml. ** buildstream-1.6.9/tests/context/data/notdict.yaml000066400000000000000000000000241437515270000221310ustar00rootroot00000000000000This is not a dict. buildstream-1.6.9/tests/context/data/userconf.yaml000066400000000000000000000000561437515270000223160ustar00rootroot00000000000000# Try overriding something sourcedir: ~/pony buildstream-1.6.9/tests/examples/000077500000000000000000000000001437515270000170265ustar00rootroot00000000000000buildstream-1.6.9/tests/examples/__init__.py000066400000000000000000000000001437515270000211250ustar00rootroot00000000000000buildstream-1.6.9/tests/examples/autotools.py000066400000000000000000000036221437515270000214340ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains from tests.testutils.site import IS_LINUX pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), '..', '..', 'doc', 'examples', 'autotools' ) # Tests a build of the autotools amhello project on a alpine-linux base runtime @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_autotools_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') # Check that the project can be built correctly. 
result = cli.run(project=project, args=['build', 'hello.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'hello.bst', checkout]) result.assert_success() assert_contains(checkout, ['/usr', '/usr/lib', '/usr/bin', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README']) # Test running an executable built with autotools. @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_autotools_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'hello.bst']) result.assert_success() result = cli.run(project=project, args=['shell', 'hello.bst', 'hello']) result.assert_success() assert result.output == 'Hello World!\nThis is amhello 1.0.\n' buildstream-1.6.9/tests/examples/first-project.py000066400000000000000000000015711437515270000221770ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains from tests.testutils.site import IS_LINUX pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), '..', '..', 'doc', 'examples', 'first-project' ) @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_first_project_build_checkout(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'hello.bst', checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/hello.world']) buildstream-1.6.9/tests/examples/flatpak-autotools.py000066400000000000000000000053171437515270000230570ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains from tests.testutils.site import IS_LINUX pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), '..', '..', 'doc', 'examples', 'flatpak-autotools' ) # FIXME: Workaround a setuptools bug which fails to include symbolic # links in the source distribution. # # Remove this hack once setuptools is fixed def workaround_setuptools_bug(project): os.makedirs(os.path.join(project, "files", "links"), exist_ok=True) try: os.symlink(os.path.join("usr", "lib"), os.path.join(project, "files", "links", "lib")) os.symlink(os.path.join("usr", "bin"), os.path.join(project, "files", "links", "bin")) os.symlink(os.path.join("usr", "etc"), os.path.join(project, "files", "links", "etc")) except FileExistsError: # If the files exist, we're running from a git checkout and # not a source distribution, no need to complain pass # Test that a build upon flatpak runtime 'works' - we use the autotools sample # amhello project for this. 
@pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_autotools_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') workaround_setuptools_bug(project) result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'hello.bst', checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/usr', '/usr/lib', '/usr/bin', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README']) # Test running an executable built with autotools @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_autotools_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workaround_setuptools_bug(project) result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', 'hello.bst', '/usr/bin/hello']) assert result.exit_code == 0 assert result.output == 'Hello World!\nThis is amhello 1.0.\n' buildstream-1.6.9/tests/examples/integration-commands.py000066400000000000000000000023471437515270000235300ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains from tests.testutils.site import IS_LINUX pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), '..', '..', 'doc', 'examples', 'integration-commands' ) @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_integration_commands_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 # Test running the executable @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_integration_commands_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', 'hello.bst', '--', 'hello', 'pony']) assert result.exit_code == 0 assert result.output == 'Hello pony\n' buildstream-1.6.9/tests/examples/running-commands.py000066400000000000000000000023241437515270000226600ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains from tests.testutils.site import IS_LINUX pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), '..', '..', 'doc', 'examples', 'running-commands' ) @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_running_commands_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 # Test running the executable @pytest.mark.skipif(not IS_LINUX, 
reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_running_commands_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'hello.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', 'hello.bst', '--', 'hello']) assert result.exit_code == 0 assert result.output == 'Hello World\n' buildstream-1.6.9/tests/format/000077500000000000000000000000001437515270000165005ustar00rootroot00000000000000buildstream-1.6.9/tests/format/__init__.py000066400000000000000000000000001437515270000205770ustar00rootroot00000000000000buildstream-1.6.9/tests/format/assertion.py000066400000000000000000000024331437515270000210630ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'assertion' ) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,opt_pony,opt_horsy,assertion", [ # Test an unconditional (!) directly in the element ('raw-assertion.bst', 'False', 'False', 'Raw assertion boogey'), # Test an assertion in a conditional ('conditional-assertion.bst', 'True', 'False', "It's not pony time yet"), # Test that we get the first composited assertion ('ordered-assertion.bst', 'True', 'True', "It's not horsy time yet"), ]) def test_assertion_cli(cli, datafiles, target, opt_pony, opt_horsy, assertion): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, silent=True, args=[ '--option', 'pony', opt_pony, '--option', 'horsy', opt_horsy, 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.USER_ASSERTION) # Assert that the assertion text provided by the user # is found in the exception text assert assertion in str(result.stderr) buildstream-1.6.9/tests/format/assertion/000077500000000000000000000000001437515270000205075ustar00rootroot00000000000000buildstream-1.6.9/tests/format/assertion/conditional-assertion.bst000066400000000000000000000002141437515270000255260ustar00rootroot00000000000000kind: autotools variables: thepony: "not pony" (?): - pony == True: thepony: "It's a ponay !" (!): It's not pony time yet buildstream-1.6.9/tests/format/assertion/ordered-assertion.bst000066400000000000000000000003431437515270000246520ustar00rootroot00000000000000kind: autotools variables: thepony: "not pony" (?): - pony == True: thepony: "It's a ponay !" (!): It's not pony time yet - horsy == True: thepony: "It's a horsay !" 
(!): It's not horsy time yet buildstream-1.6.9/tests/format/assertion/project.conf000066400000000000000000000002711437515270000230240ustar00rootroot00000000000000name: test options: pony: type: bool description: Whether a pony or not default: False horsy: type: bool description: Whether a horsy or not default: False buildstream-1.6.9/tests/format/assertion/raw-assertion.bst000066400000000000000000000001241437515270000240140ustar00rootroot00000000000000kind: autotools variables: thepony: "not pony" (!): | Raw assertion boogey buildstream-1.6.9/tests/format/include.py000066400000000000000000000242171437515270000205030ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils import cli, generate_junction, create_repo # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'include' ) @pytest.mark.datafiles(DATA_DIR) def test_include_project_file(cli, datafiles): project = os.path.join(str(datafiles), 'file') result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['included'] == 'True' @pytest.mark.datafiles(DATA_DIR) def test_include_junction_file(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'junction') generate_junction(tmpdir, os.path.join(project, 'subproject'), os.path.join(project, 'junction.bst'), store_ref=True) result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['included'] == 'True' @pytest.mark.datafiles(DATA_DIR) def test_include_junction_options(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'options') result = cli.run(project=project, args=[ '-o', 'build_arch', 'x86_64', 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['build_arch'] == 'x86_64' @pytest.mark.datafiles(DATA_DIR) def test_junction_element_partial_project_project(cli, tmpdir, datafiles): """ Junction elements never depend on fully include processed project. """ project = os.path.join(str(datafiles), 'junction') subproject_path = os.path.join(project, 'subproject') junction_path = os.path.join(project, 'junction.bst') repo = create_repo('git', str(tmpdir)) ref = repo.create(subproject_path) element = { 'kind': 'junction', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, junction_path) result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'junction.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'included' not in loaded @pytest.mark.datafiles(DATA_DIR) def test_junction_element_not_partial_project_file(cli, tmpdir, datafiles): """ Junction elements never depend on fully include processed project. 
""" project = os.path.join(str(datafiles), 'file_with_subproject') subproject_path = os.path.join(project, 'subproject') junction_path = os.path.join(project, 'junction.bst') repo = create_repo('git', str(tmpdir)) ref = repo.create(subproject_path) element = { 'kind': 'junction', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, junction_path) result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'junction.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'included' in loaded @pytest.mark.datafiles(DATA_DIR) def test_include_element_overrides(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'overrides') result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'manual_main_override' in loaded assert 'manual_included_override' in loaded @pytest.mark.datafiles(DATA_DIR) def test_include_element_overrides_composition(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'overrides') result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{config}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'build-commands' in loaded assert loaded['build-commands'] == ['first', 'second'] @pytest.mark.datafiles(DATA_DIR) def test_include_element_overrides_sub_include(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'sub-include') result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'included' in loaded @pytest.mark.datafiles(DATA_DIR) def test_junction_do_not_use_included_overrides(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'overrides-junction') generate_junction(tmpdir, os.path.join(project, 'subproject'), os.path.join(project, 'junction.bst'), store_ref=True) result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'junction.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'main_override' in loaded assert 'included_override' not in loaded @pytest.mark.datafiles(DATA_DIR) def test_conditional_in_fragment(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'conditional') result = cli.run(project=project, args=[ '-o', 'build_arch', 'x86_64', 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert 'size' in loaded assert loaded['size'] == '8' @pytest.mark.parametrize( "project_dir", [ "conditional-conflicts-project", "conditional-conflicts-element", "conditional-conflicts-options-included", "conditional-conflicts-complex", "conditional-conflicts-toplevel-precedence", ], ) @pytest.mark.datafiles(DATA_DIR) def test_preserve_conditionals(cli, datafiles, project_dir): project = os.path.join(str(datafiles), project_dir) result = cli.run( project=project, args=["-o", "build_arch", "i586", "show", "--deps", "none", "--format", "%{vars}", "element.bst"], ) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded["enable-work-around"] == "true" assert loaded["size"] == "4" @pytest.mark.datafiles(DATA_DIR) def test_inner(cli, datafiles): project = os.path.join(str(datafiles), 'inner') result = cli.run(project=project, args=[ '-o', 'build_arch', 'x86_64', 'show', '--deps', 'none', '--format', '%{vars}', 
'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['build_arch'] == 'x86_64' @pytest.mark.datafiles(DATA_DIR) def test_recusive_include(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'recursive') result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.RECURSIVE_INCLUDE) @pytest.mark.datafiles(DATA_DIR) def test_local_to_junction(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'local_to_junction') generate_junction(tmpdir, os.path.join(project, 'subproject'), os.path.join(project, 'junction.bst'), store_ref=True) result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['included'] == 'True' @pytest.mark.datafiles(DATA_DIR) def test_include_project_file(cli, datafiles): project = os.path.join(str(datafiles), 'string') result = cli.run(project=project, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['included'] == 'True' @pytest.mark.datafiles(DATA_DIR) def test_option_from_junction(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), "junction_options") generate_junction( tmpdir, os.path.join(project, "subproject"), os.path.join(project, "junction.bst"), store_ref=True, options={"local_option": "set"}, ) result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded["is-default"] == 'False' @pytest.mark.datafiles(DATA_DIR) def test_option_from_junction_element(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), "junction_options_element") generate_junction( tmpdir, os.path.join(project, "subproject"), os.path.join(project, "junction.bst"), store_ref=True, options={"local_option": "set"}, ) result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded["is-default"] == 'False' @pytest.mark.datafiles(DATA_DIR) def test_option_from_deep_junction(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), "junction_options_deep") generate_junction( tmpdir, os.path.join(project, "subproject-2"), os.path.join(project, "subproject-1", "junction-2.bst"), store_ref=True, options={"local_option": "set"}, ) generate_junction( tmpdir, os.path.join(project, "subproject-1"), os.path.join(project, "junction-1.bst"), store_ref=True, ) result = cli.run(project=project, args=["show", "--deps", "none", "--format", "%{vars}", "element.bst"]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded["is-default"] == 'False' buildstream-1.6.9/tests/format/include/000077500000000000000000000000001437515270000201235ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/000077500000000000000000000000001437515270000260555ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/element.bst000066400000000000000000000000151437515270000302140ustar00rootroot00000000000000kind: manual 
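The include tests above all follow one pattern: a project.conf pulls in a fragment with the (@) directive, either locally or across a junction as junction.bst:extra_conf.yml, and the test asserts that variables defined in the fragment appear in the %{vars} output of bst show. A minimal sketch of that fixture layout, assuming a fragment called extra_conf.yml that defines a single variable:

    # project.conf
    name: test
    (@):
    - extra_conf.yml                  # local fragment
    # - junction.bst:extra_conf.yml   # same syntax for a fragment in a subproject

    # extra_conf.yml
    variables:
      included: 'True'

The tests then run bst show --deps none --format '%{vars}' element.bst and check that 'included' resolves to 'True'.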
buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/enable_work_around.yml000066400000000000000000000001531437515270000324370ustar00rootroot00000000000000variables: enable-work-around: "false" (?): - build_arch == "i586": enable-work-around: "true" buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/extra_conf.yml000066400000000000000000000001631437515270000307300ustar00rootroot00000000000000(?): - build_arch == "i586": (@): extra_conf_i586.yml - build_arch == "x86_64": (@): extra_conf_x86_64.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/extra_conf_i586.yml000066400000000000000000000000721437515270000315020ustar00rootroot00000000000000variables: (?): - build_arch == "i586": size: 4 buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/extra_conf_x86_64.yml000066400000000000000000000000741437515270000317470ustar00rootroot00000000000000variables: (?): - build_arch == "x86_64": size: 8 buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/options.yml000066400000000000000000000002061437515270000302710ustar00rootroot00000000000000 options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/project.conf000066400000000000000000000001021437515270000303630ustar00rootroot00000000000000name: test (@): - extra_conf.yml - work_around.yml - options.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-complex/work_around.yml000066400000000000000000000000751437515270000311340ustar00rootroot00000000000000(?): - build_arch == "i586": (@): enable_work_around.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-element/000077500000000000000000000000001437515270000260375ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional-conflicts-element/element.bst000066400000000000000000000000721437515270000302010ustar00rootroot00000000000000kind: manual (@): - extra_conf.yml - work_around.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-element/extra_conf.yml000066400000000000000000000001601437515270000307070ustar00rootroot00000000000000variables: (?): - build_arch == "i586": size: "4" - build_arch == "x86_64": size: "8" buildstream-1.6.9/tests/format/include/conditional-conflicts-element/project.conf000066400000000000000000000002211437515270000303470ustar00rootroot00000000000000name: test options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 buildstream-1.6.9/tests/format/include/conditional-conflicts-element/work_around.yml000066400000000000000000000001571437515270000311170ustar00rootroot00000000000000variables: enable-work-around: "false" (?): - build_arch == "i586": enable-work-around: "true" buildstream-1.6.9/tests/format/include/conditional-conflicts-options-included/000077500000000000000000000000001437515270000276665ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional-conflicts-options-included/element.bst000066400000000000000000000000151437515270000320250ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/conditional-conflicts-options-included/extra_conf.yml000066400000000000000000000001601437515270000325360ustar00rootroot00000000000000variables: (?): - build_arch == "i586": size: "4" - build_arch == "x86_64": size: "8" 
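The conditional-conflicts fixtures just above combine the (?) conditional directive with (@) includes: a fragment selects which further fragment to include based on a project option, and the conditionals inside those fragments must still resolve once all includes are processed. A condensed sketch of the pattern, assuming the i586/x86_64 build_arch option declared in options.yml:

    # extra_conf.yml, picks a sub-fragment per architecture
    (?):
    - build_arch == "i586":
        (@): extra_conf_i586.yml
    - build_arch == "x86_64":
        (@): extra_conf_x86_64.yml

    # extra_conf_i586.yml
    variables:
      (?):
      - build_arch == "i586":
          size: 4

Run with bst -o build_arch i586 show --deps none --format '%{vars}' element.bst, the tests expect size to resolve to "4".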
buildstream-1.6.9/tests/format/include/conditional-conflicts-options-included/options.yml000066400000000000000000000002061437515270000321020ustar00rootroot00000000000000 options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 buildstream-1.6.9/tests/format/include/conditional-conflicts-options-included/project.conf000066400000000000000000000001021437515270000321740ustar00rootroot00000000000000name: test (@): - options.yml - extra_conf.yml - work_around.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-options-included/work_around.yml000066400000000000000000000001571437515270000327460ustar00rootroot00000000000000variables: enable-work-around: "false" (?): - build_arch == "i586": enable-work-around: "true" buildstream-1.6.9/tests/format/include/conditional-conflicts-project/000077500000000000000000000000001437515270000260545ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional-conflicts-project/element.bst000066400000000000000000000000151437515270000302130ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/conditional-conflicts-project/extra_conf.yml000066400000000000000000000001601437515270000307240ustar00rootroot00000000000000variables: (?): - build_arch == "i586": size: "4" - build_arch == "x86_64": size: "8" buildstream-1.6.9/tests/format/include/conditional-conflicts-project/project.conf000066400000000000000000000002761437515270000303760ustar00rootroot00000000000000name: test options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 (@): - extra_conf.yml - work_around.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-project/work_around.yml000066400000000000000000000001571437515270000311340ustar00rootroot00000000000000variables: enable-work-around: "false" (?): - build_arch == "i586": enable-work-around: "true" buildstream-1.6.9/tests/format/include/conditional-conflicts-toplevel-precedence/000077500000000000000000000000001437515270000303335ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional-conflicts-toplevel-precedence/element.bst000066400000000000000000000000151437515270000324720ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/conditional-conflicts-toplevel-precedence/extra_conf.yml000066400000000000000000000001601437515270000332030ustar00rootroot00000000000000variables: (?): - build_arch == "i586": size: "4" - build_arch == "x86_64": size: "8" buildstream-1.6.9/tests/format/include/conditional-conflicts-toplevel-precedence/project.conf000066400000000000000000000007011437515270000326460ustar00rootroot00000000000000name: test options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 # The work_around.yml sets this to false in it's conditional # and we set it to true, testing here that the including # fragment still takes precedence over any included fragments. 
variables: (?): - build_arch == "i586": enable-work-around: "true" (@): - extra_conf.yml - work_around.yml buildstream-1.6.9/tests/format/include/conditional-conflicts-toplevel-precedence/work_around.yml000066400000000000000000000001571437515270000334130ustar00rootroot00000000000000variables: enable-work-around: "true" (?): - build_arch == "i586": enable-work-around: "false" buildstream-1.6.9/tests/format/include/conditional/000077500000000000000000000000001437515270000224265ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/conditional/element.bst000066400000000000000000000000151437515270000245650ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/conditional/extra_conf.yml000066400000000000000000000001601437515270000252760ustar00rootroot00000000000000variables: (?): - build_arch == "i586": size: "4" - build_arch == "x86_64": size: "8" buildstream-1.6.9/tests/format/include/conditional/project.conf000066400000000000000000000002521437515270000247420ustar00rootroot00000000000000name: test options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 (@): - extra_conf.yml buildstream-1.6.9/tests/format/include/file/000077500000000000000000000000001437515270000210425ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/file/element.bst000066400000000000000000000000151437515270000232010ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/file/extra_conf.yml000066400000000000000000000000361437515270000237140ustar00rootroot00000000000000variables: included: 'True' buildstream-1.6.9/tests/format/include/file/project.conf000066400000000000000000000000441437515270000233550ustar00rootroot00000000000000name: test (@): - extra_conf.yml buildstream-1.6.9/tests/format/include/file_with_subproject/000077500000000000000000000000001437515270000243355ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/file_with_subproject/element.bst000066400000000000000000000000151437515270000264740ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/file_with_subproject/extra_conf.yml000066400000000000000000000000361437515270000272070ustar00rootroot00000000000000variables: included: 'True' buildstream-1.6.9/tests/format/include/file_with_subproject/project.bst000066400000000000000000000000611437515270000265120ustar00rootroot00000000000000name: test (@): - junction.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/file_with_subproject/project.conf000066400000000000000000000000441437515270000266500ustar00rootroot00000000000000name: test (@): - extra_conf.yml buildstream-1.6.9/tests/format/include/file_with_subproject/subproject/000077500000000000000000000000001437515270000265155ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/file_with_subproject/subproject/project.conf000066400000000000000000000000171437515270000310300ustar00rootroot00000000000000name: test-sub buildstream-1.6.9/tests/format/include/inner/000077500000000000000000000000001437515270000212365ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/inner/element.bst000066400000000000000000000000151437515270000233750ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/inner/extra_conf.yml000066400000000000000000000001561437515270000241130ustar00rootroot00000000000000build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 
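The comment in the conditional-conflicts-toplevel-precedence project.conf above spells out the composition rule being tested: when the including file and an included fragment both set the same variable, even inside (?) conditionals, the including file's value takes precedence. A reduced view of the two sides, assuming the i586 build_arch option from that fixture:

    # work_around.yml, the included fragment
    variables:
      enable-work-around: "true"
      (?):
      - build_arch == "i586":
          enable-work-around: "false"

    # project.conf, the including file; its value wins
    variables:
      (?):
      - build_arch == "i586":
          enable-work-around: "true"
    (@):
    - extra_conf.yml
    - work_around.yml

With -o build_arch i586 the test expects enable-work-around to resolve to "true".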
buildstream-1.6.9/tests/format/include/inner/project.conf000066400000000000000000000000611437515270000235500ustar00rootroot00000000000000name: test options: (@): - extra_conf.yml buildstream-1.6.9/tests/format/include/junction/000077500000000000000000000000001437515270000217545ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction/element.bst000066400000000000000000000000151437515270000241130ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/junction/project.conf000066400000000000000000000000611437515270000242660ustar00rootroot00000000000000name: test (@): - junction.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/junction/subproject/000077500000000000000000000000001437515270000241345ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction/subproject/extra_conf.yml000066400000000000000000000000361437515270000270060ustar00rootroot00000000000000variables: included: 'True' buildstream-1.6.9/tests/format/include/junction/subproject/project.conf000066400000000000000000000000171437515270000264470ustar00rootroot00000000000000name: test-sub buildstream-1.6.9/tests/format/include/junction_options/000077500000000000000000000000001437515270000235275ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options/element.bst000066400000000000000000000000151437515270000256660ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/junction_options/project.conf000066400000000000000000000000611437515270000260410ustar00rootroot00000000000000name: test (@): - junction.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/junction_options/subproject/000077500000000000000000000000001437515270000257075ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options/subproject/extra_conf.yml000066400000000000000000000002141437515270000305570ustar00rootroot00000000000000(?): - local_option == 'default': variables: is-default: 'True' - local_option == 'set': variables: is-default: 'False' buildstream-1.6.9/tests/format/include/junction_options/subproject/project.conf000066400000000000000000000002511437515270000302220ustar00rootroot00000000000000name: test-sub options: local_option: type: enum description: Testing variable: local_option default: default values: - default - set buildstream-1.6.9/tests/format/include/junction_options_deep/000077500000000000000000000000001437515270000245245ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options_deep/element.bst000066400000000000000000000000151437515270000266630ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/junction_options_deep/project.conf000066400000000000000000000000631437515270000270400ustar00rootroot00000000000000name: test (@): - junction-1.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/junction_options_deep/subproject-1/000077500000000000000000000000001437515270000270425ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options_deep/subproject-1/extra_conf.yml000066400000000000000000000000451437515270000317140ustar00rootroot00000000000000(@): junction-2.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/junction_options_deep/subproject-1/project.conf000066400000000000000000000000211437515270000313500ustar00rootroot00000000000000name: test-sub-1 
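The junction_options fixtures check that options passed to a junction are used when resolving (?) conditionals in fragments included from that subproject, and the junction_options_deep variant does the same through a chain of two junctions. The junction element itself is written by the generate_junction() test helper, so its exact contents are not shown here; this is only a rough sketch assuming the junction plugin's config/options mapping:

    # junction.bst (sketch of what the helper is assumed to generate)
    kind: junction
    sources:
    - kind: git
      url: ...      # repository created by the test
      ref: ...
    config:
      options:
        local_option: set

    # extra_conf.yml in the subproject (pattern taken from the fixture)
    (?):
    - local_option == 'set':
        variables:
          is-default: 'False'

With local_option set on the junction, the tests expect %{vars} to report is-default as 'False'.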
buildstream-1.6.9/tests/format/include/junction_options_deep/subproject-2/000077500000000000000000000000001437515270000270435ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options_deep/subproject-2/extra_conf.yml000066400000000000000000000002141437515270000317130ustar00rootroot00000000000000(?): - local_option == 'default': variables: is-default: 'True' - local_option == 'set': variables: is-default: 'False' buildstream-1.6.9/tests/format/include/junction_options_deep/subproject-2/project.conf000066400000000000000000000002531437515270000313600ustar00rootroot00000000000000name: test-sub-2 options: local_option: type: enum description: Testing variable: local_option default: default values: - default - set buildstream-1.6.9/tests/format/include/junction_options_element/000077500000000000000000000000001437515270000252405ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options_element/element.bst000066400000000000000000000000631437515270000274020ustar00rootroot00000000000000kind: manual (@): - junction.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/junction_options_element/project.conf000066400000000000000000000000131437515270000275470ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/format/include/junction_options_element/subproject/000077500000000000000000000000001437515270000274205ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/junction_options_element/subproject/extra_conf.yml000066400000000000000000000002141437515270000322700ustar00rootroot00000000000000(?): - local_option == 'default': variables: is-default: 'True' - local_option == 'set': variables: is-default: 'False' buildstream-1.6.9/tests/format/include/junction_options_element/subproject/project.conf000066400000000000000000000002511437515270000317330ustar00rootroot00000000000000name: test-sub options: local_option: type: enum description: Testing variable: local_option default: default values: - default - set buildstream-1.6.9/tests/format/include/local_to_junction/000077500000000000000000000000001437515270000236305ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/local_to_junction/element.bst000066400000000000000000000000151437515270000257670ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/local_to_junction/project.conf000066400000000000000000000000611437515270000261420ustar00rootroot00000000000000name: test (@): - junction.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/local_to_junction/subproject/000077500000000000000000000000001437515270000260105ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/local_to_junction/subproject/extra_conf.yml000066400000000000000000000000261437515270000306610ustar00rootroot00000000000000(@): - internal.yml buildstream-1.6.9/tests/format/include/local_to_junction/subproject/internal.yml000066400000000000000000000000361437515270000303460ustar00rootroot00000000000000variables: included: 'True' buildstream-1.6.9/tests/format/include/local_to_junction/subproject/project.conf000066400000000000000000000000171437515270000303230ustar00rootroot00000000000000name: test-sub buildstream-1.6.9/tests/format/include/options/000077500000000000000000000000001437515270000216165ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/options/element.bst000066400000000000000000000000151437515270000237550ustar00rootroot00000000000000kind: manual 
buildstream-1.6.9/tests/format/include/options/extra_conf.yml000066400000000000000000000002051437515270000244660ustar00rootroot00000000000000options: build_arch: type: arch description: Architecture variable: build_arch values: - i586 - x86_64 buildstream-1.6.9/tests/format/include/options/project.conf000066400000000000000000000000441437515270000241310ustar00rootroot00000000000000name: test (@): - extra_conf.yml buildstream-1.6.9/tests/format/include/overrides-junction/000077500000000000000000000000001437515270000237545ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/overrides-junction/element.bst000066400000000000000000000000151437515270000261130ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/overrides-junction/project.conf000066400000000000000000000004251437515270000262720ustar00rootroot00000000000000name: test elements: junction: variables: main_override: True manual: variables: manual_main_override: True config: build-commands: - "first" sources: git: variables: from_main: True (@): - junction.bst:extra_conf.yml buildstream-1.6.9/tests/format/include/overrides-junction/subproject/000077500000000000000000000000001437515270000261345ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/overrides-junction/subproject/extra_conf.yml000066400000000000000000000003771437515270000310160ustar00rootroot00000000000000elements: junction: variables: included_override: True manual: variables: manual_included_override: True config: build-commands: (>): - "second" sources: git: variables: from_included: True buildstream-1.6.9/tests/format/include/overrides-junction/subproject/project.conf000066400000000000000000000000171437515270000304470ustar00rootroot00000000000000name: test-sub buildstream-1.6.9/tests/format/include/overrides/000077500000000000000000000000001437515270000221255ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/overrides/element.bst000066400000000000000000000000151437515270000242640ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/overrides/extra_conf.yml000066400000000000000000000003611437515270000250000ustar00rootroot00000000000000elements: junction: variables: included_override: True manual: variables: manual_included_override: True config: build-commands: - "ignored" sources: git: variables: from_included: True buildstream-1.6.9/tests/format/include/overrides/extra_conf2.yml000066400000000000000000000001101437515270000250520ustar00rootroot00000000000000elements: manual: config: build-commands: - "first" buildstream-1.6.9/tests/format/include/overrides/project.conf000066400000000000000000000004541437515270000244450ustar00rootroot00000000000000name: test elements: junction: variables: main_override: True manual: variables: manual_main_override: True config: build-commands: (>): - "second" sources: git: variables: from_main: True (@): - extra_conf.yml - extra_conf2.yml buildstream-1.6.9/tests/format/include/overrides/subproject/000077500000000000000000000000001437515270000243055ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/overrides/subproject/project.conf000066400000000000000000000000171437515270000266200ustar00rootroot00000000000000name: test-sub buildstream-1.6.9/tests/format/include/recursive/000077500000000000000000000000001437515270000221325ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/recursive/element.bst000066400000000000000000000000151437515270000242710ustar00rootroot00000000000000kind: manual 
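The overrides fixtures above test how the elements: overrides in project.conf compose across includes: extra_conf.yml sets the manual element's build-commands to "ignored", extra_conf2.yml is included later and replaces that literal list with "first", and the including project.conf then appends "second" with the (>) directive, which is why test_include_element_overrides_composition expects ['first', 'second']. A trimmed view of the three layers:

    # extra_conf.yml
    elements:
      manual:
        config:
          build-commands:
          - "ignored"

    # extra_conf2.yml, a later include replaces the literal list
    elements:
      manual:
        config:
          build-commands:
          - "first"

    # project.conf, the including file appends with (>)
    elements:
      manual:
        config:
          build-commands:
            (>):
            - "second"
    (@):
    - extra_conf.yml
    - extra_conf2.yml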
buildstream-1.6.9/tests/format/include/recursive/extra_conf.yml000066400000000000000000000000311437515270000247770ustar00rootroot00000000000000(@): - extra_conf2.yml buildstream-1.6.9/tests/format/include/recursive/extra_conf2.yml000066400000000000000000000000301437515270000250600ustar00rootroot00000000000000(@): - extra_conf.yml buildstream-1.6.9/tests/format/include/recursive/project.conf000066400000000000000000000000441437515270000244450ustar00rootroot00000000000000name: test (@): - extra_conf.yml buildstream-1.6.9/tests/format/include/string/000077500000000000000000000000001437515270000214315ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/string/element.bst000066400000000000000000000000151437515270000235700ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/string/extra_conf.yml000066400000000000000000000000361437515270000243030ustar00rootroot00000000000000variables: included: 'True' buildstream-1.6.9/tests/format/include/string/project.conf000066400000000000000000000000401437515270000237400ustar00rootroot00000000000000name: test (@): extra_conf.yml buildstream-1.6.9/tests/format/include/sub-include/000077500000000000000000000000001437515270000223355ustar00rootroot00000000000000buildstream-1.6.9/tests/format/include/sub-include/element.bst000066400000000000000000000000151437515270000244740ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/include/sub-include/manual_conf.yml000066400000000000000000000000341437515270000253370ustar00rootroot00000000000000variables: included: True buildstream-1.6.9/tests/format/include/sub-include/project.conf000066400000000000000000000001011437515270000246420ustar00rootroot00000000000000name: test elements: manual: (@): - manual_conf.yml buildstream-1.6.9/tests/format/include_composition.py000066400000000000000000000064471437515270000231330ustar00rootroot00000000000000import os from buildstream._context import Context from buildstream._project import Project from buildstream._includes import Includes from buildstream import _yaml def make_includes(basedir): _yaml.dump({'name': 'test'}, os.path.join(basedir, 'project.conf')) context = Context() project = Project(basedir, context) loader = project.loader return Includes(loader) def test_main_has_prority(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml'], 'test': ['main']}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': ['a']}, str(tmpdir.join('a.yml'))) includes.process(main) assert main['test'] == ['main'] def test_include_cannot_append(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml'], 'test': ['main']}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': {'(>)': ['a']}}, str(tmpdir.join('a.yml'))) includes.process(main) assert main['test'] == ['main'] def test_main_can_append(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml'], 'test': {'(>)': ['main']}}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': ['a']}, str(tmpdir.join('a.yml'))) includes.process(main) assert main['test'] == ['a', 'main'] def test_sibling_cannot_append_backward(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml', 'b.yml']}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': {'(>)': ['a']}}, str(tmpdir.join('a.yml'))) _yaml.dump({'test': ['b']}, str(tmpdir.join('b.yml'))) 
includes.process(main) assert main['test'] == ['b'] def test_sibling_can_append_forward(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml', 'b.yml']}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': ['a']}, str(tmpdir.join('a.yml'))) _yaml.dump({'test': {'(>)': ['b']}}, str(tmpdir.join('b.yml'))) includes.process(main) assert main['test'] == ['a', 'b'] def test_lastest_sibling_has_priority(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml', 'b.yml']}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': ['a']}, str(tmpdir.join('a.yml'))) _yaml.dump({'test': ['b']}, str(tmpdir.join('b.yml'))) includes.process(main) assert main['test'] == ['b'] def test_main_keeps_keys(tmpdir): includes = make_includes(str(tmpdir)) _yaml.dump({'(@)': ['a.yml'], 'something': 'else'}, str(tmpdir.join('main.yml'))) main = _yaml.load(str(tmpdir.join('main.yml'))) _yaml.dump({'test': ['a']}, str(tmpdir.join('a.yml'))) includes.process(main) assert main['test'] == ['a'] assert main['something'] == 'else' buildstream-1.6.9/tests/format/list-directive-error-element/000077500000000000000000000000001437515270000242055ustar00rootroot00000000000000buildstream-1.6.9/tests/format/list-directive-error-element/config.bst000066400000000000000000000002141437515270000261610ustar00rootroot00000000000000kind: autotools config: outigration-frob-mans: (>): - name: Appending to something - value: that is not integration-commands buildstream-1.6.9/tests/format/list-directive-error-element/environment.bst000066400000000000000000000001561437515270000272650ustar00rootroot00000000000000kind: autotools environment: foo: (=): - name: Overriding - value: Something that doesnt exist buildstream-1.6.9/tests/format/list-directive-error-element/project.conf000066400000000000000000000000131437515270000265140ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/format/list-directive-error-element/public.bst000066400000000000000000000002051437515270000261720ustar00rootroot00000000000000kind: autotools public: foo: the-foo-list: (>): - name: Appending to something - value: that does not exist buildstream-1.6.9/tests/format/list-directive-error-element/variables.bst000066400000000000000000000001571437515270000266720ustar00rootroot00000000000000kind: autotools variables: foo: (<): - name: Prepending - value: To something that doesnt exist buildstream-1.6.9/tests/format/list-directive-error-project/000077500000000000000000000000001437515270000242225ustar00rootroot00000000000000buildstream-1.6.9/tests/format/list-directive-error-project/element.bst000066400000000000000000000000201437515270000263550ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/list-directive-error-project/project.conf000066400000000000000000000001371437515270000265400ustar00rootroot00000000000000name: test non-existant: (>): - name: Appending - value: To something that doesnt exist buildstream-1.6.9/tests/format/list-directive-type-error/000077500000000000000000000000001437515270000235355ustar00rootroot00000000000000buildstream-1.6.9/tests/format/list-directive-type-error/element.bst000066400000000000000000000001411437515270000256740ustar00rootroot00000000000000kind: autotools sources: (?): - arch == "x86_64": - url: https://example.com/x86_64 buildstream-1.6.9/tests/format/list-directive-type-error/project.conf000066400000000000000000000001701437515270000260500ustar00rootroot00000000000000name: test 
options: arch: type: arch description: Example architecture option values: [ x86_32, x86_64 ] buildstream-1.6.9/tests/format/listdirectiveerrors.py000066400000000000000000000030321437515270000231570ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) def test_project_error(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'list-directive-error-project') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.TRAILING_LIST_DIRECTIVE) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target", [ ('variables.bst'), ('environment.bst'), ('config.bst'), ('public.bst') ]) def test_element_error(cli, datafiles, target): project = os.path.join(datafiles.dirname, datafiles.basename, 'list-directive-error-element') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.TRAILING_LIST_DIRECTIVE) @pytest.mark.datafiles(DATA_DIR) def test_project_error(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'list-directive-type-error') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.ILLEGAL_COMPOSITE) buildstream-1.6.9/tests/format/option-arch/000077500000000000000000000000001437515270000207235ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-arch/element.bst000066400000000000000000000002341437515270000230650ustar00rootroot00000000000000kind: autotools variables: result: "Nothing" (?): - machine_arch == "arm": result: "Army" - machine_arch == "aarch64": result: "Aarchy" buildstream-1.6.9/tests/format/option-arch/project.conf000066400000000000000000000002021437515270000232320ustar00rootroot00000000000000name: test options: machine_arch: type: arch description: The machine architecture values: - arm - aarch64 buildstream-1.6.9/tests/format/option-bool/000077500000000000000000000000001437515270000207415ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-bool/element-equals.bst000066400000000000000000000001421437515270000243710ustar00rootroot00000000000000kind: autotools variables: thepony: "not pony" (?): - pony == True: thepony: "a pony" buildstream-1.6.9/tests/format/option-bool/element-not-equals.bst000066400000000000000000000001421437515270000251670ustar00rootroot00000000000000kind: autotools variables: thepony: "a pony" (?): - pony != True: thepony: "not pony" buildstream-1.6.9/tests/format/option-bool/element-not.bst000066400000000000000000000001361437515270000237020ustar00rootroot00000000000000kind: autotools variables: thepony: "a pony" (?): - not pony: thepony: "not pony" buildstream-1.6.9/tests/format/option-bool/element.bst000066400000000000000000000001321437515270000231000ustar00rootroot00000000000000kind: autotools variables: thepony: "not pony" (?): - pony: thepony: "a pony" buildstream-1.6.9/tests/format/option-bool/project.conf000066400000000000000000000001461437515270000232570ustar00rootroot00000000000000name: test options: pony: type: bool description: Whether a pony or not default: False 
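The option-bool fixtures ending above cover the condition spellings accepted for a boolean project option: a bare option name, not, == and !=. A compact version of the element side, assuming the pony option declared in the project.conf just before this point:

    # element.bst, the bare-name form; the other fixtures use
    # "not pony", "pony == True" and "pony != True" in the same place
    kind: autotools
    variables:
      thepony: "not pony"
      (?):
      - pony:
          thepony: "a pony"

On the command line the option is passed before the subcommand, as in bst --option pony True show --deps none --format '%{vars}' element.bst, which matches how the optionbool.py tests further down invoke it.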
buildstream-1.6.9/tests/format/option-element-mask-invalid/000077500000000000000000000000001437515270000240145ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-element-mask-invalid/pony.bst000066400000000000000000000001471437515270000255150ustar00rootroot00000000000000kind: autotools variables: debug: False (?): - ("pony.bst" in debug_elements): debug: True buildstream-1.6.9/tests/format/option-element-mask-invalid/project.conf000066400000000000000000000003471437515270000263350ustar00rootroot00000000000000name: test options: debug_elements: type: element-mask description: The elements to build in debug mode # Values are not allowed to be declared on element mask options values: - pony - horsy - zebry buildstream-1.6.9/tests/format/option-element-mask/000077500000000000000000000000001437515270000223705ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-element-mask/giraffy.bst000066400000000000000000000001521437515270000245270ustar00rootroot00000000000000kind: autotools variables: debug: False (?): - ("giraffy.bst" in debug_elements): debug: True buildstream-1.6.9/tests/format/option-element-mask/horsy.bst000066400000000000000000000001501437515270000242420ustar00rootroot00000000000000kind: autotools variables: debug: False (?): - ("horsy.bst" in debug_elements): debug: True buildstream-1.6.9/tests/format/option-element-mask/pony.bst000066400000000000000000000001471437515270000240710ustar00rootroot00000000000000kind: autotools variables: debug: False (?): - ("pony.bst" in debug_elements): debug: True buildstream-1.6.9/tests/format/option-element-mask/project.conf000066400000000000000000000001631437515270000247050ustar00rootroot00000000000000name: test options: debug_elements: type: element-mask description: The elements to build in debug mode buildstream-1.6.9/tests/format/option-element-mask/zebry.bst000066400000000000000000000001501437515270000242310ustar00rootroot00000000000000kind: autotools variables: debug: False (?): - ("zebry.bst" in debug_elements): debug: True buildstream-1.6.9/tests/format/option-enum-missing/000077500000000000000000000000001437515270000224215ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-enum-missing/element.bst000066400000000000000000000000201437515270000245540ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/option-enum-missing/project.conf000066400000000000000000000001631437515270000247360ustar00rootroot00000000000000name: test options: empty: type: enum description: Invalid because no values are defined values: [] buildstream-1.6.9/tests/format/option-enum/000077500000000000000000000000001437515270000207525ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-enum/element-compare.bst000066400000000000000000000001441437515270000245400ustar00rootroot00000000000000kind: autotools variables: result: "different" (?): - brother == sister: result: "same" buildstream-1.6.9/tests/format/option-enum/element.bst000066400000000000000000000002251437515270000231140ustar00rootroot00000000000000kind: autotools variables: result: "a pony" (?): - brother == "zebry": result: "a zebry" - brother == "horsy": result: "a horsy" buildstream-1.6.9/tests/format/option-enum/project.conf000066400000000000000000000004601437515270000232670ustar00rootroot00000000000000name: test options: brother: type: enum description: The kind of animal of the brother values: - pony - horsy - zebry default: pony sister: type: enum description: The kind of animal of the sister values: - pony - horsy - zebry default: 
zebry buildstream-1.6.9/tests/format/option-exports/000077500000000000000000000000001437515270000215125ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-exports/element.bst000066400000000000000000000000201437515270000236450ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/option-exports/project.conf000066400000000000000000000007461437515270000240360ustar00rootroot00000000000000name: test options: bool_export: type: bool description: Exported boolean option default: False variable: exported-bool enum_export: type: enum description: Exported enum option values: - pony - horsy - zebry default: pony variable: exported-enum flags_export: type: flags description: Exported flags option values: - pony - horsy - zebry default: - pony - horsy variable: exported-flags buildstream-1.6.9/tests/format/option-flags-missing/000077500000000000000000000000001437515270000225515ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-flags-missing/element.bst000066400000000000000000000000201437515270000247040ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/option-flags-missing/project.conf000066400000000000000000000001641437515270000250670ustar00rootroot00000000000000name: test options: empty: type: flags description: Invalid because no values are defined values: [] buildstream-1.6.9/tests/format/option-flags/000077500000000000000000000000001437515270000211025ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-flags/element-in.bst000066400000000000000000000003111437515270000236440ustar00rootroot00000000000000kind: autotools variables: result: "a pony" (?): - ("zebry" in farm): result: "a zebry" - ("pony" not in farm): result: "no pony" - (animal not in farm): result: "no horsy" buildstream-1.6.9/tests/format/option-flags/element.bst000066400000000000000000000003641437515270000232500ustar00rootroot00000000000000kind: autotools variables: result: "a pony" (?): - farm == [ "zebry" ]: result: "a zebry" - farm == [ "horsy", "pony" ]: result: "a pony and a horsy" - farm == [ "horsy", "pony", "zebry" ]: result: "all the animals" buildstream-1.6.9/tests/format/option-flags/project.conf000066400000000000000000000005651437515270000234250ustar00rootroot00000000000000name: test options: # Include an enum option here so we can compare it animal: type: enum description: The kind of animal values: - pony - horsy - zebry default: horsy # A flags value to test farm: type: flags description: The kinds of animals on this farm values: - pony - horsy - zebry default: - pony buildstream-1.6.9/tests/format/option-list-directive.py000066400000000000000000000010461437515270000233100ustar00rootroot00000000000000import os import pytest from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("mount_devices", [("true"), ("false")]) def test_override(cli, datafiles, mount_devices): project = os.path.join(datafiles.dirname, datafiles.basename, "option-list-directive") bst_args = ["--option", "shell_mount_devices", mount_devices, "build"] result = cli.run(project=project, silent=True, args=bst_args) result.assert_success() buildstream-1.6.9/tests/format/option-list-directive/000077500000000000000000000000001437515270000227355ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-list-directive/project.conf000066400000000000000000000004231437515270000252510ustar00rootroot00000000000000name: test options: 
shell_mount_devices: type: bool description: whether to mount devices in the shell default: false shell: host-files: - '/etc/passwd' - '/etc/group' (?): - shell_mount_devices: host-files: (>): - '/dev/dri' buildstream-1.6.9/tests/format/option-overrides/000077500000000000000000000000001437515270000220105ustar00rootroot00000000000000buildstream-1.6.9/tests/format/option-overrides/element.bst000066400000000000000000000000201437515270000241430ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/option-overrides/project.conf000066400000000000000000000006001437515270000243210ustar00rootroot00000000000000# Test case ensuring that we can use options # in the element overrides. # name: test options: arch: type: arch description: architecture values: [i686, x86_64] elements: autotools: variables: (?): - arch == 'i686': conf-global: --host=i686-unknown-linux-gnu - arch == 'x86_64': conf-global: --host=x86_64-unknown-linux-gnu buildstream-1.6.9/tests/format/optionarch.py000066400000000000000000000046041437515270000212240ustar00rootroot00000000000000import os import pytest from contextlib import contextmanager from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) # Context manager to override the reported value of `os.uname()` @contextmanager def override_uname_arch(name): # # Disabling this test since we now run bst in a subprocess during tests. # pytest.xfail("Overriding os.uname() in bst subprocess is unsupported") orig_uname = os.uname orig_tuple = tuple(os.uname()) override_result = (orig_tuple[0], orig_tuple[1], orig_tuple[2], orig_tuple[3], name) def override(): return override_result os.uname = override yield os.uname = orig_uname @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("uname,value,expected", [ # Test explicitly provided arches ('arm', 'arm', 'Army'), ('arm', 'aarch64', 'Aarchy'), # Test automatically derived arches ('arm', None, 'Army'), ('aarch64', None, 'Aarchy'), # Test that explicitly provided arches dont error out # when the `uname` reported arch is not supported ('i386', 'arm', 'Army'), ('x86_64', 'aarch64', 'Aarchy'), ]) def test_conditional(cli, datafiles, uname, value, expected): with override_uname_arch(uname): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-arch') bst_args = [] if value is not None: bst_args += ['--option', 'machine_arch', value] bst_args += [ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst' ] result = cli.run(project=project, silent=True, args=bst_args) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['result'] == expected @pytest.mark.datafiles(DATA_DIR) def test_unsupported_arch(cli, datafiles): with override_uname_arch("x86_64"): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-arch') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst' ]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/format/optionbool.py000066400000000000000000000065341437515270000212460ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) 
@pytest.mark.parametrize("target,option,expected", [ # Test 'foo' syntax, and valid values of 'True' / 'False' ('element.bst', 'True', 'a pony'), ('element.bst', 'true', 'a pony'), ('element.bst', 'False', 'not pony'), ('element.bst', 'false', 'not pony'), # Test 'not foo' syntax ('element-not.bst', 'False', 'not pony'), ('element-not.bst', 'True', 'a pony'), # Test 'foo == True' syntax ('element-equals.bst', 'False', 'not pony'), ('element-equals.bst', 'True', 'a pony'), # Test 'foo != True' syntax ('element-not-equals.bst', 'False', 'not pony'), ('element-not-equals.bst', 'True', 'a pony'), ]) def test_conditional_cli(cli, datafiles, target, option, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-bool') result = cli.run(project=project, silent=True, args=[ '--option', 'pony', option, 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['thepony'] == expected # Test configuration of boolean option in the config file # @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,option,expected", [ ('element.bst', True, 'a pony'), ('element.bst', False, 'not pony'), ]) def test_conditional_config(cli, datafiles, target, option, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-bool') cli.configure({ 'projects': { 'test': { 'options': { 'pony': option } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['thepony'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("cli_option", [ ('falsey'), ('pony'), ('trUE') ]) def test_invalid_value_cli(cli, datafiles, cli_option): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-bool') result = cli.run(project=project, silent=True, args=[ '--option', 'pony', cli_option, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("config_option", [ ('pony'), (['its', 'a', 'list']), ({'dic': 'tionary'}) ]) def test_invalid_value_config(cli, datafiles, config_option): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-bool') cli.configure({ 'projects': { 'test': { 'options': { 'pony': config_option } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/format/optioneltmask.py000066400000000000000000000050761437515270000217530ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,value,expected", [ ('pony.bst', 'pony.bst', 'True'), ('horsy.bst', 'pony.bst, horsy.bst', 'True'), ('zebry.bst', 'pony.bst, horsy.bst', 'False'), ]) def test_conditional_cli(cli, datafiles, target, value, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-element-mask') result = cli.run(project=project, silent=True, args=[ '--option', 'debug_elements', value, 'show', '--deps', 'none', '--format', '%{vars}', target]) 
result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['debug'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,value,expected", [ ('pony.bst', ['pony.bst'], 'True'), ('horsy.bst', ['pony.bst', 'horsy.bst'], 'True'), ('zebry.bst', ['pony.bst', 'horsy.bst'], 'False'), ]) def test_conditional_config(cli, datafiles, target, value, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-element-mask') cli.configure({ 'projects': { 'test': { 'options': { 'debug_elements': value } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['debug'] == expected @pytest.mark.datafiles(DATA_DIR) def test_invalid_declaration(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-element-mask-invalid') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'pony.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_invalid_value(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-element-mask') result = cli.run(project=project, silent=True, args=[ '--option', 'debug_elements', 'kitten.bst', 'show', '--deps', 'none', '--format', '%{vars}', 'pony.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/format/optionenum.py000066400000000000000000000073311437515270000212530ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,option,value,expected", [ # Test 'var == "foo"' syntax ('element.bst', 'brother', 'pony', 'a pony'), ('element.bst', 'brother', 'zebry', 'a zebry'), ('element.bst', 'brother', 'horsy', 'a horsy'), # Test 'var1 == var2' syntax ('element-compare.bst', 'brother', 'horsy', 'different'), ('element-compare.bst', 'brother', 'zebry', 'same'), ('element-compare.bst', 'sister', 'pony', 'same'), ]) def test_conditional_cli(cli, datafiles, target, option, value, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-enum') result = cli.run(project=project, silent=True, args=[ '--option', option, value, 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['result'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,option,value,expected", [ # Test 'var == "foo"' syntax ('element.bst', 'brother', 'pony', 'a pony'), ('element.bst', 'brother', 'zebry', 'a zebry'), ('element.bst', 'brother', 'horsy', 'a horsy'), # Test 'var1 == var2' syntax ('element-compare.bst', 'brother', 'horsy', 'different'), ('element-compare.bst', 'brother', 'zebry', 'same'), ('element-compare.bst', 'sister', 'pony', 'same'), ]) def test_conditional_config(cli, datafiles, target, option, value, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-enum') cli.configure({ 'projects': { 'test': { 'options': { option: value } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target]) 
result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['result'] == expected @pytest.mark.datafiles(DATA_DIR) def test_invalid_value_cli(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-enum') result = cli.run(project=project, silent=True, args=[ '--option', 'brother', 'giraffy', 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("config_option", [ ('giraffy'), (['its', 'a', 'list']), ({'dic': 'tionary'}) ]) def test_invalid_value_config(cli, datafiles, config_option): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-enum') cli.configure({ 'projects': { 'test': { 'options': { 'brother': config_option } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_missing_values(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-enum-missing') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/format/optionexports.py000066400000000000000000000021711437515270000220100ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("option_name,option_value,var_name,var_value", [ # Test boolean ('bool_export', 'False', 'exported-bool', '0'), ('bool_export', 'True', 'exported-bool', '1'), # Enum ('enum_export', 'pony', 'exported-enum', 'pony'), ('enum_export', 'horsy', 'exported-enum', 'horsy'), # Flags ('flags_export', 'pony', 'exported-flags', 'pony'), ('flags_export', 'pony, horsy', 'exported-flags', 'horsy,pony'), ]) def test_export(cli, datafiles, option_name, option_value, var_name, var_value): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-exports') result = cli.run(project=project, silent=True, args=[ '--option', option_name, option_value, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded[var_name] == var_value buildstream-1.6.9/tests/format/optionflags.py000066400000000000000000000102311437515270000213740ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,option,value,expected", [ # Test (var == [ "foo" ]) syntax ('element.bst', 'farm', 'pony', 'a pony'), ('element.bst', 'farm', 'zebry', 'a zebry'), ('element.bst', 'farm', 'pony, horsy', 'a pony and a horsy'), ('element.bst', 'farm', 'zebry,horsy , pony', 'all the animals'), # Test ("literal" in var) syntax ('element-in.bst', 'farm', 'zebry, horsy, pony', 'a zebry'), # Test ("literal" not in var) syntax ('element-in.bst', 'farm', 'zebry, horsy', 'no pony'), # Test (var1 not in var2) syntax (where var1 is enum and var2 is flags) ('element-in.bst', 'farm', 'zebry, pony', 
'no horsy'), ]) def test_conditional_cli(cli, datafiles, target, option, value, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-flags') result = cli.run(project=project, silent=True, args=[ '--option', option, value, 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['result'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,option,value,expected", [ # Test 'var == [ "foo" ]' syntax ('element.bst', 'farm', ['pony'], 'a pony'), ('element.bst', 'farm', ['zebry'], 'a zebry'), ('element.bst', 'farm', ['pony', 'horsy'], 'a pony and a horsy'), ('element.bst', 'farm', ['zebry', 'horsy', 'pony'], 'all the animals'), ]) def test_conditional_config(cli, datafiles, target, option, value, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-flags') cli.configure({ 'projects': { 'test': { 'options': { option: value } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target]) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['result'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("cli_option", [ ('giraffy'), # Not a valid animal for the farm option ('horsy pony') # Does not include comma separators ]) def test_invalid_value_cli(cli, datafiles, cli_option): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-flags') result = cli.run(project=project, silent=True, args=[ '--option', 'farm', cli_option, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("config_option", [ ('pony'), # Not specified as a list (['horsy', 'pony', 'giraffy']), # Invalid giraffy animal for farm option ({'dic': 'tionary'}) # Dicts also dont make sense in the config for flags ]) def test_invalid_value_config(cli, datafiles, config_option): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-flags') cli.configure({ 'projects': { 'test': { 'options': { 'farm': config_option } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_missing_values(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-flags-missing') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/format/optionoverrides.py000066400000000000000000000015431437515270000223100ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("arch", [('i686'), ('x86_64')]) def test_override(cli, datafiles, arch): project = os.path.join(datafiles.dirname, datafiles.basename, 'option-overrides') bst_args = ['--option', 'arch', arch] bst_args += [ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst' ] result = cli.run(project=project, silent=True, args=bst_args) result.assert_success() # See the associated project.conf for the expected values 
expected_value = '--host={}-unknown-linux-gnu'.format(arch) loaded = _yaml.load_data(result.output) assert loaded['conf-global'] == expected_value buildstream-1.6.9/tests/format/options.py000066400000000000000000000200561437515270000205500ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'options' ) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("project_dir", [ ('invalid-name-spaces'), ('invalid-name-dashes'), ('invalid-name-plus'), ('invalid-name-leading-number'), ]) def test_invalid_option_name(cli, datafiles, project_dir): project = os.path.join(datafiles.dirname, datafiles.basename, project_dir) result = cli.run(project=project, silent=True, args=['show', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_SYMBOL_NAME) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("project_dir", [ ('invalid-variable-name-spaces'), ('invalid-variable-name-plus'), ]) def test_invalid_variable_name(cli, datafiles, project_dir): project = os.path.join(datafiles.dirname, datafiles.basename, project_dir) result = cli.run(project=project, silent=True, args=['show', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_SYMBOL_NAME) @pytest.mark.datafiles(DATA_DIR) def test_invalid_option_type(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'invalid-type') # Test with the opt option set result = cli.run(project=project, silent=True, args=[ '--option', 'opt', 'funny', 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_invalid_option_cli(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'simple-condition') # Test with the opt option set result = cli.run(project=project, silent=True, args=[ '--option', 'fart', 'funny', 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_invalid_option_config(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'simple-condition') cli.configure({ 'projects': { 'test': { 'options': { 'fart': 'Hello' } } } }) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_invalid_expression(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'invalid-expression') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.EXPRESSION_FAILED) @pytest.mark.datafiles(DATA_DIR) def test_undefined(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'undefined-variable') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.EXPRESSION_FAILED) @pytest.mark.datafiles(DATA_DIR) def test_invalid_condition(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 
'invalid-condition') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("opt_option,expected_prefix", [ ('False', '/usr'), ('True', '/opt'), ]) def test_simple_conditional(cli, datafiles, opt_option, expected_prefix): project = os.path.join(datafiles.dirname, datafiles.basename, 'simple-condition') # Test with the opt option set result = cli.run(project=project, silent=True, args=[ '--option', 'opt', opt_option, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['prefix'] == expected_prefix @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("debug,logging,expected", [ ('False', 'False', 'False'), ('True', 'False', 'False'), ('False', 'True', 'False'), ('True', 'True', 'True'), ]) def test_nested_conditional(cli, datafiles, debug, logging, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'nested-condition') # Test with the opt option set result = cli.run(project=project, silent=True, args=[ '--option', 'debug', debug, '--option', 'logging', logging, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['debug'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("debug,logging,expected", [ ('False', 'False', 'False'), ('True', 'False', 'False'), ('False', 'True', 'False'), ('True', 'True', 'True'), ]) def test_compound_and_conditional(cli, datafiles, debug, logging, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'compound-and-condition') # Test with the opt option set result = cli.run(project=project, silent=True, args=[ '--option', 'debug', debug, '--option', 'logging', logging, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['debug'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("debug,logging,expected", [ ('False', 'False', 'False'), ('True', 'False', 'True'), ('False', 'True', 'True'), ('True', 'True', 'True'), ]) def test_compound_or_conditional(cli, datafiles, debug, logging, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'compound-or-condition') # Test with the opt option set result = cli.run(project=project, silent=True, args=[ '--option', 'debug', debug, '--option', 'logging', logging, 'show', '--deps', 'none', '--format', '%{vars}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) assert loaded['logging'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("option,expected", [ ('False', 'horsy'), ('True', 'pony'), ]) def test_deep_nesting_level1(cli, datafiles, option, expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'deep-nesting') result = cli.run(project=project, silent=True, args=[ '--option', 'pony', option, 'show', '--deps', 'none', '--format', '%{public}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) shallow_list = loaded['shallow-nest'] first_dict = shallow_list[0] assert first_dict['animal'] == expected @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("option,expected", [ ('False', 'horsy'), ('True', 'pony'), ]) def test_deep_nesting_level2(cli, datafiles, option, 
expected): project = os.path.join(datafiles.dirname, datafiles.basename, 'deep-nesting') result = cli.run(project=project, silent=True, args=[ '--option', 'pony', option, 'show', '--deps', 'none', '--format', '%{public}', 'element-deeper.bst']) result.assert_success() loaded = _yaml.load_data(result.output) shallow_list = loaded['deep-nest'] deeper_list = shallow_list[0] first_dict = deeper_list[0] assert first_dict['animal'] == expected buildstream-1.6.9/tests/format/options/000077500000000000000000000000001437515270000201735ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/compound-and-condition/000077500000000000000000000000001437515270000245435ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/compound-and-condition/element.bst000066400000000000000000000000201437515270000266760ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/compound-and-condition/project.conf000066400000000000000000000005221437515270000270570ustar00rootroot00000000000000name: test options: debug: type: bool description: Whether debugging is enabled default: False logging: type: bool description: Whether logging is enabled default: False variables: debug: 'False' (?): # Debugging is not enabled unless logging is also enabled - logging and debug: debug: 'True' buildstream-1.6.9/tests/format/options/compound-or-condition/000077500000000000000000000000001437515270000244215ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/compound-or-condition/element.bst000066400000000000000000000000201437515270000265540ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/compound-or-condition/project.conf000066400000000000000000000005321437515270000267360ustar00rootroot00000000000000name: test options: debug: type: bool description: Whether debugging is enabled default: False logging: type: bool description: Whether logging is enabled default: False variables: logging: 'False' (?): # Logging is enabled if specified or if debugging is requested - logging or debug: logging: 'True' buildstream-1.6.9/tests/format/options/deep-nesting/000077500000000000000000000000001437515270000225555ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/deep-nesting/element-deeper.bst000066400000000000000000000002401437515270000261560ustar00rootroot00000000000000kind: autotools # Yep, this is a list of lists of dictionaries. 
# public: deep-nest: - - animal: horsy (?): - pony: animal: pony buildstream-1.6.9/tests/format/options/deep-nesting/element.bst000066400000000000000000000001451437515270000247200ustar00rootroot00000000000000kind: autotools public: shallow-nest: - animal: horsy (?): - pony: animal: pony buildstream-1.6.9/tests/format/options/deep-nesting/project.conf000066400000000000000000000001451437515270000250720ustar00rootroot00000000000000name: test options: pony: type: bool description: Whether a pony or not default: False buildstream-1.6.9/tests/format/options/invalid-condition/000077500000000000000000000000001437515270000236055ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-condition/element.bst000066400000000000000000000000201437515270000257400ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-condition/project.conf000066400000000000000000000003341437515270000261220ustar00rootroot00000000000000name: test options: opt: type: bool description: Whether to build in an opt prefix default: False variables: (?): - not: Allowed any adjacent keys beside the expression. opt: prefix: "/opt" buildstream-1.6.9/tests/format/options/invalid-expression/000077500000000000000000000000001437515270000240165ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-expression/element.bst000066400000000000000000000000201437515270000261510ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-expression/project.conf000066400000000000000000000002561437515270000263360ustar00rootroot00000000000000name: test options: opt: type: bool description: Whether to build in an opt prefix default: False variables: (?): - is defined pony: prefix: "/opt" buildstream-1.6.9/tests/format/options/invalid-name-dashes/000077500000000000000000000000001437515270000240045ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-name-dashes/element.bst000066400000000000000000000000201437515270000261370ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-name-dashes/project.conf000066400000000000000000000001631437515270000263210ustar00rootroot00000000000000name: test options: name-with-dashes: type: bool description: An invalid option name default: False buildstream-1.6.9/tests/format/options/invalid-name-leading-number/000077500000000000000000000000001437515270000254265ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-name-leading-number/element.bst000066400000000000000000000000201437515270000275610ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-name-leading-number/project.conf000066400000000000000000000001651437515270000277450ustar00rootroot00000000000000name: test options: 123number_is_first: type: bool description: An invalid option name default: False buildstream-1.6.9/tests/format/options/invalid-name-plus/000077500000000000000000000000001437515270000235205ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-name-plus/element.bst000066400000000000000000000000201437515270000256530ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-name-plus/project.conf000066400000000000000000000001561437515270000260370ustar00rootroot00000000000000name: test options: name_with_+: type: bool description: An invalid option name default: False 
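The invalid-name fixtures above and below each ship a static project.conf whose option name breaks the symbol rules exercised by test_invalid_option_name (spaces, dashes, a '+', a leading digit). As a rough sketch only — not part of this archive — an equivalent fixture could be generated at test time with the same _yaml.dump() helper these tests already use; the directory layout and option name below are illustrative assumptions.

import os
from buildstream import _yaml

def write_option_fixture(directory, option_name='opt'):
    # Sketch: write a minimal project.conf declaring one boolean option,
    # mirroring the static fixtures in this archive. The invalid fixtures
    # differ only in the option name (spaces, dashes, '+', leading digit).
    project_conf = {
        'name': 'test',
        'options': {
            option_name: {
                'type': 'bool',
                'description': 'An illustrative option',
                'default': False,
            },
        },
    }
    _yaml.dump(project_conf, os.path.join(directory, 'project.conf'))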
buildstream-1.6.9/tests/format/options/invalid-name-spaces/000077500000000000000000000000001437515270000240135ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-name-spaces/element.bst000066400000000000000000000000201437515270000261460ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-name-spaces/project.conf000066400000000000000000000001631437515270000263300ustar00rootroot00000000000000name: test options: name with spaces: type: bool description: An invalid option name default: False buildstream-1.6.9/tests/format/options/invalid-type/000077500000000000000000000000001437515270000226005ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-type/element.bst000066400000000000000000000000201437515270000247330ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-type/project.conf000066400000000000000000000002521437515270000251140ustar00rootroot00000000000000name: test options: opt: type: funny description: This aint really an option type default: False variables: (?): - opt is funny: prefix: "/opt" buildstream-1.6.9/tests/format/options/invalid-variable-name-plus/000077500000000000000000000000001437515270000253035ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-variable-name-plus/element.bst000066400000000000000000000000211437515270000274370ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-variable-name-plus/project.conf000066400000000000000000000001631437515270000276200ustar00rootroot00000000000000name: test options: pony: type: bool description: Whether a pony default: False variable: pony+ buildstream-1.6.9/tests/format/options/invalid-variable-name-spaces/000077500000000000000000000000001437515270000255765ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/invalid-variable-name-spaces/element.bst000066400000000000000000000000211437515270000277320ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/invalid-variable-name-spaces/project.conf000066400000000000000000000001721437515270000301130ustar00rootroot00000000000000name: test options: pony: type: bool description: Whether a pony default: False variable: the variable buildstream-1.6.9/tests/format/options/nested-condition/000077500000000000000000000000001437515270000234415ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/nested-condition/element.bst000066400000000000000000000000201437515270000255740ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/nested-condition/project.conf000066400000000000000000000005521437515270000257600ustar00rootroot00000000000000name: test options: debug: type: bool description: Whether debugging is enabled default: False logging: type: bool description: Whether logging is enabled default: False variables: debug: 'False' (?): - logging: # Debugging is not enabled unless logging is also enabled (?): - debug: debug: 'True' buildstream-1.6.9/tests/format/options/simple-condition/000077500000000000000000000000001437515270000234505ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/simple-condition/element.bst000066400000000000000000000000201437515270000256030ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/simple-condition/project.conf000066400000000000000000000002421437515270000257630ustar00rootroot00000000000000name: test options: opt: type: bool description: Whether to build in 
an opt prefix default: False variables: (?): - opt: prefix: "/opt" buildstream-1.6.9/tests/format/options/undefined-variable/000077500000000000000000000000001437515270000237175ustar00rootroot00000000000000buildstream-1.6.9/tests/format/options/undefined-variable/element.bst000066400000000000000000000000211437515270000260530ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/options/undefined-variable/project.conf000066400000000000000000000002301437515270000262270ustar00rootroot00000000000000name: test options: pony: type: bool description: Whether a pony default: False variables: (?): - pony == foo: prefix: "/opt" buildstream-1.6.9/tests/format/project-overrides/000077500000000000000000000000001437515270000221465ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project-overrides/prepend-configure-commands/000077500000000000000000000000001437515270000273615ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project-overrides/prepend-configure-commands/element.bst000066400000000000000000000000201437515270000315140ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/project-overrides/prepend-configure-commands/project.conf000066400000000000000000000002471437515270000317010ustar00rootroot00000000000000name: test # Test that prepending to configure-commands works elements: autotools: config: configure-commands: (<): - echo "Hello World!" buildstream-1.6.9/tests/format/project.py000066400000000000000000000216751437515270000205330ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils import cli, filetypegenerator # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_missing_project_conf(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_PROJECT_CONF) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_missing_project_name(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "missingname") result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_missing_element(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "missing-element") result = cli.run(project=project, args=['show', 'manual.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) # Assert that we have the expected provenance encoded into the error assert "manual.bst [line 4 column 2]" in result.stderr @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_missing_junction(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "missing-junction") result = cli.run(project=project, args=['show', 'manual.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) # Assert that we have the expected provenance encoded into the error assert "manual.bst [line 4 column 2]" in result.stderr @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_empty_project_name(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "emptyname") result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, 
LoadErrorReason.INVALID_SYMBOL_NAME) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_invalid_project_name(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "invalidname") result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_SYMBOL_NAME) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_invalid_yaml(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "invalid-yaml") result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_YAML) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_load_default_project(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "default") result = cli.run(project=project, args=[ 'show', '--format', '%{env}', 'manual.bst' ]) result.assert_success() # Read back some of our project defaults from the env env = _yaml.load_data(result.output) assert (env['USER'] == "tomjon") assert (env['TERM'] == "dumb") @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_load_project_from_subdir(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'project-from-subdir') result = cli.run( project=project, cwd=os.path.join(project, 'subdirectory'), args=['show', '--format', '%{env}', 'manual.bst']) result.assert_success() # Read back some of our project defaults from the env env = _yaml.load_data(result.output) assert (env['USER'] == "tomjon") assert (env['TERM'] == "dumb") @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_override_project_path(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "overridepath") result = cli.run(project=project, args=[ 'show', '--format', '%{env}', 'manual.bst' ]) result.assert_success() # Read back the overridden path env = _yaml.load_data(result.output) assert (env['PATH'] == "/bin:/sbin") @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_project_unsupported(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "unsupported") result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.UNSUPPORTED_PROJECT) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_project_unsupported_not_bst1(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, "not-bst-1") result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.UNSUPPORTED_PROJECT) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'element-path')) def test_missing_element_path_directory(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'element-path')) def test_element_path_not_a_directory(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) path = os.path.join(project, 'elements') for file_type in filetypegenerator.generate_file_types(path): result = cli.run(project=project, args=['workspace', 'list']) if not os.path.isdir(path): result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND) else: result.assert_success() @pytest.mark.datafiles(os.path.join(DATA_DIR, 'local-plugin')) def test_missing_local_plugin_directory(cli, datafiles): project = 
os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'local-plugin')) def test_local_plugin_not_directory(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) path = os.path.join(project, 'plugins') for file_type in filetypegenerator.generate_file_types(path): result = cli.run(project=project, args=['workspace', 'list']) if not os.path.isdir(path): result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND) else: result.assert_success() @pytest.mark.datafiles(DATA_DIR) def test_project_plugin_load_allowed(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'plugin-allowed') result = cli.run(project=project, silent=True, args=[ 'show', 'element.bst']) result.assert_success() @pytest.mark.datafiles(DATA_DIR) def test_project_plugin_load_forbidden(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'plugin-forbidden') result = cli.run(project=project, silent=True, args=[ 'show', 'element.bst']) result.assert_main_error(ErrorDomain.PLUGIN, None) @pytest.mark.datafiles(DATA_DIR) def test_project_conf_duplicate_plugins(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'duplicate-plugins') result = cli.run(project=project, silent=True, args=[ 'show', 'element.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_YAML) # Assert that we get a different cache key for target.bst, depending # on a conditional statement we have placed in the project.refs file. # @pytest.mark.datafiles(DATA_DIR) def test_project_refs_options(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'refs-options') result1 = cli.run(project=project, silent=True, args=[ '--option', 'test', 'True', 'show', '--deps', 'none', '--format', '%{key}', 'target.bst']) result1.assert_success() result2 = cli.run(project=project, silent=True, args=[ '--option', 'test', 'False', 'show', '--deps', 'none', '--format', '%{key}', 'target.bst']) result2.assert_success() # Assert that the cache keys are different assert result1.output != result2.output @pytest.mark.datafiles(os.path.join(DATA_DIR, 'element-path')) def test_element_path_project_path_contains_symlinks(cli, datafiles, tmpdir): real_project = str(datafiles) linked_project = os.path.join(str(tmpdir), 'linked') os.symlink(real_project, linked_project) os.makedirs(os.path.join(real_project, 'elements'), exist_ok=True) with open(os.path.join(real_project, 'elements', 'element.bst'), 'w') as f: f.write("kind: manual\n") result = cli.run(project=linked_project, args=['show', 'element.bst']) result.assert_success() buildstream-1.6.9/tests/format/project/000077500000000000000000000000001437515270000201465ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/default/000077500000000000000000000000001437515270000215725ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/default/manual.bst000066400000000000000000000000151437515270000235550ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/project/default/project.conf000066400000000000000000000001121437515270000241010ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony 
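Nearly every test in project.py and the option modules above follows the same pattern: run `bst show` through the `cli` fixture with a `--format` string, then parse the YAML it prints. A condensed sketch of that pattern follows; the element and variable names in the usage comment are placeholders, not fixtures from this archive.

from buildstream import _yaml

def show_variable(cli, project, element, varname):
    # Sketch of the recurring test pattern: render %{vars} for a single
    # element and read one variable back out of the YAML output.
    result = cli.run(project=project, silent=True, args=[
        'show', '--deps', 'none', '--format', '%{vars}', element])
    result.assert_success()
    loaded = _yaml.load_data(result.output)
    return loaded[varname]

# e.g. assert show_variable(cli, project, 'element.bst', 'prefix') == '/opt'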
buildstream-1.6.9/tests/format/project/duplicate-plugins/000077500000000000000000000000001437515270000235775ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/duplicate-plugins/bar/000077500000000000000000000000001437515270000243435ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/duplicate-plugins/bar/__init__.py000066400000000000000000000000001437515270000264420ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/duplicate-plugins/bar/foo.py000066400000000000000000000001531437515270000254770ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): pass def setup(): return FooElement buildstream-1.6.9/tests/format/project/duplicate-plugins/bar/frob.py000066400000000000000000000001551437515270000256460ustar00rootroot00000000000000from buildstream import Element class FrobElement(Element): pass def setup(): return FrobElement buildstream-1.6.9/tests/format/project/duplicate-plugins/baz/000077500000000000000000000000001437515270000243535ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/duplicate-plugins/baz/__init__.py000066400000000000000000000000001437515270000264520ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/duplicate-plugins/baz/foo.py000066400000000000000000000001531437515270000255070ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): pass def setup(): return FooElement buildstream-1.6.9/tests/format/project/duplicate-plugins/baz/frob.py000066400000000000000000000001551437515270000256560ustar00rootroot00000000000000from buildstream import Element class FrobElement(Element): pass def setup(): return FrobElement buildstream-1.6.9/tests/format/project/duplicate-plugins/element.bst000066400000000000000000000000201437515270000257320ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/format/project/duplicate-plugins/project.conf000066400000000000000000000002511437515270000261120ustar00rootroot00000000000000name: test plugins: - origin: local path: bar elements: foo: 0 sources: frob: 0 - origin: local path: baz elements: foo: 0 sources: frob: 0 buildstream-1.6.9/tests/format/project/element-path/000077500000000000000000000000001437515270000225315ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/element-path/project.conf000066400000000000000000000000411437515270000250410ustar00rootroot00000000000000name: foo element-path: elements buildstream-1.6.9/tests/format/project/emptyname/000077500000000000000000000000001437515270000221455ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/emptyname/project.conf000066400000000000000000000001561437515270000244640ustar00rootroot00000000000000# A project configuration with an invalid symbol for a project name, # this one is an empty string # name: '' buildstream-1.6.9/tests/format/project/invalid-yaml/000077500000000000000000000000001437515270000225345ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/invalid-yaml/manual.bst000066400000000000000000000000151437515270000245170ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/project/invalid-yaml/project.conf000066400000000000000000000001551437515270000250520ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony variables: sbindir: "%{bindir} 
buildstream-1.6.9/tests/format/project/invalidname/000077500000000000000000000000001437515270000224355ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/invalidname/project.conf000066400000000000000000000001661437515270000247550ustar00rootroot00000000000000# A project configuration with an invalid symbol for a project name, # this one contains a space # name: Project Name buildstream-1.6.9/tests/format/project/local-plugin/000077500000000000000000000000001437515270000225345ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/local-plugin/project.conf000066400000000000000000000001161437515270000250470ustar00rootroot00000000000000name: foo plugins: - origin: local path: plugins sources: mysource: 0 buildstream-1.6.9/tests/format/project/missing-element/000077500000000000000000000000001437515270000232465ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/missing-element/manual.bst000066400000000000000000000000451437515270000252340ustar00rootroot00000000000000kind: manual depends: - missing.bst buildstream-1.6.9/tests/format/project/missing-element/project.conf000066400000000000000000000000131437515270000255550ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/format/project/missing-junction/000077500000000000000000000000001437515270000234465ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/missing-junction/manual.bst000066400000000000000000000001071437515270000254330ustar00rootroot00000000000000kind: manual depends: - filename: element.bst junction: missing.bst buildstream-1.6.9/tests/format/project/missing-junction/project.conf000066400000000000000000000000131437515270000257550ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/format/project/missingname/000077500000000000000000000000001437515270000224605ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/missingname/project.conf000066400000000000000000000000001437515270000247630ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/not-bst-1/000077500000000000000000000000001437515270000216725ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/not-bst-1/project.conf000066400000000000000000000000661437515270000242110ustar00rootroot00000000000000# A BuildStream 2 project name: foo min-version: 2.0 buildstream-1.6.9/tests/format/project/overridepath/000077500000000000000000000000001437515270000226425ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/overridepath/manual.bst000066400000000000000000000000151437515270000246250ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/project/overridepath/project.conf000066400000000000000000000001741437515270000251610ustar00rootroot00000000000000# A project configuration which overrides the sandbox PATH environment variable name: foo environment: PATH: /bin:/sbin buildstream-1.6.9/tests/format/project/plugin-allowed/000077500000000000000000000000001437515270000230715ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-allowed/__init__.py000066400000000000000000000000001437515270000251700ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-allowed/element.bst000066400000000000000000000000121437515270000252250ustar00rootroot00000000000000kind: foo 
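Besides passing `--option` on the command line, several of the tests above set project options through the user configuration via `cli.configure()`. A minimal sketch of that second path, lifted from the shape of the calls in optionflags.py and options.py (the project name 'test' matches those fixtures; the option name and value are placeholders):

def configure_option(cli, option_name, value):
    # Sketch: inject a project option through the user configuration,
    # mirroring the cli.configure() calls used by the option tests.
    cli.configure({
        'projects': {
            'test': {
                'options': {
                    option_name: value,
                }
            }
        }
    })

# e.g. configure_option(cli, 'opt', 'True') before running 'bst show'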
buildstream-1.6.9/tests/format/project/plugin-allowed/plugins/000077500000000000000000000000001437515270000245525ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-allowed/plugins/__init__.py000066400000000000000000000000001437515270000266510ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-allowed/plugins/foo.py000066400000000000000000000003531437515270000257100ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): def configure(self, config): pass def preflight(self): pass def get_unique_key(self): return "foo" def setup(): return FooElement buildstream-1.6.9/tests/format/project/plugin-allowed/project.conf000066400000000000000000000001141437515270000254020ustar00rootroot00000000000000name: test plugins: - origin: local path: plugins elements: foo: 0 buildstream-1.6.9/tests/format/project/plugin-forbidden/000077500000000000000000000000001437515270000233765ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-forbidden/__init__.py000066400000000000000000000000001437515270000254750ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-forbidden/element.bst000066400000000000000000000000121437515270000255320ustar00rootroot00000000000000kind: bar buildstream-1.6.9/tests/format/project/plugin-forbidden/forbidden-plugins/000077500000000000000000000000001437515270000270115ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-forbidden/forbidden-plugins/__init__.py000066400000000000000000000000001437515270000311100ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/plugin-forbidden/forbidden-plugins/forbidden-plugin.py000066400000000000000000000003531437515270000326140ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): def configure(self, config): pass def preflight(self): pass def get_unique_key(self): return "foo" def setup(): return FooElement buildstream-1.6.9/tests/format/project/plugin-forbidden/project.conf000066400000000000000000000000141437515270000257060ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/format/project/project-from-subdir/000077500000000000000000000000001437515270000240435ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/project-from-subdir/manual.bst000066400000000000000000000000151437515270000260260ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/format/project/project-from-subdir/project.conf000066400000000000000000000001121437515270000263520ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony buildstream-1.6.9/tests/format/project/project-from-subdir/subdirectory/000077500000000000000000000000001437515270000265615ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/project-from-subdir/subdirectory/README000066400000000000000000000001151437515270000274360ustar00rootroot00000000000000This directory is used to test running commands from a project subdirectory. 
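Several of the negative tests above check not only the error domain but also that the message on stderr carries the provenance of the offending line. A short sketch of that assertion pattern, based on test_missing_element, test_missing_junction and the variables tests (the provenance string in the usage comment is copied from those tests, not newly invented):

from buildstream._exceptions import ErrorDomain, LoadErrorReason

def assert_missing_file(result, provenance):
    # Sketch: the main error is a load error for a missing file, and the
    # human-readable output on stderr names the file, line and column.
    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE)
    assert provenance in result.stderr

# e.g. assert_missing_file(result, "manual.bst [line 4 column 2]")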
buildstream-1.6.9/tests/format/project/refs-options/000077500000000000000000000000001437515270000225765ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/refs-options/project.conf000066400000000000000000000001671437515270000251170ustar00rootroot00000000000000name: test ref-storage: project.refs options: test: type: bool description: Test boolean default: False buildstream-1.6.9/tests/format/project/refs-options/project.refs000066400000000000000000000003101437515270000251170ustar00rootroot00000000000000# A project.refs file with a conditional statement # projects: test: target.bst: - ref: pony # Optionally override the ref (?): - test: target.bst: - ref: horsy buildstream-1.6.9/tests/format/project/refs-options/target.bst000066400000000000000000000002101437515270000245670ustar00rootroot00000000000000kind: import description: | Import some git repo with optional refs sources: - kind: git url: http://pony.com/git track: master buildstream-1.6.9/tests/format/project/unsupported/000077500000000000000000000000001437515270000225365ustar00rootroot00000000000000buildstream-1.6.9/tests/format/project/unsupported/project.conf000066400000000000000000000001321437515270000250470ustar00rootroot00000000000000# A project which requires a too new version of the format name: foo format-version: 5000 buildstream-1.6.9/tests/format/projectoverrides.py000066400000000000000000000014041437515270000224420ustar00rootroot00000000000000 import os import pytest from buildstream import _yaml from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project-overrides" ) @pytest.mark.datafiles(DATA_DIR) def test_prepend_configure_commands(cli, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename, 'prepend-configure-commands') result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{config}', 'element.bst']) result.assert_success() loaded = _yaml.load_data(result.output) config_commands = loaded['configure-commands'] assert len(config_commands) == 3 assert config_commands[0] == 'echo "Hello World!"' buildstream-1.6.9/tests/format/variables.py000066400000000000000000000162641437515270000210330ustar00rootroot00000000000000import os import pytest import sys from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "variables" ) ############################################################### # Test proper loading of some default commands from plugins # ############################################################### @pytest.mark.parametrize("target,varname,expected", [ ('autotools.bst', 'make-install', "make -j1 DESTDIR=\"/buildstream-install\" install"), ('cmake.bst', 'cmake', "cmake -B_builddir -H. 
-G\"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX:PATH=\"/usr\" \\\n" + "-DCMAKE_INSTALL_LIBDIR:PATH=\"lib\" "), ('distutils.bst', 'python-install', "python3 setup.py install --prefix \"/usr\" \\\n" + "--root \"/buildstream-install\""), ('makemaker.bst', 'configure', "perl Makefile.PL PREFIX=/buildstream-install/usr"), ('modulebuild.bst', 'configure', "perl Build.PL --prefix \"/buildstream-install/usr\""), ('qmake.bst', 'make-install', "make -j1 INSTALL_ROOT=\"/buildstream-install\" install"), ]) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'defaults')) def test_defaults(cli, datafiles, tmpdir, target, varname, expected): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target ]) result.assert_success() result_vars = _yaml.load_data(result.output) assert result_vars[varname] == expected ################################################################ # Test overriding of variables to produce different commands # ################################################################ @pytest.mark.parametrize("target,varname,expected", [ ('autotools.bst', 'make-install', "make -j1 DESTDIR=\"/custom/install/root\" install"), ('cmake.bst', 'cmake', "cmake -B_builddir -H. -G\"Ninja\" -DCMAKE_INSTALL_PREFIX:PATH=\"/opt\" \\\n" + "-DCMAKE_INSTALL_LIBDIR:PATH=\"lib\" "), ('distutils.bst', 'python-install', "python3 setup.py install --prefix \"/opt\" \\\n" + "--root \"/custom/install/root\""), ('makemaker.bst', 'configure', "perl Makefile.PL PREFIX=/custom/install/root/opt"), ('modulebuild.bst', 'configure', "perl Build.PL --prefix \"/custom/install/root/opt\""), ('qmake.bst', 'make-install', "make -j1 INSTALL_ROOT=\"/custom/install/root\" install"), ]) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'overrides')) def test_overrides(cli, datafiles, tmpdir, target, varname, expected): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{vars}', target ]) result.assert_success() result_vars = _yaml.load_data(result.output) assert result_vars[varname] == expected @pytest.mark.parametrize( "element,provenance", [ # This test makes a reference to an undefined variable in a build command ("manual.bst", "manual.bst [line 5 column 6]"), # This test makes a reference to an undefined variable by another variable, # ensuring that we validate variables even when they are unused ("manual2.bst", "manual2.bst [line 4 column 8]"), # This test uses a build command to refer to some variables which ultimately # refer to an undefined variable, testing a more complex case. 
("manual3.bst", "manual3.bst [line 6 column 8]"), ], ids=["build-command", "variables", "complex"], ) @pytest.mark.datafiles(os.path.join(DATA_DIR, "missing_variables")) def test_undefined(cli, datafiles, element, provenance): project = str(datafiles) result = cli.run(project=project, silent=True, args=["show", "--deps", "none", "--format", "%{config}", element]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.UNRESOLVED_VARIABLE) assert provenance in result.stderr @pytest.mark.parametrize( "element,provenances", [ # Test a simple a -> b and b -> a reference ("simple-cyclic.bst", ["simple-cyclic.bst [line 4 column 5]", "simple-cyclic.bst [line 5 column 5]"]), # Test a simple a -> b and b -> a reference with some text involved ("cyclic.bst", ["cyclic.bst [line 5 column 10]", "cyclic.bst [line 4 column 5]"]), # Test an indirect circular dependency ( "indirect-cyclic.bst", [ "indirect-cyclic.bst [line 5 column 5]", "indirect-cyclic.bst [line 6 column 5]", "indirect-cyclic.bst [line 7 column 5]", "indirect-cyclic.bst [line 8 column 5]", ], ), # Test an indirect circular dependency ("self-reference.bst", ["self-reference.bst [line 4 column 5]"]), ], ids=["simple", "simple-text", "indirect", "self-reference"], ) @pytest.mark.timeout(15, method="signal") @pytest.mark.datafiles(os.path.join(DATA_DIR, "cyclic_variables")) def test_circular_reference(cli, datafiles, element, provenances): print_warning("Performing cyclic test, if this test times out it will exit the test sequence") project = str(datafiles) result = cli.run(project=project, silent=True, args=["build", element]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE) for provenance in provenances: assert provenance in result.stderr def print_warning(msg): RED, END = "\033[91m", "\033[0m" print(("\n{}{}{}").format(RED, msg, END), file=sys.stderr) # Test that variables which refer to eachother very deeply are # still resolved correctly, this ensures that we are not relying # on a recursive algorithm limited by stack depth. # @pytest.mark.parametrize( "maxvars", [50, 500, 5000], ) @pytest.mark.datafiles(os.path.join(DATA_DIR, "defaults")) def test_deep_references(cli, datafiles, maxvars): project = str(datafiles) # Generate an element with very, very many variables to resolve, # each which expand to the value of the previous variable. # # The bottom variable defines a test value which we check for # in the top variable in `bst show` output. 
# topvar = "var{}".format(maxvars) bottomvar = "var0" testvalue = "testvalue {}".format(maxvars) # Generate variables = {"var{}".format(idx + 1): "%{var" + str(idx) + "}" for idx in range(maxvars)} variables[bottomvar] = testvalue element = {"kind": "manual", "variables": variables} _yaml.dump(element, os.path.join(project, "test.bst")) # Run `bst show` result = cli.run(project=project, args=["show", "--format", "%{vars}", "test.bst"]) result.assert_success() # Test results result_vars = _yaml.load_data(result.output) assert result_vars[topvar] == testvalue @pytest.mark.datafiles(os.path.join(DATA_DIR, "partial_context")) def test_partial_context_junctions(cli, datafiles): project = str(datafiles) result = cli.run(project=project, args=["show", "--format", "%{vars}", "test.bst"]) result.assert_success() result_vars = _yaml.load_data(result.output) assert result_vars["eltvar"] == "/bar/foo/baz" buildstream-1.6.9/tests/format/variables/000077500000000000000000000000001437515270000204505ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/cyclic_variables/000077500000000000000000000000001437515270000237465ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/cyclic_variables/cyclic.bst000066400000000000000000000001121437515270000257200ustar00rootroot00000000000000kind: manual variables: a: "%{prefix}/a" prefix: "%{a}/some_prefix/" buildstream-1.6.9/tests/format/variables/cyclic_variables/indirect-cyclic.bst000066400000000000000000000001271437515270000275250ustar00rootroot00000000000000kind: manual variables: foo: "%{a}" a: "%{b}" b: "%{c}" c: "%{d}" d: "%{a}" buildstream-1.6.9/tests/format/variables/cyclic_variables/project.conf000066400000000000000000000000131437515270000262550ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/format/variables/cyclic_variables/self-reference.bst000066400000000000000000000000751437515270000273470ustar00rootroot00000000000000kind: manual variables: a: "Referencing itself with %{a}" buildstream-1.6.9/tests/format/variables/cyclic_variables/simple-cyclic.bst000066400000000000000000000000611437515270000272120ustar00rootroot00000000000000kind: manual variables: a: "%{b}" b: "%{a}" buildstream-1.6.9/tests/format/variables/defaults/000077500000000000000000000000001437515270000222575ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/defaults/autotools.bst000066400000000000000000000000721437515270000250210ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element buildstream-1.6.9/tests/format/variables/defaults/cmake.bst000066400000000000000000000000621437515270000240470ustar00rootroot00000000000000kind: cmake description: Some kinda cmake element buildstream-1.6.9/tests/format/variables/defaults/distutils.bst000066400000000000000000000000721437515270000250140ustar00rootroot00000000000000kind: distutils description: Some kinda distutils element buildstream-1.6.9/tests/format/variables/defaults/makemaker.bst000066400000000000000000000000721437515270000247250ustar00rootroot00000000000000kind: makemaker description: Some kinda makemaker element buildstream-1.6.9/tests/format/variables/defaults/modulebuild.bst000066400000000000000000000000761437515270000253010ustar00rootroot00000000000000kind: modulebuild description: Some kinda modulebuild element buildstream-1.6.9/tests/format/variables/defaults/project.conf000066400000000000000000000001111437515270000245650ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony 
buildstream-1.6.9/tests/format/variables/defaults/qmake.bst000066400000000000000000000000621437515270000240650ustar00rootroot00000000000000kind: qmake description: Some kinda qmake element buildstream-1.6.9/tests/format/variables/missing_variables/000077500000000000000000000000001437515270000241515ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/missing_variables/manual.bst000066400000000000000000000001171437515270000261370ustar00rootroot00000000000000kind: manual config: build-commands: - some undefined variable %{foo} buildstream-1.6.9/tests/format/variables/missing_variables/manual2.bst000066400000000000000000000000621437515270000262200ustar00rootroot00000000000000kind: manual variables: test: hello %{missing} buildstream-1.6.9/tests/format/variables/missing_variables/manual3.bst000066400000000000000000000003441437515270000262240ustar00rootroot00000000000000kind: manual variables: hello: "Hello mister %{pony}" greeting: "The %{hello} string twice: %{hello} again" pony: "The pony is %{undefined}" config: build-commands: - Some indirectly undefined variable %{greeting} buildstream-1.6.9/tests/format/variables/missing_variables/project.conf000066400000000000000000000001111437515270000264570ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony buildstream-1.6.9/tests/format/variables/overrides/000077500000000000000000000000001437515270000224525ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/overrides/autotools.bst000066400000000000000000000001721437515270000252150ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element variables: install-root: /custom/install/root prefix: /opt buildstream-1.6.9/tests/format/variables/overrides/cmake.bst000066400000000000000000000002051437515270000242410ustar00rootroot00000000000000kind: cmake description: Some kinda cmake element variables: generator: Ninja install-root: /custom/install/root prefix: /opt buildstream-1.6.9/tests/format/variables/overrides/distutils.bst000066400000000000000000000001721437515270000252100ustar00rootroot00000000000000kind: distutils description: Some kinda distutils element variables: install-root: /custom/install/root prefix: /opt buildstream-1.6.9/tests/format/variables/overrides/makemaker.bst000066400000000000000000000001721437515270000251210ustar00rootroot00000000000000kind: makemaker description: Some kinda makemaker element variables: install-root: /custom/install/root prefix: /opt buildstream-1.6.9/tests/format/variables/overrides/modulebuild.bst000066400000000000000000000001761437515270000254750ustar00rootroot00000000000000kind: modulebuild description: Some kinda modulebuild element variables: install-root: /custom/install/root prefix: /opt buildstream-1.6.9/tests/format/variables/overrides/project.conf000066400000000000000000000001111437515270000247600ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony buildstream-1.6.9/tests/format/variables/overrides/qmake.bst000066400000000000000000000001621437515270000242610ustar00rootroot00000000000000kind: qmake description: Some kinda qmake element variables: install-root: /custom/install/root prefix: /opt buildstream-1.6.9/tests/format/variables/partial_context/000077500000000000000000000000001437515270000236505ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/partial_context/base.bst000066400000000000000000000000631437515270000252730ustar00rootroot00000000000000kind: junction sources: - 
kind: local path: base buildstream-1.6.9/tests/format/variables/partial_context/base/000077500000000000000000000000001437515270000245625ustar00rootroot00000000000000buildstream-1.6.9/tests/format/variables/partial_context/base/project.conf000066400000000000000000000000141437515270000270720ustar00rootroot00000000000000name: base buildstream-1.6.9/tests/format/variables/partial_context/base/vars.yml000066400000000000000000000000341437515270000262550ustar00rootroot00000000000000variables: subvar: "/bar" buildstream-1.6.9/tests/format/variables/partial_context/project.conf000066400000000000000000000001071437515270000261630ustar00rootroot00000000000000name: test (@): base.bst:vars.yml variables: var: "%{subvar}/foo" buildstream-1.6.9/tests/format/variables/partial_context/test.bst000066400000000000000000000000571437515270000253430ustar00rootroot00000000000000kind: manual variables: eltvar: "%{var}/baz" buildstream-1.6.9/tests/frontend/000077500000000000000000000000001437515270000170275ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/__init__.py000066400000000000000000000004071437515270000211410ustar00rootroot00000000000000import os from buildstream import _yaml # Shared function to configure the project.conf inline # def configure_project(path, config): config['name'] = 'test' config['element-path'] = 'elements' _yaml.dump(config, os.path.join(path, 'project.conf')) buildstream-1.6.9/tests/frontend/buildcheckout.py000066400000000000000000000610531437515270000222330ustar00rootroot00000000000000import os import tarfile import hashlib import pytest from tests.testutils import cli, create_repo, ALL_REPO_KINDS, generate_junction from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from . import configure_project # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) def strict_args(args, strict): if strict != "strict": return ['--no-strict'] + args return args @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("strict,hardlinks", [ ("strict", "copies"), ("strict", "hardlinks"), ("non-strict", "copies"), ("non-strict", "hardlinks"), ]) def test_build_checkout(datafiles, cli, strict, hardlinks): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') # First build it result = cli.run(project=project, args=strict_args(['build', 'target.bst'], strict)) result.assert_success() # Assert that after a successful build, the builddir is empty builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) # Prepare checkout args checkout_args = strict_args(['checkout'], strict) if hardlinks == "hardlinks": checkout_args += ['--hardlinks'] checkout_args += ['target.bst', checkout] # Now check it out result = cli.run(project=project, args=checkout_args) result.assert_success() # Check that the executable hello file is found in the checkout filename = os.path.join(checkout, 'usr', 'bin', 'hello') assert os.path.exists(filename) filename = os.path.join(checkout, 'usr', 'include', 'pony.h') assert os.path.exists(filename) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("deps", [("run"), ("none")]) def test_build_checkout_deps(datafiles, cli, deps): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = "checkout-deps.bst" # First build it result = cli.run(project=project, args=['build', element_name]) 
result.assert_success() # Assert that after a successful build, the builddir is empty builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) # Now check it out result = cli.run(project=project, args=['checkout', element_name, '--deps', deps, checkout]) result.assert_success() # Verify output of this element filename = os.path.join(checkout, 'etc', 'buildstream', 'config') assert os.path.exists(filename) # Verify output of this element's runtime dependencies filename = os.path.join(checkout, 'usr', 'bin', 'hello') if deps == "run": assert os.path.exists(filename) else: assert not os.path.exists(filename) @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_unbuilt(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') # Check that checking out an unbuilt element fails nicely result = cli.run(project=project, args=['checkout', 'target.bst', checkout]) result.assert_main_error(ErrorDomain.STREAM, "uncached-checkout-attempt") @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_tarball(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout.tar') result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) checkout_args = ['checkout', '--tar', 'target.bst', checkout] result = cli.run(project=project, args=checkout_args) result.assert_success() tar = tarfile.TarFile(checkout) assert os.path.join('.', 'usr', 'bin', 'hello') in tar.getnames() assert os.path.join('.', 'usr', 'include', 'pony.h') in tar.getnames() @pytest.mark.skip(reason="Capturing the binary output is causing a stacktrace") @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_tarball_stdout(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) tarball = os.path.join(cli.directory, 'tarball.tar') result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) checkout_args = ['checkout', '--tar', 'target.bst', '-'] result = cli.run(project=project, args=checkout_args) result.assert_success() with open(tarball, 'wb') as f: f.write(result.output) tar = tarfile.TarFile(tarball) assert os.path.join('.', 'usr', 'bin', 'hello') in tar.getnames() assert os.path.join('.', 'usr', 'include', 'pony.h') in tar.getnames() @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_tarball_is_deterministic(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) tarball1 = os.path.join(cli.directory, 'tarball1.tar') tarball2 = os.path.join(cli.directory, 'tarball2.tar') result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) checkout_args = ['checkout', '--force', '--tar', 'target.bst'] checkout_args1 = checkout_args + [tarball1] result = cli.run(project=project, args=checkout_args1) result.assert_success() checkout_args2 = checkout_args + [tarball2] result = cli.run(project=project, args=checkout_args2) result.assert_success() with open(tarball1, 'rb') as f: contents = f.read() hash1 = hashlib.sha1(contents).hexdigest() with open(tarball2, 'rb') as f: contents = f.read() hash2 = 
hashlib.sha1(contents).hexdigest() assert hash1 == hash2 @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("hardlinks", [("copies"), ("hardlinks")]) def test_build_checkout_nonempty(datafiles, cli, hardlinks): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') filename = os.path.join(checkout, "file.txt") # First build it result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Assert that after a successful build, the builddir is empty builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) # Create the checkout dir and add a file to it, should cause checkout to fail os.makedirs(checkout, exist_ok=True) with open(filename, "w") as f: f.write("Hello") # Prepare checkout args checkout_args = ['checkout'] if hardlinks == "hardlinks": checkout_args += ['--hardlinks'] checkout_args += ['target.bst', checkout] # Now check it out result = cli.run(project=project, args=checkout_args) result.assert_main_error(ErrorDomain.STREAM, None) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("hardlinks", [("copies"), ("hardlinks")]) def test_build_checkout_force(datafiles, cli, hardlinks): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') filename = os.path.join(checkout, "file.txt") # First build it result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Assert that after a successful build, the builddir is empty builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) # Create the checkout dir and add a file to it, should cause checkout to fail os.makedirs(checkout, exist_ok=True) with open(filename, "w") as f: f.write("Hello") # Prepare checkout args checkout_args = ['checkout', '--force'] if hardlinks == "hardlinks": checkout_args += ['--hardlinks'] checkout_args += ['target.bst', checkout] # Now check it out result = cli.run(project=project, args=checkout_args) result.assert_success() # Check that the file we added is still there filename = os.path.join(checkout, 'file.txt') assert os.path.exists(filename) # Check that the executable hello file is found in the checkout filename = os.path.join(checkout, 'usr', 'bin', 'hello') assert os.path.exists(filename) # Check that the executable hello file is found in the checkout filename = os.path.join(checkout, 'usr', 'include', 'pony.h') assert os.path.exists(filename) @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_force_tarball(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) tarball = os.path.join(cli.directory, 'tarball.tar') result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() builddir = os.path.join(cli.directory, 'build') assert os.path.isdir(builddir) assert not os.listdir(builddir) with open(tarball, "w") as f: f.write("Hello") checkout_args = ['checkout', '--force', '--tar', 'target.bst', tarball] result = cli.run(project=project, args=checkout_args) result.assert_success() tar = tarfile.TarFile(tarball) assert os.path.join('.', 'usr', 'bin', 'hello') in tar.getnames() assert os.path.join('.', 'usr', 'include', 'pony.h') in tar.getnames() fetch_build_checkout_combos = \ [("strict", kind) for kind in ALL_REPO_KINDS] + \ [("non-strict", kind) for kind in ALL_REPO_KINDS] @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("strict,kind", 
fetch_build_checkout_combos) def test_fetch_build_checkout(cli, tmpdir, datafiles, strict, kind): checkout = os.path.join(cli.directory, 'checkout') project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_name = 'build-test-{}.bst'.format(kind) # Create our repo object of the given source type with # the dev files, and then collect the initial ref. # repo = create_repo(kind, str(tmpdir)) ref = repo.create(dev_files_path) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(element_path, element_name)) assert cli.get_element_state(project, element_name) == 'fetch needed' result = cli.run(project=project, args=strict_args(['build', element_name], strict)) result.assert_success() assert cli.get_element_state(project, element_name) == 'cached' # Now check it out result = cli.run(project=project, args=strict_args([ 'checkout', element_name, checkout ], strict)) result.assert_success() # Check that the pony.h include from files/dev-files exists filename = os.path.join(checkout, 'usr', 'include', 'pony.h') assert os.path.exists(filename) @pytest.mark.datafiles(DATA_DIR) def test_install_to_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element = 'installed-to-build.bst' # Attempt building the element # We expect this to throw an ElementError, since the element will # attempt to stage into /buildstream/build, which is not allowed. result = cli.run(project=project, args=strict_args(['build', element], True)) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.ELEMENT, None) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=False) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Now try to track it, this will bail with the appropriate error # informing the user to track the junction first result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_unfetched_junction(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path, 
store_ref=(ref_storage == 'inline')) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Dump a project.refs if we're using project.refs storage # if ref_storage == 'project.refs': project_refs = { 'projects': { 'test': { 'junction.bst': [ { 'ref': ref } ] } } } _yaml.dump(project_refs, os.path.join(project, 'junction.refs')) # Now try to build it, this should automatically result in fetching # the junction itself at load time. result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_success() # Assert that it's cached now assert cli.get_element_state(project, 'junction-dep.bst') == 'cached' @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_junction(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') checkout = os.path.join(cli.directory, 'checkout') # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Now try to build it, this should automatically result in fetching # the junction itself at load time. result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_success() # Assert that it's cached now assert cli.get_element_state(project, 'junction-dep.bst') == 'cached' # Now check it out result = cli.run(project=project, args=[ 'checkout', 'junction-dep.bst', checkout ]) result.assert_success() # Assert the content of /etc/animal.conf filename = os.path.join(checkout, 'etc', 'animal.conf') assert os.path.exists(filename) with open(filename, 'r') as f: contents = f.read() assert contents == 'animal=Pony\n' @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_workspaced_junction(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') workspace = os.path.join(cli.directory, 'workspace') checkout = os.path.join(cli.directory, 'checkout') # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Now open a workspace on the junction # result = cli.run(project=project, args=['workspace', 'open', 'junction.bst', workspace]) result.assert_success() filename = os.path.join(workspace, 'files', 'etc-files', 'etc', 'animal.conf') # Assert the content of /etc/animal.conf in the workspace assert os.path.exists(filename) with open(filename, 'r') as f: contents = f.read() assert contents == 'animal=Pony\n' # Modify the content of the animal.conf in the workspace with open(filename, 'w') as f: f.write('animal=Horsy\n') # Now try to build it, this should 
automatically result in fetching # the junction itself at load time. result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_success() # Assert that it's cached now assert cli.get_element_state(project, 'junction-dep.bst') == 'cached' # Now check it out result = cli.run(project=project, args=[ 'checkout', 'junction-dep.bst', checkout ]) result.assert_success() # Assert the workspace modified content of /etc/animal.conf filename = os.path.join(checkout, 'etc', 'animal.conf') assert os.path.exists(filename) with open(filename, 'r') as f: contents = f.read() assert contents == 'animal=Horsy\n' @pytest.mark.datafiles(DATA_DIR) def test_build_checkout_cross_junction(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') checkout = os.path.join(cli.directory, 'checkout') generate_junction(tmpdir, subproject_path, junction_path) result = cli.run(project=project, args=['build', 'junction.bst:import-etc.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'junction.bst:import-etc.bst', checkout]) result.assert_success() filename = os.path.join(checkout, 'etc', 'animal.conf') assert os.path.exists(filename) @pytest.mark.datafiles(DATA_DIR) def test_build_junction_short_notation(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') workspace = os.path.join(cli.directory, 'workspace') checkout = os.path.join(cli.directory, 'checkout') # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path) # Create a stack element to depend on a cross junction element, using # colon (:) as the separator element = { 'kind': 'stack', 'depends': ['junction.bst:import-etc.bst'] } _yaml.dump(element, element_path) # Now try to build it, this should automatically result in fetching # the junction itself at load time. 
result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_success() # Assert that it's cached now assert cli.get_element_state(project, 'junction-dep.bst') == 'cached' # Now check it out result = cli.run(project=project, args=[ 'checkout', 'junction-dep.bst', checkout ]) result.assert_success() # Assert the content of /etc/animal.conf filename = os.path.join(checkout, 'etc', 'animal.conf') assert os.path.exists(filename) with open(filename, 'r') as f: contents = f.read() assert contents == 'animal=Pony\n' @pytest.mark.datafiles(DATA_DIR) def test_build_junction_short_notation_filename(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') checkout = os.path.join(cli.directory, 'checkout') # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path) # Create a stack element to depend on a cross junction element, using # colon (:) as the separator element = { 'kind': 'stack', 'depends': [{'filename': 'junction.bst:import-etc.bst'}] } _yaml.dump(element, element_path) # Now try to build it, this should automatically result in fetching # the junction itself at load time. result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_success() # Assert that it's cached now assert cli.get_element_state(project, 'junction-dep.bst') == 'cached' # Now check it out result = cli.run(project=project, args=[ 'checkout', 'junction-dep.bst', checkout ]) result.assert_success() # Assert the content of /etc/animal.conf filename = os.path.join(checkout, 'etc', 'animal.conf') assert os.path.exists(filename) with open(filename, 'r') as f: contents = f.read() assert contents == 'animal=Pony\n' @pytest.mark.datafiles(DATA_DIR) def test_build_junction_short_notation_with_junction(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') checkout = os.path.join(cli.directory, 'checkout') # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path) # Create a stack element to depend on a cross junction element, using # colon (:) as the separator element = { 'kind': 'stack', 'depends': [{ 'filename': 'junction.bst:import-etc.bst', 'junction': 'junction.bst', }] } _yaml.dump(element, element_path) # Now try to build it, this should fail as filenames should not contain # `:` when junction is explicitly specified result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_build_junction_transitive_short_notation_with_junction(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') checkout = os.path.join(cli.directory, 'checkout') # Create a repo to hold the subproject and generate a junction element for it ref =
generate_junction(tmpdir, subproject_path, junction_path) # Create a stack element to depend on a cross junction element, using # colon (:) as the separator element = { 'kind': 'stack', 'depends': ['junction.bst:import-etc.bst:foo.bst'] } _yaml.dump(element, element_path) # Now try to build it, this should fail as recursive lookups for # cross-junction elements is not allowed. result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) buildstream-1.6.9/tests/frontend/buildtrack.py000066400000000000000000000320231437515270000215250ustar00rootroot00000000000000import os import re import shutil import itertools import pytest from tests.testutils import cli, create_repo, generate_junction from buildstream import _yaml from buildstream._exceptions import ErrorDomain from . import configure_project # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) def create_element(repo, name, path, dependencies, ref=None): element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ], 'depends': dependencies } _yaml.dump(element, os.path.join(path, name)) @pytest.mark.datafiles(os.path.join(DATA_DIR)) @pytest.mark.parametrize("strict", [True, False], ids=["strict", "no-strict"]) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("track_targets,exceptions,tracked", [ # Test with no exceptions (['0.bst'], [], ['0.bst', '2.bst', '3.bst', '4.bst', '5.bst', '6.bst', '7.bst']), (['3.bst'], [], ['3.bst', '4.bst', '5.bst', '6.bst']), (['2.bst', '3.bst'], [], ['2.bst', '3.bst', '4.bst', '5.bst', '6.bst', '7.bst']), # Test excepting '2.bst' (['0.bst'], ['2.bst'], ['0.bst', '3.bst', '4.bst', '5.bst', '6.bst']), (['3.bst'], ['2.bst'], []), (['2.bst', '3.bst'], ['2.bst'], ['3.bst', '4.bst', '5.bst', '6.bst']), # Test excepting '2.bst' and '3.bst' (['0.bst'], ['2.bst', '3.bst'], ['0.bst']), (['3.bst'], ['2.bst', '3.bst'], []), (['2.bst', '3.bst'], ['2.bst', '3.bst'], []) ]) def test_build_track(cli, datafiles, tmpdir, ref_storage, strict, track_targets, exceptions, tracked): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) configure_project(project, { 'ref-storage': ref_storage }) cli.configure({ 'projects': { 'test': { 'strict': strict } } }) create_elements = { '0.bst': [ '2.bst', '3.bst' ], '2.bst': [ '3.bst', '7.bst' ], '3.bst': [ '4.bst', '5.bst', '6.bst' ], '4.bst': [], '5.bst': [], '6.bst': [ '5.bst' ], '7.bst': [] } initial_project_refs = {} for element, dependencies in create_elements.items(): # Test the element inconsistency resolution by ensuring that # only elements that aren't tracked have refs if element in set(tracked): # Elements which should not have a ref set # create_element(repo, element, element_path, dependencies) elif ref_storage == 'project.refs': # Store a ref in project.refs # create_element(repo, element, element_path, dependencies) initial_project_refs[element] = [{'ref': ref}] else: # Store a ref in the element itself # create_element(repo, element, element_path, dependencies, ref=ref) # Generate initial project.refs if ref_storage == 'project.refs': project_refs = { 'projects': { 'test': initial_project_refs } } _yaml.dump(project_refs, os.path.join(project, 'project.refs')) args = ['build'] args += 
itertools.chain.from_iterable(zip(itertools.repeat('--track'), track_targets)) args += itertools.chain.from_iterable(zip(itertools.repeat('--track-except'), exceptions)) args += ['0.bst'] result = cli.run(project=project, silent=True, args=args) result.assert_success() # Assert that the main target 0.bst is cached assert cli.get_element_state(project, '0.bst') == 'cached' # Assert that we tracked exactly the elements we expected to tracked_elements = result.get_tracked_elements() assert set(tracked_elements) == set(tracked) # Delete element sources source_dir = os.path.join(project, 'cache', 'sources') shutil.rmtree(source_dir) # Delete artifacts one by one and assert element states for target in set(tracked): cli.remove_artifact_from_cache(project, target) # Assert that it's tracked assert cli.get_element_state(project, target) == 'fetch needed' # Assert there was a project.refs created, depending on the configuration if ref_storage == 'project.refs': assert os.path.exists(os.path.join(project, 'project.refs')) else: assert not os.path.exists(os.path.join(project, 'project.refs')) # This tests a very specific scenario: # # o Local cache is empty # o Strict mode is disabled # o The build target has only build dependencies # o The build is launched with --track-all # # In this scenario, we have encountered bugs where BuildStream returns # successfully after tracking completes without ever pulling, fetching or # building anything. # @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("strict", [True, False], ids=["strict", "no-strict"]) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_build_track_all(cli, tmpdir, datafiles, strict, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') subproject_element_path = os.path.join(project, 'files', 'sub-project', 'elements') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements') dev_files_path = os.path.join(project, 'files', 'dev-files') configure_project(project, { 'ref-storage': ref_storage }) cli.configure({ 'projects': { 'test': { 'strict': strict } } }) # We need a repo for real trackable elements repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) # Create a trackable element to depend on the cross junction element, # this one has it's ref resolved already create_element(repo, 'sub-target.bst', subproject_element_path, ['import-etc.bst'], ref=ref) # Create a trackable element to depend on the cross junction element create_element(repo, 'target.bst', element_path, [ { 'junction': 'junction.bst', 'filename': 'sub-target.bst' } ]) # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=False) # Now create a compose element at the top level element = { 'kind': 'compose', 'depends': [ { 'filename': 'target.bst', 'type': 'build' } ] } _yaml.dump(element, os.path.join(element_path, 'composed.bst')) # Track the junction itself first. 
result = cli.run(project=project, args=['track', 'junction.bst']) result.assert_success() # Build it with --track-all result = cli.run(project=project, silent=True, args=['build', '--track-all', 'composed.bst']) result.assert_success() # Assert that the main target is cached as a result assert cli.get_element_state(project, 'composed.bst') == 'cached' @pytest.mark.datafiles(os.path.join(DATA_DIR)) @pytest.mark.parametrize("track_targets,exceptions,tracked", [ # Test with no exceptions (['0.bst'], [], ['0.bst', '2.bst', '3.bst', '4.bst', '5.bst', '6.bst', '7.bst']), (['3.bst'], [], ['3.bst', '4.bst', '5.bst', '6.bst']), (['2.bst', '3.bst'], [], ['2.bst', '3.bst', '4.bst', '5.bst', '6.bst', '7.bst']), # Test excepting '2.bst' (['0.bst'], ['2.bst'], ['0.bst', '3.bst', '4.bst', '5.bst', '6.bst']), (['3.bst'], ['2.bst'], []), (['2.bst', '3.bst'], ['2.bst'], ['3.bst', '4.bst', '5.bst', '6.bst']), # Test excepting '2.bst' and '3.bst' (['0.bst'], ['2.bst', '3.bst'], ['0.bst']), (['3.bst'], ['2.bst', '3.bst'], []), (['2.bst', '3.bst'], ['2.bst', '3.bst'], []) ]) def test_build_track_update(cli, datafiles, tmpdir, track_targets, exceptions, tracked): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) create_elements = { '0.bst': [ '2.bst', '3.bst' ], '2.bst': [ '3.bst', '7.bst' ], '3.bst': [ '4.bst', '5.bst', '6.bst' ], '4.bst': [], '5.bst': [], '6.bst': [ '5.bst' ], '7.bst': [] } for element, dependencies in create_elements.items(): # We set a ref for all elements, so that we ensure that we # only track exactly those elements that we want to track, # even if others can be tracked create_element(repo, element, element_path, dependencies, ref=ref) repo.add_commit() args = ['build'] args += itertools.chain.from_iterable(zip(itertools.repeat('--track'), track_targets)) args += itertools.chain.from_iterable(zip(itertools.repeat('--track-except'), exceptions)) args += ['0.bst'] result = cli.run(project=project, silent=True, args=args) tracked_elements = result.get_tracked_elements() assert set(tracked_elements) == set(tracked) @pytest.mark.datafiles(os.path.join(DATA_DIR)) @pytest.mark.parametrize("track_targets,exceptions", [ # Test tracking the main target element, but excepting some of its # children (['0.bst'], ['6.bst']), # Test only tracking a child element (['3.bst'], []), ]) def test_build_track_inconsistent(cli, datafiles, tmpdir, track_targets, exceptions): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') repo = create_repo('git', str(tmpdir)) repo.create(dev_files_path) create_elements = { '0.bst': [ '2.bst', '3.bst' ], '2.bst': [ '3.bst', '7.bst' ], '3.bst': [ '4.bst', '5.bst', '6.bst' ], '4.bst': [], '5.bst': [], '6.bst': [ '5.bst' ], '7.bst': [] } for element, dependencies in create_elements.items(): # We don't add refs so that all elements *have* to be tracked create_element(repo, element, element_path, dependencies) args = ['build'] args += itertools.chain.from_iterable(zip(itertools.repeat('--track'), track_targets)) args += itertools.chain.from_iterable(zip(itertools.repeat('--track-except'), exceptions)) args += ['0.bst'] result = cli.run(project=project, args=args, silent=True) result.assert_main_error(ErrorDomain.PIPELINE, "inconsistent-pipeline") # Assert that if a build 
element has a dependency in the tracking # queue it does not start building before tracking finishes. @pytest.mark.datafiles(os.path.join(DATA_DIR)) @pytest.mark.parametrize("strict", ['--strict', '--no-strict']) def test_build_track_track_first(cli, datafiles, tmpdir, strict): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) create_elements = { '0.bst': [ '1.bst' ], '1.bst': [], '2.bst': [ '0.bst' ] } for element, dependencies in create_elements.items(): # We set a ref so that 0.bst can already be built even if # 1.bst has not been tracked yet. create_element(repo, element, element_path, dependencies, ref=ref) repo.add_commit() # Build 1.bst and 2.bst first so we have an artifact for them args = [strict, 'build', '2.bst'] result = cli.run(args=args, project=project, silent=True) result.assert_success() # Test building 0.bst while tracking 1.bst cli.remove_artifact_from_cache(project, '0.bst') args = [strict, 'build', '--track', '1.bst', '2.bst'] result = cli.run(args=args, project=project, silent=True) result.assert_success() # Assert that 1.bst successfully tracks before 0.bst builds track_messages = re.finditer(r'\[track:1.bst\s*]', result.stderr) build_0 = re.search(r'\[build:0.bst\s*] START', result.stderr).start() assert all(track_message.start() < build_0 for track_message in track_messages) # Assert that 2.bst is *only* rebuilt if we are in strict mode build_2 = re.search(r'\[build:2.bst\s*] START', result.stderr) if strict == '--strict': assert build_2 is not None else: assert build_2 is None buildstream-1.6.9/tests/frontend/compose_splits.py000066400000000000000000000020671437515270000224510ustar00rootroot00000000000000import os import pytest from tests.testutils.runcli import cli # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) @pytest.mark.parametrize("target", [ ('compose-include-bin.bst'), ('compose-exclude-dev.bst') ]) @pytest.mark.datafiles(DATA_DIR) def test_compose_splits(datafiles, cli, target): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') # First build it result = cli.run(project=project, args=['build', target]) result.assert_success() # Now check it out result = cli.run(project=project, args=[ 'checkout', target, checkout ]) result.assert_success() # Check that the executable hello file is found in the checkout filename = os.path.join(checkout, 'usr', 'bin', 'hello') assert os.path.exists(filename) # Check that the executable hello file is found in the checkout filename = os.path.join(checkout, 'usr', 'include', 'pony.h') assert not os.path.exists(filename) buildstream-1.6.9/tests/frontend/configurable_warnings.py000066400000000000000000000037241437515270000237570ustar00rootroot00000000000000import pytest import os from buildstream.plugin import CoreWarnings from buildstream._exceptions import ErrorDomain, LoadErrorReason from buildstream import _yaml from tests.testutils.runcli import cli TOP_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "configuredwarning" ) def get_project(fatal_warnings): return { "name": "test", "element-path": "elements", "plugins": [ { "origin": "local", "path": "plugins", "elements": { "warninga": 0, "warningb": 0, "corewarn": 0, } } ], "fatal-warnings": fatal_warnings } def build_project(datafiles, 
fatal_warnings): project_path = os.path.join(datafiles.dirname, datafiles.basename) project = get_project(fatal_warnings) _yaml.dump(project, os.path.join(project_path, "project.conf")) return project_path @pytest.mark.datafiles(TOP_DIR) @pytest.mark.parametrize("element_name, fatal_warnings, expect_fatal, error_domain", [ ("corewarn.bst", [CoreWarnings.OVERLAPS], True, ErrorDomain.STREAM), ("warninga.bst", ["warninga:warning-a"], True, ErrorDomain.STREAM), ("warningb.bst", ["warningb:warning-b"], True, ErrorDomain.STREAM), ("corewarn.bst", [], False, None), ("warninga.bst", [], False, None), ("warningb.bst", [], False, None), ("warninga.bst", [CoreWarnings.OVERLAPS], False, None), ("warningb.bst", [CoreWarnings.OVERLAPS], False, None), ]) def test_fatal_warnings(cli, datafiles, element_name, fatal_warnings, expect_fatal, error_domain): project_path = build_project(datafiles, fatal_warnings) result = cli.run(project=project_path, args=["build", element_name]) if expect_fatal: result.assert_main_error(error_domain, None, "Expected fatal execution") else: result.assert_success("Unexpected fatal execution") buildstream-1.6.9/tests/frontend/configuredwarning/000077500000000000000000000000001437515270000225425ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/configuredwarning/elements/000077500000000000000000000000001437515270000243565ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/configuredwarning/elements/corewarn.bst000066400000000000000000000000161437515270000267050ustar00rootroot00000000000000kind: corewarnbuildstream-1.6.9/tests/frontend/configuredwarning/elements/warninga.bst000066400000000000000000000000171437515270000266740ustar00rootroot00000000000000kind: warninga buildstream-1.6.9/tests/frontend/configuredwarning/elements/warningb.bst000066400000000000000000000000171437515270000266750ustar00rootroot00000000000000kind: warningb buildstream-1.6.9/tests/frontend/configuredwarning/plugins/000077500000000000000000000000001437515270000242235ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/configuredwarning/plugins/corewarn.py000066400000000000000000000016101437515270000264130ustar00rootroot00000000000000import os from buildstream import Element from buildstream.plugin import CoreWarnings class CoreWarn(Element): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): pass def configure_sandbox(self, sandbox): sandbox.mark_directory(self.get_variable('install-root')) def stage(self, sandbox): pass def assemble(self, sandbox): self.warn("Testing: CoreWarning produced during assemble", warning_token=CoreWarnings.OVERLAPS) # Return an arbitrary existing directory in the sandbox # rootdir = sandbox.get_directory() install_root = self.get_variable('install-root') outputdir = os.path.join(rootdir, install_root.lstrip(os.sep)) os.makedirs(outputdir, exist_ok=True) return install_root def setup(): return CoreWarn buildstream-1.6.9/tests/frontend/configuredwarning/plugins/warninga.py000066400000000000000000000014371437515270000264100ustar00rootroot00000000000000import os from buildstream import Element WARNING_A = "warning-a" class WarningA(Element): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): pass def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): self.warn("Testing: warning-a produced during assemble", warning_token=WARNING_A) # Return an arbitrary existing directory in the sandbox # rootdir = sandbox.get_directory() install_root = 
self.get_variable('install-root') outputdir = os.path.join(rootdir, install_root.lstrip(os.sep)) os.makedirs(outputdir, exist_ok=True) return install_root def setup(): return WarningA buildstream-1.6.9/tests/frontend/configuredwarning/plugins/warningb.py000066400000000000000000000014371437515270000264110ustar00rootroot00000000000000import os from buildstream import Element WARNING_B = "warning-b" class WarningB(Element): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): pass def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): self.warn("Testing: warning-b produced during assemble", warning_token=WARNING_B) # Return an arbitrary existing directory in the sandbox # rootdir = sandbox.get_directory() install_root = self.get_variable('install-root') outputdir = os.path.join(rootdir, install_root.lstrip(os.sep)) os.makedirs(outputdir, exist_ok=True) return install_root def setup(): return WarningB buildstream-1.6.9/tests/frontend/configuredwarning/project.conf000066400000000000000000000001771437515270000250640ustar00rootroot00000000000000name: test element-path: elements plugins: - origin: local path: element_plugins elements: warninga: 0 warningb: 0 buildstream-1.6.9/tests/frontend/consistencyerror/000077500000000000000000000000001437515270000224425ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/consistencyerror/__init__.py000066400000000000000000000000001437515270000245410ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/consistencyerror/bug.bst000066400000000000000000000001711437515270000237300ustar00rootroot00000000000000kind: import description: An element with an unhandled exception at get_consistency time sources: - kind: consistencybug buildstream-1.6.9/tests/frontend/consistencyerror/error.bst000066400000000000000000000001651437515270000243070ustar00rootroot00000000000000kind: import description: An element with a failing source at get_consistency time sources: - kind: consistencyerror buildstream-1.6.9/tests/frontend/consistencyerror/plugins/000077500000000000000000000000001437515270000241235ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/consistencyerror/plugins/__init__.py000066400000000000000000000000001437515270000262220ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/consistencyerror/plugins/consistencybug.py000066400000000000000000000011221437515270000275300ustar00rootroot00000000000000from buildstream import Source, SourceError, Consistency class ConsistencyBugSource(Source): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} def get_consistency(self): # Raise an unhandled exception (not a BstError) raise Exception("Something went terribly wrong") def get_ref(self): return None def set_ref(self, ref, node): pass def fetch(self): pass def stage(self, directory): pass def setup(): return ConsistencyBugSource buildstream-1.6.9/tests/frontend/consistencyerror/plugins/consistencyerror.py000066400000000000000000000012031437515270000301040ustar00rootroot00000000000000from buildstream import Source, SourceError, Consistency class ConsistencyErrorSource(Source): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} def get_consistency(self): # Raise an error unconditionally raise SourceError("Something went terribly wrong", reason="the-consistency-error") def get_ref(self): return None def set_ref(self, ref, node): pass def fetch(self): pass def stage(self, directory): 
pass def setup(): return ConsistencyErrorSource buildstream-1.6.9/tests/frontend/consistencyerror/project.conf000066400000000000000000000003211437515270000247530ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: test # Whitelist the local test Sources # plugins: - origin: local path: plugins sources: consistencyerror: 0 consistencybug: 0 buildstream-1.6.9/tests/frontend/cross_junction_workspace.py000066400000000000000000000070661437515270000245320ustar00rootroot00000000000000import os from tests.testutils import cli, create_repo from buildstream import _yaml def prepare_junction_project(cli, tmpdir): main_project = tmpdir.join("main") sub_project = tmpdir.join("sub") os.makedirs(str(main_project)) os.makedirs(str(sub_project)) _yaml.dump({'name': 'main'}, str(main_project.join("project.conf"))) _yaml.dump({'name': 'sub'}, str(sub_project.join("project.conf"))) import_dir = tmpdir.join("import") os.makedirs(str(import_dir)) with open(str(import_dir.join("hello.txt")), "w") as f: f.write("hello!") import_repo_dir = tmpdir.join("import_repo") os.makedirs(str(import_repo_dir)) import_repo = create_repo("git", str(import_repo_dir)) import_ref = import_repo.create(str(import_dir)) _yaml.dump({'kind': 'import', 'sources': [import_repo.source_config(ref=import_ref)]}, str(sub_project.join("data.bst"))) sub_repo_dir = tmpdir.join("sub_repo") os.makedirs(str(sub_repo_dir)) sub_repo = create_repo("git", str(sub_repo_dir)) sub_ref = sub_repo.create(str(sub_project)) _yaml.dump({'kind': 'junction', 'sources': [sub_repo.source_config(ref=sub_ref)]}, str(main_project.join("sub.bst"))) args = ['fetch', 'sub.bst'] result = cli.run(project=str(main_project), args=args) result.assert_success() return str(main_project) def open_cross_junction(cli, tmpdir): project = prepare_junction_project(cli, tmpdir) workspace = tmpdir.join("workspace") element = 'sub.bst:data.bst' args = ['workspace', 'open', element, str(workspace)] result = cli.run(project=project, args=args) result.assert_success() assert cli.get_element_state(project, element) == 'buildable' assert os.path.exists(str(workspace.join('hello.txt'))) return project, workspace def test_open_cross_junction(cli, tmpdir): open_cross_junction(cli, tmpdir) def test_list_cross_junction(cli, tmpdir): project, workspace = open_cross_junction(cli, tmpdir) element = 'sub.bst:data.bst' args = ['workspace', 'list'] result = cli.run(project=project, args=args) result.assert_success() loaded = _yaml.load_data(result.output) assert isinstance(loaded.get('workspaces'), list) workspaces = loaded['workspaces'] assert len(workspaces) == 1 assert 'element' in workspaces[0] assert workspaces[0]['element'] == element def test_close_cross_junction(cli, tmpdir): project, workspace = open_cross_junction(cli, tmpdir) element = 'sub.bst:data.bst' args = ['workspace', 'close', '--remove-dir', element] result = cli.run(project=project, args=args) result.assert_success() assert not os.path.exists(str(workspace)) args = ['workspace', 'list'] result = cli.run(project=project, args=args) result.assert_success() loaded = _yaml.load_data(result.output) assert isinstance(loaded.get('workspaces'), list) workspaces = loaded['workspaces'] assert len(workspaces) == 0 def test_close_all_cross_junction(cli, tmpdir): project, workspace = open_cross_junction(cli, tmpdir) args = ['workspace', 'close', '--remove-dir', '--all'] result = cli.run(project=project, args=args) result.assert_success() assert not os.path.exists(str(workspace)) args = ['workspace', 
'list'] result = cli.run(project=project, args=args) result.assert_success() loaded = _yaml.load_data(result.output) assert isinstance(loaded.get('workspaces'), list) workspaces = loaded['workspaces'] assert len(workspaces) == 0 buildstream-1.6.9/tests/frontend/fetch.py000066400000000000000000000155551437515270000205050ustar00rootroot00000000000000import os import pytest from tests.testutils import cli, create_repo, ALL_REPO_KINDS, generate_junction from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from . import configure_project # Project directory TOP_DIR = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = os.path.join(TOP_DIR, 'project') @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_fetch(cli, tmpdir, datafiles, kind): project = os.path.join(datafiles.dirname, datafiles.basename) bin_files_path = os.path.join(project, 'files', 'bin-files') element_path = os.path.join(project, 'elements') element_name = 'fetch-test-{}.bst'.format(kind) # Create our repo object of the given source type with # the bin files, and then collect the initial ref. # repo = create_repo(kind, str(tmpdir)) ref = repo.create(bin_files_path) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(element_path, element_name)) # Assert that a fetch is needed assert cli.get_element_state(project, element_name) == 'fetch needed' # Now try to fetch it result = cli.run(project=project, args=['fetch', element_name]) result.assert_success() # Assert that we are now buildable because the source is # now cached. assert cli.get_element_state(project, element_name) == 'buildable' @pytest.mark.datafiles(os.path.join(TOP_DIR, 'consistencyerror')) def test_fetch_consistency_error(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # When the error occurs outside of the scheduler at load time, # then the SourceError is reported directly as the main error. result = cli.run(project=project, args=['fetch', 'error.bst']) result.assert_main_error(ErrorDomain.SOURCE, 'the-consistency-error') @pytest.mark.datafiles(os.path.join(TOP_DIR, 'consistencyerror')) def test_fetch_consistency_bug(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # FIXME: # # When a plugin raises an unhandled exception at load # time, as is the case when running Source.get_consistency() # for a fetch command, we could report this to the user # more gracefully as a BUG message. 
# result = cli.run(project=project, args=['fetch', 'bug.bst']) assert "Something went terribly wrong" in result.stderr @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_unfetched_junction(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == 'inline')) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Dump a project.refs if we're using project.refs storage # if ref_storage == 'project.refs': project_refs = { 'projects': { 'test': { 'junction.bst': [ { 'ref': ref } ] } } } _yaml.dump(project_refs, os.path.join(project, 'junction.refs')) # Now try to fetch it, this should automatically result in fetching # the junction itself. result = cli.run(project=project, args=['fetch', 'junction-dep.bst']) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=False) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Now try to fetch it, this will bail with the appropriate error # informing the user to track the junction first result = cli.run(project=project, args=['fetch', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_fetch_cross_junction(cli, tmpdir, datafiles, ref_storage, kind): project = str(datafiles) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') import_etc_path = os.path.join(subproject_path, 'elements', 'import-etc-repo.bst') etc_files_path = os.path.join(subproject_path, 'files', 'etc-files') repo = create_repo(kind, str(tmpdir.join('import-etc'))) ref = repo.create(etc_files_path) element = { 'kind': 'import', 'sources': [ repo.source_config(ref=(ref if ref_storage == 'inline' else None)) ] } _yaml.dump(element, import_etc_path) configure_project(project, { 'ref-storage': ref_storage }) generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == 'inline')) if ref_storage == 'project.refs': result = 
cli.run(project=project, args=['track', 'junction.bst']) result.assert_success() result = cli.run(project=project, args=['track', 'junction.bst:import-etc.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'junction.bst:import-etc.bst']) result.assert_success() buildstream-1.6.9/tests/frontend/help.py000066400000000000000000000015241437515270000203330ustar00rootroot00000000000000import pytest from tests.testutils.runcli import cli def assert_help(cli_output): expected_start = "Usage: " if not cli_output.startswith(expected_start): raise AssertionError("Help output expected to begin with '{}'," .format(expected_start) + " output was: {}" .format(cli_output)) def test_help_main(cli): result = cli.run(args=['--help']) result.assert_success() assert_help(result.output) @pytest.mark.parametrize("command", [ ('build'), ('checkout'), ('fetch'), ('pull'), ('push'), ('shell'), ('show'), ('source-bundle'), ('track'), ('workspace') ]) def test_help(cli, command): result = cli.run(args=[command, '--help']) result.assert_success() assert_help(result.output) buildstream-1.6.9/tests/frontend/init.py000066400000000000000000000067601437515270000203550ustar00rootroot00000000000000import os import pytest from tests.testutils import cli from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from buildstream._versions import BST_FORMAT_VERSION def test_defaults(cli, tmpdir): project = str(tmpdir) project_path = os.path.join(project, 'project.conf') result = cli.run(project=project, args=['init', '--project-name', 'foo']) result.assert_success() project_conf = _yaml.load(project_path) assert project_conf['name'] == 'foo' assert project_conf['format-version'] == str(BST_FORMAT_VERSION) assert project_conf['element-path'] == 'elements' def test_all_options(cli, tmpdir): project = str(tmpdir) project_path = os.path.join(project, 'project.conf') result = cli.run(project=project, args=[ 'init', '--project-name', 'foo', '--format-version', '2', '--element-path', 'ponies', ]) result.assert_success() project_conf = _yaml.load(project_path) assert project_conf['name'] == 'foo' assert project_conf['format-version'] == str(2) assert project_conf['element-path'] == 'ponies' elements_dir = os.path.join(project, 'ponies') assert os.path.isdir(elements_dir) def test_no_project_name(cli, tmpdir): result = cli.run(project=str(tmpdir), args=['init']) result.assert_main_error(ErrorDomain.APP, 'unspecified-project-name') def test_project_exists(cli, tmpdir): project = str(tmpdir) project_path = os.path.join(project, 'project.conf') with open(project_path, 'w') as f: f.write('name: pony\n') result = cli.run(project=project, args=['init', '--project-name', 'foo']) result.assert_main_error(ErrorDomain.APP, 'project-exists') def test_force_overwrite_project(cli, tmpdir): project = str(tmpdir) project_path = os.path.join(project, 'project.conf') with open(project_path, 'w') as f: f.write('name: pony\n') result = cli.run(project=project, args=['init', '--project-name', 'foo', '--force']) result.assert_success() project_conf = _yaml.load(project_path) assert project_conf['name'] == 'foo' assert project_conf['format-version'] == str(BST_FORMAT_VERSION) @pytest.mark.parametrize("project_name", [('Micheal Jackson'), ('one+one')]) def test_bad_project_name(cli, tmpdir, project_name): result = cli.run(project=str(tmpdir), args=['init', '--project-name', project_name]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_SYMBOL_NAME) 
@pytest.mark.parametrize("format_version", [(str(-1)), (str(BST_FORMAT_VERSION + 1))]) def test_bad_format_version(cli, tmpdir, format_version): result = cli.run(project=str(tmpdir), args=[ 'init', '--project-name', 'foo', '--format-version', format_version ]) result.assert_main_error(ErrorDomain.APP, 'invalid-format-version') @pytest.mark.parametrize("element_path", [('/absolute/path'), ('../outside/of/project')]) def test_bad_element_path(cli, tmpdir, element_path): result = cli.run(project=str(tmpdir), args=[ 'init', '--project-name', 'foo', '--element-path', element_path ]) result.assert_main_error(ErrorDomain.APP, 'invalid-element-path') @pytest.mark.parametrize("element_path", [('/absolute/path'), ('../outside/of/project')]) def test_bad_element_path(cli, tmpdir, element_path): result = cli.run(project=str(tmpdir), args=[ 'init', '--project-name', 'foo', '--element-path', element_path ]) result.assert_main_error(ErrorDomain.APP, 'invalid-element-path') buildstream-1.6.9/tests/frontend/logging.py000066400000000000000000000074361437515270000210410ustar00rootroot00000000000000import os import pytest import re from tests.testutils import cli, create_repo, ALL_REPO_KINDS from buildstream import _yaml from buildstream._exceptions import ErrorDomain # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) @pytest.mark.datafiles(DATA_DIR) def test_default_logging(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) bin_files_path = os.path.join(project, 'files', 'bin-files') element_path = os.path.join(project, 'elements') element_name = 'fetch-test-git.bst' # Create our repo object of the given source type with # the bin files, and then collect the initial ref. # repo = create_repo('git', str(tmpdir)) ref = repo.create(bin_files_path) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(element_path, element_name)) # Now try to fetch it result = cli.run(project=project, args=['fetch', element_name]) result.assert_success() m = re.search("\[\d\d:\d\d:\d\d\]\[\]\[\] SUCCESS Checking sources", result.stderr) assert(m is not None) @pytest.mark.datafiles(DATA_DIR) def test_custom_logging(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) bin_files_path = os.path.join(project, 'files', 'bin-files') element_path = os.path.join(project, 'elements') element_name = 'fetch-test-git.bst' custom_log_format = '%{elapsed},%{elapsed-us},%{wallclock},%{key},%{element},%{action},%{message}' user_config = {'logging': {'message-format': custom_log_format}} user_config_file = str(tmpdir.join('buildstream.conf')) _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file) # Create our repo object of the given source type with # the bin files, and then collect the initial ref. 
# repo = create_repo('git', str(tmpdir)) ref = repo.create(bin_files_path) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(element_path, element_name)) # Now try to fetch it result = cli.run(project=project, args=['-c', user_config_file, 'fetch', element_name]) result.assert_success() m = re.search("\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,,,SUCCESS,Checking sources", result.stderr) assert(m is not None) @pytest.mark.datafiles(DATA_DIR) def test_failed_build_listing(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_names = [] for i in range(3): element_name = 'testfail-{}.bst'.format(i) element_path = os.path.join('elements', element_name) element = { 'kind': 'script', 'config': { 'commands': [ 'false' ] } } _yaml.dump(element, os.path.join(project, element_path)) element_names.append(element_name) result = cli.run(project=project, args=['--on-error=continue', 'build'] + element_names) result.assert_main_error(ErrorDomain.STREAM, None) failure_heading_pos = re.search(r'^Failure Summary$', result.stderr, re.MULTILINE).start() pipeline_heading_pos = re.search(r'^Pipeline Summary$', result.stderr, re.MULTILINE).start() failure_summary_range = range(failure_heading_pos, pipeline_heading_pos) assert all(m.start() in failure_summary_range and m.end() in failure_summary_range for m in re.finditer(r'^\s+testfail-.\.bst.+?\s+Log file', result.stderr, re.MULTILINE)) buildstream-1.6.9/tests/frontend/main.py000066400000000000000000000016261437515270000203320ustar00rootroot00000000000000from buildstream._frontend.app import _prefix_choice_value_proc import pytest import click def test_prefix_choice_value_proc_full_match(): value_proc = _prefix_choice_value_proc(['foo', 'bar', 'baz']) assert("foo" == value_proc("foo")) assert("bar" == value_proc("bar")) assert("baz" == value_proc("baz")) def test_prefix_choice_value_proc_prefix_match(): value_proc = _prefix_choice_value_proc(['foo']) assert ("foo" == value_proc("f")) def test_prefix_choice_value_proc_ambigous_match(): value_proc = _prefix_choice_value_proc(['bar', 'baz']) assert ("bar" == value_proc("bar")) assert ("baz" == value_proc("baz")) with pytest.raises(click.UsageError): value_proc("ba") def test_prefix_choice_value_proc_value_not_in_choices(): value_proc = _prefix_choice_value_proc(['bar', 'baz']) with pytest.raises(click.UsageError): value_proc("foo") buildstream-1.6.9/tests/frontend/mirror.py000066400000000000000000000742471437515270000207310ustar00rootroot00000000000000import os import pytest from tests.testutils import cli, create_repo, ALL_REPO_KINDS, generate_junction from buildstream import _yaml from buildstream._exceptions import ErrorDomain # Project directory TOP_DIR = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = os.path.join(TOP_DIR, 'project') def generate_element(output_file): element = { 'kind': 'import', 'sources': [ { 'kind': 'fetch_source', "output-text": output_file, "urls": ["foo:repo1", "bar:repo2"], "fetch-succeeds": { "FOO/repo1": True, "BAR/repo2": False, "OOF/repo1": False, "RAB/repo2": True, "OFO/repo1": False, "RBA/repo2": False, "ooF/repo1": False, "raB/repo2": False, } } ] } return element def generate_project(): project = { 'name': 'test', 'element-path': 'elements', 'aliases': { 'foo': 'FOO/', 'bar': 'BAR/', }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { 'foo': ['OOF/'], 'bar': ['RAB/'], }, }, { 'name': 'arrakis', 'aliases': { 'foo': ['OFO/'], 'bar': 
['RBA/'], }, }, { 'name': 'oz', 'aliases': { 'foo': ['ooF/'], 'bar': ['raB/'], } }, ], 'plugins': [ { 'origin': 'local', 'path': 'sources', 'sources': { 'fetch_source': 0 } } ] } return project @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_mirror_fetch(cli, tmpdir, datafiles, kind): bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') upstream_repodir = os.path.join(str(tmpdir), 'upstream') mirror_repodir = os.path.join(str(tmpdir), 'mirror') project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') # Create repo objects of the upstream and mirror upstream_repo = create_repo(kind, upstream_repodir) upstream_ref = upstream_repo.create(bin_files_path) mirror_repo = upstream_repo.copy(mirror_repodir) mirror_ref = upstream_ref upstream_ref = upstream_repo.create(dev_files_path) element = { 'kind': 'import', 'sources': [ upstream_repo.source_config(ref=upstream_ref) ] } element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) full_repo = element['sources'][0]['url'] upstream_map, repo_name = os.path.split(full_repo) alias = 'foo-' + kind aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo full_mirror = mirror_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) os.makedirs(element_dir) _yaml.dump(element, element_path) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: upstream_map + "/" }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], }, }, ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) # No obvious ways of checking that the mirror has been fetched # But at least we can be sure it succeeds result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_mirror_fetch_upstream_absent(cli, tmpdir, datafiles, kind): bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') upstream_repodir = os.path.join(str(tmpdir), 'upstream') mirror_repodir = os.path.join(str(tmpdir), 'mirror') project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') # Create repo objects of the upstream and mirror upstream_repo = create_repo(kind, upstream_repodir) ref = upstream_repo.create(dev_files_path) mirror_repo = upstream_repo.copy(mirror_repodir) element = { 'kind': 'import', 'sources': [ upstream_repo.source_config(ref=ref) ] } element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) full_repo = element['sources'][0]['url'] upstream_map, repo_name = os.path.split(full_repo) alias = 'foo-' + kind aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo full_mirror = mirror_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) os.makedirs(element_dir) _yaml.dump(element, element_path) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: 'http://www.example.com/' }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], }, }, ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, 
project_file) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) def test_mirror_fetch_multi(cli, tmpdir, datafiles): output_file = os.path.join(str(tmpdir), "output.txt") project_dir = str(tmpdir) element_dir = os.path.join(project_dir, 'elements') os.makedirs(element_dir, exist_ok=True) element_name = "test.bst" element_path = os.path.join(element_dir, element_name) element = generate_element(output_file) _yaml.dump(element, element_path) project_file = os.path.join(project_dir, 'project.conf') project = generate_project() _yaml.dump(project, project_file) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() with open(output_file) as f: contents = f.read() assert "Fetch foo:repo1 succeeded from FOO/repo1" in contents assert "Fetch bar:repo2 succeeded from RAB/repo2" in contents @pytest.mark.datafiles(DATA_DIR) def test_mirror_fetch_default_cmdline(cli, tmpdir, datafiles): output_file = os.path.join(str(tmpdir), "output.txt") project_dir = str(tmpdir) element_dir = os.path.join(project_dir, 'elements') os.makedirs(element_dir, exist_ok=True) element_name = "test.bst" element_path = os.path.join(element_dir, element_name) element = generate_element(output_file) _yaml.dump(element, element_path) project_file = os.path.join(project_dir, 'project.conf') project = generate_project() _yaml.dump(project, project_file) result = cli.run(project=project_dir, args=['--default-mirror', 'arrakis', 'fetch', element_name]) result.assert_success() with open(output_file) as f: contents = f.read() print(contents) # Success if fetching from arrakis' mirror happened before middle-earth's arrakis_str = "OFO/repo1" arrakis_pos = contents.find(arrakis_str) assert arrakis_pos != -1, "'{}' wasn't found".format(arrakis_str) me_str = "OOF/repo1" me_pos = contents.find(me_str) assert me_pos != -1, "'{}' wasn't found".format(me_str) assert arrakis_pos < me_pos, "'{}' wasn't found before '{}'".format(arrakis_str, me_str) @pytest.mark.datafiles(DATA_DIR) def test_mirror_fetch_default_userconfig(cli, tmpdir, datafiles): output_file = os.path.join(str(tmpdir), "output.txt") project_dir = str(tmpdir) element_dir = os.path.join(project_dir, 'elements') os.makedirs(element_dir, exist_ok=True) element_name = "test.bst" element_path = os.path.join(element_dir, element_name) element = generate_element(output_file) _yaml.dump(element, element_path) project_file = os.path.join(project_dir, 'project.conf') project = generate_project() _yaml.dump(project, project_file) userconfig = { 'projects': { 'test': { 'default-mirror': 'oz' } } } cli.configure(userconfig) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() with open(output_file) as f: contents = f.read() print(contents) # Success if fetching from Oz' mirror happened before middle-earth's oz_str = "ooF/repo1" oz_pos = contents.find(oz_str) assert oz_pos != -1, "'{}' wasn't found".format(oz_str) me_str = "OOF/repo1" me_pos = contents.find(me_str) assert me_pos != -1, "'{}' wasn't found".format(me_str) assert oz_pos < me_pos, "'{}' wasn't found before '{}'".format(oz_str, me_str) @pytest.mark.datafiles(DATA_DIR) def test_mirror_fetch_default_cmdline_overrides_config(cli, tmpdir, datafiles): output_file = os.path.join(str(tmpdir), "output.txt") project_dir = str(tmpdir) element_dir = os.path.join(project_dir, 'elements') os.makedirs(element_dir, exist_ok=True) element_name = "test.bst" element_path = os.path.join(element_dir, 
element_name) element = generate_element(output_file) _yaml.dump(element, element_path) project_file = os.path.join(project_dir, 'project.conf') project = generate_project() _yaml.dump(project, project_file) userconfig = { 'projects': { 'test': { 'default-mirror': 'oz' } } } cli.configure(userconfig) result = cli.run(project=project_dir, args=['--default-mirror', 'arrakis', 'fetch', element_name]) result.assert_success() with open(output_file) as f: contents = f.read() print(contents) # Success if fetching from arrakis' mirror happened before middle-earth's arrakis_str = "OFO/repo1" arrakis_pos = contents.find(arrakis_str) assert arrakis_pos != -1, "'{}' wasn't found".format(arrakis_str) me_str = "OOF/repo1" me_pos = contents.find(me_str) assert me_pos != -1, "'{}' wasn't found".format(me_str) assert arrakis_pos < me_pos, "'{}' wasn't found before '{}'".format(arrakis_str, me_str) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_mirror_track_upstream_present(cli, tmpdir, datafiles, kind): bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') upstream_repodir = os.path.join(str(tmpdir), 'upstream') mirror_repodir = os.path.join(str(tmpdir), 'mirror') project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') # Create repo objects of the upstream and mirror upstream_repo = create_repo(kind, upstream_repodir) upstream_ref = upstream_repo.create(bin_files_path) mirror_repo = upstream_repo.copy(mirror_repodir) mirror_ref = upstream_ref upstream_ref = upstream_repo.create(dev_files_path) element = { 'kind': 'import', 'sources': [ upstream_repo.source_config(ref=upstream_ref) ] } element['sources'][0] element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) full_repo = element['sources'][0]['url'] upstream_map, repo_name = os.path.split(full_repo) alias = 'foo-' + kind aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo full_mirror = mirror_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) os.makedirs(element_dir) _yaml.dump(element, element_path) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: upstream_map + "/" }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], }, }, ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) result = cli.run(project=project_dir, args=['track', element_name]) result.assert_success() # Tracking tries upstream first. Check the ref is from upstream. 
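    # (Illustrative aside, not part of the original test: a simplified picture of
    # the alias/mirror expansion these tests exercise.  An aliased URL such as
    # "foo:repo1" resolves to the upstream location named by the project alias,
    # and every mirror that covers the alias contributes an alternative; for
    # tracking, the upstream candidate is tried first, which is what the check
    # below asserts.  The helper is hypothetical and only reuses the example
    # alias/mirror names from generate_project() above; it is not BuildStream's
    # implementation.)
    def _candidate_urls(aliased_url, aliases, mirrors):
        alias_name, rest = aliased_url.split(':', 1)
        candidates = [aliases[alias_name] + rest]
        for mirror in mirrors:
            for base in mirror.get('aliases', {}).get(alias_name, []):
                candidates.append(base + rest)
        return candidates

    assert _candidate_urls('foo:repo1', {'foo': 'FOO/'},
                           [{'aliases': {'foo': ['OOF/']}}]) == ['FOO/repo1', 'OOF/repo1']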
new_element = _yaml.load(element_path) source = new_element['sources'][0] if 'ref' in source: assert source['ref'] == upstream_ref @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_mirror_track_upstream_absent(cli, tmpdir, datafiles, kind): bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') upstream_repodir = os.path.join(str(tmpdir), 'upstream') mirror_repodir = os.path.join(str(tmpdir), 'mirror') project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') # Create repo objects of the upstream and mirror upstream_repo = create_repo(kind, upstream_repodir) upstream_ref = upstream_repo.create(bin_files_path) mirror_repo = upstream_repo.copy(mirror_repodir) mirror_ref = upstream_ref upstream_ref = upstream_repo.create(dev_files_path) element = { 'kind': 'import', 'sources': [ upstream_repo.source_config(ref=upstream_ref) ] } element['sources'][0] element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) full_repo = element['sources'][0]['url'] upstream_map, repo_name = os.path.split(full_repo) alias = 'foo-' + kind aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo full_mirror = mirror_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) os.makedirs(element_dir) _yaml.dump(element, element_path) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: 'http://www.example.com/' }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], }, }, ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) result = cli.run(project=project_dir, args=['track', element_name]) result.assert_success() # Check that tracking fell back to the mirror new_element = _yaml.load(element_path) source = new_element['sources'][0] if 'ref' in source: assert source['ref'] == mirror_ref @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_mirror_from_includes(cli, tmpdir, datafiles, kind): bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') upstream_repodir = os.path.join(str(tmpdir), 'upstream') mirror_repodir = os.path.join(str(tmpdir), 'mirror') project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') # Create repo objects of the upstream and mirror upstream_repo = create_repo(kind, upstream_repodir) upstream_ref = upstream_repo.create(bin_files_path) mirror_repo = upstream_repo.copy(mirror_repodir) element = { 'kind': 'import', 'sources': [ upstream_repo.source_config(ref=upstream_ref) ] } element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) full_repo = element['sources'][0]['url'] upstream_map, repo_name = os.path.split(full_repo) alias = 'foo-' + kind aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo full_mirror = mirror_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) os.makedirs(element_dir) _yaml.dump(element, element_path) config_project_dir = str(tmpdir.join('config')) os.makedirs(config_project_dir, exist_ok=True) config_project = { 'name': 'config' } _yaml.dump(config_project, os.path.join(config_project_dir, 'project.conf')) extra_mirrors = { 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: 
[mirror_map + "/"], } } ] } _yaml.dump(extra_mirrors, os.path.join(config_project_dir, 'mirrors.yml')) generate_junction(str(tmpdir.join('config_repo')), config_project_dir, os.path.join(element_dir, 'config.bst')) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: upstream_map + "/" }, '(@)': [ 'config.bst:mirrors.yml' ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) # Now make the upstream unavailable. os.rename(upstream_repo.repo, '{}.bak'.format(upstream_repo.repo)) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_mirror_junction_from_includes(cli, tmpdir, datafiles, kind): bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') upstream_repodir = os.path.join(str(tmpdir), 'upstream') mirror_repodir = os.path.join(str(tmpdir), 'mirror') project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') # Create repo objects of the upstream and mirror upstream_repo = create_repo(kind, upstream_repodir) upstream_ref = upstream_repo.create(bin_files_path) mirror_repo = upstream_repo.copy(mirror_repodir) element = { 'kind': 'junction', 'sources': [ upstream_repo.source_config(ref=upstream_ref) ] } element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) full_repo = element['sources'][0]['url'] upstream_map, repo_name = os.path.split(full_repo) alias = 'foo-' + kind aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo full_mirror = mirror_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) os.makedirs(element_dir) _yaml.dump(element, element_path) config_project_dir = str(tmpdir.join('config')) os.makedirs(config_project_dir, exist_ok=True) config_project = { 'name': 'config' } _yaml.dump(config_project, os.path.join(config_project_dir, 'project.conf')) extra_mirrors = { 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], } } ] } _yaml.dump(extra_mirrors, os.path.join(config_project_dir, 'mirrors.yml')) generate_junction(str(tmpdir.join('config_repo')), config_project_dir, os.path.join(element_dir, 'config.bst')) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: upstream_map + "/" }, '(@)': [ 'config.bst:mirrors.yml' ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) # Now make the upstream unavailable. os.rename(upstream_repo.repo, '{}.bak'.format(upstream_repo.repo)) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_main_error(ErrorDomain.STREAM, None) # Now make the upstream available again. os.rename('{}.bak'.format(upstream_repo.repo), upstream_repo.repo) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) def test_mirror_git_submodule_fetch(cli, tmpdir, datafiles): # Test that it behaves as expected with submodules, both defined in config # and discovered when fetching. 
foo_file = os.path.join(str(datafiles), 'files', 'foo') bar_file = os.path.join(str(datafiles), 'files', 'bar') bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') mirror_dir = os.path.join(str(datafiles), 'mirror') defined_subrepo = create_repo('git', str(tmpdir), 'defined_subrepo') defined_mirror_ref = defined_subrepo.create(bin_files_path) defined_mirror = defined_subrepo.copy(mirror_dir) defined_subref = defined_subrepo.add_file(foo_file) found_subrepo = create_repo('git', str(tmpdir), 'found_subrepo') found_subref = found_subrepo.create(dev_files_path) main_repo = create_repo('git', str(tmpdir)) main_mirror_ref = main_repo.create(bin_files_path) main_repo.add_submodule('defined', 'file://' + defined_subrepo.repo) main_repo.add_submodule('found', 'file://' + found_subrepo.repo) main_mirror = main_repo.copy(mirror_dir) main_ref = main_repo.add_file(bar_file) project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') os.makedirs(element_dir) element = { 'kind': 'import', 'sources': [ main_repo.source_config(ref=main_mirror_ref) ] } element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) # Alias the main repo full_repo = element['sources'][0]['url'] _, repo_name = os.path.split(full_repo) alias = 'foo' aliased_repo = alias + ':' + repo_name element['sources'][0]['url'] = aliased_repo # Hide the found subrepo del element['sources'][0]['submodules']['found'] # Alias the defined subrepo subrepo = element['sources'][0]['submodules']['defined']['url'] _, repo_name = os.path.split(subrepo) aliased_repo = alias + ':' + repo_name element['sources'][0]['submodules']['defined']['url'] = aliased_repo _yaml.dump(element, element_path) full_mirror = main_mirror.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: 'http://www.example.com/' }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], }, }, ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) def test_mirror_fallback_git_only_submodules(cli, tmpdir, datafiles): # Main repo has no mirror or alias. # One submodule is overridden to use a mirror. # There is another submodules not overriden. # Upstream for overriden submodule is down. # # We expect: # - overriden submodule is fetched from mirror. # - other submodule is fetched. 
bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') upstream_bin_repodir = os.path.join(str(tmpdir), 'bin-upstream') mirror_bin_repodir = os.path.join(str(tmpdir), 'bin-mirror') upstream_bin_repo = create_repo('git', upstream_bin_repodir) upstream_bin_repo.create(bin_files_path) mirror_bin_repo = upstream_bin_repo.copy(mirror_bin_repodir) dev_repodir = os.path.join(str(tmpdir), 'dev-upstream') dev_repo = create_repo('git', dev_repodir) dev_repo.create(dev_files_path) main_files = os.path.join(str(tmpdir), 'main-files') os.makedirs(main_files) with open(os.path.join(main_files, 'README'), 'w') as f: f.write("TEST\n") main_repodir = os.path.join(str(tmpdir), 'main-upstream') main_repo = create_repo('git', main_repodir) main_repo.create(main_files) upstream_url = 'file://{}'.format(upstream_bin_repo.repo) main_repo.add_submodule('bin', url=upstream_url) main_repo.add_submodule('dev', url='file://{}'.format(dev_repo.repo)) # Unlist 'dev'. del main_repo.submodules['dev'] main_ref = main_repo.latest_commit() upstream_map, repo_name = os.path.split(upstream_url) alias = 'foo' aliased_repo = '{}:{}'.format(alias, repo_name) main_repo.submodules['bin']['url'] = aliased_repo full_mirror = mirror_bin_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') element = { 'kind': 'import', 'sources': [ main_repo.source_config(ref=main_ref, checkout_submodules=True) ] } element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) os.makedirs(element_dir) _yaml.dump(element, element_path) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: upstream_map + "/" }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], } } ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) # Now make the upstream unavailable. os.rename(upstream_bin_repo.repo, '{}.bak'.format(upstream_bin_repo.repo)) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() result = cli.run(project=project_dir, args=['build', element_name]) result.assert_success() checkout = os.path.join(str(tmpdir), 'checkout') result = cli.run(project=project_dir, args=['checkout', element_name, checkout]) result.assert_success() assert os.path.exists(os.path.join(checkout, 'bin', 'bin', 'hello')) assert os.path.exists(os.path.join(checkout, 'dev', 'include', 'pony.h')) @pytest.mark.datafiles(DATA_DIR) def test_mirror_fallback_git_with_submodules(cli, tmpdir, datafiles): # Main repo has mirror. But does not list submodules. 
# # We expect: # - we will fetch submodules anyway bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr') dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr') bin_repodir = os.path.join(str(tmpdir), 'bin-repo') bin_repo = create_repo('git', bin_repodir) bin_repo.create(bin_files_path) dev_repodir = os.path.join(str(tmpdir), 'dev-repo') dev_repo = create_repo('git', dev_repodir) dev_repo.create(dev_files_path) main_files = os.path.join(str(tmpdir), 'main-files') os.makedirs(main_files) with open(os.path.join(main_files, 'README'), 'w') as f: f.write("TEST\n") upstream_main_repodir = os.path.join(str(tmpdir), 'main-upstream') upstream_main_repo = create_repo('git', upstream_main_repodir) upstream_main_repo.create(main_files) upstream_main_repo.add_submodule('bin', url='file://{}'.format(bin_repo.repo)) upstream_main_repo.add_submodule('dev', url='file://{}'.format(dev_repo.repo)) # Unlist submodules. del upstream_main_repo.submodules['bin'] del upstream_main_repo.submodules['dev'] upstream_main_ref = upstream_main_repo.latest_commit() mirror_main_repodir = os.path.join(str(tmpdir), 'main-mirror') mirror_main_repo = upstream_main_repo.copy(mirror_main_repodir) upstream_url = mirror_main_repo.source_config()['url'] upstream_map, repo_name = os.path.split(upstream_url) alias = 'foo' aliased_repo = '{}:{}'.format(alias, repo_name) full_mirror = mirror_main_repo.source_config()['url'] mirror_map, _ = os.path.split(full_mirror) project_dir = os.path.join(str(tmpdir), 'project') os.makedirs(project_dir) element_dir = os.path.join(project_dir, 'elements') element = { 'kind': 'import', 'sources': [ upstream_main_repo.source_config(ref=upstream_main_ref, checkout_submodules=True) ] } element['sources'][0]['url'] = aliased_repo element_name = 'test.bst' element_path = os.path.join(element_dir, element_name) os.makedirs(element_dir) _yaml.dump(element, element_path) project = { 'name': 'test', 'element-path': 'elements', 'aliases': { alias: upstream_map + "/" }, 'mirrors': [ { 'name': 'middle-earth', 'aliases': { alias: [mirror_map + "/"], } } ] } project_file = os.path.join(project_dir, 'project.conf') _yaml.dump(project, project_file) # Now make the upstream unavailable. 
os.rename(upstream_main_repo.repo, '{}.bak'.format(upstream_main_repo.repo)) result = cli.run(project=project_dir, args=['fetch', element_name]) result.assert_success() result = cli.run(project=project_dir, args=['build', element_name]) result.assert_success() checkout = os.path.join(str(tmpdir), 'checkout') result = cli.run(project=project_dir, args=['checkout', element_name, checkout]) result.assert_success() assert os.path.exists(os.path.join(checkout, 'bin', 'bin', 'hello')) assert os.path.exists(os.path.join(checkout, 'dev', 'include', 'pony.h')) buildstream-1.6.9/tests/frontend/overlaps.py000066400000000000000000000130451437515270000212370ustar00rootroot00000000000000import os import pytest from tests.testutils.runcli import cli from tests.testutils import generate_junction from buildstream._exceptions import ErrorDomain, LoadErrorReason from buildstream import _yaml from buildstream.plugin import CoreWarnings # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "overlaps" ) def gen_project(project_dir, fail_on_overlap, use_fatal_warnings=True, project_name="test"): template = { "name": project_name, } if use_fatal_warnings: template["fatal-warnings"] = [CoreWarnings.OVERLAPS] if fail_on_overlap else [] else: template["fail-on-overlap"] = fail_on_overlap projectfile = os.path.join(project_dir, "project.conf") _yaml.dump(template, projectfile) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("use_fatal_warnings", [True, False]) def test_overlaps(cli, datafiles, use_fatal_warnings): project_dir = str(datafiles) gen_project(project_dir, False, use_fatal_warnings) result = cli.run(project=project_dir, silent=True, args=[ 'build', 'collect.bst']) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("use_fatal_warnings", [True, False]) def test_overlaps_error(cli, datafiles, use_fatal_warnings): project_dir = str(datafiles) gen_project(project_dir, True, use_fatal_warnings) result = cli.run(project=project_dir, silent=True, args=[ 'build', 'collect.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("element", ["collect-whitelisted.bst", "collect-whitelisted-abs.bst"]) def test_overlaps_whitelist(cli, datafiles, element): project_dir = str(datafiles) gen_project(project_dir, True) result = cli.run(project=project_dir, silent=True, args=[ 'build', element]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("element", ["collect-whitelisted.bst", "collect-whitelisted-abs.bst"]) def test_overlaps_whitelist_ignored(cli, datafiles, element): project_dir = str(datafiles) gen_project(project_dir, False) result = cli.run(project=project_dir, silent=True, args=[ 'build', element]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) def test_overlaps_whitelist_on_overlapper(cli, datafiles): # Tests that the overlapping element is responsible for whitelisting, # i.e. that if A overlaps B overlaps C, and the B->C overlap is permitted, # it'll still fail because A doesn't permit overlaps. 
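    # (Illustrative aside, not part of the original test: the whitelisting rule
    # exercised here, in simplified form.  When element X is staged on top of
    # element Y and a file overlaps, it is X's own 'overlap-whitelist' that must
    # cover the file; Y's whitelist does not help.  The helper is hypothetical
    # and assumes simple fnmatch-style globbing for patterns such as "file*".)
    import fnmatch

    def _overlap_permitted(path, overlapping_element_whitelist):
        # only the whitelist of the element doing the overlapping is consulted
        return any(fnmatch.fnmatch(path, pattern)
                   for pattern in overlapping_element_whitelist)

    # b-whitelisted.bst permits its overlap on file2, but plain a.bst, which has
    # no whitelist, does not permit overlapping file2 again, hence the failure.
    assert _overlap_permitted('file2', ['file2', 'file3'])
    assert not _overlap_permitted('file2', [])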
project_dir = str(datafiles) gen_project(project_dir, True) result = cli.run(project=project_dir, silent=True, args=[ 'build', 'collect-partially-whitelisted.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS) @pytest.mark.datafiles(DATA_DIR) def test_overlaps_whitelist_undefined_variable(cli, datafiles): project_dir = str(datafiles) gen_project(project_dir, False) result = cli.run(project=project_dir, silent=True, args=["build", "collect-whitelisted-undefined.bst"]) # Assert that we get the expected undefined variable error, # and that it has the provenance we expect from whitelist-undefined.bst # # FIXME: In BuildStream 1, we only encounter this error later when extracting # the variables from an artifact, and we lose the provenance. # # This is not a huge problem in light of the coming of BuildStream 2 and # is probably not worth too much attention, but it is worth noting that # this is an imperfect error message delivered at a late stage. # result.assert_main_error(ErrorDomain.STREAM, None) assert "public.yaml [line 3 column 4]" in result.stderr @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("use_fatal_warnings", [True, False]) def test_overlaps_script(cli, datafiles, use_fatal_warnings): # Test overlaps with script element to test # Element.stage_dependency_artifacts() with Scope.RUN project_dir = str(datafiles) gen_project(project_dir, False, use_fatal_warnings) result = cli.run(project=project_dir, silent=True, args=[ 'build', 'script.bst']) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("project_policy", [('fail'), ('warn')]) @pytest.mark.parametrize("subproject_policy", [('fail'), ('warn')]) def test_overlap_subproject(cli, tmpdir, datafiles, project_policy, subproject_policy): project_dir = str(datafiles) subproject_dir = os.path.join(project_dir, 'sub-project') junction_path = os.path.join(project_dir, 'sub-project.bst') gen_project(project_dir, bool(project_policy == 'fail'), project_name='test') gen_project(subproject_dir, bool(subproject_policy == 'fail'), project_name='subtest') generate_junction(tmpdir, subproject_dir, junction_path) # Here we have a dependency chain where the project element # always overlaps with the subproject element. # # Test that overlap error vs warning policy for this overlap # is always controlled by the project and not the subproject. 
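    # (Illustrative aside, not part of the original test: the policy resolution
    # asserted below, reduced to its essence.  Whether the overlap is treated as
    # fatal depends only on the top-level project's configuration; the
    # subproject's own setting is deliberately ignored.  The helper is
    # hypothetical, for illustration only.)
    def _overlap_is_fatal(toplevel_policy, subproject_policy):
        # subproject_policy is intentionally unused
        return toplevel_policy == 'fail'

    assert _overlap_is_fatal('fail', 'warn')
    assert not _overlap_is_fatal('warn', 'fail')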
# result = cli.run(project=project_dir, silent=True, args=['build', 'sub-collect.bst']) if project_policy == 'fail': result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.OVERLAPS) else: result.assert_success() assert "WARNING [overlaps]" in result.stderr buildstream-1.6.9/tests/frontend/overlaps/000077500000000000000000000000001437515270000206625ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/a-whitelisted-abs.bst000066400000000000000000000002421437515270000247000ustar00rootroot00000000000000kind: import config: source: / target: / depends: - b-whitelisted.bst sources: - kind: local path: "a" public: bst: overlap-whitelist: - "/file*" buildstream-1.6.9/tests/frontend/overlaps/a-whitelisted.bst000066400000000000000000000002411437515270000241340ustar00rootroot00000000000000kind: import config: source: / target: / depends: - b-whitelisted.bst sources: - kind: local path: "a" public: bst: overlap-whitelist: - "file*" buildstream-1.6.9/tests/frontend/overlaps/a.bst000066400000000000000000000001411437515270000216100ustar00rootroot00000000000000kind: import config: source: / target: / depends: - b.bst sources: - kind: local path: "a" buildstream-1.6.9/tests/frontend/overlaps/a/000077500000000000000000000000001437515270000211025ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/a/file1000066400000000000000000000000041437515270000220170ustar00rootroot00000000000000foo buildstream-1.6.9/tests/frontend/overlaps/a/file2000066400000000000000000000000041437515270000220200ustar00rootroot00000000000000bar buildstream-1.6.9/tests/frontend/overlaps/b-whitelisted-abs.bst000066400000000000000000000002761437515270000247100ustar00rootroot00000000000000kind: import config: source: / target: / depends: - c.bst sources: - kind: local path: "b" variables: FILE: /file public: bst: overlap-whitelist: - /file2 - "%{FILE}3" buildstream-1.6.9/tests/frontend/overlaps/b-whitelisted.bst000066400000000000000000000002741437515270000241430ustar00rootroot00000000000000kind: import config: source: / target: / depends: - c.bst sources: - kind: local path: "b" variables: FILE: file public: bst: overlap-whitelist: - file2 - "%{FILE}3" buildstream-1.6.9/tests/frontend/overlaps/b.bst000066400000000000000000000001411437515270000216110ustar00rootroot00000000000000kind: import config: source: / target: / depends: - c.bst sources: - kind: local path: "b" buildstream-1.6.9/tests/frontend/overlaps/b/000077500000000000000000000000001437515270000211035ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/b/file2000066400000000000000000000000041437515270000220210ustar00rootroot00000000000000foo buildstream-1.6.9/tests/frontend/overlaps/b/file3000066400000000000000000000000041437515270000220220ustar00rootroot00000000000000bar buildstream-1.6.9/tests/frontend/overlaps/c-whitelisted-abs.bst000066400000000000000000000002051437515270000247010ustar00rootroot00000000000000kind: import config: source: / target: / sources: - kind: local path: "c" public: bst: overlap-whitelist: - "/file*" buildstream-1.6.9/tests/frontend/overlaps/c-whitelisted.bst000066400000000000000000000002041437515270000241350ustar00rootroot00000000000000kind: import config: source: / target: / sources: - kind: local path: "c" public: bst: overlap-whitelist: - "file*" buildstream-1.6.9/tests/frontend/overlaps/c.bst000066400000000000000000000001201437515270000216070ustar00rootroot00000000000000kind: import config: source: / target: / sources: - kind: local path: "c" 
buildstream-1.6.9/tests/frontend/overlaps/c/000077500000000000000000000000001437515270000211045ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/c/file1000066400000000000000000000000041437515270000220210ustar00rootroot00000000000000baz buildstream-1.6.9/tests/frontend/overlaps/c/file2000066400000000000000000000000041437515270000220220ustar00rootroot00000000000000bar buildstream-1.6.9/tests/frontend/overlaps/c/file3000066400000000000000000000000041437515270000220230ustar00rootroot00000000000000foo buildstream-1.6.9/tests/frontend/overlaps/collect-partially-whitelisted.bst000066400000000000000000000002041437515270000273370ustar00rootroot00000000000000kind: compose depends: - filename: a.bst type: build - filename: b-whitelisted.bst type: build - filename: c.bst type: build buildstream-1.6.9/tests/frontend/overlaps/collect-whitelisted-abs.bst000066400000000000000000000002301437515270000261020ustar00rootroot00000000000000kind: compose depends: - filename: a-whitelisted-abs.bst type: build - filename: b-whitelisted-abs.bst type: build - filename: c.bst type: build buildstream-1.6.9/tests/frontend/overlaps/collect-whitelisted-undefined.bst000066400000000000000000000002261437515270000273030ustar00rootroot00000000000000kind: compose depends: - filename: a-whitelisted.bst type: build - filename: whitelist-undefined.bst type: build - filename: c.bst type: build buildstream-1.6.9/tests/frontend/overlaps/collect-whitelisted.bst000066400000000000000000000002201437515270000253360ustar00rootroot00000000000000kind: compose depends: - filename: a-whitelisted.bst type: build - filename: b-whitelisted.bst type: build - filename: c.bst type: build buildstream-1.6.9/tests/frontend/overlaps/collect.bst000066400000000000000000000001701437515270000230170ustar00rootroot00000000000000kind: compose depends: - filename: a.bst type: build - filename: b.bst type: build - filename: c.bst type: build buildstream-1.6.9/tests/frontend/overlaps/script.bst000066400000000000000000000001661437515270000227030ustar00rootroot00000000000000kind: script depends: - filename: a.bst type: build - filename: b.bst type: build - filename: c.bst type: build buildstream-1.6.9/tests/frontend/overlaps/sub-collect.bst000066400000000000000000000002701437515270000236070ustar00rootroot00000000000000kind: compose depends: - filename: c.bst type: build - filename: a-sub.bst junction: sub-project.bst type: build - filename: z-sub.bst junction: sub-project.bst type: build buildstream-1.6.9/tests/frontend/overlaps/sub-project/000077500000000000000000000000001437515270000231175ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/sub-project/a-sub.bst000066400000000000000000000001261437515270000246370ustar00rootroot00000000000000kind: import config: source: / target: / sources: - kind: local path: "files/a" buildstream-1.6.9/tests/frontend/overlaps/sub-project/files/000077500000000000000000000000001437515270000242215ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/sub-project/files/a/000077500000000000000000000000001437515270000244415ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/sub-project/files/a/file3000066400000000000000000000000061437515270000253620ustar00rootroot00000000000000barny 
buildstream-1.6.9/tests/frontend/overlaps/sub-project/files/z/000077500000000000000000000000001437515270000244725ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/overlaps/sub-project/files/z/file1000066400000000000000000000000041437515270000254070ustar00rootroot00000000000000foo buildstream-1.6.9/tests/frontend/overlaps/sub-project/files/z/file2000066400000000000000000000000041437515270000254100ustar00rootroot00000000000000bar buildstream-1.6.9/tests/frontend/overlaps/sub-project/z-sub.bst000066400000000000000000000001261437515270000246700ustar00rootroot00000000000000kind: import config: source: / target: / sources: - kind: local path: "files/z" buildstream-1.6.9/tests/frontend/overlaps/whitelist-undefined.bst000066400000000000000000000002631437515270000253500ustar00rootroot00000000000000kind: import config: source: / target: / depends: - b-whitelisted.bst sources: - kind: local path: "a" public: bst: overlap-whitelist: - "%{undefined-variable}/*" buildstream-1.6.9/tests/frontend/project/000077500000000000000000000000001437515270000204755ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/elements/000077500000000000000000000000001437515270000223115ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/elements/checkout-deps.bst000066400000000000000000000003631437515270000255630ustar00rootroot00000000000000kind: import description: It is important for this element to have both build and runtime dependencies sources: - kind: local path: files/etc-files depends: - filename: import-dev.bst type: build - filename: import-bin.bst type: runtime buildstream-1.6.9/tests/frontend/project/elements/compose-all.bst000066400000000000000000000003441437515270000252370ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. integrate: False buildstream-1.6.9/tests/frontend/project/elements/compose-exclude-dev.bst000066400000000000000000000004251437515270000266740ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. integrate: False # Exclude the dev domain exclude: - devel buildstream-1.6.9/tests/frontend/project/elements/compose-include-bin.bst000066400000000000000000000004301437515270000266540ustar00rootroot00000000000000kind: compose depends: - filename: import-bin.bst type: build - filename: import-dev.bst type: build config: # Dont try running the sandbox, we dont have a # runtime to run anything in this context. 
integrate: False # Only include the runtim include: - runtime buildstream-1.6.9/tests/frontend/project/elements/import-bin.bst000066400000000000000000000000741437515270000251040ustar00rootroot00000000000000kind: import sources: - kind: local path: files/bin-files buildstream-1.6.9/tests/frontend/project/elements/import-dev.bst000066400000000000000000000000741437515270000251120ustar00rootroot00000000000000kind: import sources: - kind: local path: files/dev-files buildstream-1.6.9/tests/frontend/project/elements/install-to-build.bst000066400000000000000000000000761437515270000262110ustar00rootroot00000000000000kind: import sources: - kind: local path: files/build-files buildstream-1.6.9/tests/frontend/project/elements/installed-to-build.bst000066400000000000000000000002251437515270000265160ustar00rootroot00000000000000kind: manual sources: - kind: local path: files/build-files depends: - filename: install-to-build.bst type: build config: strip-commands: [] buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/000077500000000000000000000000001437515270000256755ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/dependency/000077500000000000000000000000001437515270000300135ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/dependency/horsey.bst000066400000000000000000000001021437515270000320270ustar00rootroot00000000000000kind: autotools depends: - multiple_targets/dependency/pony.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/dependency/pony.bst000066400000000000000000000000201437515270000315020ustar00rootroot00000000000000kind: autotools buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/dependency/zebry.bst000066400000000000000000000001041437515270000316530ustar00rootroot00000000000000kind: autotools depends: - multiple_targets/dependency/horsey.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/000077500000000000000000000000001437515270000270105ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/0.bst000066400000000000000000000002611437515270000276600ustar00rootroot00000000000000kind: autotools description: Root node depends: - multiple_targets/order/2.bst - multiple_targets/order/3.bst - filename: multiple_targets/order/run.bst type: runtime buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/1.bst000066400000000000000000000001211437515270000276540ustar00rootroot00000000000000kind: autotools description: Root node depends: - multiple_targets/order/9.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/2.bst000066400000000000000000000001361437515270000276630ustar00rootroot00000000000000kind: autotools description: First dependency level depends: - multiple_targets/order/3.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/3.bst000066400000000000000000000002411437515270000276610ustar00rootroot00000000000000kind: autotools description: Second dependency level depends: - multiple_targets/order/4.bst - multiple_targets/order/5.bst - multiple_targets/order/6.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/4.bst000066400000000000000000000000641437515270000276650ustar00rootroot00000000000000kind: autotools description: Third level dependency 
buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/5.bst000066400000000000000000000000641437515270000276660ustar00rootroot00000000000000kind: autotools description: Fifth level dependency buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/6.bst000066400000000000000000000001371437515270000276700ustar00rootroot00000000000000kind: autotools description: Fourth level dependency depends: - multiple_targets/order/5.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/7.bst000066400000000000000000000001361437515270000276700ustar00rootroot00000000000000kind: autotools description: Third level dependency depends: - multiple_targets/order/6.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/8.bst000066400000000000000000000001371437515270000276720ustar00rootroot00000000000000kind: autotools description: Second level dependency depends: - multiple_targets/order/7.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/9.bst000066400000000000000000000001361437515270000276720ustar00rootroot00000000000000kind: autotools description: First level dependency depends: - multiple_targets/order/8.bst buildstream-1.6.9/tests/frontend/project/elements/multiple_targets/order/run.bst000066400000000000000000000001271437515270000303260ustar00rootroot00000000000000kind: autotools description: Not a root node, yet built at the same time as root nodes buildstream-1.6.9/tests/frontend/project/elements/target.bst000066400000000000000000000001641437515270000243120ustar00rootroot00000000000000kind: stack description: | Main stack target for the bst build test depends: - import-bin.bst - compose-all.bst buildstream-1.6.9/tests/frontend/project/elements/unaliased-tar.bst000066400000000000000000000001201437515270000255450ustar00rootroot00000000000000kind: import sources: - kind: tar url: https://unaliased-url.org/tarball.tar buildstream-1.6.9/tests/frontend/project/files/000077500000000000000000000000001437515270000215775ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/bar000066400000000000000000000000001437515270000222540ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/bin-files/000077500000000000000000000000001437515270000234475ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/bin-files/usr/000077500000000000000000000000001437515270000242605ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/bin-files/usr/bin/000077500000000000000000000000001437515270000250305ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/bin-files/usr/bin/hello000077500000000000000000000000341437515270000260560ustar00rootroot00000000000000#!/bin/bash echo "Hello !" 
buildstream-1.6.9/tests/frontend/project/files/build-files/000077500000000000000000000000001437515270000237765ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/build-files/buildstream/000077500000000000000000000000001437515270000263115ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/build-files/buildstream/build/000077500000000000000000000000001437515270000274105ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/build-files/buildstream/build/test000066400000000000000000000000051437515270000303050ustar00rootroot00000000000000test buildstream-1.6.9/tests/frontend/project/files/dev-files/000077500000000000000000000000001437515270000234555ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/dev-files/usr/000077500000000000000000000000001437515270000242665ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/dev-files/usr/include/000077500000000000000000000000001437515270000257115ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/dev-files/usr/include/pony.h000066400000000000000000000003711437515270000270500ustar00rootroot00000000000000#ifndef __PONY_H__ #define __PONY_H__ #define PONY_BEGIN "Once upon a time, there was a pony." #define PONY_END "And they lived happily ever after, the end." #define MAKE_PONY(story) \ PONY_BEGIN \ story \ PONY_END #endif /* __PONY_H__ */ buildstream-1.6.9/tests/frontend/project/files/etc-files/000077500000000000000000000000001437515270000234525ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/etc-files/etc/000077500000000000000000000000001437515270000242255ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/etc-files/etc/buildstream/000077500000000000000000000000001437515270000265405ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/etc-files/etc/buildstream/config000066400000000000000000000000071437515270000277250ustar00rootroot00000000000000config buildstream-1.6.9/tests/frontend/project/files/foo000066400000000000000000000000001437515270000222730ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/sub-project/000077500000000000000000000000001437515270000240345ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/sub-project/elements/000077500000000000000000000000001437515270000256505ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/sub-project/elements/import-etc.bst000066400000000000000000000000741437515270000304460ustar00rootroot00000000000000kind: import sources: - kind: local path: files/etc-files buildstream-1.6.9/tests/frontend/project/files/sub-project/files/000077500000000000000000000000001437515270000251365ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/sub-project/files/etc-files/000077500000000000000000000000001437515270000270115ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/sub-project/files/etc-files/etc/000077500000000000000000000000001437515270000275645ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/files/sub-project/files/etc-files/etc/animal.conf000066400000000000000000000000141437515270000316670ustar00rootroot00000000000000animal=Pony buildstream-1.6.9/tests/frontend/project/files/sub-project/project.conf000066400000000000000000000001171437515270000263500ustar00rootroot00000000000000# Project config for frontend build test name: subtest element-path: elements 
buildstream-1.6.9/tests/frontend/project/project.conf000066400000000000000000000001141437515270000230060ustar00rootroot00000000000000# Project config for frontend build test name: test element-path: elements buildstream-1.6.9/tests/frontend/project/sources/000077500000000000000000000000001437515270000221605ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/project/sources/fetch_source.py000066400000000000000000000061501437515270000252050ustar00rootroot00000000000000import os import sys from buildstream import Source, Consistency, SourceError, SourceFetcher # Expected config # sources: # - output-text: $FILE # urls: # - foo:bar # - baz:quux # fetch-succeeds: # Foo/bar: true # ooF/bar: false class FetchFetcher(SourceFetcher): def __init__(self, source, url, primary=False): super().__init__() self.source = source self.original_url = url self.primary = primary self.mark_download_url(url) def fetch(self, alias_override=None): url = self.source.translate_url(self.original_url, alias_override=alias_override, primary=self.primary) with open(self.source.output_file, "a") as f: success = url in self.source.fetch_succeeds and self.source.fetch_succeeds[url] message = "Fetch {} {} from {}\n".format(self.original_url, "succeeded" if success else "failed", url) f.write(message) if not success: raise SourceError("Failed to fetch {}".format(url)) class FetchSource(Source): # Read config to know which URLs to fetch def configure(self, node): self.original_urls = self.node_get_member(node, list, 'urls') self.output_file = self.node_get_member(node, str, 'output-text') self.fetch_succeeds = {} fetch_succeeds_node = self.node_get_member(node, dict, 'fetch-succeeds', {}) for key, _ in self.node_items(fetch_succeeds_node): self.fetch_succeeds[key] = self.node_get_member(fetch_succeeds_node, bool, key) # First URL is the primary one for this test # primary = True self.fetchers = [] for url in self.original_urls: self.mark_download_url(url, primary=primary) fetcher = FetchFetcher(self, url, primary=primary) self.fetchers.append(fetcher) primary = False def get_source_fetchers(self): return self.fetchers def preflight(self): output_dir = os.path.dirname(self.output_file) if not os.path.exists(output_dir): raise SourceError("Directory '{}' does not exist".format(output_dir)) def fetch(self): for fetcher in self.fetchers: fetcher.fetch() def get_unique_key(self): return {"urls": self.original_urls, "output_file": self.output_file} def get_consistency(self): if not os.path.exists(self.output_file): return Consistency.RESOLVED with open(self.output_file, "r") as f: contents = f.read() for url in self.original_urls: if url not in contents: return Consistency.RESOLVED return Consistency.CACHED # We dont have a ref, we're a local file... def load_ref(self, node): pass def get_ref(self): return None # pragma: nocover def set_ref(self, ref, node): pass # pragma: nocover def setup(): return FetchSource buildstream-1.6.9/tests/frontend/pull.py000066400000000000000000000414171437515270000203640ustar00rootroot00000000000000import os import shutil import stat import pytest from tests.testutils import cli, create_artifact_share, generate_junction # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) # Assert that a given artifact is in the share # def assert_shared(cli, share, project, element_name): # NOTE: 'test' here is the name of the project # specified in the project.conf we are testing with. 
# cache_key = cli.get_element_key(project, element_name) if not share.has_artifact('test', element_name, cache_key): raise AssertionError("Artifact share at {} does not contain the expected element {}" .format(share.repo, element_name)) # Assert that a given artifact is NOT in the share # def assert_not_shared(cli, share, project, element_name): # NOTE: 'test' here is the name of the project # specified in the project.conf we are testing with. # cache_key = cli.get_element_key(project, element_name) if share.has_artifact('test', element_name, cache_key): raise AssertionError("Artifact share at {} unexpectedly contains the element {}" .format(share.repo, element_name)) # Tests that: # # * `bst build` pushes all build elements to configured 'push' cache # * `bst pull --deps all` downloads everything from cache after local deletion # @pytest.mark.datafiles(DATA_DIR) def test_push_pull_all(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: # First build the target element and push to the remote. cli.configure({ 'artifacts': {'url': share.repo, 'push': True} }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert cli.get_element_state(project, 'target.bst') == 'cached' # Assert that everything is now cached in the remote. all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'] for element_name in all_elements: assert_shared(cli, share, project, element_name) # Now we've pushed, delete the user's local artifact cache # directory and try to redownload it from the share # artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) # Assert that nothing is cached locally anymore for element_name in all_elements: assert cli.get_element_state(project, element_name) != 'cached' # Now try bst pull result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst']) result.assert_success() # And assert that it's again in the local cache, without having built for element_name in all_elements: assert cli.get_element_state(project, element_name) == 'cached' # Tests that: # # * `bst build` pushes all build elements ONLY to configured 'push' cache # * `bst pull` finds artifacts that are available only in the secondary cache # @pytest.mark.datafiles(DATA_DIR) def test_pull_secondary_cache(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1,\ create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2: # Build the target and push it to share2 only. cli.configure({ 'artifacts': [ {'url': share1.repo, 'push': False}, {'url': share2.repo, 'push': True}, ] }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert_not_shared(cli, share1, project, 'target.bst') assert_shared(cli, share2, project, 'target.bst') # Delete the user's local artifact cache. artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) # Assert that the element is not cached anymore. assert cli.get_element_state(project, 'target.bst') != 'cached' # Now try bst pull result = cli.run(project=project, args=['pull', 'target.bst']) result.assert_success() # And assert that it's again in the local cache, without having built, # i.e. we found it in share2. 
assert cli.get_element_state(project, 'target.bst') == 'cached' # Tests that: # # * `bst push --remote` pushes to the given remote, not one from the config # * `bst pull --remote` pulls from the given remote # @pytest.mark.datafiles(DATA_DIR) def test_push_pull_specific_remote(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'goodartifactshare')) as good_share,\ create_artifact_share(os.path.join(str(tmpdir), 'badartifactshare')) as bad_share: # Build the target so we have it cached locally only. result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() state = cli.get_element_state(project, 'target.bst') assert state == 'cached' # Configure the default push location to be bad_share; we will assert that # nothing actually gets pushed there. cli.configure({ 'artifacts': {'url': bad_share.repo, 'push': True}, }) # Now try `bst push` to the good_share. result = cli.run(project=project, args=[ 'push', 'target.bst', '--remote', good_share.repo ]) result.assert_success() # Assert that all the artifacts are in the share we pushed # to, and not the other. assert_shared(cli, good_share, project, 'target.bst') assert_not_shared(cli, bad_share, project, 'target.bst') # Now we've pushed, delete the user's local artifact cache # directory and try to redownload it from the good_share. # artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) result = cli.run(project=project, args=['pull', 'target.bst', '--remote', good_share.repo]) result.assert_success() # And assert that it's again in the local cache, without having built assert cli.get_element_state(project, 'target.bst') == 'cached' # Tests that: # # * In non-strict mode, dependency changes don't block artifact reuse # @pytest.mark.datafiles(DATA_DIR) def test_push_pull_non_strict(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: workspace = os.path.join(str(tmpdir), 'workspace') # First build the target element and push to the remote. cli.configure({ 'artifacts': {'url': share.repo, 'push': True}, 'projects': { 'test': {'strict': False} } }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert cli.get_element_state(project, 'target.bst') == 'cached' # Assert that everything is now cached in the remote. 
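    # (Illustrative aside, not part of the original test: a toy picture of why
    # non-strict mode keeps reusing artifacts.  A strict key incorporates the
    # exact keys of the dependencies, so it changes when a dependency changes;
    # a weak key only incorporates stable dependency names, so it does not.
    # The helper below is hypothetical and is not BuildStream's key derivation.)
    import hashlib

    def _toy_key(element_config, dep_tokens):
        payload = element_config + '\0' + '\0'.join(sorted(dep_tokens))
        return hashlib.sha256(payload.encode('utf-8')).hexdigest()

    assert _toy_key('target', ['import-bin.bst@abc']) != _toy_key('target', ['import-bin.bst@def'])
    assert _toy_key('target', ['import-bin.bst']) == _toy_key('target', ['import-bin.bst'])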
all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'] for element_name in all_elements: assert_shared(cli, share, project, element_name) # Now we've pushed, delete the user's local artifact cache # directory and try to redownload it from the share # artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) # Assert that nothing is cached locally anymore for element_name in all_elements: assert cli.get_element_state(project, element_name) != 'cached' # Add a file to force change in strict cache key of import-bin.bst with open(os.path.join(str(project), 'files', 'bin-files', 'usr', 'bin', 'world'), 'w') as f: f.write('world') # Assert that the workspaced element requires a rebuild assert cli.get_element_state(project, 'import-bin.bst') == 'buildable' # Assert that the target is still waiting due to --no-strict assert cli.get_element_state(project, 'target.bst') == 'waiting' # Now try bst pull result = cli.run(project=project, args=['pull', '--deps', 'all', 'target.bst']) result.assert_success() # And assert that the target is again in the local cache, without having built assert cli.get_element_state(project, 'target.bst') == 'cached' # Regression test for https://gitlab.com/BuildStream/buildstream/issues/202 @pytest.mark.datafiles(DATA_DIR) def test_push_pull_track_non_strict(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: # First build the target element and push to the remote. cli.configure({ 'artifacts': {'url': share.repo, 'push': True}, 'projects': { 'test': {'strict': False} } }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert cli.get_element_state(project, 'target.bst') == 'cached' # Assert that everything is now cached in the remote. all_elements = {'target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'} for element_name in all_elements: assert_shared(cli, share, project, element_name) # Now we've pushed, delete the user's local artifact cache # directory and try to redownload it from the share # artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) # Assert that nothing is cached locally anymore for element_name in all_elements: assert cli.get_element_state(project, element_name) != 'cached' # Now try bst build with tracking and pulling. # Tracking will be skipped for target.bst as it doesn't have any sources. # With the non-strict build plan target.bst immediately enters the pull queue. # However, pulling has to be deferred until the dependencies have been # tracked as the strict cache key needs to be calculated before querying # the caches. result = cli.run(project=project, args=['build', '--track-all', '--all', 'target.bst']) result.assert_success() assert set(result.get_pulled_elements()) == all_elements @pytest.mark.datafiles(DATA_DIR) def test_push_pull_cross_junction(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') generate_junction(tmpdir, subproject_path, junction_path, store_ref=True) # First build the target element and push to the remote. 
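# Note on naming: 'junction.bst:import-etc.bst' used in this test addresses the
# element 'import-etc.bst' that lives inside the sub-project pinned by the
# junction element 'junction.bst'.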
cli.configure({ 'artifacts': {'url': share.repo, 'push': True} }) result = cli.run(project=project, args=['build', 'junction.bst:import-etc.bst']) result.assert_success() assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached' cache_dir = os.path.join(project, 'cache', 'artifacts') shutil.rmtree(cache_dir) assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'buildable' # Now try bst pull result = cli.run(project=project, args=['pull', 'junction.bst:import-etc.bst']) result.assert_success() # And assert that it's again in the local cache, without having built assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached' @pytest.mark.datafiles(DATA_DIR) def test_pull_missing_blob(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: # First build the target element and push to the remote. cli.configure({ 'artifacts': {'url': share.repo, 'push': True} }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert cli.get_element_state(project, 'target.bst') == 'cached' # Assert that everything is now cached in the remote. all_elements = ['target.bst', 'import-bin.bst', 'import-dev.bst', 'compose-all.bst'] for element_name in all_elements: assert_shared(cli, share, project, element_name) # Now we've pushed, delete the user's local artifact cache # directory and try to redownload it from the share # artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) # Assert that nothing is cached locally anymore for element_name in all_elements: assert cli.get_element_state(project, element_name) != 'cached' # Now delete blobs in the remote without deleting the artifact ref. # This simulates scenarios with concurrent artifact expiry. 
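# (Layout assumption for the share used in these tests: content-addressed blobs
# live under a 'cas/objects' directory while the artifact refs are stored
# separately, so removing the objects below leaves refs pointing at missing
# blobs.)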
remote_objdir = os.path.join(share.repodir, 'cas', 'objects') shutil.rmtree(remote_objdir) # Now try bst build result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Assert that no artifacts were pulled assert len(result.get_pulled_elements()) == 0 @pytest.mark.datafiles(DATA_DIR) def test_pull_missing_notifies_user(caplog, cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) caplog.set_level(1) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: cli.configure({ 'artifacts': {'url': share.repo} }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert not result.get_pulled_elements(), \ "No elements should have been pulled since the cache was empty" assert "INFO Remote ({}) does not have".format(share.repo) in result.stderr assert "SKIPPED Pull" in result.stderr @pytest.mark.datafiles(DATA_DIR) def test_pull_access_rights(caplog, cli, tmpdir, datafiles): project = str(datafiles) checkout = os.path.join(str(tmpdir), 'checkout') # Work-around datafiles not preserving mode os.chmod(os.path.join(project, 'files/bin-files/usr/bin/hello'), 0o0755) # We need a big file that does not go into a batch to test a different # code path os.makedirs(os.path.join(project, 'files/dev-files/usr/share'), exist_ok=True) with open(os.path.join(project, 'files/dev-files/usr/share/big-file'), 'w') as f: buf = ' ' * 4096 for _ in range(1024): f.write(buf) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: cli.configure({ 'artifacts': {'url': share.repo, 'push': True} }) result = cli.run(project=project, args=['build', 'compose-all.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', '--hardlinks', '--no-integrate', 'compose-all.bst', checkout]) result.assert_success() st = os.lstat(os.path.join(checkout, 'usr/include/pony.h')) assert stat.S_ISREG(st.st_mode) assert stat.S_IMODE(st.st_mode) == 0o0644 st = os.lstat(os.path.join(checkout, 'usr/bin/hello')) assert stat.S_ISREG(st.st_mode) assert stat.S_IMODE(st.st_mode) == 0o0755 st = os.lstat(os.path.join(checkout, 'usr/share/big-file')) assert stat.S_ISREG(st.st_mode) assert stat.S_IMODE(st.st_mode) == 0o0644 shutil.rmtree(checkout) artifacts = os.path.join(cli.directory, 'artifacts') shutil.rmtree(artifacts) result = cli.run(project=project, args=['pull', 'compose-all.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', '--hardlinks', '--no-integrate', 'compose-all.bst', checkout]) result.assert_success() st = os.lstat(os.path.join(checkout, 'usr/include/pony.h')) assert stat.S_ISREG(st.st_mode) assert stat.S_IMODE(st.st_mode) == 0o0644 st = os.lstat(os.path.join(checkout, 'usr/bin/hello')) assert stat.S_ISREG(st.st_mode) assert stat.S_IMODE(st.st_mode) == 0o0755 st = os.lstat(os.path.join(checkout, 'usr/share/big-file')) assert stat.S_ISREG(st.st_mode) assert stat.S_IMODE(st.st_mode) == 0o0644 buildstream-1.6.9/tests/frontend/push.py000066400000000000000000000425351437515270000203710ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain from tests.testutils import cli, create_artifact_share, create_element_size from tests.testutils import generate_junction from . 
import configure_project # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) # Assert that a given artifact is in the share # def assert_shared(cli, share, project, element_name): # NOTE: 'test' here is the name of the project # specified in the project.conf we are testing with. # cache_key = cli.get_element_key(project, element_name) if not share.has_artifact('test', element_name, cache_key): raise AssertionError("Artifact share at {} does not contain the expected element {}" .format(share.repo, element_name)) # Assert that a given artifact is NOT in the share # def assert_not_shared(cli, share, project, element_name): # NOTE: 'test' here is the name of the project # specified in the project.conf we are testing with. # cache_key = cli.get_element_key(project, element_name) if share.has_artifact('test', element_name, cache_key): raise AssertionError("Artifact share at {} unexpectedly contains the element {}" .format(share.repo, element_name)) # Tests that: # # * `bst push` fails if there are no remotes configured for pushing # * `bst push` successfully pushes to any remote that is configured for pushing # @pytest.mark.datafiles(DATA_DIR) def test_push(cli, tmpdir, datafiles): project = str(datafiles) # First build the project without the artifact cache configured result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Assert that we are now cached locally assert cli.get_element_state(project, 'target.bst') == 'cached' # Set up two artifact shares. with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1: with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2: # Try pushing with no remotes configured. This should fail. result = cli.run(project=project, args=['push', 'target.bst']) result.assert_main_error(ErrorDomain.STREAM, None) # Configure bst to pull but not push from a cache and run `bst push`. # This should also fail. cli.configure({ 'artifacts': {'url': share1.repo, 'push': False}, }) result = cli.run(project=project, args=['push', 'target.bst']) result.assert_main_error(ErrorDomain.STREAM, None) # Configure bst to push to one of the caches and run `bst push`. This works. cli.configure({ 'artifacts': [ {'url': share1.repo, 'push': False}, {'url': share2.repo, 'push': True}, ] }) result = cli.run(project=project, args=['push', 'target.bst']) assert_not_shared(cli, share1, project, 'target.bst') assert_shared(cli, share2, project, 'target.bst') # Now try pushing to both with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2: cli.configure({ 'artifacts': [ {'url': share1.repo, 'push': True}, {'url': share2.repo, 'push': True}, ] }) result = cli.run(project=project, args=['push', 'target.bst']) assert_shared(cli, share1, project, 'target.bst') assert_shared(cli, share2, project, 'target.bst') # Tests that `bst push --deps all` pushes all dependencies of the given element. 
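# Aside: the cli.configure() dictionaries used throughout these tests mirror the
# 'artifacts' section of the user configuration. A rough sketch of the
# equivalent buildstream.conf (the URLs here are placeholders only):
#
#   artifacts:
#   - url: https://cache-a.example.com
#     push: false
#   - url: https://cache-b.example.com
#     push: true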
# @pytest.mark.datafiles(DATA_DIR) def test_push_all(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: # First build it without the artifact cache configured result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Assert that we are now cached locally assert cli.get_element_state(project, 'target.bst') == 'cached' # Configure artifact share cli.configure({ # # FIXME: This test hangs "sometimes" if we allow # concurrent push. # # It's not too bad to ignore since we're # using the local artifact cache functionality # only, but it should probably be fixed. # 'scheduler': { 'pushers': 1 }, 'artifacts': { 'url': share.repo, 'push': True, } }) # Now try bst push all the deps result = cli.run(project=project, args=[ 'push', 'target.bst', '--deps', 'all' ]) result.assert_success() # And finally assert that all the artifacts are in the share assert_shared(cli, share, project, 'target.bst') assert_shared(cli, share, project, 'import-bin.bst') assert_shared(cli, share, project, 'import-dev.bst') assert_shared(cli, share, project, 'compose-all.bst') # Tests that `bst build` won't push artifacts to the cache it just pulled from. # # Regression test for https://gitlab.com/BuildStream/buildstream/issues/233. @pytest.mark.datafiles(DATA_DIR) def test_push_after_pull(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Set up two artifact shares. with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare1')) as share1,\ create_artifact_share(os.path.join(str(tmpdir), 'artifactshare2')) as share2: # Set the scene: share1 has the artifact, share2 does not. # cli.configure({ 'artifacts': {'url': share1.repo, 'push': True}, }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() cli.remove_artifact_from_cache(project, 'target.bst') assert_shared(cli, share1, project, 'target.bst') assert_not_shared(cli, share2, project, 'target.bst') assert cli.get_element_state(project, 'target.bst') != 'cached' # Now run the build again. Correct `bst build` behaviour is to download the # artifact from share1 but not push it back again. # result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert result.get_pulled_elements() == ['target.bst'] assert result.get_pushed_elements() == [] # Delete the artifact locally again. cli.remove_artifact_from_cache(project, 'target.bst') # Now we add share2 into the mix as a second push remote. This time, # `bst build` should push to share2 after pulling from share1. cli.configure({ 'artifacts': [ {'url': share1.repo, 'push': True}, {'url': share2.repo, 'push': True}, ] }) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() assert result.get_pulled_elements() == ['target.bst'] assert result.get_pushed_elements() == ['target.bst'] # Ensure that when an artifact's size exceeds available disk space # the least recently pushed artifact is deleted in order to make room for # the incoming artifact. 
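# Worked numbers for the test below: the mocked share reports roughly 12 MB of
# free space, so after two 5 MB artifacts have been pushed, a third 5 MB push
# cannot fit and the share is expected to evict the artifact that was pushed
# least recently (element1) to make room.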
@pytest.mark.datafiles(DATA_DIR) def test_artifact_expires(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' # Create an artifact share (remote artifact cache) in the tmpdir/artifactshare # Mock a file system with 12 MB free disk space with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'), min_head_size=int(2e9), max_head_size=int(2e9), total_space=int(10e9), free_space=(int(12e6) + int(2e9))) as share: # Configure bst to push to the cache cli.configure({ 'artifacts': {'url': share.repo, 'push': True}, }) # Create and build an element of 5 MB create_element_size('element1.bst', project, element_path, [], int(5e6)) result = cli.run(project=project, args=['build', 'element1.bst']) result.assert_success() # Create and build an element of 5 MB create_element_size('element2.bst', project, element_path, [], int(5e6)) result = cli.run(project=project, args=['build', 'element2.bst']) result.assert_success() # Check that elements 1 and 2 are cached both locally and remotely assert cli.get_element_state(project, 'element1.bst') == 'cached' assert_shared(cli, share, project, 'element1.bst') assert cli.get_element_state(project, 'element2.bst') == 'cached' assert_shared(cli, share, project, 'element2.bst') # Create and build another element of 5 MB (This will exceed the free disk space available) create_element_size('element3.bst', project, element_path, [], int(5e6)) result = cli.run(project=project, args=['build', 'element3.bst']) result.assert_success() # Ensure it is cached both locally and remotely assert cli.get_element_state(project, 'element3.bst') == 'cached' assert_shared(cli, share, project, 'element3.bst') # Ensure element1 has been removed from the share assert_not_shared(cli, share, project, 'element1.bst') # Ensure that element2 remains assert_shared(cli, share, project, 'element2.bst') # Test that a large artifact, whose size exceeds the quota, is not pushed # to the remote share @pytest.mark.datafiles(DATA_DIR) def test_artifact_too_large(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' # Create an artifact share (remote cache) in tmpdir/artifactshare # Mock a file system with 5 MB total space with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'), total_space=int(5e6) + int(2e9)) as share: # Configure bst to push to the remote cache cli.configure({ 'artifacts': {'url': share.repo, 'push': True}, }) # Create and push a 3MB element create_element_size('small_element.bst', project, element_path, [], int(3e6)) result = cli.run(project=project, args=['build', 'small_element.bst']) result.assert_success() # Create and try to push a 6MB element. create_element_size('large_element.bst', project, element_path, [], int(6e6)) result = cli.run(project=project, args=['build', 'large_element.bst']) result.assert_success() # Ensure that the small artifact is still in the share assert cli.get_element_state(project, 'small_element.bst') == 'cached' assert_shared(cli, share, project, 'small_element.bst') # Ensure that the artifact is cached locally but NOT remotely assert cli.get_element_state(project, 'large_element.bst') == 'cached' assert_not_shared(cli, share, project, 'large_element.bst') # Test that when an element is pulled recently, it is not considered the LRU element. 
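# In other words, pulling counts as "use": because element1 is re-pulled below,
# element2 becomes the least recently used artifact in the share and is the one
# expected to be evicted when element3 is pushed.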
@pytest.mark.datafiles(DATA_DIR) def test_recently_pulled_artifact_does_not_expire(cli, datafiles, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = 'elements' # Create an artifact share (remote cache) in tmpdir/artifactshare # Mock a file system with 12 MB free disk space with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare'), min_head_size=int(2e9), max_head_size=int(2e9), total_space=int(10e9), free_space=(int(12e6) + int(2e9))) as share: # Configure bst to push to the cache cli.configure({ 'artifacts': {'url': share.repo, 'push': True}, }) # Create and build 2 elements, each of 5 MB. create_element_size('element1.bst', project, element_path, [], int(5e6)) result = cli.run(project=project, args=['build', 'element1.bst']) result.assert_success() create_element_size('element2.bst', project, element_path, [], int(5e6)) result = cli.run(project=project, args=['build', 'element2.bst']) result.assert_success() # Ensure they are cached locally assert cli.get_element_state(project, 'element1.bst') == 'cached' assert cli.get_element_state(project, 'element2.bst') == 'cached' # Ensure that they have been pushed to the cache assert_shared(cli, share, project, 'element1.bst') assert_shared(cli, share, project, 'element2.bst') # Remove element1 from the local cache cli.remove_artifact_from_cache(project, 'element1.bst') assert cli.get_element_state(project, 'element1.bst') != 'cached' # Pull the element1 from the remote cache (this should update its mtime) result = cli.run(project=project, args=['pull', 'element1.bst', '--remote', share.repo]) result.assert_success() # Ensure element1 is cached locally assert cli.get_element_state(project, 'element1.bst') == 'cached' # Create and build the element3 (of 5 MB) create_element_size('element3.bst', project, element_path, [], int(5e6)) result = cli.run(project=project, args=['build', 'element3.bst']) result.assert_success() # Make sure it's cached locally and remotely assert cli.get_element_state(project, 'element3.bst') == 'cached' assert_shared(cli, share, project, 'element3.bst') # Ensure that element2 was deleted from the share and element1 remains assert_not_shared(cli, share, project, 'element2.bst') assert_shared(cli, share, project, 'element1.bst') @pytest.mark.datafiles(DATA_DIR) def test_push_cross_junction(cli, tmpdir, datafiles): project = str(datafiles) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') generate_junction(tmpdir, subproject_path, junction_path, store_ref=True) result = cli.run(project=project, args=['build', 'junction.bst:import-etc.bst']) result.assert_success() assert cli.get_element_state(project, 'junction.bst:import-etc.bst') == 'cached' with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: cli.configure({ 'artifacts': {'url': share.repo, 'push': True}, }) result = cli.run(project=project, args=['push', 'junction.bst:import-etc.bst']) cache_key = cli.get_element_key(project, 'junction.bst:import-etc.bst') assert share.has_artifact('subtest', 'import-etc.bst', cache_key) @pytest.mark.datafiles(DATA_DIR) def test_push_already_cached(caplog, cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) caplog.set_level(1) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: cli.configure({ 'artifacts': {'url': share.repo, 'push': True} }) result = cli.run(project=project, args=['build', 'target.bst']) 
result.assert_success() assert "SKIPPED Push" not in result.stderr result = cli.run(project=project, args=['push', 'target.bst']) result.assert_success() assert not result.get_pushed_elements(), "No elements should have been pushed since the cache was populated" assert "INFO Remote ({}) already has ".format(share.repo) in result.stderr assert "SKIPPED Push" in result.stderr # This test ensures that we are able to run `bst push` in non strict mode # and that we do not crash when trying to push elements even though they # have not yet been pulled. # # This is a regression test for issue #990 # @pytest.mark.datafiles(DATA_DIR) def test_push_no_strict(caplog, cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) caplog.set_level(1) with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share: cli.configure({ 'artifacts': { 'url': share.repo, 'push': True }, 'projects': { 'test': { 'strict': False } } }) # First get us a build result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Now cause one of the dependenies to change their cache key # # Here we just add a file, causing the strong cache key of the # import-bin.bst element to change due to the local files it # imports changing. path = os.path.join(project, 'files', 'bin-files', 'newfile') with open(path, 'w') as f: f.write("PONY !") # Now build again after having changed the dependencies result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() # Now run `bst push`. result = cli.run(project=project, args=['push', '--deps', 'all', 'target.bst']) result.assert_success() buildstream-1.6.9/tests/frontend/show.py000066400000000000000000000401121437515270000203570ustar00rootroot00000000000000import os import sys import shutil import itertools import pytest from tests.testutils import cli, generate_junction from buildstream.types import CoreWarnings from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from . 
import configure_project # Project directory TOP_DIR = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = os.path.join(TOP_DIR, "project") @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,format,expected", [ ('import-bin.bst', '%{name}', 'import-bin.bst'), ('import-bin.bst', '%{state}', 'buildable'), ('compose-all.bst', '%{state}', 'waiting') ]) def test_show(cli, datafiles, target, format, expected): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', format, target]) result.assert_success() if result.output.strip() != expected: raise AssertionError("Expected output:\n{}\nInstead received output:\n{}" .format(expected, result.output)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("target,except_,expected", [ ('target.bst', 'import-bin.bst', ['import-dev.bst', 'compose-all.bst', 'target.bst']), ('target.bst', 'import-dev.bst', ['import-bin.bst', 'compose-all.bst', 'target.bst']), ('target.bst', 'compose-all.bst', ['import-bin.bst', 'target.bst']), ('compose-all.bst', 'import-bin.bst', ['import-dev.bst', 'compose-all.bst']) ]) def test_show_except(cli, datafiles, target, except_, expected): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, silent=True, args=[ 'show', '--deps', 'all', '--format', '%{name}', '--except', except_, target]) result.assert_success() results = result.output.strip().splitlines() if results != expected: raise AssertionError("Expected elements:\n{}\nInstead received elements:\n{}" .format(expected, results)) ############################################################### # Testing multiple targets # ############################################################### @pytest.mark.datafiles(DATA_DIR) def test_parallel_order(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) elements = ['multiple_targets/order/0.bst', 'multiple_targets/order/1.bst'] args = ['show', '-d', 'plan', '-f', '%{name}'] + elements result = cli.run(project=project, args=args) result.assert_success() # Get the planned order names = result.output.splitlines() names = [name[len('multiple_targets/order/'):] for name in names] # Create all possible 'correct' topological orderings orderings = itertools.product( [('5.bst', '6.bst')], itertools.permutations(['4.bst', '7.bst']), itertools.permutations(['3.bst', '8.bst']), itertools.permutations(['2.bst', '9.bst']), itertools.permutations(['0.bst', '1.bst', 'run.bst']) ) orderings = [list(itertools.chain.from_iterable(perm)) for perm in orderings] # Ensure that our order is among the correct orderings assert names in orderings, "We got: {}".format(", ".join(names)) @pytest.mark.datafiles(DATA_DIR) def test_target_is_dependency(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) elements = ['multiple_targets/dependency/zebry.bst', 'multiple_targets/dependency/horsey.bst'] args = ['show', '-d', 'plan', '-f', '%{name}'] + elements result = cli.run(project=project, args=args) result.assert_success() # Get the planned order names = result.output.splitlines() names = [name[len('multiple_targets/dependency/'):] for name in names] assert names == ['pony.bst', 'horsey.bst', 'zebry.bst'] @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("element_name", ['junction-dep.bst', 'junction.bst:import-etc.bst']) @pytest.mark.parametrize("workspaced", 
[True, False], ids=["workspace", "no-workspace"]) def test_unfetched_junction(cli, tmpdir, datafiles, ref_storage, element_name, workspaced): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it ref = generate_junction(tmpdir, subproject_path, junction_path, store_ref=(ref_storage == 'inline')) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Dump a project.refs if we're using project.refs storage # if ref_storage == 'project.refs': project_refs = { 'projects': { 'test': { 'junction.bst': [ { 'ref': ref } ] } } } _yaml.dump(project_refs, os.path.join(project, 'junction.refs')) # Open a workspace if we're testing workspaced behavior if workspaced: result = cli.run(project=project, silent=True, args=[ 'workspace', 'open', '--no-checkout', 'junction.bst', subproject_path ]) result.assert_success() # Assert the correct error when trying to show the pipeline result = cli.run(project=project, silent=True, args=[ 'show', element_name]) # If a workspace is open, no fetch is needed if workspaced: result.assert_success() else: result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_FETCH_NEEDED) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("element_name", ['junction-dep.bst', 'junction.bst:import-etc.bst']) @pytest.mark.parametrize("workspaced", [True, False], ids=["workspace", "no-workspace"]) def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage, element_name, workspaced): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=False) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Open a workspace if we're testing workspaced behavior if workspaced: result = cli.run(project=project, silent=True, args=[ 'workspace', 'open', '--no-checkout', 'junction.bst', subproject_path ]) result.assert_success() # Assert the correct error when trying to show the pipeline result = cli.run(project=project, silent=True, args=[ 'show', element_name]) # If a workspace is open, no ref is needed if workspaced: result.assert_success() else: result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("element_name", ['junction-dep.bst', 'junction.bst:import-etc.bst']) @pytest.mark.parametrize("workspaced", [True, False], ids=["workspace", "no-workspace"]) def test_fetched_junction(cli, tmpdir, datafiles, element_name, workspaced): project = os.path.join(datafiles.dirname, 
datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=True) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) result = cli.run(project=project, silent=True, args=[ 'fetch', 'junction.bst']) result.assert_success() # Open a workspace if we're testing workspaced behavior if workspaced: result = cli.run(project=project, silent=True, args=[ 'workspace', 'open', '--no-checkout', 'junction.bst', subproject_path ]) result.assert_success() # Assert that showing the pipeline now succeeds result = cli.run(project=project, silent=True, args=[ 'show', '--format', '%{name}-%{state}', element_name]) results = result.output.strip().splitlines() assert 'junction.bst:import-etc.bst-buildable' in results ############################################################### # Testing recursion depth # ############################################################### @pytest.mark.xfail(reason="recursion errors not currently detectable") @pytest.mark.parametrize("dependency_depth", [100, 500, 1200]) def test_exceed_max_recursion_depth(cli, tmpdir, dependency_depth): project_name = "recursion-test" path = str(tmpdir) project_path = os.path.join(path, project_name) def setup_test(): """ Creates a bst project with dependency_depth + 1 elements, each of which depends on the previous element to be created. Each element created is of type import and has an empty source file. 
""" os.mkdir(project_path) result = cli.run(project=project_path, silent=True, args=['init', '--project-name', project_name]) result.assert_success() sourcefiles_path = os.path.join(project_path, "files") os.mkdir(sourcefiles_path) element_path = os.path.join(project_path, "elements") for i in range(0, dependency_depth + 1): element = { 'kind': 'import', 'sources': [{'kind': 'local', 'path': 'files/source{}'.format(str(i))}], 'depends': ['element{}.bst'.format(str(i - 1))] } if i == 0: del element['depends'] _yaml.dump(element, os.path.join(element_path, "element{}.bst".format(str(i)))) source = os.path.join(sourcefiles_path, "source{}".format(str(i))) open(source, 'x').close() assert os.path.exists(source) setup_test() result = cli.run(project=project_path, silent=True, args=['show', "element{}.bst".format(str(dependency_depth))]) recursion_limit = sys.getrecursionlimit() if dependency_depth <= recursion_limit: result.assert_success() else: # XXX Assert exception is thown and handled # # We need to assert that the client has not thrown a stack trace for # a recursion error, this should be done by creating a BstError instead # of just handling it in app.py and doing sys.exit(), because we no longer # have any way of detecting whether the client has thrown an exception # otherwise # assert result.exit_code == -1 shutil.rmtree(project_path) # This tests the resolved value of the 'max-jobs' variable, # ensuring at least that the variables are resolved according # to how the user has configured max-jobs # @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("cli_value, config_value", [ (None, None), (None, '16'), ('16', None), ('5', '16'), ('0', '16'), ('16', '0'), ]) def test_max_jobs(cli, datafiles, cli_value, config_value): project = str(datafiles) target = 'target.bst' # Specify `--max-jobs` if this test sets it args = [] if cli_value is not None: args += ['--max-jobs', cli_value] args += ['show', '--deps', 'none', '--format', '%{vars}', target] # Specify `max-jobs` in user configuration if this test sets it if config_value is not None: cli.configure({ 'build': { 'max-jobs': config_value } }) result = cli.run(project=project, silent=True, args=args) result.assert_success() loaded = _yaml.load_data(result.output) loaded_value = _yaml.node_get(loaded, int, 'max-jobs') # We expect the value provided on the command line to take # precedence over the configuration file value, if specified. # # If neither are specified then we expect the default expected_value = cli_value or config_value or '0' if expected_value == '0': # If we are expecting the automatic behavior of using the maximum # number of cores available, just check that it is a value > 0 assert loaded_value > 0, "Automatic setting of max-jobs didnt work" else: # Check that we got the explicitly set value assert loaded_value == int(expected_value) # This tests that cache keys behave as expected when # dependencies have been specified as `strict` and # when building in strict mode. # # This test will: # # * Build the target once (and assert that it is cached) # * Modify some local files which are imported # by an import element which the target depends on # * Assert that the cached state of the target element # is as expected # # We run the test twice, once with an element which strict # depends on the changing import element, and one which # depends on it regularly. 
# @pytest.mark.datafiles(os.path.join(TOP_DIR, 'strict-depends')) @pytest.mark.parametrize("target, expected_state", [ ("non-strict-depends.bst", "cached"), ("strict-depends.bst", "waiting"), ]) def test_strict_dependencies(cli, datafiles, target, expected_state): project = str(datafiles) # Configure non strict mode, this will have # an effect on the build and the `bst show` # commands run via cli.get_element_states() cli.configure({ 'projects': { 'test': { 'strict': False } } }) result = cli.run(project=project, silent=True, args=['build', target]) result.assert_success() states = cli.get_element_states(project, target) assert states['base.bst'] == 'cached' assert states[target] == 'cached' # Now modify the file, effectively causing the common base.bst # dependency to change it's cache key hello_path = os.path.join(project, 'files', 'hello.txt') with open(hello_path, 'w') as f: f.write("Goodbye") # Now assert that we have the states we expect as a result states = cli.get_element_states(project, target) assert states['base.bst'] == 'buildable' assert states[target] == expected_state @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("fatal", [True, False], ids=["fatal", "non-fatal"]) def test_unaliased_url(cli, tmpdir, datafiles, fatal): project = str(datafiles) if fatal: configure_project(project, {"fatal-warnings": [CoreWarnings.UNALIASED_URL]}) result = cli.run(project=project, silent=True, args=["show", "unaliased-tar.bst"]) if fatal: result.assert_main_error(ErrorDomain.PLUGIN, CoreWarnings.UNALIASED_URL) else: result.assert_success() assert "WARNING [unaliased-url]" in result.stderr buildstream-1.6.9/tests/frontend/strict-depends/000077500000000000000000000000001437515270000217575ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/strict-depends/elements/000077500000000000000000000000001437515270000235735ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/strict-depends/elements/base.bst000066400000000000000000000000631437515270000252160ustar00rootroot00000000000000kind: import sources: - kind: local path: files buildstream-1.6.9/tests/frontend/strict-depends/elements/non-strict-depends.bst000066400000000000000000000000471437515270000300260ustar00rootroot00000000000000kind: stack build-depends: - base.bst buildstream-1.6.9/tests/frontend/strict-depends/elements/strict-depends.bst000066400000000000000000000001001437515270000272240ustar00rootroot00000000000000kind: stack build-depends: - filename: base.bst strict: true buildstream-1.6.9/tests/frontend/strict-depends/files/000077500000000000000000000000001437515270000230615ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/strict-depends/files/hello.txt000066400000000000000000000000051437515270000247200ustar00rootroot00000000000000pony buildstream-1.6.9/tests/frontend/strict-depends/project.conf000066400000000000000000000000421437515270000242700ustar00rootroot00000000000000name: test element-path: elements 
buildstream-1.6.9/tests/frontend/track-cross-junction/000077500000000000000000000000001437515270000231115ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-cross-junction/files/000077500000000000000000000000001437515270000242135ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-cross-junction/files/usr/000077500000000000000000000000001437515270000250245ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-cross-junction/files/usr/include/000077500000000000000000000000001437515270000264475ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-cross-junction/files/usr/include/pony.h000066400000000000000000000003711437515270000276060ustar00rootroot00000000000000#ifndef __PONY_H__ #define __PONY_H__ #define PONY_BEGIN "Once upon a time, there was a pony." #define PONY_END "And they lived happily ever after, the end." #define MAKE_PONY(story) \ PONY_BEGIN \ story \ PONY_END #endif /* __PONY_H__ */ buildstream-1.6.9/tests/frontend/track-cross-junction/subproject-junction.bst000066400000000000000000000000711437515270000276300ustar00rootroot00000000000000kind: junction sources: - kind: local path: subproject buildstream-1.6.9/tests/frontend/track-cross-junction/subproject.bst000066400000000000000000000001241437515270000260000ustar00rootroot00000000000000kind: stack depends: - filename: subtarget.bst junction: subproject-junction.bst buildstream-1.6.9/tests/frontend/track-cross-junction/subproject/000077500000000000000000000000001437515270000252715ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-cross-junction/subproject/project.conf000066400000000000000000000000131437515270000276000ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/frontend/track-optional-inline/000077500000000000000000000000001437515270000232325ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-inline/files/000077500000000000000000000000001437515270000243345ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-inline/files/usr/000077500000000000000000000000001437515270000251455ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-inline/files/usr/include/000077500000000000000000000000001437515270000265705ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-inline/files/usr/include/pony.h000066400000000000000000000003711437515270000277270ustar00rootroot00000000000000#ifndef __PONY_H__ #define __PONY_H__ #define PONY_BEGIN "Once upon a time, there was a pony." #define PONY_END "And they lived happily ever after, the end." 
#define MAKE_PONY(story) \ PONY_BEGIN \ story \ PONY_END #endif /* __PONY_H__ */ buildstream-1.6.9/tests/frontend/track-optional-inline/project.conf000066400000000000000000000001351437515270000255460ustar00rootroot00000000000000name: test options: test: type: bool description: Test boolean default: False buildstream-1.6.9/tests/frontend/track-optional-inline/target.bst000066400000000000000000000003361437515270000252340ustar00rootroot00000000000000# Optionally track the test branch # kind: import (?): - test: sources: - kind: git url: file://{repo} track: test - not test: sources: - kind: git url: file://{repo} track: master buildstream-1.6.9/tests/frontend/track-optional-project-refs/000077500000000000000000000000001437515270000243575ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-project-refs/files/000077500000000000000000000000001437515270000254615ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-project-refs/files/usr/000077500000000000000000000000001437515270000262725ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-project-refs/files/usr/include/000077500000000000000000000000001437515270000277155ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/track-optional-project-refs/files/usr/include/pony.h000066400000000000000000000003711437515270000310540ustar00rootroot00000000000000#ifndef __PONY_H__ #define __PONY_H__ #define PONY_BEGIN "Once upon a time, there was a pony." #define PONY_END "And they lived happily ever after, the end." #define MAKE_PONY(story) \ PONY_BEGIN \ story \ PONY_END #endif /* __PONY_H__ */ buildstream-1.6.9/tests/frontend/track-optional-project-refs/project.conf000066400000000000000000000001701437515270000266720ustar00rootroot00000000000000name: test ref-storage: project.refs options: test: type: bool description: Test boolean default: False buildstream-1.6.9/tests/frontend/track-optional-project-refs/project.refs000066400000000000000000000002031437515270000267010ustar00rootroot00000000000000projects: test: (?): - test: target.bst: - ref: '' - not test: target.bst: - ref: '' buildstream-1.6.9/tests/frontend/track-optional-project-refs/target.bst000066400000000000000000000002421437515270000263550ustar00rootroot00000000000000# Optionally track the test branch # kind: import sources: - kind: git url: file://{repo} (?): - test: track: test - not test: track: master buildstream-1.6.9/tests/frontend/track.py000066400000000000000000000636421437515270000205200ustar00rootroot00000000000000import stat import os import pytest from tests.testutils import cli, create_repo, ALL_REPO_KINDS, generate_junction from buildstream._exceptions import ErrorDomain, LoadErrorReason from buildstream import _yaml from . 
import configure_project # Project directory TOP_DIR = os.path.dirname(os.path.realpath(__file__)) DATA_DIR = os.path.join(TOP_DIR, 'project') def generate_element(repo, element_path, dep_name=None): element = { 'kind': 'import', 'sources': [ repo.source_config() ] } if dep_name: element['depends'] = [dep_name] _yaml.dump(element, element_path) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track(cli, tmpdir, datafiles, ref_storage, kind): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_name = 'track-test-{}.bst'.format(kind) configure_project(project, { 'ref-storage': ref_storage }) # Create our repo object of the given source type with # the dev files, and then collect the initial ref. # repo = create_repo(kind, str(tmpdir)) ref = repo.create(dev_files_path) # Generate the element generate_element(repo, os.path.join(element_path, element_name)) # Assert that a fetch is needed assert cli.get_element_state(project, element_name) == 'no reference' # Now first try to track it result = cli.run(project=project, args=['track', element_name]) result.assert_success() # And now fetch it: The Source has probably already cached the # latest ref locally, but it is not required to have cached # the associated content of the latest ref at track time, that # is the job of fetch. result = cli.run(project=project, args=['fetch', element_name]) result.assert_success() # Assert that we are now buildable because the source is # now cached. assert cli.get_element_state(project, element_name) == 'buildable' # Assert there was a project.refs created, depending on the configuration if ref_storage == 'project.refs': assert os.path.exists(os.path.join(project, 'project.refs')) else: assert not os.path.exists(os.path.join(project, 'project.refs')) # NOTE: # # This test checks that recursive tracking works by observing # element states after running a recursive tracking operation. # # However, this test is ALSO valuable as it stresses the source # plugins in a situation where many source plugins are operating # at once on the same backing repository. # # Do not change this test to use a separate 'Repo' per element # as that would defeat the purpose of the stress test, otherwise # please refactor that aspect into another test. # @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("amount", [(1), (10)]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_recurse(cli, tmpdir, datafiles, kind, amount): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') # Try to actually launch as many fetch jobs as possible at the same time # # This stresses the Source plugins and helps to ensure that # they handle concurrent access to the store correctly. cli.configure({ 'scheduler': { 'fetchers': amount, } }) # Create our repo object of the given source type with # the dev files, and then collect the initial ref. 
# repo = create_repo(kind, str(tmpdir)) ref = repo.create(dev_files_path) # Write out our test targets element_names = [] last_element_name = None for i in range(amount + 1): element_name = 'track-test-{}-{}.bst'.format(kind, i + 1) filename = os.path.join(element_path, element_name) element_names.append(element_name) generate_element(repo, filename, dep_name=last_element_name) last_element_name = element_name # Assert that a fetch is needed states = cli.get_element_states(project, last_element_name) for element_name in element_names: assert states[element_name] == 'no reference' # Now first try to track it result = cli.run(project=project, args=[ 'track', '--deps', 'all', last_element_name]) result.assert_success() # And now fetch it: The Source has probably already cached the # latest ref locally, but it is not required to have cached # the associated content of the latest ref at track time, that # is the job of fetch. result = cli.run(project=project, args=[ 'fetch', '--deps', 'all', last_element_name]) result.assert_success() # Assert that the base is buildable and the rest are waiting states = cli.get_element_states(project, last_element_name) for element_name in element_names: if element_name == element_names[0]: assert states[element_name] == 'buildable' else: assert states[element_name] == 'waiting' @pytest.mark.datafiles(DATA_DIR) def test_track_single(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_dep_name = 'track-test-dep.bst' element_target_name = 'track-test-target.bst' # Create our repo object of the given source type with # the dev files, and then collect the initial ref. # repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) # Write out our test targets generate_element(repo, os.path.join(element_path, element_dep_name)) generate_element(repo, os.path.join(element_path, element_target_name), dep_name=element_dep_name) # Assert that tracking is needed for both elements assert cli.get_element_state(project, element_dep_name) == 'no reference' assert cli.get_element_state(project, element_target_name) == 'no reference' # Now first try to track only one element result = cli.run(project=project, args=[ 'track', '--deps', 'none', element_target_name]) result.assert_success() # And now fetch it result = cli.run(project=project, args=[ 'fetch', '--deps', 'none', element_target_name]) result.assert_success() # Assert that the dependency is waiting and the target has still never been tracked assert cli.get_element_state(project, element_dep_name) == 'no reference' assert cli.get_element_state(project, element_target_name) == 'waiting' @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_recurse_except(cli, tmpdir, datafiles, kind): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_dep_name = 'track-test-dep-{}.bst'.format(kind) element_target_name = 'track-test-target-{}.bst'.format(kind) # Create our repo object of the given source type with # the dev files, and then collect the initial ref. 
# repo = create_repo(kind, str(tmpdir)) ref = repo.create(dev_files_path) # Write out our test targets generate_element(repo, os.path.join(element_path, element_dep_name)) generate_element(repo, os.path.join(element_path, element_target_name), dep_name=element_dep_name) # Assert that a fetch is needed assert cli.get_element_state(project, element_dep_name) == 'no reference' assert cli.get_element_state(project, element_target_name) == 'no reference' # Now first try to track it result = cli.run(project=project, args=[ 'track', '--deps', 'all', '--except', element_dep_name, element_target_name]) result.assert_success() # And now fetch it: The Source has probably already cached the # latest ref locally, but it is not required to have cached # the associated content of the latest ref at track time, that # is the job of fetch. result = cli.run(project=project, args=[ 'fetch', '--deps', 'none', element_target_name]) result.assert_success() # Assert that the dependency is buildable and the target is waiting assert cli.get_element_state(project, element_dep_name) == 'no reference' assert cli.get_element_state(project, element_target_name) == 'waiting' @pytest.mark.datafiles(os.path.join(TOP_DIR)) @pytest.mark.parametrize("ref_storage", [('inline'), ('project-refs')]) def test_track_optional(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename, 'track-optional-' + ref_storage) dev_files_path = os.path.join(project, 'files') element_path = os.path.join(project, 'target.bst') # Create our repo object of the given source type with # the dev files, and then collect the initial ref. # repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) # Now create an optional test branch and add a commit to that, # so two branches with different heads now exist. # repo.branch('test') repo.add_commit() # Substitute the {repo} for the git repo we created with open(element_path) as f: target_bst = f.read() target_bst = target_bst.format(repo=repo.repo) with open(element_path, 'w') as f: f.write(target_bst) # First track for both options # # We want to track and persist the ref separately in this test # result = cli.run(project=project, args=['--option', 'test', 'False', 'track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['--option', 'test', 'True', 'track', 'target.bst']) result.assert_success() # Now fetch the key for both options # result = cli.run(project=project, args=[ '--option', 'test', 'False', 'show', '--deps', 'none', '--format', '%{key}', 'target.bst' ]) result.assert_success() master_key = result.output result = cli.run(project=project, args=[ '--option', 'test', 'True', 'show', '--deps', 'none', '--format', '%{key}', 'target.bst' ]) result.assert_success() test_key = result.output # Assert that the keys are different when having # tracked separate branches assert test_key != master_key @pytest.mark.datafiles(os.path.join(TOP_DIR, 'track-cross-junction')) @pytest.mark.parametrize("cross_junction", [('cross'), ('nocross')]) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_track_cross_junction(cli, tmpdir, datafiles, cross_junction, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files') target_path = os.path.join(project, 'target.bst') subtarget_path = os.path.join(project, 'subproject', 'subtarget.bst') # Create our repo object of the given source type with # the dev files, and then collect the initial ref. 
# repo = create_repo('git', str(tmpdir)) ref = repo.create(dev_files_path) # Generate two elements using the git source, one in # the main project and one in the subproject. generate_element(repo, target_path, dep_name='subproject.bst') generate_element(repo, subtarget_path) # Generate project.conf # project_conf = { 'name': 'test', 'ref-storage': ref_storage } _yaml.dump(project_conf, os.path.join(project, 'project.conf')) # # FIXME: This can be simplified when we have support # for addressing of junctioned elements. # def get_subproject_element_state(): result = cli.run(project=project, args=[ 'show', '--deps', 'all', '--format', '%{name}|%{state}', 'target.bst' ]) result.assert_success() # Create two dimentional list of the result, # first line should be the junctioned element lines = [ line.split('|') for line in result.output.splitlines() ] assert lines[0][0] == 'subproject-junction.bst:subtarget.bst' return lines[0][1] # # Assert that we have no reference yet for the cross junction element # assert get_subproject_element_state() == 'no reference' # Track recursively across the junction args = ['track', '--deps', 'all'] if cross_junction == 'cross': args += ['--cross-junctions'] args += ['target.bst'] result = cli.run(project=project, args=args) if ref_storage == 'inline': if cross_junction == 'cross': # # Cross junction tracking is not allowed when the toplevel project # is using inline ref storage. # result.assert_main_error(ErrorDomain.PIPELINE, 'untrackable-sources') else: # # No cross juction tracking was requested # result.assert_success() assert get_subproject_element_state() == 'no reference' else: # # Tracking is allowed with project.refs ref storage # result.assert_success() # # If cross junction tracking was enabled, we should now be buildable # if cross_junction == 'cross': assert get_subproject_element_state() == 'buildable' else: assert get_subproject_element_state() == 'no reference' @pytest.mark.datafiles(os.path.join(TOP_DIR, 'consistencyerror')) def test_track_consistency_error(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Track the element causing a consistency error result = cli.run(project=project, args=['track', 'error.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, 'the-consistency-error') @pytest.mark.datafiles(os.path.join(TOP_DIR, 'consistencyerror')) def test_track_consistency_bug(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Track the element causing an unhandled exception result = cli.run(project=project, args=['track', 'bug.bst']) # We expect BuildStream to fail gracefully, with no recorded exception. 
result.assert_main_error(ErrorDomain.STREAM, None) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_inconsistent_junction(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=False) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # Now try to track it, this will bail with the appropriate error # informing the user to track the junction first result = cli.run(project=project, args=['track', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_junction_element(cli, tmpdir, datafiles, ref_storage): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') element_path = os.path.join(project, 'elements', 'junction-dep.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create a repo to hold the subproject and generate a junction element for it generate_junction(tmpdir, subproject_path, junction_path, store_ref=False) # Create a stack element to depend on a cross junction element # element = { 'kind': 'stack', 'depends': [ { 'junction': 'junction.bst', 'filename': 'import-etc.bst' } ] } _yaml.dump(element, element_path) # First demonstrate that showing the pipeline yields an error result = cli.run(project=project, args=['show', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_INCONSISTENT) # Now track the junction itself result = cli.run(project=project, args=['track', 'junction.bst']) result.assert_success() # Now assert element state (via bst show under the hood) of the dep again assert cli.get_element_state(project, 'junction-dep.bst') == 'waiting' @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_cross_junction(cli, tmpdir, datafiles, ref_storage, kind): project = os.path.join(datafiles.dirname, datafiles.basename) subproject_path = os.path.join(project, 'files', 'sub-project') junction_path = os.path.join(project, 'elements', 'junction.bst') etc_files = os.path.join(subproject_path, 'files', 'etc-files') repo_element_path = os.path.join(subproject_path, 'elements', 'import-etc-repo.bst') configure_project(project, { 'ref-storage': ref_storage }) repo = create_repo(kind, str(tmpdir.join('element_repo'))) ref = repo.create(etc_files) generate_element(repo, repo_element_path) generate_junction(str(tmpdir.join('junction_repo')), subproject_path, junction_path, store_ref=False) # Track the junction itself first. 
result = cli.run(project=project, args=['track', 'junction.bst']) result.assert_success() assert cli.get_element_state(project, 'junction.bst:import-etc-repo.bst') == 'no reference' # Track the cross junction element. -J is not given, it is implied. result = cli.run(project=project, args=['track', 'junction.bst:import-etc-repo.bst']) if ref_storage == 'inline': # This is not allowed to track cross junction without project.refs. result.assert_main_error(ErrorDomain.PIPELINE, 'untrackable-sources') else: result.assert_success() assert cli.get_element_state(project, 'junction.bst:import-etc-repo.bst') == 'buildable' assert os.path.exists(os.path.join(project, 'project.refs')) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_include(cli, tmpdir, datafiles, ref_storage, kind): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_name = 'track-test-{}.bst'.format(kind) configure_project(project, { 'ref-storage': ref_storage }) # Create our repo object of the given source type with # the dev files, and then collect the initial ref. # repo = create_repo(kind, str(tmpdir)) ref = repo.create(dev_files_path) # Generate the element element = { 'kind': 'import', '(@)': ['elements/sources.yml'] } sources = { 'sources': [ repo.source_config() ] } _yaml.dump(element, os.path.join(element_path, element_name)) _yaml.dump(sources, os.path.join(element_path, 'sources.yml')) # Assert that a fetch is needed assert cli.get_element_state(project, element_name) == 'no reference' # Now first try to track it result = cli.run(project=project, args=['track', element_name]) result.assert_success() # And now fetch it: The Source has probably already cached the # latest ref locally, but it is not required to have cached # the associated content of the latest ref at track time, that # is the job of fetch. result = cli.run(project=project, args=['fetch', element_name]) result.assert_success() # Assert that we are now buildable because the source is # now cached. 
assert cli.get_element_state(project, element_name) == 'buildable' # Assert there was a project.refs created, depending on the configuration if ref_storage == 'project.refs': assert os.path.exists(os.path.join(project, 'project.refs')) else: assert not os.path.exists(os.path.join(project, 'project.refs')) new_sources = _yaml.load(os.path.join(element_path, 'sources.yml')) assert 'sources' in new_sources assert len(new_sources['sources']) == 1 assert 'ref' in new_sources['sources'][0] assert ref == new_sources['sources'][0]['ref'] @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_include_junction(cli, tmpdir, datafiles, ref_storage, kind): project = os.path.join(datafiles.dirname, datafiles.basename) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_name = 'track-test-{}.bst'.format(kind) subproject_path = os.path.join(project, 'files', 'sub-project') sub_element_path = os.path.join(subproject_path, 'elements') junction_path = os.path.join(element_path, 'junction.bst') configure_project(project, { 'ref-storage': ref_storage }) # Create our repo object of the given source type with # the dev files, and then collect the initial ref. # repo = create_repo(kind, str(tmpdir.join('element_repo'))) ref = repo.create(dev_files_path) # Generate the element element = { 'kind': 'import', '(@)': ['junction.bst:elements/sources.yml'] } sources = { 'sources': [ repo.source_config() ] } _yaml.dump(element, os.path.join(element_path, element_name)) _yaml.dump(sources, os.path.join(sub_element_path, 'sources.yml')) generate_junction(str(tmpdir.join('junction_repo')), subproject_path, junction_path, store_ref=True) result = cli.run(project=project, args=['track', 'junction.bst']) result.assert_success() # Assert that a fetch is needed assert cli.get_element_state(project, element_name) == 'no reference' # Now first try to track it result = cli.run(project=project, args=['track', element_name]) # Assert there was a project.refs created, depending on the configuration if ref_storage == 'inline': # FIXME: We should expect an error. But only a warning is emitted # result.assert_main_error(ErrorDomain.SOURCE, 'tracking-junction-fragment') assert 'junction.bst:elements/sources.yml: Cannot track source in a fragment from a junction' in result.stderr else: assert os.path.exists(os.path.join(project, 'project.refs')) # And now fetch it: The Source has probably already cached the # latest ref locally, but it is not required to have cached # the associated content of the latest ref at track time, that # is the job of fetch. result = cli.run(project=project, args=['fetch', element_name]) result.assert_success() # Assert that we are now buildable because the source is # now cached. 
assert cli.get_element_state(project, element_name) == 'buildable' @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_junction_included(cli, tmpdir, datafiles, ref_storage, kind): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = os.path.join(project, 'elements') subproject_path = os.path.join(project, 'files', 'sub-project') sub_element_path = os.path.join(subproject_path, 'elements') junction_path = os.path.join(element_path, 'junction.bst') configure_project(project, { 'ref-storage': ref_storage, '(@)': ['junction.bst:test.yml'] }) generate_junction(str(tmpdir.join('junction_repo')), subproject_path, junction_path, store_ref=False) result = cli.run(project=project, args=['track', 'junction.bst']) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_error_cannot_write_file(cli, tmpdir, datafiles, kind): if os.geteuid() == 0: pytest.skip("This is not testable with root permissions") project = str(datafiles) dev_files_path = os.path.join(project, 'files', 'dev-files') element_path = os.path.join(project, 'elements') element_name = 'track-test-{}.bst'.format(kind) configure_project(project, { 'ref-storage': 'inline' }) repo = create_repo(kind, str(tmpdir)) ref = repo.create(dev_files_path) element_full_path = os.path.join(element_path, element_name) generate_element(repo, element_full_path) st = os.stat(element_path) try: read_mask = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH os.chmod(element_path, stat.S_IMODE(st.st_mode) & ~read_mask) result = cli.run(project=project, args=['track', element_name]) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, 'save-ref-error') finally: os.chmod(element_path, stat.S_IMODE(st.st_mode)) buildstream-1.6.9/tests/frontend/track_cross_junction.py000066400000000000000000000136401437515270000236330ustar00rootroot00000000000000import os import pytest from tests.testutils import cli, create_repo, ALL_REPO_KINDS, generate_junction from buildstream import _yaml def generate_element(repo, element_path, dep_name=None): element = { 'kind': 'import', 'sources': [ repo.source_config() ] } if dep_name: element['depends'] = [dep_name] _yaml.dump(element, element_path) def generate_import_element(tmpdir, kind, project, name): element_name = 'import-{}.bst'.format(name) repo_element_path = os.path.join(project, 'elements', element_name) files = str(tmpdir.join("imported_files_{}".format(name))) os.makedirs(files) with open(os.path.join(files, '{}.txt'.format(name)), 'w') as f: f.write(name) subproject_path = os.path.join(str(tmpdir.join('sub-project-{}'.format(name)))) repo = create_repo(kind, str(tmpdir.join('element_{}_repo'.format(name)))) ref = repo.create(files) generate_element(repo, repo_element_path) return element_name def generate_project(tmpdir, name, config={}): project_name = 'project-{}'.format(name) subproject_path = os.path.join(str(tmpdir.join(project_name))) os.makedirs(os.path.join(subproject_path, 'elements')) project_conf = { 'name': name, 'element-path': 'elements' } project_conf.update(config) _yaml.dump(project_conf, os.path.join(subproject_path, 'project.conf')) return project_name, subproject_path def generate_simple_stack(project, name, dependencies): element_name = '{}.bst'.format(name) element_path = os.path.join(project, 'elements', element_name) 
element = { 'kind': 'stack', 'depends': dependencies } _yaml.dump(element, element_path) return element_name def generate_cross_element(project, subproject_name, import_name): basename, _ = os.path.splitext(import_name) return generate_simple_stack(project, 'import-{}-{}'.format(subproject_name, basename), [{ 'junction': '{}.bst'.format(subproject_name), 'filename': import_name }]) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_cross_junction_multiple_projects(cli, tmpdir, datafiles, kind): tmpdir = tmpdir.join(kind) # Generate 3 projects: main, a, b _, project = generate_project(tmpdir, 'main', {'ref-storage': 'project.refs'}) project_a, project_a_path = generate_project(tmpdir, 'a') project_b, project_b_path = generate_project(tmpdir, 'b') # Generate an element with a trackable source for each project element_a = generate_import_element(tmpdir, kind, project_a_path, 'a') element_b = generate_import_element(tmpdir, kind, project_b_path, 'b') element_c = generate_import_element(tmpdir, kind, project, 'c') # Create some indirections to the elements with dependencies to test --deps stack_a = generate_simple_stack(project_a_path, 'stack-a', [element_a]) stack_b = generate_simple_stack(project_b_path, 'stack-b', [element_b]) # Create junctions for projects a and b in main. junction_a = '{}.bst'.format(project_a) junction_a_path = os.path.join(project, 'elements', junction_a) generate_junction(tmpdir.join('repo_a'), project_a_path, junction_a_path, store_ref=False) junction_b = '{}.bst'.format(project_b) junction_b_path = os.path.join(project, 'elements', junction_b) generate_junction(tmpdir.join('repo_b'), project_b_path, junction_b_path, store_ref=False) # Track the junctions. result = cli.run(project=project, args=['track', junction_a, junction_b]) result.assert_success() # Import elements from a and b in to main. imported_a = generate_cross_element(project, project_a, stack_a) imported_b = generate_cross_element(project, project_b, stack_b) # Generate a top level stack depending on everything all_bst = generate_simple_stack(project, 'all', [imported_a, imported_b, element_c]) # Track without following junctions. But explicitly also track the elements in project a. result = cli.run(project=project, args=['track', '--deps', 'all', all_bst, '{}:{}'.format(junction_a, stack_a)]) result.assert_success() # Elements in project b should not be tracked. But elements in project a and main should. 
expected = [element_c, '{}:{}'.format(junction_a, element_a)] assert set(result.get_tracked_elements()) == set(expected) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS]) def test_track_exceptions(cli, tmpdir, datafiles, kind): tmpdir = tmpdir.join(kind) _, project = generate_project(tmpdir, 'main', {'ref-storage': 'project.refs'}) project_a, project_a_path = generate_project(tmpdir, 'a') element_a = generate_import_element(tmpdir, kind, project_a_path, 'a') element_b = generate_import_element(tmpdir, kind, project_a_path, 'b') all_bst = generate_simple_stack(project_a_path, 'all', [element_a, element_b]) junction_a = '{}.bst'.format(project_a) junction_a_path = os.path.join(project, 'elements', junction_a) generate_junction(tmpdir.join('repo_a'), project_a_path, junction_a_path, store_ref=False) result = cli.run(project=project, args=['track', junction_a]) result.assert_success() imported_b = generate_cross_element(project, project_a, element_b) indirection = generate_simple_stack(project, 'indirection', [imported_b]) result = cli.run(project=project, args=['track', '--deps', 'all', '--except', indirection, '{}:{}'.format(junction_a, all_bst), imported_b]) result.assert_success() expected = ['{}:{}'.format(junction_a, element_a), '{}:{}'.format(junction_a, element_b)] assert set(result.get_tracked_elements()) == set(expected) buildstream-1.6.9/tests/frontend/version.py000066400000000000000000000012201437515270000210610ustar00rootroot00000000000000from tests.testutils.runcli import cli # For utils.get_bst_version() from buildstream import utils def assert_version(cli_version_output): major, minor = utils.get_bst_version() expected_start = "{}.{}".format(major, minor) if not cli_version_output.startswith(expected_start): raise AssertionError("Version output expected to begin with '{}'," .format(expected_start) + " output was: {}" .format(cli_version_output)) def test_version(cli): result = cli.run(args=['--version']) result.assert_success() assert_version(result.output) buildstream-1.6.9/tests/frontend/workspace.py000066400000000000000000000746021437515270000214100ustar00rootroot00000000000000import os import pytest import shutil import subprocess from ruamel.yaml.comments import CommentedSet from tests.testutils import cli, create_repo, ALL_REPO_KINDS from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadError, LoadErrorReason from buildstream._workspaces import BST_WORKSPACE_FORMAT_VERSION repo_kinds = [(kind) for kind in ALL_REPO_KINDS] # Project directory DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project", ) def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir=None): if not workspace_dir: workspace_dir = os.path.join(str(tmpdir), 'workspace{}'.format(suffix)) project_path = os.path.join(datafiles.dirname, datafiles.basename) bin_files_path = os.path.join(project_path, 'files', 'bin-files') element_path = os.path.join(project_path, 'elements') element_name = 'workspace-test-{}{}.bst'.format(kind, suffix) # Create our repo object of the given source type with # the bin files, and then collect the initial ref. 
# repo = create_repo(kind, str(tmpdir)) ref = repo.create(bin_files_path) if track: ref = None # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(element_path, element_name)) # Assert that there is no reference, a track & fetch is needed state = cli.get_element_state(project_path, element_name) if track: assert state == 'no reference' else: assert state == 'fetch needed' # Now open the workspace, this should have the effect of automatically # tracking & fetching the source from the repo. args = ['workspace', 'open'] if track: args.append('--track') args.extend([element_name, workspace_dir]) result = cli.run(project=project_path, args=args) result.assert_success() # Assert that we are now buildable because the source is # now cached. assert cli.get_element_state(project_path, element_name) == 'buildable' # Check that the executable hello file is found in the workspace filename = os.path.join(workspace_dir, 'usr', 'bin', 'hello') assert os.path.exists(filename) return (element_name, project_path, workspace_dir) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_open(cli, tmpdir, datafiles, kind): open_workspace(cli, tmpdir, datafiles, kind, False) @pytest.mark.datafiles(DATA_DIR) def test_open_bzr_customize(cli, tmpdir, datafiles): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, "bzr", False) # Check that the .bzr dir exists bzrdir = os.path.join(workspace, ".bzr") assert(os.path.isdir(bzrdir)) # Check that the correct origin branch is set element_config = _yaml.load(os.path.join(project, "elements", element_name)) source_config = element_config['sources'][0] output = subprocess.check_output(["bzr", "info"], cwd=workspace) stripped_url = source_config['url'].lstrip("file:///") expected_output_str = ("checkout of branch: /{}/{}" .format(stripped_url, source_config['track'])) assert(expected_output_str in str(output)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_open_track(cli, tmpdir, datafiles, kind): open_workspace(cli, tmpdir, datafiles, kind, True) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_open_force(cli, tmpdir, datafiles, kind): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False) # Close the workspace result = cli.run(project=project, args=[ 'workspace', 'close', element_name ]) result.assert_success() # Assert the workspace dir still exists assert os.path.exists(workspace) # Now open the workspace again with --force, this should happily succeed result = cli.run(project=project, args=[ 'workspace', 'open', '--force', element_name, workspace ]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_open_force_open(cli, tmpdir, datafiles, kind): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False) # Assert the workspace dir exists assert os.path.exists(workspace) # Now open the workspace again with --force, this should happily succeed result = cli.run(project=project, args=[ 'workspace', 'open', '--force', element_name, workspace ]) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_open_force_different_workspace(cli, tmpdir, datafiles, kind): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False, "-alpha") # Assert the workspace dir exists 
assert os.path.exists(workspace) hello_path = os.path.join(workspace, 'usr', 'bin', 'hello') hello1_path = os.path.join(workspace, 'usr', 'bin', 'hello1') tmpdir = os.path.join(str(tmpdir), "-beta") shutil.move(hello_path, hello1_path) element_name2, project2, workspace2 = open_workspace(cli, tmpdir, datafiles, kind, False, "-beta") # Assert the workspace dir exists assert os.path.exists(workspace2) # Assert that workspace 1 contains the modified file assert os.path.exists(hello1_path) # Assert that workspace 2 contains the unmodified file assert os.path.exists(os.path.join(workspace2, 'usr', 'bin', 'hello')) # Now open the workspace again with --force, this should happily succeed result = cli.run(project=project, args=[ 'workspace', 'open', '--force', element_name2, workspace ]) # Assert that the file in workspace 1 has been replaced # With the file from workspace 2 assert os.path.exists(hello_path) assert not os.path.exists(hello1_path) result.assert_success() @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_close(cli, tmpdir, datafiles, kind): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False) # Close the workspace result = cli.run(project=project, args=[ 'workspace', 'close', '--remove-dir', element_name ]) result.assert_success() # Assert the workspace dir has been deleted assert not os.path.exists(workspace) @pytest.mark.datafiles(DATA_DIR) def test_close_external_after_move_project(cli, tmpdir, datafiles): tmp_parent = os.path.dirname(str(tmpdir)) workspace_dir = os.path.join(tmp_parent, "workspace") element_name, project_path, _ = open_workspace(cli, tmpdir, datafiles, 'git', False, "", workspace_dir) assert os.path.exists(workspace_dir) tmp_dir = os.path.join(tmp_parent, 'external_project') shutil.move(project_path, tmp_dir) assert os.path.exists(tmp_dir) # Close the workspace result = cli.run(configure=False, project=tmp_dir, args=[ 'workspace', 'close', '--remove-dir', element_name ]) result.assert_success() # Assert the workspace dir has been deleted assert not os.path.exists(workspace_dir) # Move directory back inside tmp directory so it can be recognised shutil.move(tmp_dir, project_path) @pytest.mark.datafiles(DATA_DIR) def test_close_internal_after_move_project(cli, tmpdir, datafiles): element_name, project, _ = open_workspace(cli, tmpdir, datafiles, 'git', False) tmp_dir = os.path.join(os.path.dirname(str(tmpdir)), 'external_project') shutil.move(str(tmpdir), tmp_dir) assert os.path.exists(tmp_dir) # Close the workspace result = cli.run(configure=False, project=tmp_dir, args=[ 'workspace', 'close', '--remove-dir', element_name ]) result.assert_success() # Assert the workspace dir has been deleted workspace = os.path.join(tmp_dir, 'workspace') assert not os.path.exists(workspace) @pytest.mark.datafiles(DATA_DIR) def test_close_removed(cli, tmpdir, datafiles): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, 'git', False) # Remove it first, closing the workspace should work shutil.rmtree(workspace) # Close the workspace result = cli.run(project=project, args=[ 'workspace', 'close', element_name ]) result.assert_success() # Assert the workspace dir has been deleted assert not os.path.exists(workspace) @pytest.mark.datafiles(DATA_DIR) def test_close_nonexistant_element(cli, tmpdir, datafiles): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, 'git', False) element_path = os.path.join(datafiles.dirname, datafiles.basename, 'elements', element_name) # First 
brutally remove the element.bst file, ensuring that # the element does not exist anymore in the project where # we want to close the workspace. os.remove(element_path) # Close the workspace result = cli.run(project=project, args=[ 'workspace', 'close', '--remove-dir', element_name ]) result.assert_success() # Assert the workspace dir has been deleted assert not os.path.exists(workspace) @pytest.mark.datafiles(DATA_DIR) def test_close_multiple(cli, tmpdir, datafiles): tmpdir_alpha = os.path.join(str(tmpdir), 'alpha') tmpdir_beta = os.path.join(str(tmpdir), 'beta') alpha, project, workspace_alpha = open_workspace( cli, tmpdir_alpha, datafiles, 'git', False, suffix='-alpha') beta, project, workspace_beta = open_workspace( cli, tmpdir_beta, datafiles, 'git', False, suffix='-beta') # Close the workspaces result = cli.run(project=project, args=[ 'workspace', 'close', '--remove-dir', alpha, beta ]) result.assert_success() # Assert the workspace dirs have been deleted assert not os.path.exists(workspace_alpha) assert not os.path.exists(workspace_beta) @pytest.mark.datafiles(DATA_DIR) def test_close_all(cli, tmpdir, datafiles): tmpdir_alpha = os.path.join(str(tmpdir), 'alpha') tmpdir_beta = os.path.join(str(tmpdir), 'beta') alpha, project, workspace_alpha = open_workspace( cli, tmpdir_alpha, datafiles, 'git', False, suffix='-alpha') beta, project, workspace_beta = open_workspace( cli, tmpdir_beta, datafiles, 'git', False, suffix='-beta') # Close the workspaces result = cli.run(project=project, args=[ 'workspace', 'close', '--remove-dir', '--all' ]) result.assert_success() # Assert the workspace dirs have been deleted assert not os.path.exists(workspace_alpha) assert not os.path.exists(workspace_beta) @pytest.mark.datafiles(DATA_DIR) def test_reset(cli, tmpdir, datafiles): # Open the workspace element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, 'git', False) # Modify workspace shutil.rmtree(os.path.join(workspace, 'usr', 'bin')) os.makedirs(os.path.join(workspace, 'etc')) with open(os.path.join(workspace, 'etc', 'pony.conf'), 'w') as f: f.write("PONY='pink'") # Now reset the open workspace, this should have the # effect of reverting our changes. result = cli.run(project=project, args=[ 'workspace', 'reset', element_name ]) result.assert_success() assert os.path.exists(os.path.join(workspace, 'usr', 'bin', 'hello')) assert not os.path.exists(os.path.join(workspace, 'etc', 'pony.conf')) @pytest.mark.datafiles(DATA_DIR) def test_reset_multiple(cli, tmpdir, datafiles): # Open the workspaces tmpdir_alpha = os.path.join(str(tmpdir), 'alpha') tmpdir_beta = os.path.join(str(tmpdir), 'beta') alpha, project, workspace_alpha = open_workspace( cli, tmpdir_alpha, datafiles, 'git', False, suffix='-alpha') beta, project, workspace_beta = open_workspace( cli, tmpdir_beta, datafiles, 'git', False, suffix='-beta') # Modify workspaces shutil.rmtree(os.path.join(workspace_alpha, 'usr', 'bin')) os.makedirs(os.path.join(workspace_beta, 'etc')) with open(os.path.join(workspace_beta, 'etc', 'pony.conf'), 'w') as f: f.write("PONY='pink'") # Now reset the open workspaces, this should have the # effect of reverting our changes. 
result = cli.run(project=project, args=[ 'workspace', 'reset', alpha, beta, ]) result.assert_success() assert os.path.exists(os.path.join(workspace_alpha, 'usr', 'bin', 'hello')) assert not os.path.exists(os.path.join(workspace_beta, 'etc', 'pony.conf')) @pytest.mark.datafiles(DATA_DIR) def test_reset_all(cli, tmpdir, datafiles): # Open the workspaces tmpdir_alpha = os.path.join(str(tmpdir), 'alpha') tmpdir_beta = os.path.join(str(tmpdir), 'beta') alpha, project, workspace_alpha = open_workspace( cli, tmpdir_alpha, datafiles, 'git', False, suffix='-alpha') beta, project, workspace_beta = open_workspace( cli, tmpdir_beta, datafiles, 'git', False, suffix='-beta') # Modify workspaces shutil.rmtree(os.path.join(workspace_alpha, 'usr', 'bin')) os.makedirs(os.path.join(workspace_beta, 'etc')) with open(os.path.join(workspace_beta, 'etc', 'pony.conf'), 'w') as f: f.write("PONY='pink'") # Now reset the open workspace, this should have the # effect of reverting our changes. result = cli.run(project=project, args=[ 'workspace', 'reset', '--all' ]) result.assert_success() assert os.path.exists(os.path.join(workspace_alpha, 'usr', 'bin', 'hello')) assert not os.path.exists(os.path.join(workspace_beta, 'etc', 'pony.conf')) @pytest.mark.datafiles(DATA_DIR) def test_list(cli, tmpdir, datafiles): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, 'git', False) # Now list the workspaces result = cli.run(project=project, args=[ 'workspace', 'list' ]) result.assert_success() loaded = _yaml.load_data(result.output) assert isinstance(loaded.get('workspaces'), list) workspaces = loaded['workspaces'] assert len(workspaces) == 1 space = workspaces[0] assert space['element'] == element_name assert space['directory'] == workspace @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) @pytest.mark.parametrize("strict", [("strict"), ("non-strict")]) def test_build(cli, tmpdir, datafiles, kind, strict): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False) checkout = os.path.join(str(tmpdir), 'checkout') # Modify workspace shutil.rmtree(os.path.join(workspace, 'usr', 'bin')) os.makedirs(os.path.join(workspace, 'etc')) with open(os.path.join(workspace, 'etc', 'pony.conf'), 'w') as f: f.write("PONY='pink'") # Configure strict mode strict_mode = True if strict != 'strict': strict_mode = False cli.configure({ 'projects': { 'test': { 'strict': strict_mode } } }) # Build modified workspace assert cli.get_element_state(project, element_name) == 'buildable' assert cli.get_element_key(project, element_name) == "{:?<64}".format('') result = cli.run(project=project, args=['build', element_name]) result.assert_success() assert cli.get_element_state(project, element_name) == 'cached' assert cli.get_element_key(project, element_name) != "{:?<64}".format('') # Checkout the result result = cli.run(project=project, args=[ 'checkout', element_name, checkout ]) result.assert_success() # Check that the pony.conf from the modified workspace exists filename = os.path.join(checkout, 'etc', 'pony.conf') assert os.path.exists(filename) # Check that the original /usr/bin/hello is not in the checkout assert not os.path.exists(os.path.join(checkout, 'usr', 'bin', 'hello')) @pytest.mark.datafiles(DATA_DIR) def test_buildable_no_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_name = 'workspace-test-no-ref.bst' element_path = os.path.join(project, 'elements') # Write out our test target without any source ref repo = 
create_repo('git', str(tmpdir)) element = { 'kind': 'import', 'sources': [ repo.source_config() ] } _yaml.dump(element, os.path.join(element_path, element_name)) # Assert that this target is not buildable when no workspace is associated. assert cli.get_element_state(project, element_name) == 'no reference' # Now open the workspace. We don't need to checkout the source though. workspace = os.path.join(str(tmpdir), 'workspace-no-ref') os.makedirs(workspace) args = ['workspace', 'open', '--no-checkout', element_name, workspace] result = cli.run(project=project, args=args) result.assert_success() # Assert that the target is now buildable. assert cli.get_element_state(project, element_name) == 'buildable' @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("modification", [("addfile"), ("removefile"), ("modifyfile")]) @pytest.mark.parametrize("strict", [("strict"), ("non-strict")]) def test_detect_modifications(cli, tmpdir, datafiles, modification, strict): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, 'git', False) checkout = os.path.join(str(tmpdir), 'checkout') # Configure strict mode strict_mode = True if strict != 'strict': strict_mode = False cli.configure({ 'projects': { 'test': { 'strict': strict_mode } } }) # Build clean workspace assert cli.get_element_state(project, element_name) == 'buildable' assert cli.get_element_key(project, element_name) == "{:?<64}".format('') result = cli.run(project=project, args=['build', element_name]) result.assert_success() assert cli.get_element_state(project, element_name) == 'cached' assert cli.get_element_key(project, element_name) != "{:?<64}".format('') # Modify the workspace in various different ways, ensuring we # properly detect the changes. # if modification == 'addfile': os.makedirs(os.path.join(workspace, 'etc')) with open(os.path.join(workspace, 'etc', 'pony.conf'), 'w') as f: f.write("PONY='pink'") elif modification == 'removefile': os.remove(os.path.join(workspace, 'usr', 'bin', 'hello')) elif modification == 'modifyfile': with open(os.path.join(workspace, 'usr', 'bin', 'hello'), 'w') as f: f.write('cookie') else: # This cannot be reached assert 0 # First assert that the state is properly detected assert cli.get_element_state(project, element_name) == 'buildable' assert cli.get_element_key(project, element_name) == "{:?<64}".format('') # Since there are different things going on at `bst build` time # than `bst show` time, we also want to build / checkout again, # and ensure that the result contains what we expect. 
result = cli.run(project=project, args=['build', element_name]) result.assert_success() assert cli.get_element_state(project, element_name) == 'cached' assert cli.get_element_key(project, element_name) != "{:?<64}".format('') # Checkout the result result = cli.run(project=project, args=[ 'checkout', element_name, checkout ]) result.assert_success() # Check the result for the changes we made # if modification == 'addfile': filename = os.path.join(checkout, 'etc', 'pony.conf') assert os.path.exists(filename) elif modification == 'removefile': assert not os.path.exists(os.path.join(checkout, 'usr', 'bin', 'hello')) elif modification == 'modifyfile': with open(os.path.join(workspace, 'usr', 'bin', 'hello'), 'r') as f: data = f.read() assert data == 'cookie' else: # This cannot be reached assert 0 # Ensure that various versions that should not be accepted raise a # LoadError.INVALID_DATA @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("workspace_cfg", [ # Test loading a negative workspace version {"format-version": -1}, # Test loading version 0 with two sources { "format-version": 0, "alpha.bst": { 0: "/workspaces/bravo", 1: "/workspaces/charlie", } }, # Test loading a version with decimals {"format-version": 0.5}, # Test loading a future version {"format-version": BST_WORKSPACE_FORMAT_VERSION + 1} ]) def test_list_unsupported_workspace(cli, tmpdir, datafiles, workspace_cfg): project = os.path.join(datafiles.dirname, datafiles.basename) bin_files_path = os.path.join(project, 'files', 'bin-files') element_path = os.path.join(project, 'elements') element_name = 'workspace-version.bst' os.makedirs(os.path.join(project, '.bst')) workspace_config_path = os.path.join(project, '.bst', 'workspaces.yml') _yaml.dump(workspace_cfg, workspace_config_path) result = cli.run(project=project, args=['workspace', 'list']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) # Ensure that various versions that should be accepted are parsed # correctly. 
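# Both the unsupported-version cases above and the supported-version cases
# below follow the same recipe: write a raw workspaces.yml into the project's
# .bst directory, then let `bst workspace list` parse it. A minimal sketch of
# that recipe as a standalone helper -- illustrative only; this helper is not
# part of the original test suite, which inlines the same steps in each test
# (os and _yaml come from the module imports at the top of this file):
def write_workspace_config(project, workspace_cfg):
    # BuildStream reads workspace state from <project>/.bst/workspaces.yml
    os.makedirs(os.path.join(project, '.bst'), exist_ok=True)
    workspace_config_path = os.path.join(project, '.bst', 'workspaces.yml')
    _yaml.dump(workspace_cfg, workspace_config_path)
    return workspace_config_path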
@pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("workspace_cfg,expected", [ # Test loading version 0 without a dict ({ "alpha.bst": "/workspaces/bravo" }, { "format-version": BST_WORKSPACE_FORMAT_VERSION, "workspaces": { "alpha.bst": { "prepared": False, "path": "/workspaces/bravo", "running_files": {} } } }), # Test loading version 0 with only one source ({ "alpha.bst": { 0: "/workspaces/bravo" } }, { "format-version": BST_WORKSPACE_FORMAT_VERSION, "workspaces": { "alpha.bst": { "prepared": False, "path": "/workspaces/bravo", "running_files": {} } } }), # Test loading version 1 ({ "format-version": 1, "workspaces": { "alpha.bst": { "path": "/workspaces/bravo" } } }, { "format-version": BST_WORKSPACE_FORMAT_VERSION, "workspaces": { "alpha.bst": { "prepared": False, "path": "/workspaces/bravo", "running_files": {} } } }), # Test loading version 2 ({ "format-version": 2, "workspaces": { "alpha.bst": { "path": "/workspaces/bravo", "last_successful": "some_key", "running_files": { "beta.bst": ["some_file"] } } } }, { "format-version": BST_WORKSPACE_FORMAT_VERSION, "workspaces": { "alpha.bst": { "prepared": False, "path": "/workspaces/bravo", "last_successful": "some_key", "running_files": { "beta.bst": ["some_file"] } } } }), # Test loading version 3 ({ "format-version": 3, "workspaces": { "alpha.bst": { "prepared": True, "path": "/workspaces/bravo", "running_files": {} } } }, { "format-version": BST_WORKSPACE_FORMAT_VERSION, "workspaces": { "alpha.bst": { "prepared": True, "path": "/workspaces/bravo", "running_files": {} } } }) ]) def test_list_supported_workspace(cli, tmpdir, datafiles, workspace_cfg, expected): def parse_dict_as_yaml(node): tempfile = os.path.join(str(tmpdir), 'yaml_dump') _yaml.dump(node, tempfile) return _yaml.node_sanitize(_yaml.load(tempfile)) project = os.path.join(datafiles.dirname, datafiles.basename) os.makedirs(os.path.join(project, '.bst')) workspace_config_path = os.path.join(project, '.bst', 'workspaces.yml') _yaml.dump(workspace_cfg, workspace_config_path) # Check that we can still read workspace config that is in old format result = cli.run(project=project, args=['workspace', 'list']) result.assert_success() loaded_config = _yaml.node_sanitize(_yaml.load(workspace_config_path)) # Check that workspace config remains the same if no modifications # to workspaces were made assert loaded_config == parse_dict_as_yaml(workspace_cfg) # Create a test bst file bin_files_path = os.path.join(project, 'files', 'bin-files') element_path = os.path.join(project, 'elements') element_name = 'workspace-test.bst' workspace = os.path.join(str(tmpdir), 'workspace') # Create our repo object of the given source type with # the bin files, and then collect the initial ref. 
# repo = create_repo('git', str(tmpdir)) ref = repo.create(bin_files_path) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(element_path, element_name)) # Make a change to the workspaces file result = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) result.assert_success() result = cli.run(project=project, args=['workspace', 'close', '--remove-dir', element_name]) result.assert_success() # Check that workspace config is converted correctly if necessary loaded_config = _yaml.node_sanitize(_yaml.load(workspace_config_path)) assert loaded_config == parse_dict_as_yaml(expected) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", repo_kinds) def test_inconsitent_pipeline_message(cli, tmpdir, datafiles, kind): element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False) shutil.rmtree(workspace) result = cli.run(project=project, args=[ 'build', element_name ]) result.assert_main_error(ErrorDomain.PIPELINE, "inconsistent-pipeline-workspaced") @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("strict", [("strict"), ("non-strict")]) def test_cache_key_workspace_in_dependencies(cli, tmpdir, datafiles, strict): checkout = os.path.join(str(tmpdir), 'checkout') element_name, project, workspace = open_workspace(cli, os.path.join(str(tmpdir), 'repo-a'), datafiles, 'git', False) element_path = os.path.join(project, 'elements') back_dep_element_name = 'workspace-test-back-dep.bst' # Write out our test target element = { 'kind': 'compose', 'depends': [ { 'filename': element_name, 'type': 'build' } ] } _yaml.dump(element, os.path.join(element_path, back_dep_element_name)) # Modify workspace shutil.rmtree(os.path.join(workspace, 'usr', 'bin')) os.makedirs(os.path.join(workspace, 'etc')) with open(os.path.join(workspace, 'etc', 'pony.conf'), 'w') as f: f.write("PONY='pink'") # Configure strict mode strict_mode = True if strict != 'strict': strict_mode = False cli.configure({ 'projects': { 'test': { 'strict': strict_mode } } }) # Build artifact with dependency's modified workspace assert cli.get_element_state(project, element_name) == 'buildable' assert cli.get_element_key(project, element_name) == "{:?<64}".format('') assert cli.get_element_state(project, back_dep_element_name) == 'waiting' assert cli.get_element_key(project, back_dep_element_name) == "{:?<64}".format('') result = cli.run(project=project, args=['build', back_dep_element_name]) result.assert_success() assert cli.get_element_state(project, element_name) == 'cached' assert cli.get_element_key(project, element_name) != "{:?<64}".format('') assert cli.get_element_state(project, back_dep_element_name) == 'cached' assert cli.get_element_key(project, back_dep_element_name) != "{:?<64}".format('') result = cli.run(project=project, args=['build', back_dep_element_name]) result.assert_success() # Checkout the result result = cli.run(project=project, args=[ 'checkout', back_dep_element_name, checkout ]) result.assert_success() # Check that the pony.conf from the modified workspace exists filename = os.path.join(checkout, 'etc', 'pony.conf') assert os.path.exists(filename) # Check that the original /usr/bin/hello is not in the checkout assert not os.path.exists(os.path.join(checkout, 'usr', 'bin', 'hello')) # This strange test tests against a regression raised in issue #919, # where opening a workspace on a runtime dependency of a build only # dependency causes `bst build` to not build the 
specified target # but just successfully builds the workspaced element and happily # exits without completing the build. # TEST_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)) ) @pytest.mark.datafiles(TEST_DIR) @pytest.mark.parametrize( ["case", "non_workspaced_elements_state"], [ ("workspaced-build-dep", ["waiting", "waiting", "waiting", "waiting", "waiting"]), ("workspaced-runtime-dep", ["buildable", "buildable", "waiting", "waiting", "waiting"]) ], ) @pytest.mark.parametrize("strict", [("strict"), ("non-strict")]) def test_build_all(cli, tmpdir, datafiles, case, strict, non_workspaced_elements_state): project = os.path.join(str(datafiles), case) workspace = os.path.join(str(tmpdir), 'workspace') non_leaf_elements = ["elem2.bst", "elem3.bst", "stack.bst", "elem4.bst", "elem5.bst"] all_elements = ["elem1.bst", *non_leaf_elements] # Configure strict mode strict_mode = True if strict != 'strict': strict_mode = False cli.configure({ 'projects': { 'test': { 'strict': strict_mode } } }) # First open the workspace result = cli.run(project=project, args=['workspace', 'open', 'elem1.bst', workspace]) result.assert_success() # Now build the targets elem4.bst and elem5.bst result = cli.run(project=project, args=['build', 'elem4.bst', 'elem5.bst']) result.assert_success() # Assert that the target is built for element in all_elements: assert cli.get_element_state(project, element) == 'cached' buildstream-1.6.9/tests/frontend/workspaced-build-dep/000077500000000000000000000000001437515270000230345ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/000077500000000000000000000000001437515270000246505ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/elem1.bst000066400000000000000000000000711437515270000263630ustar00rootroot00000000000000kind: import sources: - kind: local path: files/file1 buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/elem2.bst000066400000000000000000000001471437515270000263700ustar00rootroot00000000000000kind: import depends: - filename: elem1.bst type: build sources: - kind: local path: files/file2 buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/elem3.bst000066400000000000000000000001471437515270000263710ustar00rootroot00000000000000kind: import depends: - filename: elem2.bst type: build sources: - kind: local path: files/file3 buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/elem4.bst000066400000000000000000000001471437515270000263720ustar00rootroot00000000000000kind: import depends: - filename: stack.bst type: build sources: - kind: local path: files/file4 buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/elem5.bst000066400000000000000000000001471437515270000263730ustar00rootroot00000000000000kind: import depends: - filename: elem3.bst type: build sources: - kind: local path: files/file4 buildstream-1.6.9/tests/frontend/workspaced-build-dep/elements/stack.bst000066400000000000000000000000421437515270000264630ustar00rootroot00000000000000kind: stack depends: - elem3.bst 
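# The .bst files above form the dependency chain exercised by test_build_all()
# in workspace.py: elem1 (the element the workspace is opened on) is a build
# dependency of elem2, elem2 of elem3, stack.bst and elem5.bst depend on elem3,
# and elem4.bst build-depends on stack.bst. Below is a rough sketch of
# generating such a chain of import elements programmatically, in the style of
# the generate_element() helpers used by the frontend tests -- illustrative
# only; this generator is not part of the checked-in test data, which ships as
# the static files listed above:
import os
from buildstream import _yaml

def generate_import_chain(element_dir, names, file_paths):
    previous = None
    for name, path in zip(names, file_paths):
        element = {
            'kind': 'import',
            'sources': [{'kind': 'local', 'path': path}]
        }
        if previous is not None:
            # Each element build-depends on the one generated before it
            element['depends'] = [{'filename': previous, 'type': 'build'}]
        _yaml.dump(element, os.path.join(element_dir, name))
        previous = name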
buildstream-1.6.9/tests/frontend/workspaced-build-dep/files/000077500000000000000000000000001437515270000241365ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/files/file1000066400000000000000000000000001437515270000250470ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/files/file2000066400000000000000000000000001437515270000250500ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/files/file3000066400000000000000000000000001437515270000250510ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/files/file4000066400000000000000000000000001437515270000250520ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-build-dep/project.conf000066400000000000000000000002341437515270000253500ustar00rootroot00000000000000# Unique project name name: test # Required BuildStream format version format-version: 12 # Subdirectory where elements are stored element-path: elements buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/000077500000000000000000000000001437515270000234205ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/000077500000000000000000000000001437515270000252345ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/elem1.bst000066400000000000000000000000711437515270000267470ustar00rootroot00000000000000kind: import sources: - kind: local path: files/file1 buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/elem2.bst000066400000000000000000000001511437515270000267470ustar00rootroot00000000000000kind: import depends: - filename: elem1.bst type: runtime sources: - kind: local path: files/file2 buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/elem3.bst000066400000000000000000000001511437515270000267500ustar00rootroot00000000000000kind: import depends: - filename: elem2.bst type: runtime sources: - kind: local path: files/file3 buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/elem4.bst000066400000000000000000000001471437515270000267560ustar00rootroot00000000000000kind: import depends: - filename: stack.bst type: build sources: - kind: local path: files/file4 buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/elem5.bst000066400000000000000000000001471437515270000267570ustar00rootroot00000000000000kind: import depends: - filename: elem3.bst type: build sources: - kind: local path: files/file4 buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/elements/stack.bst000066400000000000000000000000421437515270000270470ustar00rootroot00000000000000kind: stack depends: - elem3.bst 
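# The workspaced-runtime-dep project above mirrors workspaced-build-dep, except
# that elem2.bst and elem3.bst declare 'type: runtime' dependencies instead of
# 'type: build', which is what drives the different expected element states in
# the test_build_all() parametrization. A small sketch -- illustrative only,
# not part of the test suite -- of reading those declarations back with the
# same _yaml helpers the tests already use:
from buildstream import _yaml

def dependency_types(element_file):
    element = _yaml.load(element_file)
    deps = []
    for dep in element.get('depends', []):
        if isinstance(dep, str):
            # Shorthand form, e.g. stack.bst's "- elem3.bst", with no explicit type
            deps.append((dep, None))
        else:
            deps.append((dep['filename'], dep.get('type')))
    return deps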
buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/files/000077500000000000000000000000001437515270000245225ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/files/file1000066400000000000000000000000001437515270000254330ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/files/file2000066400000000000000000000000001437515270000254340ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/files/file3000066400000000000000000000000001437515270000254350ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/files/file4000066400000000000000000000000001437515270000254360ustar00rootroot00000000000000buildstream-1.6.9/tests/frontend/workspaced-runtime-dep/project.conf000066400000000000000000000002341437515270000257340ustar00rootroot00000000000000# Unique project name name: test # Required BuildStream format version format-version: 12 # Subdirectory where elements are stored element-path: elements buildstream-1.6.9/tests/integration/000077500000000000000000000000001437515270000175335ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/autotools.py000066400000000000000000000034601437515270000221410ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) # Test that an autotools build 'works' - we use the autotools sample # amhello project for this. @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_autotools_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'autotools/amhello.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/usr', '/usr/lib', '/usr/bin', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README']) # Test running an executable built with autotools @pytest.mark.datafiles(DATA_DIR) def test_autotools_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_name = 'autotools/amhello.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', element_name, '/usr/bin/hello']) assert result.exit_code == 0 assert result.output == 'Hello World!\nThis is amhello 1.0.\n' buildstream-1.6.9/tests/integration/base/000077500000000000000000000000001437515270000204455ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/base/generate-base.sh000077500000000000000000000043371437515270000235150ustar00rootroot00000000000000#!/bin/sh # Generate a base sysroot for running the BuildStream integration tests. # # The sysroot is based off the Alpine Linux distribution. The script downloads # a release of Alpine, sets up a cheap container using `bwrap` and installs the # packages that are needed by the integration tests, then outputs a .tar.xz # file. 
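# Typical invocation (an illustrative note, not part of the original script):
#
#   ARCH=x86_64 ./generate-base.sh
#
# ARCH defaults to x86_64 below, and the host needs `wget` and `bwrap`
# available; the result is written to integration-tests-base.tar.xz in the
# current working directory.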
set -eux ALPINE_ARCH=${ARCH:-x86_64} ALPINE_BASE=http://dl-cdn.alpinelinux.org/alpine/v3.7/releases/${ALPINE_ARCH}/alpine-minirootfs-3.7.0-${ALPINE_ARCH}.tar.gz mkdir root wget ${ALPINE_BASE} -O alpine-base.tar.gz tar -x -f ./alpine-base.tar.gz -C ./root --exclude dev/\* run() { # This turns the unpacked rootfs into a container using Bubblewrap. # The Alpine package manager (apk) calls `chroot` when running package # triggers so we need to enable CAP_SYS_CHROOT. We also have to fake # UID 0 (root) inside the container to avoid permissions errors. bwrap --bind ./root / --dev /dev --proc /proc --tmpfs /tmp \ --ro-bind /etc/resolv.conf /etc/resolv.conf \ --setenv PATH "/usr/bin:/usr/sbin:/bin:/sbin" \ --unshare-user --uid 0 --gid 0 \ --cap-add CAP_SYS_CHROOT \ /bin/sh -c "$@" } # Enable testing repo for Tiny C Compiler package run "echo http://dl-cdn.alpinelinux.org/alpine/edge/testing >> /etc/apk/repositories" # Fetch the list of Alpine packages. run "apk update" # There are various random errors from `apk add` to do with ownership, probably # because of our hacked up `bwrap` container. The errors seem harmless so I am # just ignoring them. set +e # Install stuff needed by all integration tests that compile C code. # # Note that we use Tiny C Compiler in preference to GCC. There is a huge # size difference -- 600KB for TinyCC vs. 50MB to 100MB for GCC. TinyCC # supports most of the ISO C99 standard, but has no C++ support at all. run "apk add binutils libc-dev make tcc" run "ln -s /usr/bin/tcc /usr/bin/cc" # Install stuff for tests/integration/autotools run "apk add autoconf automake" # Install stuff for tests/integration/cmake run "apk add cmake" # Install stuff for tests/integration/pip run "apk add python3" set -e # Cleanup the package cache run "rm -R /var/cache/apk" tar -c -v -J -f integration-tests-base.tar.xz -C root . 
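# Optional sanity check (an illustrative addition, not part of the original
# script): confirm that the tarball contains the tools the integration tests
# rely on. The './usr/bin/...' member names are an assumption about how GNU tar
# names entries created with '-C root .'; `tar -t` exits non-zero when a listed
# member is missing, which aborts the script here because `set -e` is active.
tar -t -J -f integration-tests-base.tar.xz \
    ./usr/bin/tcc ./usr/bin/cc ./usr/bin/cmake ./usr/bin/make ./usr/bin/python3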
buildstream-1.6.9/tests/integration/build-uid.py000066400000000000000000000036071437515270000217710ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains from tests.testutils.site import IS_LINUX pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_build_uid_overridden(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'build-uid/build-uid.bst' project_config = { 'name': 'build-uid-test', 'sandbox': { 'build-uid': 800, 'build-gid': 900 } } result = cli.run(project=project, project_config=project_config, args=['build', element_name]) assert result.exit_code == 0 @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux') @pytest.mark.datafiles(DATA_DIR) def test_build_uid_in_project(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'build-uid/build-uid-1023.bst' project_config = { 'name': 'build-uid-test', 'sandbox': { 'build-uid': 1023, 'build-gid': 3490 } } result = cli.run(project=project, project_config=project_config, args=['build', element_name]) assert result.exit_code == 0 @pytest.mark.datafiles(DATA_DIR) def test_build_uid_default(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'build-uid/build-uid-default.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 buildstream-1.6.9/tests/integration/cmake.py000066400000000000000000000026631437515270000211740ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.datafiles(DATA_DIR) def test_cmake_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'cmake/cmakehello.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/usr', '/usr/bin', '/usr/bin/hello', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello']) @pytest.mark.datafiles(DATA_DIR) def test_cmake_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_name = 'cmake/cmakehello.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', element_name, '/usr/bin/hello']) assert result.exit_code == 0 assert result.output == """Hello World! This is hello. 
""" buildstream-1.6.9/tests/integration/compose-symlinks.py000066400000000000000000000027011437515270000234210ustar00rootroot00000000000000import io import os import sys import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import walk_dir pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) # Test that staging a file inside a directory symlink works as expected. # # Regression test for https://gitlab.com/BuildStream/buildstream/issues/270 @pytest.mark.datafiles(DATA_DIR) def test_compose_symlinks(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') # Symlinks do not survive being placed in a source distribution # ('setup.py sdist'), so we have to create the one we need here. project_files = os.path.join(project, 'files', 'compose-symlinks', 'base') symlink_file = os.path.join(project_files, 'sbin') os.symlink(os.path.join('usr', 'sbin'), symlink_file, target_is_directory=True) result = cli.run(project=project, args=['build', 'compose-symlinks/compose.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'compose-symlinks/compose.bst', checkout]) result.assert_success() assert set(walk_dir(checkout)) == set(['/sbin', '/usr', '/usr/sbin', '/usr/sbin/init', '/usr/sbin/dummy']) buildstream-1.6.9/tests/integration/compose.py000066400000000000000000000102761437515270000215600ustar00rootroot00000000000000import io import os import sys import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import walk_dir pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) def create_compose_element(name, path, config={}): element = { 'kind': 'compose', 'depends': [{ 'filename': 'compose/amhello.bst', 'type': 'build' }, { 'filename': 'compose/test.bst', 'type': 'build' }], 'config': config } os.makedirs(os.path.dirname(os.path.join(path, name)), exist_ok=True) _yaml.dump(element, os.path.join(path, name)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("include_domains,exclude_domains,expected", [ # Test flat inclusion ([], [], ['/usr', '/usr/lib', '/usr/bin', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README', '/tests', '/tests/test']), # Test only runtime (['runtime'], [], ['/usr', '/usr/lib', '/usr/share', '/usr/bin', '/usr/bin/hello']), # Test with runtime and doc (['runtime', 'doc'], [], ['/usr', '/usr/lib', '/usr/share', '/usr/bin', '/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README']), # Test with only runtime excluded ([], ['runtime'], ['/usr', '/usr/lib', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README', '/tests', '/tests/test']), # Test with runtime and doc excluded ([], ['runtime', 'doc'], ['/usr', '/usr/lib', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/tests', '/tests/test']), # Test with runtime simultaneously in- and excluded (['runtime'], ['runtime'], 
['/usr', '/usr/lib', '/usr/share']), # Test with runtime included and doc excluded (['runtime'], ['doc'], ['/usr', '/usr/lib', '/usr/share', '/usr/bin', '/usr/bin/hello']), # Test including a custom 'test' domain (['test'], [], ['/usr', '/usr/lib', '/usr/share', '/tests', '/tests/test']), # Test excluding a custom 'test' domain ([], ['test'], ['/usr', '/usr/lib', '/usr/bin', '/usr/share', '/usr/lib/debug', '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin', '/usr/lib/debug/usr/bin/hello', '/usr/bin/hello', '/usr/share/doc', '/usr/share/doc/amhello', '/usr/share/doc/amhello/README']) ]) def test_compose_include(cli, tmpdir, datafiles, include_domains, exclude_domains, expected): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'compose/compose-amhello.bst' # Create a yaml configuration from the specified include and # exclude domains config = { 'include': include_domains, 'exclude': exclude_domains } create_compose_element(element_name, element_path, config=config) result = cli.run(project=project, args=['track', 'compose/amhello.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 assert set(walk_dir(checkout)) == set(expected) buildstream-1.6.9/tests/integration/import.py000066400000000000000000000035321437515270000214220ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import walk_dir pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) def create_import_element(name, path, source, target, source_path): element = { 'kind': 'import', 'sources': [{ 'kind': 'local', 'path': source_path }], 'config': { 'source': source, 'target': target } } os.makedirs(os.path.dirname(os.path.join(path, name)), exist_ok=True) _yaml.dump(element, os.path.join(path, name)) @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("source,target,path,expected", [ ('/', '/', 'files/import-source', ['/test.txt', '/subdir', '/subdir/test.txt']), ('/subdir', '/', 'files/import-source', ['/test.txt']), ('/', '/', 'files/import-source/subdir', ['/test.txt']), ('/', '/output', 'files/import-source', ['/output', '/output/test.txt', '/output/subdir', '/output/subdir/test.txt']), ]) def test_import(cli, tmpdir, datafiles, source, target, path, expected): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'import/import.bst' create_import_element(element_name, element_path, source, target, path) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 assert set(walk_dir(checkout)) == set(expected) buildstream-1.6.9/tests/integration/make.py000066400000000000000000000026301437515270000210230ustar00rootroot00000000000000import os import pytest from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) # Test that a make build 'works' 
- we use the make sample # makehello project for this. @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_make_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'make/makehello.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/usr', '/usr/bin', '/usr/bin/hello']) # Test running an executable built with make @pytest.mark.datafiles(DATA_DIR) def test_make_run(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_name = 'make/makehello.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', element_name, '/usr/bin/hello']) assert result.exit_code == 0 assert result.output == 'Hello, world\n' buildstream-1.6.9/tests/integration/manual.py000066400000000000000000000065611437515270000213720ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) def create_manual_element(name, path, config, variables, environment): element = { 'kind': 'manual', 'depends': [{ 'filename': 'base.bst', 'type': 'build' }], 'config': config, 'variables': variables, 'environment': environment } os.makedirs(os.path.dirname(os.path.join(path, name)), exist_ok=True) _yaml.dump(element, os.path.join(path, name)) @pytest.mark.datafiles(DATA_DIR) def test_manual_element(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'import/import.bst' create_manual_element(element_name, element_path, { 'configure-commands': ["echo './configure' >> test"], 'build-commands': ["echo 'make' >> test"], 'install-commands': [ "echo 'make install' >> test", "cp test %{install-root}" ], 'strip-commands': ["echo 'strip' >> %{install-root}/test"] }, {}, {}) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == """./configure make make install strip """ @pytest.mark.datafiles(DATA_DIR) def test_manual_element_environment(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'import/import.bst' create_manual_element(element_name, element_path, { 'install-commands': [ "echo $V >> test", "cp test %{install-root}" ] }, { }, { 'V': 2 }) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == "2\n" @pytest.mark.datafiles(DATA_DIR) def test_manual_element_noparallel(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 
'elements') element_name = 'import/import.bst' create_manual_element(element_name, element_path, { 'install-commands': [ "echo $MAKEFLAGS >> test", "echo $V >> test", "cp test %{install-root}" ] }, { 'notparallel': True }, { 'MAKEFLAGS': '-j%{max-jobs} -Wall', 'V': 2 }) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == """-j1 -Wall 2 """ buildstream-1.6.9/tests/integration/pip_element.py000066400000000000000000000036531437515270000224150ustar00rootroot00000000000000import os import sys import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.datafiles(DATA_DIR) def test_pip_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'pip/hello.bst' element = { 'kind': 'pip', 'variables': { 'pip': 'pip3' }, 'depends': [{ 'filename': 'base.bst' }], 'sources': [{ 'kind': 'tar', 'url': 'file://{}/files/hello.tar.xz'.format(project), 'ref': 'ad96570b552498807abec33c06210bf68378d854ced6753b77916c5ed517610d' }] } os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True) _yaml.dump(element, os.path.join(element_path, element_name)) result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/usr', '/usr/lib', '/usr/bin', '/usr/bin/hello', '/usr/lib/python3.6']) # Test running an executable built with pip @pytest.mark.datafiles(DATA_DIR) def test_pip_run(cli, tmpdir, datafiles): # Create and build our test element test_pip_build(cli, tmpdir, datafiles) project = os.path.join(datafiles.dirname, datafiles.basename) element_name = 'pip/hello.bst' result = cli.run(project=project, args=['shell', element_name, '/usr/bin/hello']) assert result.exit_code == 0 assert result.output == 'Hello, world!\n' buildstream-1.6.9/tests/integration/pip_source.py000066400000000000000000000062301437515270000222560ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.datafiles(DATA_DIR) def test_pip_source_import(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'pip/hello.bst' element = { 'kind': 'import', 'sources': [ { 'kind': 'local', 'path': 'files/pip-source' }, { 'kind': 'pip', 'url': 'file://{}'.format(os.path.realpath(os.path.join(project, 'files', 'pypi-repo'))), 'requirements-files': ['myreqs.txt'], 'packages': ['app2'] } ] } os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True) _yaml.dump(element, os.path.join(element_path, element_name)) result = cli.run(project=project, args=['track', element_name]) 
assert result.exit_code == 0 result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 assert_contains(checkout, ['/.bst_pip_downloads', '/.bst_pip_downloads/HelloLib-0.1.tar.gz', '/.bst_pip_downloads/App2-0.1.tar.gz']) @pytest.mark.datafiles(DATA_DIR) def test_pip_source_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = os.path.join(project, 'elements') element_name = 'pip/hello.bst' element = { 'kind': 'manual', 'depends': ['base.bst'], 'sources': [ { 'kind': 'local', 'path': 'files/pip-source' }, { 'kind': 'pip', 'url': 'file://{}'.format(os.path.realpath(os.path.join(project, 'files', 'pypi-repo'))), 'requirements-files': ['myreqs.txt'], 'packages': ['app2'] } ], 'config': { 'install-commands': [ 'pip3 install --no-index --prefix %{install-root}/usr .bst_pip_downloads/*.tar.gz', 'chmod +x app1.py', 'install app1.py %{install-root}/usr/bin/' ] } } os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True) _yaml.dump(element, os.path.join(element_path, element_name)) result = cli.run(project=project, args=['track', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['shell', element_name, '/usr/bin/app1.py']) assert result.exit_code == 0 assert result.output == """Hello App1! """ buildstream-1.6.9/tests/integration/project/000077500000000000000000000000001437515270000212015ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/000077500000000000000000000000001437515270000230155ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/autotools/000077500000000000000000000000001437515270000250465ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/autotools/amhello.bst000066400000000000000000000003101437515270000271730ustar00rootroot00000000000000kind: autotools description: Autotools test depends: - base.bst sources: - kind: tar url: project_dir:/files/amhello.tar.gz ref: 9ba123fa4e660929e9a0aa99f0c487b7eee59c5e7594f3284d015640b90f5590 buildstream-1.6.9/tests/integration/project/elements/base.bst000066400000000000000000000001031437515270000244330ustar00rootroot00000000000000# elements/base.bst kind: stack depends: - base/base-alpine.bst buildstream-1.6.9/tests/integration/project/elements/base/000077500000000000000000000000001437515270000237275ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/base/base-alpine.bst000066400000000000000000000004651437515270000266260ustar00rootroot00000000000000kind: import description: | Alpine Linux base for tests Generated using the `tests/integration-tests/base/generate-base.sh` script. 
sources: - kind: tar url: alpine:integration-tests-base.v1.x86_64.tar.xz base-dir: '' ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639 buildstream-1.6.9/tests/integration/project/elements/build-uid/000077500000000000000000000000001437515270000246735ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/build-uid/build-uid-1023.bst000066400000000000000000000002061437515270000276440ustar00rootroot00000000000000kind: manual depends: - filename: base.bst type: build config: build-commands: - "[ `id -u` = 1023 -a `id -g` = 3490 ]" buildstream-1.6.9/tests/integration/project/elements/build-uid/build-uid-default.bst000066400000000000000000000001771437515270000307120ustar00rootroot00000000000000kind: manual depends: - filename: base.bst type: build config: build-commands: - "[ `id -u` = 0 -a `id -g` = 0 ]"buildstream-1.6.9/tests/integration/project/elements/build-uid/build-uid.bst000066400000000000000000000002641437515270000272650ustar00rootroot00000000000000kind: manual depends: - filename: base.bst type: build sandbox: build-uid: 1024 build-gid: 1048 config: build-commands: - "[ `id -u` = 1024 -a `id -g` = 1048 ]" buildstream-1.6.9/tests/integration/project/elements/cmake/000077500000000000000000000000001437515270000240755ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/cmake/cmakehello.bst000066400000000000000000000003131437515270000267100ustar00rootroot00000000000000kind: cmake description: Cmake test depends: - base.bst sources: - kind: tar url: project_dir:/files/cmakehello.tar.gz ref: 508266f40dbc5875293bd24c4e50a9eb6b88cbacab742033f7b92f8c087b64e5 buildstream-1.6.9/tests/integration/project/elements/compose-symlinks/000077500000000000000000000000001437515270000263315ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/compose-symlinks/base.bst000066400000000000000000000001111437515270000277460ustar00rootroot00000000000000kind: import sources: - kind: local path: files/compose-symlinks/base buildstream-1.6.9/tests/integration/project/elements/compose-symlinks/compose.bst000066400000000000000000000002431437515270000305070ustar00rootroot00000000000000kind: compose depends: - filename: compose-symlinks/base.bst type: build - filename: compose-symlinks/overlay.bst type: build config: include: - runtime buildstream-1.6.9/tests/integration/project/elements/compose-symlinks/overlay.bst000066400000000000000000000001141437515270000305200ustar00rootroot00000000000000kind: import sources: - kind: local path: files/compose-symlinks/overlay buildstream-1.6.9/tests/integration/project/elements/compose/000077500000000000000000000000001437515270000244625ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/compose/amhello.bst000066400000000000000000000003521437515270000266150ustar00rootroot00000000000000kind: autotools description: Autotools test depends: - filename: base.bst type: build sources: - kind: tar url: project_dir:/files/amhello.tar.gz ref: 9ba123fa4e660929e9a0aa99f0c487b7eee59c5e7594f3284d015640b90f5590 buildstream-1.6.9/tests/integration/project/elements/compose/test.bst000066400000000000000000000002651437515270000261560ustar00rootroot00000000000000kind: script depends: - filename: base.bst type: build config: commands: - "mkdir -p %{install-root}/tests" - "echo 'This is a test' > %{install-root}/tests/test" 
buildstream-1.6.9/tests/integration/project/elements/make/000077500000000000000000000000001437515270000237325ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/make/makehello.bst000066400000000000000000000003001437515270000263760ustar00rootroot00000000000000kind: make description: make test depends: - base.bst sources: - kind: tar url: project_dir:/files/makehello.tar.gz ref: fd342a36503a0a0dd37b81ddb4d2b78bd398d912d813339e0de44a6b6c393b8e buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/000077500000000000000000000000001437515270000255645ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/base-with-tmp.bst000066400000000000000000000001671437515270000307630ustar00rootroot00000000000000kind: import description: Base for after-sandbox cleanup test sources: - kind: local path: files/base-with-tmp/ buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/build-dev-shm-mounted.bst000066400000000000000000000001661437515270000324120ustar00rootroot00000000000000kind: manual depends: - base.bst config: create-dev-shm: true build-commands: - | mountpoint -q /dev/shm buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/build-dev-shm-not-mounted.bst000066400000000000000000000001711437515270000332040ustar00rootroot00000000000000kind: manual depends: - base.bst config: create-dev-shm: false build-commands: - | ! mountpoint -q /dev/shm buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/script-dev-shm-mounted.bst000066400000000000000000000001661437515270000326170ustar00rootroot00000000000000kind: script build-depends: - base.bst config: create-dev-shm: true commands: - | mountpoint -q /dev/shm buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/script-dev-shm-not-mounted.bst000066400000000000000000000001711437515270000334110ustar00rootroot00000000000000kind: script build-depends: - base.bst config: create-dev-shm: false commands: - | ! mountpoint -q /dev/shm buildstream-1.6.9/tests/integration/project/elements/sandbox-bwrap/test-cleanup.bst000066400000000000000000000003411437515270000307000ustar00rootroot00000000000000kind: manual description: A dummy project to utilize a base with existing /tmp folder. 
depends: - filename: base.bst type: build - filename: sandbox-bwrap/base-with-tmp.bst config: build-commands: - | true buildstream-1.6.9/tests/integration/project/elements/script/000077500000000000000000000000001437515270000243215ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/script/corruption-2.bst000066400000000000000000000002401437515270000273720ustar00rootroot00000000000000kind: script depends: - filename: base.bst type: build - filename: script/corruption-image.bst type: build config: commands: - echo smashed >>/canary buildstream-1.6.9/tests/integration/project/elements/script/corruption-image.bst000066400000000000000000000000711437515270000303150ustar00rootroot00000000000000kind: import sources: - kind: local path: files/canary buildstream-1.6.9/tests/integration/project/elements/script/corruption-integration.bst000066400000000000000000000001261437515270000315570ustar00rootroot00000000000000kind: stack public: bst: integration-commands: - echo smashed >>/canary buildstream-1.6.9/tests/integration/project/elements/script/corruption.bst000066400000000000000000000006141437515270000272400ustar00rootroot00000000000000kind: script depends: - filename: base.bst type: build - filename: script/corruption-image.bst type: build - filename: script/corruption-integration.bst type: build variables: install-root: "/" config: layout: - element: base.bst destination: "/" - element: script/corruption-image.bst destination: "/" - element: script/corruption-integration.bst destination: "/" buildstream-1.6.9/tests/integration/project/elements/script/marked-tmpdir.bst000066400000000000000000000002171437515270000275730ustar00rootroot00000000000000kind: compose depends: - filename: base.bst type: build public: bst: split-rules: remove: - "/tmp/**" - "/tmp" buildstream-1.6.9/tests/integration/project/elements/script/no-tmpdir.bst000066400000000000000000000002031437515270000267370ustar00rootroot00000000000000kind: filter depends: - filename: script/marked-tmpdir.bst type: build config: exclude: - remove include-orphans: True buildstream-1.6.9/tests/integration/project/elements/script/script-layout.bst000066400000000000000000000006621437515270000276560ustar00rootroot00000000000000kind: script description: Write to root using a script element variables: install-root: /buildstream/nstall build-root: /buildstream/uild depends: - filename: base.bst type: build - filename: script/script.bst type: build config: layout: - element: base.bst destination: / - element: script/script.bst destination: /buildstream/uild commands: - "cp %{build-root}/test %{install-root}" buildstream-1.6.9/tests/integration/project/elements/script/script.bst000066400000000000000000000002251437515270000263360ustar00rootroot00000000000000kind: script description: Script test depends: - filename: base.bst type: build config: commands: - "echo 'Hi' > %{install-root}/test" buildstream-1.6.9/tests/integration/project/elements/script/tmpdir.bst000066400000000000000000000001701437515270000263300ustar00rootroot00000000000000kind: script depends: - filename: script/no-tmpdir.bst type: build config: commands: - | mkdir -p /tmp/blah buildstream-1.6.9/tests/integration/project/elements/stack/000077500000000000000000000000001437515270000241225ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/stack/another-hi.bst000066400000000000000000000003101437515270000266640ustar00rootroot00000000000000kind: script description: Another hi test depends: - filename: base.bst type: build config: 
commands: - "mkdir -p %{install-root}" - "echo 'Another hi' > %{install-root}/another-hi" buildstream-1.6.9/tests/integration/project/elements/stack/hi.bst000066400000000000000000000002311437515270000252300ustar00rootroot00000000000000kind: script depends: - filename: base.bst type: build config: commands: - "mkdir -p %{install-root}" - "echo 'Hi' > %{install-root}/hi" buildstream-1.6.9/tests/integration/project/elements/stack/stack.bst000066400000000000000000000001271437515270000257410ustar00rootroot00000000000000kind: stack description: Stack test depends: - stack/hi.bst - stack/another-hi.bst buildstream-1.6.9/tests/integration/project/elements/symlinks/000077500000000000000000000000001437515270000246665ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/symlinks/dangling-symlink-overlap.bst000066400000000000000000000022311437515270000323130ustar00rootroot00000000000000kind: manual depends: - base.bst - symlinks/dangling-symlink.bst config: install-commands: # The element that we depend on installs a symlink at `/opt/orgname`, # which points to a non-existant target of `/usr/orgs/orgname`. # BuildStream converts absolute symlink targets into relative ones so it # ends up pointing to ../usr/orgs/orgname, but this resolves to the same # place. # This element creates a directory at `/opt/orgname` and installs files # inside it. When this element is staged on top of the dependency this # directory will be ignored as the symlink will already be there; # BuildStream will then process the files that should be /in/ the # directory. The expected behaviour when installing files within a symlink # is to install them within the symlink's target, so the file # `/opt/orgname/etc/org.conf` should end up at # `/usr/orgs/orgname/etc/org.conf`. And since that directory doesn't exist # BuildStream will also need to create it before installing anything there. # - mkdir -p "%{install-root}"/opt/orgname/etc/ - echo "example" > "%{install-root}"/opt/orgname/etc/org.conf buildstream-1.6.9/tests/integration/project/elements/symlinks/dangling-symlink.bst000066400000000000000000000007731437515270000306560ustar00rootroot00000000000000kind: manual depends: - base.bst config: install-commands: # The installed file `/opt/orgname` will be a symlink to a directory that # doesn't exist (`/usr/orgs/orgname`). BuildStream should store this as a # relative symlink; among other reasons, if we ever stage an absolute # symlinks then we risk subsequent operations trying to write outside the # sandbox to paths on the host. - mkdir -p "%{install-root}"/opt/ - ln -s /usr/orgs/orgname "%{install-root}"/opt/orgname buildstream-1.6.9/tests/integration/project/elements/symlinks/symlink-to-outside-sandbox-overlap.bst000066400000000000000000000021501437515270000342600ustar00rootroot00000000000000kind: manual depends: - base.bst - symlinks/symlink-to-outside-sandbox.bst config: install-commands: # The element we depend on has installed a relative symlink to # `/opt/escape-hatch` which uses `../` path sections so that its # target points outside of the sandbox. # # This element installs a directory to the same `/opt/escape-hatch` # location and installs a file inside the directory. # # When this element is staged on top of its dependency, the directory will # overlap with the symlink and will thus be ignored. BuildStream will then # try to install the `etc/org.conf` file inside the symlinks target and # will end up with a path like `../../usr/etc/org.conf`. 
# # This could in theory overwrite something on the host system. In practice # the normal UNIX permissions model should prevent any damage, but we # should still detect this happening and raise an error as it is a sure # sign that something is wrong. # - mkdir -p "%{install-root}"/opt/escape-hatch/etc/ - echo "example" > "%{install-root}"/opt/escape-hatch/etc/org.conf buildstream-1.6.9/tests/integration/project/elements/symlinks/symlink-to-outside-sandbox.bst000066400000000000000000000004441437515270000326160ustar00rootroot00000000000000kind: manual depends: - base.bst config: install-commands: # This symlink could be used by a dependent element to trick BuildStream into # trying to create files outside of the sandbox. - mkdir "%{install-root}/opt/" - ln -s ../../usr "%{install-root}"/opt/escape-hatch buildstream-1.6.9/tests/integration/project/elements/workspace/000077500000000000000000000000001437515270000250135ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/workspace/workspace-mount-fail.bst000066400000000000000000000003131437515270000315710ustar00rootroot00000000000000kind: manual description: Workspace mount test depends: - filename: base.bst sources: - kind: local path: files/workspace-mount-src/ config: build-commands: - cc -c hello.c - exit 1 buildstream-1.6.9/tests/integration/project/elements/workspace/workspace-mount.bst000066400000000000000000000003161437515270000306630ustar00rootroot00000000000000kind: manual description: Workspace mount test depends: - filename: base.bst type: build sources: - kind: local path: files/workspace-mount-src/ config: build-commands: - cc -c hello.c workspace-updated-dependency-failed.bst000066400000000000000000000004701437515270000344270ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/workspacekind: manual depends: - base.bst - workspace/dependency.bst sources: - kind: local path: files/workspace-updated-dependency-failed/ config: build-commands: - make - chmod +x test.sh - mkdir -p %{install-root}/usr/bin/ - cp test.sh %{install-root}/usr/bin/ - ls %{install-root} workspace-updated-dependency-nested.bst000066400000000000000000000004701437515270000344650ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/elements/workspacekind: manual depends: - base.bst - workspace/dependency.bst sources: - kind: local path: files/workspace-updated-dependency-nested/ config: build-commands: - make - chmod +x test.sh - mkdir -p %{install-root}/usr/bin/ - cp test.sh %{install-root}/usr/bin/ - ls %{install-root} buildstream-1.6.9/tests/integration/project/elements/workspace/workspace-updated-dependency.bst000066400000000000000000000004611437515270000332640ustar00rootroot00000000000000kind: manual depends: - base.bst - workspace/dependency.bst sources: - kind: local path: files/workspace-updated-dependency/ config: build-commands: - make - chmod +x test.sh - mkdir -p %{install-root}/usr/bin/ - cp test.sh %{install-root}/usr/bin/ - ls %{install-root} buildstream-1.6.9/tests/integration/project/files/000077500000000000000000000000001437515270000223035ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/amhello.tar.gz000066400000000000000000000735331437515270000250660ustar00rootroot00000000000000K2Z\{wƶ|9!sc9$u14rrѱ&wgFNҤU@̞=g؞N|k ף?کMaQcw뛭v}7bK"e$Po/jv^ԍ"׿"c|Ncgh{6+ߜ%6M D+N_C{6X,-/fap*"'tg #7[V?VᾸ'VG-BrJkC?ޭxJ)΃q= f~<1"8񪀀E͓ŋf<$[y-$w:\xjqz:GτN}~.vES5N슳~m q. )嘹dl^S܋7Zt-,"bz&Wg, }7|>. 
6m`(i'mnӳ[Ys}>ws}>ws}>ws}>w_Ti2buildstream-1.6.9/tests/integration/project/files/base-with-tmp/000077500000000000000000000000001437515270000247645ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/base-with-tmp/tmp/000077500000000000000000000000001437515270000255645ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/base-with-tmp/tmp/dummy000066400000000000000000000000061437515270000266360ustar00rootroot00000000000000dummy!buildstream-1.6.9/tests/integration/project/files/canary000066400000000000000000000000061437515270000234770ustar00rootroot00000000000000alive buildstream-1.6.9/tests/integration/project/files/cmakehello.tar.gz000066400000000000000000000240001437515270000255320ustar00rootroot00000000000000./cmakehello/0000755000175200017520000000000013227637750011461 5ustar samsam./cmakehello/config.h.in0000644000175200017520000000005713227636401013476 0ustar samsam#define PACKAGE_STRING "${CMAKE_PROJECT_NAME}" ./cmakehello/src/0000755000175200017520000000000013227637750012250 5ustar samsam./cmakehello/src/main.c0000644000175200017520000000051012450526565013332 0ustar samsam/* Copyright (C) 2006-2014 Free Software Foundation, Inc. This program is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. */ #include #include int main (void) { puts ("Hello World!"); puts ("This is " PACKAGE_STRING "."); return 0; } ./cmakehello/src/CMakeLists.txt0000644000175200017520000000020113227637417015001 0ustar samsamadd_executable(hello main.c) message("Bindir is ${BINDIR}") install(TARGETS hello RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}) ./cmakehello/CMakeLists.txt0000644000175200017520000000047113227637610014216 0ustar samsamcmake_minimum_required(VERSION 2.6) # Note that we need to be explicit that we only require C, if not then CMake # will require a C++ compiler to be present. 
project(hello C) include("GNUInstallDirs") configure_file(config.h.in config.h) include_directories(${CMAKE_CURRENT_BINARY_DIR}) add_subdirectory(src) buildstream-1.6.9/tests/integration/project/files/compose-symlinks/000077500000000000000000000000001437515270000256175ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/compose-symlinks/base/000077500000000000000000000000001437515270000265315ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/compose-symlinks/base/usr/000077500000000000000000000000001437515270000273425ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/compose-symlinks/base/usr/sbin/000077500000000000000000000000001437515270000302755ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/compose-symlinks/base/usr/sbin/dummy000066400000000000000000000000061437515270000313470ustar00rootroot00000000000000dummy buildstream-1.6.9/tests/integration/project/files/compose-symlinks/overlay/000077500000000000000000000000001437515270000273005ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/compose-symlinks/overlay/sbin/000077500000000000000000000000001437515270000302335ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/compose-symlinks/overlay/sbin/init000066400000000000000000000000041437515270000311130ustar00rootroot00000000000000foo buildstream-1.6.9/tests/integration/project/files/hello.tar.xz000066400000000000000000000011641437515270000245600ustar00rootroot000000000000007zXZִF!t/'3]4I' :(3?XeUJʜ/"`3 jBYH,&tB7I|%?VȤd⑄,K-3.tH}p(R"F[p }J7@s5z2?LhBVvųqHĄcģIܻИIUO̺ v]mxB>h![U5(&li-U_Vm&UzGc埼īX, #z)5S7;PM'_LN/e_t6j`6gS!48NWU#wy4HY2O\]OhUkw># ZhFS %Cݱ4gIV vxhQa̹- РUm$B<w=Zܨ[(Gv쒟ħ'&$J,>k|QݮY"r/-=%{_(7qiPFWgYZbuildstream-1.6.9/tests/integration/project/files/import-source/000077500000000000000000000000001437515270000251135ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/import-source/subdir/000077500000000000000000000000001437515270000264035ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/import-source/subdir/test.txt000066400000000000000000000000251437515270000301200ustar00rootroot00000000000000This is another test buildstream-1.6.9/tests/integration/project/files/import-source/test.txt000066400000000000000000000000171437515270000266310ustar00rootroot00000000000000This is a test buildstream-1.6.9/tests/integration/project/files/makehello.tar.gz000066400000000000000000000006601437515270000253750ustar00rootroot00000000000000G[KO@`쯘 6I!&zb-YZ=ۇ]t&FFQҁ@]ԴXfYt)PfmUeKh"Kېx}Ĵ?Gͬõ7mkUfۮZ@κ^ŝ|AH~3炐2xQ~A/1!PYH-Onng׺/1IiUG܆GrOџVQwX8waUX]<`$OOe!եX!<oeRgfEicJPĶ!MBC`mOB!B!B_%gd(buildstream-1.6.9/tests/integration/project/files/pip-source/000077500000000000000000000000001437515270000243715ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/pip-source/app1.py000066400000000000000000000001731437515270000256050ustar00rootroot00000000000000#!/usr/bin/env python3 from hellolib import hello def main(): hello('App1') if __name__ == '__main__': main() buildstream-1.6.9/tests/integration/project/files/pip-source/myreqs.txt000066400000000000000000000000111437515270000264420ustar00rootroot00000000000000hellolib 
buildstream-1.6.9/tests/integration/project/files/pypi-repo/000077500000000000000000000000001437515270000242275ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/pypi-repo/app2/000077500000000000000000000000001437515270000250715ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/pypi-repo/app2/App2-0.1.tar.gz000066400000000000000000000014011437515270000273120ustar00rootroot00000000000000`[dist/App2-0.1.tarn@}SlHdTii B[8("XG{%4JH'K 3kc4ϰAQXCcdaH|mF iƆi۶azz`U Z2`y8~&ٌ =(Vm@;#4Jx>!Ц4cR mkD;]e-׮CaArhz|>Vₗ!h˫_{ͪEB]hcoTLQڽˢXu&eur<]n0eW,U>*}0LDPjoǪS{oc@}0[) \͕H@a"v~]߄9~PfiA}CJj5Zn6k I۟hDRr˿4䜗i|H*"?ZQ1ivʗw,׃ .yFT穔yVR4MSÞJ 9XRC.Wϵ)ڽƹ5KdU02&c]jVKJ֩&?lT9WG?4"^ZO??sMeu :`@ 0RPbuildstream-1.6.9/tests/integration/project/files/pypi-repo/app2/index.html000066400000000000000000000002171437515270000270660ustar00rootroot00000000000000 Links for app1 App2-0.1.tar.gz
buildstream-1.6.9/tests/integration/project/files/pypi-repo/hellolib/000077500000000000000000000000001437515270000260215ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/pypi-repo/hellolib/HelloLib-0.1.tar.gz000066400000000000000000000013361437515270000311410ustar00rootroot00000000000000q[dist/HelloLib-0.1.tar]o0s_aJk*54&4E^ђ8JAɲ2*ڌܸOFc4˾=n56խ=t1QGGqWaә8~%/ҶO?9z;y}{6mf9WQU"ۜLH69Hyr1"' 595N"QٽɄC~[&GO\ϔxލKBW{)L Uv$*ev7b_ 5/Ţc]v-!2A]2#uޕiEjFLnLɵ%vXuc cKNckyᕶj|n}Aw0vVa;\LAT|\Γ4"cRwt] gۄ{}ovU?Ж N-m޲] M!C%˱YidFM5V&k>3kxu:P_B]^l$ G"AW՚0% "wtr+І6L|Wlwt;Pbuildstream-1.6.9/tests/integration/project/files/pypi-repo/hellolib/index.html000066400000000000000000000002271437515270000300170ustar00rootroot00000000000000 Links for app1 HelloLib-0.1.tar.gz
buildstream-1.6.9/tests/integration/project/files/shell-mount/000077500000000000000000000000001437515270000245525ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/shell-mount/pony.txt000066400000000000000000000000051437515270000262730ustar00rootroot00000000000000pony buildstream-1.6.9/tests/integration/project/files/workspace-configure-only-once/000077500000000000000000000000001437515270000301615ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/workspace-configure-only-once/configure000077500000000000000000000001551437515270000320710ustar00rootroot00000000000000#!/usr/bin/env sh set -eu if [ -f "./prepared" ]; then touch "./prepared-again" fi touch "./prepared" buildstream-1.6.9/tests/integration/project/files/workspace-mount-src/000077500000000000000000000000001437515270000262265ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/workspace-mount-src/hello.c000066400000000000000000000001311437515270000274700ustar00rootroot00000000000000#include int main() { fprintf(stdout, "Hello world!\n"); return 0; } buildstream-1.6.9/tests/integration/project/files/workspace-updated-dependency-failed/000077500000000000000000000000001437515270000312635ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/workspace-updated-dependency-failed/Makefile000066400000000000000000000003121437515270000327170ustar00rootroot00000000000000all: test.sh hello: /etc/test/hello.txt cp $^ $@ brazil: /etc/test/brazil.txt cp $^ $@ test.sh: hello brazil echo "#!/usr/bin/env sh" > $@ echo -n "echo '" >> $@ cat $^ >> $@ echo -n "'" >> $@ buildstream-1.6.9/tests/integration/project/files/workspace-updated-dependency-nested/000077500000000000000000000000001437515270000313215ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/workspace-updated-dependency-nested/Makefile000066400000000000000000000003111437515270000327540ustar00rootroot00000000000000all: test.sh hello: /etc/test/hello.txt cp $^ $@ tests: /etc/test/tests/*.txt cp $^ $@ test.sh: hello tests echo "#!/usr/bin/env sh" > $@ echo -n "echo '" >> $@ cat $^ >> $@ echo -n "'" >> $@ buildstream-1.6.9/tests/integration/project/files/workspace-updated-dependency/000077500000000000000000000000001437515270000300415ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/files/workspace-updated-dependency/Makefile000066400000000000000000000001651437515270000315030ustar00rootroot00000000000000test.sh: /etc/test/hello.txt echo "#!/usr/bin/env sh" > $@ echo -n "echo '" >> $@ cat $^ >> $@ echo -n "'" >> $@ buildstream-1.6.9/tests/integration/project/keys/000077500000000000000000000000001437515270000221545ustar00rootroot00000000000000buildstream-1.6.9/tests/integration/project/keys/gnome-sdk.gpg000066400000000000000000000011651437515270000245420ustar00rootroot00000000000000 Ug+՞WBh4j=wjZ\rj ݖR!4,wgoO|c_vu; 8h\-ȗ|;Nn}0Ap>:jem<10%CMo鉟ø9Ďx#w %k\ڰ!0(sUz'`(n#6 V\a12| t a ILN E ˀdRnZ1ח(Gnome SDK 3.16 8"Ug    ϥURkKk}XH͓ U ]UcePٛJi`jQ}Zs{1ʒiԢ:Z{r6-{AifۄO (V|$\|v,gjY!aIfKL3mvkЖкkI A XD"Iל %{install-root}/test" ], }) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == "Hi\n" @pytest.mark.datafiles(DATA_DIR) def test_script_root(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = 
os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'script/script-layout.bst' create_script_element(element_name, element_path, config={ # Root-read only is False by default, we # want to check the default here # 'root-read-only': False, 'commands': [ "mkdir -p %{install-root}", "echo 'I can write to root' > /test", "cp /test %{install-root}" ], }) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == "I can write to root\n" @pytest.mark.datafiles(DATA_DIR) def test_script_no_root(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = os.path.join(project, 'elements') element_name = 'script/script-layout.bst' create_script_element(element_name, element_path, config={ 'root-read-only': True, 'commands': [ "mkdir -p %{install-root}", "echo 'I can not write to root' > /test", "cp /test %{install-root}" ], }) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code != 0 assert "/test: Read-only file system" in res.stderr @pytest.mark.datafiles(DATA_DIR) def test_script_cwd(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_path = os.path.join(project, 'elements') element_name = 'script/script-layout.bst' create_script_element(element_name, element_path, config={ 'commands': [ "echo 'test' > test", "cp /buildstream/test %{install-root}" ], }, variables={ 'cwd': '/buildstream' }) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == "test\n" @pytest.mark.datafiles(DATA_DIR) def test_script_layout(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'script/script-layout.bst' res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'test')) as f: text = f.read() assert text == "Hi\n" @pytest.mark.datafiles(DATA_DIR) def test_regression_cache_corruption(cli, tmpdir, datafiles): project = str(datafiles) checkout_original = os.path.join(cli.directory, 'checkout-original') checkout_after = os.path.join(cli.directory, 'checkout-after') element_name = 'script/corruption.bst' canary_element_name = 'script/corruption-image.bst' res = cli.run(project=project, args=['build', canary_element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', canary_element_name, checkout_original]) assert res.exit_code == 0 with open(os.path.join(checkout_original, 'canary')) as f: assert f.read() == 'alive\n' res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', canary_element_name, checkout_after]) assert res.exit_code == 0 with open(os.path.join(checkout_after, 'canary')) as f: assert f.read() == 'alive\n' @pytest.mark.datafiles(DATA_DIR) def test_regression_tmpdir(cli, tmpdir, datafiles): project = str(datafiles) element_name 
= 'script/tmpdir.bst' res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 @pytest.mark.datafiles(DATA_DIR) def test_regression_cache_corruption_2(cli, tmpdir, datafiles): project = str(datafiles) checkout_original = os.path.join(cli.directory, 'checkout-original') checkout_after = os.path.join(cli.directory, 'checkout-after') element_name = 'script/corruption-2.bst' canary_element_name = 'script/corruption-image.bst' res = cli.run(project=project, args=['build', canary_element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', canary_element_name, checkout_original]) assert res.exit_code == 0 with open(os.path.join(checkout_original, 'canary')) as f: assert f.read() == 'alive\n' res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['checkout', canary_element_name, checkout_after]) assert res.exit_code == 0 with open(os.path.join(checkout_after, 'canary')) as f: assert f.read() == 'alive\n' buildstream-1.6.9/tests/integration/shell.py000066400000000000000000000270211437515270000212160ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain from tests.testutils import cli_integration as cli pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) # execute_shell() # # Helper to run `bst shell` and first ensure that the element is built # # Args: # cli (Cli): The cli runner fixture # project (str): The project directory # command (list): The command argv list # config (dict): A project.conf dictionary to composite over the default # mount (tuple): A (host, target) tuple for the `--mount` option # element (str): The element to build and run a shell with # isolate (bool): Whether to pass --isolate to `bst shell` # def execute_shell(cli, project, command, *, config=None, mount=None, element='base.bst', isolate=False): # Ensure the element is built result = cli.run(project=project, project_config=config, args=['build', element]) assert result.exit_code == 0 args = ['shell'] if isolate: args += ['--isolate'] if mount is not None: host_path, target_path = mount args += ['--mount', host_path, target_path] args += [element, '--'] + command return cli.run(project=project, project_config=config, args=args) # Test running something through a shell, allowing it to find the # executable @pytest.mark.datafiles(DATA_DIR) def test_shell(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = execute_shell(cli, project, ["echo", "Ponies!"]) assert result.exit_code == 0 assert result.output == "Ponies!\n" # Test running an executable directly @pytest.mark.datafiles(DATA_DIR) def test_executable(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = execute_shell(cli, project, ["/bin/echo", "Horseys!"]) assert result.exit_code == 0 assert result.output == "Horseys!\n" # Test shell environment variable explicit assignments @pytest.mark.parametrize("animal", [("Horse"), ("Pony")]) @pytest.mark.datafiles(DATA_DIR) def test_env_assign(cli, tmpdir, datafiles, animal): project = os.path.join(datafiles.dirname, datafiles.basename) expected = animal + '\n' result = execute_shell(cli, project, ['/bin/sh', '-c', 'echo ${ANIMAL}'], config={ 'shell': { 'environment': { 'ANIMAL': animal } } }) assert result.exit_code == 0 assert result.output == expected # Test shell 
environment variable explicit assignments with host env var expansion @pytest.mark.parametrize("animal", [("Horse"), ("Pony")]) @pytest.mark.datafiles(DATA_DIR) def test_env_assign_expand_host_environ(cli, tmpdir, datafiles, animal): project = os.path.join(datafiles.dirname, datafiles.basename) expected = 'The animal is: {}\n'.format(animal) os.environ['BEAST'] = animal result = execute_shell(cli, project, ['/bin/sh', '-c', 'echo ${ANIMAL}'], config={ 'shell': { 'environment': { 'ANIMAL': 'The animal is: ${BEAST}' } } }) assert result.exit_code == 0 assert result.output == expected # Test that shell environment variable explicit assignments are discarded # when running an isolated shell @pytest.mark.parametrize("animal", [("Horse"), ("Pony")]) @pytest.mark.datafiles(DATA_DIR) def test_env_assign_isolated(cli, tmpdir, datafiles, animal): project = os.path.join(datafiles.dirname, datafiles.basename) result = execute_shell(cli, project, ['/bin/sh', '-c', 'echo ${ANIMAL}'], isolate=True, config={ 'shell': { 'environment': { 'ANIMAL': animal } } }) assert result.exit_code == 0 assert result.output == '\n' # Test running an executable in a runtime with no shell (i.e., no # /bin/sh) @pytest.mark.datafiles(DATA_DIR) def test_no_shell(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) element_path = os.path.join(project, 'elements') element_name = 'shell/no-shell.bst' # Create an element that removes /bin/sh from the base runtime element = { 'kind': 'script', 'depends': [{ 'filename': 'base.bst', 'type': 'build' }], 'variables': { 'install-root': '/' }, 'config': { 'commands': [ 'rm /bin/sh' ] } } os.makedirs(os.path.dirname(os.path.join(element_path, element_name)), exist_ok=True) _yaml.dump(element, os.path.join(element_path, element_name)) result = execute_shell(cli, project, ['/bin/echo', 'Pegasissies!'], element=element_name) assert result.exit_code == 0 assert result.output == "Pegasissies!\n" # Test that bind mounts defined in project.conf work @pytest.mark.parametrize("path", [("/etc/pony.conf"), ("/usr/share/pony/pony.txt")]) @pytest.mark.datafiles(DATA_DIR) def test_host_files(cli, tmpdir, datafiles, path): project = os.path.join(datafiles.dirname, datafiles.basename) ponyfile = os.path.join(project, 'files', 'shell-mount', 'pony.txt') result = execute_shell(cli, project, ['cat', path], config={ 'shell': { 'host-files': [ { 'host_path': ponyfile, 'path': path } ] } }) assert result.exit_code == 0 assert result.output == 'pony\n' # Test that bind mounts defined in project.conf work @pytest.mark.parametrize("path", [("/etc"), ("/usr/share/pony")]) @pytest.mark.datafiles(DATA_DIR) def test_host_files_expand_environ(cli, tmpdir, datafiles, path): project = os.path.join(datafiles.dirname, datafiles.basename) hostpath = os.path.join(project, 'files', 'shell-mount') fullpath = os.path.join(path, 'pony.txt') os.environ['BASE_PONY'] = path os.environ['HOST_PONY_PATH'] = hostpath result = execute_shell(cli, project, ['cat', fullpath], config={ 'shell': { 'host-files': [ { 'host_path': '${HOST_PONY_PATH}/pony.txt', 'path': '${BASE_PONY}/pony.txt' } ] } }) assert result.exit_code == 0 assert result.output == 'pony\n' # Test that bind mounts defined in project.conf dont mount in isolation @pytest.mark.parametrize("path", [("/etc/pony.conf"), ("/usr/share/pony/pony.txt")]) @pytest.mark.datafiles(DATA_DIR) def test_isolated_no_mount(cli, tmpdir, datafiles, path): project = os.path.join(datafiles.dirname, datafiles.basename) ponyfile = os.path.join(project, 'files', 
'shell-mount', 'pony.txt') result = execute_shell(cli, project, ['cat', path], isolate=True, config={ 'shell': { 'host-files': [ { 'host_path': ponyfile, 'path': path } ] } }) assert result.exit_code != 0 # Test that we warn about non-existing files on the host if the mount is not # declared as optional, and that there is no warning if it is optional @pytest.mark.parametrize("optional", [("mandatory"), ("optional")]) @pytest.mark.datafiles(DATA_DIR) def test_host_files_missing(cli, tmpdir, datafiles, optional): project = os.path.join(datafiles.dirname, datafiles.basename) ponyfile = os.path.join(project, 'files', 'shell-mount', 'horsy.txt') if optional == "optional": option = True else: option = False # Assert that we did successfully run something in the shell anyway result = execute_shell(cli, project, ['echo', 'Hello'], config={ 'shell': { 'host-files': [ { 'host_path': ponyfile, 'path': '/etc/pony.conf', 'optional': option } ] } }) assert result.exit_code == 0 assert result.output == 'Hello\n' if option: # Assert that there was no warning about the mount assert ponyfile not in result.stderr else: # Assert that there was a warning about the mount assert ponyfile in result.stderr # Test that bind mounts defined in project.conf work @pytest.mark.parametrize("path", [("/etc/pony.conf"), ("/usr/share/pony/pony.txt")]) @pytest.mark.datafiles(DATA_DIR) def test_cli_mount(cli, tmpdir, datafiles, path): project = os.path.join(datafiles.dirname, datafiles.basename) ponyfile = os.path.join(project, 'files', 'shell-mount', 'pony.txt') result = execute_shell(cli, project, ['cat', path], mount=(ponyfile, path)) assert result.exit_code == 0 assert result.output == 'pony\n' # Test that we can see the workspace files in a shell @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_workspace_visible(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_name = 'workspace/workspace-mount-fail.bst' # Open a workspace on our build failing element # res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 # Ensure the dependencies of our build failing element are built result = cli.run(project=project, args=['build', 'base.bst']) assert result.exit_code == 0 # Obtain a copy of the hello.c content from the workspace # workspace_hello_path = os.path.join(cli.directory, 'workspace', 'hello.c') assert os.path.exists(workspace_hello_path) with open(workspace_hello_path, 'r') as f: workspace_hello = f.read() # Cat the hello.c file from a bst shell command, and assert # that we got the same content here # result = cli.run(project=project, args=[ 'shell', '--build', element_name, '--', 'cat', 'hello.c' ]) assert result.exit_code == 0 assert result.output == workspace_hello # Test that we can see the workspace files in a shell @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_sysroot_workspace_visible(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_name = 'workspace/workspace-mount-fail.bst' # Open a workspace on our build failing element # res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 # Ensure the dependencies of our build failing element are built result = cli.run(project=project, args=['build', element_name]) result.assert_main_error(ErrorDomain.STREAM, None) # Discover the 
sysroot of the failed build directory, after one # failed build, there should be only one directory there. # build_base = os.path.join(cli.directory, 'build') build_dirs = os.listdir(path=build_base) assert len(build_dirs) == 1 build_dir = os.path.join(build_base, build_dirs[0]) # Obtain a copy of the hello.c content from the workspace # workspace_hello_path = os.path.join(cli.directory, 'workspace', 'hello.c') assert os.path.exists(workspace_hello_path) with open(workspace_hello_path, 'r') as f: workspace_hello = f.read() # Cat the hello.c file from a bst shell command, and assert # that we got the same content here # result = cli.run(project=project, args=[ 'shell', '--build', '--sysroot', build_dir, element_name, '--', 'cat', 'hello.c' ]) assert result.exit_code == 0 assert result.output == workspace_hello buildstream-1.6.9/tests/integration/source-determinism.py000066400000000000000000000125061437515270000237270ustar00rootroot00000000000000import os import pytest from buildstream import _yaml, utils from tests.testutils import create_repo, ALL_REPO_KINDS from tests.testutils import cli_integration as cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) def create_test_file(*path, mode=0o644, content='content\n'): path = os.path.join(*path) os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, 'w') as f: f.write(content) os.fchmod(f.fileno(), mode) def create_test_directory(*path, mode=0o644): create_test_file(*path, '.keep', content='') path = os.path.join(*path) os.chmod(path, mode) @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS] + ['local']) def test_deterministic_source_umask(cli, tmpdir, datafiles, kind, integration_cache): project = str(datafiles) element_name = 'list' element_path = os.path.join(project, 'elements', element_name) repodir = os.path.join(str(tmpdir), 'repo') sourcedir = os.path.join(project, 'source') create_test_file(sourcedir, 'a.txt', mode=0o700) create_test_file(sourcedir, 'b.txt', mode=0o755) create_test_file(sourcedir, 'c.txt', mode=0o600) create_test_file(sourcedir, 'd.txt', mode=0o400) create_test_file(sourcedir, 'e.txt', mode=0o644) create_test_file(sourcedir, 'f.txt', mode=0o4755) create_test_file(sourcedir, 'g.txt', mode=0o2755) create_test_file(sourcedir, 'h.txt', mode=0o1755) create_test_directory(sourcedir, 'dir-a', mode=0o0700) create_test_directory(sourcedir, 'dir-c', mode=0o0755) create_test_directory(sourcedir, 'dir-d', mode=0o4755) create_test_directory(sourcedir, 'dir-e', mode=0o2755) create_test_directory(sourcedir, 'dir-f', mode=0o1755) if kind == 'local': source = {'kind': 'local', 'path': 'source'} else: repo = create_repo(kind, repodir) ref = repo.create(sourcedir) source = repo.source_config(ref=ref) element = { 'kind': 'manual', 'depends': [ { 'filename': 'base.bst', 'type': 'build' } ], 'sources': [ source ], 'config': { 'install-commands': [ 'ls -l >"%{install-root}/ls-l"' ] } } _yaml.dump(element, element_path) def get_value_for_umask(umask): checkoutdir = os.path.join(str(tmpdir), 'checkout-{}'.format(umask)) old_umask = os.umask(umask) try: result = cli.run(project=project, args=['build', element_name]) result.assert_success() result = cli.run(project=project, args=['checkout', element_name, checkoutdir]) result.assert_success() with open(os.path.join(checkoutdir, 'ls-l'), 'r') as f: return f.read() finally: os.umask(old_umask) cache_dir = integration_cache.artifacts cli.remove_artifact_from_cache(project, 
element_name, cache_dir=cache_dir) assert get_value_for_umask(0o022) == get_value_for_umask(0o077) @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_deterministic_source_local(cli, tmpdir, datafiles, integration_cache): """Only user rights should be considered for local source. """ project = str(datafiles) element_name = 'test' element_path = os.path.join(project, 'elements', element_name) sourcedir = os.path.join(project, 'source') element = { 'kind': 'manual', 'depends': [ { 'filename': 'base.bst', 'type': 'build' } ], 'sources': [ { 'kind': 'local', 'path': 'source' } ], 'config': { 'install-commands': [ 'ls -l >"%{install-root}/ls-l"' ] } } _yaml.dump(element, element_path) def get_value_for_mask(mask): checkoutdir = os.path.join(str(tmpdir), 'checkout-{}'.format(mask)) create_test_file(sourcedir, 'a.txt', mode=0o644 & mask) create_test_file(sourcedir, 'b.txt', mode=0o755 & mask) create_test_file(sourcedir, 'c.txt', mode=0o4755 & mask) create_test_file(sourcedir, 'd.txt', mode=0o2755 & mask) create_test_file(sourcedir, 'e.txt', mode=0o1755 & mask) create_test_directory(sourcedir, 'dir-a', mode=0o0755 & mask) create_test_directory(sourcedir, 'dir-b', mode=0o4755 & mask) create_test_directory(sourcedir, 'dir-c', mode=0o2755 & mask) create_test_directory(sourcedir, 'dir-d', mode=0o1755 & mask) try: result = cli.run(project=project, args=['build', element_name]) result.assert_success() result = cli.run(project=project, args=['checkout', element_name, checkoutdir]) result.assert_success() with open(os.path.join(checkoutdir, 'ls-l'), 'r') as f: return f.read() finally: cache_dir = integration_cache.artifacts cli.remove_artifact_from_cache(project, element_name, cache_dir=cache_dir) assert get_value_for_mask(0o7777) == get_value_for_mask(0o0700) buildstream-1.6.9/tests/integration/stack.py000066400000000000000000000016011437515270000212100ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.datafiles(DATA_DIR) def test_stack(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'stack/stack.bst' res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 cli.run(project=project, args=['checkout', element_name, checkout]) assert res.exit_code == 0 with open(os.path.join(checkout, 'hi')) as f: hi = f.read() with open(os.path.join(checkout, 'another-hi')) as f: another_hi = f.read() assert hi == "Hi\n" assert another_hi == "Another hi\n" buildstream-1.6.9/tests/integration/symlinks.py000066400000000000000000000053441437515270000217640ustar00rootroot00000000000000import os import shlex import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.integration import assert_contains pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.datafiles(DATA_DIR) def test_absolute_symlinks_made_relative(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'symlinks/dangling-symlink.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 
element_name, checkout]) assert result.exit_code == 0 symlink = os.path.join(checkout, 'opt', 'orgname') assert os.path.islink(symlink) # The symlink is created to point to /usr/orgs/orgname, but BuildStream # should make all symlink target relative when assembling the artifact. # This is done so that nothing points outside the sandbox and so that # staging artifacts in locations other than / doesn't cause the links to # all break. assert os.readlink(symlink) == '../usr/orgs/orgname' @pytest.mark.datafiles(DATA_DIR) def test_allow_overlaps_inside_symlink_with_dangling_target(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'symlinks/dangling-symlink-overlap.bst' result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code == 0 # See the dangling-symlink*.bst elements for details on what we are testing. assert_contains(checkout, ['/usr/orgs/orgname/etc/org.conf']) @pytest.mark.datafiles(DATA_DIR) def test_detect_symlink_overlaps_pointing_outside_sandbox(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkout = os.path.join(cli.directory, 'checkout') element_name = 'symlinks/symlink-to-outside-sandbox-overlap.bst' # Building the two elements should succeed... result = cli.run(project=project, args=['build', element_name]) assert result.exit_code == 0 # ...but when we compose them together, the overlaps create paths that # point outside the sandbox which BuildStream needs to detect before it # tries to actually write there. result = cli.run(project=project, args=['checkout', element_name, checkout]) assert result.exit_code != 0 assert "Destination path resolves to a path outside of the staging area" in result.stderr buildstream-1.6.9/tests/integration/workspace.py000066400000000000000000000251731437515270000221130ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli_integration as cli from tests.testutils.site import IS_LINUX from tests.testutils.integration import walk_dir pytestmark = pytest.mark.integration DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "project" ) @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_workspace_mount(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_name = 'workspace/workspace-mount.bst' res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 assert os.path.exists(os.path.join(cli.directory, 'workspace')) @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_workspace_updated_dependency(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_path = os.path.join(project, 'elements') element_name = 'workspace/workspace-updated-dependency.bst' dep_name = 'workspace/dependency.bst' dependency = { 'kind': 'manual', 'depends': [{ 'filename': 'base.bst', 'type': 'build' }], 'config': { 'build-commands': [ 'mkdir -p %{install-root}/etc/test/', 'echo "Hello world!" 
> %{install-root}/etc/test/hello.txt' ] } } os.makedirs(os.path.dirname(os.path.join(element_path, dep_name)), exist_ok=True) _yaml.dump(dependency, os.path.join(element_path, dep_name)) # First open the workspace res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 # We build the workspaced element, so that we have an artifact # with specific built dependencies res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 # Now we update a dependency of our element. dependency['config']['build-commands'] = [ 'mkdir -p %{install-root}/etc/test/', 'echo "Hello china!" > %{install-root}/etc/test/hello.txt' ] _yaml.dump(dependency, os.path.join(element_path, dep_name)) # `Make` would look at timestamps and normally not realize that # our dependency's header files changed. BuildStream must # therefore ensure that we change the mtimes of any files touched # since the last successful build of this element, otherwise this # build will fail. res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['shell', element_name, '/usr/bin/test.sh']) assert res.exit_code == 0 assert res.output == 'Hello china!\n\n' @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_workspace_update_dependency_failed(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_path = os.path.join(project, 'elements') element_name = 'workspace/workspace-updated-dependency-failed.bst' dep_name = 'workspace/dependency.bst' dependency = { 'kind': 'manual', 'depends': [{ 'filename': 'base.bst', 'type': 'build' }], 'config': { 'build-commands': [ 'mkdir -p %{install-root}/etc/test/', 'echo "Hello world!" > %{install-root}/etc/test/hello.txt', 'echo "Hello brazil!" > %{install-root}/etc/test/brazil.txt' ] } } os.makedirs(os.path.dirname(os.path.join(element_path, dep_name)), exist_ok=True) _yaml.dump(dependency, os.path.join(element_path, dep_name)) # First open the workspace res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 # We build the workspaced element, so that we have an artifact # with specific built dependencies res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 # Now we update a dependency of our element. dependency['config']['build-commands'] = [ 'mkdir -p %{install-root}/etc/test/', 'echo "Hello china!" > %{install-root}/etc/test/hello.txt', 'echo "Hello brazil!" > %{install-root}/etc/test/brazil.txt' ] _yaml.dump(dependency, os.path.join(element_path, dep_name)) # And our build fails! with open(os.path.join(workspace, 'Makefile'), 'a') as f: f.write("\texit 1") res = cli.run(project=project, args=['build', element_name]) assert res.exit_code != 0 # We update our dependency again... dependency['config']['build-commands'] = [ 'mkdir -p %{install-root}/etc/test/', 'echo "Hello world!" > %{install-root}/etc/test/hello.txt', 'echo "Hello spain!" > %{install-root}/etc/test/brazil.txt' ] _yaml.dump(dependency, os.path.join(element_path, dep_name)) # And fix the source with open(os.path.join(workspace, 'Makefile'), 'r') as f: makefile = f.readlines() with open(os.path.join(workspace, 'Makefile'), 'w') as f: f.write("\n".join(makefile[:-1])) # Since buildstream thinks hello.txt did not change, we could end # up not rebuilding a file! 
We need to make sure that a case like # this can't blind-side us. res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 res = cli.run(project=project, args=['shell', element_name, '/usr/bin/test.sh']) assert res.exit_code == 0 assert res.output == 'Hello world!\nHello spain!\n\n' @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_updated_dependency_nested(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_path = os.path.join(project, 'elements') element_name = 'workspace/workspace-updated-dependency-nested.bst' dep_name = 'workspace/dependency.bst' dependency = { 'kind': 'manual', 'depends': [{ 'filename': 'base.bst', 'type': 'build' }], 'config': { 'build-commands': [ 'mkdir -p %{install-root}/etc/test/tests/', 'echo "Hello world!" > %{install-root}/etc/test/hello.txt', 'echo "Hello brazil!" > %{install-root}/etc/test/tests/brazil.txt' ] } } os.makedirs(os.path.dirname(os.path.join(element_path, dep_name)), exist_ok=True) _yaml.dump(dependency, os.path.join(element_path, dep_name)) # First open the workspace res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 # We build the workspaced element, so that we have an artifact # with specific built dependencies res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 # Now we update a dependency of our element. dependency['config']['build-commands'] = [ 'mkdir -p %{install-root}/etc/test/tests/', 'echo "Hello world!" > %{install-root}/etc/test/hello.txt', 'echo "Hello test!" > %{install-root}/etc/test/tests/tests.txt' ] _yaml.dump(dependency, os.path.join(element_path, dep_name)) res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 # Buildstream should pick up the newly added element, and pick up # the lack of the newly removed element res = cli.run(project=project, args=['shell', element_name, '/usr/bin/test.sh']) assert res.exit_code == 0 assert res.output == 'Hello world!\nHello test!\n\n' @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_incremental_configure_commands_run_only_once(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) workspace = os.path.join(cli.directory, 'workspace') element_path = os.path.join(project, 'elements') element_name = 'workspace/incremental.bst' element = { 'kind': 'manual', 'depends': [{ 'filename': 'base.bst', 'type': 'build' }], 'sources': [{ 'kind': 'local', 'path': 'files/workspace-configure-only-once' }], 'config': { 'configure-commands': [ '$SHELL configure' ] } } _yaml.dump(element, os.path.join(element_path, element_name)) # We open a workspace on the above element res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) res.assert_success() # Then we build, and check whether the configure step succeeded res = cli.run(project=project, args=['build', element_name]) res.assert_success() assert os.path.exists(os.path.join(workspace, 'prepared')) # When we build again, the configure commands should not be # called, and we should therefore exit cleanly (the configure # commands are set to always fail after the first run) res = cli.run(project=project, args=['build', element_name]) res.assert_success() assert not os.path.exists(os.path.join(workspace, 'prepared-again')) # Test that rebuilding an already built workspaced element does # not crash after the last 
successfully built artifact is removed # from the cache # # A user can remove their artifact cache, or manually remove the # artifact with `bst artifact delete`, or BuildStream can delete # the last successfully built artifact for this workspace as a # part of a cleanup job. # @pytest.mark.integration @pytest.mark.datafiles(DATA_DIR) def test_workspace_missing_last_successful(cli, datafiles, integration_cache): project = str(datafiles) workspace = os.path.join(cli.directory, 'workspace') element_name = 'workspace/workspace-mount.bst' # Open workspace res = cli.run(project=project, args=['workspace', 'open', element_name, workspace]) assert res.exit_code == 0 # Build first, this will record the last successful build in local state res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 # Remove the artifact from the cache, invalidating the last successful build cli.remove_artifact_from_cache(project, element_name, cache_dir=integration_cache.artifacts) # Build again, ensure we dont crash just because the artifact went missing res = cli.run(project=project, args=['build', element_name]) assert res.exit_code == 0 buildstream-1.6.9/tests/loader/000077500000000000000000000000001437515270000164565ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/__init__.py000066400000000000000000000006171437515270000205730ustar00rootroot00000000000000from buildstream._context import Context from buildstream._project import Project from buildstream._loader import Loader # # This is used by the loader test modules, these should # be removed in favor of testing the functionality via # the CLI like in the frontend tests anyway. # def make_loader(basedir): context = Context() project = Project(basedir, context) return project.loader buildstream-1.6.9/tests/loader/basics.py000066400000000000000000000056671437515270000203120ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import LoadError, LoadErrorReason from buildstream._loader import Loader, MetaElement from . 
import make_loader DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'basics', ) ############################################################## # Basics: Test behavior loading the simplest of projects # ############################################################## @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_one_file(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/onefile.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_missing_file(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/missing.bst'])[0] assert (exc.value.reason == LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_invalid_reference(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/badreference.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_YAML) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_invalid_yaml(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/badfile.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_YAML) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_fail_fullpath_target(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) fullpath = os.path.join(basedir, 'elements', 'onefile.bst') with pytest.raises(LoadError) as exc: loader = make_loader(basedir) loader.load([fullpath]) assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_invalid_key(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/invalidkey.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'onefile')) def test_invalid_directory_load(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/'])[0] assert (exc.value.reason == LoadErrorReason.LOADING_DIRECTORY) buildstream-1.6.9/tests/loader/basics/000077500000000000000000000000001437515270000177225ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/basics/onefile/000077500000000000000000000000001437515270000213435ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/basics/onefile/elements/000077500000000000000000000000001437515270000231575ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/basics/onefile/elements/badfile.bst000066400000000000000000000000751437515270000252610ustar00rootroot00000000000000# This is just invalid YAML - | this is malformed yaml. 
) buildstream-1.6.9/tests/loader/basics/onefile/elements/badreference.bst000066400000000000000000000001251437515270000262740ustar00rootroot00000000000000# This bad YAML file makes a reference to an undefined entity name: pony pony: *pony buildstream-1.6.9/tests/loader/basics/onefile/elements/invalidkey.bst000066400000000000000000000000651437515270000260310ustar00rootroot00000000000000kind: pony description: This is the pony wings: blue buildstream-1.6.9/tests/loader/basics/onefile/elements/invalidsourcekey.bst000066400000000000000000000001641437515270000272520ustar00rootroot00000000000000kind: pony description: This is the pony sources: - kind: ponyland url: ptp://pw.ponies.p/ weather: great buildstream-1.6.9/tests/loader/basics/onefile/elements/onefile.bst000066400000000000000000000000511437515270000253060ustar00rootroot00000000000000kind: pony description: This is the pony buildstream-1.6.9/tests/loader/basics/onefile/project.conf000066400000000000000000000000321437515270000236530ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/loader/dependencies.py000066400000000000000000000203411437515270000214560ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import LoadError, LoadErrorReason from buildstream._loader import Loader, MetaElement from tests.testutils import cli from . import make_loader DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'dependencies', ) ############################################################## # Basics: Test behavior loading projects with dependencies # ############################################################## @pytest.mark.datafiles(DATA_DIR) def test_two_files(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/target.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.dependencies) == 1) firstdep = element.dependencies[0] assert(isinstance(firstdep, MetaElement)) assert(firstdep.kind == 'manual') @pytest.mark.datafiles(DATA_DIR) def test_shared_dependency(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/shareddeptarget.bst'])[0] # Toplevel is 'pony' with 2 dependencies # assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.dependencies) == 2) # The first specified dependency is 'thefirstdep' # firstdep = element.dependencies[0] assert(isinstance(firstdep, MetaElement)) assert(firstdep.kind == 'manual') assert(len(firstdep.dependencies) == 0) # The second specified dependency is 'shareddep' # shareddep = element.dependencies[1] assert(isinstance(shareddep, MetaElement)) assert(shareddep.kind == 'shareddep') assert(len(shareddep.dependencies) == 1) # The element which shareddep depends on is # the same element in memory as firstdep # shareddepdep = shareddep.dependencies[0] assert(isinstance(shareddepdep, MetaElement)) # Assert they are in fact the same LoadElement # # Note we must use 'is' to test that both variables # refer to the same object in memory, not a regular # equality test with '==' which is one of those operator # overridable thingies. 
# assert(shareddepdep is firstdep) @pytest.mark.datafiles(DATA_DIR) def test_dependency_dict(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/target-depdict.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.dependencies) == 1) firstdep = element.dependencies[0] assert(isinstance(firstdep, MetaElement)) assert(firstdep.kind == 'manual') @pytest.mark.datafiles(DATA_DIR) def test_invalid_dependency_declaration(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/invaliddep.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_circular_dependency(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/circulartarget.bst'])[0] assert (exc.value.reason == LoadErrorReason.CIRCULAR_DEPENDENCY) @pytest.mark.datafiles(DATA_DIR) def test_invalid_dependency_type(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/invaliddeptype.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_invalid_strict_dependency(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/invalidstrict.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_invalid_non_strict_dependency(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) with pytest.raises(LoadError) as exc: element = loader.load(['elements/invalidnonstrict.bst'])[0] assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_build_dependency(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/builddep.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.build_dependencies) == 1) firstdep = element.build_dependencies[0] assert(isinstance(firstdep, MetaElement)) assert(len(element.dependencies) == 0) @pytest.mark.datafiles(DATA_DIR) def test_runtime_dependency(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/runtimedep.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.dependencies) == 1) firstdep = element.dependencies[0] assert(isinstance(firstdep, MetaElement)) assert(len(element.build_dependencies) == 0) @pytest.mark.datafiles(DATA_DIR) def test_build_runtime_dependency(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/target.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.dependencies) == 1) assert(len(element.build_dependencies) == 1) firstdep = element.dependencies[0] assert(isinstance(firstdep, MetaElement)) firstbuilddep = element.build_dependencies[0] assert(firstdep == 
firstbuilddep) @pytest.mark.datafiles(DATA_DIR) def test_all_dependency(datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) loader = make_loader(basedir) element = loader.load(['elements/alldep.bst'])[0] assert(isinstance(element, MetaElement)) assert(element.kind == 'pony') assert(len(element.dependencies) == 1) assert(len(element.build_dependencies) == 1) firstdep = element.dependencies[0] assert(isinstance(firstdep, MetaElement)) firstbuilddep = element.build_dependencies[0] assert(firstdep == firstbuilddep) @pytest.mark.datafiles(DATA_DIR) def test_list_build_dependency(cli, datafiles): project = str(datafiles) # Check that the pipeline includes the build dependency deps = cli.get_pipeline(project, ['elements/builddep-list.bst'], scope="build") assert "elements/firstdep.bst" in deps @pytest.mark.datafiles(DATA_DIR) def test_list_runtime_dependency(cli, datafiles): project = str(datafiles) # Check that the pipeline includes the runtime dependency deps = cli.get_pipeline(project, ['elements/runtimedep-list.bst'], scope="run") assert "elements/firstdep.bst" in deps @pytest.mark.datafiles(DATA_DIR) def test_list_dependencies_combined(cli, datafiles): project = str(datafiles) # Check that runtime deps get combined rundeps = cli.get_pipeline(project, ['elements/list-combine.bst'], scope="run") assert "elements/firstdep.bst" not in rundeps assert "elements/seconddep.bst" in rundeps assert "elements/thirddep.bst" in rundeps # Check that build deps get combined builddeps = cli.get_pipeline(project, ['elements/list-combine.bst'], scope="build") assert "elements/firstdep.bst" in builddeps assert "elements/seconddep.bst" not in builddeps assert "elements/thirddep.bst" in builddeps @pytest.mark.datafiles(DATA_DIR) def test_list_overlap(cli, datafiles): project = str(datafiles) # Check that dependencies get merged rundeps = cli.get_pipeline(project, ['elements/list-overlap.bst'], scope="run") assert "elements/firstdep.bst" in rundeps builddeps = cli.get_pipeline(project, ['elements/list-overlap.bst'], scope="build") assert "elements/firstdep.bst" in builddeps buildstream-1.6.9/tests/loader/dependencies/000077500000000000000000000000001437515270000211045ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/dependencies/elements/000077500000000000000000000000001437515270000227205ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/dependencies/elements/alldep.bst000066400000000000000000000002011437515270000246640ustar00rootroot00000000000000kind: pony description: This element has a dependency with type 'all' depends: - filename: elements/firstdep.bst type: all buildstream-1.6.9/tests/loader/dependencies/elements/builddep-list.bst000066400000000000000000000002071437515270000261720ustar00rootroot00000000000000kind: stack description: This element has a build-only dependency specified via build-depends build-depends: - elements/firstdep.bst buildstream-1.6.9/tests/loader/dependencies/elements/builddep.bst000066400000000000000000000001761437515270000252260ustar00rootroot00000000000000kind: pony description: This element has a build-only dependency depends: - filename: elements/firstdep.bst type: build buildstream-1.6.9/tests/loader/dependencies/elements/circular-firstdep.bst000066400000000000000000000001651437515270000270560ustar00rootroot00000000000000kind: pony description: Depend on another dep which depends on the target depends: - elements/circular-seconddep.bst 
buildstream-1.6.9/tests/loader/dependencies/elements/circular-seconddep.bst000066400000000000000000000001641437515270000272010ustar00rootroot00000000000000kind: pony description: Depend on the target, creating a circular dependency depends: - elements/circulartarget.bst buildstream-1.6.9/tests/loader/dependencies/elements/circulartarget.bst000066400000000000000000000001771437515270000264520ustar00rootroot00000000000000kind: pony description: This is a main target which introduces a circular dependency depends: - elements/circular-firstdep.bst buildstream-1.6.9/tests/loader/dependencies/elements/firstdep.bst000066400000000000000000000000671437515270000252550ustar00rootroot00000000000000kind: manual description: This is the first dependency buildstream-1.6.9/tests/loader/dependencies/elements/invaliddep.bst000066400000000000000000000001471437515270000255530ustar00rootroot00000000000000kind: pony description: This is an invalid dependency depends: more: it should be a list, not a dict buildstream-1.6.9/tests/loader/dependencies/elements/invaliddeptype.bst000066400000000000000000000002151437515270000264510ustar00rootroot00000000000000kind: pony description: This is an invalid dependency type depends: - filename: elements/firstdep.bst type: should be build or runtime buildstream-1.6.9/tests/loader/dependencies/elements/invalidnonstrict.bst000066400000000000000000000003621437515270000270250ustar00rootroot00000000000000kind: manual description: | This is an invalid non strict dependency because it is currently illegal to explicitly set a dependency to be non-strict (even though this is the default). depends: - filename: firstdep.bst strict: false buildstream-1.6.9/tests/loader/dependencies/elements/invalidstrict.bst000066400000000000000000000002571437515270000263150ustar00rootroot00000000000000kind: manual description: | This is an invalid strict dependency because runtime dependencies cannot be strict. 
runtime-depends: - filename: firstdep.bst strict: true buildstream-1.6.9/tests/loader/dependencies/elements/list-combine.bst000066400000000000000000000003041437515270000260140ustar00rootroot00000000000000kind: stack description: This element depends on three elements in different ways build-depends: - elements/firstdep.bst runtime-depends: - elements/seconddep.bst depends: - elements/thirddep.bst buildstream-1.6.9/tests/loader/dependencies/elements/list-overlap.bst000066400000000000000000000002621437515270000260530ustar00rootroot00000000000000kind: stack description: This element depends on two elements in different ways build-depends: - elements/firstdep.bst depends: - filename: elements/firstdep.bst type: runtime buildstream-1.6.9/tests/loader/dependencies/elements/runtimedep-list.bst000066400000000000000000000001571437515270000265620ustar00rootroot00000000000000kind: stack description: This element has a runtime-only dependency runtime-depends: - elements/firstdep.bst buildstream-1.6.9/tests/loader/dependencies/elements/runtimedep.bst000066400000000000000000000002021437515270000256000ustar00rootroot00000000000000kind: pony description: This element has a runtime-only dependency depends: - filename: elements/firstdep.bst type: runtime buildstream-1.6.9/tests/loader/dependencies/elements/seconddep.bst000066400000000000000000000000701437515270000253730ustar00rootroot00000000000000kind: manual description: This is the second dependency buildstream-1.6.9/tests/loader/dependencies/elements/shareddep.bst000066400000000000000000000001331437515270000253660ustar00rootroot00000000000000kind: shareddep description: This is the first dependency depends: - elements/firstdep.bst buildstream-1.6.9/tests/loader/dependencies/elements/shareddeptarget.bst000066400000000000000000000001521437515270000265760ustar00rootroot00000000000000kind: pony description: This is the main target depends: - elements/firstdep.bst - elements/shareddep.bst buildstream-1.6.9/tests/loader/dependencies/elements/target-depdict.bst000066400000000000000000000001331437515270000263270ustar00rootroot00000000000000kind: pony description: This is the main target depends: - filename: elements/firstdep.bst buildstream-1.6.9/tests/loader/dependencies/elements/target.bst000066400000000000000000000001211437515270000247120ustar00rootroot00000000000000kind: pony description: This is the main target depends: - elements/firstdep.bst buildstream-1.6.9/tests/loader/dependencies/elements/thirddep.bst000066400000000000000000000000671437515270000252400ustar00rootroot00000000000000kind: manual description: This is the third dependency buildstream-1.6.9/tests/loader/dependencies/project.conf000066400000000000000000000000321437515270000234140ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/loader/junctions.py000066400000000000000000000270141437515270000210500ustar00rootroot00000000000000import os import pytest import shutil from buildstream import _yaml, ElementError from buildstream._exceptions import ErrorDomain, LoadError, LoadErrorReason from tests.testutils import cli, create_repo from tests.testutils.site import HAVE_GIT DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'junctions', ) def copy_subprojects(project, datafiles, subprojects): for subproject in subprojects: shutil.copytree(os.path.join(str(datafiles), subproject), os.path.join(str(project), subproject)) @pytest.mark.datafiles(DATA_DIR) def test_simple_pipeline(cli, datafiles): project = os.path.join(str(datafiles), 'foo') 
copy_subprojects(project, datafiles, ['base']) # Check that the pipeline includes the subproject element element_list = cli.get_pipeline(project, ['target.bst']) assert 'base.bst:target.bst' in element_list @pytest.mark.datafiles(DATA_DIR) def test_simple_build(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'foo') copy_subprojects(project, datafiles, ['base']) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 # Check that the checkout contains the expected files from both projects assert(os.path.exists(os.path.join(checkoutdir, 'base.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'foo.txt'))) @pytest.mark.datafiles(DATA_DIR) def test_missing_file_in_subproject(cli, datafiles): project = os.path.join(str(datafiles), 'missing-element') result = cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) # Assert that we have the expected provenance encoded into the error assert "target.bst [line 4 column 2]" in result.stderr @pytest.mark.datafiles(DATA_DIR) def test_missing_file_in_subsubproject(cli, datafiles): project = os.path.join(str(datafiles), 'missing-element') result = cli.run(project=project, args=['show', 'sub-target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) # Assert that we have the expected provenance encoded into the error assert "junction-A.bst:target.bst [line 4 column 2]" in result.stderr @pytest.mark.datafiles(DATA_DIR) def test_missing_junction_in_subproject(cli, datafiles): project = os.path.join(str(datafiles), 'missing-element') result = cli.run(project=project, args=['show', 'sub-target-bad-junction.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) # Assert that we have the expected provenance encoded into the error assert "junction-A.bst:bad-junction-target.bst [line 4 column 2]" in result.stderr @pytest.mark.datafiles(DATA_DIR) def test_nested_simple(cli, tmpdir, datafiles): foo = os.path.join(str(datafiles), 'foo') copy_subprojects(foo, datafiles, ['base']) project = os.path.join(str(datafiles), 'nested') copy_subprojects(project, datafiles, ['foo']) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 # Check that the checkout contains the expected files from all subprojects assert(os.path.exists(os.path.join(checkoutdir, 'base.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'foo.txt'))) @pytest.mark.datafiles(DATA_DIR) def test_nested_double(cli, tmpdir, datafiles): foo = os.path.join(str(datafiles), 'foo') copy_subprojects(foo, datafiles, ['base']) bar = os.path.join(str(datafiles), 'bar') copy_subprojects(bar, datafiles, ['base']) project = os.path.join(str(datafiles), 'toplevel') copy_subprojects(project, datafiles, ['base', 'foo', 'bar']) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 # Check that the checkout contains the expected files 
from all subprojects assert(os.path.exists(os.path.join(checkoutdir, 'base.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'foo.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'bar.txt'))) @pytest.mark.datafiles(DATA_DIR) def test_nested_conflict(cli, datafiles): foo = os.path.join(str(datafiles), 'foo') copy_subprojects(foo, datafiles, ['base']) bar = os.path.join(str(datafiles), 'bar') copy_subprojects(bar, datafiles, ['base']) project = os.path.join(str(datafiles), 'conflict') copy_subprojects(project, datafiles, ['foo', 'bar']) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.CONFLICTING_JUNCTION) @pytest.mark.datafiles(DATA_DIR) def test_invalid_missing(cli, datafiles): project = os.path.join(str(datafiles), 'invalid') result = cli.run(project=project, args=['build', 'missing.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(DATA_DIR) def test_invalid_with_deps(cli, datafiles): project = os.path.join(str(datafiles), 'invalid') copy_subprojects(project, datafiles, ['base']) result = cli.run(project=project, args=['build', 'junction-with-deps.bst']) result.assert_main_error(ErrorDomain.ELEMENT, 'element-forbidden-depends') @pytest.mark.datafiles(DATA_DIR) def test_invalid_junction_dep(cli, datafiles): project = os.path.join(str(datafiles), 'invalid') copy_subprojects(project, datafiles, ['base']) result = cli.run(project=project, args=['build', 'junction-dep.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(DATA_DIR) def test_options_default(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'options-default') copy_subprojects(project, datafiles, ['options-base']) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 assert(os.path.exists(os.path.join(checkoutdir, 'pony.txt'))) assert(not os.path.exists(os.path.join(checkoutdir, 'horsy.txt'))) @pytest.mark.datafiles(DATA_DIR) def test_options(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'options') copy_subprojects(project, datafiles, ['options-base']) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 assert(not os.path.exists(os.path.join(checkoutdir, 'pony.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'horsy.txt'))) @pytest.mark.datafiles(DATA_DIR) def test_options_inherit(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'options-inherit') copy_subprojects(project, datafiles, ['options-base']) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 assert(not os.path.exists(os.path.join(checkoutdir, 'pony.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'horsy.txt'))) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(DATA_DIR) def test_git_show(cli, tmpdir, datafiles): project = 
os.path.join(str(datafiles), 'foo') checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the repo from 'base' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), 'base')) # Write out junction element with git source element = { 'kind': 'junction', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'base.bst')) # Verify that bst show does not implicitly fetch subproject result = cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.SUBPROJECT_FETCH_NEEDED) # Explicitly fetch subproject result = cli.run(project=project, args=['fetch', 'base.bst']) assert result.exit_code == 0 # Check that bst show succeeds now and the pipeline includes the subproject element element_list = cli.get_pipeline(project, ['target.bst']) assert 'base.bst:target.bst' in element_list @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(DATA_DIR) def test_git_build(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'foo') checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the repo from 'base' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), 'base')) # Write out junction element with git source element = { 'kind': 'junction', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'base.bst')) # Build (with implicit fetch of subproject), checkout result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 # Check that the checkout contains the expected files from both projects assert(os.path.exists(os.path.join(checkoutdir, 'base.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'foo.txt'))) @pytest.mark.datafiles(DATA_DIR) def test_cross_junction_names(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'foo') copy_subprojects(project, datafiles, ['base']) element_list = cli.get_pipeline(project, ['base.bst:target.bst']) assert 'base.bst:target.bst' in element_list @pytest.mark.datafiles(DATA_DIR) def test_build_git_cross_junction_names(cli, tmpdir, datafiles): project = os.path.join(str(datafiles), 'foo') checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the repo from 'base' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), 'base')) # Write out junction element with git source element = { 'kind': 'junction', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'base.bst')) print(element) print(cli.get_pipeline(project, ['base.bst'])) # Build (with implicit fetch of subproject), checkout result = cli.run(project=project, args=['build', 'base.bst:target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'base.bst:target.bst', checkoutdir]) assert result.exit_code == 0 # Check that the checkout contains the expected files from both projects assert(os.path.exists(os.path.join(checkoutdir, 'base.txt'))) 
buildstream-1.6.9/tests/loader/junctions/000077500000000000000000000000001437515270000204725ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/bar/000077500000000000000000000000001437515270000212365ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/bar/app.bst000066400000000000000000000001511437515270000225250ustar00rootroot00000000000000kind: import sources: - kind: local path: bar.txt depends: - junction: base.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/bar/bar.txt000066400000000000000000000000041437515270000225350ustar00rootroot00000000000000bar buildstream-1.6.9/tests/loader/junctions/bar/base.bst000066400000000000000000000000631437515270000226610ustar00rootroot00000000000000kind: junction sources: - kind: local path: base buildstream-1.6.9/tests/loader/junctions/bar/project.conf000066400000000000000000000000121437515270000235440ustar00rootroot00000000000000name: bar buildstream-1.6.9/tests/loader/junctions/bar/target.bst000066400000000000000000000001131437515270000232310ustar00rootroot00000000000000kind: stack depends: - junction: base.bst filename: target.bst - app.bst buildstream-1.6.9/tests/loader/junctions/base/000077500000000000000000000000001437515270000214045ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/base/base.txt000066400000000000000000000000241437515270000230530ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/loader/junctions/base/project.conf000066400000000000000000000000131437515270000237130ustar00rootroot00000000000000name: base buildstream-1.6.9/tests/loader/junctions/base/target.bst000066400000000000000000000000651437515270000234050ustar00rootroot00000000000000kind: import sources: - kind: local path: base.txt buildstream-1.6.9/tests/loader/junctions/conflict/000077500000000000000000000000001437515270000222735ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/conflict/bar.bst000066400000000000000000000000621437515270000235470ustar00rootroot00000000000000kind: junction sources: - kind: local path: bar buildstream-1.6.9/tests/loader/junctions/conflict/foo.bst000066400000000000000000000000621437515270000235660ustar00rootroot00000000000000kind: junction sources: - kind: local path: foo buildstream-1.6.9/tests/loader/junctions/conflict/project.conf000066400000000000000000000000171437515270000246060ustar00rootroot00000000000000name: conflict buildstream-1.6.9/tests/loader/junctions/conflict/target.bst000066400000000000000000000001531437515270000242720ustar00rootroot00000000000000kind: stack depends: - junction: foo.bst filename: target.bst - junction: bar.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/foo/000077500000000000000000000000001437515270000212555ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/foo/app.bst000066400000000000000000000001511437515270000225440ustar00rootroot00000000000000kind: import sources: - kind: local path: foo.txt depends: - junction: base.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/foo/base.bst000066400000000000000000000000631437515270000227000ustar00rootroot00000000000000kind: junction sources: - kind: local path: base buildstream-1.6.9/tests/loader/junctions/foo/foo.txt000066400000000000000000000000041437515270000225730ustar00rootroot00000000000000foo buildstream-1.6.9/tests/loader/junctions/foo/project.conf000066400000000000000000000000121437515270000235630ustar00rootroot00000000000000name: foo 
buildstream-1.6.9/tests/loader/junctions/foo/target.bst000066400000000000000000000001131437515270000232500ustar00rootroot00000000000000kind: stack depends: - junction: base.bst filename: target.bst - app.bst buildstream-1.6.9/tests/loader/junctions/invalid/000077500000000000000000000000001437515270000221205ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/invalid/app.bst000066400000000000000000000000641437515270000234120ustar00rootroot00000000000000kind: import sources: - kind: local path: foo.txt buildstream-1.6.9/tests/loader/junctions/invalid/base-with-deps.bst000066400000000000000000000001061437515270000254430ustar00rootroot00000000000000kind: junction sources: - kind: local path: base depends: - app.bst buildstream-1.6.9/tests/loader/junctions/invalid/base.bst000066400000000000000000000000631437515270000235430ustar00rootroot00000000000000kind: junction sources: - kind: local path: base buildstream-1.6.9/tests/loader/junctions/invalid/foo.txt000066400000000000000000000000041437515270000234360ustar00rootroot00000000000000foo buildstream-1.6.9/tests/loader/junctions/invalid/junction-dep.bst000066400000000000000000000000401437515270000252230ustar00rootroot00000000000000kind: stack depends: - base.bst buildstream-1.6.9/tests/loader/junctions/invalid/junction-with-deps.bst000066400000000000000000000001131437515270000263600ustar00rootroot00000000000000kind: stack depends: - junction: base-with-deps.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/invalid/missing.bst000066400000000000000000000001101437515270000242730ustar00rootroot00000000000000kind: stack depends: - junction: missingfile.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/invalid/project.conf000066400000000000000000000000161437515270000244320ustar00rootroot00000000000000name: invalid buildstream-1.6.9/tests/loader/junctions/missing-element/000077500000000000000000000000001437515270000235725ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/missing-element/junction-A.bst000066400000000000000000000000701437515270000263100ustar00rootroot00000000000000kind: junction sources: - kind: local path: junctionA buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/000077500000000000000000000000001437515270000255245ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/bad-junction-target.bst000066400000000000000000000001221437515270000320720ustar00rootroot00000000000000kind: manual depends: - filename: noelement.bst junction: missing-junction.bst buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/junction-B.bst000066400000000000000000000000701437515270000302430ustar00rootroot00000000000000kind: junction sources: - kind: local path: junctionB buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/junctionB/000077500000000000000000000000001437515270000274575ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/junctionB/project.conf000066400000000000000000000000171437515270000317720ustar00rootroot00000000000000name: projectB buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/project.conf000066400000000000000000000000171437515270000300370ustar00rootroot00000000000000name: projectA buildstream-1.6.9/tests/loader/junctions/missing-element/junctionA/target.bst000066400000000000000000000001111437515270000275150ustar00rootroot00000000000000kind: stack depends: - filename: missing.bst junction: junction-B.bst 
buildstream-1.6.9/tests/loader/junctions/missing-element/project.conf000066400000000000000000000000131437515270000261010ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/loader/junctions/missing-element/sub-target-bad-junction.bst000066400000000000000000000001251437515270000307320ustar00rootroot00000000000000kind: stack depends: - filename: bad-junction-target.bst junction: junction-A.bst buildstream-1.6.9/tests/loader/junctions/missing-element/sub-target.bst000066400000000000000000000001101437515270000263510ustar00rootroot00000000000000kind: stack depends: - filename: target.bst junction: junction-A.bst buildstream-1.6.9/tests/loader/junctions/missing-element/target.bst000066400000000000000000000001111437515270000255630ustar00rootroot00000000000000kind: stack depends: - filename: missing.bst junction: junction-A.bst buildstream-1.6.9/tests/loader/junctions/nested/000077500000000000000000000000001437515270000217545ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/nested/foo.bst000066400000000000000000000000621437515270000232470ustar00rootroot00000000000000kind: junction sources: - kind: local path: foo buildstream-1.6.9/tests/loader/junctions/nested/project.conf000066400000000000000000000000151437515270000242650ustar00rootroot00000000000000name: nested buildstream-1.6.9/tests/loader/junctions/nested/target.bst000066400000000000000000000001001437515270000237430ustar00rootroot00000000000000kind: stack depends: - junction: foo.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/options-base/000077500000000000000000000000001437515270000230755ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/options-base/horsy.txt000066400000000000000000000000061437515270000247760ustar00rootroot00000000000000horsy buildstream-1.6.9/tests/loader/junctions/options-base/pony.txt000066400000000000000000000000051437515270000246160ustar00rootroot00000000000000pony buildstream-1.6.9/tests/loader/junctions/options-base/project.conf000066400000000000000000000002161437515270000254110ustar00rootroot00000000000000name: options-base options: animal: type: enum description: The kind of animal values: - pony - horsy default: pony buildstream-1.6.9/tests/loader/junctions/options-base/target.bst000066400000000000000000000002031437515270000250700ustar00rootroot00000000000000kind: import sources: - kind: local (?): - animal == "pony": path: pony.txt - animal == "horsy": path: horsy.txt buildstream-1.6.9/tests/loader/junctions/options-default/000077500000000000000000000000001437515270000236075ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/options-default/base.bst000066400000000000000000000000731437515270000252330ustar00rootroot00000000000000kind: junction sources: - kind: local path: options-base buildstream-1.6.9/tests/loader/junctions/options-default/project.conf000066400000000000000000000000261437515270000261220ustar00rootroot00000000000000name: options-default buildstream-1.6.9/tests/loader/junctions/options-default/target.bst000066400000000000000000000001011437515270000255770ustar00rootroot00000000000000kind: stack depends: - junction: base.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/options-inherit/000077500000000000000000000000001437515270000236255ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/options-inherit/base.bst000066400000000000000000000001461437515270000252520ustar00rootroot00000000000000kind: junction sources: - kind: local path: options-base config: options: animal: '%{animal}' 
buildstream-1.6.9/tests/loader/junctions/options-inherit/project.conf000066400000000000000000000002471437515270000261450ustar00rootroot00000000000000name: options-inherit options: animal: type: enum description: The kind of animal values: - pony - horsy default: horsy variable: animal buildstream-1.6.9/tests/loader/junctions/options-inherit/target.bst000066400000000000000000000001011437515270000256150ustar00rootroot00000000000000kind: stack depends: - junction: base.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/options/000077500000000000000000000000001437515270000221655ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/options/base.bst000066400000000000000000000001401437515270000236040ustar00rootroot00000000000000kind: junction sources: - kind: local path: options-base config: options: animal: horsy buildstream-1.6.9/tests/loader/junctions/options/project.conf000066400000000000000000000000161437515270000244770ustar00rootroot00000000000000name: options buildstream-1.6.9/tests/loader/junctions/options/target.bst000066400000000000000000000001011437515270000241550ustar00rootroot00000000000000kind: stack depends: - junction: base.bst filename: target.bst buildstream-1.6.9/tests/loader/junctions/toplevel/000077500000000000000000000000001437515270000223245ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/junctions/toplevel/bar.bst000066400000000000000000000000621437515270000236000ustar00rootroot00000000000000kind: junction sources: - kind: local path: bar buildstream-1.6.9/tests/loader/junctions/toplevel/base.bst000066400000000000000000000000631437515270000237470ustar00rootroot00000000000000kind: junction sources: - kind: local path: base buildstream-1.6.9/tests/loader/junctions/toplevel/foo.bst000066400000000000000000000000621437515270000236170ustar00rootroot00000000000000kind: junction sources: - kind: local path: foo buildstream-1.6.9/tests/loader/junctions/toplevel/project.conf000066400000000000000000000000171437515270000246370ustar00rootroot00000000000000name: toplevel buildstream-1.6.9/tests/loader/junctions/toplevel/target.bst000066400000000000000000000001531437515270000243230ustar00rootroot00000000000000kind: stack depends: - junction: foo.bst filename: target.bst - junction: bar.bst filename: target.bst buildstream-1.6.9/tests/loader/variables.py000066400000000000000000000054161437515270000210060ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils import cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'variables', ) PROTECTED_VARIABLES = [('project-name'), ('element-name'), ('max-jobs')] @pytest.mark.parametrize('protected_var', PROTECTED_VARIABLES) @pytest.mark.datafiles(DATA_DIR) def test_use_of_protected_var_project_conf(cli, tmpdir, datafiles, protected_var): project = os.path.join(str(datafiles), 'simple') conf = { 'name': 'test', 'variables': { protected_var: 'some-value' } } _yaml.dump(conf, os.path.join(project, 'project.conf')) element = { 'kind': 'import', 'sources': [ { 'kind': 'local', 'path': 'foo.txt' } ], } _yaml.dump(element, os.path.join(project, 'target.bst')) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED) @pytest.mark.parametrize('protected_var', PROTECTED_VARIABLES) @pytest.mark.datafiles(DATA_DIR) def test_use_of_protected_var_element_overrides(cli, tmpdir, datafiles, 
protected_var): project = os.path.join(str(datafiles), 'simple') conf = { 'name': 'test', 'elements': { 'manual': { 'variables': { protected_var: 'some-value' } } } } _yaml.dump(conf, os.path.join(project, 'project.conf')) element = { 'kind': 'manual', 'sources': [ { 'kind': 'local', 'path': 'foo.txt' } ], } _yaml.dump(element, os.path.join(project, 'target.bst')) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED) @pytest.mark.parametrize('protected_var', PROTECTED_VARIABLES) @pytest.mark.datafiles(DATA_DIR) def test_use_of_protected_var_in_element(cli, tmpdir, datafiles, protected_var): project = os.path.join(str(datafiles), 'simple') element = { 'kind': 'import', 'sources': [ { 'kind': 'local', 'path': 'foo.txt' } ], 'variables': { protected_var: 'some-value' } } _yaml.dump(element, os.path.join(project, 'target.bst')) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROTECTED_VARIABLE_REDEFINED) buildstream-1.6.9/tests/loader/variables/000077500000000000000000000000001437515270000204265ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/variables/simple/000077500000000000000000000000001437515270000217175ustar00rootroot00000000000000buildstream-1.6.9/tests/loader/variables/simple/foo.txt000066400000000000000000000000041437515270000232350ustar00rootroot00000000000000foo buildstream-1.6.9/tests/loader/variables/simple/project.conf000066400000000000000000000000121437515270000242250ustar00rootroot00000000000000name: foo buildstream-1.6.9/tests/pipeline/000077500000000000000000000000001437515270000170155ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load.py000066400000000000000000000165371437515270000203220ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain from buildstream import _yaml from tests.testutils.runcli import cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'load', ) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'simple')) def test_load_simple(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) result = cli.get_element_config(basedir, 'simple.bst') assert(result['configure-commands'][0] == 'pony') ############################################################### # Testing Element.dependencies() iteration # ############################################################### @pytest.mark.datafiles(os.path.join(DATA_DIR, 'iterate')) def test_iterate_scope_all(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) elements = ['target.bst'] element_list = cli.get_pipeline(basedir, elements, scope='all') assert(len(element_list) == 7) assert(element_list[0] == "build-build.bst") assert(element_list[1] == "run-build.bst") assert(element_list[2] == "build.bst") assert(element_list[3] == "dep-one.bst") assert(element_list[4] == "run.bst") assert(element_list[5] == "dep-two.bst") assert(element_list[6] == "target.bst") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'iterate')) def test_iterate_scope_run(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) elements = ['target.bst'] element_list = cli.get_pipeline(basedir, elements, scope='run') assert(len(element_list) == 4) assert(element_list[0] == "dep-one.bst") assert(element_list[1] == "run.bst") assert(element_list[2] == "dep-two.bst") assert(element_list[3] == "target.bst") 
@pytest.mark.datafiles(os.path.join(DATA_DIR, 'iterate')) def test_iterate_scope_build(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) elements = ['target.bst'] element_list = cli.get_pipeline(basedir, elements, scope='build') assert(len(element_list) == 3) assert(element_list[0] == "dep-one.bst") assert(element_list[1] == "run.bst") assert(element_list[2] == "dep-two.bst") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'iterate')) def test_iterate_scope_build_of_child(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) elements = ['target.bst'] element_list = cli.get_pipeline(basedir, elements, scope='build') # First pass, lets check dep-two element = element_list[2] # Pass two, let's look at these element_list = cli.get_pipeline(basedir, [element], scope='build') assert(len(element_list) == 2) assert(element_list[0] == "run-build.bst") assert(element_list[1] == "build.bst") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'iterate')) def test_iterate_no_recurse(cli, datafiles): basedir = os.path.join(datafiles.dirname, datafiles.basename) elements = ['target.bst'] # We abuse the 'plan' scope here to ensure that we call # element.dependencies() with recurse=False - currently, no `bst # show` option does this directly. element_list = cli.get_pipeline(basedir, elements, scope='plan') assert(len(element_list) == 7) assert(element_list[0] == 'build-build.bst') assert(element_list[1] in ['build.bst', 'run-build.bst']) assert(element_list[2] in ['build.bst', 'run-build.bst']) assert(element_list[3] in ['dep-one.bst', 'run.bst', 'dep-two.bst']) assert(element_list[4] in ['dep-one.bst', 'run.bst', 'dep-two.bst']) assert(element_list[5] in ['dep-one.bst', 'run.bst', 'dep-two.bst']) assert(element_list[6] == 'target.bst') # This test checks various constructions of a pipeline # with one or more targets and 0 or more exception elements, # each data set provides the targets, exceptions and expected # result list. 
# @pytest.mark.datafiles(os.path.join(DATA_DIR, 'exceptions')) @pytest.mark.parametrize("elements,exceptions,results", [ # Test without exceptions, let's just see the whole list here (['build.bst'], None, [ 'fourth-level-1.bst', 'third-level-1.bst', 'fourth-level-2.bst', 'third-level-2.bst', 'fourth-level-3.bst', 'third-level-3.bst', 'second-level-1.bst', 'first-level-1.bst', 'first-level-2.bst', 'build.bst', ]), # Test one target and excepting a part of the pipeline, this # removes fourth-level-1 and third-level-1 (['build.bst'], ['third-level-1.bst'], [ 'fourth-level-2.bst', 'third-level-2.bst', 'fourth-level-3.bst', 'third-level-3.bst', 'second-level-1.bst', 'first-level-1.bst', 'first-level-2.bst', 'build.bst', ]), # Test one target and excepting a part of the pipeline, check that # excepted dependencies remain in the pipeline if depended on from # outside of the except element (['build.bst'], ['second-level-1.bst'], [ 'fourth-level-2.bst', 'third-level-2.bst', # first-level-2 depends on this, so not excepted 'first-level-1.bst', 'first-level-2.bst', 'build.bst', ]), # The same as the above test, but excluding the toplevel build.bst, # instead only select the two toplevel dependencies as targets (['first-level-1.bst', 'first-level-2.bst'], ['second-level-1.bst'], [ 'fourth-level-2.bst', 'third-level-2.bst', # first-level-2 depends on this, so not excepted 'first-level-1.bst', 'first-level-2.bst', ]), # Test one target and excepting an element outside the pipeline (['build.bst'], ['unrelated-1.bst'], [ 'fourth-level-2.bst', 'third-level-2.bst', # first-level-2 depends on this, so not excepted 'first-level-1.bst', 'first-level-2.bst', 'build.bst', ]), # Test one target and excepting two elements (['build.bst'], ['unrelated-1.bst', 'unrelated-2.bst'], [ 'first-level-1.bst', 'build.bst', ]), ]) def test_except_elements(cli, datafiles, elements, exceptions, results): basedir = os.path.join(datafiles.dirname, datafiles.basename) # Except second-level-2 and check that the correct dependencies # are removed. 
element_list = cli.get_pipeline(basedir, elements, except_=exceptions, scope='all') assert element_list == results @pytest.mark.datafiles(os.path.join(DATA_DIR, 'noloadref')) @pytest.mark.parametrize("ref_storage", [('inline'), ('project.refs')]) def test_unsupported_load_ref(cli, datafiles, ref_storage): basedir = os.path.join(datafiles.dirname, datafiles.basename) # Generate project with access to the noloadref plugin and project.refs enabled # config = { 'name': 'test', 'ref-storage': ref_storage, 'plugins': [ { 'origin': 'local', 'path': 'plugins', 'sources': { 'noloadref': 0 } } ] } _yaml.dump(config, os.path.join(basedir, 'project.conf')) result = cli.run(project=basedir, silent=True, args=['show', 'noloadref.bst']) # There is no error if project.refs is not in use, otherwise we # assert our graceful failure if ref_storage == 'inline': result.assert_success() else: result.assert_main_error(ErrorDomain.SOURCE, 'unsupported-load-ref') buildstream-1.6.9/tests/pipeline/load/000077500000000000000000000000001437515270000177345ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load/exceptions/000077500000000000000000000000001437515270000221155ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load/exceptions/build.bst000066400000000000000000000001571437515270000237310ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element depends: - first-level-1.bst - first-level-2.bst buildstream-1.6.9/tests/pipeline/load/exceptions/first-level-1.bst000066400000000000000000000001351437515270000252200ustar00rootroot00000000000000kind: autotools description: Depends on a removed dependency depends: - second-level-1.bst buildstream-1.6.9/tests/pipeline/load/exceptions/first-level-2.bst000066400000000000000000000001521437515270000252200ustar00rootroot00000000000000kind: autotools description: Shares a dependency with a removed dependency depends: - third-level-2.bst buildstream-1.6.9/tests/pipeline/load/exceptions/fourth-level-1.bst000066400000000000000000000000571437515270000254030ustar00rootroot00000000000000kind: autotools description: Should be removed buildstream-1.6.9/tests/pipeline/load/exceptions/fourth-level-2.bst000066400000000000000000000000721437515270000254010ustar00rootroot00000000000000kind: autotools description: Should also ~not~ be removed buildstream-1.6.9/tests/pipeline/load/exceptions/fourth-level-3.bst000066400000000000000000000000631437515270000254020ustar00rootroot00000000000000kind: autotools description: Should not be removed buildstream-1.6.9/tests/pipeline/load/exceptions/project.conf000066400000000000000000000001111437515270000244230ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony buildstream-1.6.9/tests/pipeline/load/exceptions/second-level-1.bst000066400000000000000000000003011437515270000253370ustar00rootroot00000000000000kind: autotools description: Depends uniquely on one dependency, shares another, has another unique nested dependency depends: - third-level-1.bst - third-level-2.bst - third-level-3.bst buildstream-1.6.9/tests/pipeline/load/exceptions/third-level-1.bst000066400000000000000000000001531437515270000252030ustar00rootroot00000000000000kind: autotools description: Should be removed, and not "revive" its child depends: - fourth-level-1.bst buildstream-1.6.9/tests/pipeline/load/exceptions/third-level-2.bst000066400000000000000000000001251437515270000252030ustar00rootroot00000000000000kind: autotools description: Should ~not~ be removed depends: - 
fourth-level-2.bst buildstream-1.6.9/tests/pipeline/load/exceptions/third-level-3.bst000066400000000000000000000001751437515270000252110ustar00rootroot00000000000000kind: autotools description: Should be an explicit dependency, and *not* remove its children depends: - fourth-level-3.bst buildstream-1.6.9/tests/pipeline/load/exceptions/unrelated-1.bst000066400000000000000000000002101437515270000247410ustar00rootroot00000000000000kind: autotools description: Unrelated to the rest of the pipeline, not loaded when targeting build.bst depends: - second-level-1.bst buildstream-1.6.9/tests/pipeline/load/exceptions/unrelated-2.bst000066400000000000000000000002071437515270000247500ustar00rootroot00000000000000kind: autotools description: Unrelated to the rest of the pipeline, not loaded when targeting build.bst depends: - first-level-2.bst buildstream-1.6.9/tests/pipeline/load/iterate/000077500000000000000000000000001437515270000213715ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load/iterate/build-build.bst000066400000000000000000000001411437515270000242730ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony buildstream-1.6.9/tests/pipeline/load/iterate/build.bst000066400000000000000000000002761437515270000232070ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony depends: - filename: build-build.bst type: build - filename: run-build.bst type: runtime buildstream-1.6.9/tests/pipeline/load/iterate/dep-one.bst000066400000000000000000000002161437515270000234310ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony depends: - filename: build.bst type: build buildstream-1.6.9/tests/pipeline/load/iterate/dep-two.bst000066400000000000000000000002621437515270000234620ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony depends: - filename: build.bst type: build - filename: run.bst type: runtime buildstream-1.6.9/tests/pipeline/load/iterate/project.conf000066400000000000000000000001111437515270000236770ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony buildstream-1.6.9/tests/pipeline/load/iterate/run-build.bst000066400000000000000000000001411437515270000240000ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony buildstream-1.6.9/tests/pipeline/load/iterate/run.bst000066400000000000000000000001411437515270000227030ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony buildstream-1.6.9/tests/pipeline/load/iterate/target.bst000066400000000000000000000002061437515270000233670ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony depends: - dep-one.bst - dep-two.bst buildstream-1.6.9/tests/pipeline/load/noloadref/000077500000000000000000000000001437515270000217055ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load/noloadref/noloadref.bst000066400000000000000000000001701437515270000243660ustar00rootroot00000000000000kind: import description: | Import a source which does not support the load_ref() method sources: - kind: noloadref 
buildstream-1.6.9/tests/pipeline/load/noloadref/plugins/000077500000000000000000000000001437515270000233665ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load/noloadref/plugins/noloadref.py000066400000000000000000000011771437515270000257170ustar00rootroot00000000000000from buildstream import Source, Consistency # Just a dummy plugin which does not support the new load_ref() method. # # Use this to test that the core behaves as expected with such plugins. # class NoLoadRefSource(Source): def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} def get_consistency(self): return Consistency.CACHED def get_ref(self): return None def set_ref(self, ref, node): pass def fetch(self): pass def stage(self, directory): pass def setup(): return NoLoadRefSource buildstream-1.6.9/tests/pipeline/load/noloadref/project.refs000066400000000000000000000001751437515270000242370ustar00rootroot00000000000000# A project.refs file with an existing ref for the noloadref element # projects: test: noloadref.bst: - ref: dummy buildstream-1.6.9/tests/pipeline/load/simple/000077500000000000000000000000001437515270000212255ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/load/simple/project.conf000066400000000000000000000001111437515270000235330ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony buildstream-1.6.9/tests/pipeline/load/simple/simple.bst000066400000000000000000000001411437515270000232240ustar00rootroot00000000000000kind: autotools description: Some kinda autotools element config: configure-commands: - pony buildstream-1.6.9/tests/pipeline/preflight-error/000077500000000000000000000000001437515270000221305ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/preflight-error/error.bst000066400000000000000000000001551437515270000237740ustar00rootroot00000000000000kind: import description: An element with a failing source at preflight time sources: - kind: preflighterror buildstream-1.6.9/tests/pipeline/preflight-error/errorplugin/000077500000000000000000000000001437515270000245005ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/preflight-error/errorplugin/__init__.py000066400000000000000000000000001437515270000265770ustar00rootroot00000000000000buildstream-1.6.9/tests/pipeline/preflight-error/errorplugin/preflighterror.py000066400000000000000000000012671437515270000301160ustar00rootroot00000000000000from buildstream import Source, SourceError, Consistency class PreflightErrorSource(Source): def configure(self, node): pass def preflight(self): # Raise a preflight error unconditionally raise SourceError("Unsatisfied requirements in preflight, raising this error", reason="the-preflight-error") def get_unique_key(self): return {} def get_consistency(self): return Consistency.CACHED def get_ref(self): return None def set_ref(self, ref, node): pass def fetch(self): pass def stage(self, directory): pass def setup(): return PreflightErrorSource buildstream-1.6.9/tests/pipeline/preflight-error/project.conf000066400000000000000000000003271437515270000244470ustar00rootroot00000000000000# Basic project configuration that doesnt override anything # name: pony # Whitelist the local test Source "errorplugin" to be loaded # plugins: - origin: local path: errorplugin sources: preflighterror: 0 buildstream-1.6.9/tests/pipeline/preflight.py000066400000000000000000000010211437515270000213450ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain from 
tests.testutils.runcli import cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'preflight-error', ) @pytest.mark.datafiles(DATA_DIR) def test_load_simple(cli, datafiles, tmpdir): basedir = os.path.join(datafiles.dirname, datafiles.basename) # Lets try to fetch it... result = cli.run(project=basedir, args=['fetch', 'error.bst']) result.assert_main_error(ErrorDomain.SOURCE, "the-preflight-error") buildstream-1.6.9/tests/plugins/000077500000000000000000000000001437515270000166715ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/__init__.py000066400000000000000000000000001437515270000207700ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics.py000066400000000000000000000247421437515270000205200ustar00rootroot00000000000000import os import pytest from pluginbase import PluginBase from buildstream._elementfactory import ElementFactory from buildstream._sourcefactory import SourceFactory from buildstream._exceptions import PluginError DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'basics', ) # Simple fixture to create a PluginBase object that # we use for loading plugins. @pytest.fixture() def plugin_fixture(): return { 'base': PluginBase(package='buildstream.plugins') } ############################################################## # Basics: test the fixture, test we can create the factories # ############################################################## def test_fixture(plugin_fixture): assert(isinstance(plugin_fixture['base'], PluginBase)) def test_source_factory(plugin_fixture): factory = SourceFactory(plugin_fixture['base']) assert(isinstance(factory, SourceFactory)) def test_element_factory(plugin_fixture): factory = ElementFactory(plugin_fixture['base']) assert(isinstance(factory, ElementFactory)) ############################################################## # Check that we can load custom sources & elements # ############################################################## @pytest.mark.datafiles(os.path.join(DATA_DIR, 'customsource')) def test_custom_source(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=plugins) assert(isinstance(factory, SourceFactory)) foo_type, _ = factory.lookup('foo') assert(foo_type.__name__ == 'FooSource') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'customelement')) def test_custom_element(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=plugins) assert(isinstance(factory, ElementFactory)) foo_type, _ = factory.lookup('foo') assert(foo_type.__name__ == 'FooElement') ############################################################## # Check plugin loading failure modes # ############################################################## def test_missing_source(plugin_fixture): factory = SourceFactory(plugin_fixture['base']) assert(isinstance(factory, SourceFactory)) # Test fails if PluginError is not raised with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') def test_missing_element(plugin_fixture): factory = ElementFactory(plugin_fixture['base']) assert(isinstance(factory, ElementFactory)) # Test fails if PluginError is not raised with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin that returns a value 
instead of Source subclass @pytest.mark.datafiles(os.path.join(DATA_DIR, 'notatype')) def test_source_notatype(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin that returns a value instead of Element subclass @pytest.mark.datafiles(os.path.join(DATA_DIR, 'notatype')) def test_element_notatype(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin that returns a type # which is not a Source subclass @pytest.mark.datafiles(os.path.join(DATA_DIR, 'wrongtype')) def test_source_wrongtype(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin that returns a type # which is not a Element subclass @pytest.mark.datafiles(os.path.join(DATA_DIR, 'wrongtype')) def test_element_wrongtype(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin which fails to provide a setup() function @pytest.mark.datafiles(os.path.join(DATA_DIR, 'nosetup')) def test_source_missing_setup(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin which fails to provide a setup() function @pytest.mark.datafiles(os.path.join(DATA_DIR, 'nosetup')) def test_element_missing_setup(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin which provides a setup symbol # that is not a function @pytest.mark.datafiles(os.path.join(DATA_DIR, 'badsetup')) def test_source_bad_setup(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin which provides a setup symbol # that is not a function @pytest.mark.datafiles(os.path.join(DATA_DIR, 'badsetup')) def test_element_bad_setup(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=plugins) with 
pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin which requires an absurdly # high version of buildstream @pytest.mark.datafiles(os.path.join(DATA_DIR, 'badversionsource')) def test_source_badversion(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') # Load a factory with a plugin which requires an absurdly # high version of buildstream @pytest.mark.datafiles(os.path.join(DATA_DIR, 'badversionelement')) def test_element_badversion(plugin_fixture, datafiles): plugins = [{ 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename), 'plugins': {'foo': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=plugins) with pytest.raises(PluginError) as exc: foo_type = factory.lookup('foo') ############################################################## # Check we can load different contexts of plugin # ############################################################## # Load two factories, both of which define a different 'foo' plugin @pytest.mark.datafiles(DATA_DIR) def test_source_multicontext(plugin_fixture, datafiles): plugins1 = { 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename, 'customsource'), 'plugins': {'foo': 0} } plugins2 = { 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename, 'anothersource'), 'plugins': {'foo': 0} } factory1 = SourceFactory(plugin_fixture['base'], plugin_origins=[plugins1]) factory2 = SourceFactory(plugin_fixture['base'], plugin_origins=[plugins2]) assert(isinstance(factory1, SourceFactory)) assert(isinstance(factory2, SourceFactory)) foo_type1, _ = factory1.lookup('foo') foo_type2, _ = factory2.lookup('foo') assert(foo_type1.__name__ == 'FooSource') assert(foo_type2.__name__ == 'AnotherFooSource') # Load two factories, both of which define a different 'foo' plugin @pytest.mark.datafiles(DATA_DIR) def test_element_multicontext(plugin_fixture, datafiles): plugins1 = { 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename, 'customelement'), 'plugins': {'foo': 0} } plugins2 = { 'origin': 'local', 'path': os.path.join(datafiles.dirname, datafiles.basename, 'anotherelement'), 'plugins': {'foo': 0} } factory1 = ElementFactory(plugin_fixture['base'], plugin_origins=[plugins1]) factory2 = ElementFactory(plugin_fixture['base'], plugin_origins=[plugins2]) assert(isinstance(factory1, ElementFactory)) assert(isinstance(factory2, ElementFactory)) foo_type1, _ = factory1.lookup('foo') foo_type2, _ = factory2.lookup('foo') assert(foo_type1.__name__ == 'FooElement') assert(foo_type2.__name__ == 'AnotherFooElement') buildstream-1.6.9/tests/plugins/basics/000077500000000000000000000000001437515270000201355ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/anotherelement/000077500000000000000000000000001437515270000231475ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/anotherelement/__init__.py000066400000000000000000000000001437515270000252460ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/anotherelement/foo.py000066400000000000000000000001711437515270000243030ustar00rootroot00000000000000from buildstream import Element class AnotherFooElement(Element): pass def setup(): return AnotherFooElement 
buildstream-1.6.9/tests/plugins/basics/anothersource/000077500000000000000000000000001437515270000230165ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/anothersource/__init__.py000066400000000000000000000000001437515270000251150ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/anothersource/foo.py000066400000000000000000000001651437515270000241550ustar00rootroot00000000000000from buildstream import Source class AnotherFooSource(Source): pass def setup(): return AnotherFooSource buildstream-1.6.9/tests/plugins/basics/badsetup/000077500000000000000000000000001437515270000217445ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/badsetup/__init__.py000066400000000000000000000000001437515270000240430ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/badsetup/foo.py000066400000000000000000000002661437515270000231050ustar00rootroot00000000000000# A plugin is supposed to define a setup function # which returns the type that the plugin provides # # This plugin provides a setup() symbol that is # not even a function setup = 5 buildstream-1.6.9/tests/plugins/basics/badversionelement/000077500000000000000000000000001437515270000236435ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/badversionelement/__init__.py000066400000000000000000000000001437515270000257420ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/badversionelement/foo.py000066400000000000000000000003141437515270000247760ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): # We have a little while until we have to manually modify this BST_REQUIRED_VERSION_MAJOR = 5000 def setup(): return FooElement buildstream-1.6.9/tests/plugins/basics/badversionsource/000077500000000000000000000000001437515270000235125ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/badversionsource/__init__.py000066400000000000000000000000001437515270000256110ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/badversionsource/foo.py000066400000000000000000000003101437515270000246410ustar00rootroot00000000000000from buildstream import Source class FooSource(Source): # We have a little while until we have to manually modify this BST_REQUIRED_VERSION_MAJOR = 5000 def setup(): return FooSource buildstream-1.6.9/tests/plugins/basics/customelement/000077500000000000000000000000001437515270000230215ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/customelement/__init__.py000066400000000000000000000000001437515270000251200ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/customelement/foo.py000066400000000000000000000001531437515270000241550ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): pass def setup(): return FooElement buildstream-1.6.9/tests/plugins/basics/customsource/000077500000000000000000000000001437515270000226705ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/customsource/__init__.py000066400000000000000000000000001437515270000247670ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/customsource/foo.py000066400000000000000000000001471437515270000240270ustar00rootroot00000000000000from buildstream import Source class FooSource(Source): pass def setup(): return FooSource 
buildstream-1.6.9/tests/plugins/basics/nosetup/000077500000000000000000000000001437515270000216325ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/nosetup/__init__.py000066400000000000000000000000001437515270000237310ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/nosetup/foo.py000066400000000000000000000002551437515270000227710ustar00rootroot00000000000000# A plugin is supposed to define a setup function # which returns the type that the plugin provides # # This plugin fails to do so def useless(): print("Hello World") buildstream-1.6.9/tests/plugins/basics/notatype/000077500000000000000000000000001437515270000220005ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/notatype/__init__.py000066400000000000000000000000001437515270000240770ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/notatype/foo.py000066400000000000000000000001771437515270000231420ustar00rootroot00000000000000# Plugins are supposed to return a subclass type # of Source or Element, depending on plugin type. def setup(): return 5 buildstream-1.6.9/tests/plugins/basics/wrongtype/000077500000000000000000000000001437515270000221735ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/wrongtype/__init__.py000066400000000000000000000000001437515270000242720ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/basics/wrongtype/foo.py000066400000000000000000000002741437515270000233330ustar00rootroot00000000000000# Plugins are supposed to return a subclass type # of Source or Element, depending on plugin type. # # This one fails the requirement class Foo(): pass def setup(): return Foo buildstream-1.6.9/tests/plugins/bst2.py000066400000000000000000000035031437515270000201160ustar00rootroot00000000000000# Pylint doesn't play well with fixtures and dependency injection from pytest # pylint: disable=redefined-outer-name # # This test case tests the failure modes of loading a plugin # after it has already been discovered via it's origin. # import os import pytest from buildstream._exceptions import ErrorDomain from tests.testutils import cli # pylint: disable=unused-import from buildstream import _yaml DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "bst2") # Sets up the element.bst file so that it requires a source # or element plugin. 
# def setup_element(project_path, plugin_type, plugin_name): element_path = os.path.join(project_path, "element.bst") if plugin_type == "elements": element = {"kind": plugin_name} else: element = {"kind": "manual", "sources": [{"kind": plugin_name}]} _yaml.dump(element, element_path) #################################################### # Tests # #################################################### @pytest.mark.datafiles(DATA_DIR) @pytest.mark.parametrize("plugin_type", ["elements", "sources"]) @pytest.mark.parametrize("plugin", ["bst2", "malformed"]) def test_plugin_bst2(cli, datafiles, plugin_type, plugin): project = str(datafiles) project_conf_path = os.path.join(project, "project.conf") project_conf = { "name": "test", "plugins": [ { "origin": "local", "path": plugin_type, plugin_type: { plugin: 0 } } ] } _yaml.dump(project_conf, project_conf_path) setup_element(project, plugin_type, plugin) result = cli.run(project=project, args=["show", "element.bst"]) result.assert_main_error(ErrorDomain.PLUGIN, "plugin-version-mismatch") buildstream-1.6.9/tests/plugins/bst2/000077500000000000000000000000001437515270000175435ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/bst2/elements/000077500000000000000000000000001437515270000213575ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/bst2/elements/bst2.py000066400000000000000000000004161437515270000226040ustar00rootroot00000000000000from buildstream import Element class Found(Element): BST_MIN_VERSION = "2.0" def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} # Plugin entry point def setup(): return Found buildstream-1.6.9/tests/plugins/bst2/elements/malformed.py000066400000000000000000000004121437515270000236740ustar00rootroot00000000000000from buildstream import Element class Found(Element): BST_MIN_VERSION = 5 def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} # Plugin entry point def setup(): return Found buildstream-1.6.9/tests/plugins/bst2/sources/000077500000000000000000000000001437515270000212265ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/bst2/sources/bst2.py000066400000000000000000000007021437515270000224510ustar00rootroot00000000000000from buildstream import Source class Found(Source): BST_MIN_VERSION = "2.0" def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} def load_ref(self, node): pass def get_ref(self): return {} def set_ref(self, ref, node): pass def is_cached(self): return False # Plugin entry point def setup(): return Found buildstream-1.6.9/tests/plugins/bst2/sources/malformed.py000066400000000000000000000007051437515270000235500ustar00rootroot00000000000000from buildstream import Source class Found(Source): BST_MIN_VERSION = "a pony" def configure(self, node): pass def preflight(self): pass def get_unique_key(self): return {} def load_ref(self, node): pass def get_ref(self): return {} def set_ref(self, ref, node): pass def is_cached(self): return False # Plugin entry point def setup(): return Found buildstream-1.6.9/tests/plugins/filter.py000066400000000000000000000421071437515270000205340ustar00rootroot00000000000000import os import pytest import shutil from tests.testutils import cli, create_repo, ALL_REPO_KINDS from buildstream._exceptions import ErrorDomain from buildstream import _yaml DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'filter', ) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_include(datafiles, 
cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'output-include.bst']) result.assert_success() checkout = os.path.join(tmpdir.dirname, tmpdir.basename, 'checkout') result = cli.run(project=project, args=['checkout', 'output-include.bst', checkout]) result.assert_success() assert os.path.exists(os.path.join(checkout, "foo")) assert not os.path.exists(os.path.join(checkout, "bar")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_include_dynamic(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'output-dynamic-include.bst']) result.assert_success() checkout = os.path.join(tmpdir.dirname, tmpdir.basename, 'checkout') result = cli.run(project=project, args=['checkout', 'output-dynamic-include.bst', checkout]) result.assert_success() assert os.path.exists(os.path.join(checkout, "foo")) assert not os.path.exists(os.path.join(checkout, "bar")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_exclude(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'output-exclude.bst']) result.assert_success() checkout = os.path.join(tmpdir.dirname, tmpdir.basename, 'checkout') result = cli.run(project=project, args=['checkout', 'output-exclude.bst', checkout]) result.assert_success() assert not os.path.exists(os.path.join(checkout, "foo")) assert os.path.exists(os.path.join(checkout, "bar")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_orphans(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'output-orphans.bst']) result.assert_success() checkout = os.path.join(tmpdir.dirname, tmpdir.basename, 'checkout') result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout]) result.assert_success() assert os.path.exists(os.path.join(checkout, "baz")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_deps_ok(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'deps-permitted.bst']) result.assert_success() result = cli.run(project=project, args=['show', '--deps=run', "--format='%{name}'", 'deps-permitted.bst']) result.assert_success() assert 'output-exclude.bst' in result.output assert 'output-orphans.bst' in result.output @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_forbid_sources(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'forbidden-source.bst']) result.assert_main_error(ErrorDomain.ELEMENT, 'element-forbidden-sources') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_forbid_multi_bdep(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'forbidden-multi-bdep.bst']) result.assert_main_error(ErrorDomain.ELEMENT, 'filter-bdepend-wrong-count') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_forbid_no_bdep(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'forbidden-no-bdep.bst']) result.assert_main_error(ErrorDomain.ELEMENT, 'filter-bdepend-wrong-count') 
@pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_forbid_also_rdep(datafiles, cli): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'forbidden-also-rdep.bst']) result.assert_main_error(ErrorDomain.ELEMENT, 'filter-bdepend-also-rdepend') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_workspace_open(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) workspace_dir = os.path.join(tmpdir.dirname, tmpdir.basename, "workspace") result = cli.run(project=project, args=['workspace', 'open', 'deps-permitted.bst', workspace_dir]) result.assert_success() assert os.path.exists(os.path.join(workspace_dir, "foo")) assert os.path.exists(os.path.join(workspace_dir, "bar")) assert os.path.exists(os.path.join(workspace_dir, "baz")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_workspace_build(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) tempdir = os.path.join(tmpdir.dirname, tmpdir.basename) workspace_dir = os.path.join(tempdir, "workspace") result = cli.run(project=project, args=['workspace', 'open', 'output-orphans.bst', workspace_dir]) result.assert_success() src = os.path.join(workspace_dir, "foo") dst = os.path.join(workspace_dir, "quux") shutil.copyfile(src, dst) result = cli.run(project=project, args=['build', 'output-orphans.bst']) result.assert_success() checkout_dir = os.path.join(tempdir, "checkout") result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout_dir]) result.assert_success() assert os.path.exists(os.path.join(checkout_dir, "quux")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_workspace_close(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) tempdir = os.path.join(tmpdir.dirname, tmpdir.basename) workspace_dir = os.path.join(tempdir, "workspace") result = cli.run(project=project, args=['workspace', 'open', 'output-orphans.bst', workspace_dir]) result.assert_success() src = os.path.join(workspace_dir, "foo") dst = os.path.join(workspace_dir, "quux") shutil.copyfile(src, dst) result = cli.run(project=project, args=['workspace', 'close', 'deps-permitted.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'output-orphans.bst']) result.assert_success() checkout_dir = os.path.join(tempdir, "checkout") result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout_dir]) result.assert_success() assert not os.path.exists(os.path.join(checkout_dir, "quux")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_workspace_reset(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) tempdir = os.path.join(tmpdir.dirname, tmpdir.basename) workspace_dir = os.path.join(tempdir, "workspace") result = cli.run(project=project, args=['workspace', 'open', 'output-orphans.bst', workspace_dir]) result.assert_success() src = os.path.join(workspace_dir, "foo") dst = os.path.join(workspace_dir, "quux") shutil.copyfile(src, dst) result = cli.run(project=project, args=['workspace', 'reset', 'deps-permitted.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'output-orphans.bst']) result.assert_success() checkout_dir = os.path.join(tempdir, "checkout") result = cli.run(project=project, args=['checkout', 'output-orphans.bst', checkout_dir]) result.assert_success() assert not 
os.path.exists(os.path.join(checkout_dir, "quux")) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_track(datafiles, cli, tmpdir): repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), "files")) elements_dir = os.path.join(str(tmpdir), "elements") project = str(tmpdir) input_name = "input.bst" project_config = { "name": "filter-track-test", "element-path": "elements", } project_file = os.path.join(str(tmpdir), "project.conf") _yaml.dump(project_config, project_file) input_config = { "kind": "import", "sources": [repo.source_config()], } input_file = os.path.join(elements_dir, input_name) _yaml.dump(input_config, input_file) filter1_config = { "kind": "filter", "depends": [ {"filename": input_name, "type": "build"} ] } filter1_file = os.path.join(elements_dir, "filter1.bst") _yaml.dump(filter1_config, filter1_file) filter2_config = { "kind": "filter", "depends": [ {"filename": "filter1.bst", "type": "build"} ] } filter2_file = os.path.join(elements_dir, "filter2.bst") _yaml.dump(filter2_config, filter2_file) # Assert that a fetch is needed assert cli.get_element_state(project, input_name) == 'no reference' # Now try to track it result = cli.run(project=project, args=["track", "filter2.bst"]) result.assert_success() # Now check that a ref field exists new_input = _yaml.load(input_file) assert new_input["sources"][0]["ref"] == ref @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_track_excepted(datafiles, cli, tmpdir): repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), "files")) elements_dir = os.path.join(str(tmpdir), "elements") project = str(tmpdir) input_name = "input.bst" project_config = { "name": "filter-track-test", "element-path": "elements", } project_file = os.path.join(str(tmpdir), "project.conf") _yaml.dump(project_config, project_file) input_config = { "kind": "import", "sources": [repo.source_config()], } input_file = os.path.join(elements_dir, input_name) _yaml.dump(input_config, input_file) filter1_config = { "kind": "filter", "depends": [ {"filename": input_name, "type": "build"} ] } filter1_file = os.path.join(elements_dir, "filter1.bst") _yaml.dump(filter1_config, filter1_file) filter2_config = { "kind": "filter", "depends": [ {"filename": "filter1.bst", "type": "build"} ] } filter2_file = os.path.join(elements_dir, "filter2.bst") _yaml.dump(filter2_config, filter2_file) # Assert that a fetch is needed assert cli.get_element_state(project, input_name) == 'no reference' # Now try to track it result = cli.run(project=project, args=["track", "filter2.bst", "--except", "input.bst"]) result.assert_success() # Now check that a ref field exists new_input = _yaml.load(input_file) assert "ref" not in new_input["sources"][0] @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_track_multi_to_one(datafiles, cli, tmpdir): repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), "files")) elements_dir = os.path.join(str(tmpdir), "elements") project = str(tmpdir) input_name = "input.bst" project_config = { "name": "filter-track-test", "element-path": "elements", } project_file = os.path.join(str(tmpdir), "project.conf") _yaml.dump(project_config, project_file) input_config = { "kind": "import", "sources": [repo.source_config()], } input_file = os.path.join(elements_dir, input_name) _yaml.dump(input_config, input_file) filter1_config = { "kind": "filter", "depends": [ {"filename": input_name, "type": "build"} ] } filter1_file = 
os.path.join(elements_dir, "filter1.bst") _yaml.dump(filter1_config, filter1_file) filter2_config = { "kind": "filter", "depends": [ {"filename": input_name, "type": "build"} ] } filter2_file = os.path.join(elements_dir, "filter2.bst") _yaml.dump(filter2_config, filter2_file) # Assert that a fetch is needed assert cli.get_element_state(project, input_name) == 'no reference' # Now try to track it result = cli.run(project=project, args=["track", "filter1.bst", "filter2.bst"]) result.assert_success() # Now check that a ref field exists new_input = _yaml.load(input_file) assert new_input["sources"][0]["ref"] == ref @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_track_multi(datafiles, cli, tmpdir): repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), "files")) elements_dir = os.path.join(str(tmpdir), "elements") project = str(tmpdir) input_name = "input.bst" input2_name = "input2.bst" project_config = { "name": "filter-track-test", "element-path": "elements", } project_file = os.path.join(str(tmpdir), "project.conf") _yaml.dump(project_config, project_file) input_config = { "kind": "import", "sources": [repo.source_config()], } input_file = os.path.join(elements_dir, input_name) _yaml.dump(input_config, input_file) input2_config = dict(input_config) input2_file = os.path.join(elements_dir, input2_name) _yaml.dump(input2_config, input2_file) filter1_config = { "kind": "filter", "depends": [ {"filename": input_name, "type": "build"} ] } filter1_file = os.path.join(elements_dir, "filter1.bst") _yaml.dump(filter1_config, filter1_file) filter2_config = { "kind": "filter", "depends": [ {"filename": input2_name, "type": "build"} ] } filter2_file = os.path.join(elements_dir, "filter2.bst") _yaml.dump(filter2_config, filter2_file) # Assert that a fetch is needed assert cli.get_element_state(project, input_name) == 'no reference' assert cli.get_element_state(project, input2_name) == 'no reference' # Now try to track it result = cli.run(project=project, args=["track", "filter1.bst", "filter2.bst"]) result.assert_success() # Now check that a ref field exists new_input = _yaml.load(input_file) assert new_input["sources"][0]["ref"] == ref new_input2 = _yaml.load(input2_file) assert new_input2["sources"][0]["ref"] == ref @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_track_multi_exclude(datafiles, cli, tmpdir): repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(str(datafiles), "files")) elements_dir = os.path.join(str(tmpdir), "elements") project = str(tmpdir) input_name = "input.bst" input2_name = "input2.bst" project_config = { "name": "filter-track-test", "element-path": "elements", } project_file = os.path.join(str(tmpdir), "project.conf") _yaml.dump(project_config, project_file) input_config = { "kind": "import", "sources": [repo.source_config()], } input_file = os.path.join(elements_dir, input_name) _yaml.dump(input_config, input_file) input2_config = dict(input_config) input2_file = os.path.join(elements_dir, input2_name) _yaml.dump(input2_config, input2_file) filter1_config = { "kind": "filter", "depends": [ {"filename": input_name, "type": "build"} ] } filter1_file = os.path.join(elements_dir, "filter1.bst") _yaml.dump(filter1_config, filter1_file) filter2_config = { "kind": "filter", "depends": [ {"filename": input2_name, "type": "build"} ] } filter2_file = os.path.join(elements_dir, "filter2.bst") _yaml.dump(filter2_config, filter2_file) # Assert that a fetch is needed assert 
cli.get_element_state(project, input_name) == 'no reference' assert cli.get_element_state(project, input2_name) == 'no reference' # Now try to track it result = cli.run(project=project, args=["track", "filter1.bst", "filter2.bst", "--except", input_name]) result.assert_success() # Now check that a ref field exists new_input = _yaml.load(input_file) assert "ref" not in new_input["sources"][0] new_input2 = _yaml.load(input2_file) assert new_input2["sources"][0]["ref"] == ref @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_filter_include_with_indirect_deps(datafiles, cli, tmpdir): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=[ 'build', 'output-include-with-indirect-deps.bst']) result.assert_success() checkout = os.path.join(tmpdir.dirname, tmpdir.basename, 'checkout') result = cli.run(project=project, args=[ 'checkout', 'output-include-with-indirect-deps.bst', checkout]) result.assert_success() # direct dependencies should be staged and filtered assert os.path.exists(os.path.join(checkout, "baz")) # indirect dependencies shouldn't be staged and filtered assert not os.path.exists(os.path.join(checkout, "foo")) assert not os.path.exists(os.path.join(checkout, "bar")) buildstream-1.6.9/tests/plugins/filter/000077500000000000000000000000001437515270000201565ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/000077500000000000000000000000001437515270000212375ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/element_plugins/000077500000000000000000000000001437515270000244315ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/element_plugins/dynamic.py000066400000000000000000000016551437515270000264360ustar00rootroot00000000000000from buildstream import Element, Scope # Copies files from the dependent element but inserts split-rules using dynamic data class DynamicElement(Element): def configure(self, node): self.node_validate(node, ['split-rules']) self.split_rules = self.node_get_member(node, dict, 'split-rules') def preflight(self): pass def get_unique_key(self): return {'split-rules': self.split_rules} def configure_sandbox(self, sandbox): pass def stage(self, sandbox): pass def assemble(self, sandbox): with self.timed_activity("Staging artifact", silent_nested=True): for dep in self.dependencies(Scope.BUILD): dep.stage_artifact(sandbox) bstdata = self.get_public_data("bst") bstdata["split-rules"] = self.split_rules self.set_public_data("bst", bstdata) return "" def setup(): return DynamicElement buildstream-1.6.9/tests/plugins/filter/basic/elements/000077500000000000000000000000001437515270000230535ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/elements/deps-permitted.bst000066400000000000000000000002741437515270000265160ustar00rootroot00000000000000kind: filter depends: - filename: output-include.bst type: build - filename: output-exclude.bst type: runtime - filename: output-orphans.bst type: runtime config: include: - foo buildstream-1.6.9/tests/plugins/filter/basic/elements/forbidden-also-rdep.bst000066400000000000000000000002371437515270000274070ustar00rootroot00000000000000kind: filter depends: - filename: output-include.bst type: all - filename: output-exclude.bst type: runtime - filename: output-orphans.bst type: runtime buildstream-1.6.9/tests/plugins/filter/basic/elements/forbidden-multi-bdep.bst000066400000000000000000000002371437515270000275630ustar00rootroot00000000000000kind: filter depends: - filename: 
output-include.bst type: build - filename: output-exclude.bst type: build - filename: output-orphans.bst type: runtime buildstream-1.6.9/tests/plugins/filter/basic/elements/forbidden-no-bdep.bst000066400000000000000000000002431437515270000270420ustar00rootroot00000000000000kind: filter depends: - filename: output-include.bst type: runtime - filename: output-exclude.bst type: runtime - filename: output-orphans.bst type: runtime buildstream-1.6.9/tests/plugins/filter/basic/elements/forbidden-source.bst000066400000000000000000000002031437515270000270120ustar00rootroot00000000000000kind: filter depends: - filename: output-include.bst type: build config: include: - foo sources: - kind: local path: files buildstream-1.6.9/tests/plugins/filter/basic/elements/input-dynamic.bst000066400000000000000000000001721437515270000263460ustar00rootroot00000000000000kind: dynamic depends: - filename: input.bst type: build config: split-rules: foo: - /foo bar: - /bar buildstream-1.6.9/tests/plugins/filter/basic/elements/input-with-deps.bst000066400000000000000000000002141437515270000266230ustar00rootroot00000000000000kind: import depends: - filename: input.bst sources: - kind: local path: files public: bst: split-rules: baz: - /baz buildstream-1.6.9/tests/plugins/filter/basic/elements/input.bst000066400000000000000000000002021437515270000247160ustar00rootroot00000000000000kind: import sources: - kind: local path: files public: bst: split-rules: foo: - /foo bar: - /bar buildstream-1.6.9/tests/plugins/filter/basic/elements/output-dynamic-include.bst000066400000000000000000000001351437515270000301670ustar00rootroot00000000000000kind: filter depends: - filename: input-dynamic.bst type: build config: include: - foo buildstream-1.6.9/tests/plugins/filter/basic/elements/output-exclude.bst000066400000000000000000000001251437515270000265520ustar00rootroot00000000000000kind: filter depends: - filename: input.bst type: build config: exclude: - foo buildstream-1.6.9/tests/plugins/filter/basic/elements/output-include-with-indirect-deps.bst000066400000000000000000000001051437515270000322430ustar00rootroot00000000000000kind: filter depends: - filename: input-with-deps.bst type: build buildstream-1.6.9/tests/plugins/filter/basic/elements/output-include.bst000066400000000000000000000001251437515270000265440ustar00rootroot00000000000000kind: filter depends: - filename: input.bst type: build config: include: - foo buildstream-1.6.9/tests/plugins/filter/basic/elements/output-orphans.bst000066400000000000000000000001651437515270000265770ustar00rootroot00000000000000kind: filter depends: - filename: input.bst type: build config: exclude: - foo - bar include-orphans: True buildstream-1.6.9/tests/plugins/filter/basic/files/000077500000000000000000000000001437515270000223415ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/files/bar000066400000000000000000000000001437515270000230160ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/files/baz000066400000000000000000000000001437515270000230260ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/files/foo000066400000000000000000000000001437515270000230350ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/filter/basic/project.conf000066400000000000000000000001561437515270000235560ustar00rootroot00000000000000name: test element-path: elements plugins: - origin: local path: element_plugins elements: dynamic: 0 
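# Editor's note: illustrative sketch only, not one of the checked-in test files.
# The filter tracking tests earlier in this section build the same project layout
# by hand in every test.  Assuming the helpers they already use (os, _yaml and the
# testutils repo object), that shared setup could be factored out roughly as below;
# the helper name and signature are assumptions, not BuildStream API.
def _make_filter_track_project(tmpdir, repo, input_name="input.bst"):
    # Write a minimal project.conf plus an import element wrapping the test repo
    elements_dir = os.path.join(str(tmpdir), "elements")
    os.makedirs(elements_dir, exist_ok=True)
    project_config = {"name": "filter-track-test", "element-path": "elements"}
    _yaml.dump(project_config, os.path.join(str(tmpdir), "project.conf"))
    input_config = {"kind": "import", "sources": [repo.source_config()]}
    _yaml.dump(input_config, os.path.join(elements_dir, input_name))
    return elements_dir
# A test would then only need to add its filter elements (e.g. filter1.bst and
# filter2.bst) before running `bst track`.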
buildstream-1.6.9/tests/plugins/pipeline.py000066400000000000000000000037611437515270000210570ustar00rootroot00000000000000import os import pytest from buildstream._context import Context from buildstream._project import Project from buildstream._exceptions import LoadError, LoadErrorReason from buildstream._pipeline import Pipeline DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'pipeline', ) def create_pipeline(tmpdir, basedir, target): context = Context() project = Project(basedir, context) context.deploydir = os.path.join(str(tmpdir), 'deploy') context.artifactdir = os.path.join(str(tmpdir), 'artifact') def dummy_handler(message, context): pass context.set_message_handler(dummy_handler) pipeline = Pipeline(context, project, None) targets, = pipeline.load([(target,)]) return targets @pytest.mark.datafiles(os.path.join(DATA_DIR, 'customsource')) def test_customsource(datafiles, tmpdir): basedir = os.path.join(datafiles.dirname, datafiles.basename) targets = create_pipeline(tmpdir, basedir, 'simple.bst') assert(targets[0].get_kind() == "autotools") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'customelement')) def test_customelement(datafiles, tmpdir): basedir = os.path.join(datafiles.dirname, datafiles.basename) targets = create_pipeline(tmpdir, basedir, 'simple.bst') assert(targets[0].get_kind() == "foo") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'badversionsource')) def test_badversionsource(datafiles, tmpdir): basedir = os.path.join(datafiles.dirname, datafiles.basename) with pytest.raises(LoadError) as exc: targets = create_pipeline(tmpdir, basedir, 'simple.bst') assert exc.value.reason == LoadErrorReason.UNSUPPORTED_PLUGIN @pytest.mark.datafiles(os.path.join(DATA_DIR, 'badversionelement')) def test_badversionelement(datafiles, tmpdir): basedir = os.path.join(datafiles.dirname, datafiles.basename) with pytest.raises(LoadError) as exc: targets = create_pipeline(tmpdir, basedir, 'simple.bst') assert exc.value.reason == LoadErrorReason.UNSUPPORTED_PLUGIN buildstream-1.6.9/tests/plugins/pipeline/000077500000000000000000000000001437515270000204765ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionelement/000077500000000000000000000000001437515270000242045ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionelement/customelements/000077500000000000000000000000001437515270000272535ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionelement/customelements/__init__.py000066400000000000000000000000001437515270000313520ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionelement/customelements/foo.py000066400000000000000000000004031437515270000304050ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): BST_FORMAT_VERSION = 5 def preflight(self): pass def configure(self, node): pass def get_unique_key(self): return {} def setup(): return FooElement buildstream-1.6.9/tests/plugins/pipeline/badversionelement/elements/000077500000000000000000000000001437515270000260205ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionelement/elements/simple.bst000066400000000000000000000001001437515270000300120ustar00rootroot00000000000000kind: foo description: Custom foo element config: some: thing buildstream-1.6.9/tests/plugins/pipeline/badversionelement/project.conf000066400000000000000000000002451437515270000265220ustar00rootroot00000000000000name: pony element-path: elements plugins: - origin: local path: 
customelements elements: # We provided bar at version 5, should be a conflict. foo: 10 buildstream-1.6.9/tests/plugins/pipeline/badversionsource/000077500000000000000000000000001437515270000240535ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionsource/customsources/000077500000000000000000000000001437515270000267715ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionsource/customsources/__init__.py000066400000000000000000000000001437515270000310700ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionsource/customsources/foo.py000066400000000000000000000004431437515270000301270ustar00rootroot00000000000000from buildstream import Source, Consistency class BarSource(Source): BST_FORMAT_VERSION = 5 def preflight(self): pass def configure(self, node): pass def get_consistency(self): return Consistency.INCONSISTENT def setup(): return BarSource buildstream-1.6.9/tests/plugins/pipeline/badversionsource/elements/000077500000000000000000000000001437515270000256675ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/badversionsource/elements/simple.bst000066400000000000000000000001541437515270000276720ustar00rootroot00000000000000kind: autotools description: Custom foo source sources: - kind: foo ref: 1.2.3 uri: http://ponyland.com buildstream-1.6.9/tests/plugins/pipeline/badversionsource/project.conf000066400000000000000000000002431437515270000263670ustar00rootroot00000000000000name: pony element-path: elements plugins: - origin: local path: customsources sources: # We provided bar at version 5, should be a conflict. foo: 10 buildstream-1.6.9/tests/plugins/pipeline/customelement/000077500000000000000000000000001437515270000233625ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customelement/elements/000077500000000000000000000000001437515270000251765ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customelement/elements/simple.bst000066400000000000000000000001041437515270000271740ustar00rootroot00000000000000kind: foo description: Custom foo source config: pony-color: pink buildstream-1.6.9/tests/plugins/pipeline/customelement/pluginelements/000077500000000000000000000000001437515270000264155ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customelement/pluginelements/__init__.py000066400000000000000000000000001437515270000305140ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customelement/pluginelements/foo.py000066400000000000000000000003471437515270000275560ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): def preflight(self): pass def configure(self, node): pass def get_unique_key(self): return {} def setup(): return FooElement buildstream-1.6.9/tests/plugins/pipeline/customelement/project.conf000066400000000000000000000001511437515270000256740ustar00rootroot00000000000000name: pony element-path: elements plugins: - origin: local path: pluginelements elements: foo: 0 buildstream-1.6.9/tests/plugins/pipeline/customsource/000077500000000000000000000000001437515270000232315ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customsource/elements/000077500000000000000000000000001437515270000250455ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customsource/elements/simple.bst000066400000000000000000000001541437515270000270500ustar00rootroot00000000000000kind: autotools description: Custom foo source sources: - kind: foo ref: 1.2.3 uri: 
http://ponyland.com buildstream-1.6.9/tests/plugins/pipeline/customsource/pluginsources/000077500000000000000000000000001437515270000261335ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customsource/pluginsources/__init__.py000066400000000000000000000000001437515270000302320ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/pipeline/customsource/pluginsources/foo.py000066400000000000000000000004071437515270000272710ustar00rootroot00000000000000from buildstream import Source, Consistency class FooSource(Source): def preflight(self): pass def configure(self, node): pass def get_consistency(self): return Consistency.INCONSISTENT def setup(): return FooSource buildstream-1.6.9/tests/plugins/pipeline/customsource/project.conf000066400000000000000000000001471437515270000255500ustar00rootroot00000000000000name: pony element-path: elements plugins: - origin: local path: pluginsources sources: foo: 0 buildstream-1.6.9/tests/plugins/third_party.py000066400000000000000000000037041437515270000216000ustar00rootroot00000000000000import os import pytest from pluginbase import PluginBase from buildstream._elementfactory import ElementFactory from buildstream._sourcefactory import SourceFactory from tests.testutils.setuptools import entry_fixture DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'third_party' ) # Simple fixture to create a PluginBase object that # we use for loading plugins. @pytest.fixture() def plugin_fixture(): return { 'base': PluginBase(package='buildstream.plugins') } ################################################################## # Tests # ################################################################## # Test that external element plugin loading works. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'third_party_element')) def test_custom_pip_element(plugin_fixture, entry_fixture, datafiles): origin_data = [{ 'origin': 'local', 'path': str(datafiles), 'plugins': {'foop': 0} }] factory = ElementFactory(plugin_fixture['base'], plugin_origins=origin_data) assert(isinstance(factory, ElementFactory)) entry_fixture(datafiles, 'buildstream.plugins', 'third_party_element:foop') foo_type, _ = factory.lookup('foop') assert(foo_type.__name__ == 'FooElement') # Test that external source plugin loading works. 
@pytest.mark.datafiles(os.path.join(DATA_DIR, 'third_party_source')) def test_custom_pip_source(plugin_fixture, entry_fixture, datafiles): origin_data = [{ 'origin': 'local', 'path': str(datafiles), 'plugins': {'foop': 0} }] factory = SourceFactory(plugin_fixture['base'], plugin_origins=origin_data) assert(isinstance(factory, SourceFactory)) entry_fixture(datafiles, 'buildstream.plugins', 'third_party_source:foop') foo_type, _ = factory.lookup('foop') assert(foo_type.__name__ == 'FooSource') buildstream-1.6.9/tests/plugins/third_party/000077500000000000000000000000001437515270000212225ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/third_party/third_party_element/000077500000000000000000000000001437515270000252645ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/third_party/third_party_element/__init__.py000066400000000000000000000000001437515270000273630ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/third_party/third_party_element/foop.py000066400000000000000000000001531437515270000266000ustar00rootroot00000000000000from buildstream import Element class FooElement(Element): pass def setup(): return FooElement buildstream-1.6.9/tests/plugins/third_party/third_party_source/000077500000000000000000000000001437515270000251335ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/third_party/third_party_source/__init__.py000066400000000000000000000000001437515270000272320ustar00rootroot00000000000000buildstream-1.6.9/tests/plugins/third_party/third_party_source/foop.py000066400000000000000000000001471437515270000264520ustar00rootroot00000000000000from buildstream import Source class FooSource(Source): pass def setup(): return FooSource buildstream-1.6.9/tests/sandboxes/000077500000000000000000000000001437515270000171765ustar00rootroot00000000000000buildstream-1.6.9/tests/sandboxes/missing-command.py000066400000000000000000000007621437515270000226420ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain from tests.testutils import cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), "missing-command" ) @pytest.mark.datafiles(DATA_DIR) def test_missing_command(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['build', 'no-runtime.bst']) result.assert_task_error(ErrorDomain.SANDBOX, 'missing-command') buildstream-1.6.9/tests/sandboxes/missing-command/000077500000000000000000000000001437515270000222635ustar00rootroot00000000000000buildstream-1.6.9/tests/sandboxes/missing-command/no-runtime.bst000066400000000000000000000000151437515270000250660ustar00rootroot00000000000000kind: manual buildstream-1.6.9/tests/sandboxes/missing-command/project.conf000066400000000000000000000000131437515270000245720ustar00rootroot00000000000000name: test buildstream-1.6.9/tests/sandboxes/mounting/000077500000000000000000000000001437515270000210365ustar00rootroot00000000000000buildstream-1.6.9/tests/sandboxes/mounting/mount_simple.py000066400000000000000000000033461437515270000241310ustar00rootroot00000000000000import os import tempfile from contextlib import ExitStack import pytest from buildstream.sandbox._mounter import Mounter @pytest.mark.skipif(not os.geteuid() == 0, reason="requires root permissions") def test_bind_mount(): with ExitStack() as stack: src = stack.enter_context(tempfile.TemporaryDirectory()) target = stack.enter_context(tempfile.TemporaryDirectory()) with open(os.path.join(src, 'test'), 'a') as test: 
test.write('Test') with Mounter.bind_mount(target, src) as dest: # Ensure we get the correct path back assert dest == target # Ensure we can access files from src from target with open(os.path.join(target, 'test'), 'r') as test: assert test.read() == 'Test' # Ensure the files from src are gone from target with pytest.raises(FileNotFoundError): with open(os.path.join(target, 'test'), 'r') as test: # Actual contents don't matter pass # Ensure the files in src are still in src with open(os.path.join(src, 'test'), 'r') as test: assert test.read() == 'Test' @pytest.mark.skipif(not os.geteuid() == 0, reason="requires root permissions") def test_mount_proc(): with ExitStack() as stack: src = '/proc' target = stack.enter_context(tempfile.TemporaryDirectory()) with Mounter.mount(target, src, mount_type='proc', ro=True) as dest: # Ensure we get the correct path back assert dest == target # Ensure /proc is actually mounted assert os.listdir(src) == os.listdir(target) # Ensure /proc is unmounted correctly assert os.listdir(target) == [] buildstream-1.6.9/tests/sources/000077500000000000000000000000001437515270000166735ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/__init__.py000066400000000000000000000003461437515270000210070ustar00rootroot00000000000000import os def list_dir_contents(srcdir): contents = set() for _, dirs, files in os.walk(srcdir): for d in dirs: contents.add(d) for f in files: contents.add(f) return contents buildstream-1.6.9/tests/sources/bzr.py000066400000000000000000000025611437515270000200460ustar00rootroot00000000000000import os import pytest from buildstream._pipeline import PipelineError from buildstream import _yaml from tests.testutils import cli, create_repo from tests.testutils.site import HAVE_BZR DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'bzr' ) @pytest.mark.skipif(HAVE_BZR is False, reason="bzr is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_fetch_checkout(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), 'checkout') repo = create_repo('bzr', str(tmpdir)) ref = repo.create(os.path.join(project, 'basic')) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['build', 'target.bst']) assert result.exit_code == 0 result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) assert result.exit_code == 0 # Assert we checked out the file as it was commited with open(os.path.join(checkoutdir, 'test')) as f: text = f.read() assert text == 'test\n' buildstream-1.6.9/tests/sources/bzr/000077500000000000000000000000001437515270000174705ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/bzr/basic/000077500000000000000000000000001437515270000205515ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/bzr/basic/test000066400000000000000000000000051437515270000214460ustar00rootroot00000000000000test buildstream-1.6.9/tests/sources/bzr/project.conf000066400000000000000000000000321437515270000220000ustar00rootroot00000000000000# Basic Project name: foo buildstream-1.6.9/tests/sources/deb.py000066400000000000000000000151641437515270000200060ustar00rootroot00000000000000import os import pytest import tarfile import tempfile import subprocess import 
shutil from buildstream._exceptions import ErrorDomain from buildstream import _yaml from tempfile import TemporaryFile from tests.testutils import cli from tests.testutils.site import HAVE_ARPY from . import list_dir_contents DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'deb', ) deb_name = "a_deb.deb" def generate_project(project_dir, tmpdir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': "file:///" + str(tmpdir) } }, project_file) def _copy_deb(start_location, tmpdir): source = os.path.join(start_location, deb_name) destination = os.path.join(str(tmpdir), deb_name) shutil.copyfile(source, destination) # Test that without ref, consistency is set appropriately. @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-ref')) def test_no_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) assert cli.get_element_state(project, 'target.bst') == 'no reference' # Test that when I fetch a nonexistent URL, errors are handled gracefully and a retry is performed. @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_fetch_bad_url(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) assert "FAILURE Try #" in result.stderr result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_fetch_bad_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Copy test deb to tmpdir _copy_deb(DATA_DIR, tmpdir) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) # Test that when tracking with a ref set, there is a warning @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_track_warning(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Copy test deb to tmpdir _copy_deb(DATA_DIR, tmpdir) # Track it result = cli.run(project=project, args=[ 'track', 'target.bst' ]) result.assert_success() assert "Potential man-in-the-middle attack!" 
in result.stderr # Test that a staged checkout matches what was tarred up, with the default first subdir @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_stage_default_basedir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Copy test deb to tmpdir _copy_deb(DATA_DIR, tmpdir) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '') original_dir = os.path.join(str(datafiles), "content") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that a staged checkout matches what was tarred up, with an empty base-dir @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-basedir')) def test_stage_no_basedir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Copy test deb to tmpdir _copy_deb(DATA_DIR, tmpdir) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the full content of the tarball is checked out (base-dir: '') original_dir = os.path.join(str(datafiles), "content") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that a staged checkout matches what was tarred up, with an explicit basedir @pytest.mark.skipif(HAVE_ARPY is False, reason="arpy is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'explicit-basedir')) def test_stage_explicit_basedir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Copy test deb to tmpdir _copy_deb(DATA_DIR, tmpdir) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '') original_dir = os.path.join(str(datafiles), "content") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) 
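# Editor's note: illustrative sketch only, not part of the original deb.py module.
# Each staging test above repeats the same track/fetch/build/checkout sequence with
# the `cli` fixture; assuming that fixture and nothing else, the pattern could be
# named once as below (the helper name is an assumption, not existing test utility
# code).
def _track_fetch_build_checkout(cli, project, checkoutdir, target='target.bst'):
    # Run each bst command in order, failing fast if any step reports an error
    for args in (['track', target],
                 ['fetch', target],
                 ['build', target],
                 ['checkout', target, checkoutdir]):
        result = cli.run(project=project, args=args)
        result.assert_success()
# For example, test_stage_default_basedir could call
# _track_fetch_build_checkout(cli, project, checkoutdir) in place of its four
# explicit cli.run()/assert_success() pairs.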
buildstream-1.6.9/tests/sources/deb/000077500000000000000000000000001437515270000174255ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/a_deb.deb000066400000000000000000000167101437515270000211400ustar00rootroot00000000000000! debian-binary 1477548769 0 0 100644 4 ` 2.0 control.tar.gz 1477548769 0 0 100644 687 ` k07Xl˶# 6hYYauEeHrC-t{Ɔ>(g*g_dD:O;Ӵ F*77ya6uGL/3|\FQ4ŹݍqzN]vtc uL;`qg<`Aybi:jt^L 5\+}KBo&Ezbcwї~}Qn/Ϣ+knD:1[M {~5EӑPif>O hg;8fy=vIr<㳳ɜMMrΒD{tU]f:~…6#;=.$@@HTGq"p8@'5@Z(Oɭ ,wr{ xCpٚފqTQDðbrp4ldͩ4 Ne5Gn{PAte&Qr`iϲ/?E)V]+eRfe[M6%6(Y}7P7DmxS8r3Yqɴn&/yuff>Y>kׂ`yݕMQؖ +DUCzc Bc @ @ @ 2?z( data.tar.xz 1477548769 0 0 100644 6744 ` 7zXZִF!t/w] }J>y&\ օ:|KT A ("`Xomp;;(s[5oy2_ٱ0IT.0r7PU6*oṚ/^v5H6eXRǸ=Q;Dt0Fv #j台YЄ Z XRv9)0jY>YoYqOF-s[<V۾%nܕRb uJ aŽ8q}|3q,Jx,`oH*^oH$Ci;MNnЮMq=҃ݕЂ=kfD*'jS:Ƿm̴>~L1΅=u_w?D2m ζ(zeNY)Hı *B@Zyvpvh.֥=;oB|6(a x Jrtl3(FIW:SҔȒ^{mF|M6x;RM~9D04s}7LJ%2`  Iqs)';/^ss#O C: I@"8l؂w/Wt&.^h`(qlcƕd!'?}zA[gw?`l4Evy@CUId3?TjwMrb" %VHIl|"; ,'|@Ff Jt3`}`# ; -a9= ~`U/@K+JBuh]>J'K/GNObĮSr]?kM&T #"{݈9tC&Wܚԕ22C&~3XZ3m75} N nrMg 5CD 33M/Q ?4R/!;aeF!p蹴*-eUPU.Z.)֤ɩļ <@sR)lvx)*tBP!u0eJm곐прP$.,/_+1 ޖ0 ;{z? sSW^2~,BKzqd n~;Y= 5ҁ0rmhG9{& 2ceD|T)ĩ ':%=[leK,$no@WR\ uQI*h$~DSm"ٵFDLg#NE3q~6ַ=* V|ׅ{_UChOl]~x(]['ksmsG`*;m̯lKAj1U{.Qe11'Z98FI?AB )`0AugO}:+!yE1Nn ('9O;݃iU9L xFAd6|@O(Wj@ۛ B6$(,MDf^GA%u%D@!Atn3/LSV#;*p( a*SJFޞ+7 |A=N2xǨN@|w2Kqn!æ #OD4;8ʮ3Cv{V,Lq"7hW[|0. Zs7zcMچ 6%I7~,4ܛʨ)2P a!j. .[ALz4Pw?@;a:D,?O ;6^7nv6bB9(v6C2 Hy_*,YרɯLξʊL,;- ^= _-i%"݆D P8'MeC5xA{w\"cJqeK#X]>{.)!,}rCΞ~W+˳"5**G)҆npODemH\lmgIIV݀yBSLvxDiO0yPHM)j;6c"CmL6}5zCH!JP$ZhP:_{(:E/.ksuO^vb-0ɤ<e#L27CAN LC!flKz4q!Ju&aBkһҸmćQ7"_r@Ռ!eQIHs8v˿+ N'KBuix@"b:C!BV"?&V=͆FIt'΃dyeS_.dlx7YaT9+\ ',uuG"XUŦ<w2A7ٚ%ledZ\<·qA38!,; n]Kp( Ʊ?̬xB[lFv'ua/"E1@ucֱ_ެ Z-ox&CJ/(TL-Y|G)C7KƊ:쟭Hi+Hgeю}^\|z l. !]-6|:5{ ,}b?،^dG^_Zu ˇbȈ*dc'Њm"ķzI9117㠆,-ĠG1fJ1k-2PJOA sߛ 6G`ZQ/3lTObNJ})4sfX+1n}E,/5w"zde toe+-ݦ0Ӽ̀]PۚJf&&ac5 /Izv}eb%6'q4#h[? Dt{Inϖ ȟU}KvQ'gTgCs8S_pV zmaПڮAH"yY7eM4h /=B%^x+@^O_U0*b]uU?AN'%E$O"OsÒvS?;xն,7Cʪ'T# ^M7 nMzzFe^3Q:ʟHzb^)qLtaXl^@F:@04iWUKv47hrRn}A~OqlUw(CMO'ՠ}m'7[8ry( K&)Y0/C0K3,… !,Rݑx6s.Dd/Vj$[/Y0yhJ^|w0 soY|?eܙX[Ar W#m%Q}U쟏x-(^]P(?+B{-1~ *Jz^\ t# "*h\Z߾]a S:[9'>wZ'=+(-|9y\f[1@il m8I$p>&C6]E <ýOխ.mfkwq`4GK T{$5-cFGʓ+v]ipPbCB&¶cvg}~l phG'>c Gx4RoQ'!f HW-ǽ$;59̡~e:%6߹8 _T[k}bB؎zX{:zڠbdz*_H:2w[gQ52gSs7ln=R*b.6.z`?`ߖiypna PLlS $l^nZ_Vmh&J_Z2 Ðm4yH?ך @?vMcǟYe=y C1wkr 罀; 2@u/[+ Pwm8!SW!cV>cګq(de*ꂒ :are;ivL7( Ջ }b`SxB+G-,3F_Id҉txvIl|&W*[ihOS(_55Cދd (7aJTH} Tǡ#VH.Rwg:&lEЈϗ^0(l 6 kI;8'(*'|(R!\5ҋl5[f%bb6Q>ihoJxP$̆Qdo Qˁ5iHzn#,cC(g^zM0[>8E  2Y+wvW }9G3MԴE{`Z| S~ֶXF"I xKvی z%Lt5s|chɦS}EXH&bvkvM5ܳ!R_3Ԗi֠xeJپ>=[>qCICvb P~_0Dv'E(Ɣ@957+#O9 H` 6 BD5lB4݀aaW􊫞<š}Zg;7QbI]KߥuR~xusޕ5aQ7 |?+l;6yL'T1'+5WZMa-@Zʹ1{|>hTձRmf22#):^Ěy [GrxY0C+5[n˦7%xtaiOѕmjD#n1EvX0€p$[P묀hBsr ÝW5xYR XY* -} ї򻏉">w`nJ{Ƞ<4^wcB-|(:F]ˏXF)㯤U_v1Ԙc/b:ʍ`C#_ҵ/xc,oLnAA8?s-DB3\.b:J_LE^#]&M3}8%|i\$WMʹHL? 
ήF X \,\y36 `!%>O0QHP3di(H[./\Yjyq3ڮfn/0 9 ~c:H$!z^g?G_jV)}uv>`&t/aTV%+ EQp0f射qޒC a,7r_B1MY4Ro)WgYZbuildstream-1.6.9/tests/sources/deb/explicit-basedir/000077500000000000000000000000001437515270000226555ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/000077500000000000000000000000001437515270000243275ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/000077500000000000000000000000001437515270000254315ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/doc/000077500000000000000000000000001437515270000261765ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/doc/lua-clod/000077500000000000000000000000001437515270000276765ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/doc/lua-clod/README000066400000000000000000000000001437515270000305440ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/doc/lua-clod/changelog.Debian.gz000066400000000000000000000000451437515270000333470ustar00rootroot00000000000000Zchangelog.Debianbuildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/doc/lua-clod/copyright000066400000000000000000000000001437515270000316170ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/lua/000077500000000000000000000000001437515270000262125ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/lua/5.1/000077500000000000000000000000001437515270000265155ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/lua/5.1/clod.lua000066400000000000000000000000001437515270000301270ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/lua/5.2/000077500000000000000000000000001437515270000265165ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/content/share/lua/5.2/clod.lua000077700000000000000000000000001437515270000322772../5.1/clod.luaustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/explicit-basedir/target.bst000066400000000000000000000002151437515270000246530ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. 
sources: - kind: deb url: tmpdir:/a_deb.deb ref: foo base-dir: 'usr' buildstream-1.6.9/tests/sources/deb/fetch/000077500000000000000000000000001437515270000205165ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/000077500000000000000000000000001437515270000221705ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/000077500000000000000000000000001437515270000230015ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/000077500000000000000000000000001437515270000241035ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/doc/000077500000000000000000000000001437515270000246505ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/doc/lua-clod/000077500000000000000000000000001437515270000263505ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/doc/lua-clod/README000066400000000000000000000000001437515270000272160ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/doc/lua-clod/changelog.Debian.gz000066400000000000000000000000451437515270000320210ustar00rootroot00000000000000 Zchangelog.Debianbuildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/doc/lua-clod/copyright000066400000000000000000000000001437515270000302710ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/lua/000077500000000000000000000000001437515270000246645ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/lua/5.1/000077500000000000000000000000001437515270000251675ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/lua/5.1/clod.lua000066400000000000000000000000001437515270000266010ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/lua/5.2/000077500000000000000000000000001437515270000251705ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/content/usr/share/lua/5.2/clod.lua000077700000000000000000000000001437515270000307512../5.1/clod.luaustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/fetch/target-lz.bst000066400000000000000000000001721437515270000231410ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: tar url: tmpdir:/a.tar.lz ref: foo buildstream-1.6.9/tests/sources/deb/fetch/target.bst000066400000000000000000000001731437515270000225170ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. 
sources: - kind: deb url: tmpdir:/a_deb.deb ref: foo buildstream-1.6.9/tests/sources/deb/no-basedir/000077500000000000000000000000001437515270000214505ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/000077500000000000000000000000001437515270000231225ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/000077500000000000000000000000001437515270000237335ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/000077500000000000000000000000001437515270000250355ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/doc/000077500000000000000000000000001437515270000256025ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/doc/lua-clod/000077500000000000000000000000001437515270000273025ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/doc/lua-clod/README000066400000000000000000000000001437515270000301500ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/doc/lua-clod/changelog.Debian.gz000066400000000000000000000000451437515270000327530ustar00rootroot00000000000000Zchangelog.Debianbuildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/doc/lua-clod/copyright000066400000000000000000000000001437515270000312230ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/lua/000077500000000000000000000000001437515270000256165ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/lua/5.1/000077500000000000000000000000001437515270000261215ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/lua/5.1/clod.lua000066400000000000000000000000001437515270000275330ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/lua/5.2/000077500000000000000000000000001437515270000261225ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/content/usr/share/lua/5.2/clod.lua000077700000000000000000000000001437515270000317032../5.1/clod.luaustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-basedir/target.bst000066400000000000000000000002121437515270000234430ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: deb url: tmpdir:/a_deb.deb ref: foo base-dir: '' buildstream-1.6.9/tests/sources/deb/no-ref/000077500000000000000000000000001437515270000206135ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-ref/a/000077500000000000000000000000001437515270000210335ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-ref/a/b/000077500000000000000000000000001437515270000212545ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/deb/no-ref/a/b/d000066400000000000000000000000021437515270000214120ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/deb/no-ref/a/c000066400000000000000000000000021437515270000211700ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/deb/no-ref/target.bst000066400000000000000000000001601437515270000226100ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. 
sources: - kind: deb url: tmpdir:/a_deb.deb buildstream-1.6.9/tests/sources/git.py000066400000000000000000000734501437515270000200410ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # Copyright (C) 2018 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: Tristan Van Berkom # Jonathan Maw # William Salmon # import os import pytest import shutil import subprocess from buildstream._exceptions import ErrorDomain from buildstream import _yaml from buildstream.plugin import CoreWarnings from tests.testutils import cli, create_repo from tests.testutils.site import HAVE_GIT DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'git', ) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_fetch_bad_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Write out our test target with a bad ref element = { 'kind': 'import', 'sources': [ repo.source_config(ref='5') ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Assert that fetch raises an error here result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_fetch_checkout(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subref = subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out both files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) 
def test_submodule_fetch_source_enable_explicit(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref, checkout_submodules=True) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out both files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_fetch_source_disable(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref, checkout_submodules=False) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out both files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert not os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_fetch_submodule_does_override(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo, checkout=True) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref, 
checkout_submodules=False) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out both files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_fetch_submodule_individual_checkout(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create another submodule from the 'othersubrepofiles' subdir other_subrepo = create_repo('git', str(tmpdir), 'othersubrepo') other_subrepo.create(os.path.join(project, 'othersubrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo, checkout=False) ref = repo.add_submodule('othersubdir', 'file://' + other_subrepo.repo) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref, checkout_submodules=True) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert not os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) assert os.path.exists(os.path.join(checkoutdir, 'othersubdir', 'unicornfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_fetch_submodule_individual_checkout_explicit(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create another submodule from the 'othersubrepofiles' subdir other_subrepo = create_repo('git', str(tmpdir), 'othersubrepo') other_subrepo.create(os.path.join(project, 'othersubrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo, checkout=False) ref = repo.add_submodule('othersubdir', 'file://' + other_subrepo.repo, checkout=True) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref, checkout_submodules=True) ] } 
_yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert not os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) assert os.path.exists(os.path.join(checkoutdir, 'othersubdir', 'unicornfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'project-override')) def test_submodule_fetch_project_override(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch, build, checkout result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Assert we checked out both files at their expected location assert os.path.exists(os.path.join(checkoutdir, 'file.txt')) assert not os.path.exists(os.path.join(checkoutdir, 'subdir', 'ponyfile.txt')) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_track_ignore_inconsistent(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Now add a .gitmodules file with an inconsistent submodule, # we are calling this inconsistent because the file was created # but `git submodule add` was never called, so there is no reference # associated to the submodule. # repo.add_file(os.path.join(project, 'inconsistent-submodule', '.gitmodules')) # Fetch should work, we're not yet at the offending ref result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() # Track will encounter an inconsistent submodule without any ref result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() # Assert that we are just fine without it, and emit a warning to the user. 
assert "Ignoring inconsistent submodule" in result.stderr @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_track_no_ref_or_track(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Write out our test target gitsource = repo.source_config(ref=None) gitsource.pop('track') element = { 'kind': 'import', 'sources': [ gitsource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Track will encounter an inconsistent submodule without any ref result = cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.SOURCE, "missing-track-and-ref") result.assert_task_error(None, None) @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) @pytest.mark.parametrize("fail", ['warn', 'error']) def test_ref_not_in_track(cli, tmpdir, datafiles, fail): project = os.path.join(datafiles.dirname, datafiles.basename) # Make the warning an error if we're testing errors if fail == 'error': project_template = { "name": "foo", "fatal-warnings": [CoreWarnings.REF_NOT_IN_TRACK] } _yaml.dump(project_template, os.path.join(project, 'project.conf')) # Create the repo from 'repofiles', create a branch without latest commit repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) gitsource = repo.source_config(ref=ref) # Overwrite the track value to the added branch gitsource['track'] = 'foo' # Write out our test target element = { 'kind': 'import', 'sources': [ gitsource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) result = cli.run(project=project, args=['build', 'target.bst']) # Assert a warning or an error depending on what we're checking if fail == 'error': result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.REF_NOT_IN_TRACK) else: result.assert_success() assert "ref-not-in-track" in result.stderr @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) @pytest.mark.parametrize("ref_format", ['sha1', 'git-describe']) @pytest.mark.parametrize("tag,extra_commit", [(False, False), (True, False), (True, True)]) def test_track_fetch(cli, tmpdir, datafiles, ref_format, tag, extra_commit): project = os.path.join(datafiles.dirname, datafiles.basename) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) if tag: repo.add_tag('tag') if extra_commit: repo.add_commit() # Write out our test target element = { 'kind': 'import', 'sources': [ repo.source_config() ] } element['sources'][0]['ref-format'] = ref_format element_path = os.path.join(project, 'target.bst') _yaml.dump(element, element_path) # Track it result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() element = _yaml.load(element_path) new_ref = element['sources'][0]['ref'] if ref_format == 'git-describe' and tag: # Check and strip prefix prefix = 'tag-{}-g'.format(0 if not extra_commit else 1) assert new_ref.startswith(prefix) new_ref = new_ref[len(prefix):] # 40 chars for SHA-1 assert len(new_ref) == 40 # Fetch it result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() 
@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) @pytest.mark.parametrize("fail", ['warn', 'error']) def test_unlisted_submodule(cli, tmpdir, datafiles, fail): project = os.path.join(datafiles.dirname, datafiles.basename) # Make the warning an error if we're testing errors if fail == 'error': project_template = { "name": "foo", "fatal-warnings": ['git:unlisted-submodule'] } _yaml.dump(project_template, os.path.join(project, 'project.conf')) # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo) # Create the source, and delete the explicit configuration # of the submodules. # # We expect this to cause an unlisted submodule warning # after the source has been fetched. # gitsource = repo.source_config(ref=ref) del gitsource['submodules'] # Write out our test target element = { 'kind': 'import', 'sources': [ gitsource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # We will not see the warning or error before the first fetch, because # we don't have the repository yet and so we have no knowledge of # the unlisted submodule. result = cli.run(project=project, args=['show', 'target.bst']) result.assert_success() assert "git:unlisted-submodule" not in result.stderr # We will notice this directly in fetch, as it will try to fetch # the submodules it discovers as a result of fetching the primary repo. result = cli.run(project=project, args=['fetch', 'target.bst']) # Assert a warning or an error depending on what we're checking if fail == 'error': result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, 'git:unlisted-submodule') else: result.assert_success() assert "git:unlisted-submodule" in result.stderr # Now that we've fetched it, `bst show` will discover the unlisted submodule too result = cli.run(project=project, args=['show', 'target.bst']) # Assert a warning or an error depending on what we're checking if fail == 'error': result.assert_main_error(ErrorDomain.PLUGIN, 'git:unlisted-submodule') else: result.assert_success() assert "git:unlisted-submodule" in result.stderr @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) @pytest.mark.parametrize("fail", ['warn', 'error']) def test_invalid_submodule(cli, tmpdir, datafiles, fail): project = os.path.join(datafiles.dirname, datafiles.basename) # Make the warning an error if we're testing errors if fail == 'error': project_template = { "name": "foo", "fatal-warnings": ['git:invalid-submodule'] } _yaml.dump(project_template, os.path.join(project, 'project.conf')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Create the source without any submodules, and add # an invalid submodule configuration to it. # # We expect this to cause an invalid submodule warning # after the source has been fetched and we know what # the real submodules actually are. 
# gitsource = repo.source_config(ref=ref) gitsource['submodules'] = { 'subdir': { 'url': 'https://pony.org/repo.git' } } # Write out our test target element = { 'kind': 'import', 'sources': [ gitsource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # We will not see the warning or error before the first fetch, because # we don't have the repository yet and so we have no knowledge of # the unlisted submodule. result = cli.run(project=project, args=['show', 'target.bst']) result.assert_success() assert "git:invalid-submodule" not in result.stderr # We will notice this directly in fetch, as it will try to fetch # the submodules it discovers as a result of fetching the primary repo. result = cli.run(project=project, args=['fetch', 'target.bst']) # Assert a warning or an error depending on what we're checking if fail == 'error': result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, 'git:invalid-submodule') else: result.assert_success() assert "git:invalid-submodule" in result.stderr # Now that we've fetched it, `bst show` will discover the unlisted submodule too result = cli.run(project=project, args=['show', 'target.bst']) # Assert a warning or an error depending on what we're checking if fail == 'error': result.assert_main_error(ErrorDomain.PLUGIN, 'git:invalid-submodule') else: result.assert_success() assert "git:invalid-submodule" in result.stderr @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) @pytest.mark.parametrize("fail", ['warn', 'error']) def test_track_invalid_submodule(cli, tmpdir, datafiles, fail): project = os.path.join(datafiles.dirname, datafiles.basename) # For some reason, old fashioned git from way back in centos 7 land # doesnt behave quite the same, resulting in an inconsistent-submodule # warning being issued by the git plugin here, instead of an invalid-submodule # warning. # # Let's just overlook this minor issue and skip the test with ancient versions of git. 
# output = subprocess.check_output(['git', '--version']) output = output.decode('UTF-8').strip() git_version = output.rsplit(maxsplit=1)[-1] git_version_major = git_version.split(".", maxsplit=1)[0] if git_version_major == "1": pytest.skip("Git behaves subtly differently in the ancient version {}".format(git_version)) # Make the warning an error if we're testing errors if fail == 'error': project_template = { "name": "foo", "fatal-warnings": ['git:invalid-submodule'] } _yaml.dump(project_template, os.path.join(project, 'project.conf')) # Create the submodule first from the 'subrepofiles' subdir subrepo = create_repo('git', str(tmpdir), 'subrepo') subrepo.create(os.path.join(project, 'subrepofiles')) # Create the repo from 'repofiles' subdir repo = create_repo('git', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Add a submodule pointing to the one we created ref = repo.add_submodule('subdir', 'file://' + subrepo.repo) # Add a commit beyond the ref which *removes* the submodule we've added repo.remove_path('subdir') # Create the source, this will keep the submodules so initially # the configuration is valid for the ref we're using gitsource = repo.source_config(ref=ref) # Write out our test target element = { 'kind': 'import', 'sources': [ gitsource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Fetch the repo, we will not see the warning because we # are still pointing to a ref which predates the submodules result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() assert "git:invalid-submodule" not in result.stderr # In this case, we will get the error directly after tracking, # since the new HEAD does not require any submodules which are # not locally cached, the Source will be CACHED directly after # tracking and the validations will occur as a result. # result = cli.run(project=project, args=['track', 'target.bst']) if fail == 'error': result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.PLUGIN, 'git:invalid-submodule') else: result.assert_success() assert "git:invalid-submodule" in result.stderr @pytest.mark.skipif(HAVE_GIT is False, reason="git is not available") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_overwrite_rogue_tag_multiple_remotes(cli, tmpdir, datafiles): """When using multiple remotes in cache (i.e. when using aliases), we need to make sure we override tags. 
This is not allowed to fetch tags that were present from different origins """ project = str(datafiles) repofiles = os.path.join(str(tmpdir), 'repofiles') os.makedirs(repofiles, exist_ok=True) file0 = os.path.join(repofiles, 'file0') with open(file0, 'w') as f: f.write('test\n') repo = create_repo('git', str(tmpdir)) top_commit = repo.create(repofiles) repodir, reponame = os.path.split(repo.repo) project_config = _yaml.load(os.path.join(project, 'project.conf')) project_config['aliases'] = { 'repo': 'http://example.com/' } project_config['mirrors'] = [ { 'name': 'middle-earth', 'aliases': { 'repo': ['file://{}/'.format(repodir)] } } ] _yaml.dump(_yaml.node_sanitize(project_config), os.path.join(project, 'project.conf')) repo.add_annotated_tag('tag', 'tag') file1 = os.path.join(repofiles, 'file1') with open(file1, 'w') as f: f.write('test\n') ref = repo.add_file(file1) config = repo.source_config(ref=ref) del config['track'] config['url'] = 'repo:{}'.format(reponame) # Write out our test target element = { 'kind': 'import', 'sources': [ config ], } element_path = os.path.join(project, 'target.bst') _yaml.dump(element, element_path) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() repo.checkout(top_commit) file2 = os.path.join(repofiles, 'file2') with open(file2, 'w') as f: f.write('test\n') new_ref = repo.add_file(file2) repo.delete_tag('tag') repo.add_annotated_tag('tag', 'tag') repo.checkout('master') otherpath = os.path.join(str(tmpdir), 'other_path') shutil.copytree(repo.repo, os.path.join(otherpath, 'repo')) new_repo = create_repo('git', otherpath) repodir, reponame = os.path.split(repo.repo) _yaml.dump(_yaml.node_sanitize(project_config), os.path.join(project, 'project.conf')) config = repo.source_config(ref=new_ref) del config['track'] config['url'] = 'repo:{}'.format(reponame) element = { 'kind': 'import', 'sources': [ config ], } _yaml.dump(element, element_path) result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() buildstream-1.6.9/tests/sources/git/000077500000000000000000000000001437515270000174565ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/project-override/000077500000000000000000000000001437515270000227415ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/project-override/project.conf000066400000000000000000000002331437515270000252540ustar00rootroot00000000000000# Basic project name: foo sources: git: config: checkout-submodules: False elements: manual: config: build-commands: - "foo" buildstream-1.6.9/tests/sources/git/project-override/repofiles/000077500000000000000000000000001437515270000247315ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/project-override/repofiles/file.txt000066400000000000000000000000051437515270000264040ustar00rootroot00000000000000pony buildstream-1.6.9/tests/sources/git/project-override/subrepofiles/000077500000000000000000000000001437515270000254435ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/project-override/subrepofiles/ponyfile.txt000066400000000000000000000000051437515270000300240ustar00rootroot00000000000000file 
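# ---------------------------------------------------------------------------
# Editor's note: test_overwrite_rogue_tag_multiple_remotes() above drives the
# project.conf 'aliases' and 'mirrors' configuration, where a source URL such
# as 'repo:<reponame>' can resolve to more than one remote. The helper below
# is a minimal, hypothetical sketch of that substitution, written purely for
# illustration; it is not BuildStream's actual alias or mirror resolution code.


def _expand_aliased_url(url, aliases, mirrors=None):
    # Split 'alias:rest', then return candidate URLs: mirror bases declared for
    # that alias first, followed by the canonical alias base.
    alias, _, rest = url.partition(':')
    bases = []
    for mirror in mirrors or []:
        bases.extend(mirror.get('aliases', {}).get(alias, []))
    bases.append(aliases[alias])
    return [base + rest for base in bases]


# For example, with a configuration shaped like the one built in the test above:
#   _expand_aliased_url('repo:myrepo.git',
#                       aliases={'repo': 'http://example.com/'},
#                       mirrors=[{'name': 'middle-earth',
#                                 'aliases': {'repo': ['file:///some/repodir/']}}])
# would yield ['file:///some/repodir/myrepo.git', 'http://example.com/myrepo.git'].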
buildstream-1.6.9/tests/sources/git/template/000077500000000000000000000000001437515270000212715ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/template/inconsistent-submodule/000077500000000000000000000000001437515270000260065ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/template/inconsistent-submodule/.gitmodules000066400000000000000000000001001437515270000301520ustar00rootroot00000000000000[submodule "farm/pony"] path = farm/pony url = git://pony.com buildstream-1.6.9/tests/sources/git/template/othersubrepofiles/000077500000000000000000000000001437515270000250355ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/template/othersubrepofiles/unicornfile.txt000066400000000000000000000000051437515270000301060ustar00rootroot00000000000000file buildstream-1.6.9/tests/sources/git/template/project.conf000066400000000000000000000000321437515270000236010ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/git/template/repofiles/000077500000000000000000000000001437515270000232615ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/template/repofiles/file.txt000066400000000000000000000000051437515270000247340ustar00rootroot00000000000000pony buildstream-1.6.9/tests/sources/git/template/subrepofiles/000077500000000000000000000000001437515270000237735ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/git/template/subrepofiles/ponyfile.txt000066400000000000000000000000051437515270000263540ustar00rootroot00000000000000file buildstream-1.6.9/tests/sources/local.py000066400000000000000000000121651437515270000203440ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils import cli, filetypegenerator DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'local', ) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_missing_path(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Removing the local file causes preflight to fail localfile = os.path.join(project, 'file.txt') os.remove(localfile) result = cli.run(project=project, args=[ 'show', 'target.bst' ]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_non_regular_file_or_directory(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) localfile = os.path.join(project, 'file.txt') for file_type in filetypegenerator.generate_file_types(localfile): result = cli.run(project=project, args=[ 'show', 'target.bst' ]) if os.path.isdir(localfile) and not os.path.islink(localfile): result.assert_success() elif os.path.isfile(localfile) and not os.path.islink(localfile): result.assert_success() else: result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_invalid_absolute_path(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with open(os.path.join(project, "target.bst"), 'r') as f: old_yaml = f.read() new_yaml = old_yaml.replace("file.txt", os.path.join(project, "file.txt")) assert old_yaml != new_yaml with open(os.path.join(project, "target.bst"), 'w') as f: f.write(new_yaml) result = cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID) @pytest.mark.datafiles(os.path.join(DATA_DIR, 
'invalid-relative-path')) def test_invalid_relative_path(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_stage_file(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the checkout contains the expected file assert(os.path.exists(os.path.join(checkoutdir, 'file.txt'))) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'directory')) def test_stage_directory(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the checkout contains the expected file and directory and other file assert(os.path.exists(os.path.join(checkoutdir, 'file.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'subdir', 'anotherfile.txt'))) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'symlink')) def test_stage_symlink(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Workaround datafiles bug: # # https://github.com/omarkohl/pytest-datafiles/issues/1 # # Create the symlink by hand. 
symlink = os.path.join(project, 'files', 'symlink-to-file.txt') os.symlink('file.txt', symlink) # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the checkout contains the expected file and directory and other file assert(os.path.exists(os.path.join(checkoutdir, 'file.txt'))) assert(os.path.exists(os.path.join(checkoutdir, 'symlink-to-file.txt'))) assert(os.path.islink(os.path.join(checkoutdir, 'symlink-to-file.txt'))) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'file-exists')) def test_stage_file_exists(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, 'ensure-stage-dir-fail') buildstream-1.6.9/tests/sources/local/000077500000000000000000000000001437515270000177655ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/basic/000077500000000000000000000000001437515270000210465ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/basic/file.txt000066400000000000000000000000241437515270000225220ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/sources/local/basic/project.conf000066400000000000000000000000321437515270000233560ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/local/basic/target.bst000066400000000000000000000001231437515270000230420ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: file.txt buildstream-1.6.9/tests/sources/local/directory/000077500000000000000000000000001437515270000217715ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/directory/files/000077500000000000000000000000001437515270000230735ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/directory/files/file.txt000066400000000000000000000000201437515270000245430ustar00rootroot00000000000000I'm a text file buildstream-1.6.9/tests/sources/local/directory/files/subdir/000077500000000000000000000000001437515270000243635ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/directory/files/subdir/anotherfile.txt000066400000000000000000000000261437515270000274220ustar00rootroot00000000000000I'm another text file buildstream-1.6.9/tests/sources/local/directory/project.conf000066400000000000000000000000321437515270000243010ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/local/directory/target.bst000066400000000000000000000001201437515270000237620ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: files buildstream-1.6.9/tests/sources/local/file-exists/000077500000000000000000000000001437515270000222215ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/file-exists/files/000077500000000000000000000000001437515270000233235ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/file-exists/files/file.txt000066400000000000000000000000201437515270000247730ustar00rootroot00000000000000I'm a text file buildstream-1.6.9/tests/sources/local/file-exists/project.conf000066400000000000000000000000321437515270000245310ustar00rootroot00000000000000# Basic project name: foo 
buildstream-1.6.9/tests/sources/local/file-exists/target.bst000066400000000000000000000005651437515270000242270ustar00rootroot00000000000000kind: import description: | This is the regular file staged twice, second time we stage into a subdir of the staging area. The subdir we specify is the file we already staged, provoking a plausible error where the user tries to stage something unreasonable. sources: - kind: local path: files/file.txt - kind: local path: files/file.txt directory: file.txt buildstream-1.6.9/tests/sources/local/invalid-relative-path/000077500000000000000000000000001437515270000241565ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/invalid-relative-path/file.txt000066400000000000000000000000241437515270000256320ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/sources/local/invalid-relative-path/project.conf000066400000000000000000000000321437515270000264660ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/local/invalid-relative-path/target.bst000066400000000000000000000001541437515270000261560ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: ../invalid-relative-path/file.txt buildstream-1.6.9/tests/sources/local/symlink/000077500000000000000000000000001437515270000214535ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/symlink/files/000077500000000000000000000000001437515270000225555ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/local/symlink/files/file.txt000066400000000000000000000000201437515270000242250ustar00rootroot00000000000000I'm a text file buildstream-1.6.9/tests/sources/local/symlink/project.conf000066400000000000000000000000321437515270000237630ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/local/symlink/target.bst000066400000000000000000000001201437515270000234440ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: files buildstream-1.6.9/tests/sources/ostree.py000066400000000000000000000071061437515270000205520ustar00rootroot00000000000000# # Copyright (C) 2018 Bloomberg Finance LP # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . 
# # Authors: William Salmon # import os import pytest import subprocess from buildstream._exceptions import ErrorDomain from buildstream import _yaml from tests.testutils import cli, create_repo DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'ostree', ) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_submodule_track_no_ref_or_track(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Create the repo from 'repofiles' subdir repo = create_repo('ostree', str(tmpdir)) ref = repo.create(os.path.join(project, 'repofiles')) # Write out our test target ostreesource = repo.source_config(ref=None) ostreesource.pop('track') element = { 'kind': 'import', 'sources': [ ostreesource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Track will encounter an inconsistent submodule without any ref result = cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.SOURCE, "missing-track-and-ref") result.assert_task_error(None, None) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'template')) def test_fetch_gpg_verify(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) gpg_homedir = os.path.join(DATA_DIR, "gpghome") # Some older versions of gpg, like the gpg (GnuPG) 2.0.22 / libgcrypt 1.5.3 # combination present on centos 7, does not recognize the gpg key we use # for this test. # # Just skip the test on these older platforms (techinically ostree should work # so long as you are using a gpg key that is properly installed for your platform) # output = subprocess.check_output([ 'gpg', '--homedir={}'.format(gpg_homedir), '--list-keys' ]) output = output.decode('UTF-8').strip() if not output: pytest.skip("Our test GPG key is not supported on this platform") # Create the repo from 'repofiles' subdir repo = create_repo('ostree', str(tmpdir)) ref = repo.create( os.path.join(project, 'repofiles'), gpg_sign="FFFF54C070353B52D046DEB087FA0F41A6EFD9E9", gpg_homedir=gpg_homedir ) # Write out our test target ostreesource = repo.source_config(ref=ref, gpg_key='test.gpg') element = { 'kind': 'import', 'sources': [ ostreesource ] } _yaml.dump(element, os.path.join(project, 'target.bst')) # Assert that a fetch is needed assert cli.get_element_state(project, 'target.bst') == 'fetch needed' # Now try to fetch it result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() # Assert that we are now buildable because the source is # now cached. assert cli.get_element_state(project, 'target.bst') == 'buildable' buildstream-1.6.9/tests/sources/ostree/000077500000000000000000000000001437515270000201745ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/gpghome/000077500000000000000000000000001437515270000216225ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/gpghome/openpgp-revocs.d/000077500000000000000000000000001437515270000250135ustar00rootroot00000000000000FFFF54C070353B52D046DEB087FA0F41A6EFD9E9.rev000066400000000000000000000023741437515270000322450ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/gpghome/openpgp-revocs.dThis is a revocation certificate for the OpenPGP key: pub rsa1024 2020-05-10 [S] FFFF54C070353B52D046DEB087FA0F41A6EFD9E9 uid Ponyman (It's a flying pony) A revocation certificate is a kind of "kill switch" to publicly declare that a key shall not anymore be used. It is not possible to retract such a revocation certificate once it has been published. 
Use it to revoke this key in case of a compromise or loss of the secret key. However, if the secret key is still accessible, it is better to generate a new revocation certificate and give a reason for the revocation. For details see the description of of the gpg command "--generate-revocation" in the GnuPG manual. To avoid an accidental use of this file, a colon has been inserted before the 5 dashes below. Remove this colon with a text editor before importing and publishing this revocation certificate. :-----BEGIN PGP PUBLIC KEY BLOCK----- Comment: This is a revocation certificate iLYEIAEKACAWIQT//1TAcDU7UtBG3rCH+g9Bpu/Z6QUCXrfEHAIdAAAKCRCH+g9B pu/Z6ez3BACQL3lnMaePfXhewvavv4iHChRXBZ7sMXdBVOvQb56d/5YIr/YzdFo/ O8Xt/5DFw4uwcs6pTVgc5i4GyJsouTmZSqCeQzQ2i4BjXd4HBlYw6OUAQTdOJfwg 1XlvSbMfNA6qh6eFOknf3VWpbDK6Fc0v9qEbyUxVyCggOZdT8EC2jA== =yz0g -----END PGP PUBLIC KEY BLOCK----- buildstream-1.6.9/tests/sources/ostree/gpghome/private-keys-v1.d/000077500000000000000000000000001437515270000250135ustar00rootroot00000000000000C68F72B3B1BABC2986B2D5C311D8B8F5F26D59C3.key000066400000000000000000000010161437515270000322100ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/gpghome/private-keys-v1.d(11:private-key(3:rsa(1:n129:\![Q. /;WiFP@^2g5I1meI0K ܡSFQ'|V,VKBYX.$9 qS_/.Dg8c )(1:e3:)(1:d128:e7L& g׶ wv?fP> 0& yt]+NlmHf?mA/A:S=q? ' 3Y7)(1:p65:>ʥMRw46p l .}!Y +xoka$;t·Z)(1:q65:> ~+ %(c'vM*3^WiXKoSfl\P)(1:u64:Zme7ۙќ|?d>S<:\bs1{W1%CR>"N?T)))E18E82A1918D5926329EEB985E537DEB5E6934B5.key000066400000000000000000000010161437515270000321440ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/gpghome/private-keys-v1.d(11:private-key(3:rsa(1:n129:޺ZT@:L4q=!8oޗd^&%ÂqwΓYSJod*^ Aw5aGiVZ[s]OdLoBQ::$Ȕ:)(1:e3:)(1:d128:&Dߔ^,ގK$x`*wawTHk rHh!jE of7 ɳ3[1]$";)3xTLE1M()(1:p65:&0KFNuAS,dB[P6lh8}-mA)L`}׬>k)(1:q65:D6@`-TS 9>lä=4>N|*8pD{ u_]'=~a+)(1:u64: 4ba RQ BBx!*}q^)6n*ܽXٸ)))buildstream-1.6.9/tests/sources/ostree/gpghome/pubring.kbx000066400000000000000000000016611437515270000240020ustar00rootroot00000000000000 KBXf^^~Tp5;RFްA Uw12|ޥ$gg< +^^ \![Q. /;WiFP@^2g5I1meI0K ܡSFQ'|V,VKBYX.$9 qS_/.Dg8c  gpg+Ponyman (It's a flying pony) gpg 8!Tp5;RFްA^     A5(4GR=T h/3$HG#]NdqC^wǠA 'YZ2R-aXRjq@3xsngpg+:)7 buildstream-1.6.9/tests/sources/ostree/gpghome/pubring.kbx~000066400000000000000000000000401437515270000241660ustar00rootroot00000000000000 KBXf^^buildstream-1.6.9/tests/sources/ostree/gpghome/trustdb.gpg000066400000000000000000000024001437515270000240040ustar00rootroot00000000000000gpg^  Tp5;RFްA t1ӄ֘L! 
buildstream-1.6.9/tests/sources/ostree/template/000077500000000000000000000000001437515270000220075ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/template/project.conf000066400000000000000000000000321437515270000243170ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/ostree/template/repofiles/000077500000000000000000000000001437515270000237775ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/template/repofiles/file000066400000000000000000000000001437515270000246270ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/ostree/template/test.gpg000066400000000000000000000020431437515270000234640ustar00rootroot00000000000000-----BEGIN PGP PUBLIC KEY BLOCK----- mI0EXrfECQEEALtcIRRbUcGnLiDULztXaUboEKNQQIJeEOgG8wjmMsRnhjX78A7S ScsxtBhtZUm/s/KciTCTSwv43KHi9VOBwuBGUZAGpMCkJwF8E/WsVh+fxCxWS4hC s1lYky6VBhDKJJ6vkTkgHwVxf1Nf4C+MhIYu4K5EZ7SSOIkcY7ulqQkJABEBAAG0 K1BvbnltYW4gKEl0J3MgYSBmbHlpbmcgcG9ueSkgPHBvbnlAbWFuLmNvbT6IzgQT AQoAOBYhBP//VMBwNTtS0EbesIf6D0Gm79npBQJet8QJAhsDBQsJCAcCBhUKCQgL AgQWAgMBAh4BAheAAAoJEIf6D0Gm79np548D/jXDKOc0jphHllI99vRUuQyMEJVo LzP+2fskSKeCokePGCPlE5BdE05kcUNed6yDAceg8r2m4UEglhsGvKb6xdMSJ1la PLhMCbtr7UQo4Dg/SyPYql/S5tqRz/ayhVtTQ7jbO70LKjm/QvbkYZGM1riYFpmX fHlX/ux1JRnn982TuI0EXrfECQEEAN66k8damFTpQDocTPg0ta/scT0hGTiPwwDz 8dn+pG/el7v1/pVkXsXY0eUmJcOC8ea/cXfOk+wVWZ5TpkpvyxnOzs3bGdRk8pL2 lyr4r14O9g3rQbR3j401n7FhvgWRR2lWGLuoHrZaW8Zz4l1PqMcUZExvQvtRwjq8 OiTIlDqJABEBAAGItgQYAQoAIBYhBP//VMBwNTtS0EbesIf6D0Gm79npBQJet8QJ AhsMAAoJEIf6D0Gm79np/2UD/2+nEwRykN3YmImtST11edEUQ66sxxhzZFQRWn1s MgyJVM7xgHyxk1XLAASZS1IXDqNtF5uuwEZimTjbBByLqHayMfRukpXVj82+Uhuo JpaitHtph2N0eJTP4S3ia6qTOpaSORTxDdFhf/6Rfj7A0TJSLedhWFJqcUDKDzN4 cwBu =dIhT -----END PGP PUBLIC KEY BLOCK----- buildstream-1.6.9/tests/sources/patch.py000066400000000000000000000140131437515270000203430ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain, LoadErrorReason from tests.testutils import cli, filetypegenerator DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'patch', ) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_missing_patch(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Removing the local file causes preflight to fail localfile = os.path.join(project, 'file_1.patch') os.remove(localfile) result = cli.run(project=project, args=[ 'show', 'target.bst' ]) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.MISSING_FILE) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_non_regular_file_patch(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) patch_path = os.path.join(project, 'irregular_file.patch') for file_type in filetypegenerator.generate_file_types(patch_path): result = cli.run(project=project, args=[ 'show', 'irregular.bst' ]) if os.path.isfile(patch_path) and not os.path.islink(patch_path): result.assert_success() else: result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID_KIND) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_invalid_absolute_path(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) with open(os.path.join(project, "target.bst"), 'r') as f: old_yaml = f.read() new_yaml = old_yaml.replace("file_1.patch", os.path.join(project, "file_1.patch")) assert old_yaml != new_yaml with open(os.path.join(project, "target.bst"), 'w') as f: f.write(new_yaml) result = 
cli.run(project=project, args=['show', 'target.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'invalid-relative-path')) def test_invalid_relative_path(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) result = cli.run(project=project, args=['show', 'irregular.bst']) result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.PROJ_PATH_INVALID) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_stage_and_patch(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Test the file.txt was patched and changed with open(os.path.join(checkoutdir, 'file.txt')) as f: assert(f.read() == 'This is text file with superpowers\n') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_stage_file_nonexistent_dir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Fails at build time because it tries to patch into a non-existing directory result = cli.run(project=project, args=['build', 'failure-nonexistent-dir.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, "patch-no-files") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic')) def test_stage_file_empty_dir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Fails at build time because it tries to patch with nothing else staged result = cli.run(project=project, args=['build', 'failure-empty-dir.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, "patch-no-files") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'separate-patch-dir')) def test_stage_separate_patch_dir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Track, fetch, build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Test the file.txt was patched and changed with open(os.path.join(checkoutdir, 'test-dir', 'file.txt')) as f: assert(f.read() == 'This is text file in a directory with superpowers\n') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'multiple-patches')) def test_stage_multiple_patches(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), "checkout") # Track, fetch, build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Test the file.txt was patched and changed with open(os.path.join(checkoutdir, 'file.txt')) as f: assert(f.read() == 'This is text file with more superpowers\n') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'different-strip-level')) def test_patch_strip_level(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) checkoutdir = os.path.join(str(tmpdir), 
"checkout") # Track, fetch, build, checkout result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Test the file.txt was patched and changed with open(os.path.join(checkoutdir, 'file.txt')) as f: assert(f.read() == 'This is text file with superpowers\n') buildstream-1.6.9/tests/sources/patch/000077500000000000000000000000001437515270000177725ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/basic/000077500000000000000000000000001437515270000210535ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/basic/failure-empty-dir.bst000066400000000000000000000001341437515270000251220ustar00rootroot00000000000000kind: import description: This is also the pony sources: - kind: patch path: file_1.patch buildstream-1.6.9/tests/sources/patch/basic/failure-nonexistent-dir.bst000066400000000000000000000001651437515270000263460ustar00rootroot00000000000000kind: import description: This is also the pony sources: - kind: patch path: file_1.patch directory: /idontexist buildstream-1.6.9/tests/sources/patch/basic/file.txt000066400000000000000000000000241437515270000225270ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/sources/patch/basic/file_1.patch000066400000000000000000000002421437515270000232310ustar00rootroot00000000000000diff --git a/file.txt b/file.txt index a496efe..341ef26 100644 --- a/file.txt +++ b/file.txt @@ -1 +1 @@ -This is a text file +This is text file with superpowers buildstream-1.6.9/tests/sources/patch/basic/irregular.bst000066400000000000000000000001761437515270000235650ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: file.txt - kind: patch path: irregular_file.patch buildstream-1.6.9/tests/sources/patch/basic/project.conf000066400000000000000000000000321437515270000233630ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/patch/basic/target.bst000066400000000000000000000001661437515270000230560ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: file.txt - kind: patch path: file_1.patch buildstream-1.6.9/tests/sources/patch/different-strip-level/000077500000000000000000000000001437515270000242045ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/different-strip-level/file.txt000066400000000000000000000000241437515270000256600ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/sources/patch/different-strip-level/file_1.patch000066400000000000000000000002621437515270000263640ustar00rootroot00000000000000diff --git foo/a/file.txt foo/b/file.txt index a496efe..341ef26 100644 --- foo/a/file.txt +++ foo/b/file.txt @@ -1 +1 @@ -This is a text file +This is text file with superpowers buildstream-1.6.9/tests/sources/patch/different-strip-level/project.conf000066400000000000000000000000321437515270000265140ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/patch/different-strip-level/target.bst000066400000000000000000000002071437515270000262030ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: file.txt - kind: patch path: file_1.patch strip-level: 2 
buildstream-1.6.9/tests/sources/patch/invalid-relative-path/000077500000000000000000000000001437515270000241635ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/invalid-relative-path/file_1.patch000066400000000000000000000002421437515270000263410ustar00rootroot00000000000000diff --git a/file.txt b/file.txt index a496efe..341ef26 100644 --- a/file.txt +++ b/file.txt @@ -1 +1 @@ -This is a text file +This is text file with superpowers buildstream-1.6.9/tests/sources/patch/invalid-relative-path/irregular.bst000066400000000000000000000001701437515270000266670ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: patch path: ../invalid-relative-path/irregular_file.patch buildstream-1.6.9/tests/sources/patch/invalid-relative-path/project.conf000066400000000000000000000000321437515270000264730ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/patch/multiple-patches/000077500000000000000000000000001437515270000232525ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/multiple-patches/file.txt000066400000000000000000000000241437515270000247260ustar00rootroot00000000000000This is a text file buildstream-1.6.9/tests/sources/patch/multiple-patches/file_1.patch000066400000000000000000000002421437515270000254300ustar00rootroot00000000000000diff --git a/file.txt b/file.txt index a496efe..341ef26 100644 --- a/file.txt +++ b/file.txt @@ -1 +1 @@ -This is a text file +This is text file with superpowers buildstream-1.6.9/tests/sources/patch/multiple-patches/file_2.patch000066400000000000000000000002661437515270000254370ustar00rootroot00000000000000diff --git a/file.txt b/file.txt index a496efe..341ef26 100644 --- a/file.txt +++ b/file.txt @@ -1 +1 @@ -This is text file with superpowers +This is text file with more superpowers buildstream-1.6.9/tests/sources/patch/multiple-patches/project.conf000066400000000000000000000000321437515270000255620ustar00rootroot00000000000000# Basic project name: foo buildstream-1.6.9/tests/sources/patch/multiple-patches/target.bst000066400000000000000000000002311437515270000252460ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: file.txt - kind: patch path: file_1.patch - kind: patch path: file_2.patch buildstream-1.6.9/tests/sources/patch/separate-patch-dir/000077500000000000000000000000001437515270000234475ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/separate-patch-dir/file_1.patch000066400000000000000000000003001437515270000256200ustar00rootroot00000000000000diff --git a/file.txt b/file.txt index a496efe..341ef26 100644 --- a/file.txt +++ b/file.txt @@ -1 +1 @@ -This is a text file in a directory +This is text file in a directory with superpowers buildstream-1.6.9/tests/sources/patch/separate-patch-dir/files/000077500000000000000000000000001437515270000245515ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/separate-patch-dir/files/test-dir/000077500000000000000000000000001437515270000263045ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/patch/separate-patch-dir/files/test-dir/file.txt000066400000000000000000000000431437515270000277610ustar00rootroot00000000000000This is a text file in a directory buildstream-1.6.9/tests/sources/patch/separate-patch-dir/project.conf000066400000000000000000000000321437515270000257570ustar00rootroot00000000000000# Basic project name: foo 
buildstream-1.6.9/tests/sources/patch/separate-patch-dir/target.bst000066400000000000000000000002111437515270000254410ustar00rootroot00000000000000kind: import description: This is the pony sources: - kind: local path: files - kind: patch path: file_1.patch directory: test-dir buildstream-1.6.9/tests/sources/pip.py000066400000000000000000000030371437515270000200400ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain from buildstream import _yaml from tests.testutils import cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'pip', ) def generate_project(project_dir, tmpdir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({'name': 'foo'}, project_file) # Test that without ref, consistency is set appropriately. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-ref')) def test_no_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) assert cli.get_element_state(project, 'target.bst') == 'no reference' # Test that pip is not allowed to be the first source @pytest.mark.datafiles(os.path.join(DATA_DIR, 'first-source-pip')) def test_first_source(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) result = cli.run(project=project, args=[ 'show', 'target.bst' ]) result.assert_main_error(ErrorDomain.ELEMENT, None) # Test that error is raised when neither packges nor requirements files # have been specified @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-packages')) def test_no_packages(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) result = cli.run(project=project, args=[ 'show', 'target.bst' ]) result.assert_main_error(ErrorDomain.SOURCE, None) buildstream-1.6.9/tests/sources/pip/000077500000000000000000000000001437515270000174635ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/pip/first-source-pip/000077500000000000000000000000001437515270000226765ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/pip/first-source-pip/target.bst000066400000000000000000000001671437515270000247020ustar00rootroot00000000000000kind: import description: pip should not be allowed to be the first source sources: - kind: pip packages: - flake8 buildstream-1.6.9/tests/sources/pip/no-packages/000077500000000000000000000000001437515270000216535ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/pip/no-packages/file000066400000000000000000000000151437515270000225110ustar00rootroot00000000000000Hello World! buildstream-1.6.9/tests/sources/pip/no-packages/target.bst000066400000000000000000000001621437515270000236520ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: local path: file - kind: pip buildstream-1.6.9/tests/sources/pip/no-ref/000077500000000000000000000000001437515270000206515ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/pip/no-ref/file000066400000000000000000000000151437515270000215070ustar00rootroot00000000000000Hello World! buildstream-1.6.9/tests/sources/pip/no-ref/target.bst000066400000000000000000000002111437515270000226430ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. 
sources: - kind: local path: file - kind: pip packages: - flake8 buildstream-1.6.9/tests/sources/previous_source_access.py000066400000000000000000000036751437515270000240350ustar00rootroot00000000000000import os import pytest from buildstream import _yaml from tests.testutils import cli DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'previous_source_access' ) ################################################################## # Tests # ################################################################## # Test that plugins can access data from previous sources @pytest.mark.datafiles(DATA_DIR) def test_custom_transform_source(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) # Set the project_dir alias in project.conf to the path to the tested project project_config_path = os.path.join(project, "project.conf") project_config = _yaml.load(project_config_path) aliases = _yaml.node_get(project_config, dict, "aliases") aliases["project_dir"] = "file://{}".format(project) _yaml.dump(_yaml.node_sanitize(project_config), project_config_path) # Ensure we can track result = cli.run(project=project, args=[ 'track', 'target.bst' ]) result.assert_success() # Ensure we can fetch result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_success() # Ensure we get correct output from foo_transform result = cli.run(project=project, args=[ 'build', 'target.bst' ]) destpath = os.path.join(cli.directory, 'checkout') result = cli.run(project=project, args=[ 'checkout', 'target.bst', destpath ]) result.assert_success() # Assert that files from both sources exist, and that they have # the same content assert os.path.exists(os.path.join(destpath, 'file')) assert os.path.exists(os.path.join(destpath, 'filetransform')) with open(os.path.join(destpath, 'file')) as file1: with open(os.path.join(destpath, 'filetransform')) as file2: assert file1.read() == file2.read() buildstream-1.6.9/tests/sources/previous_source_access/000077500000000000000000000000001437515270000234505ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/previous_source_access/elements/000077500000000000000000000000001437515270000252645ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/previous_source_access/elements/target.bst000066400000000000000000000001331437515270000272610ustar00rootroot00000000000000kind: import sources: - kind: remote url: project_dir:/files/file - kind: foo_transform buildstream-1.6.9/tests/sources/previous_source_access/files/000077500000000000000000000000001437515270000245525ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/previous_source_access/files/file000066400000000000000000000000151437515270000254100ustar00rootroot00000000000000Hello World! buildstream-1.6.9/tests/sources/previous_source_access/plugins/000077500000000000000000000000001437515270000251315ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/previous_source_access/plugins/sources/000077500000000000000000000000001437515270000266145ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/previous_source_access/plugins/sources/foo_transform.py000066400000000000000000000055041437515270000320500ustar00rootroot00000000000000""" foo_transform - transform "file" from previous sources into "filetransform" =========================================================================== This is a test source plugin that looks for a file named "file" staged by previous sources, and copies its contents to a file called "filetransform". 
""" import os import hashlib from buildstream import Consistency, Source, SourceError, utils class FooTransformSource(Source): # We need access to previous both at track time and fetch time BST_REQUIRES_PREVIOUS_SOURCES_TRACK = True BST_REQUIRES_PREVIOUS_SOURCES_FETCH = True @property def mirror(self): """Directory where this source should stage its files """ path = os.path.join(self.get_mirror_directory(), self.name, self.ref.strip()) os.makedirs(path, exist_ok=True) return path def configure(self, node): self.node_validate(node, ['ref'] + Source.COMMON_CONFIG_KEYS) self.ref = self.node_get_member(node, str, 'ref', None) def preflight(self): pass def get_unique_key(self): return (self.ref,) def get_consistency(self): if self.ref is None: return Consistency.INCONSISTENT # If we have a file called "filetransform", verify that its checksum # matches our ref. Otherwise, it resolved but not cached. fpath = os.path.join(self.mirror, 'filetransform') try: with open(fpath, 'rb') as f: if hashlib.sha256(f.read()).hexdigest() == self.ref.strip(): return Consistency.CACHED except Exception: pass return Consistency.RESOLVED def get_ref(self): return self.ref def set_ref(self, ref, node): self.ref = node['ref'] = ref def track(self, previous_sources_dir): # Store the checksum of the file from previous source as our ref fpath = os.path.join(previous_sources_dir, 'file') with open(fpath, 'rb') as f: return hashlib.sha256(f.read()).hexdigest() def fetch(self, previous_sources_dir): fpath = os.path.join(previous_sources_dir, 'file') # Verify that the checksum of the file from previous source matches # our ref with open(fpath, 'rb') as f: if hashlib.sha256(f.read()).hexdigest() != self.ref.strip(): raise SourceError("Element references do not match") # Copy "file" as "filetransform" newfpath = os.path.join(self.mirror, 'filetransform') utils.safe_copy(fpath, newfpath) def stage(self, directory): # Simply stage the "filetransform" file utils.safe_copy(os.path.join(self.mirror, 'filetransform'), os.path.join(directory, 'filetransform')) def setup(): return FooTransformSource buildstream-1.6.9/tests/sources/previous_source_access/project.conf000066400000000000000000000003061437515270000257640ustar00rootroot00000000000000# Project with local source plugins name: foo element-path: elements aliases: project_dir: file://{project_dir} plugins: - origin: local path: plugins/sources sources: foo_transform: 0 buildstream-1.6.9/tests/sources/remote.py000066400000000000000000000135411437515270000205440ustar00rootroot00000000000000import os import pytest from buildstream._exceptions import ErrorDomain from buildstream import _yaml from tests.testutils import cli from tests.testutils.file_server import create_file_server DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'remote', ) def generate_project(project_dir, tmpdir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': "file:///" + str(tmpdir) } }, project_file) def generate_project_file_server(server, project_dir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': server.base_url() } }, project_file) # Test that without ref, consistency is set appropriately. 
@pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-ref')) def test_no_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) assert cli.get_element_state(project, 'target.bst') == 'no reference' # Here we are doing a fetch on a file that doesn't exist. target.bst # refers to 'file' but that file is not present. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'missing-file')) def test_missing_file(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'path-in-filename')) def test_path_in_filename(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) # The bst file has a / in the filename param result.assert_main_error(ErrorDomain.SOURCE, "filename-contains-directory") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'single-file')) def test_simple_file_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_success() result = cli.run(project=project, args=[ 'build', 'target.bst' ]) result.assert_success() result = cli.run(project=project, args=[ 'checkout', 'target.bst', checkoutdir ]) result.assert_success() # Note that the url of the file in target.bst is actually /dir/file # but this tests confirms we take the basename assert(os.path.exists(os.path.join(checkoutdir, 'file'))) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'single-file-custom-name')) def test_simple_file_custom_name_build(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_success() result = cli.run(project=project, args=[ 'build', 'target.bst' ]) result.assert_success() result = cli.run(project=project, args=[ 'checkout', 'target.bst', checkoutdir ]) result.assert_success() assert(not os.path.exists(os.path.join(checkoutdir, 'file'))) assert(os.path.exists(os.path.join(checkoutdir, 'custom-file'))) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'unique-keys')) def test_unique_key(cli, tmpdir, datafiles): '''This test confirms that the 'filename' parameter is honoured when it comes to generating a cache key for the source. ''' project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) assert cli.get_element_state(project, 'target.bst') == "fetch needed" assert cli.get_element_state(project, 'target-custom.bst') == "fetch needed" # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) # We should download the file only once assert cli.get_element_state(project, 'target.bst') == 'buildable' assert cli.get_element_state(project, 'target-custom.bst') == 'buildable' # But the cache key is different because the 'filename' is different. 
assert cli.get_element_key(project, 'target.bst') != \ cli.get_element_key(project, 'target-custom.bst') @pytest.mark.parametrize('server_type', ('FTP', 'HTTP')) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'single-file')) def test_use_netrc(cli, datafiles, server_type, tmpdir): fake_home = os.path.join(str(tmpdir), 'fake_home') os.makedirs(fake_home, exist_ok=True) project = str(datafiles) checkoutdir = os.path.join(str(tmpdir), 'checkout') os.environ['HOME'] = fake_home with open(os.path.join(fake_home, '.netrc'), 'wb') as f: os.fchmod(f.fileno(), 0o700) f.write(b'machine 127.0.0.1\n') f.write(b'login testuser\n') f.write(b'password 12345\n') with create_file_server(server_type) as server: server.add_user('testuser', '12345', project) generate_project_file_server(server, project) server.start() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() checkout_file = os.path.join(checkoutdir, 'file') assert(os.path.exists(checkout_file)) buildstream-1.6.9/tests/sources/remote/000077500000000000000000000000001437515270000201665ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/missing-file/000077500000000000000000000000001437515270000225545ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/missing-file/target.bst000066400000000000000000000002241437515270000245520ustar00rootroot00000000000000kind: autotools description: The kind of this element is irrelevant. sources: - kind: remote url: tmpdir:/file ref: abcdef filename: filename buildstream-1.6.9/tests/sources/remote/no-ref/000077500000000000000000000000001437515270000213545ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/no-ref/file000066400000000000000000000000141437515270000222110ustar00rootroot00000000000000filecontent buildstream-1.6.9/tests/sources/remote/no-ref/target.bst000066400000000000000000000001611437515270000233520ustar00rootroot00000000000000kind: autotools description: The kind of this element is irrelevant. 
sources: - kind: remote url: tmpdir:/file buildstream-1.6.9/tests/sources/remote/path-in-filename/000077500000000000000000000000001437515270000233045ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/path-in-filename/dir/000077500000000000000000000000001437515270000240625ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/path-in-filename/dir/file000066400000000000000000000000001437515270000247120ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/path-in-filename/target.bst000066400000000000000000000002601437515270000253020ustar00rootroot00000000000000kind: import description: test sources: - kind: remote url: tmpdir:/dir/file ref: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 filename: path/to/file buildstream-1.6.9/tests/sources/remote/single-file-custom-name/000077500000000000000000000000001437515270000246125ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/single-file-custom-name/dir/000077500000000000000000000000001437515270000253705ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/single-file-custom-name/dir/file000066400000000000000000000000001437515270000262200ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/single-file-custom-name/target.bst000066400000000000000000000002571437515270000266160ustar00rootroot00000000000000kind: import description: test sources: - kind: remote url: tmpdir:/dir/file ref: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 filename: custom-file buildstream-1.6.9/tests/sources/remote/single-file/000077500000000000000000000000001437515270000223645ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/single-file/dir/000077500000000000000000000000001437515270000231425ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/single-file/dir/file000066400000000000000000000000001437515270000237720ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/single-file/target.bst000066400000000000000000000002271437515270000243650ustar00rootroot00000000000000kind: import description: test sources: - kind: remote url: tmpdir:/dir/file ref: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 buildstream-1.6.9/tests/sources/remote/unique-keys/000077500000000000000000000000001437515270000224455ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/unique-keys/dir/000077500000000000000000000000001437515270000232235ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/unique-keys/dir/file000066400000000000000000000000001437515270000240530ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/remote/unique-keys/target-custom.bst000066400000000000000000000002641437515270000257570ustar00rootroot00000000000000kind: import description: test sources: - kind: remote url: tmpdir:/dir/file ref: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 filename: some-custom-file buildstream-1.6.9/tests/sources/remote/unique-keys/target.bst000066400000000000000000000002271437515270000244460ustar00rootroot00000000000000kind: import description: test sources: - kind: remote url: tmpdir:/dir/file ref: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 buildstream-1.6.9/tests/sources/tar.py000066400000000000000000000345051437515270000200420ustar00rootroot00000000000000import os import pytest import tarfile import tempfile import subprocess import urllib.parse from shutil import copyfile, rmtree from buildstream._exceptions import ErrorDomain from 
buildstream import _yaml from tests.testutils import cli from tests.testutils.file_server import create_file_server from tests.testutils.site import HAVE_LZIP from . import list_dir_contents DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'tar', ) def _assemble_tar(workingdir, srcdir, dstfile): old_dir = os.getcwd() os.chdir(workingdir) with tarfile.open(dstfile, "w:gz") as tar: tar.add(srcdir) os.chdir(old_dir) def _assemble_tar_lz(workingdir, srcdir, dstfile): old_dir = os.getcwd() os.chdir(workingdir) with tempfile.TemporaryFile() as uncompressed: with tarfile.open(fileobj=uncompressed, mode="w:") as tar: tar.add(srcdir) uncompressed.seek(0, 0) with open(dstfile, 'wb') as dst: subprocess.call(['lzip'], stdin=uncompressed, stdout=dst) os.chdir(old_dir) def generate_project(project_dir, tmpdir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': "file:///" + str(tmpdir) } }, project_file) def generate_project_file_server(base_url, project_dir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': base_url } }, project_file) # Test that without ref, consistency is set appropriately. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-ref')) def test_no_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) assert cli.get_element_state(project, 'target.bst') == 'no reference' # Test that when I fetch a nonexistent URL, errors are handled gracefully and a retry is performed. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_fetch_bad_url(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) assert "FAILURE Try #" in result.stderr result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) # Test that when I fetch with an invalid ref, it fails. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_fetch_bad_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") _assemble_tar(os.path.join(str(datafiles), "content"), "a", src_tar) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) # Test that when tracking with a ref set, there is a warning @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_track_warning(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") _assemble_tar(os.path.join(str(datafiles), "content"), "a", src_tar) # Track it result = cli.run(project=project, args=[ 'track', 'target.bst' ]) result.assert_success() assert "Potential man-in-the-middle attack!" 
in result.stderr # Test that a staged checkout matches what was tarred up, with the default first subdir @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) @pytest.mark.parametrize("srcdir", ["a", "./a"]) def test_stage_default_basedir(cli, tmpdir, datafiles, srcdir): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") _assemble_tar(os.path.join(str(datafiles), "content"), srcdir, src_tar) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '*') original_dir = os.path.join(str(datafiles), "content", "a") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that a staged checkout matches what was tarred up, with an empty base-dir @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-basedir')) @pytest.mark.parametrize("srcdir", ["a", "./a"]) def test_stage_no_basedir(cli, tmpdir, datafiles, srcdir): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") _assemble_tar(os.path.join(str(datafiles), "content"), srcdir, src_tar) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the full content of the tarball is checked out (base-dir: '') original_dir = os.path.join(str(datafiles), "content") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that a staged checkout matches what was tarred up, with an explicit basedir @pytest.mark.datafiles(os.path.join(DATA_DIR, 'explicit-basedir')) @pytest.mark.parametrize("srcdir", ["a", "./a"]) def test_stage_explicit_basedir(cli, tmpdir, datafiles, srcdir): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") _assemble_tar(os.path.join(str(datafiles), "content"), srcdir, src_tar) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '*') original_dir = os.path.join(str(datafiles), 
"content", "a") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that we succeed to extract tarballs with hardlinks when stripping the # leading paths @pytest.mark.datafiles(os.path.join(DATA_DIR, 'contains-links')) def test_stage_contains_links(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") # Create a hardlink, we wont trust git to store that info for us os.makedirs(os.path.join(str(datafiles), "content", "base-directory", "subdir2"), exist_ok=True) file1 = os.path.join(str(datafiles), "content", "base-directory", "subdir1", "file.txt") file2 = os.path.join(str(datafiles), "content", "base-directory", "subdir2", "file.txt") os.link(file1, file2) _assemble_tar(os.path.join(str(datafiles), "content"), "base-directory", src_tar) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '*') original_dir = os.path.join(str(datafiles), "content", "base-directory") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) @pytest.mark.skipif(not HAVE_LZIP, reason='lzip is not available') @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) @pytest.mark.parametrize("srcdir", ["a", "./a"]) def test_stage_default_basedir_lzip(cli, tmpdir, datafiles, srcdir): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.lz") _assemble_tar_lz(os.path.join(str(datafiles), "content"), srcdir, src_tar) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target-lz.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target-lz.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target-lz.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target-lz.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '*') original_dir = os.path.join(str(datafiles), "content", "a") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) @pytest.mark.parametrize('server_type', ('FTP', 'HTTP')) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_use_netrc(cli, datafiles, server_type, tmpdir): file_server_files = os.path.join(str(tmpdir), 'file_server') fake_home = os.path.join(str(tmpdir), 'fake_home') os.makedirs(file_server_files, exist_ok=True) os.makedirs(fake_home, exist_ok=True) project = str(datafiles) checkoutdir = os.path.join(str(tmpdir), 'checkout') os.environ['HOME'] = fake_home with open(os.path.join(fake_home, '.netrc'), 'wb') as f: os.fchmod(f.fileno(), 0o700) f.write(b'machine 
127.0.0.1\n') f.write(b'login testuser\n') f.write(b'password 12345\n') with create_file_server(server_type) as server: server.add_user('testuser', '12345', file_server_files) generate_project_file_server(server.base_url(), project) src_tar = os.path.join(file_server_files, 'a.tar.gz') _assemble_tar(os.path.join(str(datafiles), 'content'), 'a', src_tar) server.start() result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() original_dir = os.path.join(str(datafiles), 'content', 'a') original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) @pytest.mark.parametrize('server_type', ('FTP', 'HTTP')) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_netrc_already_specified_user(cli, datafiles, server_type, tmpdir): file_server_files = os.path.join(str(tmpdir), 'file_server') fake_home = os.path.join(str(tmpdir), 'fake_home') os.makedirs(file_server_files, exist_ok=True) os.makedirs(fake_home, exist_ok=True) project = str(datafiles) checkoutdir = os.path.join(str(tmpdir), 'checkout') os.environ['HOME'] = fake_home with open(os.path.join(fake_home, '.netrc'), 'wb') as f: os.fchmod(f.fileno(), 0o700) f.write(b'machine 127.0.0.1\n') f.write(b'login testuser\n') f.write(b'password 12345\n') with create_file_server(server_type) as server: server.add_user('otheruser', '12345', file_server_files) parts = urllib.parse.urlsplit(server.base_url()) base_url = urllib.parse.urlunsplit([parts[0]] + ['otheruser@{}'.format(parts[1])] + list(parts[2:])) generate_project_file_server(base_url, project) src_tar = os.path.join(file_server_files, 'a.tar.gz') _assemble_tar(os.path.join(str(datafiles), 'content'), 'a', src_tar) server.start() result = cli.run(project=project, args=['track', 'target.bst']) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) # Test that BuildStream doesn't crash if HOME is unset while # the netrc module is trying to find its ~/.netrc file.
@pytest.mark.xfail(reason="Cannot set environment variable to None when running tests in subprocesses") @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_homeless_environment(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Create a local tar src_tar = os.path.join(str(tmpdir), "a.tar.gz") _assemble_tar(os.path.join(str(datafiles), "content"), "a", src_tar) # Use a track, make sure the plugin tries to find a ~/.netrc result = cli.run(project=project, args=['track', 'target.bst'], env={'HOME': None}) result.assert_success() buildstream-1.6.9/tests/sources/tar/000077500000000000000000000000001437515270000174615ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/contains-links/000077500000000000000000000000001437515270000224155ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/contains-links/content/000077500000000000000000000000001437515270000240675ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/contains-links/content/base-directory/000077500000000000000000000000001437515270000270035ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/contains-links/content/base-directory/subdir1/000077500000000000000000000000001437515270000303545ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/contains-links/content/base-directory/subdir1/file.txt000066400000000000000000000000051437515270000320270ustar00rootroot00000000000000pony buildstream-1.6.9/tests/sources/tar/contains-links/target.bst000066400000000000000000000001721437515270000244150ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: tar url: tmpdir:/a.tar.gz ref: foo buildstream-1.6.9/tests/sources/tar/explicit-basedir/000077500000000000000000000000001437515270000227115ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/explicit-basedir/content/000077500000000000000000000000001437515270000243635ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/explicit-basedir/content/a/000077500000000000000000000000001437515270000246035ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/explicit-basedir/content/a/b/000077500000000000000000000000001437515270000250245ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/explicit-basedir/content/a/b/d000066400000000000000000000000021437515270000251620ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/tar/explicit-basedir/content/a/c000066400000000000000000000000021437515270000247400ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/tar/explicit-basedir/target.bst000066400000000000000000000002121437515270000247040ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. 
sources: - kind: tar url: tmpdir:/a.tar.gz ref: foo base-dir: 'a' buildstream-1.6.9/tests/sources/tar/fetch/000077500000000000000000000000001437515270000205525ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/fetch/content/000077500000000000000000000000001437515270000222245ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/fetch/content/a/000077500000000000000000000000001437515270000224445ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/fetch/content/a/b/000077500000000000000000000000001437515270000226655ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/fetch/content/a/b/d000066400000000000000000000000021437515270000230230ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/tar/fetch/content/a/c000066400000000000000000000000021437515270000226010ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/tar/fetch/target-lz.bst000066400000000000000000000001721437515270000231750ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: tar url: tmpdir:/a.tar.lz ref: foo buildstream-1.6.9/tests/sources/tar/fetch/target.bst000066400000000000000000000001721437515270000225520ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: tar url: tmpdir:/a.tar.gz ref: foo buildstream-1.6.9/tests/sources/tar/no-basedir/000077500000000000000000000000001437515270000215045ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-basedir/content/000077500000000000000000000000001437515270000231565ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-basedir/content/a/000077500000000000000000000000001437515270000233765ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-basedir/content/a/b/000077500000000000000000000000001437515270000236175ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-basedir/content/a/b/d000066400000000000000000000000021437515270000237550ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/tar/no-basedir/content/a/c000066400000000000000000000000021437515270000235330ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/tar/no-basedir/target.bst000066400000000000000000000002111437515270000234760ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: tar url: tmpdir:/a.tar.gz ref: foo base-dir: '' buildstream-1.6.9/tests/sources/tar/no-ref/000077500000000000000000000000001437515270000206475ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-ref/a/000077500000000000000000000000001437515270000210675ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-ref/a/b/000077500000000000000000000000001437515270000213105ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/tar/no-ref/a/b/d000066400000000000000000000000021437515270000214460ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/tar/no-ref/a/c000066400000000000000000000000021437515270000212240ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/tar/no-ref/target.bst000066400000000000000000000001621437515270000226460ustar00rootroot00000000000000kind: autotools description: The kind of this element is irrelevant. 
sources: - kind: tar url: tmpdir:/a.tar.gz buildstream-1.6.9/tests/sources/zip.py000066400000000000000000000211351437515270000200510ustar00rootroot00000000000000import os import pytest import zipfile from buildstream._exceptions import ErrorDomain from buildstream import _yaml from tests.testutils import cli from tests.testutils.file_server import create_file_server from . import list_dir_contents DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'zip', ) def _assemble_zip(workingdir, dstfile): old_dir = os.getcwd() os.chdir(workingdir) with zipfile.ZipFile(dstfile, "w") as zip: for root, dirs, files in os.walk('.'): names = dirs + files names = [os.path.join(root, name) for name in names] for name in names: zip.write(name) os.chdir(old_dir) def generate_project(project_dir, tmpdir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': "file:///" + str(tmpdir) } }, project_file) def generate_project_file_server(server, project_dir): project_file = os.path.join(project_dir, "project.conf") _yaml.dump({ 'name': 'foo', 'aliases': { 'tmpdir': server.base_url() } }, project_file) # Test that without ref, consistency is set appropriately. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-ref')) def test_no_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) assert cli.get_element_state(project, 'target.bst') == 'no reference' # Test that when I fetch a nonexistent URL, errors are handled gracefully and a retry is performed. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_fetch_bad_url(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) assert "FAILURE Try #" in result.stderr result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) # Test that when I fetch with an invalid ref, it fails. @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_fetch_bad_ref(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Create a local tar src_zip = os.path.join(str(tmpdir), "a.zip") _assemble_zip(os.path.join(str(datafiles), "content"), src_zip) # Try to fetch it result = cli.run(project=project, args=[ 'fetch', 'target.bst' ]) result.assert_main_error(ErrorDomain.STREAM, None) result.assert_task_error(ErrorDomain.SOURCE, None) # Test that when tracking with a ref set, there is a warning @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_track_warning(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) # Create a local tar src_zip = os.path.join(str(tmpdir), "a.zip") _assemble_zip(os.path.join(str(datafiles), "content"), src_zip) # Track it result = cli.run(project=project, args=[ 'track', 'target.bst' ]) result.assert_success() assert "Potential man-in-the-middle attack!" 
in result.stderr # Test that a staged checkout matches what was tarred up, with the default first subdir @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_stage_default_basedir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_zip = os.path.join(str(tmpdir), "a.zip") _assemble_zip(os.path.join(str(datafiles), "content"), src_zip) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '*') original_dir = os.path.join(str(datafiles), "content", "a") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that a staged checkout matches what was tarred up, with an empty base-dir @pytest.mark.datafiles(os.path.join(DATA_DIR, 'no-basedir')) def test_stage_no_basedir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_zip = os.path.join(str(tmpdir), "a.zip") _assemble_zip(os.path.join(str(datafiles), "content"), src_zip) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the full content of the tarball is checked out (base-dir: '') original_dir = os.path.join(str(datafiles), "content") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) # Test that a staged checkout matches what was tarred up, with an explicit basedir @pytest.mark.datafiles(os.path.join(DATA_DIR, 'explicit-basedir')) def test_stage_explicit_basedir(cli, tmpdir, datafiles): project = os.path.join(datafiles.dirname, datafiles.basename) generate_project(project, tmpdir) checkoutdir = os.path.join(str(tmpdir), "checkout") # Create a local tar src_zip = os.path.join(str(tmpdir), "a.zip") _assemble_zip(os.path.join(str(datafiles), "content"), src_zip) # Track, fetch, build, checkout result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() # Check that the content of the first directory is checked out (base-dir: '*') original_dir = os.path.join(str(datafiles), "content", "a") original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) @pytest.mark.parametrize('server_type', 
('FTP', 'HTTP')) @pytest.mark.datafiles(os.path.join(DATA_DIR, 'fetch')) def test_use_netrc(cli, datafiles, server_type, tmpdir): file_server_files = os.path.join(str(tmpdir), 'file_server') fake_home = os.path.join(str(tmpdir), 'fake_home') os.makedirs(file_server_files, exist_ok=True) os.makedirs(fake_home, exist_ok=True) project = str(datafiles) checkoutdir = os.path.join(str(tmpdir), 'checkout') os.environ['HOME'] = fake_home with open(os.path.join(fake_home, '.netrc'), 'wb') as f: os.fchmod(f.fileno(), 0o700) f.write(b'machine 127.0.0.1\n') f.write(b'login testuser\n') f.write(b'password 12345\n') with create_file_server(server_type) as server: server.add_user('testuser', '12345', file_server_files) generate_project_file_server(server, project) src_zip = os.path.join(file_server_files, 'a.zip') _assemble_zip(os.path.join(str(datafiles), 'content'), src_zip) server.start() result = cli.run(project=project, args=['track', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['fetch', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['build', 'target.bst']) result.assert_success() result = cli.run(project=project, args=['checkout', 'target.bst', checkoutdir]) result.assert_success() original_dir = os.path.join(str(datafiles), 'content', 'a') original_contents = list_dir_contents(original_dir) checkout_contents = list_dir_contents(checkoutdir) assert(checkout_contents == original_contents) buildstream-1.6.9/tests/sources/zip/000077500000000000000000000000001437515270000174755ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/explicit-basedir/000077500000000000000000000000001437515270000227255ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/explicit-basedir/content/000077500000000000000000000000001437515270000243775ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/explicit-basedir/content/a/000077500000000000000000000000001437515270000246175ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/explicit-basedir/content/a/b/000077500000000000000000000000001437515270000250405ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/explicit-basedir/content/a/b/d000066400000000000000000000000021437515270000251760ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/zip/explicit-basedir/content/a/c000066400000000000000000000000021437515270000247540ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/zip/explicit-basedir/target.bst000066400000000000000000000002071437515270000247240ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. 
sources: - kind: zip url: tmpdir:/a.zip ref: foo base-dir: 'a' buildstream-1.6.9/tests/sources/zip/fetch/000077500000000000000000000000001437515270000205665ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/fetch/content/000077500000000000000000000000001437515270000222405ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/fetch/content/a/000077500000000000000000000000001437515270000224605ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/fetch/content/a/b/000077500000000000000000000000001437515270000227015ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/fetch/content/a/b/d000066400000000000000000000000021437515270000230370ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/zip/fetch/content/a/c000066400000000000000000000000021437515270000226150ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/zip/fetch/target.bst000066400000000000000000000001671437515270000225720ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: zip url: tmpdir:/a.zip ref: foo buildstream-1.6.9/tests/sources/zip/no-basedir/000077500000000000000000000000001437515270000215205ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-basedir/content/000077500000000000000000000000001437515270000231725ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-basedir/content/a/000077500000000000000000000000001437515270000234125ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-basedir/content/a/b/000077500000000000000000000000001437515270000236335ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-basedir/content/a/b/d000066400000000000000000000000021437515270000237710ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/zip/no-basedir/content/a/c000066400000000000000000000000021437515270000235470ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/zip/no-basedir/target.bst000066400000000000000000000002061437515270000235160ustar00rootroot00000000000000kind: import description: The kind of this element is irrelevant. sources: - kind: zip url: tmpdir:/a.zip ref: foo base-dir: '' buildstream-1.6.9/tests/sources/zip/no-ref/000077500000000000000000000000001437515270000206635ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-ref/a/000077500000000000000000000000001437515270000211035ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-ref/a/b/000077500000000000000000000000001437515270000213245ustar00rootroot00000000000000buildstream-1.6.9/tests/sources/zip/no-ref/a/b/d000066400000000000000000000000021437515270000214620ustar00rootroot00000000000000d buildstream-1.6.9/tests/sources/zip/no-ref/a/c000066400000000000000000000000021437515270000212400ustar00rootroot00000000000000c buildstream-1.6.9/tests/sources/zip/no-ref/target.bst000066400000000000000000000001571437515270000226660ustar00rootroot00000000000000kind: autotools description: The kind of this element is irrelevant. 
sources: - kind: zip url: tmpdir:/a.zip buildstream-1.6.9/tests/testutils/000077500000000000000000000000001437515270000172505ustar00rootroot00000000000000buildstream-1.6.9/tests/testutils/__init__.py000066400000000000000000000003711437515270000213620ustar00rootroot00000000000000from .runcli import cli, cli_integration from .repo import create_repo, ALL_REPO_KINDS from .artifactshare import create_artifact_share from .element_generators import create_element_size, update_element_size from .junction import generate_junction buildstream-1.6.9/tests/testutils/artifactshare.py000066400000000000000000000132341437515270000224450ustar00rootroot00000000000000import string import pytest import subprocess import os import sys import shutil import signal from collections import namedtuple from contextlib import contextmanager from multiprocessing import Process, Queue from buildstream import _yaml from buildstream._artifactcache.casserver import create_server from buildstream._artifactcache.cascache import CASCache from buildstream._exceptions import CASError # ArtifactShare() # # Abstract class providing scaffolding for # generating data to be used with various sources # # Args: # directory (str): The base temp directory for the test # total_space (int): Mock total disk space on artifact server # free_space (int): Mock free disk space on artifact server # class ArtifactShare(): def __init__(self, directory, *, total_space=None, free_space=None, min_head_size=int(2e9), max_head_size=int(10e9)): # The working directory for the artifact share (in case it # needs to do something outside of it's backend's storage folder). # self.directory = os.path.abspath(directory) # The directory the actual repo will be stored in. # # Unless this gets more complicated, just use this directly # in tests as a remote artifact push/pull configuration # self.repodir = os.path.join(self.directory, 'repo') os.makedirs(self.repodir) self.cas = CASCache(self.repodir) self.total_space = total_space self.free_space = free_space self.max_head_size = max_head_size self.min_head_size = min_head_size q = Queue() self.process = Process(target=self.run, args=(q,)) self.process.start() # Retrieve port from server subprocess port = q.get() self.repo = 'http://localhost:{}'.format(port) # run(): # # Run the artifact server. # def run(self, q): try: import pytest_cov except ImportError: pass else: pytest_cov.embed.cleanup_on_sigterm() # Optionally mock statvfs if self.total_space: if self.free_space is None: self.free_space = self.total_space os.statvfs = self._mock_statvfs server = create_server(self.repodir, max_head_size=self.max_head_size, min_head_size=self.min_head_size, enable_push=True) port = server.add_insecure_port('127.0.0.1:0') server.start() # Send port to parent q.put(port) # Sleep until termination by signal signal.pause() # has_artifact(): # # Checks whether the artifact is present in the share # # Args: # project_name (str): The project name # element_name (str): The element name # cache_key (str): The cache key # # Returns: # (bool): True if the artifact exists in the share, otherwise false. 
def has_artifact(self, project_name, element_name, cache_key): # NOTE: This should be kept in line with our # artifact cache code, the below is the # same algo for creating an artifact reference # # Chop off the .bst suffix first assert element_name.endswith('.bst') element_name = element_name[:-4] valid_chars = string.digits + string.ascii_letters + '-._' element_name = ''.join([ x if x in valid_chars else '_' for x in element_name ]) artifact_key = '{0}/{1}/{2}'.format(project_name, element_name, cache_key) try: tree = self.cas.resolve_ref(artifact_key) reachable = set() try: self.cas._reachable_refs_dir(reachable, tree, update_mtime=False) except FileNotFoundError: return False for digest in reachable: object_name = os.path.join(self.cas.casdir, 'objects', digest[:2], digest[2:]) if not os.path.exists(object_name): return False return True except CASError: return False # close(): # # Remove the artifact share. # def close(self): self.process.terminate() self.process.join() shutil.rmtree(self.directory) def _mock_statvfs(self, path): repo_size = 0 for root, _, files in os.walk(self.repodir): for filename in files: repo_size += os.path.getsize(os.path.join(root, filename)) return statvfs_result(f_blocks=self.total_space, f_bfree=self.free_space - repo_size, f_bavail=self.free_space - repo_size, f_bsize=1) def _message_handler(self, message, context): # We need a message handler because this will own an ArtifactCache # which can in turn fire messages. # Just unconditionally print the messages to stderr print(message.message, file=sys.stderr) # create_artifact_share() # # Create an ArtifactShare for use in a test case # @contextmanager def create_artifact_share(directory, *, total_space=None, free_space=None, min_head_size=int(2e9), max_head_size=int(10e9)): share = ArtifactShare(directory, total_space=total_space, free_space=free_space, min_head_size=min_head_size, max_head_size=max_head_size) try: yield share finally: share.close() statvfs_result = namedtuple('statvfs_result', 'f_blocks f_bfree f_bsize f_bavail') buildstream-1.6.9/tests/testutils/element_generators.py000066400000000000000000000063401437515270000235070ustar00rootroot00000000000000import os from buildstream import _yaml from buildstream import utils from . import create_repo # create_element_size() # # Creates an import element with a git repo, using random # data to create a file in that repo of the specified size, # such that building it will add an artifact of the specified # size to the artifact cache. # # Args: # name: (str) of the element name (e.g. target.bst) # project_dir (str): The path to the project # element_path (str): The element path within the project # dependencies: A list of strings (can also be an empty list) # size: (int) size of the element in bytes # # Returns: # (Repo): A git repo which can be used to introduce trackable changes # by using the update_element_size() function below. # def create_element_size(name, project_dir, elements_path, dependencies, size): full_elements_path = os.path.join(project_dir, elements_path) os.makedirs(full_elements_path, exist_ok=True) # Create a git repo repodir = os.path.join(project_dir, 'repos') repo = create_repo('git', repodir, subdir=name) with utils._tempdir(dir=project_dir) as tmp: # We use a data/ subdir in the git repo we create, # and we set the import element to only extract that # part; this ensures we never include a .git/ directory # in the cached artifacts for these sized elements. 
# datadir = os.path.join(tmp, 'data') os.makedirs(datadir) # Use /dev/urandom to create the sized file in the datadir with open(os.path.join(datadir, name), 'wb+') as f: f.write(os.urandom(size)) # Create the git repo from the temp directory ref = repo.create(tmp) element = { 'kind': 'import', 'sources': [ repo.source_config(ref=ref) ], 'config': { # Extract only the data directory 'source': 'data' }, 'depends': dependencies } _yaml.dump(element, os.path.join(project_dir, elements_path, name)) # Return the repo, so that it can later be used to add commits return repo # update_element_size() # # Updates a repo returned by create_element_size() such that # the newly added commit is completely changed, and has the newly # specified size. # # The name and project_dir arguments must match the arguments # previously given to create_element_size() # # Args: # name: (str) of the element name (e.g. target.bst) # project_dir (str): The path to the project # repo: (Repo) The Repo returned by create_element_size() # size: (int) The new size which the element generates, in bytes # # Returns: # (Repo): A git repo which can be used to introduce trackable changes # by using the update_element_size() function below. # def update_element_size(name, project_dir, repo, size): with utils._tempdir(dir=project_dir) as tmp: new_file = os.path.join(tmp, name) # Use /dev/urandom to create the sized file in the datadir with open(new_file, 'wb+') as f: f.write(os.urandom(size)) # Modify the git repo with a new commit to the same path, # replacing the original file with a new one. repo.modify_file(new_file, os.path.join('data', name)) buildstream-1.6.9/tests/testutils/file_server.py000066400000000000000000000006441437515270000221330ustar00rootroot00000000000000from contextlib import contextmanager from .ftp_server import SimpleFtpServer from .http_server import SimpleHttpServer @contextmanager def create_file_server(file_server_type): if file_server_type == 'FTP': server = SimpleFtpServer() elif file_server_type == 'HTTP': server = SimpleHttpServer() else: assert False try: yield server finally: server.stop() buildstream-1.6.9/tests/testutils/filetypegenerator.py000066400000000000000000000032041437515270000233510ustar00rootroot00000000000000# # Copyright (C) 2018 Codethink Limited # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library. If not, see . # # Authors: # Tiago Gomes import os import socket # generate_file_types() # # Generator that creates a regular file directory, symbolic link, fifo # and socket at the specified path. 
# # Args: # path: (str) path where to create each different type of file # def generate_file_types(path): def clean(): if os.path.exists(path): if os.path.isdir(path): os.rmdir(path) else: os.remove(path) clean() with open(path, 'w') as f: pass yield clean() os.makedirs(path) yield clean() os.symlink("project.conf", path) yield clean() os.mkfifo(path) yield clean() s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) # On some platforms we get "AF_UNIX path too long" errors # try: s.bind(path) except OSError as e: pass else: yield clean() buildstream-1.6.9/tests/testutils/ftp_server.py000066400000000000000000000016131437515270000220020ustar00rootroot00000000000000import multiprocessing from pyftpdlib.authorizers import DummyAuthorizer from pyftpdlib.handlers import FTPHandler from pyftpdlib.servers import FTPServer class SimpleFtpServer(multiprocessing.Process): def __init__(self): super().__init__() self.authorizer = DummyAuthorizer() handler = FTPHandler handler.authorizer = self.authorizer self.server = FTPServer(('127.0.0.1', 0), handler) def run(self): self.server.serve_forever() def stop(self): self.server.close_all() self.server.close() self.terminate() self.join() def allow_anonymous(self, cwd): self.authorizer.add_anonymous(cwd) def add_user(self, user, password, cwd): self.authorizer.add_user(user, password, cwd, perm='elradfmwMT') def base_url(self): return 'ftp://127.0.0.1:{}'.format(self.server.address[1]) buildstream-1.6.9/tests/testutils/http_server.py000066400000000000000000000070341437515270000221730ustar00rootroot00000000000000import multiprocessing import os import posixpath import html import threading import base64 from http.server import SimpleHTTPRequestHandler, HTTPServer, HTTPStatus class Unauthorized(Exception): pass class RequestHandler(SimpleHTTPRequestHandler): def get_root_dir(self): authorization = self.headers.get('authorization') if not authorization: if not self.server.anonymous_dir: raise Unauthorized('unauthorized') return self.server.anonymous_dir else: authorization = authorization.split() if len(authorization) != 2 or authorization[0].lower() != 'basic': raise Unauthorized('unauthorized') try: decoded = base64.decodebytes(authorization[1].encode('ascii')) user, password = decoded.decode('ascii').split(':') expected_password, directory = self.server.users[user] if password == expected_password: return directory except: raise Unauthorized('unauthorized') return None def unauthorized(self): shortmsg, longmsg = self.responses[HTTPStatus.UNAUTHORIZED] self.send_response(HTTPStatus.UNAUTHORIZED, shortmsg) self.send_header('Connection', 'close') content = (self.error_message_format % { 'code': HTTPStatus.UNAUTHORIZED, 'message': html.escape(longmsg, quote=False), 'explain': html.escape(longmsg, quote=False) }) body = content.encode('UTF-8', 'replace') self.send_header('Content-Type', self.error_content_type) self.send_header('Content-Length', str(len(body))) self.send_header('WWW-Authenticate', 'Basic realm="{}"'.format(self.server.realm)) self.end_headers() self.end_headers() if self.command != 'HEAD' and body: self.wfile.write(body) def do_GET(self): try: super().do_GET() except Unauthorized: self.unauthorized() def do_HEAD(self): try: super().do_HEAD() except Unauthorized: self.unauthorized() def translate_path(self, path): path = path.split('?', 1)[0] path = path.split('#', 1)[0] path = posixpath.normpath(path) assert(posixpath.isabs(path)) path = posixpath.relpath(path, '/') return os.path.join(self.get_root_dir(), path) class 
AuthHTTPServer(HTTPServer): def __init__(self, *args, **kwargs): self.users = {} self.anonymous_dir = None self.realm = 'Realm' super().__init__(*args, **kwargs) class SimpleHttpServer(multiprocessing.Process): def __init__(self): self.__stop = multiprocessing.Queue() super().__init__() self.server = AuthHTTPServer(('127.0.0.1', 0), RequestHandler) self.started = False def start(self): self.started = True super().start() def run(self): t = threading.Thread(target=self.server.serve_forever) t.start() self.__stop.get() self.server.shutdown() t.join() def stop(self): if not self.started: return self.__stop.put(None) self.terminate() self.join() def allow_anonymous(self, cwd): self.server.anonymous_dir = cwd def add_user(self, user, password, cwd): self.server.users[user] = (password, cwd) def base_url(self): return 'http://127.0.0.1:{}'.format(self.server.server_port) buildstream-1.6.9/tests/testutils/integration.py000066400000000000000000000016661437515270000221560ustar00rootroot00000000000000import os from buildstream import _yaml # Return a list of files relative to the given directory def walk_dir(root): for dirname, dirnames, filenames in os.walk(root): # ensure consistent traversal order, needed for consistent # handling of symlinks. dirnames.sort() filenames.sort() # print path to all subdirectories first. for subdirname in dirnames: yield os.path.join(dirname, subdirname)[len(root):] # print path to all filenames. for filename in filenames: yield os.path.join(dirname, filename)[len(root):] # Ensure that a directory contains the given filenames. def assert_contains(directory, expected): missing = set(expected) missing.difference_update(walk_dir(directory)) if len(missing) > 0: raise AssertionError("Missing {} expected elements from list: {}" .format(len(missing), missing)) buildstream-1.6.9/tests/testutils/junction.py000066400000000000000000000020531437515270000214530ustar00rootroot00000000000000import os from tests.testutils import create_repo from buildstream import _yaml # generate_junction() # # Generates a junction element with a git repository # # Args: # tmpdir: The tmpdir fixture, for storing the generated git repo # subproject_path: The path for the subproject, to add to the git repo # junction_path: The location to store the generated junction element # store_ref: Whether to store the ref in the junction.bst file # # Returns: # (str): The ref # def generate_junction(tmpdir, subproject_path, junction_path, *, store_ref=True, options={}): # Create a repo to hold the subproject and generate # a junction element for it # repo = create_repo('git', str(tmpdir)) source_ref = ref = repo.create(subproject_path) if not store_ref: source_ref = None element = { 'kind': 'junction', 'sources': [ repo.source_config(ref=source_ref) ] } if options: element["config"] = {"options": options} _yaml.dump(element, junction_path) return ref buildstream-1.6.9/tests/testutils/repo/000077500000000000000000000000001437515270000202155ustar00rootroot00000000000000buildstream-1.6.9/tests/testutils/repo/__init__.py000066400000000000000000000014721437515270000223320ustar00rootroot00000000000000from collections import OrderedDict import pytest from ..site import HAVE_OSTREE from .git import Git from .bzr import Bzr from .ostree import OSTree from .tar import Tar from .zip import Zip ALL_REPO_KINDS = OrderedDict() ALL_REPO_KINDS['git'] = Git ALL_REPO_KINDS['bzr'] = Bzr ALL_REPO_KINDS['ostree'] = OSTree ALL_REPO_KINDS['tar'] = Tar ALL_REPO_KINDS['zip'] = Zip # create_repo() # # Convenience for creating a Repo # 
# Args: # kind (str): The kind of repo to create (a source plugin basename) # directory (str): The path where the repo will keep a cache # def create_repo(kind, directory, subdir='repo'): try: constructor = ALL_REPO_KINDS[kind] except KeyError as e: raise AssertionError("Unsupported repo kind {}".format(kind)) from e return constructor(directory, subdir=subdir) buildstream-1.6.9/tests/testutils/repo/bzr.py000066400000000000000000000032621437515270000213670ustar00rootroot00000000000000import os import subprocess import pytest from .repo import Repo from ..site import HAVE_BZR # Use several variations, different environment variables can be # used depending on the platform, and depending on whether `breezy` # is being used as the bzr implementation. # BZR_ENV = { "BZR_EMAIL": "Testy McTesterson ", "BRZ_EMAIL": "Testy McTesterson ", "EMAIL": "Testy McTesterson " } class Bzr(Repo): def __init__(self, directory, subdir): if not HAVE_BZR: pytest.skip("bzr is not available") super(Bzr, self).__init__(directory, subdir) def create(self, directory): branch_dir = os.path.join(self.repo, 'trunk') subprocess.call(['bzr', 'init-repo', self.repo], env=BZR_ENV) subprocess.call(['bzr', 'init', branch_dir], env=BZR_ENV) self.copy_directory(directory, branch_dir) subprocess.call(['bzr', 'add', '.'], env=BZR_ENV, cwd=branch_dir) subprocess.call(['bzr', 'commit', '--message="Initial commit"'], env=BZR_ENV, cwd=branch_dir) return self.latest_commit() def source_config(self, ref=None): config = { 'kind': 'bzr', 'url': 'file://' + self.repo, 'track': 'trunk' } if ref is not None: config['ref'] = ref return config def latest_commit(self): output = subprocess.check_output([ 'bzr', 'version-info', '--custom', '--template={revno}', os.path.join(self.repo, 'trunk') ], env=BZR_ENV) return output.decode('UTF-8').strip() buildstream-1.6.9/tests/testutils/repo/git.py000066400000000000000000000100171437515270000213510ustar00rootroot00000000000000import os import pytest import shutil import subprocess from .repo import Repo from ..site import HAVE_GIT GIT_ENV = { 'GIT_AUTHOR_DATE': '1320966000 +0200', 'GIT_AUTHOR_NAME': 'tomjon', 'GIT_AUTHOR_EMAIL': 'tom@jon.com', 'GIT_COMMITTER_DATE': '1320966000 +0200', 'GIT_COMMITTER_NAME': 'tomjon', 'GIT_COMMITTER_EMAIL': 'tom@jon.com' } class Git(Repo): def __init__(self, directory, subdir): if not HAVE_GIT: pytest.skip("git is not available") self.submodules = {} super(Git, self).__init__(directory, subdir) def create(self, directory): self.copy_directory(directory, self.repo) subprocess.call(['git', 'init', '.'], env=GIT_ENV, cwd=self.repo) subprocess.call( ['git', 'checkout', '-b', 'master'], env=GIT_ENV, cwd=self.repo ) subprocess.call(['git', 'add', '.'], env=GIT_ENV, cwd=self.repo) subprocess.call(['git', 'commit', '-m', 'Initial commit'], env=GIT_ENV, cwd=self.repo) return self.latest_commit() def add_tag(self, tag): subprocess.call(['git', 'tag', tag], env=GIT_ENV, cwd=self.repo) def add_commit(self): subprocess.call(['git', 'commit', '--allow-empty', '-m', 'Additional commit'], env=GIT_ENV, cwd=self.repo) return self.latest_commit() def add_file(self, filename): shutil.copy(filename, self.repo) subprocess.call(['git', 'add', os.path.basename(filename)], env=GIT_ENV, cwd=self.repo) subprocess.call([ 'git', 'commit', '-m', 'Added {}'.format(os.path.basename(filename)) ], env=GIT_ENV, cwd=self.repo) return self.latest_commit() def modify_file(self, new_file, path): shutil.copy(new_file, os.path.join(self.repo, path)) subprocess.call([ 'git', 'commit', path, '-m', 'Modified 
{}'.format(os.path.basename(path)) ], env=GIT_ENV, cwd=self.repo) return self.latest_commit() def add_submodule(self, subdir, url=None, checkout=None): submodule = {} if checkout is not None: submodule['checkout'] = checkout if url is not None: submodule['url'] = url self.submodules[subdir] = submodule subprocess.call(['git', 'submodule', 'add', url, subdir], env=GIT_ENV, cwd=self.repo) subprocess.call(['git', 'commit', '-m', 'Added the submodule'], env=GIT_ENV, cwd=self.repo) return self.latest_commit() # This can also be used to a file or a submodule def remove_path(self, path): subprocess.call(['git', 'rm', path], env=GIT_ENV, cwd=self.repo) subprocess.call(['git', 'commit', '-m', 'Removing {}'.format(path)], env=GIT_ENV, cwd=self.repo) return self.latest_commit() def source_config(self, ref=None, checkout_submodules=None): config = { 'kind': 'git', 'url': 'file://' + self.repo, 'track': 'master' } if ref is not None: config['ref'] = ref if checkout_submodules is not None: config['checkout-submodules'] = checkout_submodules if self.submodules: config['submodules'] = dict(self.submodules) return config def latest_commit(self): output = subprocess.check_output([ 'git', 'rev-parse', 'HEAD' ], env=GIT_ENV, cwd=self.repo) return output.decode('UTF-8').strip() def branch(self, branch_name): subprocess.call(['git', 'checkout', '-b', branch_name], env=GIT_ENV, cwd=self.repo) def delete_tag(self, tag_name): subprocess.call(['git', 'tag', '-d', tag_name], env=GIT_ENV, cwd=self.repo) def checkout(self, commit): subprocess.call(['git', 'checkout', commit], env=GIT_ENV, cwd=self.repo) def add_annotated_tag(self, tag, message): subprocess.call(['git', 'tag', '-a', tag, '-m', message], env=GIT_ENV, cwd=self.repo) return self.latest_commit() buildstream-1.6.9/tests/testutils/repo/ostree.py000066400000000000000000000031151437515270000220700ustar00rootroot00000000000000import pytest import subprocess from .repo import Repo from ..site import HAVE_OSTREE_CLI, HAVE_OSTREE class OSTree(Repo): def __init__(self, directory, subdir): if not HAVE_OSTREE_CLI or not HAVE_OSTREE: pytest.skip("ostree cli is not available") super(OSTree, self).__init__(directory, subdir) def create(self, directory, *, gpg_sign=None, gpg_homedir=None): subprocess.call(['ostree', 'init', '--repo', self.repo, '--mode', 'archive-z2']) commit_args = ['ostree', 'commit', '--repo', self.repo, '--branch', 'master', '--subject', 'Initial commit'] if gpg_sign and gpg_homedir: commit_args += [ '--gpg-sign={}'.format(gpg_sign), '--gpg-homedir={}'.format(gpg_homedir) ] commit_args += [directory] subprocess.call(commit_args) latest = self.latest_commit() return latest def source_config(self, ref=None, *, gpg_key=None): config = { 'kind': 'ostree', 'url': 'file://' + self.repo, 'track': 'master' } if ref is not None: config['ref'] = ref if gpg_key is not None: config['gpg-key'] = gpg_key return config def latest_commit(self): output = subprocess.check_output([ 'ostree', 'rev-parse', '--repo', self.repo, 'master' ]) return output.decode('UTF-8').strip() buildstream-1.6.9/tests/testutils/repo/repo.py000066400000000000000000000047061437515270000215430ustar00rootroot00000000000000import os import shutil # Repo() # # Abstract class providing scaffolding for # generating data to be used with various sources # # Args: # directory (str): The base temp directory for the test # subdir (str): The subdir for the repo, in case there is more than one # class Repo(): def __init__(self, directory, subdir='repo'): # The working directory for the repo object # 
self.directory = os.path.abspath(directory) # The directory the actual repo will be stored in self.repo = os.path.join(self.directory, subdir) os.makedirs(self.repo, exist_ok=True) # create(): # # Create a repository in self.directory and add the initial content # # Args: # directory: A directory with content to commit # # Returns: # (smth): A new ref corresponding to this commit, which can # be passed as the ref in the Repo.source_config() API. # def create(self, directory): pass # source_config() # # Args: # ref (smth): An optional abstract ref object, usually a string. # # Returns: # (dict): A configuration which can be serialized as a # source when generating an element file on the fly # def source_config(self, ref=None): pass # copy_directory(): # # Copies the content of src to the directory dest # # Like shutil.copytree(), except dest is expected # to exist. # # Args: # src (str): The source directory # dest (str): The destination directory # def copy_directory(self, src, dest): for filename in os.listdir(src): src_path = os.path.join(src, filename) dest_path = os.path.join(dest, filename) if os.path.isdir(src_path): shutil.copytree(src_path, dest_path) else: shutil.copy2(src_path, dest_path) # copy(): # # Creates a copy of this repository in the specified # destination. # # Args: # dest (str): The destination directory # # Returns: # (Repo): A Repo object for the new repository. def copy(self, dest): subdir = self.repo[len(self.directory):].lstrip(os.sep) new_dir = os.path.join(dest, subdir) os.makedirs(new_dir, exist_ok=True) self.copy_directory(self.repo, new_dir) repo_type = type(self) new_repo = repo_type(dest, subdir) return new_repo buildstream-1.6.9/tests/testutils/repo/tar.py000066400000000000000000000013301437515270000213520ustar00rootroot00000000000000import os import tarfile from buildstream.utils import sha256sum from .repo import Repo class Tar(Repo): def create(self, directory): tarball = os.path.join(self.repo, 'file.tar.gz') old_dir = os.getcwd() os.chdir(directory) with tarfile.open(tarball, "w:gz") as tar: tar.add(".") os.chdir(old_dir) return sha256sum(tarball) def source_config(self, ref=None): tarball = os.path.join(self.repo, 'file.tar.gz') config = { 'kind': 'tar', 'url': 'file://' + tarball, 'directory': '', 'base-dir': '' } if ref is not None: config['ref'] = ref return config buildstream-1.6.9/tests/testutils/repo/zip.py000066400000000000000000000016371437515270000214000ustar00rootroot00000000000000import os import zipfile from buildstream.utils import sha256sum from .repo import Repo class Zip(Repo): def create(self, directory): archive = os.path.join(self.repo, 'file.zip') old_dir = os.getcwd() os.chdir(directory) with zipfile.ZipFile(archive, "w") as zip: for root, dirs, files in os.walk('.'): names = dirs + files names = [os.path.join(root, name) for name in names] for name in names: zip.write(name) os.chdir(old_dir) return sha256sum(archive) def source_config(self, ref=None): archive = os.path.join(self.repo, 'file.zip') config = { 'kind': 'zip', 'url': 'file://' + archive, 'directory': '', 'base-dir': '' } if ref is not None: config['ref'] = ref return config buildstream-1.6.9/tests/testutils/runcli.py000066400000000000000000000407711437515270000211270ustar00rootroot00000000000000import os import re import sys import shutil import tempfile import itertools import traceback import subprocess from contextlib import contextmanager, ExitStack from enum import Enum import ujson import pytest # XXX Using pytest private internals here # # We use pytest 
internals to capture the stdout/stderr during # a run of the buildstream CLI. We do this because click's # CliRunner convenience API (click.testing module) does not support # separation of stdout/stderr. # from _pytest.capture import MultiCapture, FDCapture # Import the main cli entrypoint from buildstream._frontend import cli as bst_cli from buildstream import _yaml # Wrapper for the click.testing result class Result(): def __init__(self, exit_code=None, output=None, stderr=None): self.exit_code = exit_code self.output = output self.stderr = stderr self.main_error_domain = None self.main_error_reason = None self.task_error_domain = None self.task_error_reason = None # assert_success() # # Asserts that the buildstream session completed successfully # # Args: # fail_message (str): An optional message to override the automatic # assertion error messages # Raises: # (AssertionError): If the session did not complete successfully # def assert_success(self, fail_message=''): assert self.exit_code == 0, fail_message # assert_main_error() # # Asserts that the buildstream session failed, and that # the main process error report is as expected # # Args: # error_domain (ErrorDomain): The domain of the error which occurred # error_reason (any): The reason field of the error which occurred # fail_message (str): An optional message to override the automatic # assertion error messages # debug (bool): If true, prints information regarding the exit state of the result() # Raises: # (AssertionError): If any of the assertions fail # def assert_main_error(self, error_domain, error_reason, fail_message='', *, debug=False): if debug: print( """ Exit code: {} Domain: {} Reason: {} """.format( self.exit_code, self.main_error_domain, self.main_error_reason )) assert self.exit_code != 0, fail_message test_domain = error_domain.value if isinstance (error_domain, Enum) else error_domain test_reason = error_reason.value if isinstance (error_reason, Enum) else error_reason assert self.main_error_domain == test_domain, fail_message assert self.main_error_reason == test_reason, fail_message # assert_task_error() # # Asserts that the buildstream session failed, and that # the child task error which caused buildstream to exit # is as expected. # # Args: # error_domain (ErrorDomain): The domain of the error which occurred # error_reason (any): The reason field of the error which occurred # fail_message (str): An optional message to override the automatic # assertion error messages # Raises: # (AssertionError): If any of the assertions fail # def assert_task_error(self, error_domain, error_reason, fail_message=''): test_domain = error_domain.value if isinstance (error_domain, Enum) else error_domain test_reason = error_reason.value if isinstance (error_reason, Enum) else error_reason assert self.exit_code != 0, fail_message assert self.task_error_domain == test_domain, fail_message assert self.task_error_reason == test_reason, fail_message # get_tracked_elements() # # Produces a list of element names on which tracking occurred # during the session. 
# # This is done by parsing the buildstream stderr log # # Returns: # (list): A list of element names # def get_tracked_elements(self): tracked = re.findall(r'\[track:(\S+)\s*]', self.stderr) if tracked is None: return [] return list(tracked) def get_pushed_elements(self): pushed = re.findall(r'\[\s*push:(\S+)\s*\]\s*INFO\s*Pushed artifact', self.stderr) if pushed is None: return [] return list(pushed) def get_pulled_elements(self): pulled = re.findall(r'\[\s*pull:(\S+)\s*\]\s*INFO\s*Pulled artifact', self.stderr) if pulled is None: return [] return list(pulled) class Cli(): def __init__(self, directory, verbose=True, default_options=None): self.directory = directory self.config = None self.verbose = verbose if default_options is None: default_options = [] self.default_options = default_options # configure(): # # Serializes a user configuration into a buildstream.conf # to use for this test cli. # # Args: # config (dict): The user configuration to use # def configure(self, config): if self.config is None: self.config = {} for key, val in config.items(): self.config[key] = val def remove_artifact_from_cache(self, project, element_name, *, cache_dir=None): if not cache_dir: cache_dir = os.path.join(project, 'cache', 'artifacts') cache_dir = os.path.join(cache_dir, 'cas', 'refs', 'heads') normal_name = element_name.replace(os.sep, '-') cache_dir = os.path.splitext(os.path.join(cache_dir, 'test', normal_name))[0] shutil.rmtree(cache_dir) # run(): # # Runs buildstream with the given arguments, additionally # also passes some global options to buildstream in order # to stay contained in the testing environment. # # Args: # configure (bool): Whether to pass a --config argument # project (str): An optional path to a project # silent (bool): Whether to pass --no-verbose # env (dict): Environment variables to temporarily set during the test # args (list): A list of arguments to pass buildstream # def run(self, configure=True, project=None, silent=False, env=None, cwd=None, options=None, args=None): if args is None: args = [] if options is None: options = [] if env is None: env = os.environ.copy() else: orig_env = os.environ.copy() orig_env.update (env) env = orig_env options = self.default_options + options with ExitStack() as stack: # Prepare a tempfile for buildstream to record machine readable error codes error_codes = stack.enter_context (tempfile.NamedTemporaryFile()) env['BST_TEST_ERROR_CODES'] = error_codes.name bst_args = ['--no-colors'] if silent: bst_args += ['--no-verbose'] if configure: config_file = stack.enter_context( configured(self.directory, self.config) ) bst_args += ['--config', config_file] if project: bst_args += ['--directory', project] for option, value in options: bst_args += ['--option', option, value] bst_args += args cmd = ["bst"] + bst_args process = subprocess.Popen( cmd, env=env, cwd=cwd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) out, err = process.communicate() result = Result( exit_code=process.poll(), output=out.decode('utf-8'), stderr=err.decode('utf-8') ) # # Collect machine readable error codes from the tmpfile # result_error_codes_string = error_codes.read() result_error_codes = {} if result_error_codes_string: result_error_codes = ujson.loads (result_error_codes_string) if result_error_codes: result.main_error_domain = result_error_codes['main_error_domain'] result.main_error_reason = result_error_codes['main_error_reason'] result.task_error_domain = result_error_codes['task_error_domain'] result.task_error_reason = 
result_error_codes['task_error_reason'] # Some informative stdout we can observe when anything fails if self.verbose: command = "bst " + " ".join(bst_args) print("BuildStream exited with code {} for invocation:\n\t{}" .format(result.exit_code, command)) if result.output: print("Program output was:\n{}".format(result.output)) if result.stderr: print("Program stderr was:\n{}".format(result.stderr)) return result # Fetch an element state by name by # invoking bst show on the project with the CLI # # If you need to get the states of multiple elements, # then use get_element_states(s) instead. # def get_element_state(self, project, element_name): result = self.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{state}', element_name ]) result.assert_success() return result.output.strip() # Fetch the states of elements for a given target / deps # # Returns a dictionary with the element names as keys # def get_element_states(self, project, target, deps='all'): result = self.run(project=project, silent=True, args=[ 'show', '--deps', deps, '--format', '%{name}||%{state}', target ]) result.assert_success() lines = result.output.splitlines() states = {} for line in lines: split = line.split(sep='||') states[split[0]] = split[1] return states # Fetch an element's cache key by invoking bst show # on the project with the CLI # def get_element_key(self, project, element_name): result = self.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{full-key}', element_name ]) result.assert_success() return result.output.strip() # Get the decoded config of an element. # def get_element_config(self, project, element_name): result = self.run(project=project, silent=True, args=[ 'show', '--deps', 'none', '--format', '%{config}', element_name ]) result.assert_success() yml = _yaml.prepare_roundtrip_yaml() return yml.load(result.output) # Fetch the elements that would be in the pipeline with the given # arguments. # def get_pipeline(self, project, elements, except_=None, scope='plan'): if except_ is None: except_ = [] args = ['show', '--deps', scope, '--format', '%{name}'] args += list(itertools.chain.from_iterable(zip(itertools.repeat('--except'), except_))) result = self.run(project=project, silent=True, args=args + elements) result.assert_success() return result.output.splitlines() class CliIntegration(Cli): # run() # # This supports the same arguments as Cli.run() and additionally # it supports the project_config keyword argument. # # This will first load the project.conf file from the specified # project directory ('project' keyword argument) and perform substitutions # of any {project_dir} specified in the existing project.conf. 
# # If the project_config parameter is specified, it is expected to # be a dictionary of additional project configuration options, and # will be composited on top of the already loaded project.conf # def run(self, *args, project_config=None, **kwargs): # First load the project.conf and substitute {project_dir} # # Save the original project.conf, because we will run more than # once in the same temp directory # project_directory = kwargs['project'] project_filename = os.path.join(project_directory, 'project.conf') project_backup = os.path.join(project_directory, 'project.conf.backup') project_load_filename = project_filename if not os.path.exists(project_backup): shutil.copy(project_filename, project_backup) else: project_load_filename = project_backup with open(project_load_filename) as f: config = f.read() config = config.format(project_dir=project_directory) if project_config is not None: # If a custom project configuration dictionary was # specified, composite it on top of the already # substituted base project configuration # base_config = _yaml.load_data(config) # In order to leverage _yaml.composite_dict(), both # dictionaries need to be loaded via _yaml.load_data() first # with tempfile.TemporaryDirectory(dir=project_directory) as scratchdir: temp_project = os.path.join(scratchdir, 'project.conf') _yaml.dump(project_config, temp_project) project_config = _yaml.load(temp_project) _yaml.composite_dict(base_config, project_config) base_config = _yaml.node_sanitize(base_config) _yaml.dump(base_config, project_filename) else: # Otherwise, just dump it as is with open(project_filename, 'w') as f: f.write(config) return super().run(*args, **kwargs) # Main fixture # # Use result = cli.run([arg1, arg2]) to run buildstream commands # @pytest.fixture() def cli(tmpdir): directory = os.path.join(str(tmpdir), 'cache') os.makedirs(directory) return Cli(directory) # A variant of the main fixture that keeps persistent artifact and # source caches. # # It also does not use the click test runner to avoid deadlock issues # when running `bst shell`, but unfortunately cannot produce nice # stacktraces. 
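#
# A minimal usage sketch (the project layout and the element name 'hello.bst'
# are hypothetical, shown only to illustrate how the fixture is driven; it
# only uses run(), assert_success() and get_element_state() as defined in
# this module):
#
#   def test_build_integration(cli_integration, datafiles):
#       project = os.path.join(datafiles.dirname, datafiles.basename)
#       result = cli_integration.run(project=project, args=['build', 'hello.bst'])
#       result.assert_success()
#       assert cli_integration.get_element_state(project, 'hello.bst') == 'cached'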
@pytest.fixture() def cli_integration(tmpdir, integration_cache): directory = os.path.join(str(tmpdir), 'cache') os.makedirs(directory) if os.environ.get('BST_FORCE_BACKEND') == 'unix': fixture = CliIntegration(directory, default_options=[('linux', 'False')]) else: fixture = CliIntegration(directory) # We want to cache sources for integration tests more permanently, # to avoid downloading the huge base-sdk repeatedly fixture.configure({ 'sourcedir': integration_cache.sources, 'artifactdir': integration_cache.artifacts }) return fixture @contextmanager def chdir(directory): old_dir = os.getcwd() os.chdir(directory) yield os.chdir(old_dir) @contextmanager def environment(env): old_env = {} for key, value in env.items(): old_env[key] = os.environ.get(key) if value is None: os.environ.pop(key, None) else: os.environ[key] = value yield for key, value in old_env.items(): if value is None: os.environ.pop(key, None) else: os.environ[key] = value @contextmanager def configured(directory, config=None): # Ensure we've at least relocated the caches to a temp directory if not config: config = {} if not config.get('sourcedir', False): config['sourcedir'] = os.path.join(directory, 'sources') if not config.get('builddir', False): config['builddir'] = os.path.join(directory, 'build') if not config.get('artifactdir', False): config['artifactdir'] = os.path.join(directory, 'artifacts') if not config.get('logdir', False): config['logdir'] = os.path.join(directory, 'logs') # Dump it and yield the filename for test scripts to feed it # to buildstream as an artument filename = os.path.join(directory, "buildstream.conf") _yaml.dump(config, filename) yield filename buildstream-1.6.9/tests/testutils/setuptools.py000066400000000000000000000023441437515270000220460ustar00rootroot00000000000000import os import pytest import pkg_resources # A mock setuptools dist object. class MockDist(): def __init__(self, datafiles, module_name): self.datafiles = datafiles self.module_name = module_name def get_resource_filename(self, *args, **kwargs): return os.path.join(self.datafiles.dirname, self.datafiles.basename, self.module_name) # A mock setuptools entry object. class MockEntry(): def __init__(self, datafiles, module_name): self.dist = MockDist(datafiles, module_name) self.module_name = module_name # Patch setuptools.get_entry_info # # Use result = entry_fixture(datafiles, entry_point, lookup_string) to # patch setuptools for external plugin loading. 
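#
# A minimal usage sketch (the distribution and package names 'myplugins'
# and 'mysource' are hypothetical, shown only to illustrate the call shape;
# the lookup string is split on ':' into distribution and package, matching
# the assertions made in mock_entry below):
#
#   def test_external_plugin(cli, datafiles, entry_fixture):
#       entry_fixture(datafiles, 'mysource', 'myplugins:mysource')
#       # ... then run bst against a project which declares that plugin ...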
# @pytest.fixture() def entry_fixture(monkeypatch): def patch(datafiles, entry_point, lookup_string): dist, package = lookup_string.split(':') def mock_entry(pdist, pentry_point, ppackage): assert(pdist == dist) assert(pentry_point == entry_point) assert(ppackage == package) return MockEntry(datafiles, package) monkeypatch.setattr(pkg_resources, 'get_entry_info', mock_entry) return patch buildstream-1.6.9/tests/testutils/site.py000066400000000000000000000017671437515270000206010ustar00rootroot00000000000000# Some things resolved about the execution site, # so we dont have to repeat this everywhere # import os import sys from buildstream import utils, ProgramNotFoundError try: utils.get_host_tool('bzr') HAVE_BZR = True except ProgramNotFoundError: HAVE_BZR = False try: utils.get_host_tool('git') HAVE_GIT = True except ProgramNotFoundError: HAVE_GIT = False try: utils.get_host_tool('ostree') HAVE_OSTREE_CLI = True except ProgramNotFoundError: HAVE_OSTREE_CLI = False try: from buildstream import _ostree HAVE_OSTREE = True except (ImportError, ValueError): HAVE_OSTREE = False try: utils.get_host_tool('bwrap') HAVE_BWRAP = True except ProgramNotFoundError: HAVE_BWRAP = False try: utils.get_host_tool('lzip') HAVE_LZIP = True except ProgramNotFoundError: HAVE_LZIP = False try: import arpy HAVE_ARPY = True except ImportError: HAVE_ARPY = False IS_LINUX = os.getenv('BST_FORCE_BACKEND', sys.platform).startswith('linux') buildstream-1.6.9/tests/utils/000077500000000000000000000000001437515270000163505ustar00rootroot00000000000000buildstream-1.6.9/tests/utils/__init__.py000066400000000000000000000000001437515270000204470ustar00rootroot00000000000000buildstream-1.6.9/tests/utils/savefile.py000066400000000000000000000035101437515270000205170ustar00rootroot00000000000000import os import pytest from buildstream.utils import save_file_atomic def test_save_new_file(tmpdir): filename = os.path.join(str(tmpdir), 'savefile-success.test') with save_file_atomic(filename, 'w') as f: f.write('foo\n') assert os.listdir(str(tmpdir)) == ['savefile-success.test'] with open(filename) as f: assert f.read() == 'foo\n' def test_save_over_existing_file(tmpdir): filename = os.path.join(str(tmpdir), 'savefile-overwrite.test') with open(filename, 'w') as f: f.write('existing contents\n') with save_file_atomic(filename, 'w') as f: f.write('overwritten contents\n') assert os.listdir(str(tmpdir)) == ['savefile-overwrite.test'] with open(filename) as f: assert f.read() == 'overwritten contents\n' def test_exception_new_file(tmpdir): filename = os.path.join(str(tmpdir), 'savefile-exception.test') with pytest.raises(RuntimeError): with save_file_atomic(filename, 'w') as f: f.write('Some junk\n') raise RuntimeError("Something goes wrong") assert os.listdir(str(tmpdir)) == [] def test_exception_existing_file(tmpdir): filename = os.path.join(str(tmpdir), 'savefile-existing.test') with open(filename, 'w') as f: f.write('existing contents\n') with pytest.raises(RuntimeError): with save_file_atomic(filename, 'w') as f: f.write('Some junk\n') raise RuntimeError("Something goes wrong") assert os.listdir(str(tmpdir)) == ['savefile-existing.test'] with open(filename) as f: assert f.read() == 'existing contents\n' def test_attributes(tmpdir): filename = os.path.join(str(tmpdir), 'savefile-attributes.test') with save_file_atomic(filename, 'w') as f: assert f.real_filename == filename assert f.name != filename 
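

# An additional sketch, not part of the original suite (it reuses this
# module's existing imports): it assumes the atomic-save behaviour implied
# by the tests above, i.e. that writes are staged in a separate temporary
# file (f.name != filename) and only moved into place when the context
# exits cleanly, so readers never observe a partially written target.
def test_old_content_visible_until_commit(tmpdir):
    filename = os.path.join(str(tmpdir), 'savefile-visibility.test')
    with open(filename, 'w') as f:
        f.write('old contents\n')

    with save_file_atomic(filename, 'w') as f:
        f.write('new contents\n')
        # Still inside the context: the target has not been replaced yet
        with open(filename) as reader:
            assert reader.read() == 'old contents\n'

    # After a clean exit, the new contents have been moved into place
    with open(filename) as reader:
        assert reader.read() == 'new contents\n'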
buildstream-1.6.9/tests/yaml/000077500000000000000000000000001437515270000161525ustar00rootroot00000000000000buildstream-1.6.9/tests/yaml/__init__.py000066400000000000000000000000001437515270000202510ustar00rootroot00000000000000buildstream-1.6.9/tests/yaml/data/000077500000000000000000000000001437515270000170635ustar00rootroot00000000000000buildstream-1.6.9/tests/yaml/data/basics.yaml000066400000000000000000000005171437515270000212160ustar00rootroot00000000000000kind: pony description: The vehicle of choice for rainbow travel moods: - happy - sad children: - name: dopey mood: silly - name: grumpy mood: grumpy - name: happy mood: happy - name: doc mood: happy - name: sneezy mood: curious - name: bashful mood: bashful - name: sleepy mood: sleepy extra: this: that old: new buildstream-1.6.9/tests/yaml/data/composite.yaml000066400000000000000000000002161437515270000217500ustar00rootroot00000000000000kind: horse description: The horse you ride children: - name: extra mood: funny extra: old: override another: one: one two: two buildstream-1.6.9/tests/yaml/data/convert_value_to_str.yaml000066400000000000000000000000731437515270000242150ustar00rootroot00000000000000Test1: 1_23_4 Test2: 1.23.4 Test3: 1.20 Test4: OneTwoThree buildstream-1.6.9/tests/yaml/data/implicitoverwrite.yaml000066400000000000000000000002311437515270000235240ustar00rootroot00000000000000# Composited on top of basics.yaml, overwriting it's children list children: - name: overwrite1 mood: overwrite1 - name: overwrite2 mood: overwrite2 buildstream-1.6.9/tests/yaml/data/invalid.yaml000066400000000000000000000002031437515270000213700ustar00rootroot00000000000000kind: pony description: The vehicle of choice for rainbow travel mods: - happy - sad children: - naam: dopey mood: silly buildstream-1.6.9/tests/yaml/data/listappend.yaml000066400000000000000000000002451437515270000221130ustar00rootroot00000000000000# Composited on top of basics.yaml, appending to it's children list children: (>): - name: appended1 mood: appended1 - name: appended2 mood: appended2 buildstream-1.6.9/tests/yaml/data/listappendprepend.yaml000066400000000000000000000004171437515270000234720ustar00rootroot00000000000000# Composited on top of basics.yaml, prepending and appending to it's children list children: (<): - name: prepended1 mood: prepended1 - name: prepended2 mood: prepended2 (>): - name: appended1 mood: appended1 - name: appended2 mood: appended2 buildstream-1.6.9/tests/yaml/data/listoverwrite.yaml000066400000000000000000000002501437515270000226660ustar00rootroot00000000000000# Composited on top of basics.yaml, overwriting it's children list children: (=): - name: overwrite1 mood: overwrite1 - name: overwrite2 mood: overwrite2 buildstream-1.6.9/tests/yaml/data/listoverwriteempty.yaml000066400000000000000000000001371437515270000237510ustar00rootroot00000000000000# Composited on top of basics.yaml, effectively deleting its children list children: (=): [] buildstream-1.6.9/tests/yaml/data/listprepend.yaml000066400000000000000000000002521437515270000222770ustar00rootroot00000000000000# Composited on top of basics.yaml, prepending to it's children list children: (<): - name: prepended1 mood: prepended1 - name: prepended2 mood: prepended2 buildstream-1.6.9/tests/yaml/data/secondappend.yaml000066400000000000000000000002711437515270000224120ustar00rootroot00000000000000# Composited on top of listappend.yaml, appending to it's children list children: (>): - name: secondappend1 mood: secondappend1 - name: secondappend2 mood: secondappend2 
buildstream-1.6.9/tests/yaml/data/secondprepend.yaml000066400000000000000000000002771437515270000226060ustar00rootroot00000000000000# Composited on top of listprepend.yaml, prepending to it's children list children: (<): - name: secondprepend1 mood: secondprepend1 - name: secondprepend2 mood: secondprepend2 buildstream-1.6.9/tests/yaml/yaml.py000066400000000000000000000404341437515270000174730ustar00rootroot00000000000000import os import pytest from collections.abc import Mapping from buildstream import _yaml from buildstream._exceptions import LoadError, LoadErrorReason DATA_DIR = os.path.join( os.path.dirname(os.path.realpath(__file__)), 'data', ) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_load_yaml(datafiles): filename = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') loaded = _yaml.load(filename) assert(loaded.get('kind') == 'pony') def assert_provenance(filename, line, col, node, key=None, indices=[]): provenance = _yaml.node_get_provenance(node, key=key, indices=indices) if key: if indices: assert(isinstance(provenance, _yaml.ElementProvenance)) else: assert(isinstance(provenance, _yaml.MemberProvenance)) else: assert(isinstance(provenance, _yaml.DictProvenance)) assert(provenance.filename.shortname == filename) assert(provenance.line == line) assert(provenance.col == col) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_basic_provenance(datafiles): filename = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') loaded = _yaml.load(filename) assert(loaded.get('kind') == 'pony') assert_provenance(filename, 1, 0, loaded) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_member_provenance(datafiles): filename = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') loaded = _yaml.load(filename) assert(loaded.get('kind') == 'pony') assert_provenance(filename, 2, 13, loaded, 'description') @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_element_provenance(datafiles): filename = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') loaded = _yaml.load(filename) assert(loaded.get('kind') == 'pony') assert_provenance(filename, 5, 2, loaded, 'moods', [1]) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_node_validate(datafiles): valid = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') invalid = os.path.join(datafiles.dirname, datafiles.basename, 'invalid.yaml') base = _yaml.load(valid) _yaml.node_validate(base, ['kind', 'description', 'moods', 'children', 'extra']) base = _yaml.load(invalid) with pytest.raises(LoadError) as exc: _yaml.node_validate(base, ['kind', 'description', 'moods', 'children', 'extra']) assert (exc.value.reason == LoadErrorReason.INVALID_DATA) @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_node_get(datafiles): filename = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') base = _yaml.load(filename) assert(base.get('kind') == 'pony') children = _yaml.node_get(base, list, 'children') assert(isinstance(children, list)) assert(len(children) == 7) child = _yaml.node_get(base, Mapping, 'children', indices=[6]) assert_provenance(filename, 20, 8, child, 'mood') extra = _yaml.node_get(base, Mapping, 'extra') with pytest.raises(LoadError) as exc: wrong = _yaml.node_get(extra, Mapping, 'old') assert (exc.value.reason == LoadErrorReason.INVALID_DATA) # Really this is testing _yaml.node_chain_copy(), we want to # be sure that when using a ChainMap copy, compositing values # still preserves the original values in the copied dict. 
# @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_composite_preserve_originals(datafiles): filename = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') overlayfile = os.path.join(datafiles.dirname, datafiles.basename, 'composite.yaml') base = _yaml.load(filename) overlay = _yaml.load(overlayfile) base_copy = _yaml.node_chain_copy(base) _yaml.composite_dict(base_copy, overlay) copy_extra = _yaml.node_get(base_copy, Mapping, 'extra') orig_extra = _yaml.node_get(base, Mapping, 'extra') # Test that the node copy has the overridden value... assert(_yaml.node_get(copy_extra, str, 'old') == 'override') # But the original node is not effected by the override. assert(_yaml.node_get(orig_extra, str, 'old') == 'new') # Tests for list composition # # Each test composits a filename on top of basics.yaml, and tests # the toplevel children list at the specified index # # Parameters: # filename: The file to composite on top of basics.yaml # index: The index in the children list # length: The expected length of the children list # mood: The expected value of the mood attribute of the dictionary found at index in children # prov_file: The expected provenance filename of "mood" # prov_line: The expected provenance line of "mood" # prov_col: The expected provenance column of "mood" # @pytest.mark.datafiles(os.path.join(DATA_DIR)) @pytest.mark.parametrize("filename,index,length,mood,prov_file,prov_line,prov_col", [ # Test results of compositing with the (<) prepend directive ('listprepend.yaml', 0, 9, 'prepended1', 'listprepend.yaml', 5, 10), ('listprepend.yaml', 1, 9, 'prepended2', 'listprepend.yaml', 7, 10), ('listprepend.yaml', 2, 9, 'silly', 'basics.yaml', 8, 8), ('listprepend.yaml', 8, 9, 'sleepy', 'basics.yaml', 20, 8), # Test results of compositing with the (>) append directive ('listappend.yaml', 7, 9, 'appended1', 'listappend.yaml', 5, 10), ('listappend.yaml', 8, 9, 'appended2', 'listappend.yaml', 7, 10), ('listappend.yaml', 0, 9, 'silly', 'basics.yaml', 8, 8), ('listappend.yaml', 6, 9, 'sleepy', 'basics.yaml', 20, 8), # Test results of compositing with both (<) and (>) directives ('listappendprepend.yaml', 0, 11, 'prepended1', 'listappendprepend.yaml', 5, 10), ('listappendprepend.yaml', 1, 11, 'prepended2', 'listappendprepend.yaml', 7, 10), ('listappendprepend.yaml', 2, 11, 'silly', 'basics.yaml', 8, 8), ('listappendprepend.yaml', 8, 11, 'sleepy', 'basics.yaml', 20, 8), ('listappendprepend.yaml', 9, 11, 'appended1', 'listappendprepend.yaml', 10, 10), ('listappendprepend.yaml', 10, 11, 'appended2', 'listappendprepend.yaml', 12, 10), # Test results of compositing with the (=) overwrite directive ('listoverwrite.yaml', 0, 2, 'overwrite1', 'listoverwrite.yaml', 5, 10), ('listoverwrite.yaml', 1, 2, 'overwrite2', 'listoverwrite.yaml', 7, 10), # Test results of compositing without any directive, implicitly overwriting ('implicitoverwrite.yaml', 0, 2, 'overwrite1', 'implicitoverwrite.yaml', 4, 8), ('implicitoverwrite.yaml', 1, 2, 'overwrite2', 'implicitoverwrite.yaml', 6, 8), ]) def test_list_composition(datafiles, filename, index, length, mood, prov_file, prov_line, prov_col): base = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') overlay = os.path.join(datafiles.dirname, datafiles.basename, filename) base = _yaml.load(base, shortname='basics.yaml') overlay = _yaml.load(overlay, shortname=filename) _yaml.composite_dict(base, overlay) children = _yaml.node_get(base, list, 'children') assert len(children) == length child = children[index] assert child['mood'] == 
mood assert_provenance(prov_file, prov_line, prov_col, child, 'mood') # Test that overwriting a list with an empty list works as expected. @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_list_deletion(datafiles): base = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') overlay = os.path.join(datafiles.dirname, datafiles.basename, 'listoverwriteempty.yaml') base = _yaml.load(base, shortname='basics.yaml') overlay = _yaml.load(overlay, shortname='listoverwriteempty.yaml') _yaml.composite_dict(base, overlay) children = _yaml.node_get(base, list, 'children') assert len(children) == 0 # Tests for deep list composition # # Same as test_list_composition(), but adds an additional file # in between so that lists are composited twice. # # This test will to two iterations for each parameter # specification, expecting the same results # # First iteration: # composited = basics.yaml & filename1 # composited = composited & filename2 # # Second iteration: # composited = filename1 & filename2 # composited = basics.yaml & composited # # Parameters: # filename1: The file to composite on top of basics.yaml # filename2: The file to composite on top of filename1 # index: The index in the children list # length: The expected length of the children list # mood: The expected value of the mood attribute of the dictionary found at index in children # prov_file: The expected provenance filename of "mood" # prov_line: The expected provenance line of "mood" # prov_col: The expected provenance column of "mood" # @pytest.mark.datafiles(os.path.join(DATA_DIR)) @pytest.mark.parametrize("filename1,filename2,index,length,mood,prov_file,prov_line,prov_col", [ # Test results of compositing literal list with (>) and then (<) ('listprepend.yaml', 'listappend.yaml', 0, 11, 'prepended1', 'listprepend.yaml', 5, 10), ('listprepend.yaml', 'listappend.yaml', 1, 11, 'prepended2', 'listprepend.yaml', 7, 10), ('listprepend.yaml', 'listappend.yaml', 2, 11, 'silly', 'basics.yaml', 8, 8), ('listprepend.yaml', 'listappend.yaml', 8, 11, 'sleepy', 'basics.yaml', 20, 8), ('listprepend.yaml', 'listappend.yaml', 9, 11, 'appended1', 'listappend.yaml', 5, 10), ('listprepend.yaml', 'listappend.yaml', 10, 11, 'appended2', 'listappend.yaml', 7, 10), # Test results of compositing literal list with (<) and then (>) ('listappend.yaml', 'listprepend.yaml', 0, 11, 'prepended1', 'listprepend.yaml', 5, 10), ('listappend.yaml', 'listprepend.yaml', 1, 11, 'prepended2', 'listprepend.yaml', 7, 10), ('listappend.yaml', 'listprepend.yaml', 2, 11, 'silly', 'basics.yaml', 8, 8), ('listappend.yaml', 'listprepend.yaml', 8, 11, 'sleepy', 'basics.yaml', 20, 8), ('listappend.yaml', 'listprepend.yaml', 9, 11, 'appended1', 'listappend.yaml', 5, 10), ('listappend.yaml', 'listprepend.yaml', 10, 11, 'appended2', 'listappend.yaml', 7, 10), # Test results of compositing literal list with (>) and then (>) ('listappend.yaml', 'secondappend.yaml', 0, 11, 'silly', 'basics.yaml', 8, 8), ('listappend.yaml', 'secondappend.yaml', 6, 11, 'sleepy', 'basics.yaml', 20, 8), ('listappend.yaml', 'secondappend.yaml', 7, 11, 'appended1', 'listappend.yaml', 5, 10), ('listappend.yaml', 'secondappend.yaml', 8, 11, 'appended2', 'listappend.yaml', 7, 10), ('listappend.yaml', 'secondappend.yaml', 9, 11, 'secondappend1', 'secondappend.yaml', 5, 10), ('listappend.yaml', 'secondappend.yaml', 10, 11, 'secondappend2', 'secondappend.yaml', 7, 10), # Test results of compositing literal list with (>) and then (>) ('listprepend.yaml', 'secondprepend.yaml', 0, 11, 'secondprepend1', 
'secondprepend.yaml', 5, 10), ('listprepend.yaml', 'secondprepend.yaml', 1, 11, 'secondprepend2', 'secondprepend.yaml', 7, 10), ('listprepend.yaml', 'secondprepend.yaml', 2, 11, 'prepended1', 'listprepend.yaml', 5, 10), ('listprepend.yaml', 'secondprepend.yaml', 3, 11, 'prepended2', 'listprepend.yaml', 7, 10), ('listprepend.yaml', 'secondprepend.yaml', 4, 11, 'silly', 'basics.yaml', 8, 8), ('listprepend.yaml', 'secondprepend.yaml', 10, 11, 'sleepy', 'basics.yaml', 20, 8), # Test results of compositing literal list with (>) or (<) and then another literal list ('listappend.yaml', 'implicitoverwrite.yaml', 0, 2, 'overwrite1', 'implicitoverwrite.yaml', 4, 8), ('listappend.yaml', 'implicitoverwrite.yaml', 1, 2, 'overwrite2', 'implicitoverwrite.yaml', 6, 8), ('listprepend.yaml', 'implicitoverwrite.yaml', 0, 2, 'overwrite1', 'implicitoverwrite.yaml', 4, 8), ('listprepend.yaml', 'implicitoverwrite.yaml', 1, 2, 'overwrite2', 'implicitoverwrite.yaml', 6, 8), # Test results of compositing literal list with (>) or (<) and then an explicit (=) overwrite ('listappend.yaml', 'listoverwrite.yaml', 0, 2, 'overwrite1', 'listoverwrite.yaml', 5, 10), ('listappend.yaml', 'listoverwrite.yaml', 1, 2, 'overwrite2', 'listoverwrite.yaml', 7, 10), ('listprepend.yaml', 'listoverwrite.yaml', 0, 2, 'overwrite1', 'listoverwrite.yaml', 5, 10), ('listprepend.yaml', 'listoverwrite.yaml', 1, 2, 'overwrite2', 'listoverwrite.yaml', 7, 10), # Test results of compositing literal list an explicit overwrite (=) and then with (>) or (<) ('listoverwrite.yaml', 'listappend.yaml', 0, 4, 'overwrite1', 'listoverwrite.yaml', 5, 10), ('listoverwrite.yaml', 'listappend.yaml', 1, 4, 'overwrite2', 'listoverwrite.yaml', 7, 10), ('listoverwrite.yaml', 'listappend.yaml', 2, 4, 'appended1', 'listappend.yaml', 5, 10), ('listoverwrite.yaml', 'listappend.yaml', 3, 4, 'appended2', 'listappend.yaml', 7, 10), ('listoverwrite.yaml', 'listprepend.yaml', 0, 4, 'prepended1', 'listprepend.yaml', 5, 10), ('listoverwrite.yaml', 'listprepend.yaml', 1, 4, 'prepended2', 'listprepend.yaml', 7, 10), ('listoverwrite.yaml', 'listprepend.yaml', 2, 4, 'overwrite1', 'listoverwrite.yaml', 5, 10), ('listoverwrite.yaml', 'listprepend.yaml', 3, 4, 'overwrite2', 'listoverwrite.yaml', 7, 10), ]) def test_list_composition_twice(datafiles, filename1, filename2, index, length, mood, prov_file, prov_line, prov_col): file_base = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml') file1 = os.path.join(datafiles.dirname, datafiles.basename, filename1) file2 = os.path.join(datafiles.dirname, datafiles.basename, filename2) ##################### # Round 1 - Fight ! ##################### base = _yaml.load(file_base, shortname='basics.yaml') overlay1 = _yaml.load(file1, shortname=filename1) overlay2 = _yaml.load(file2, shortname=filename2) _yaml.composite_dict(base, overlay1) _yaml.composite_dict(base, overlay2) children = _yaml.node_get(base, list, 'children') assert len(children) == length child = children[index] assert child['mood'] == mood assert_provenance(prov_file, prov_line, prov_col, child, 'mood') ##################### # Round 2 - Fight ! 
##################### base = _yaml.load(file_base, shortname='basics.yaml') overlay1 = _yaml.load(file1, shortname=filename1) overlay2 = _yaml.load(file2, shortname=filename2) _yaml.composite_dict(overlay1, overlay2) _yaml.composite_dict(base, overlay1) children = _yaml.node_get(base, list, 'children') assert len(children) == length child = children[index] assert child['mood'] == mood assert_provenance(prov_file, prov_line, prov_col, child, 'mood') @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_convert_value_to_string(datafiles): conf_file = os.path.join(datafiles.dirname, datafiles.basename, 'convert_value_to_str.yaml') # Run file through yaml to convert it test_dict = _yaml.load(conf_file) user_config = _yaml.node_get(test_dict, str, "Test1") assert isinstance(user_config, str) assert user_config == "1_23_4" user_config = _yaml.node_get(test_dict, str, "Test2") assert isinstance(user_config, str) assert user_config == "1.23.4" user_config = _yaml.node_get(test_dict, str, "Test3") assert isinstance(user_config, str) assert user_config == "1.20" user_config = _yaml.node_get(test_dict, str, "Test4") assert isinstance(user_config, str) assert user_config == "OneTwoThree" @pytest.mark.datafiles(os.path.join(DATA_DIR)) def test_value_doesnt_match_expected(datafiles): conf_file = os.path.join(datafiles.dirname, datafiles.basename, 'convert_value_to_str.yaml') # Run file through yaml to convert it test_dict = _yaml.load(conf_file) with pytest.raises(LoadError) as exc: user_config = _yaml.node_get(test_dict, int, "Test4") assert exc.value.reason == LoadErrorReason.INVALID_DATA buildstream-1.6.9/tox.ini000066400000000000000000000046671437515270000153760ustar00rootroot00000000000000# # Tox global configuration # [tox] envlist = py36-nocover,py37-nocover,py38-nocover,py39-nocover,py310-nocover,py311-nocover skip_missing_interpreters = true # # Defaults for all environments # # Anything specified here is iherited by the sections # [testenv] commands = # Running with coverage reporting enabled py{36,37,38,39,310,311}-!nocover: pytest --basetemp {envtmpdir} --cov=buildstream --cov-config .coveragerc {posargs} py{36,37,38,39,310,311}-!nocover: mkdir -p .coverage-reports py{36,37,38,39,310,311}-!nocover: mv {envtmpdir}/.coverage {toxinidir}/.coverage-reports/.coverage.{env:COVERAGE_PREFIX:}{envname} # Running with coverage reporting disabled py{36,37,38,39,310,311}-nocover: pytest --basetemp {envtmpdir} {posargs} deps = py{36,37,38,39,310,311}: -rrequirements/requirements.txt py{36,37,38,39,310,311}: -rrequirements/dev-requirements.txt py{36,37,38,39,310,311}: -rrequirements/plugin-requirements.txt # Only require coverage and pytest-cov when using it !nocover: -rrequirements/cov-requirements.txt passenv = BST_FORCE_BACKEND GI_TYPELIB_PATH INTEGRATION_CACHE # # These keys are not inherited by any other sections # setenv = py{36,37,38,39,310,311}: COVERAGE_FILE = {envtmpdir}/.coverage whitelist_externals = py{36,37,38,39,310,311}: mv mkdir # # Coverage reporting # [testenv:coverage] commands = - coverage combine --rcfile={toxinidir}/.coveragerc {toxinidir}/.coverage-reports/ coverage report --rcfile={toxinidir}/.coveragerc -m deps = -rrequirements/requirements.txt -rrequirements/dev-requirements.txt -rrequirements/cov-requirements.txt setenv = COVERAGE_FILE = {toxinidir}/.coverage-reports/.coverage # # Running linters # [testenv:lint] commands_pre = # Build C extensions to allow Pylint to analyse them {envpython} setup.py build_ext --inplace commands = pylint {posargs: buildstream tests 
doc/source/conf.py setup.py} deps = -rrequirements/requirements.txt -rrequirements/dev-requirements.txt # # Building documentation # [testenv:docs] commands = make -C doc # sphinx_rtd_theme < 0.4.2 breaks search functionality for Sphinx >= 1.8 deps = sphinx >= 1.8.5 sphinx-click sphinx_rtd_theme >= 0.4.2 -rrequirements/requirements.txt -rrequirements/plugin-requirements.txt passenv = BST_FORCE_SESSION_REBUILD BST_SOURCE_CACHE HOME LANG LC_ALL whitelist_externals = make buildstream-1.6.9/versioneer.py000066400000000000000000002065461437515270000166160ustar00rootroot00000000000000# pylint: skip-file # Version: 0.18 """The Versioneer - like a rocketeer, but for versions. The Versioneer ============== * like a rocketeer, but for versions! * https://github.com/warner/python-versioneer * Brian Warner * License: Public Domain * Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy * [![Latest Version] (https://pypip.in/version/versioneer/badge.svg?style=flat) ](https://pypi.python.org/pypi/versioneer/) * [![Build Status] (https://travis-ci.org/warner/python-versioneer.png?branch=master) ](https://travis-ci.org/warner/python-versioneer) This is a tool for managing a recorded version number in distutils-based python projects. The goal is to remove the tedious and error-prone "update the embedded version string" step from your release process. Making a new release should be as easy as recording a new tag in your version-control system, and maybe making new tarballs. ## Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results ## Version Identifiers Source trees come from a variety of places: * a version-control system checkout (mostly used by developers) * a nightly tarball, produced by build automation * a snapshot tarball, produced by a web-based VCS browser, like github's "tarball from tag" feature * a release tarball, produced by "setup.py sdist", distributed through PyPI Within each source tree, the version identifier (either a string or a number, this tool is format-agnostic) can come from a variety of places: * ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows about recent "tags" and an absolute revision-id * the name of the directory into which the tarball was unpacked * an expanded VCS keyword ($Id$, etc) * a `_version.py` created by some earlier build step For released software, the version identifier is closely related to a VCS tag. Some projects use tag names that include more than just the version string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool needs to strip the tag prefix to extract the version identifier. For unreleased software (between tags), the version identifier should provide enough information to help developers recreate the same tree, while also giving them an idea of roughly how old the tree is (after version 1.2, before version 1.3). Many VCS systems can report a description that captures this, for example `git describe --tags --dirty --always` reports things like "0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the 0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has uncommitted changes. 
The version identifier is used for multiple purposes: * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball ## Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to dynamically ask the VCS tool for version information at import time. `_version.py` also contains `$Revision$` markers, and the installation process marks `_version.py` to have this marker rewritten with a tag name during the `git archive` command. As a result, generated tarballs will contain enough information to get the proper version. To allow `setup.py` to compute a version too, a `versioneer.py` is added to the top level of your source tree, next to `setup.py` and the `setup.cfg` that configures it. This overrides several distutils/setuptools commands to compute the version when invoked, and changes `setup.py build` and `setup.py sdist` to replace `_version.py` with a small static file that contains just the generated version data. ## Installation See [INSTALL.md](./INSTALL.md) for detailed installation instructions. ## Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the `get_versions()` function. From the "outside" (e.g. in `setup.py`), you can import the top-level `versioneer.py` and run `get_versions()`. Both functions return a dictionary with different flavors of version information: * `['version']`: A condensed version string, rendered using the selected style. This is the most commonly used value for the project's version string. The default "pep440" style yields strings like `0.11`, `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section below for alternative styles. * `['full-revisionid']`: detailed revision identifier. For Git, this is the full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". * `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the commit date in ISO 8601 format. This will be None if the date is not available. * `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that this is only accurate if run in a VCS checkout, otherwise it is likely to be False or None * `['error']`: if the version string could not be computed, this will be set to a string describing the problem, otherwise it will be None. It may be useful to throw an exception in setup.py if this is set, to avoid e.g. creating tarballs with a version string of "unknown". Some variants are more useful than others. Including `full-revisionid` in a bug report should allow developers to reconstruct the exact code being tested (or indicate the presence of local changes that should be shared with the developers). `version` is suitable for display in an "about" box or a CLI `--version` output: it can be easily compared against release notes and lists of bugs fixed in various releases. The installer adds the following text to your `__init__.py` to place a basic version in `YOURPROJECT.__version__`: from ._version import get_versions __version__ = get_versions()['version'] del get_versions ## Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. 
The default style, "pep440", produces a PEP440-compliant string, equal to the un-prefixed tag name for actual releases, and containing an additional "local version" section with more detail for in-between builds. For Git, this is TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags --dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and that this commit is two revisions ("+2") beyond the "0.11" tag. For released software (exactly equal to a known tag), the identifier will only contain the stripped tag, e.g. "0.11". Other styles are available. See [details.md](details.md) in the Versioneer source tree for descriptions. ## Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py version`, which will run the version-lookup code in a verbose mode, and will display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). ## Known Limitations Some situations are known to cause problems for Versioneer. This details the most significant ones. More can be found on Github [issues page](https://github.com/warner/python-versioneer/issues). ### Subprojects Versioneer has limited support for source trees in which `setup.py` is not in the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are two common reasons why `setup.py` might not be in the root: * Source trees which contain multiple subprojects, such as [Buildbot](https://github.com/buildbot/buildbot), which contains both "master" and "slave" subprojects, each with their own `setup.py`, `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI distributions (and upload multiple independently-installable tarballs). * Source trees whose main purpose is to contain a C library, but which also provide bindings to Python (and perhaps other langauges) in subdirectories. Versioneer will look for `.git` in parent directories, and most operations should get the right version string. However `pip` and `setuptools` have bugs and implementation details which frequently cause `pip install .` from a subproject directory to fail to find a correct version string (so it usually defaults to `0+unknown`). `pip install --editable .` should work correctly. `setup.py install` might work too. Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in some later version. [Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking this issue. The discussion in [PR #61](https://github.com/warner/python-versioneer/pull/61) describes the issue from the Versioneer side in more detail. [pip PR#3176](https://github.com/pypa/pip/pull/3176) and [pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve pip to let Versioneer work correctly. Versioneer-0.16 and earlier only looked for a `.git` directory next to the `setup.cfg`, so subprojects were completely unsupported with those releases. ### Editable installs with setuptools <= 18.5 `setup.py develop` and `pip install --editable .` allow you to install a project into a virtualenv once, then continue editing the source code (and test) without re-installing after every change. "Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a convenient way to specify executable scripts that should be installed along with the python package. 
These both work as expected when using modern setuptools. When using setuptools-18.5 or earlier, however, certain operations will cause `pkg_resources.DistributionNotFound` errors when running the entrypoint script, which must be resolved by re-installing the package. This happens when the install happens with one version, then the egg_info data is regenerated while a different version is checked out. Many setup.py commands cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into a different virtualenv), so this can be surprising. [Bug #83](https://github.com/warner/python-versioneer/issues/83) describes this one, but upgrading to a newer version of setuptools should probably resolve it. ### Unicode version strings While Versioneer works (and is continually tested) with both Python 2 and Python 3, it is not entirely consistent with bytes-vs-unicode distinctions. Newer releases probably generate unicode version strings on py2. It's not clear that this is wrong, but it may be surprising for applications when then write these strings to a network connection or include them in bytes-oriented APIs like cryptographic checksums. [Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates this question. ## Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: * install the new Versioneer (`pip install -U versioneer` or equivalent) * edit `setup.cfg`, if necessary, to include any new configuration settings indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details. * re-run `versioneer install` in your source tree, to replace `SRC/_version.py` * commit any changed files ## Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like src/git/ . The top-level `versioneer.py` script is assembled from these components by running make-versioneer.py . In the future, make-versioneer.py will take a VCS name as an argument, and will construct a version of `versioneer.py` that is specific to the given VCS. It might also take the configuration arguments that are currently provided manually during installation by editing setup.py . Alternatively, it might go the other direction and include code from all supported VCS systems, reducing the number of intermediate scripts. ## License To make Versioneer easier to embed, all its code is dedicated to the public domain. The `_version.py` that it creates is also in the public domain. Specifically, both are released under the Creative Commons "Public Domain Dedication" license (CC0-1.0), as described in https://creativecommons.org/publicdomain/zero/1.0/ . """ from __future__ import print_function try: import configparser except ImportError: import ConfigParser as configparser import errno import json import os import re import subprocess import sys class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_root(): """Get the project root directory. We require that all commands are run from the project root, i.e. the directory that contains setup.py, setup.cfg, and versioneer.py . 
""" root = os.path.realpath(os.path.abspath(os.getcwd())) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): # allow 'python path/to/setup.py COMMAND' root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) setup_py = os.path.join(root, "setup.py") versioneer_py = os.path.join(root, "versioneer.py") if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)): err = ("Versioneer was unable to run the project root directory. " "Versioneer requires setup.py to be executed from " "its immediate directory (like 'python setup.py COMMAND'), " "or in a way that lets it use sys.argv[0] to find the root " "(like 'python path/to/setup.py COMMAND').") raise VersioneerBadRootError(err) try: # Certain runtime workflows (setup.py install/develop in a setuptools # tree) execute all dependencies in a single python process, so # "versioneer" may be imported multiple times, and python's shared # module-import table will cache the first one. So we can't use # os.path.dirname(__file__), as that will find whichever # versioneer.py was first imported, even in later projects. me = os.path.realpath(os.path.abspath(__file__)) me_dir = os.path.normcase(os.path.splitext(me)[0]) vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) if me_dir != vsr_dir: print("Warning: build in %s is using versioneer.py from %s" % (os.path.dirname(me), versioneer_py)) except NameError: pass return root def get_config_from_root(root): """Read the project setup.cfg file to determine Versioneer config.""" # This might raise EnvironmentError (if setup.cfg is missing), or # configparser.NoSectionError (if it lacks a [versioneer] section), or # configparser.NoOptionError (if it lacks "VCS="). See the docstring at # the top of versioneer.py for instructions on writing your setup.cfg . 
setup_cfg = os.path.join(root, "setup.cfg") parser = configparser.SafeConfigParser() with open(setup_cfg, "r") as f: parser.readfp(f) VCS = parser.get("versioneer", "VCS") # mandatory def get(parser, name): if parser.has_option("versioneer", name): return parser.get("versioneer", name) return None cfg = VersioneerConfig() cfg.VCS = VCS cfg.style = get(parser, "style") or "" cfg.versionfile_source = get(parser, "versionfile_source") cfg.versionfile_build = get(parser, "versionfile_build") cfg.tag_prefix = get(parser, "tag_prefix") cfg.tag_regex = get(parser, "tag_regex") or "*" if cfg.tag_prefix in ("''", '""'): cfg.tag_prefix = "" cfg.parentdir_prefix = get(parser, "parentdir_prefix") cfg.verbose = get(parser, "verbose") return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" # these dictionaries contain VCS-specific tools LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %s" % dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %s" % (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %s (error)" % dispcmd) print("stdout was %s" % stdout) return None, p.returncode return stdout, p.returncode LONG_VERSION_PY['git'] = ''' # This file helps to compute a version number in source trees obtained from # git-archive tarball (such as those provided by githubs download-from-tag # feature). Distribution tarballs (built by setup.py sdist) and build # directories (produced by setup.py build) will contain a much shorter file # that just contains the computed version number. # This file is released into the public domain. Generated by # versioneer-0.18 (https://github.com/warner/python-versioneer) """Git implementation of _version.py.""" import errno import os import re import subprocess import sys def get_keywords(): """Get the keywords needed to look up the version information.""" # these strings will be replaced by git during git-archive. # setup.py/versioneer.py will grep for the variable names, so they must # each be defined on a line of their own. _version.py will just call # get_keywords(). 
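# As an illustrative note (the values shown are hypothetical), after
# "git archive" has been run on a tagged commit the three assignments below
# expand to something like:
#
#     git_refnames = " (HEAD -> master, tag: v1.2.0)"
#     git_full = "2414721b194453f058079d897d13c4e377f92dc6"
#     git_date = "2023-02-14 10:30:00 +0100"
#
# whereas in a plain checkout they keep their unexpanded "Format:" markers,
# which git_versions_from_keywords() detects and gives up on.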
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} return keywords class VersioneerConfig: """Container for Versioneer configuration parameters.""" def get_config(): """Create, populate and return the VersioneerConfig() object.""" # these strings are filled in when 'setup.py versioneer' creates # _version.py cfg = VersioneerConfig() cfg.VCS = "git" cfg.style = "%(STYLE)s" cfg.tag_prefix = "%(TAG_PREFIX)s" cfg.tag_regex = "%(TAG_REGEX)s" cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" cfg.verbose = False return cfg class NotThisMethod(Exception): """Exception raised if a method is not valid for the current scenario.""" LONG_VERSION_PY = {} HANDLERS = {} def register_vcs_handler(vcs, method): # decorator """Decorator to mark a method as the handler for a particular VCS.""" def decorate(f): """Store f in HANDLERS[vcs][method].""" if vcs not in HANDLERS: HANDLERS[vcs] = {} HANDLERS[vcs][method] = f return f return decorate def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False, env=None): """Call the given command(s).""" assert isinstance(commands, list) p = None for c in commands: try: dispcmd = str([c] + args) # remember shell=False, so use git.cmd on windows, not just git p = subprocess.Popen([c] + args, cwd=cwd, env=env, stdout=subprocess.PIPE, stderr=(subprocess.PIPE if hide_stderr else None)) break except EnvironmentError: e = sys.exc_info()[1] if e.errno == errno.ENOENT: continue if verbose: print("unable to run %%s" %% dispcmd) print(e) return None, None else: if verbose: print("unable to find command, tried %%s" %% (commands,)) return None, None stdout = p.communicate()[0].strip() if sys.version_info[0] >= 3: stdout = stdout.decode() if p.returncode != 0: if verbose: print("unable to run %%s (error)" %% dispcmd) print("stdout was %%s" %% stdout) return None, p.returncode return stdout, p.returncode def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %%s but none started with prefix %%s" %% (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. 
keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %%d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%%s', no digits" %% ",".join(refs - tags)) if verbose: print("likely tags: %%s" %% ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %%s" %% r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, tag_regex, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. 
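
    As an illustrative sketch (the tag, distance and hash are hypothetical),
    the "git describe --tags --dirty --always --long" output parsed below is
    either of the form

        v1.2.0-14-g2414721-dirty   (closest tag, commit distance, short hash)

    or, when no matching tag exists, just a bare short hash such as 2414721;
    the pieces dictionary records those components separately.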
""" GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %%s not under git control" %% root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%%s%%s" %% (tag_prefix, tag_regex)], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? pieces["error"] = ("unable to parse git-describe output: '%%s'" %% describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%%s' doesn't start with prefix '%%s'" print(fmt %% (full_tag, tag_prefix)) pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" %% (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 
0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%%d" %% pieces["distance"] else: # exception #1 rendered = "0.post.dev%%d" %% pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%%s" %% pieces["short"] else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%%s" %% pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%%d" %% pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%%s'" %% style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} def get_versions(): """Get version information or return default if unable to do so.""" # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have # __file__, we can work backwards from there to the root. Some # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which # case we can only use expanded keywords. 
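    #
    # As an illustrative note (the concrete values are hypothetical), every
    # return path below yields a dictionary of the same shape, e.g.:
    #
    #     {"version": "1.2.0+14.g2414721.dirty",
    #      "full-revisionid": "2414721b194453f058079d897d13c4e377f92dc6",
    #      "dirty": True,
    #      "error": None,
    #      "date": "2023-02-14T10:30:00+0100"}
    #
    # with "error" set and "version" falling back to "0+unknown" when none of
    # the strategies succeed.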
cfg = get_config() verbose = cfg.verbose try: return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, verbose) except NotThisMethod: pass try: root = os.path.realpath(__file__) # versionfile_source is the relative path from the top of the source # tree (where the .git directory might live) to this file. Invert # this to find the root from __file__. for i in cfg.versionfile_source.split('/'): root = os.path.dirname(root) except NameError: return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to find root of source tree", "date": None} try: pieces = git_pieces_from_vcs(cfg.tag_prefix, cfg.tag_regex, root, verbose) return render(pieces, cfg.style) except NotThisMethod: pass try: if cfg.parentdir_prefix: return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) except NotThisMethod: pass return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} ''' @register_vcs_handler("git", "get_keywords") def git_get_keywords(versionfile_abs): """Extract version information from the given file.""" # the code embedded in _version.py can just fetch the value of these # keywords. When used from setup.py, we don't want to import _version.py, # so we do it with a regexp instead. This function is not used from # _version.py. keywords = {} try: f = open(versionfile_abs, "r") for line in f.readlines(): if line.strip().startswith("git_refnames ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["refnames"] = mo.group(1) if line.strip().startswith("git_full ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["full"] = mo.group(1) if line.strip().startswith("git_date ="): mo = re.search(r'=\s*"(.*)"', line) if mo: keywords["date"] = mo.group(1) f.close() except EnvironmentError: pass return keywords @register_vcs_handler("git", "keywords") def git_versions_from_keywords(keywords, tag_prefix, verbose): """Get version information from git keywords.""" if not keywords: raise NotThisMethod("no keywords at all, weird") date = keywords.get("date") if date is not None: # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 # -like" string, which we must then edit to make compliant), because # it's been around since git-1.5.3, and it's too difficult to # discover which version we're using, or to work around using an # older one. date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) refnames = keywords["refnames"].strip() if refnames.startswith("$Format"): if verbose: print("keywords are unexpanded, not using") raise NotThisMethod("unexpanded keywords, not a git-archive tarball") refs = set([r.strip() for r in refnames.strip("()").split(",")]) # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of # just "foo-1.0". If we see a "tag: " prefix, prefer those. TAG = "tag: " tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)]) if not tags: # Either we're using git < 1.8.3, or there really are no tags. We use # a heuristic: assume all version tags have a digit. The old git %d # expansion behaves like git log --decorate=short and strips out the # refs/heads/ and refs/tags/ prefixes that would let us distinguish # between branches and tags. By ignoring refnames without digits, we # filter out many common branch names like "release" and # "stabilization", as well as "HEAD" and "master". 
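    #
    # Illustrative example (the refs are hypothetical): with an old git that
    # omits the "tag: " prefix, a refnames expansion like
    # " (HEAD -> master, v1.2.0, release)" is reduced here to the single
    # candidate "v1.2.0", the only ref containing a digit.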
tags = set([r for r in refs if re.search(r'\d', r)]) if verbose: print("discarding '%s', no digits" % ",".join(refs - tags)) if verbose: print("likely tags: %s" % ",".join(sorted(tags))) for ref in sorted(tags): # sorting will prefer e.g. "2.0" over "2.0rc1" if ref.startswith(tag_prefix): r = ref[len(tag_prefix):] if verbose: print("picking %s" % r) return {"version": r, "full-revisionid": keywords["full"].strip(), "dirty": False, "error": None, "date": date} # no suitable tags, so version is "0+unknown", but full hex is still there if verbose: print("no suitable tags, using unknown + full revision id") return {"version": "0+unknown", "full-revisionid": keywords["full"].strip(), "dirty": False, "error": "no suitable tags", "date": None} @register_vcs_handler("git", "pieces_from_vcs") def git_pieces_from_vcs(tag_prefix, tag_regex, root, verbose, run_command=run_command): """Get version from 'git describe' in the root of the source tree. This only gets called if the git-archive 'subst' keywords were *not* expanded, and _version.py hasn't already been rewritten with a short version string, meaning we're inside a checked out source tree. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root, hide_stderr=True) if rc != 0: if verbose: print("Directory %s not under git control" % root) raise NotThisMethod("'git rev-parse --git-dir' returned error") # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] # if there isn't one, this yields HEX[-dirty] (no NUM) describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty", "--always", "--long", "--match", "%s%s" % (tag_prefix, tag_regex)], cwd=root) # --long was added in git-1.5.5 if describe_out is None: raise NotThisMethod("'git describe' failed") describe_out = describe_out.strip() full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root) if full_out is None: raise NotThisMethod("'git rev-parse' failed") full_out = full_out.strip() pieces = {} pieces["long"] = full_out pieces["short"] = full_out[:7] # maybe improved later pieces["error"] = None # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] # TAG might have hyphens. git_describe = describe_out # look for -dirty suffix dirty = git_describe.endswith("-dirty") pieces["dirty"] = dirty if dirty: git_describe = git_describe[:git_describe.rindex("-dirty")] # now we have TAG-NUM-gHEX or HEX if "-" in git_describe: # TAG-NUM-gHEX mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) if not mo: # unparseable. Maybe git-describe is misbehaving? 
pieces["error"] = ("unable to parse git-describe output: '%s'" % describe_out) return pieces # tag full_tag = mo.group(1) if not full_tag.startswith(tag_prefix): if verbose: fmt = "tag '%s' doesn't start with prefix '%s'" print(fmt % (full_tag, tag_prefix)) pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" % (full_tag, tag_prefix)) return pieces pieces["closest-tag"] = full_tag[len(tag_prefix):] # distance: number of commits since tag pieces["distance"] = int(mo.group(2)) # commit: short hex revision ID pieces["short"] = mo.group(3) else: # HEX: no tags pieces["closest-tag"] = None count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"], cwd=root) pieces["distance"] = int(count_out) # total number of commits # commit date: see ISO-8601 comment in git_versions_from_keywords() date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) return pieces def do_vcs_install(manifest_in, versionfile_source, ipy): """Git-specific installation logic for Versioneer. For Git, this means creating/changing .gitattributes to mark _version.py for export-subst keyword substitution. """ GITS = ["git"] if sys.platform == "win32": GITS = ["git.cmd", "git.exe"] files = [manifest_in, versionfile_source] if ipy: files.append(ipy) try: me = __file__ if me.endswith(".pyc") or me.endswith(".pyo"): me = os.path.splitext(me)[0] + ".py" versioneer_file = os.path.relpath(me) except NameError: versioneer_file = "versioneer.py" files.append(versioneer_file) present = False try: f = open(".gitattributes", "r") for line in f.readlines(): if line.strip().startswith(versionfile_source): if "export-subst" in line.strip().split()[1:]: present = True f.close() except EnvironmentError: pass if not present: f = open(".gitattributes", "a+") f.write("%s export-subst\n" % versionfile_source) f.close() files.append(".gitattributes") run_command(GITS, ["add", "--"] + files) def versions_from_parentdir(parentdir_prefix, root, verbose): """Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. We will also support searching up two directory levels for an appropriately named parent directory """ rootdirs = [] for i in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {"version": dirname[len(parentdir_prefix):], "full-revisionid": None, "dirty": False, "error": None, "date": None} else: rootdirs.append(root) root = os.path.dirname(root) # up a level if verbose: print("Tried directories %s but none started with prefix %s" % (str(rootdirs), parentdir_prefix)) raise NotThisMethod("rootdir doesn't start with parentdir_prefix") SHORT_VERSION_PY = """ # This file was generated by 'versioneer.py' (0.18) from # revision-control system data, or from the parent directory name of an # unpacked source archive. Distribution tarballs contain a pre-generated copy # of this file. 
import json version_json = ''' %s ''' # END VERSION_JSON def get_versions(): return json.loads(version_json) """ def versions_from_file(filename): """Try to determine the version from _version.py if present.""" try: with open(filename) as f: contents = f.read() except EnvironmentError: raise NotThisMethod("unable to read _version.py") mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", contents, re.M | re.S) if not mo: raise NotThisMethod("no version_json in _version.py") return json.loads(mo.group(1)) def write_to_version_file(filename, versions): """Write the given version number to the given _version.py file.""" os.unlink(filename) contents = json.dumps(versions, sort_keys=True, indent=1, separators=(",", ": ")) with open(filename, "w") as f: f.write(SHORT_VERSION_PY % contents) print("set %s to '%s'" % (filename, versions["version"])) def plus_or_dot(pieces): """Return a + if we don't already have one, else return a .""" if "+" in pieces.get("closest-tag", ""): return "." return "+" def render_pep440(pieces): """Build up version string, with post-release "local version identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty Exceptions: 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += plus_or_dot(pieces) rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" else: # exception #1 rendered = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"]) if pieces["dirty"]: rendered += ".dirty" return rendered def render_pep440_pre(pieces): """TAG[.post.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post.devDISTANCE """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += ".post.dev%d" % pieces["distance"] else: # exception #1 rendered = "0.post.dev%d" % pieces["distance"] return rendered def render_pep440_post(pieces): """TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear "older" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += plus_or_dot(pieces) rendered += "g%s" % pieces["short"] else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" rendered += "+g%s" % pieces["short"] return rendered def render_pep440_old(pieces): """TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty. Eexceptions: 1: no tags. 0.postDISTANCE[.dev0] """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"] or pieces["dirty"]: rendered += ".post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" else: # exception #1 rendered = "0.post%d" % pieces["distance"] if pieces["dirty"]: rendered += ".dev0" return rendered def render_git_describe(pieces): """TAG[-DISTANCE-gHEX][-dirty]. Like 'git describe --tags --dirty --always'. Exceptions: 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] if pieces["distance"]: rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render_git_describe_long(pieces): """TAG-DISTANCE-gHEX[-dirty]. Like 'git describe --tags --dirty --always -long'. The distance/hash is unconditional. Exceptions: 1: no tags. HEX[-dirty] (note: no 'g' prefix) """ if pieces["closest-tag"]: rendered = pieces["closest-tag"] rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) else: # exception #1 rendered = pieces["short"] if pieces["dirty"]: rendered += "-dirty" return rendered def render(pieces, style): """Render the given version pieces into the requested style.""" if pieces["error"]: return {"version": "unknown", "full-revisionid": pieces.get("long"), "dirty": None, "error": pieces["error"], "date": None} if not style or style == "default": style = "pep440" # the default if style == "pep440": rendered = render_pep440(pieces) elif style == "pep440-pre": rendered = render_pep440_pre(pieces) elif style == "pep440-post": rendered = render_pep440_post(pieces) elif style == "pep440-old": rendered = render_pep440_old(pieces) elif style == "git-describe": rendered = render_git_describe(pieces) elif style == "git-describe-long": rendered = render_git_describe_long(pieces) else: raise ValueError("unknown style '%s'" % style) return {"version": rendered, "full-revisionid": pieces["long"], "dirty": pieces["dirty"], "error": None, "date": pieces.get("date")} class VersioneerBadRootError(Exception): """The project root directory is unknown or missing key files.""" def get_versions(verbose=False): """Get the project version from whatever source is available. Returns dict with two keys: 'version' and 'full'. """ if "versioneer" in sys.modules: # see the discussion in cmdclass.py:get_cmdclass() del sys.modules["versioneer"] root = get_root() cfg = get_config_from_root(root) assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" handlers = HANDLERS.get(cfg.VCS) assert handlers, "unrecognized VCS '%s'" % cfg.VCS verbose = verbose or cfg.verbose assert cfg.versionfile_source is not None, \ "please set versioneer.versionfile_source" assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" versionfile_abs = os.path.join(root, cfg.versionfile_source) # extract version from first of: _version.py, VCS command (e.g. 'git # describe'), parentdir. This is meant to work for developers using a # source checkout, for users of a tarball created by 'setup.py sdist', # and for users of a tarball/zipball created by 'git archive' or github's # download-from-tag feature or the equivalent in other VCSes. 
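    #
    # As an illustrative sketch of the fallback chain implemented below (the
    # directory name is hypothetical):
    #
    #   1. expanded git-archive keywords read out of PKG/_version.py
    #   2. a version_json blob already written into PKG/_version.py
    #   3. "git describe" run against the source checkout
    #   4. the parentdir_prefix heuristic (e.g. an unpacked "myproject-1.2.0/")
    #
    # The first strategy that does not raise NotThisMethod wins.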
get_keywords_f = handlers.get("get_keywords") from_keywords_f = handlers.get("keywords") if get_keywords_f and from_keywords_f: try: keywords = get_keywords_f(versionfile_abs) ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) if verbose: print("got version from expanded keyword %s" % ver) return ver except NotThisMethod: pass try: ver = versions_from_file(versionfile_abs) if verbose: print("got version from file %s %s" % (versionfile_abs, ver)) return ver except NotThisMethod: pass from_vcs_f = handlers.get("pieces_from_vcs") if from_vcs_f: try: pieces = from_vcs_f(cfg.tag_prefix, cfg.tag_regex, root, verbose) ver = render(pieces, cfg.style) if verbose: print("got version from VCS %s" % ver) return ver except NotThisMethod: pass try: if cfg.parentdir_prefix: ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) if verbose: print("got version from parentdir %s" % ver) return ver except NotThisMethod: pass if verbose: print("unable to compute version") return {"version": "0+unknown", "full-revisionid": None, "dirty": None, "error": "unable to compute version", "date": None} def get_version(): """Get the short version string for this project.""" return get_versions()["version"] def get_cmdclass(): """Get the custom setuptools/distutils subclasses used by Versioneer.""" if "versioneer" in sys.modules: del sys.modules["versioneer"] # this fixes the "python setup.py develop" case (also 'install' and # 'easy_install .'), in which subdependencies of the main project are # built (using setup.py bdist_egg) in the same python process. Assume # a main project A and a dependency B, which use different versions # of Versioneer. A's setup.py imports A's Versioneer, leaving it in # sys.modules by the time B's setup.py is executed, causing B to run # with the wrong versioneer. Setuptools wraps the sub-dep builds in a # sandbox that restores sys.modules to it's pre-build state, so the # parent is protected against the child's "import versioneer". By # removing ourselves from sys.modules here, before the child build # happens, we protect the child from the parent's versioneer too. # Also see https://github.com/warner/python-versioneer/issues/52 cmds = {} # we add "version" to both distutils and setuptools from distutils.core import Command class cmd_version(Command): description = "report generated version string" user_options = [] boolean_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): vers = get_versions(verbose=True) print("Version: %s" % vers["version"]) print(" full-revisionid: %s" % vers.get("full-revisionid")) print(" dirty: %s" % vers.get("dirty")) print(" date: %s" % vers.get("date")) if vers["error"]: print(" error: %s" % vers["error"]) cmds["version"] = cmd_version # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: # distutils/build -> build_py # distutils/install -> distutils/build ->.. # setuptools/bdist_wheel -> distutils/install ->.. # setuptools/bdist_egg -> distutils/install_lib -> build_py # setuptools/install -> bdist_egg ->.. # setuptools/develop -> ? # pip install: # copies source tree to a tempdir before running egg_info/etc # if .git isn't copied too, 'git describe' will fail # then does setup.py bdist_wheel, or sometimes setup.py install # setup.py egg_info -> ? 
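    #
    # As an illustrative reminder (the project name is hypothetical), the
    # dictionary assembled below is meant to be passed straight to setup(),
    # as the CONFIG_ERROR text later in this file also shows:
    #
    #     import versioneer
    #     setup(name="myproject",
    #           version=versioneer.get_version(),
    #           cmdclass=versioneer.get_cmdclass(),
    #           ...)
    #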
# we override different "build_py" commands for both environments if "setuptools" in sys.modules: from setuptools.command.build_py import build_py as _build_py else: from distutils.command.build_py import build_py as _build_py class cmd_build_py(_build_py): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() _build_py.run(self) # now locate _version.py in the new build/ directory and replace # it with an updated value if cfg.versionfile_build: target_versionfile = os.path.join(self.build_lib, cfg.versionfile_build) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) cmds["build_py"] = cmd_build_py if "cx_Freeze" in sys.modules: # cx_freeze enabled? from cx_Freeze.dist import build_exe as _build_exe # nczeczulin reports that py2exe won't like the pep440-style string # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. # setup(console=[{ # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION # "product_version": versioneer.get_version(), # ... class cmd_build_exe(_build_exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _build_exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "TAG_REGEX": cfg.tag_regex, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["build_exe"] = cmd_build_exe del cmds["build_py"] if 'py2exe' in sys.modules: # py2exe enabled? try: from py2exe.distutils_buildexe import py2exe as _py2exe # py3 except ImportError: from py2exe.build_exe import py2exe as _py2exe # py2 class cmd_py2exe(_py2exe): def run(self): root = get_root() cfg = get_config_from_root(root) versions = get_versions() target_versionfile = cfg.versionfile_source print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, versions) _py2exe.run(self) os.unlink(target_versionfile) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "TAG_REGEX": cfg.tag_regex, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) cmds["py2exe"] = cmd_py2exe # we override different "sdist" commands for both environments if "setuptools" in sys.modules: from setuptools.command.sdist import sdist as _sdist else: from distutils.command.sdist import sdist as _sdist class cmd_sdist(_sdist): def run(self): versions = get_versions() self._versioneer_generated_versions = versions # unless we update this, the command will keep using the old # version self.distribution.metadata.version = versions["version"] return _sdist.run(self) def make_release_tree(self, base_dir, files): root = get_root() cfg = get_config_from_root(root) _sdist.make_release_tree(self, base_dir, files) # now locate _version.py in the new base_dir directory # (remembering that it may be a hardlink) and replace it with an # updated value target_versionfile = os.path.join(base_dir, cfg.versionfile_source) print("UPDATING %s" % target_versionfile) write_to_version_file(target_versionfile, self._versioneer_generated_versions) cmds["sdist"] = cmd_sdist return cmds CONFIG_ERROR = """ setup.cfg is missing the necessary Versioneer 
configuration. You need a section like: [versioneer] VCS = git style = pep440 versionfile_source = src/myproject/_version.py versionfile_build = myproject/_version.py tag_prefix = parentdir_prefix = myproject- You will also need to edit your setup.py to use the results: import versioneer setup(version=versioneer.get_version(), cmdclass=versioneer.get_cmdclass(), ...) Please read the docstring in ./versioneer.py for configuration instructions, edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. """ SAMPLE_CONFIG = """ # See the docstring in versioneer.py for instructions. Note that you must # re-run 'versioneer.py setup' after changing this section, and commit the # resulting files. [versioneer] #VCS = git #style = pep440 #versionfile_source = #versionfile_build = #tag_prefix = #parentdir_prefix = """ INIT_PY_SNIPPET = """ from ._version import get_versions __version__ = get_versions()['version'] del get_versions """ def do_setup(): """Main VCS-independent setup function for installing Versioneer.""" root = get_root() try: cfg = get_config_from_root(root) except (EnvironmentError, configparser.NoSectionError, configparser.NoOptionError) as e: if isinstance(e, (EnvironmentError, configparser.NoSectionError)): print("Adding sample versioneer config to setup.cfg", file=sys.stderr) with open(os.path.join(root, "setup.cfg"), "a") as f: f.write(SAMPLE_CONFIG) print(CONFIG_ERROR, file=sys.stderr) return 1 print(" creating %s" % cfg.versionfile_source) with open(cfg.versionfile_source, "w") as f: LONG = LONG_VERSION_PY[cfg.VCS] f.write(LONG % {"DOLLAR": "$", "STYLE": cfg.style, "TAG_PREFIX": cfg.tag_prefix, "TAG_REGEX": cfg.tag_regex, "PARENTDIR_PREFIX": cfg.parentdir_prefix, "VERSIONFILE_SOURCE": cfg.versionfile_source, }) ipy = os.path.join(os.path.dirname(cfg.versionfile_source), "__init__.py") if os.path.exists(ipy): try: with open(ipy, "r") as f: old = f.read() except EnvironmentError: old = "" if INIT_PY_SNIPPET not in old: print(" appending to %s" % ipy) with open(ipy, "a") as f: f.write(INIT_PY_SNIPPET) else: print(" %s unmodified" % ipy) else: print(" %s doesn't exist, ok" % ipy) ipy = None # Make sure both the top-level "versioneer.py" and versionfile_source # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so # they'll be copied into source distributions. Pip won't be able to # install the package without this. manifest_in = os.path.join(root, "MANIFEST.in") simple_includes = set() try: with open(manifest_in, "r") as f: for line in f: if line.startswith("include "): for include in line.split()[1:]: simple_includes.add(include) except EnvironmentError: pass # That doesn't cover everything MANIFEST.in can do # (http://docs.python.org/2/distutils/sourcedist.html#commands), so # it might give some false negatives. Appending redundant 'include' # lines is safe, though. if "versioneer.py" not in simple_includes: print(" appending 'versioneer.py' to MANIFEST.in") with open(manifest_in, "a") as f: f.write("include versioneer.py\n") else: print(" 'versioneer.py' already in MANIFEST.in") if cfg.versionfile_source not in simple_includes: print(" appending versionfile_source ('%s') to MANIFEST.in" % cfg.versionfile_source) with open(manifest_in, "a") as f: f.write("include %s\n" % cfg.versionfile_source) else: print(" versionfile_source already in MANIFEST.in") # Make VCS-specific changes. For git, this means creating/changing # .gitattributes to mark _version.py for export-subst keyword # substitution. 
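    #
    # As an illustrative example (the path is hypothetical), the line that
    # do_vcs_install() appends to .gitattributes reads:
    #
    #     src/myproject/_version.py export-subst
    #
    # which tells "git archive" to expand the Format keywords in that file.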
do_vcs_install(manifest_in, cfg.versionfile_source, ipy) return 0 def scan_setup_py(): """Validate the contents of setup.py against Versioneer's expectations.""" found = set() setters = False errors = 0 with open("setup.py", "r") as f: for line in f.readlines(): if "import versioneer" in line: found.add("import") if "versioneer.get_cmdclass()" in line: found.add("cmdclass") if "versioneer.get_version()" in line: found.add("get_version") if "versioneer.VCS" in line: setters = True if "versioneer.versionfile_source" in line: setters = True if len(found) != 3: print("") print("Your setup.py appears to be missing some important items") print("(but I might be wrong). Please make sure it has something") print("roughly like the following:") print("") print(" import versioneer") print(" setup( version=versioneer.get_version(),") print(" cmdclass=versioneer.get_cmdclass(), ...)") print("") errors += 1 if setters: print("You should remove lines like 'versioneer.VCS = ' and") print("'versioneer.versionfile_source = ' . This configuration") print("now lives in setup.cfg, and should be removed from setup.py") print("") errors += 1 return errors if __name__ == "__main__": cmd = sys.argv[1] if cmd == "setup": errors = do_setup() errors += scan_setup_py() if errors: sys.exit(1)
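
# As an illustrative usage note (not an additional command): running
#
#     python versioneer.py setup
#
# from the project root runs do_setup() followed by scan_setup_py(), and
# exits non-zero if either step reports a problem.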