buildstream-1.6.9/.coveragerc

[run]
concurrency = multiprocessing
include =
*/buildstream/*
omit =
# Omit some internals
*/buildstream/_profile.py
*/buildstream/__main__.py
*/buildstream/_version.py
# Omit generated code
*/buildstream/_protos/*
*/.eggs/*
# Omit .tox directory
*/.tox/*
[report]
show_missing = True
precision = 2
[paths]
source =
buildstream/
*/site-packages/buildstream/
*/buildstream/buildstream/

buildstream-1.6.9/.gitattributes

buildstream/_version.py export-subst

buildstream-1.6.9/.github/CODEOWNERS

# Each line is a file pattern followed by one or more owners.
# These owners will be the default owners for everything in
# the repo, unless a later match takes precedence.
#
* @gtristan @juergbi @BenjaminSchubert @cs-shadow @abderrahim

buildstream-1.6.9/.github/common.env

# Shared common variables
CI_IMAGE_VERSION=master-784208155
CI_TOXENV_MAIN=py36-nocover,py37-nocover,py38-nocover,py39-nocover,py310-nocover,py311-nocover
CI_TOXENV_ALL="${CI_TOXENV_MAIN}"

buildstream-1.6.9/.github/compose/ci.docker-compose.yml

version: '3.4'
x-tests-template: &tests-template
image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:36-${CI_IMAGE_VERSION:-latest}
command: tox -vvvvv -- --color=yes --integration
environment:
TOXENV: ${CI_TOXENV_ALL}
# Enable privileges to run the sandbox
#
privileged: true
devices:
- /dev/fuse:/dev/fuse
# Mount the local directory and set the working directory
# to run the tests from.
#
volumes:
- ../..:/home/testuser/buildstream
working_dir: /home/testuser/buildstream
services:
fedora-36:
<<: *tests-template
image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:36-${CI_IMAGE_VERSION:-latest}
fedora-37:
<<: *tests-template
image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:37-${CI_IMAGE_VERSION:-latest}
debian-10:
<<: *tests-template
image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-debian:10-${CI_IMAGE_VERSION:-latest}
docs:
<<: *tests-template
command: tox -e docs
environment:
BST_FORCE_SESSION_REBUILD: 1
lint:
<<: *tests-template
command: tox -e lint

buildstream-1.6.9/.github/run-ci.sh

#!/bin/bash
topdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
function usage () {
echo "Usage: "
echo " run-ci.sh [OPTIONS] [TEST NAME [TEST NAME...]]"
echo
echo "Runs the CI tests locally using docker"
echo
echo "The test names are based on the names of tests in the CI yaml files"
echo
echo "If no test names are specified, all tests will be run"
echo
echo "Options:"
echo
echo " -h --help Display this help message and exit"
echo " "
exit 1;
}
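# Example invocation (illustrative; the test names correspond to the service
# names defined in compose/ci.docker-compose.yml):
#
#   ./run-ci.sh lint debian-10
#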
while : ; do
case "$1" in
-h|--help)
usage;
shift ;;
*)
break ;;
esac
done
test_names="${@}"
# We need to give ownership to the docker image user `testuser`,
# chances are high that this will be the same UID as the primary
# user on this host
#
user_uid="$(id -u)"
user_gid="$(id -g)"
if [ "${user_uid}" -ne "1000" ] || [ "${user_gid}" -ne "1000" ]; then
sudo chown -R 1000:1000 "${topdir}/.."
fi
# runTest()
#
# $1 = test name
#
function runTest() {
test_name=$1
# Run docker-compose from its directory, because it will use
# relative paths
cd "${topdir}/compose"
docker-compose \
--env-file ${topdir}/common.env \
--file ${topdir}/compose/ci.docker-compose.yml \
run "${test_name}"
}
# Lazily ensure that the script exits when a command fails
#
set -e
if [ -z "${test_names}" ]; then
runTest "lint"
runTest "debian-10"
runTest "fedora-36"
runTest "fedora-37"
else
for test_name in ${test_names}; do
runTest "${test_name}"
done
fi

buildstream-1.6.9/.github/workflows/ci.yml

name: PR Checks
# Pre-merge CI runs on both push and pull_request events. Even though this
# seems redundant, the concurrency configuration below avoids redundant runs.
#
on:
push:
pull_request:
# Use the concurrency feature to ensure we don't run redundant workflows
#
concurrency:
group: ${{ github.repository }}-${{ github.ref }}-${{ github.workflow }}
cancel-in-progress: true
jobs:
tests:
runs-on: ubuntu-20.04
continue-on-error: ${{ matrix.allow-failure || false }}
strategy:
fail-fast: false
matrix:
# The names here should map to a valid service defined in
# "../compose/ci.docker-compose.yml"
test-name:
- debian-10
- fedora-36
- fedora-37
- lint
steps:
- name: Check out repository
uses: actions/checkout@v2
# BuildStream requires tags to be able to find its version.
with:
fetch-depth: 0
- name: Run tests with Docker Compose
run: |
${GITHUB_WORKSPACE}/.github/run-ci.sh ${{ matrix.test-name }}
docs:
runs-on: ubuntu-20.04
steps:
- name: Check out repository
uses: actions/checkout@v2
# BuildStream requires tags to be able to find its version.
with:
fetch-depth: 0
- name: Give `testuser` ownership of the source directory
run: sudo chown -R 1000:1000 ${GITHUB_WORKSPACE}
- name: Build documentation using Docker Compose
run: |
docker-compose \
--env-file ${GITHUB_WORKSPACE}/.github/common.env \
--file ${GITHUB_WORKSPACE}/.github/compose/ci.docker-compose.yml \
run \
docs
- name: Upload artifacts
uses: actions/upload-artifact@v2
with:
name: docs
path: doc/build/html

buildstream-1.6.9/.github/workflows/release.yml

name: Upload Release Asset
on:
push:
tags:
- '*.*.*'
jobs:
build:
name: Upload Release Asset
runs-on: ubuntu-20.04
steps:
- name: Checkout code
uses: actions/checkout@v2
# BuildStream requires tags to be able to find its version.
with:
fetch-depth: 0
- name: Give `testuser` ownership of the source directory
run: sudo chown -R 1000:1000 ${GITHUB_WORKSPACE}
- name: Build documentation using Docker Compose
run: |
docker-compose \
--env-file ${GITHUB_WORKSPACE}/.github/common.env \
--file ${GITHUB_WORKSPACE}/.github/compose/ci.docker-compose.yml \
run \
docs
# Restore permissions to the current user
sudo chown -R ${USER} ${GITHUB_WORKSPACE}
tar -C doc/build/html -zcf docs.tgz .
- name: Upload release assets
run: |
tag_name="${GITHUB_REF##*/}"
hub release create -a "docs.tgz" -m "$tag_name" "$tag_name"
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

buildstream-1.6.9/.gitignore

# Compiled python modules.
buildstream/**/*.pyc
tests/**/*.pyc
# Setuptools distribution folder.
/dist/
# Python egg metadata, regenerated from source files by setuptools.
/*.egg-info
.eggs
# Some testing related things
integration-cache/
tmp
.coverage
.coverage-reports/
.coverage.*
.cache
.pytest_cache/
*.bst/
.tox/
# Pycache, in case buildstream is run directly from within the source
# tree
__pycache__/
# Generated version file
buildstream/__version__.py
#Autogenerated doc
doc/source/elements/
doc/source/sources/
doc/source/modules.rst
doc/source/buildstream.rst
doc/source/buildstream.*.rst
doc/build/

buildstream-1.6.9/.pylintrc

[MASTER]
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=ujson
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS,tests,doc
# Add files or directories matching the regex patterns to the blacklist. The
# regex matches against base names, not paths.
ignore-patterns=.*_pb2.py,.*_pb2_grpc.py
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Use multiple processes to speed up Pylint.
jobs=1
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Pickle collected data for later comparisons.
persistent=yes
# Specify a configuration file.
#rcfile=
# When enabled, pylint would attempt to guess common misconfiguration and emit
# user-friendly hints instead of false-positive error messages
suggestion-mode=yes
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
# We have two groups of disabled messages:
#
# 1) Messages that are of no use to us
# This is either because we don't follow the convention
# (missing-docstring and protected-access come to mind), or because
# it's not very useful in CI (too-many-arguments, for example)
#
# 2) Messages that we would like to enable at some point
# We introduced linting quite late into the project, so there are
# some issues that just grew out of control. Resolving these would
# be nice, but too much work atm.
#
disable=,
#####################################
# Messages that are of no use to us #
#####################################
consider-using-f-string,
fixme,
missing-docstring,
no-else-return,
protected-access,
too-few-public-methods,
too-many-arguments,
too-many-boolean-expressions,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-nested-blocks,
too-many-public-methods,
too-many-statements,
too-many-return-statements,
too-many-ancestors,
# Chained comparisons let us write confusing statements
# such as "version >= 1 <= CURRENT_VERSION" and pylint
# complains when we use more clear and easier to understand
# statements like "version >= 1 and version <= CURRENT_VERSION"
#
# Disable this nonsense.
chained-comparison,
#######################################################
# Messages that we would like to enable at some point #
#######################################################
# Overridden methods don't actually override but redefine
arguments-differ,
duplicate-code,
# Some invalid names are alright, we should configure pylint
# to accept them, and curb the others
invalid-name,
unused-argument,
# We can probably enable this soon, it is a bit experimental
# for the moment and current releases of pylint (August 2021) raise
# a lot of false positives.
unused-private-member,
###########################################################
# Messages that report warnings which should be addressed #
###########################################################
logging-format-interpolation,
cyclic-import,
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time (only on the command line, not in the configuration file where
# it should appear only once). See also the "--disable" option for examples.
enable=c-extension-no-member
[REPORTS]
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
# Set the output format. Available formats are text, parseable, colorized, json
# and msvs (visual studio).You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=colorized
# Tells whether to display a full report or only the messages
reports=no
# Activate the evaluation score.
score=yes
[REFACTORING]
# Maximum number of nested blocks for function / method body
max-nested-blocks=5
# Complete name of functions that never returns. When checking for
# inconsistent-return-statements if a never returning function is called then
# it will be considered as an explicit return statement and no message will be
# printed.
never-returning-functions=optparse.Values,sys.exit
[TYPECHECK]
# List of decorators that produce context managers, such as
# contextlib.contextmanager. Add to this list to register other decorators that
# produce valid context managers.
contextmanager-decorators=contextlib.contextmanager
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=__enter__
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# This flag controls whether pylint should warn about no-member and similar
# checks whenever an opaque object is returned when inferring. The inference
# can return multiple potential results while evaluating a Python object, but
# some branches might not be evaluated, which results in partial inference. In
# that case, it might be useful to still emit no-member and other checks for
# the rest of the inferred objects.
ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,contextlib.closing,gi.repository.GLib.GError,pathlib.PurePath
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=pkg_resources,gi.repository,grpc,buildstream._protos.*
# Show a hint with possible names when a member name was not found. The aspect
# of finding the hint is based on edit distance.
missing-member-hint=yes
# The minimum edit distance a name should have in order to be considered a
# similar match for a missing member name.
missing-member-hint-distance=1
# The total number of similar names that should be taken in consideration when
# showing a hint for a missing member.
missing-member-max-choices=1
[BASIC]
# Naming style matching correct argument names
argument-naming-style=snake_case
# Regular expression matching correct argument names. Overrides argument-
# naming-style
#argument-rgx=
# Naming style matching correct attribute names
attr-naming-style=snake_case
# Regular expression matching correct attribute names. Overrides attr-naming-
# style
#attr-rgx=
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,
bar,
baz,
toto,
tutu,
tata
# Naming style matching correct class attribute names
class-attribute-naming-style=any
# Regular expression matching correct class attribute names. Overrides class-
# attribute-naming-style
#class-attribute-rgx=
# Naming style matching correct class names
class-naming-style=PascalCase
# Regular expression matching correct class names. Overrides class-naming-style
#class-rgx=
# Naming style matching correct constant names
const-naming-style=UPPER_CASE
# Regular expression matching correct constant names. Overrides const-naming-
# style
#const-rgx=
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
# Naming style matching correct function names
function-naming-style=snake_case
# Regular expression matching correct function names. Overrides function-
# naming-style
#function-rgx=
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_,e,f
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Naming style matching correct inline iteration names
inlinevar-naming-style=any
# Regular expression matching correct inline iteration names. Overrides
# inlinevar-naming-style
#inlinevar-rgx=
# Naming style matching correct method names
method-naming-style=snake_case
# Regular expression matching correct method names. Overrides method-naming-
# style
#method-rgx=
# Naming style matching correct module names
module-naming-style=snake_case
# Regular expression matching correct module names. Overrides module-naming-
# style
#module-rgx=
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# List of decorators that produce properties, such as abc.abstractproperty. Add
# to this list to register other decorators that produce valid properties.
property-classes=abc.abstractproperty
# Naming style matching correct variable names
variable-naming-style=snake_case
# Regular expression matching correct variable names. Overrides variable-
# naming-style
#variable-rgx=
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# Tells whether unused global variables should be treated as a violation.
allow-global-unused-variables=yes
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,
_cb
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*|^ignored_|^unused_
# Tells whether we should check for unused import in __init__ files.
init-import=no
# List of qualified module names which can have objects that can redefine
# builtins.
redefining-builtins-modules=six.moves,past.builtins,future.builtins
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[SPELLING]
# Limits count of emitted suggestions for spelling mistakes
max-spelling-suggestions=4
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,
XXX,
TODO
[SIMILARITIES]
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
# Minimum lines number of a similarity.
min-similarity-lines=4
[FORMAT]
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Maximum number of characters on a single line.
max-line-length=119
# Maximum number of lines in a module
max-module-lines=1000
# Allow the body of a class to be on the same line as the declaration if body
# contains single statement.
single-line-class-stmt=no
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
[IMPORTS]
# Allow wildcard imports from modules that define __all__.
allow-wildcard-with-all=no
# Analyse import fallback blocks. This can be used to support both Python 2 and
# 3 compatible code, which means that the block might have code that exists
# only in one or another interpreter, leading to false positives when analysed.
analyse-fallback-blocks=no
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=optparse,tkinter.tix
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
# Force import order to recognize a module as part of the standard
# compatibility libraries.
known-standard-library=
# Force import order to recognize a module as part of a third party library.
known-third-party=enchant
[DESIGN]
# Maximum number of arguments for function / method
max-args=5
# Maximum number of attributes for a class (see R0902).
max-attributes=7
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
# Maximum number of branch for function / method body
max-branches=12
# Maximum number of locals for function / method body
max-locals=15
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of public methods for a class (see R0904).
max-public-methods=20
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of statements in function / method body
max-statements=50
# Minimum number of public methods for a class (see R0903).
min-public-methods=2
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,
__new__,
setUp
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,
_fields,
_replace,
_source,
_make
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

buildstream-1.6.9/BuildStream.doap

name: BuildStream
shortdesc: Build tool for running abstract, deterministic build pipelines
programming-language: python3
maintainers: Tristan Van Berkom (tvb), Jürg Billeter (juergbi)

buildstream-1.6.9/COPYING

GNU LESSER GENERAL PUBLIC LICENSE
Version 2.1, February 1999
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
[This is the first released version of the Lesser GPL. It also counts
as the successor of the GNU Library Public License, version 2, hence
the version number 2.1.]
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
Licenses are intended to guarantee your freedom to share and change
free software--to make sure the software is free for all its users.
This license, the Lesser General Public License, applies to some
specially designated software packages--typically libraries--of the
Free Software Foundation and other authors who decide to use it. You
can use it too, but we suggest you first think carefully about whether
this license or the ordinary General Public License is the better
strategy to use in any particular case, based on the explanations below.
When we speak of free software, we are referring to freedom of use,
not price. Our General Public Licenses are designed to make sure that
you have the freedom to distribute copies of free software (and charge
for this service if you wish); that you receive source code or can get
it if you want it; that you can change the software and use pieces of
it in new free programs; and that you are informed that you can do
these things.
To protect your rights, we need to make restrictions that forbid
distributors to deny you these rights or to ask you to surrender these
rights. These restrictions translate to certain responsibilities for
you if you distribute copies of the library or if you modify it.
For example, if you distribute copies of the library, whether gratis
or for a fee, you must give the recipients all the rights that we gave
you. You must make sure that they, too, receive or can get the source
code. If you link other code with the library, you must provide
complete object files to the recipients, so that they can relink them
with the library after making changes to the library and recompiling
it. And you must show them these terms so they know their rights.
We protect your rights with a two-step method: (1) we copyright the
library, and (2) we offer you this license, which gives you legal
permission to copy, distribute and/or modify the library.
To protect each distributor, we want to make it very clear that
there is no warranty for the free library. Also, if the library is
modified by someone else and passed on, the recipients should know
that what they have is not the original version, so that the original
author's reputation will not be affected by problems that might be
introduced by others.
Finally, software patents pose a constant threat to the existence of
any free program. We wish to make sure that a company cannot
effectively restrict the users of a free program by obtaining a
restrictive license from a patent holder. Therefore, we insist that
any patent license obtained for a version of the library must be
consistent with the full freedom of use specified in this license.
Most GNU software, including some libraries, is covered by the
ordinary GNU General Public License. This license, the GNU Lesser
General Public License, applies to certain designated libraries, and
is quite different from the ordinary General Public License. We use
this license for certain libraries in order to permit linking those
libraries into non-free programs.
When a program is linked with a library, whether statically or using
a shared library, the combination of the two is legally speaking a
combined work, a derivative of the original library. The ordinary
General Public License therefore permits such linking only if the
entire combination fits its criteria of freedom. The Lesser General
Public License permits more lax criteria for linking other code with
the library.
We call this license the "Lesser" General Public License because it
does Less to protect the user's freedom than the ordinary General
Public License. It also provides other free software developers Less
of an advantage over competing non-free programs. These disadvantages
are the reason we use the ordinary General Public License for many
libraries. However, the Lesser license provides advantages in certain
special circumstances.
For example, on rare occasions, there may be a special need to
encourage the widest possible use of a certain library, so that it becomes
a de-facto standard. To achieve this, non-free programs must be
allowed to use the library. A more frequent case is that a free
library does the same job as widely used non-free libraries. In this
case, there is little to gain by limiting the free library to free
software only, so we use the Lesser General Public License.
In other cases, permission to use a particular library in non-free
programs enables a greater number of people to use a large body of
free software. For example, permission to use the GNU C Library in
non-free programs enables many more people to use the whole GNU
operating system, as well as its variant, the GNU/Linux operating
system.
Although the Lesser General Public License is Less protective of the
users' freedom, it does ensure that the user of a program that is
linked with the Library has the freedom and the wherewithal to run
that program using a modified version of the Library.
The precise terms and conditions for copying, distribution and
modification follow. Pay close attention to the difference between a
"work based on the library" and a "work that uses the library". The
former contains code derived from the library, whereas the latter must
be combined with the library in order to run.
GNU LESSER GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License Agreement applies to any software library or other
program which contains a notice placed by the copyright holder or
other authorized party saying it may be distributed under the terms of
this Lesser General Public License (also called "this License").
Each licensee is addressed as "you".
A "library" means a collection of software functions and/or data
prepared so as to be conveniently linked with application programs
(which use some of those functions and data) to form executables.
The "Library", below, refers to any such software library or work
which has been distributed under these terms. A "work based on the
Library" means either the Library or any derivative work under
copyright law: that is to say, a work containing the Library or a
portion of it, either verbatim or with modifications and/or translated
straightforwardly into another language. (Hereinafter, translation is
included without limitation in the term "modification".)
"Source code" for a work means the preferred form of the work for
making modifications to it. For a library, complete source code means
all the source code for all modules it contains, plus any associated
interface definition files, plus the scripts used to control compilation
and installation of the library.
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running a program using the Library is not restricted, and output from
such a program is covered only if its contents constitute a work based
on the Library (independent of the use of the Library in a tool for
writing it). Whether that is true depends on what the Library does
and what the program that uses the Library does.
1. You may copy and distribute verbatim copies of the Library's
complete source code as you receive it, in any medium, provided that
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.
You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.
2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) The modified work must itself be a software library.
b) You must cause the files modified to carry prominent notices
stating that you changed the files and the date of any change.
c) You must cause the whole of the work to be licensed at no
charge to all third parties under the terms of this License.
d) If a facility in the modified Library refers to a function or a
table of data to be supplied by an application program that uses
the facility, other than as an argument passed when the facility
is invoked, then you must make a good faith effort to ensure that,
in the event an application does not supply such function or
table, the facility still operates, and performs whatever part of
its purpose remains meaningful.
(For example, a function in a library to compute square roots has
a purpose that is entirely well-defined independent of the
application. Therefore, Subsection 2d requires that any
application-supplied function or table used by this function must
be optional: if the application does not supply it, the square
root function must still compute square roots.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.
In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library. To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License. (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.) Do not make any other change in
these notices.
Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.
This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.
4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.
If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.
5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library". Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.
However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library". The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.
When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library. The
threshold for this to be true is not precisely defined by law.
If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work. (Executables containing this object code plus portions of the
Library will still fall under Section 6.)
Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.
6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.
You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License. You must supply a copy of this License. If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License. Also, you must do one
of these things:
a) Accompany the work with the complete corresponding
machine-readable source code for the Library including whatever
changes were used in the work (which must be distributed under
Sections 1 and 2 above); and, if the work is an executable linked
with the Library, with the complete machine-readable "work that
uses the Library", as object code and/or source code, so that the
user can modify the Library and then relink to produce a modified
executable containing the modified Library. (It is understood
that the user who changes the contents of definitions files in the
Library will not necessarily be able to recompile the application
to use the modified definitions.)
b) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (1) uses at run time a
copy of the library already present on the user's computer system,
rather than copying library functions into the executable, and (2)
will operate properly with a modified version of the library, if
the user installs one, as long as the modified version is
interface-compatible with the version that the work was made with.
c) Accompany the work with a written offer, valid for at
least three years, to give the same user the materials
specified in Subsection 6a, above, for a charge no more
than the cost of performing this distribution.
d) If distribution of the work is made by offering access to copy
from a designated place, offer equivalent access to copy the above
specified materials from the same place.
e) Verify that the user has already received a copy of these
materials or that you have already sent this user a copy.
For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it. However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.
It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system. Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.
7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:
a) Accompany the combined library with a copy of the same work
based on the Library, uncombined with any other library
facilities. This must be distributed under the terms of the
Sections above.
b) Give prominent notice with the combined library of the fact
that part of it is a work based on the Library, and explaining
where to find the accompanying uncombined form of the same work.
8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License. Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License. However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
9. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Library or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.
10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.
11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all. For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation. If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.
14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission. For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this. Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.
NO WARRANTY
15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Libraries
If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change. You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).
To apply these terms, attach the following notices to the library. It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.
Copyright (C) <year> <name of author>
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the
library `Frob' (a library for tweaking knobs) written by James Random Hacker.
<signature of Ty Coon>, 1 April 1990
Ty Coon, President of Vice
That's all there is to it!

buildstream-1.6.9/HACKING.rst

Contributing
============
Some tips and guidelines for developers hacking on BuildStream
Feature additions
-----------------
Major feature additions should be proposed on the
`mailing list `_
before being considered for inclusion; we strongly recommend proposing
in advance of commencing work.
New features must be well documented and tested either in our main
test suite if possible, or otherwise in the integration tests.
It is expected that the individual submitting the work take ownership
of their feature within BuildStream for a reasonable timeframe of at least
one release cycle after their work has landed on the master branch. This is
to say that the submitter is expected to address and fix any side effects and
bugs which may have fallen through the cracks in the review process, giving us
a reasonable timeframe for identifying these.
Patch submissions
-----------------
Branches must be submitted as merge requests on GitLab and should usually
be associated with an issue report on GitLab.
Commits in the branch which address specific issues must specify the
issue number in the commit message.
Merge requests that are not yet ready for review must be prefixed with the
``WIP:`` identifier. A merge request is not ready for review until the
submitter expects that the patch is ready to actually land.
Submitted branches must not contain a history of the work done in the
feature branch. Please use git's interactive rebase feature in order to
compose a clean patch series suitable for submission.
We prefer that test case and documentation changes be submitted
in separate commits from the code changes which they test.
Ideally every commit in the history of master passes its test cases. This
makes bisections easier to perform, but is not always practical with
more complex branches.
Commit messages
~~~~~~~~~~~~~~~
Commit messages must be formatted with a brief summary line, optionally
followed by an empty line and then a free form detailed description of
the change.
The summary line must start with what changed, followed by a colon and
a very brief description of the change.
If there is an associated issue, it **must** be mentioned somewhere
in the commit message.
**Example**::
element.py: Added the frobnicator so that foos are properly frobbed.
The new frobnicator frobnicates foos all the way throughout
the element. Elements that are not properly frobnicated raise
an error to inform the user of invalid frobnication rules.
This fixes issue #123
Coding style
------------
Coding style details for BuildStream
Style guide
~~~~~~~~~~~
Python coding style for BuildStream is pep8, which is documented here: https://www.python.org/dev/peps/pep-0008/
We have a couple of minor exceptions to this standard; we don't want to compromise
code readability by being overly restrictive on line length, for instance.
The pep8 linter will run automatically when running the test suite.
Imports
~~~~~~~
Module imports inside BuildStream are done with relative ``.`` notation
Good::
from .context import Context
Bad::
from buildstream.context import Context
The exception to the above rule is when authoring plugins;
plugins do not reside in the same namespace, so they must
address buildstream in the imports.
An element plugin will derive from Element by importing::
from buildstream import Element
When importing utilities specifically, don't import function names
from there; instead import the module itself::
from . import utils
This makes it clear when reading code that such functions
are not defined in the same file but come from utils.py, for example.
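As a small sketch (``sha256sum`` is used here purely for illustration), a call
site then reads::

   from . import utils

   # The module prefix makes it obvious that the helper is defined in utils.py
   digest = utils.sha256sum("element.bst")
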
Policy for private symbols
~~~~~~~~~~~~~~~~~~~~~~~~~~
Private symbols are expressed via a leading ``_`` single underscore, or
in some special circumstances with a leading ``__`` double underscore.
Before understanding the naming policy, it is first important to understand
that in BuildStream, there are two levels of privateness which need to be
considered.
These are treated subtly differently and thus need to be understood:
* API Private
A symbol is considered to be *API private* if it is not exposed in the *public API*.
Even if a symbol does not have any leading underscore, it may still be *API private*
if the containing *class* or *module* is named with a leading underscore.
* Local private
A symbol is considered to be *local private* if it is not intended for access
outside of the defining *scope*.
If a symbol has a leading underscore, it might not be *local private* if it is
declared on a publicly visible class, but needs to be accessed internally by
other modules in the BuildStream core.
Ordering
''''''''
For better readability and consistency, we try to keep private symbols below
public symbols. In the case of public modules where we may have a mix of
*API private* and *local private* symbols, *API private* symbols should come
before *local private* symbols.
Symbol naming
'''''''''''''
Any private symbol must start with a single leading underscore for two reasons:
* So that it does not bleed into documentation and *public API*.
* So that it is clear to developers which symbols are not used outside of the declaring *scope*
Remember that with python, the modules (python files) are also symbols
within their containing *package*; as such, modules which are entirely
private to BuildStream are named accordingly, e.g. ``_thismodule.py``.
Cases for double underscores
''''''''''''''''''''''''''''
The double underscore in python has a special function. When declaring
a symbol in class scope which has a leading double underscore, it can only be
accessed within the class scope using the same name. Outside of class
scope, it can only be accessed with a *cheat*.
We use the double underscore in cases where the type of privateness can be
ambiguous.
* For private modules and classes
We never need to disambiguate with a double underscore
* For private symbols declared in a public *scope*
In the case that we declare a private method on a public object, it
becomes ambiguous whether:
* The symbol is *local private*, and only used within the given scope
* The symbol is *API private*, and will be used internally by BuildStream
from other parts of the codebase.
In this case, we use a single underscore for *API private* methods which
are not *local private*, and we use a double underscore for *local private*
methods declared in public scope.
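As a minimal sketch of the above (the class and method names here are purely
illustrative)::

   # frobnicator.py -- Frobnicator is public API
   class Frobnicator():

       # API private: not part of the public API, but called from other
       # modules in the BuildStream core, so a single leading underscore is used.
       def _configure(self, options):
           self._options = options

       # Local private: only ever called from within this class,
       # so a double leading underscore is used.
       def __validate(self):
           assert self._options is not None
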
Documenting private symbols
'''''''''''''''''''''''''''
Any symbol which is *API Private* (regardless of whether it is also
*local private*), should have some documentation for developers to
better understand the codebase.
Contrary to many other python projects, we do not use docstrings to
document private symbols, but prefer to keep *API Private* symbols
documented in code comments placed *above* the symbol (or *beside* the
symbol in some cases, such as variable declarations in a class where
a shorter comment is more desirable), rather than docstrings placed *below*
the symbols being documented.
Other than this detail, follow the same guidelines for documenting
symbols as described below.
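For instance, a private helper documented in this style might look like
the following (the function itself is hypothetical)::

  # _extract_keys()
  #
  # Extracts the strong and weak cache keys from a loaded
  # metadata dictionary.
  #
  # Args:
  #    meta (dict): The loaded metadata
  #
  # Returns:
  #    (str, str): The strong and weak cache keys
  #
  def _extract_keys(meta):
      return meta['strong'], meta['weak']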
Documenting BuildStream
-----------------------
BuildStream starts out as a documented project from day one and uses
sphinx to document itself.
Documentation formatting policy
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The BuildStream documentation style is as follows:
* Titles and headings require two leading empty lines above them. Only the first word should be capitalized.
* If there is an ``.. _internal_link`` anchor, there should be two empty lines above the anchor, followed by one leading empty line.
* Within a section, paragraphs should be separated by one empty line.
* Notes are defined using: ``.. note::`` blocks, followed by an empty line and then indented (3 spaces) text.
* Code blocks are defined using: ``.. code:: LANGUAGE`` blocks, followed by an empty line and then indented (3 spaces) text. Note that the default language is `python`.
* Cross references should be of the form ``:role:`target```.
* To cross reference arbitrary locations with, for example, the anchor ``_anchor_name``, you must give the link an explicit title: ``:ref:`Link text <anchor_name>```. Note that the "_" prefix is not required.
Useful links:
For further information, please see the `Sphinx Documentation `_.
Building Docs
~~~~~~~~~~~~~
The documentation build is not integrated into ``setup.py`` and is
difficult (or impossible) to integrate there, so there is a little bit
of setup you need to take care of first.
Before you can build the BuildStream documentation yourself, you need
to first install ``sphinx`` along with some additional plugins and dependencies,
using pip or some other mechanism::
# Install sphinx
pip3 install --user sphinx
# Install some sphinx extensions
pip3 install --user sphinx-click
pip3 install --user sphinx_rtd_theme
# Additional optional dependencies required
pip3 install --user arpy
To build the documentation, just run the following::
make -C doc
This will give you a ``doc/build/html`` directory with the html docs which
you can view in your browser locally to test.
Regenerating session html
'''''''''''''''''''''''''
The documentation build will build the session files if they are missing,
or if explicitly asked to rebuild. We revision the generated session html files
in order to reduce the burden on documentation contributors.
To explicitly rebuild the session snapshot html files, it is recommended that you
first set the ``BST_SOURCE_CACHE`` environment variable to your source cache; this
will make the docs build reuse already downloaded sources::
export BST_SOURCE_CACHE=~/.cache/buildstream/sources
To force rebuild session html while building the doc, simply build the docs like this::
make BST_FORCE_SESSION_REBUILD=1 -C doc
Man pages
~~~~~~~~~
Unfortunately it is quite difficult to integrate the man pages build
into ``setup.py``; as such, whenever the frontend command line
interface changes, the static man pages should be regenerated and
committed along with that change.
To do this, first ensure you have ``click_man`` installed, possibly
with::
pip install --user click_man
Then, in the toplevel directory of buildstream, run the following::
python3 setup.py --command-packages=click_man.commands man_pages
And commit the result, ensuring that you have added anything in
the ``man/`` subdirectory, which will be automatically included
in the buildstream distribution.
Documenting conventions
~~~~~~~~~~~~~~~~~~~~~~~
We use the sphinx.ext.napoleon extension so that docstrings are
a bit nicer than the default sphinx docstrings.
A docstring for a method, class or function should have the following
format::
"""Brief description of entity
Args:
argument1 (type): Description of arg
argument2 (type): Description of arg
Returns:
(type): Description of returned thing of the specified type
Raises:
(SomeError): When some error occurs
(SomeOtherError): When some other error occurs
A detailed description can go here if one is needed, only
after the above part documents the calling conventions.
"""
Documentation Examples
~~~~~~~~~~~~~~~~~~~~~~
The examples section of the documentation contains a series of standalone
examples, here are the criteria for an example addition.
* The example has a ``${name}``
* The example has a project users can copy and use
* This project is added in the directory ``doc/examples/${name}``
* The example has a documentation component
* This is added at ``doc/source/examples/${name}.rst``
* A reference to ``examples/${name}`` is added to the toctree in ``doc/source/examples.rst``
* This documentation discusses the project elements declared in the project and may
provide some BuildStream command examples
* This documentation links out to the reference manual at every opportunity
* The example has a CI test component
* This is an integration test added at ``tests/examples/${name}``
* This test runs BuildStream in the ways described in the example
and asserts that we get the results which we advertise to users in
those examples.
Adding BuildStream command output
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As a part of building the docs, BuildStream will run itself and extract
some html for the colorized output which is produced.
If you want to run BuildStream to produce some nice html for your
documentation, then you can do so by adding new ``.run`` files to the
``doc/sessions/`` directory.
Any files added as ``doc/sessions/${example}.run`` will result in a generated
file at ``doc/source/sessions/${example}.html``, and these files can be
included in the reStructuredText documentation at any time with::
.. raw:: html
:file: sessions/${example}.html
The ``.run`` file format is just another YAML dictionary which consists of a
``commands`` list, instructing the program what to do command by command.
Each *command* is a dictionary, the members of which are listed here:
* ``directory``: The input file relative project directory
* ``output``: The input file relative output html file to generate (optional)
* ``fake-output``: Don't really run the command; just pretend to, and treat
this value as the output. An empty string will enable this too.
* ``command``: The command to run, without the leading ``bst``
When adding a new ``.run`` file, one should normally also commit the new
resulting generated ``.html`` file(s) into the ``doc/source/sessions-stored/``
directory at the same time; this ensures that other developers do not need to
regenerate them locally in order to build the docs.
**Example**:
.. code:: yaml
commands:
# Make it fetch first
- directory: ../examples/foo
command: fetch hello.bst
# Capture a build output
- directory: ../examples/foo
output: ../source/sessions/foo-build.html
command: build hello.bst
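As a rough sketch of how such a file parses into Python data structures
(this is not the actual implementation; PyYAML is used here purely for
illustration)::

  import yaml

  with open('doc/sessions/foo.run') as f:
      session = yaml.safe_load(f)

  for command in session['commands']:
      directory = command['directory']           # required
      output = command.get('output')             # optional output html path
      fake_output = command.get('fake-output')   # optional canned output
      print("bst " + command['command'], "in", directory)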
Protocol Buffers
----------------
BuildStream uses protobuf and gRPC for serialization and communication with
artifact cache servers. This requires ``.proto`` files and Python code
generated from the ``.proto`` files using protoc. All these files live in the
``buildstream/_protos`` directory. The generated files are included in the
git repository to avoid depending on grpcio-tools for user installations.
Regenerating code
~~~~~~~~~~~~~~~~~
When ``.proto`` files are modified, the corresponding Python code needs to
be regenerated. As a prerequisite for code generation you need to install
``grpcio-tools`` using pip or some other mechanism::
pip3 install --user grpcio-tools
To actually regenerate the code::
./setup.py build_grpc
Testing BuildStream
-------------------
BuildStream uses pytest for regression tests and testing out
the behavior of newly added components.
The full documentation for pytest can be found here: http://doc.pytest.org/en/latest/contents.html
Don't get lost in the docs if you don't need to; follow existing examples instead.
Running tests
~~~~~~~~~~~~~
To run the tests, just type::
./setup.py test
At the toplevel.
When debugging a test, it can be desirable to see the stdout
and stderr generated by a test; to do this, use the ``--addopts``
option to feed arguments to pytest as such::
./setup.py test --addopts -s
You can always abort on the first failure by running::
./setup.py test --addopts -x
If you want to run a specific test or a group of tests, you
can specify a prefix to match. E.g. if you want to run all of
the frontend tests you can do::
./setup.py test --addopts '-k tests/frontend/'
We also have a set of slow integration tests that are disabled by
default - you will notice most of them marked with SKIP in the pytest
output. To run them, you can use::
./setup.py test --addopts '--integration'
By default, buildstream also runs pylint on all files. Should you want
to run just pylint (these checks are a lot faster), you can do so
with::
./setup.py test --addopts '-m pylint'
Alternatively, any IDE plugin that uses pytest should automatically
detect the ``.pylintrc`` in the project's root directory.
Adding tests
~~~~~~~~~~~~
Tests are found in the tests subdirectory, inside of which
there is a separate directory for each *domain* of tests.
All tests are collected as::
tests/*/*.py
If the new test is not appropriate for the existing test domains,
then simply create a new directory for it under the tests subdirectory.
Various tests may include data files to test on; there are examples
of this in the existing tests. When adding data for a test, create
a subdirectory beside your test in which to store data.
When creating a test that needs data, use the datafiles extension
to decorate your test case (again, examples exist in the existing
tests for this); documentation on the datafiles extension can
be found here: https://pypi.python.org/pypi/pytest-datafiles
Tests that run a sandbox should be decorated with::
@pytest.mark.integration
and use the integration cli helper.
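A minimal sketch of such a test follows; the ``cli`` fixture, the
``run()``/``assert_success()`` helpers and the data directory layout are
assumptions based on the conventions of the existing tests, so check
neighbouring tests for the exact helpers to use::

  import os
  import pytest

  DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'project')

  @pytest.mark.datafiles(DATA_DIR)
  @pytest.mark.integration
  def test_build_hello(cli, datafiles):
      project = str(datafiles)

      # Build an element from the copied test project
      result = cli.run(project=project, args=['build', 'hello.bst'])
      result.assert_success()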
Measuring BuildStream performance
---------------------------------
Benchmarking framework
~~~~~~~~~~~~~~~~~~~~~~~
BuildStream has a utility to measure performance which is available from a
separate repository at https://gitlab.com/BuildStream/benchmarks. This tool
allows you to run a fixed set of workloads with multiple versions of
BuildStream. From this you can see whether one version performs better or
worse than another which is useful when looking for regressions and when
testing potential optimizations.
For full documentation on how to use the benchmarking tool see the README in
the 'benchmarks' repository.
Profiling tools
~~~~~~~~~~~~~~~
When looking for ways to speed up the code you should make use of a profiling
tool.
Python provides `cProfile `_
which gives you a list of all functions called during execution and how much
time was spent in each function. Here is an example of running `bst --help`
under cProfile:
python3 -m cProfile -o bst.cprofile -- $(which bst) --help
You can then analyze the results interactively using the 'pstats' module:
python3 -m pstats ./bst.cprofile
For more detailed documentation of cProfile and 'pstats', see:
https://docs.python.org/3/library/profile.html.
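If you prefer to analyze a profile non-interactively, the same 'pstats'
module can also be scripted, for example::

  import pstats

  stats = pstats.Stats('bst.cprofile')

  # Print the 20 most expensive call sites by cumulative time
  stats.sort_stats('cumulative').print_stats(20)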
For a richer visualisation of the callstack you can try `Pyflame
`_. Once you have followed the instructions in
Pyflame's README to install the tool, you can profile `bst` commands as in the
following example:
pyflame --output bst.flame --trace bst --help
You may see an `Unexpected ptrace(2) exception:` error. Note that the `bst`
operation will continue running in the background in this case; you will need
to wait for it to complete or kill it. Once this is done, rerunning the above
command appears to fix the issue.
Once you have output from pyflame, you can use the ``flamegraph.pl`` script
from the `Flamegraph project `_
to generate an .svg image:
./flamegraph.pl bst.flame > bst-flamegraph.svg
The generated SVG file can then be viewed in your preferred web browser.
Profiling specific parts of BuildStream with BST_PROFILE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
BuildStream can also turn on cProfile for specific parts of execution
using BST_PROFILE.
BST_PROFILE can be set to a section name, or 'all' for all
sections. There is a list of topics in `buildstream/_profile.py`. For
example, running::
BST_PROFILE=load-pipeline bst build bootstrap-system-x86.bst
will produce a profile in the current directory for the time taken to
call most of `initialized`, for each element. These profile files
are in the same cProfile format as those mentioned in the previous
section, and can be analysed with `pstats` or `pyflame`.
Profiling the artifact cache receiver
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Since the artifact cache receiver is not normally run directly, it's
necessary to alter the ForceCommand part of sshd_config to enable
profiling. See the main documentation in `doc/source/artifacts.rst`
for general information on setting up the artifact cache. It's also
useful to change directory to a logging directory before starting
`bst-artifact-receive` with profiling on.
This is an example of a ForceCommand section of sshd_config used to
obtain profiles::
Match user artifacts
ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts
The MANIFEST.in and setup.py
----------------------------
When adding a dependency to BuildStream, it's important to update the setup.py accordingly.
When adding data files which need to be discovered at runtime by BuildStream, update setup.py accordingly.
When adding data files for the purpose of docs or tests, or anything that is not covered by
setup.py, update the MANIFEST.in accordingly.
At any time, running the following command to create a source distribution should result in
a tarball which contains everything we want it to include::
./setup.py sdist
buildstream-1.6.9/MAINTAINERS 0000664 0000000 0000000 00000000220 14375152700 0015535 0 ustar 00root root 0000000 0000000 Tristan Van Berkom
E-mail: tristan.vanberkom@codethink.co.uk
Userid: tvb
Jürg Billeter
E-mail: juerg.billeter@codethink.co.uk
Userid: juergbi
buildstream-1.6.9/MANIFEST.in 0000664 0000000 0000000 00000001467 14375152700 0015614 0 ustar 00root root 0000000 0000000 # Basic toplevel package includes
include BuildStream.doap
include COPYING
include HACKING.rst
include MAINTAINERS
include NEWS
include README.rst
# Documentation package includes
include doc/Makefile
include doc/source/conf.py
include doc/source/index.rst
# Tests
recursive-include tests *.py
recursive-include tests *.yaml
recursive-include tests *.bst
recursive-include tests *.conf
recursive-include tests *.sh
recursive-include tests *.expected
# Protocol Buffers
recursive-include buildstream/_protos *.proto
# Requirements files
include requirements/requirements.in
include requirements/requirements.txt
include requirements/dev-requirements.in
include requirements/dev-requirements.txt
include requirements/plugin-requirements.in
include requirements/plugin-requirements.txt
# Versioneer
include versioneer.py
buildstream-1.6.9/NEWS 0000664 0000000 0000000 00000041612 14375152700 0014551 0 ustar 00root root 0000000 0000000 =================
buildstream 1.6.9
=================
o Further Python 3.11 fixes to regex flags.
=================
buildstream 1.6.8
=================
o Only test remote CAS push capabilities if push is enabled.
This improves compatibility with new grpcio releases.
o Dummy sandbox for checking out from different arches.
Ability to check out build artifacts with incompatible arch
assuming no commands need to be run.
o Backport regex flags fix to support newer versions of Python
=================
buildstream 1.6.7
=================
o Some documentation updates
o Support newer versions of ruamel.yaml (issue #1623)
=================
buildstream 1.6.6
=================
o BuildStream git tests have always assumed default git branch is master.
This is now explicit with test helpers
o project.refs of subprojects are properly taken into account
o ostree regressed as part of migrating to the command line, such that it left
whitespace in the ref when tracking. Any whitespace around the ref is now removed.
o pb2 files are regenerated with protobuf 3.20.1 to mitigate forward
compatibility issues. This has the implication that protobuf 3.19.0 or higher
is now required.
=================
buildstream 1.6.5
=================
o Make it easier to override parameters to `make` in the `make` element
o ostree: Remove `--mirror` parameter which has been causing some issues
o Fix test suite to work on some CI runners which hang while resolving localhost
when trying to open a port for the CAS server
=================
buildstream 1.6.4
=================
o BuildElement classes now support `create-dev-shm`
o script element plugin now supports `create-dev-shm`
o Python 3.6 is no longer tested in CI but support is maintained on
a best-effort level.
o New fatal warnings for unaliased sources
o New errors raised when using an unresolved source alias
o Add support for .netrc in remote/tar/zip sources
o Bugfixes and better stability in fuse layer
o Drop CI support for EOL python 3.6 (although BuildStream
should still work when installed in python 3.6 environments)
o Various bug fixes, documentation updates and CI related cleanup
=================
buildstream 1.6.3
=================
o Support for python 3.9 now being tested in CI
o CI overhaul to work more like it does in master
o Refresh all dependencies which are being tested in CI, addressing
a corner case crash that would occur when using bash completions
and bleeding edge versions of the click library
o Updated minimum required version of grpcio library to 1.30, as older
versions are not working properly with existing artifact cache services.
=================
buildstream 1.6.2
=================
o Fix some issues with a previous fix for #532
o Ported to github CI infrastructure
o Ensure paths specified in user configuration are absolute
o Import some symbols from collections.abc, required for python 3.10
=================
buildstream 1.6.1
=================
o Fix failure handling with CAS (#1403)
=================
buildstream 1.6.0
=================
o Fixed edge case issue when dealing with git remotes (#1372)
=================
buildstream 1.5.1
=================
o Support `buildstream1.conf` as well as `buildstream.conf` for
parallel installations.
o Lazy resolution of variables, this allows junctions to use variables
without requiring the project to have fully resolved variables, while
still reporting the right error messages if a junction uses unresolved
variables.
o Fix an issue where conditional statements were being lost instead of
processed at include time, only when the include happens in project.conf
o Backport some artifact cache related structural changes, and allow
BuildStream 1 clients to interface with BuildStream 2 remote asset
caches, while still allowing BuildStream 1 to interface with its own
bst-artifact-server implementation.
o Added sandbox configuration for `os` and `architecture` to mirror
the added options in BuildStream 2, fixing issue #523.
=================
buildstream 1.5.0
=================
o Process options in included files in the context of the project they
were included from.
This is technically a breaking change, however it is highly unlikely
that this will break projects. In some cases projects were working around
the broken behavior by ensuring matching project option names in junctioned
projects, and in other cases simply avoiding including files which have
project option conditional statements.
o Added errors when trying to load BuildStream 2 projects, recommending to
install the appropriate BuildStream version for the project.
o Added errors when loading BuildStream 2 plugins in a BuildStream 1
project, recommending to use BuildStream 1 plugins with BuildStream 1 projects.
=================
buildstream 1.4.3
=================
o Fix support for conditional list append/prepend in project.conf,
Merge request !1857
o Fix internal imports to import from "collections" instead
of "collections.abc", this improves support for Python 3.8,
see issue #831
o Fix some downloads from gitlab.com by setting custom user agent,
fixes issue #1285
o Work around python API break from ostree's repo.remote_gpg_import(),
this was changed in ostree commit v2019.2-10-gaa5df899, and we now
have a fallback to support both versions of the API, see merge request !1917.
=================
buildstream 1.4.2
=================
o Support for python 3.8
o Fix a stacktrace with a hang we can experience when we CTRL-C a job twice.
o Workaround some servers which do not honor the 'If-None-Match' HTTP header
and avoid downloading files redundantly in these cases.
o Allow specifying absolute paths in overlap-whitelist (issue #721)
o Support systems with fuse3 (avoid passing unsupported argument
to fusermount3)
=================
buildstream 1.4.1
=================
o Depend on a newer version of ruamel.yaml (>= 0.16).
=================
buildstream 1.4.0
=================
o Elements may now specify 'build-depends' and 'runtime-depends' fields
to avoid having to specify the dependency type for every entry in
'depends'.
o Elements may now specify cross-junction dependencies as simple strings
using the format '{junction-name}:{element-name}'.
o New `fatal-warnings` has been added to the project.conf format, allowing
projects to specify which warnings they want to consider as fatal.
Support for the following warnings is included:
o overlaps: When staged artifact files overlap
(deprecates: 'fail-on-overlap')
o ref-not-in-track: When the source implementation finds that
the ref is out of bounds for the tracking config
o git:inconsistent-submodule: A .gitmodules file is present but the
submodule was never added to the repo.
o git:unlisted-submodule: A submodule exists but is not specified
in the YAML declaration.
o git:invalid-submodule: A submodule is specified in the YAML
declaration but does not exist at the
given ref in the git repository.
o BuildStream now depends on python3 ujson (for some internal serializations)
o Workspaces can now be opened as relative paths.
Existing open workspaces will not be converted to relative paths,
(they need to be closed and opened again to get the new behavior).
o Dependencies can now be specified as strict to force rebuild in
non-strict mode. This is useful for statically linked dependencies
(#254).
o Git source plugins can optionally track human readable refs using
the output of `git describe`.
=================
buildstream 1.3.1
=================
o The `max-jobs` variable is now controllable in user configuration
and on the command line.
o Source plugins may now request access to previous sources during track and
fetch by setting `BST_REQUIRES_PREVIOUS_SOURCES_TRACK` and/or
`BST_REQUIRES_PREVIOUS_SOURCES_FETCH` attributes.
o Add new `pip` source plugin for downloading python packages using pip,
based on requirements files from previous sources.
=================
buildstream 1.2.8
=================
o Fixed issues with workspaced junctions which need fetches (#1030)
o Bail out with informative error if stdout/stderr are O_NONBLOCK (#929)
=================
buildstream 1.2.7
=================
o Improved messaging around unknown artifact cache keys (#981)
o Fixed crash which occurs when deleting artifact cache with
open workspaces (#1017)
o Fixed `bst --no-strict build --track-all ...` which sometimes
exited successfully without building anything (#1014)
o Fixed incorrect error message with malformed YAML in project.conf (#1019)
=================
buildstream 1.2.6
=================
o Fix 'quit' option when interrupting a build (#525)
o Only queue one cache size calculation job at a time
o Fix stack traces on forceful termination
o Fix scheduler processing order regression (#712)
o Fix race condition in bzr source plugin
o Better error messages for insufficient disk space
o UI/Logging improvements regarding cache quota usage
o Fix `bst push` in non-strict mode (#990)
o Fix crash (regression) when tracking a single element (#1012)
=================
buildstream 1.2.5
=================
o Fixed failure to process some elements when workspaces are open (#919)
o Better error reporting when files are missing, or when encountering
errors in sub projects (#947)
o Do not require exact versions of dependencies for running tests (#916)
o Fail on overlap policy no longer inherited from subprojects (#926)
=================
buildstream 1.2.4
=================
o Migration of scripts to use tox
o Force updating tags when fetching from git repos (#812)
o Avoid downloading unused submodules (#804)
o Fixed cleanup of cache server when the disk is full (#609)
o Fixed possible artifact cache corruption (#749)
o Fixed `bst checkout --deps none` behavior (#670)
=================
buildstream 1.2.3
=================
o Fixed an unhandled exception when cleaning up a build sandbox (#153)
o Fixed race condition when calculating cache size and committing artifacts
o Fixed regression where terminating with `^C` results in a double user interrogation (#693)
o Fixed regression in summary when builds are terminated (#479)
o Fixed regression where irrelevant status messages appear from git sources
o Improve performance of artifact uploads by batching file transfers (#676/#677)
o Fixed performance of artifact downloads by batching file transfers (#554)
o Fixed checks for paths which escape the project directory (#673)
=================
buildstream 1.2.2
=================
o Fixed incomplete removal of blessings dependency
=================
buildstream 1.2.1
=================
o Fixed corruption of artifact cache at cache cleanup time (#623)
o Fixed accidental deletion of artifacts when tracking is enabled
o Error out when protected variables are set by project authors (#287)
o Fixed option resolution in project wide element & source configurations (#658)
o Error out gracefully when push remote is mal-specified (#625)
o Improved logging regarding skipped push / pull jobs (#515)
o Fixed crash in `bst fetch` when project.refs and source mirroring are in use (#666)
o Removed blessings dependency
o Support for batch file downloads on the artifact cache server
=================
buildstream 1.2.0
=================
o Various last minute bug fixes
o Final update to the SourceFetcher related mirroring APIs
=================
buildstream 1.1.7
=================
o Fix CAS resource_name format
Artifact servers need to be updated.
o Improved startup performance and performance of
calculating artifact cache size
o Various other bug fixes
=================
buildstream 1.1.6
=================
o A lot of bug fixes
=================
buildstream 1.1.5
=================
o Add a `--tar` option to `bst checkout` which allows a tarball to be
created from the artifact contents.
o Fetching and tracking will consult mirrors defined in project config,
and the preferred mirror to fetch from can be defined in the command
line or user config.
o Added new `remote` source plugin for downloading file blobs
o Add support for the new include '(@)' directive in project.conf and .bst files
=================
buildstream 1.1.4
=================
o `bst workspace` commands and `bst track` will substitute their
source elements when performing those operations, e.g. performing
`bst track` on a filter element will track the sources on the
element that it depends on (if it has sources).
o Added new simple `make` element
o Switch to Remote Execution CAS-based artifact cache on all platforms.
Artifact servers need to be migrated.
o BuildStream now requires python version >= 3.5
o BuildStream will now automatically clean up old artifacts when it
runs out of space. The exact behavior is configurable in the user's
buildstream.conf.
=================
buildstream 1.1.3
=================
o Added new `bst init` command to initialize a new project.
o Cross junction tracking is now disabled by default for projects
which can support this by using project.refs ref-storage
New options have been added to explicitly enable cross-junction
tracking.
o Failed jobs are now summarised at the end of a build.
Use `--verbose` and `--no-verbose` to adjust the amount of detail given.
o BuildElements' `configure-commands` are only run once for
workspaces now, which allows for incremental builds.
Appropriate API for plugins is also exposed through
`Element.prepare`.
o The `cmake` plugin now supports building with ninja with the
newly added `generator` configuration option.
o `bst workspace close` and `bst workspace reset` now support multiple
elements. All elements can be specified using `--all`.
o The elements whose cache keys had to be determined during the build
are summarised at the end of the build.
o Fixed versioning introspection to be dynamic, many users use
a developer install mode so they can update with git, now the
version information is always up to date in logs.
This causes a minor API break: The --version output now only
outputs the version.
=================
buildstream 1.1.2
=================
o New ref-storage option allows one to store source refs, such
as git shas, in one central project.refs file instead of inline
with the source declarations.
o Deprecated `--track-save` optionality in `bst build`, this
does not make sense to support now that we have project.refs.
o Added the `sandbox` configuration option which can be used in
`project.conf` and elements, to control the user ID and group ID
used in build sandboxes.
o Added new `deb` source implementation, for staging of downloaded
deb package files.
=================
buildstream 1.1.1
=================
o New project configuration controlling how the sandbox behaves
when `bst shell` is used; allowing projects to provide a more
functional shell environment.
o The `bst shell` command now has a `--mount` option allowing
users to mount files and directories into the sandbox for
testing purposes.
o Log lines are now configurable with the new "message-format"
user configuration, allowing one to express optional fields
such as microsecond precision and wallclock time.
o Newly added filter element
o Git source plugin now allows disabling of submodule checkouts
o In the same way we allow overriding element configurations
by their 'kind' in project.conf, we now support the same
for source plugin configurations.
o Tar and zip sources now automatically recall an `etag`
from the http headers, optimizing tracking of tarballs
significantly (issue #62)
=================
buildstream 1.1.0
=================
o Multiple artifact caches are now supported in project and
user configuration with a priority order (issue #85)
o Add junction support for subprojects
o Changes towards incremental builds in workspaces
o `bst shell --build` now creates true build sandbox
o Many bug fixes
=================
buildstream 1.0.0
=================
First stable release of BuildStream
BuildStream 1.0.0 is all about API stability - for the past months we
have been reviewing our various API surfaces, implementing strategies
for revisioning of our interfaces and cleaning up. Long term stability
is very important for build reproducibility over time, and this release
is the first promise we are making on any API surfaces.
Stable API surfaces include:
o The command line interface
o The YAML user configuration file format
o The YAML project `.bst` file format
o The core Python module imported by external plugins
buildstream-1.6.9/README.rst 0000664 0000000 0000000 00000006541 14375152700 0015543 0 ustar 00root root 0000000 0000000 About
-----
.. image:: https://img.shields.io/github/workflow/status/apache/buildstream/PR%20Checks/bst-1
:alt: GitHub Workflow Status
:target: https://github.com/apache/buildstream/actions/workflows/ci.yml?query=branch%3Abst-1
What is BuildStream?
====================
BuildStream is a Free Software tool for building/integrating software stacks.
It takes inspiration, lessons and use-cases from various projects including
OBS, Reproducible Builds, Yocto, Baserock, Buildroot, Aboriginal, GNOME Continuous,
JHBuild, Flatpak Builder and Android repo.
BuildStream supports multiple build-systems (e.g. autotools, cmake, cpan, distutils,
make, meson, qmake), and can create outputs in a range of formats (e.g. debian packages,
flatpak runtimes, sysroots, system images) for multiple platforms and chipsets.
Why should I use BuildStream?
=============================
BuildStream offers the following advantages:
* **Declarative build instructions/definitions**
BuildStream provides a flexible and extensible framework for the modelling
of software build pipelines in a declarative YAML format, which allows you to
manipulate filesystem data in a controlled, reproducible sandboxed environment.
* **Support for developer and integrator workflows**
BuildStream provides traceability and reproducibility for integrators handling
stacks of hundreds/thousands of components, as well as workspace features and
shortcuts to minimise cycle-time for developers.
* **Fast and predictable**
BuildStream can cache previous builds and track changes to source file content
and build/config commands. BuildStream only rebuilds the things that have changed.
* **Extensible**
You can extend BuildStream to support your favourite build-system.
* **Bootstrap toolchains and bootable systems**
BuildStream can create full systems and complete toolchains from scratch, for
a range of ISAs including x86_32, x86_64, ARMv7, ARMv8, MIPS.
How do I use BuildStream?
=========================
Please refer to the `documentation `_
for information about installing BuildStream, and about the BuildStream YAML format
and plugin options.
How does BuildStream work?
==========================
BuildStream operates on a set of YAML files (.bst files), as follows:
* loads the YAML files which describe the target(s) and all dependencies
* evaluates the version information and build instructions to calculate a build
graph for the target(s) and all dependencies and unique cache-keys for each
element
* retrieves elements from cache if they are already built, or builds them in a
sandboxed environment using the instructions declared in the .bst files
* transforms/configures and/or deploys the resulting target(s) based on the
instructions declared in the .bst files.
How can I get started?
======================
The easiest way to get started is to explore some existing .bst files, for example:
* https://gitlab.gnome.org/GNOME/gnome-build-meta/
* https://gitlab.com/freedesktop-sdk/freedesktop-sdk
* https://gitlab.com/baserock/definitions
* https://gitlab.com/BuildStream/buildstream-examples/tree/master/build-x86image
* https://gitlab.com/BuildStream/buildstream-examples/tree/master/netsurf-flatpak
If you have any questions please ask on our `#buildstream `_ channel in `irc.gnome.org `_
buildstream-1.6.9/buildstream/ 0000775 0000000 0000000 00000000000 14375152700 0016361 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/__init__.py 0000664 0000000 0000000 00000002625 14375152700 0020477 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
# Plugin author facing APIs
import os
if "_BST_COMPLETION" not in os.environ:
# Special sauce to get the version from versioneer
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
from .utils import UtilError, ProgramNotFoundError
from .sandbox import Sandbox, SandboxFlags
from .types import Scope, Consistency, CoreWarnings
from .plugin import Plugin
from .source import Source, SourceError, Consistency, SourceFetcher
from .element import Element, ElementError, Scope
from .buildelement import BuildElement
from .scriptelement import ScriptElement
buildstream-1.6.9/buildstream/__main__.py 0000664 0000000 0000000 00000001135 14375152700 0020453 0 ustar 00root root 0000000 0000000 ##################################################################
# Private Entry Point #
##################################################################
#
# This allows running the cli when BuildStream is uninstalled,
# as long as BuildStream repo is in PYTHONPATH, one can run it
# with:
#
# python3 -m buildstream [program args]
#
# This is used when we need to run BuildStream before installing,
# like when we build documentation.
#
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
from ._frontend.cli import cli
cli()
buildstream-1.6.9/buildstream/_artifactcache/ 0000775 0000000 0000000 00000000000 14375152700 0021301 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_artifactcache/__init__.py 0000664 0000000 0000000 00000001623 14375152700 0023414 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
from .artifactcache import ArtifactCache, ArtifactCacheSpec, CACHE_SIZE_FILE
from .artifactcache import ArtifactCacheUsage
buildstream-1.6.9/buildstream/_artifactcache/artifactcache.py 0000664 0000000 0000000 00000110411 14375152700 0024432 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Maat
import multiprocessing
import os
import signal
import string
from collections import namedtuple
from collections.abc import Mapping
from ..types import _KeyStrength
from .._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason
from .._message import Message, MessageType
from .. import _signals
from .. import utils
from .. import _yaml
from .cascache import CASCache, CASRemote, BlobNotFound
CACHE_SIZE_FILE = "cache_size"
# An ArtifactCacheSpec holds the user configuration for a single remote
# artifact cache.
#
# Args:
# url (str): Location of the remote artifact cache
# push (bool): Whether we should attempt to push artifacts to this cache,
# in addition to pulling from it.
#
class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert client_key client_cert')):
# _new_from_config_node
#
# Creates an ArtifactCacheSpec() from a YAML loaded node
#
@staticmethod
def _new_from_config_node(spec_node, basedir=None):
_yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert'])
url = _yaml.node_get(spec_node, str, 'url')
push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
if not url:
provenance = _yaml.node_get_provenance(spec_node, 'url')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: empty artifact cache URL".format(provenance))
server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
if server_cert and basedir:
server_cert = os.path.join(basedir, server_cert)
client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
if client_key and basedir:
client_key = os.path.join(basedir, client_key)
client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
if client_cert and basedir:
client_cert = os.path.join(basedir, client_cert)
if client_key and not client_cert:
provenance = _yaml.node_get_provenance(spec_node, 'client-key')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-key' was specified without 'client-cert'".format(provenance))
if client_cert and not client_key:
provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-cert' was specified without 'client-key'".format(provenance))
return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
# ArtifactCacheUsage
#
# A simple object to report the current artifact cache
# usage details.
#
# Note that this uses the user configured cache quota
# rather than the internal quota with protective headroom
# removed, to provide a more sensible value to display to
# the user.
#
# Args:
# artifacts (ArtifactCache): The artifact cache to get the status of
#
class ArtifactCacheUsage():
def __init__(self, artifacts):
context = artifacts.context
self.quota_config = context.config_cache_quota # Configured quota
self.quota_size = artifacts._cache_quota_original # Resolved cache quota in bytes
self.used_size = artifacts.get_cache_size() # Size used by artifacts in bytes
self.used_percent = 0 # Percentage of the quota used
if self.quota_size is not None:
self.used_percent = int(self.used_size * 100 / self.quota_size)
# Formattable into a human readable string
#
def __str__(self):
return "{} / {} ({}%)" \
.format(utils._pretty_size(self.used_size, dec_places=1),
self.quota_config,
self.used_percent)
# An ArtifactCache manages artifacts.
#
# Args:
# context (Context): The BuildStream context
#
class ArtifactCache():
def __init__(self, context):
self.context = context
self.extractdir = os.path.join(context.artifactdir, 'extract')
self.cas = CASCache(context.artifactdir)
self.global_remote_specs = []
self.project_remote_specs = {}
self._required_elements = set() # The elements required for this session
self._cache_size = None # The current cache size, sometimes it's an estimate
self._cache_quota = None # The cache quota
self._cache_quota_original = None # The cache quota as specified by the user, in bytes
self._cache_lower_threshold = None # The target cache size for a cleanup
# Per-project list of _CASRemote instances.
self._remotes = {}
self._has_fetch_remotes = False
self._has_push_remotes = False
os.makedirs(self.extractdir, exist_ok=True)
self._calculate_cache_quota()
# get_artifact_fullname()
#
# Generate a full name for an artifact, including the
# project namespace, element name and cache key.
#
# This can also be used as a relative path safely, and
# will normalize parts of the element name such that only
# digits, letters and some select characters are allowed.
#
# Args:
# element (Element): The Element object
# key (str): The element's cache key
#
# Returns:
# (str): The relative path for the artifact
#
def get_artifact_fullname(self, element, key):
project = element._get_project()
# Normalize ostree ref unsupported chars
valid_chars = string.digits + string.ascii_letters + '-._'
element_name = ''.join([
x if x in valid_chars else '_'
for x in element.normal_name
])
assert key is not None
# assume project and element names are not allowed to contain slashes
return '{0}/{1}/{2}'.format(project.name, element_name, key)
# setup_remotes():
#
# Sets up which remotes to use
#
# Args:
# use_config (bool): Whether to use project configuration
# remote_url (str): Remote artifact cache URL
#
# This requires that all of the projects which are to be processed in the session
# have already been loaded and are observable in the Context.
#
def setup_remotes(self, *, use_config=False, remote_url=None):
# Initialize remote artifact caches. We allow the commandline to override
# the user config in some cases (for example `bst push --remote=...`).
has_remote_caches = False
if remote_url:
self._set_remotes([ArtifactCacheSpec(remote_url, push=True)])
has_remote_caches = True
if use_config:
for project in self.context.get_projects():
artifact_caches = _configured_remote_artifact_cache_specs(self.context, project)
if artifact_caches: # artifact_caches is a list of ArtifactCacheSpec instances
self._set_remotes(artifact_caches, project=project)
has_remote_caches = True
if has_remote_caches:
self._initialize_remotes()
# specs_from_config_node()
#
# Parses the configuration of remote artifact caches from a config block.
#
# Args:
# config_node (dict): The config block, which may contain the 'artifacts' key
# basedir (str): The base directory for relative paths
#
# Returns:
# A list of ArtifactCacheSpec instances.
#
# Raises:
# LoadError, if the config block contains invalid keys.
#
@staticmethod
def specs_from_config_node(config_node, basedir=None):
cache_specs = []
artifacts = config_node.get('artifacts', [])
if isinstance(artifacts, Mapping):
cache_specs.append(ArtifactCacheSpec._new_from_config_node(artifacts, basedir))
elif isinstance(artifacts, list):
for spec_node in artifacts:
cache_specs.append(ArtifactCacheSpec._new_from_config_node(spec_node, basedir))
else:
provenance = _yaml.node_get_provenance(config_node, key='artifacts')
raise _yaml.LoadError(_yaml.LoadErrorReason.INVALID_DATA,
"%s: 'artifacts' must be a single 'url:' mapping, or a list of mappings" %
(str(provenance)))
return cache_specs
# mark_required_elements():
#
# Mark elements whose artifacts are required for the current run.
#
# Artifacts whose elements are in this list will be locked by the artifact
# cache and not touched for the duration of the current pipeline.
#
# Args:
# elements (iterable): A set of elements to mark as required
#
def mark_required_elements(self, elements):
# We risk calling this function with a generator, so we
# better consume it first.
#
elements = list(elements)
# Mark the elements as required. We cannot know that we know the
# cache keys yet, so we only check that later when deleting.
#
self._required_elements.update(elements)
# For the cache keys which were resolved so far, we bump
# the mtime of them.
#
# This is just in case we have concurrent instances of
# BuildStream running with the same artifact cache, it will
# reduce the likelihood of one instance deleting artifacts
# which are required by the other.
for element in elements:
strong_key = element._get_cache_key(strength=_KeyStrength.STRONG)
weak_key = element._get_cache_key(strength=_KeyStrength.WEAK)
for key in (strong_key, weak_key):
if key:
try:
ref = self.get_artifact_fullname(element, key)
self.cas.update_mtime(ref)
except CASError:
pass
# clean():
#
# Clean the artifact cache as much as possible.
#
# Args:
# progress (callable): A callback to call when a ref is removed
#
# Returns:
# (int): The size of the cache after having cleaned up
#
def clean(self, progress=None):
artifacts = self.list_artifacts()
context = self.context
# Some accumulative statistics
removed_ref_count = 0
space_saved = 0
# Start off with an announcement with as much info as possible
volume_size, volume_avail = self._get_cache_volume_size()
self._message(MessageType.STATUS, "Starting cache cleanup",
detail=("Elements required by the current build plan: {}\n" +
"User specified quota: {} ({})\n" +
"Cache usage: {}\n" +
"Cache volume: {} total, {} available")
.format(len(self._required_elements),
context.config_cache_quota,
utils._pretty_size(self._cache_quota_original, dec_places=2),
utils._pretty_size(self.get_cache_size(), dec_places=2),
utils._pretty_size(volume_size, dec_places=2),
utils._pretty_size(volume_avail, dec_places=2)))
# Build a set of the cache keys which are required
# based on the required elements at cleanup time
#
# We lock both strong and weak keys - deleting one but not the
# other won't save space, but would be a user inconvenience.
required_artifacts = set()
for element in self._required_elements:
required_artifacts.update([
element._get_cache_key(strength=_KeyStrength.STRONG),
element._get_cache_key(strength=_KeyStrength.WEAK)
])
# Do a real computation of the cache size once, just in case
self.compute_cache_size()
while self.get_cache_size() >= self._cache_lower_threshold:
try:
to_remove = artifacts.pop(0)
except IndexError as e:
# If too many artifacts are required, and we therefore
# can't remove them, we have to abort the build.
#
# FIXME: Asking the user what to do may be neater
#
default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
'buildstream.conf')
detail = ("Aborted after removing {} refs and saving {} disk space.\n"
"The remaining {} in the cache is required by the {} elements in your build plan\n\n"
"There is not enough space to complete the build.\n"
"Please increase the cache-quota in {} and/or make more disk space."
.format(removed_ref_count,
utils._pretty_size(space_saved, dec_places=2),
utils._pretty_size(self.get_cache_size(), dec_places=2),
len(self._required_elements),
(context.config_origin or default_conf)))
if self.has_quota_exceeded():
raise ArtifactError("Cache too full. Aborting.",
detail=detail,
reason="cache-too-full") from e
break
key = to_remove.rpartition('/')[2]
if key not in required_artifacts:
# Remove the actual artifact, if it's not required.
size = self.remove(to_remove)
removed_ref_count += 1
space_saved += size
self._message(MessageType.STATUS,
"Freed {: <7} {}".format(
utils._pretty_size(size, dec_places=2),
to_remove))
# Remove the size from the removed size
self.set_cache_size(self._cache_size - size)
# User callback
#
# Currently this process is fairly slow, but we should
# think about throttling this progress() callback if this
# becomes too intense.
if progress:
progress()
# Informational message about the side effects of the cleanup
self._message(MessageType.INFO, "Cleanup completed",
detail=("Removed {} refs and saving {} disk space.\n" +
"Cache usage is now: {}")
.format(removed_ref_count,
utils._pretty_size(space_saved, dec_places=2),
utils._pretty_size(self.get_cache_size(), dec_places=2)))
return self.get_cache_size()
# compute_cache_size()
#
# Computes the real artifact cache size by calling
# the abstract calculate_cache_size() method.
#
# Returns:
# (int): The size of the artifact cache.
#
def compute_cache_size(self):
old_cache_size = self._cache_size
new_cache_size = self.cas.calculate_cache_size()
if old_cache_size != new_cache_size:
self._cache_size = new_cache_size
usage = ArtifactCacheUsage(self)
self._message(MessageType.STATUS, "Cache usage recomputed: {}".format(usage))
return self._cache_size
# add_artifact_size()
#
# Adds the reported size of a newly cached artifact to the
# overall estimated size.
#
# Args:
# artifact_size (int): The size to add.
#
def add_artifact_size(self, artifact_size):
cache_size = self.get_cache_size()
cache_size += artifact_size
self.set_cache_size(cache_size)
# get_cache_size()
#
# Fetches the cached size of the cache, this is sometimes
# an estimate and periodically adjusted to the real size
# when a cache size calculation job runs.
#
# When it is an estimate, the value is either correct, or
# it is greater than the actual cache size.
#
# Returns:
# (int) An approximation of the artifact cache size, in bytes.
#
def get_cache_size(self):
# If we don't currently have an estimate, figure out the real cache size.
if self._cache_size is None:
stored_size = self._read_cache_size()
if stored_size is not None:
self._cache_size = stored_size
else:
self.compute_cache_size()
# Computing cache doesn't actually write the value.
# Write cache size explicitly here since otherwise
# in some cases it's not stored on disk.
self.set_cache_size(self._cache_size)
return self._cache_size
# set_cache_size()
#
# Forcefully set the overall cache size.
#
# This is used to update the size in the main process after
# having calculated in a cleanup or a cache size calculation job.
#
# Args:
# cache_size (int): The size to set.
#
def set_cache_size(self, cache_size):
assert cache_size is not None
self._cache_size = cache_size
self._write_cache_size(self._cache_size)
# has_quota_exceeded()
#
# Checks if the current artifact cache size exceeds the quota.
#
# Returns:
# (bool): True if the quota is exceeded
#
def has_quota_exceeded(self):
return self.get_cache_size() > self._cache_quota
# preflight():
#
# Preflight check.
#
def preflight(self):
self.cas.preflight()
# initialize_remotes():
#
# This will contact each remote cache.
#
# Args:
# on_failure (callable): Called if we fail to contact one of the caches.
#
def initialize_remotes(self, *, on_failure=None):
remote_specs = self.global_remote_specs
for _, project_specs in self.project_remote_specs.items():
remote_specs += project_specs
remote_specs = list(utils._deduplicate(remote_specs))
remotes = {}
q = multiprocessing.Queue()
for remote_spec in remote_specs:
# Use subprocess to avoid creation of gRPC threads in main BuildStream process
# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
p = multiprocessing.Process(target=self.cas.initialize_remote, args=(remote_spec, q))
try:
# Keep SIGINT blocked in the child process
with _signals.blocked([signal.SIGINT], ignore=False):
p.start()
error = q.get()
p.join()
except KeyboardInterrupt:
utils._kill_process_tree(p.pid)
raise
if error and on_failure:
on_failure(remote_spec.url, error)
elif error:
raise ArtifactError(error)
else:
self._has_fetch_remotes = True
if remote_spec.push:
self._has_push_remotes = True
remotes[remote_spec.url] = CASRemote(remote_spec)
for project in self.context.get_projects():
remote_specs = self.global_remote_specs
if project in self.project_remote_specs:
remote_specs = list(utils._deduplicate(remote_specs + self.project_remote_specs[project]))
project_remotes = []
for remote_spec in remote_specs:
# Errors are already handled in the loop above,
# skip unreachable remotes here.
if remote_spec.url not in remotes:
continue
remote = remotes[remote_spec.url]
project_remotes.append(remote)
self._remotes[project] = project_remotes
# contains():
#
# Check whether the artifact for the specified Element is already available
# in the local artifact cache.
#
# Args:
# element (Element): The Element to check
# key (str): The cache key to use
#
# Returns: True if the artifact is in the cache, False otherwise
#
def contains(self, element, key):
ref = self.get_artifact_fullname(element, key)
return self.cas.contains(ref)
# list_artifacts():
#
# List artifacts in this cache in LRU order.
#
# Returns:
# ([str]) - A list of artifact names as generated by
# `ArtifactCache.get_artifact_fullname` in LRU order
#
def list_artifacts(self):
return self.cas.list_refs()
# remove():
#
# Removes the artifact for the specified ref from the local
# artifact cache.
#
# Args:
# ref (artifact_name): The name of the artifact to remove (as
# generated by
# `ArtifactCache.get_artifact_fullname`)
#
# Returns:
# (int|None) The amount of space pruned from the repository in
# Bytes, or None if defer_prune is True
#
def remove(self, ref):
# Remove extract if not used by other ref
tree = self.cas.resolve_ref(ref)
ref_name, ref_hash = os.path.split(ref)
extract = os.path.join(self.extractdir, ref_name, tree.hash)
keys_file = os.path.join(extract, 'meta', 'keys.yaml')
if os.path.exists(keys_file):
keys_meta = _yaml.load(keys_file)
keys = [keys_meta['strong'], keys_meta['weak']]
remove_extract = True
for other_hash in keys:
if other_hash == ref_hash:
continue
remove_extract = False
break
if remove_extract:
utils._force_rmtree(extract)
return self.cas.remove(ref)
# extract():
#
# Extract cached artifact for the specified Element if it hasn't
# already been extracted.
#
# Assumes artifact has previously been fetched or committed.
#
# Args:
# element (Element): The Element to extract
# key (str): The cache key to use
#
# Raises:
# ArtifactError: In cases there was an OSError, or if the artifact
# did not exist.
#
# Returns: path to extracted artifact
#
def extract(self, element, key):
ref = self.get_artifact_fullname(element, key)
path = os.path.join(self.extractdir, element._get_project().name, element.normal_name)
return self.cas.extract(ref, path)
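# Example (illustrative sketch; 'cache', 'element' and 'key' are hypothetical
# placeholders): the usual lifecycle is to check the local cache first and
# fall back to a remote pull before extracting.
#
#     if cache.contains(element, key):
#         staging_path = cache.extract(element, key)
#     elif cache.has_fetch_remotes(element=element):
#         if cache.pull(element, key):
#             staging_path = cache.extract(element, key)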
# commit():
#
# Commit built artifact to cache.
#
# Args:
# element (Element): The Element to commit an artifact for
# content (str): The element's content directory
# keys (list): The cache keys to use
#
def commit(self, element, content, keys):
refs = [self.get_artifact_fullname(element, key) for key in keys]
self.cas.commit(refs, content)
# diff():
#
# Return a list of files that have been added or modified between
# the artifacts described by key_a and key_b.
#
# Args:
# element (Element): The element whose artifacts to compare
# key_a (str): The first artifact key
# key_b (str): The second artifact key
# subdir (str): A subdirectory to limit the comparison to
#
def diff(self, element, key_a, key_b, *, subdir=None):
ref_a = self.get_artifact_fullname(element, key_a)
ref_b = self.get_artifact_fullname(element, key_b)
return self.cas.diff(ref_a, ref_b, subdir=subdir)
# has_fetch_remotes():
#
# Check whether any remote repositories are available for fetching.
#
# Args:
# element (Element): The Element to check
#
# Returns: True if any remote repositories are configured, False otherwise
#
def has_fetch_remotes(self, *, element=None):
if not self._has_fetch_remotes:
# No project has fetch remotes
return False
elif element is None:
# At least one (sub)project has fetch remotes
return True
else:
# Check whether the specified element's project has fetch remotes
remotes_for_project = self._remotes[element._get_project()]
return bool(remotes_for_project)
# has_push_remotes():
#
# Check whether any remote repositories are available for pushing.
#
# Args:
# element (Element): The Element to check
#
# Returns: True if any remote repository is configured, False otherwise
#
def has_push_remotes(self, *, element=None):
if not self._has_push_remotes:
# No project has push remotes
return False
elif element is None:
# At least one (sub)project has push remotes
return True
else:
# Check whether the specified element's project has push remotes
remotes_for_project = self._remotes[element._get_project()]
return any(remote.spec.push for remote in remotes_for_project)
# push():
#
# Push committed artifact to remote repository.
#
# Args:
# element (Element): The Element whose artifact is to be pushed
# keys (list): The cache keys to use
#
# Returns:
# (bool): True if any remote was updated, False if no pushes were required
#
# Raises:
# (ArtifactError): if there was an error
#
def push(self, element, keys):
refs = [self.get_artifact_fullname(element, key) for key in list(keys)]
project = element._get_project()
push_remotes = [r for r in self._remotes[project] if r.spec.push]
pushed = False
for remote in push_remotes:
remote.init()
display_key = element._get_brief_display_key()
element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url))
if self.cas.push(refs, remote):
element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))
pushed = True
else:
element.info("Remote ({}) already has {} cached".format(
remote.spec.url, element._get_brief_display_key()
))
return pushed
# pull():
#
# Pull artifact from one of the configured remote repositories.
#
# Args:
# element (Element): The Element whose artifact is to be fetched
# key (str): The cache key to use
# progress (callable): The progress callback, if any
#
# Returns:
# (bool): True if pull was successful, False if artifact was not available
#
def pull(self, element, key, *, progress=None):
ref = self.get_artifact_fullname(element, key)
display_key = key[:self.context.log_key_length]
project = element._get_project()
for remote in self._remotes[project]:
try:
element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url))
if self.cas.pull(ref, remote, progress=progress):
element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
# no need to pull from additional remotes
return True
else:
element.info("Remote ({}) does not have {} cached".format(
remote.spec.url, display_key
))
except BlobNotFound:
element.info("Remote ({}) does not have {} cached".format(
remote.spec.url, display_key
))
except CASError as e:
raise ArtifactError("Failed to pull artifact {}: {}".format(
display_key, e)) from e
return False
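# Example (hypothetical sketch; 'cache', 'element' and the keys are
# placeholders): push() and pull() return booleans rather than raising when
# there is nothing to do, so callers can distinguish "already cached" from a
# real failure, which raises ArtifactError instead.
#
#     if cache.has_push_remotes(element=element):
#         updated = cache.push(element, [strong_key, weak_key])
#     found = cache.pull(element, strong_key)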
# link_key():
#
# Add a key for an existing artifact.
#
# Args:
# element (Element): The Element whose artifact is to be linked
# oldkey (str): An existing cache key for the artifact
# newkey (str): A new cache key for the artifact
#
def link_key(self, element, oldkey, newkey):
oldref = self.get_artifact_fullname(element, oldkey)
newref = self.get_artifact_fullname(element, newkey)
self.cas.link_ref(oldref, newref)
################################################
# Local Private Methods #
################################################
# _message()
#
# Local message propagator
#
def _message(self, message_type, message, **kwargs):
args = dict(kwargs)
self.context.message(
Message(None, message_type, message, **args))
# _set_remotes():
#
# Set the list of remote caches. If project is None, the global list of
# remote caches will be set, which is used by all projects. If a project is
# specified, the per-project list of remote caches will be set.
#
# Args:
# remote_specs (list): List of ArtifactCacheSpec instances, in priority order.
# project (Project): The Project instance for project-specific remotes
def _set_remotes(self, remote_specs, *, project=None):
if project is None:
# global remotes
self.global_remote_specs = remote_specs
else:
self.project_remote_specs[project] = remote_specs
# _initialize_remotes()
#
# An internal wrapper which calls the abstract method and
# takes care of messaging
#
def _initialize_remotes(self):
def remote_failed(url, error):
self._message(MessageType.WARN, "Failed to initialize remote {}: {}".format(url, error))
with self.context.timed_activity("Initializing remote caches", silent_nested=True):
self.initialize_remotes(on_failure=remote_failed)
# _write_cache_size()
#
# Writes the given size of the artifact to the cache's size file
#
# Args:
# size (int): The size of the artifact cache to record
#
def _write_cache_size(self, size):
assert isinstance(size, int)
size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE)
with utils.save_file_atomic(size_file_path, "w") as f:
f.write(str(size))
# _read_cache_size()
#
# Reads and returns the size of the artifact cache that's stored in the
# cache's size file
#
# Returns:
# (int): The size of the artifact cache, as recorded in the file
#
def _read_cache_size(self):
size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE)
try:
with open(size_file_path, "r", encoding="utf-8") as f:
size = f.read()
except FileNotFoundError:
return None
try:
num_size = int(size)
except ValueError:
self._message(MessageType.WARN, "Failure resolving cache size",
detail="Size '{}' parsed from '{}' was not an integer"
.format(size, size_file_path))
return None
else:
return num_size
# _calculate_cache_quota()
#
# Calculates and sets the cache quota and lower threshold based on the
# quota set in Context.
# It checks that the quota is both a valid expression, and that there is
# enough disk space to satisfy that quota
#
def _calculate_cache_quota(self):
# Headroom intended to give BuildStream a bit of leeway.
# This acts as the minimum size of cache_quota and also
# is taken from the user requested cache_quota.
#
if 'BST_TEST_SUITE' in os.environ:
headroom = 0
else:
headroom = 2e9
try:
cache_quota = utils._parse_size(self.context.config_cache_quota,
self.context.artifactdir)
except utils.UtilError as e:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}\nPlease specify the value in bytes or as a % of full disk space.\n"
"\nValid values are, for example: 800M 10G 1T 50%\n"
.format(str(e))) from e
total_size, available_space = self._get_cache_volume_size()
cache_size = self.get_cache_size()
# Ensure system has enough storage for the cache_quota
#
# If cache_quota is none, set it to the maximum it could possibly be.
#
# Also check that cache_quota is at least as large as our headroom.
#
if cache_quota is None: # Infinity, set to max system storage
cache_quota = cache_size + available_space
if cache_quota < headroom: # Check minimum
raise LoadError(LoadErrorReason.INVALID_DATA,
"Invalid cache quota ({}): ".format(utils._pretty_size(cache_quota)) +
"BuildStream requires a minimum cache quota of 2G.")
if cache_quota > cache_size + available_space: # Check maximum
if '%' in self.context.config_cache_quota:
available = (available_space / total_size) * 100
available = '{}% of total disk space'.format(round(available, 1))
else:
available = utils._pretty_size(available_space)
raise ArtifactError("Your system does not have enough available " +
"space to support the cache quota specified.",
detail=("You have specified a quota of {quota} total disk space.\n" +
"The filesystem containing {local_cache_path} only " +
"has {available_size} available.")
.format(
quota=self.context.config_cache_quota,
local_cache_path=self.context.artifactdir,
available_size=available),
reason='insufficient-storage-for-quota')
# Subtract a slight headroom (2e9, i.e. 2GB) from the cache_quota
# to try to avoid exceptions.
#
# Of course, we might still end up running out during a build
# if we end up writing more than 2G, but hey, this stuff is
# already really fuzzy.
#
self._cache_quota_original = cache_quota
self._cache_quota = cache_quota - headroom
self._cache_lower_threshold = self._cache_quota / 2
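# Worked example (illustrative numbers only): with a configured quota of 50G,
# the default 2e9 byte headroom and a volume large enough to hold it, the
# derived values are:
#
#     cache_quota            = 50e9          # parsed from user configuration
#     _cache_quota_original  = 50e9
#     _cache_quota           = 50e9 - 2e9    # 48e9, quota minus headroom
#     _cache_lower_threshold = 48e9 / 2      # 24e9, cleanup target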
# _get_cache_volume_size()
#
# Get the available space and total space for the volume on
# which the artifact cache is located.
#
# Returns:
# (int): The total number of bytes on the volume
# (int): The number of available bytes on the volume
#
# NOTE: We use this stub to allow the test cases
# to override what an artifact cache thinks
# about its disk size and available bytes.
#
def _get_cache_volume_size(self):
return utils._get_volume_size(self.context.artifactdir)
# _configured_remote_artifact_cache_specs():
#
# Return the list of configured artifact remotes for a given project, in priority
# order. This takes into account the user and project configuration.
#
# Args:
# context (Context): The BuildStream context
# project (Project): The BuildStream project
#
# Returns:
# A list of ArtifactCacheSpec instances describing the remote artifact caches.
#
def _configured_remote_artifact_cache_specs(context, project):
project_overrides = context.get_overrides(project.name)
project_extra_specs = ArtifactCache.specs_from_config_node(project_overrides)
return list(utils._deduplicate(
project_extra_specs + project.artifact_cache_specs + context.artifact_cache_specs))
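# Note on ordering (illustrative, assuming _deduplicate() keeps the first
# occurrence of each spec): project overrides from the user configuration take
# precedence, then the project's own artifact cache specs, then the globally
# configured ones. For example, with hypothetical spec names:
#
#     project_extra_specs          = [A]
#     project.artifact_cache_specs = [B, A]
#     context.artifact_cache_specs = [C]
#     # resulting priority order:    [A, B, C]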
buildstream-1.6.9/buildstream/_artifactcache/cascache.py 0000664 0000000 0000000 00000132520 14375152700 0023410 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jürg Billeter
import hashlib
import itertools
import os
import stat
import tempfile
import uuid
import errno
import contextlib
from urllib.parse import urlparse
import grpc
from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
from .._protos.build.bazel.remote.asset.v1 import remote_asset_pb2, remote_asset_pb2_grpc
from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .. import utils
from .._exceptions import CASError
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
# How often is a keepalive ping sent to the server to make sure the transport is still alive
_KEEPALIVE_TIME_MS = 60000
REMOTE_ASSET_URN_TEMPLATE = "urn:fdc:buildstream.build:2020:v1:{}"
class _Attempt():
def __init__(self, last_attempt=False):
self.__passed = None
self.__last_attempt = last_attempt
def passed(self):
return self.__passed
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
try:
if exc_type is None:
self.__passed = True
else:
self.__passed = False
if exc_value is not None:
raise exc_value
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.UNAVAILABLE:
return not self.__last_attempt
elif e.code() == grpc.StatusCode.ABORTED:
raise CASError("grpc aborted: {}".format(str(e)),
detail=e.details(),
temporary=True) from e
else:
return False
return False
def _retry(tries=5):
for a in range(tries):
attempt = _Attempt(last_attempt=(a == tries - 1))
yield attempt
if attempt.passed():
break
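# Example (illustrative sketch of the retry pattern used throughout this
# module): each attempt is a context manager that swallows UNAVAILABLE errors
# until the last try, so transient gRPC failures are retried transparently.
#
#     for attempt in _retry():
#         with attempt:
#             response = remote.ref_storage.GetReference(request)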
class BlobNotFound(CASError):
def __init__(self, blob, msg):
self.blob = blob
super().__init__(msg)
# A CASCache manages a CAS repository as specified in the Remote Execution API.
#
# Args:
# path (str): The root directory for the CAS repository
#
class CASCache():
def __init__(self, path):
self.casdir = os.path.join(path, 'cas')
self.tmpdir = os.path.join(path, 'tmp')
os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
os.makedirs(self.tmpdir, exist_ok=True)
# preflight():
#
# Preflight check.
#
def preflight(self):
if (not os.path.isdir(os.path.join(self.casdir, 'refs', 'heads')) or
not os.path.isdir(os.path.join(self.casdir, 'objects'))):
raise CASError("CAS repository check failed for '{}'".format(self.casdir))
# contains():
#
# Check whether the specified ref is already available in the local CAS cache.
#
# Args:
# ref (str): The ref to check
#
# Returns: True if the ref is in the cache, False otherwise
#
def contains(self, ref):
refpath = self._refpath(ref)
# This assumes that the repository doesn't have any dangling pointers
return os.path.exists(refpath)
# extract():
#
# Extract cached directory for the specified ref if it hasn't
# already been extracted.
#
# Args:
# ref (str): The ref whose directory to extract
# path (str): The destination path
#
# Raises:
# CASError: In case there was an OSError, or if the ref did not exist.
#
# Returns: path to extracted directory
#
def extract(self, ref, path):
tree = self.resolve_ref(ref, update_mtime=True)
dest = os.path.join(path, tree.hash)
if os.path.isdir(dest):
# directory has already been extracted
return dest
with tempfile.TemporaryDirectory(prefix='tmp', dir=self.tmpdir) as tmpdir:
checkoutdir = os.path.join(tmpdir, ref)
self._checkout(checkoutdir, tree)
os.makedirs(os.path.dirname(dest), exist_ok=True)
try:
os.rename(checkoutdir, dest)
except OSError as e:
# With rename it's possible to get either ENOTEMPTY or EEXIST
# in the case that the destination path is a non-empty directory.
#
# If rename fails with these errors, another process beat
# us to it so just ignore.
if e.errno not in [errno.ENOTEMPTY, errno.EEXIST]:
raise CASError("Failed to extract directory for ref '{}': {}".format(ref, e)) from e
return dest
# commit():
#
# Commit directory to cache.
#
# Args:
# refs (list): The refs to set
# path (str): The directory to import
#
def commit(self, refs, path):
tree = self._commit_directory(path)
for ref in refs:
self.set_ref(ref, tree)
# diff():
#
# Return a list of files that have been added or modified between
# the refs described by ref_a and ref_b.
#
# Args:
# ref_a (str): The first ref
# ref_b (str): The second ref
# subdir (str): A subdirectory to limit the comparison to
#
def diff(self, ref_a, ref_b, *, subdir=None):
tree_a = self.resolve_ref(ref_a)
tree_b = self.resolve_ref(ref_b)
if subdir:
tree_a = self._get_subdir(tree_a, subdir)
tree_b = self._get_subdir(tree_b, subdir)
added = []
removed = []
modified = []
self._diff_trees(tree_a, tree_b, added=added, removed=removed, modified=modified)
return modified, removed, added
def initialize_remote(self, remote_spec, q):
try:
remote = CASRemote(remote_spec)
remote.init()
if remote.asset_fetch_supported:
if remote_spec.push and not remote.asset_push_supported:
q.put('Remote Asset server does not allow push')
else:
# No error
q.put(None)
else:
request = buildstream_pb2.StatusRequest()
for attempt in _retry():
with attempt:
response = remote.ref_storage.Status(request)
if remote_spec.push and not response.allow_updates:
q.put('CAS server does not allow push')
else:
# No error
q.put(None)
except grpc.RpcError as e:
# str(e) is too verbose for errors reported to the user
q.put(e.details())
except Exception as e: # pylint: disable=broad-except
# Whatever happens, we need to return it to the calling process
#
q.put(str(e))
# pull():
#
# Pull a ref from a remote repository.
#
# Args:
# ref (str): The ref to pull
# remote (CASRemote): The remote repository to pull from
# progress (callable): The progress callback, if any
#
# Returns:
# (bool): True if pull was successful, False if ref was not available
#
def pull(self, ref, remote, *, progress=None):
try:
remote.init()
if remote.asset_fetch_supported:
request = remote_asset_pb2.FetchDirectoryRequest()
request.uris.append(REMOTE_ASSET_URN_TEMPLATE.format(ref))
for attempt in _retry():
with attempt:
response = remote.remote_asset_fetch.FetchDirectory(request)
digest = response.root_directory_digest
else:
request = buildstream_pb2.GetReferenceRequest()
request.key = ref
for attempt in _retry():
with attempt:
response = remote.ref_storage.GetReference(request)
digest = response.digest
tree = remote_execution_pb2.Digest()
tree.hash = digest.hash
tree.size_bytes = digest.size_bytes
self._fetch_directory(remote, tree)
self.set_ref(ref, tree)
return True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
raise CASError("Failed to pull ref {}: {}".format(ref, e)) from e
return False
# link_ref():
#
# Add an alias for an existing ref.
#
# Args:
# oldref (str): An existing ref
# newref (str): A new ref for the same directory
#
def link_ref(self, oldref, newref):
tree = self.resolve_ref(oldref)
self.set_ref(newref, tree)
# push():
#
# Push committed refs to remote repository.
#
# Args:
# refs (list): The refs to push
# remote (CASRemote): The remote to push to
#
# Returns:
# (bool): True if any remote was updated, False if no pushes were required
#
# Raises:
# (CASError): if there was an error
#
def push(self, refs, remote):
skipped_remote = True
try:
for ref in refs:
tree = self.resolve_ref(ref)
# Check whether ref is already on the server in which case
# there is no need to push the ref
try:
if remote.asset_fetch_supported:
request = remote_asset_pb2.FetchDirectoryRequest()
request.uris.append(REMOTE_ASSET_URN_TEMPLATE.format(ref))
for attempt in _retry():
with attempt:
response = remote.remote_asset_fetch.FetchDirectory(request)
digest = response.root_directory_digest
else:
request = buildstream_pb2.GetReferenceRequest()
request.key = ref
for attempt in _retry():
with attempt:
response = remote.ref_storage.GetReference(request)
digest = response.digest
if digest.hash == tree.hash and digest.size_bytes == tree.size_bytes:
# ref is already on the server with the same tree
continue
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
# Intentionally re-raise RpcError for outer except block.
raise
self._send_directory(remote, tree)
if remote.asset_push_supported:
request = remote_asset_pb2.PushDirectoryRequest()
request.uris.append(REMOTE_ASSET_URN_TEMPLATE.format(ref))
request.root_directory_digest.hash = tree.hash
request.root_directory_digest.size_bytes = tree.size_bytes
for attempt in _retry():
with attempt:
remote.remote_asset_push.PushDirectory(request)
else:
request = buildstream_pb2.UpdateReferenceRequest()
request.keys.append(ref)
request.digest.hash = tree.hash
request.digest.size_bytes = tree.size_bytes
for attempt in _retry():
with attempt:
remote.ref_storage.UpdateReference(request)
skipped_remote = False
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
raise CASError("Failed to push ref {}: {}".format(refs, e), temporary=True) from e
return not skipped_remote
# objpath():
#
# Return the path of an object based on its digest.
#
# Args:
# digest (Digest): The digest of the object
#
# Returns:
# (str): The path of the object
#
def objpath(self, digest):
return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
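# Example (hypothetical digest): objects are stored fan-out style under the
# first two hex characters of their hash, so a digest whose hash starts with
# 'ab12' maps to:
#
#     <casdir>/objects/ab/12...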
# add_object():
#
# Hash and write object to CAS.
#
# Args:
# digest (Digest): An optional Digest object to populate
# path (str): Path to file to add
# buffer (bytes): Byte buffer to add
# link_directly (bool): Whether file given by path can be linked
#
# Returns:
# (Digest): The digest of the added object
#
# Either `path` or `buffer` must be passed, but not both.
#
def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False):
# Exactly one of the two parameters has to be specified
assert (path is None) != (buffer is None)
if digest is None:
digest = remote_execution_pb2.Digest()
try:
h = hashlib.sha256()
# Always write out new file to avoid corruption if input file is modified
with contextlib.ExitStack() as stack:
if path is not None and link_directly:
tmp = stack.enter_context(open(path, 'rb'))
for chunk in iter(lambda: tmp.read(4096), b""):
h.update(chunk)
else:
tmp = stack.enter_context(self._temporary_object())
if path:
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(4096), b""):
h.update(chunk)
tmp.write(chunk)
else:
h.update(buffer)
tmp.write(buffer)
tmp.flush()
digest.hash = h.hexdigest()
digest.size_bytes = os.fstat(tmp.fileno()).st_size
# Place file at final location
objpath = self.objpath(digest)
os.makedirs(os.path.dirname(objpath), exist_ok=True)
os.link(tmp.name, objpath)
except FileExistsError:
# We can ignore the failed link() if the object is already in the repo.
pass
except OSError as e:
raise CASError("Failed to hash object: {}".format(e)) from e
return digest
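# Example (illustrative sketch; the cache path is hypothetical): adding a
# small in-memory buffer and checking that the object landed in the store.
#
#     cas = CASCache('/tmp/example-cas')
#     digest = cas.add_object(buffer=b'hello world')
#     assert os.path.exists(cas.objpath(digest))
#     assert digest.size_bytes == len(b'hello world')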
# set_ref():
#
# Create or replace a ref.
#
# Args:
# ref (str): The name of the ref
# tree (Digest): The digest to associate with the ref
#
def set_ref(self, ref, tree):
refpath = self._refpath(ref)
os.makedirs(os.path.dirname(refpath), exist_ok=True)
with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f:
f.write(tree.SerializeToString())
# resolve_ref():
#
# Resolve a ref to a digest.
#
# Args:
# ref (str): The name of the ref
# update_mtime (bool): Whether to update the mtime of the ref
#
# Returns:
# (Digest): The digest stored in the ref
#
def resolve_ref(self, ref, *, update_mtime=False):
refpath = self._refpath(ref)
try:
with open(refpath, 'rb') as f:
if update_mtime:
os.utime(refpath)
digest = remote_execution_pb2.Digest()
digest.ParseFromString(f.read())
return digest
except FileNotFoundError as e:
raise CASError("Attempt to access unavailable ref: {}".format(e)) from e
# update_mtime()
#
# Update the mtime of a ref.
#
# Args:
# ref (str): The ref to update
#
def update_mtime(self, ref):
try:
os.utime(self._refpath(ref))
except FileNotFoundError as e:
raise CASError("Attempt to access unavailable ref: {}".format(e)) from e
# calculate_cache_size()
#
# Return the real disk usage of the CAS cache.
#
# Returns:
# (int): The size of the cache.
#
def calculate_cache_size(self):
return utils._get_dir_size(self.casdir)
# list_refs():
#
# List refs in Least Recently Modified (LRM) order.
#
# Returns:
# (list) - A list of refs in LRM order
#
def list_refs(self):
# string of: /path/to/repo/refs/heads
ref_heads = os.path.join(self.casdir, 'refs', 'heads')
refs = []
mtimes = []
for root, _, files in os.walk(ref_heads):
for filename in files:
ref_path = os.path.join(root, filename)
refs.append(os.path.relpath(ref_path, ref_heads))
# Obtain the mtime (the time a file was last modified)
mtimes.append(os.path.getmtime(ref_path))
# NOTE: Sorted will sort from earliest to latest, thus the
# first ref of this list will be the file modified earliest.
return [ref for _, ref in sorted(zip(mtimes, refs))]
# list_objects():
#
# List cached objects in Least Recently Modified (LRM) order.
#
# Returns:
# (list) - A list of objects and timestamps in LRM order
#
def list_objects(self):
objs = []
mtimes = []
for root, _, files in os.walk(os.path.join(self.casdir, 'objects')):
for filename in files:
obj_path = os.path.join(root, filename)
try:
mtimes.append(os.path.getmtime(obj_path))
except FileNotFoundError:
pass
else:
objs.append(obj_path)
# NOTE: Sorted will sort from earliest to latest, thus the
# first element of this list will be the file modified earliest.
return sorted(zip(mtimes, objs))
def clean_up_refs_until(self, time):
ref_heads = os.path.join(self.casdir, 'refs', 'heads')
for root, _, files in os.walk(ref_heads):
for filename in files:
ref_path = os.path.join(root, filename)
# Obtain the mtime (the time a file was last modified)
if os.path.getmtime(ref_path) < time:
os.unlink(ref_path)
# remove():
#
# Removes the given symbolic ref from the repo.
#
# Args:
# ref (str): A symbolic ref
# defer_prune (bool): Whether to defer pruning to the caller. NOTE:
# The space won't be freed until you manually
# call prune.
#
# Returns:
# (int|None) The amount of space pruned from the repository in
# Bytes, or None if defer_prune is True
#
def remove(self, ref, *, defer_prune=False):
# Remove cache ref
refpath = self._refpath(ref)
if not os.path.exists(refpath):
raise CASError("Could not find ref '{}'".format(ref))
os.unlink(refpath)
if not defer_prune:
pruned = self.prune()
return pruned
return None
# prune():
#
# Prune unreachable objects from the repo.
#
def prune(self):
ref_heads = os.path.join(self.casdir, 'refs', 'heads')
pruned = 0
reachable = set()
# Check which objects are reachable
for root, _, files in os.walk(ref_heads):
for filename in files:
ref_path = os.path.join(root, filename)
ref = os.path.relpath(ref_path, ref_heads)
tree = self.resolve_ref(ref)
self._reachable_refs_dir(reachable, tree)
# Prune unreachable objects
for root, _, files in os.walk(os.path.join(self.casdir, 'objects')):
for filename in files:
objhash = os.path.basename(root) + filename
if objhash not in reachable:
obj_path = os.path.join(root, filename)
pruned += os.stat(obj_path).st_size
os.unlink(obj_path)
return pruned
def update_tree_mtime(self, tree):
reachable = set()
self._reachable_refs_dir(reachable, tree, update_mtime=True)
################################################
# Local Private Methods #
################################################
def _checkout(self, dest, tree):
os.makedirs(dest, exist_ok=True)
directory = remote_execution_pb2.Directory()
with open(self.objpath(tree), 'rb') as f:
directory.ParseFromString(f.read())
for filenode in directory.files:
# regular file, create hardlink
fullpath = os.path.join(dest, filenode.name)
os.link(self.objpath(filenode.digest), fullpath)
if filenode.is_executable:
os.chmod(fullpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
for dirnode in directory.directories:
fullpath = os.path.join(dest, dirnode.name)
self._checkout(fullpath, dirnode.digest)
for symlinknode in directory.symlinks:
# symlink
fullpath = os.path.join(dest, symlinknode.name)
os.symlink(symlinknode.target, fullpath)
def _refpath(self, ref):
return os.path.join(self.casdir, 'refs', 'heads', ref)
# _commit_directory():
#
# Adds local directory to content addressable store.
#
# Adds files, symbolic links and recursively other directories in
# a local directory to the content addressable store.
#
# Args:
# path (str): Path to the directory to add.
# dir_digest (Digest): An optional Digest object to use.
#
# Returns:
# (Digest): Digest object for the directory added.
#
def _commit_directory(self, path, *, dir_digest=None):
directory = remote_execution_pb2.Directory()
for name in sorted(os.listdir(path)):
full_path = os.path.join(path, name)
mode = os.lstat(full_path).st_mode
if stat.S_ISDIR(mode):
dirnode = directory.directories.add()
dirnode.name = name
self._commit_directory(full_path, dir_digest=dirnode.digest)
elif stat.S_ISREG(mode):
filenode = directory.files.add()
filenode.name = name
self.add_object(path=full_path, digest=filenode.digest)
filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
elif stat.S_ISLNK(mode):
symlinknode = directory.symlinks.add()
symlinknode.name = name
symlinknode.target = os.readlink(full_path)
else:
raise CASError("Unsupported file type for {}".format(full_path))
return self.add_object(digest=dir_digest,
buffer=directory.SerializeToString())
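# Example (illustrative sketch; path and ref name are hypothetical):
# committing a local directory returns the digest of the root Directory
# message, with every file, symlink and subdirectory stored as its own
# object. commit() above does exactly this for each ref.
#
#     root_digest = cas._commit_directory('/path/to/output')
#     cas.set_ref('my/ref/name', root_digest)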
def _get_subdir(self, tree, subdir):
head, name = os.path.split(subdir)
if head:
tree = self._get_subdir(tree, head)
directory = remote_execution_pb2.Directory()
with open(self.objpath(tree), 'rb') as f:
directory.ParseFromString(f.read())
for dirnode in directory.directories:
if dirnode.name == name:
return dirnode.digest
raise CASError("Subdirectory {} not found".format(name))
def _diff_trees(self, tree_a, tree_b, *, added, removed, modified, path=""):
dir_a = remote_execution_pb2.Directory()
dir_b = remote_execution_pb2.Directory()
if tree_a:
with open(self.objpath(tree_a), 'rb') as f:
dir_a.ParseFromString(f.read())
if tree_b:
with open(self.objpath(tree_b), 'rb') as f:
dir_b.ParseFromString(f.read())
a = 0
b = 0
while a < len(dir_a.files) or b < len(dir_b.files):
if b < len(dir_b.files) and (a >= len(dir_a.files) or
dir_a.files[a].name > dir_b.files[b].name):
added.append(os.path.join(path, dir_b.files[b].name))
b += 1
elif a < len(dir_a.files) and (b >= len(dir_b.files) or
dir_b.files[b].name > dir_a.files[a].name):
removed.append(os.path.join(path, dir_a.files[a].name))
a += 1
else:
# File exists in both directories
if dir_a.files[a].digest.hash != dir_b.files[b].digest.hash:
modified.append(os.path.join(path, dir_a.files[a].name))
a += 1
b += 1
a = 0
b = 0
while a < len(dir_a.directories) or b < len(dir_b.directories):
if b < len(dir_b.directories) and (a >= len(dir_a.directories) or
dir_a.directories[a].name > dir_b.directories[b].name):
self._diff_trees(None, dir_b.directories[b].digest,
added=added, removed=removed, modified=modified,
path=os.path.join(path, dir_b.directories[b].name))
b += 1
elif a < len(dir_a.directories) and (b >= len(dir_b.directories) or
dir_b.directories[b].name > dir_a.directories[a].name):
self._diff_trees(dir_a.directories[a].digest, None,
added=added, removed=removed, modified=modified,
path=os.path.join(path, dir_a.directories[a].name))
a += 1
else:
# Subdirectory exists in both directories
if dir_a.directories[a].digest.hash != dir_b.directories[b].digest.hash:
self._diff_trees(dir_a.directories[a].digest, dir_b.directories[b].digest,
added=added, removed=removed, modified=modified,
path=os.path.join(path, dir_a.directories[a].name))
a += 1
b += 1
def _reachable_refs_dir(self, reachable, tree, update_mtime=False):
if tree.hash in reachable:
return
if update_mtime:
os.utime(self.objpath(tree))
reachable.add(tree.hash)
directory = remote_execution_pb2.Directory()
with open(self.objpath(tree), 'rb') as f:
directory.ParseFromString(f.read())
for filenode in directory.files:
if update_mtime:
os.utime(self.objpath(filenode.digest))
reachable.add(filenode.digest.hash)
for dirnode in directory.directories:
self._reachable_refs_dir(reachable, dirnode.digest, update_mtime=update_mtime)
def _required_blobs(self, directory_digest):
# parse directory, and recursively add blobs
d = remote_execution_pb2.Digest()
d.hash = directory_digest.hash
d.size_bytes = directory_digest.size_bytes
yield d
directory = remote_execution_pb2.Directory()
with open(self.objpath(directory_digest), 'rb') as f:
directory.ParseFromString(f.read())
for filenode in directory.files:
d = remote_execution_pb2.Digest()
d.hash = filenode.digest.hash
d.size_bytes = filenode.digest.size_bytes
yield d
for dirnode in directory.directories:
yield from self._required_blobs(dirnode.digest)
def _fetch_blob(self, remote, digest, stream):
resource_name = '/'.join(['blobs', digest.hash, str(digest.size_bytes)])
request = bytestream_pb2.ReadRequest()
request.resource_name = resource_name
request.read_offset = 0
for response in remote.bytestream.Read(request):
stream.write(response.data)
stream.flush()
assert digest.size_bytes == os.fstat(stream.fileno()).st_size
# _temporary_object():
#
# Returns:
# (file): A file object to a named temporary file.
#
# Create a named temporary file with 0o0644 access rights.
@contextlib.contextmanager
def _temporary_object(self):
with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
os.chmod(f.name,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
yield f
# _ensure_blob():
#
# Fetch and add blob if it's not already local.
#
# Args:
# remote (Remote): The remote to use.
# digest (Digest): Digest object for the blob to fetch.
#
# Returns:
# (str): The path of the object
#
def _ensure_blob(self, remote, digest):
objpath = self.objpath(digest)
if os.path.exists(objpath):
# already in local repository
return objpath
with self._temporary_object() as f:
self._fetch_blob(remote, digest, f)
added_digest = self.add_object(path=f.name, link_directly=True)
assert added_digest.hash == digest.hash
return objpath
def _batch_download_complete(self, batch):
for digest, data in batch.send():
with self._temporary_object() as f:
f.write(data)
f.flush()
added_digest = self.add_object(path=f.name, link_directly=True)
assert added_digest.hash == digest.hash
# Helper function for _fetch_directory().
def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
self._batch_download_complete(batch)
# All previously scheduled directories are now locally available,
# move them to the processing queue.
fetch_queue.extend(fetch_next_queue)
fetch_next_queue.clear()
return _CASBatchRead(remote)
# Helper function for _fetch_directory().
def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
in_local_cache = os.path.exists(self.objpath(digest))
if in_local_cache:
# Skip download, already in local cache.
pass
elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
not remote.batch_read_supported):
# Too large for batch request, download in independent request.
self._ensure_blob(remote, digest)
in_local_cache = True
else:
if not batch.add(digest):
# Not enough space left in batch request.
# Complete pending batch first.
batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
batch.add(digest)
if recursive:
if in_local_cache:
# Add directory to processing queue.
fetch_queue.append(digest)
else:
# Directory will be available after completing pending batch.
# Add directory to deferred processing queue.
fetch_next_queue.append(digest)
return batch
# _fetch_directory():
#
# Fetches remote directory and adds it to content addressable store.
#
# Fetches files, symbolic links and recursively other directories in
# the remote directory and adds them to the content addressable
# store.
#
# Args:
# remote (Remote): The remote to use.
# dir_digest (Digest): Digest object for the directory to fetch.
#
def _fetch_directory(self, remote, dir_digest):
fetch_queue = [dir_digest]
fetch_next_queue = []
batch = _CASBatchRead(remote)
while len(fetch_queue) + len(fetch_next_queue) > 0:
if len(fetch_queue) == 0:
batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
dir_digest = fetch_queue.pop(0)
objpath = self._ensure_blob(remote, dir_digest)
directory = remote_execution_pb2.Directory()
with open(objpath, 'rb') as f:
directory.ParseFromString(f.read())
for dirnode in directory.directories:
batch = self._fetch_directory_node(remote, dirnode.digest, batch,
fetch_queue, fetch_next_queue, recursive=True)
for filenode in directory.files:
batch = self._fetch_directory_node(remote, filenode.digest, batch,
fetch_queue, fetch_next_queue)
# Fetch final batch
self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
def _send_blob(self, remote, digest, stream, u_uid=uuid.uuid4()):
resource_name = '/'.join(['uploads', str(u_uid), 'blobs',
digest.hash, str(digest.size_bytes)])
def request_stream(resname, instream):
offset = 0
finished = False
remaining = digest.size_bytes
while not finished:
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
request = bytestream_pb2.WriteRequest()
request.write_offset = offset
# max. _MAX_PAYLOAD_BYTES chunks
request.data = instream.read(chunk_size)
request.resource_name = resname
request.finish_write = remaining <= 0
yield request
offset += chunk_size
finished = request.finish_write
for attempt in _retry():
with attempt:
response = remote.bytestream.Write(request_stream(resource_name, stream))
assert response.committed_size == digest.size_bytes
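# Note (illustrative arithmetic): uploads are chunked at _MAX_PAYLOAD_BYTES
# (1 MiB), so a blob of, say, 2.5 MiB is streamed as three WriteRequests of
# 1 MiB, 1 MiB and 0.5 MiB, with finish_write set on the last one. The
# resource name follows the ByteStream convention
# 'uploads/{uuid}/blobs/{hash}/{size_bytes}'.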
def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
required_blobs = self._required_blobs(digest)
missing_blobs = {}
# Limit size of FindMissingBlobs request
for required_blobs_group in _grouper(required_blobs, 512):
request = remote_execution_pb2.FindMissingBlobsRequest()
for required_digest in required_blobs_group:
d = request.blob_digests.add()
d.hash = required_digest.hash
d.size_bytes = required_digest.size_bytes
for attempt in _retry():
with attempt:
response = remote.cas.FindMissingBlobs(request)
for missing_digest in response.missing_blob_digests:
d = remote_execution_pb2.Digest()
d.hash = missing_digest.hash
d.size_bytes = missing_digest.size_bytes
missing_blobs[d.hash] = d
# Upload any blobs missing on the server
self._send_blobs(remote, missing_blobs.values(), u_uid)
def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
batch = _CASBatchUpdate(remote)
for digest in digests:
with open(self.objpath(digest), 'rb') as f:
assert os.fstat(f.fileno()).st_size == digest.size_bytes
if (digest.size_bytes >= remote.max_batch_total_size_bytes or
not remote.batch_update_supported):
# Too large for batch request, upload in independent request.
self._send_blob(remote, digest, f, u_uid=u_uid)
else:
if not batch.add(digest, f):
# Not enough space left in batch request.
# Complete pending batch first.
batch.send()
batch = _CASBatchUpdate(remote)
batch.add(digest, f)
# Send final batch
batch.send()
# Represents a single remote CAS cache.
#
class CASRemote():
# pylint: disable=attribute-defined-outside-init
def __init__(self, spec):
self.spec = spec
self._initialized = False
self.channel = None
self.bytestream = None
self.cas = None
self.ref_storage = None
def init(self):
if not self._initialized:
url = urlparse(self.spec.url)
if url.scheme == 'http':
port = url.port or 80
self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port),
options=[("grpc.keepalive_time_ms", _KEEPALIVE_TIME_MS)])
elif url.scheme == 'https':
port = url.port or 443
if self.spec.server_cert:
with open(self.spec.server_cert, 'rb') as f:
server_cert_bytes = f.read()
else:
server_cert_bytes = None
if self.spec.client_key:
with open(self.spec.client_key, 'rb') as f:
client_key_bytes = f.read()
else:
client_key_bytes = None
if self.spec.client_cert:
with open(self.spec.client_cert, 'rb') as f:
client_cert_bytes = f.read()
else:
client_cert_bytes = None
credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes,
private_key=client_key_bytes,
certificate_chain=client_cert_bytes)
self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials,
options=[("grpc.keepalive_time_ms", _KEEPALIVE_TIME_MS)])
else:
raise CASError("Unsupported URL: {}".format(self.spec.url))
self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
self.remote_asset_fetch = remote_asset_pb2_grpc.FetchStub(self.channel)
self.remote_asset_push = remote_asset_pb2_grpc.PushStub(self.channel)
self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
try:
request = remote_execution_pb2.GetCapabilitiesRequest()
for attempt in _retry():
with attempt:
response = self.capabilities.GetCapabilities(request)
server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
except grpc.RpcError as e:
# Simply use the defaults for servers that don't implement GetCapabilities()
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
# Check whether the server supports BatchReadBlobs()
self.batch_read_supported = False
try:
request = remote_execution_pb2.BatchReadBlobsRequest()
for attempt in _retry():
with attempt:
response = self.cas.BatchReadBlobs(request)
self.batch_read_supported = True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
self.asset_fetch_supported = False
try:
request = remote_asset_pb2.FetchDirectoryRequest()
for attempt in _retry():
with attempt:
response = self.remote_asset_fetch.FetchDirectory(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.INVALID_ARGUMENT:
# Expected error as the request doesn't specify any URIs.
self.asset_fetch_supported = True
elif e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
self.batch_update_supported = False
self.asset_push_supported = False
if self.spec.push:
# Check whether the server supports BatchUpdateBlobs()
try:
request = remote_execution_pb2.BatchUpdateBlobsRequest()
for attempt in _retry():
with attempt:
response = self.cas.BatchUpdateBlobs(request)
self.batch_update_supported = True
except grpc.RpcError as e:
if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
e.code() != grpc.StatusCode.PERMISSION_DENIED):
raise
# Check whether the server supports PushDirectory()
try:
request = remote_asset_pb2.PushDirectoryRequest()
for attempt in _retry():
with attempt:
response = self.remote_asset_push.PushDirectory(request)
except grpc.RpcError as e:
if e.code() == grpc.StatusCode.INVALID_ARGUMENT:
# Expected error as the request doesn't specify any URIs.
self.asset_push_supported = True
elif (e.code() != grpc.StatusCode.UNIMPLEMENTED and
e.code() != grpc.StatusCode.PERMISSION_DENIED):
raise
self._initialized = True
# Represents a batch of blobs queued for fetching.
#
class _CASBatchRead():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchReadBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
request_digest = self._request.digests.add()
request_digest.hash = digest.hash
request_digest.size_bytes = digest.size_bytes
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if len(self._request.digests) == 0:
return
for attempt in _retry():
with attempt:
batch_response = self._remote.cas.BatchReadBlobs(self._request)
for response in batch_response.responses:
if response.status.code == grpc.StatusCode.NOT_FOUND.value[0]:
raise BlobNotFound(response.digest.hash, "Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.status.code != grpc.StatusCode.OK.value[0]:
raise CASError("Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.digest.size_bytes != len(response.data):
raise CASError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
response.digest.hash, response.digest.size_bytes, len(response.data)))
yield (response.digest, response.data)
# Represents a batch of blobs queued for upload.
#
class _CASBatchUpdate():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest, stream):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
blob_request = self._request.requests.add()
blob_request.digest.hash = digest.hash
blob_request.digest.size_bytes = digest.size_bytes
blob_request.data = stream.read(digest.size_bytes)
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if len(self._request.requests) == 0:
return
for attempt in _retry():
with attempt:
batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
for response in batch_response.responses:
if response.status.code != grpc.StatusCode.OK.value[0]:
raise CASError("Failed to upload blob {}: {}".format(
response.digest.hash, response.status.code))
def _grouper(iterable, n):
while True:
try:
current = next(iterable)
except StopIteration:
return
yield itertools.chain([current], itertools.islice(iterable, n - 1))
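# Example (illustrative): _grouper() expects an iterator (it calls next() on
# it directly) and lazily yields chunks of at most n items; this is how
# FindMissingBlobs requests are capped at 512 digests in _send_directory().
#
#     groups = _grouper(iter(range(5)), 2)
#     # -> chained iterators yielding (0, 1), (2, 3), (4,)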
buildstream-1.6.9/buildstream/_artifactcache/casserver.py 0000664 0000000 0000000 00000046251 14375152700 0023660 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jürg Billeter
from concurrent import futures
import logging
import os
import signal
import sys
import tempfile
import uuid
import errno
import threading
import click
import grpc
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .._exceptions import CASError
from .cascache import CASCache
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
# Trying to push an artifact that is too large
class ArtifactTooLargeException(Exception):
pass
# We need a message handler because this will own an ArtifactCache
# which can in turn fire messages.
def message_handler(message, context):
logging.info(message.message)
logging.info(message.detail)
# create_server():
#
# Create gRPC CAS artifact server as specified in the Remote Execution API.
#
# Args:
# repo (str): Path to CAS repository
# enable_push (bool): Whether to allow blob uploads and artifact updates
#
def create_server(repo, *, enable_push,
max_head_size=int(10e9),
min_head_size=int(2e9)):
cas = CASCache(os.path.abspath(repo))
# Use max_workers default from Python 3.5+
max_workers = (os.cpu_count() or 1) * 5
server = grpc.server(futures.ThreadPoolExecutor(max_workers))
cache_cleaner = _CacheCleaner(cas, max_head_size, min_head_size)
bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
_ByteStreamServicer(cas, cache_cleaner, enable_push=enable_push), server)
remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
_ContentAddressableStorageServicer(cas, cache_cleaner, enable_push=enable_push), server)
remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
_CapabilitiesServicer(), server)
buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
_ReferenceStorageServicer(cas, enable_push=enable_push), server)
return server
@click.command(short_help="CAS Artifact Server")
@click.option('--port', '-p', type=click.INT, required=True, help="Port number")
@click.option('--server-key', help="Private server key for TLS (PEM-encoded)")
@click.option('--server-cert', help="Public server certificate for TLS (PEM-encoded)")
@click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)")
@click.option('--enable-push', default=False, is_flag=True,
help="Allow clients to upload blobs and update artifact cache")
@click.option('--head-room-min', type=click.INT,
help="Disk head room minimum in bytes",
default=2e9)
@click.option('--head-room-max', type=click.INT,
help="Disk head room maximum in bytes",
default=10e9)
@click.argument('repo')
def server_main(repo, port, server_key, server_cert, client_certs, enable_push,
head_room_min, head_room_max):
server = create_server(repo,
max_head_size=head_room_max,
min_head_size=head_room_min,
enable_push=enable_push)
use_tls = bool(server_key)
if bool(server_cert) != use_tls:
click.echo("ERROR: --server-key and --server-cert are both required for TLS", err=True)
sys.exit(-1)
if client_certs and not use_tls:
click.echo("ERROR: --client-certs can only be used with --server-key", err=True)
sys.exit(-1)
if use_tls:
# Read public/private key pair
with open(server_key, 'rb') as f:
server_key_bytes = f.read()
with open(server_cert, 'rb') as f:
server_cert_bytes = f.read()
if client_certs:
with open(client_certs, 'rb') as f:
client_certs_bytes = f.read()
else:
client_certs_bytes = None
credentials = grpc.ssl_server_credentials([(server_key_bytes, server_cert_bytes)],
root_certificates=client_certs_bytes,
require_client_auth=bool(client_certs))
server.add_secure_port('[::]:{}'.format(port), credentials)
else:
server.add_insecure_port('[::]:{}'.format(port))
# Run artifact server
server.start()
try:
while True:
signal.pause()
except KeyboardInterrupt:
server.stop(0)
class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
def __init__(self, cas, cache_cleaner, *, enable_push):
super().__init__()
self.cas = cas
self.enable_push = enable_push
self.cache_cleaner = cache_cleaner
def Read(self, request, context):
resource_name = request.resource_name
client_digest = _digest_from_download_resource_name(resource_name)
if client_digest is None:
context.set_code(grpc.StatusCode.NOT_FOUND)
return
if request.read_offset > client_digest.size_bytes:
context.set_code(grpc.StatusCode.OUT_OF_RANGE)
return
try:
with open(self.cas.objpath(client_digest), 'rb') as f:
if os.fstat(f.fileno()).st_size != client_digest.size_bytes:
context.set_code(grpc.StatusCode.NOT_FOUND)
return
if request.read_offset > 0:
f.seek(request.read_offset)
remaining = client_digest.size_bytes - request.read_offset
while remaining > 0:
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
response = bytestream_pb2.ReadResponse()
# max. _MAX_PAYLOAD_BYTES per chunk
response.data = f.read(chunk_size)
yield response
except FileNotFoundError:
context.set_code(grpc.StatusCode.NOT_FOUND)
def Write(self, request_iterator, context):
response = bytestream_pb2.WriteResponse()
if not self.enable_push:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
return response
offset = 0
finished = False
resource_name = None
with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
for request in request_iterator:
if finished or request.write_offset != offset:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
if resource_name is None:
# First request
resource_name = request.resource_name
client_digest = _digest_from_upload_resource_name(resource_name)
if client_digest is None:
context.set_code(grpc.StatusCode.NOT_FOUND)
return response
while True:
if client_digest.size_bytes == 0:
break
try:
self.cache_cleaner.clean_up(client_digest.size_bytes)
except ArtifactTooLargeException as e:
context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
context.set_details(str(e))
return response
try:
os.posix_fallocate(out.fileno(), 0, client_digest.size_bytes)
break
except OSError as e:
# Multiple uploads can happen at the same time
if e.errno != errno.ENOSPC:
raise
elif request.resource_name:
# If it is set on subsequent calls, it **must** match the value of the first request.
if request.resource_name != resource_name:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
if (offset + len(request.data)) > client_digest.size_bytes:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
out.write(request.data)
offset += len(request.data)
if request.finish_write:
if client_digest.size_bytes != offset:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
out.flush()
digest = self.cas.add_object(path=out.name, link_directly=True)
if digest.hash != client_digest.hash:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
finished = True
assert finished
response.committed_size = offset
return response
class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
def __init__(self, cas, cache_cleaner, *, enable_push):
super().__init__()
self.cas = cas
self.enable_push = enable_push
self.cache_cleaner = cache_cleaner
def FindMissingBlobs(self, request, context):
response = remote_execution_pb2.FindMissingBlobsResponse()
for digest in request.blob_digests:
objpath = self.cas.objpath(digest)
try:
os.utime(objpath)
except OSError as e:
if e.errno != errno.ENOENT:
raise
d = response.missing_blob_digests.add()
d.hash = digest.hash
d.size_bytes = digest.size_bytes
return response
def BatchReadBlobs(self, request, context):
response = remote_execution_pb2.BatchReadBlobsResponse()
batch_size = 0
for digest in request.digests:
batch_size += digest.size_bytes
if batch_size > _MAX_PAYLOAD_BYTES:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return response
blob_response = response.responses.add()
blob_response.digest.hash = digest.hash
blob_response.digest.size_bytes = digest.size_bytes
try:
with open(self.cas.objpath(digest), 'rb') as f:
if os.fstat(f.fileno()).st_size != digest.size_bytes:
blob_response.status.code = grpc.StatusCode.NOT_FOUND.value[0]
continue
blob_response.data = f.read(digest.size_bytes)
except FileNotFoundError:
blob_response.status.code = grpc.StatusCode.NOT_FOUND.value[0]
return response
def BatchUpdateBlobs(self, request, context):
response = remote_execution_pb2.BatchUpdateBlobsResponse()
if not self.enable_push:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
return response
batch_size = 0
for blob_request in request.requests:
digest = blob_request.digest
batch_size += digest.size_bytes
if batch_size > _MAX_PAYLOAD_BYTES:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return response
blob_response = response.responses.add()
blob_response.digest.hash = digest.hash
blob_response.digest.size_bytes = digest.size_bytes
if len(blob_request.data) != digest.size_bytes:
blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION.value[0]
continue
try:
self.cache_cleaner.clean_up(digest.size_bytes)
with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
out.write(blob_request.data)
out.flush()
server_digest = self.cas.add_object(path=out.name)
if server_digest.hash != digest.hash:
blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION.value[0]
except ArtifactTooLargeException:
blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED.value[0]
return response
class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
def GetCapabilities(self, request, context):
response = remote_execution_pb2.ServerCapabilities()
cache_capabilities = response.cache_capabilities
cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
cache_capabilities.action_cache_update_capabilities.update_enabled = False
cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
response.deprecated_api_version.major = 2
response.low_api_version.major = 2
response.high_api_version.major = 2
return response
class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
def __init__(self, cas, *, enable_push):
super().__init__()
self.cas = cas
self.enable_push = enable_push
def GetReference(self, request, context):
response = buildstream_pb2.GetReferenceResponse()
try:
tree = self.cas.resolve_ref(request.key, update_mtime=True)
try:
self.cas.update_tree_mtime(tree)
except FileNotFoundError:
self.cas.remove(request.key, defer_prune=True)
context.set_code(grpc.StatusCode.NOT_FOUND)
return response
response.digest.hash = tree.hash
response.digest.size_bytes = tree.size_bytes
except CASError:
context.set_code(grpc.StatusCode.NOT_FOUND)
return response
def UpdateReference(self, request, context):
response = buildstream_pb2.UpdateReferenceResponse()
if not self.enable_push:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
return response
for key in request.keys:
self.cas.set_ref(key, request.digest)
return response
def Status(self, request, context):
response = buildstream_pb2.StatusResponse()
response.allow_updates = self.enable_push
return response
def _digest_from_download_resource_name(resource_name):
parts = resource_name.split('/')
# Accept requests from non-conforming BuildStream 1.1.x clients
if len(parts) == 2:
parts.insert(0, 'blobs')
if len(parts) != 3 or parts[0] != 'blobs':
return None
try:
digest = remote_execution_pb2.Digest()
digest.hash = parts[1]
digest.size_bytes = int(parts[2])
return digest
except ValueError:
return None
def _digest_from_upload_resource_name(resource_name):
parts = resource_name.split('/')
# Accept requests from non-conforming BuildStream 1.1.x clients
if len(parts) == 2:
parts.insert(0, 'uploads')
parts.insert(1, str(uuid.uuid4()))
parts.insert(2, 'blobs')
if len(parts) < 5 or parts[0] != 'uploads' or parts[2] != 'blobs':
return None
try:
uuid_ = uuid.UUID(hex=parts[1])
if uuid_.version != 4:
return None
digest = remote_execution_pb2.Digest()
digest.hash = parts[3]
digest.size_bytes = int(parts[4])
return digest
except ValueError:
return None
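# Example (illustrative sketch only; the hash, size and uuid values below
# are hypothetical, the hash shown is simply the sha256 of an empty blob):
#
#   # Download resource names take the form "blobs/<hash>/<size>":
#   digest = _digest_from_download_resource_name(
#       "blobs/e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0")
#
#   # Upload resource names take the form "uploads/<uuid4>/blobs/<hash>/<size>":
#   digest = _digest_from_upload_resource_name(
#       "uploads/7f9d5e3a-0b1c-4d2e-8f3a-5b6c7d8e9f01/blobs/"
#       "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0")
#
#   # Both helpers return None when the resource name cannot be parsed.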
class _CacheCleaner:
__cleanup_cache_lock = threading.Lock()
def __init__(self, cas, max_head_size, min_head_size=int(2e9)):
self.__cas = cas
self.__max_head_size = max_head_size
self.__min_head_size = min_head_size
def __has_space(self, object_size):
stats = os.statvfs(self.__cas.casdir)
free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size
total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size
if object_size > total_disk_space:
raise ArtifactTooLargeException("Artifact of size: {} is too large for "
"the filesystem which mounts the remote "
"cache".format(object_size))
return object_size <= free_disk_space
# clean_up()
#
# Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
# is enough space for the incoming artifact
#
# Args:
# object_size: The size of the object being received in bytes
#
# Returns:
# int: The total bytes removed on the filesystem
#
def clean_up(self, object_size):
if self.__has_space(object_size):
return 0
with _CacheCleaner.__cleanup_cache_lock:
if self.__has_space(object_size):
# Another thread has done the cleanup for us
return 0
stats = os.statvfs(self.__cas.casdir)
target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size
# obtain a list of LRP artifacts
LRP_objects = self.__cas.list_objects()
removed_size = 0 # in bytes
last_mtime = 0
while object_size - removed_size > target_disk_space:
try:
last_mtime, to_remove = LRP_objects.pop(0) # The first element in the list is the LRP artifact
except IndexError as e:
# This exception is caught when there are no more objects left in
# LRP_objects. This means the artifact is too large for the filesystem,
# so we abort the process
raise ArtifactTooLargeException("Artifact of size {} is too large for "
"the filesystem which mounts the remote "
"cache".format(object_size)) from e
try:
size = os.stat(to_remove).st_size
os.unlink(to_remove)
removed_size += size
except FileNotFoundError:
pass
self.__cas.clean_up_refs_until(last_mtime)
if removed_size > 0:
logging.info("Successfully removed {} bytes from the cache".format(removed_size))
else:
logging.info("No artifacts were removed from the cache.")
return removed_size
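# Example (illustrative sketch; `cas` is assumed to be a CASCache instance
# and the sizes used here are hypothetical):
#
#   cleaner = _CacheCleaner(cas, max_head_size=int(10e9))
#   try:
#       removed = cleaner.clean_up(incoming_blob_size)
#   except ArtifactTooLargeException:
#       # The object can never fit on this filesystem, reject the upload
#       ...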
buildstream-1.6.9/buildstream/_cachekey.py 0000664 0000000 0000000 00000002506 14375152700 0020651 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
import hashlib
import ujson
from . import _yaml
# generate_key()
#
# Generate an sha256 hex digest from the given value. The value
# can be a simple value or recursive dictionary with lists etc,
# anything simple enough to serialize.
#
# Args:
# value: A value to get a key for
#
# Returns:
# (str): An sha256 hex digest of the given value
#
def generate_key(value):
ordered = _yaml.node_sanitize(value)
ustring = ujson.dumps(ordered, sort_keys=True, escape_forward_slashes=False).encode('utf-8')
return hashlib.sha256(ustring).hexdigest()
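# Example (illustrative sketch, the input dictionary is hypothetical):
#
#   key = generate_key({'kind': 'autotools', 'ref': None, 'config': {}})
#   # 'key' is a 64 character sha256 hex digest; equal inputs always
#   # yield the same key because the value is sanitized and serialized
#   # with sorted keys before hashing.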
buildstream-1.6.9/buildstream/_context.py 0000664 0000000 0000000 00000051767 14375152700 0020576 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
import os
import datetime
from collections import deque
from collections.abc import Mapping
from contextlib import contextmanager
from . import utils
from . import _cachekey
from . import _signals
from . import _site
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end
from ._artifactcache import ArtifactCache, ArtifactCacheUsage
from ._workspaces import Workspaces
from .plugin import Plugin
# Context()
#
# The Context object holds all of the user preferences
# and context for a given invocation of BuildStream.
#
# This is a collection of data from configuration files and command
# line arguments and consists of information such as where to store
# logs and artifacts, where to perform builds and cache downloaded sources,
# verbosity levels and basically anything pertaining to the context
# in which BuildStream was invoked.
#
class Context():
def __init__(self):
# Filename indicating which configuration file was used, or None for the defaults
self.config_origin = None
# The directory where various sources are stored
self.sourcedir = None
# The directory where build sandboxes will be created
self.builddir = None
# The local binary artifact cache directory
self.artifactdir = None
# The locations from which to push and pull prebuilt artifacts
self.artifact_cache_specs = []
# The directory to store build logs
self.logdir = None
# The abbreviated cache key length to display in the UI
self.log_key_length = 0
# Whether debug mode is enabled
self.log_debug = False
# Whether verbose mode is enabled
self.log_verbose = False
# Maximum number of lines to print from build logs
self.log_error_lines = 0
# Maximum number of lines to print in the master log for a detailed message
self.log_message_lines = 0
# Format string for printing the pipeline at startup time
self.log_element_format = None
# Format string for printing message lines in the master log
self.log_message_format = None
# Maximum number of fetch or refresh tasks
self.sched_fetchers = 4
# Maximum number of build tasks
self.sched_builders = 4
# Maximum number of push tasks
self.sched_pushers = 4
# Maximum number of retries for network tasks
self.sched_network_retries = 2
# What to do when a build fails in non interactive mode
self.sched_error_action = 'continue'
# Maximum jobs per build
self.build_max_jobs = None
# Whether elements must be rebuilt when their dependencies have changed
self._strict_build_plan = None
# Make sure the XDG vars are set in the environment before loading anything
self._init_xdg()
# Private variables
self._cache_key = None
self._message_handler = None
self._message_depth = deque()
self._artifactcache = None
self._projects = []
self._project_overrides = {}
self._workspaces = None
self._log_handle = None
self._log_filename = None
self.config_cache_quota = 'infinity'
self.artifactdir_volume = None
# load()
#
# Loads the configuration files
#
# Args:
# config (filename): The user specified configuration file, if any
#
# Raises:
# LoadError
#
# This will first load the BuildStream default configuration and then
# override that configuration with the configuration file indicated
# by *config*, if any was specified.
#
def load(self, config=None):
profile_start(Topics.LOAD_CONTEXT, 'load')
# If a specific config file is not specified, default to trying
# a $XDG_CONFIG_HOME/buildstream.conf file
#
if not config:
#
# Support parallel installations of BuildStream by first
# trying buildstream1.conf and then falling back to buildstream.conf.
#
for config_filename in ("buildstream1.conf", "buildstream.conf"):
default_config = os.path.join(os.environ["XDG_CONFIG_HOME"], config_filename)
if os.path.exists(default_config):
config = default_config
break
# Load default config
#
defaults = _yaml.load(_site.default_user_config)
if config:
self.config_origin = os.path.abspath(config)
user_config = _yaml.load(config)
_yaml.composite(defaults, user_config)
_yaml.node_validate(defaults, [
'sourcedir', 'builddir', 'artifactdir', 'logdir',
'scheduler', 'artifacts', 'logging', 'projects',
'cache', 'build'
])
for directory in ['sourcedir', 'builddir', 'artifactdir', 'logdir']:
# Allow the ~ tilde expansion and any environment variables in
# path specification in the config files.
#
path = _yaml.node_get(defaults, str, directory)
path = os.path.expanduser(path)
path = os.path.expandvars(path)
path = os.path.normpath(path)
setattr(self, directory, path)
# Relative paths don't make sense in user configuration. The exception is
# workspacedir where `.` is useful as it will be combined with the name
# specified on the command line.
if not os.path.isabs(path) and not (directory == 'workspacedir' and path == '.'):
raise LoadError("{} must be an absolute path".format(directory), LoadErrorReason.INVALID_DATA)
# Load quota configuration
# We need to find the first existing directory in the path of
# our artifactdir - the artifactdir may not have been created
# yet.
cache = _yaml.node_get(defaults, Mapping, 'cache')
_yaml.node_validate(cache, ['quota'])
self.config_cache_quota = _yaml.node_get(cache, str, 'quota', default_value='infinity')
# Load artifact share configuration
self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults)
# Load logging config
logging = _yaml.node_get(defaults, Mapping, 'logging')
_yaml.node_validate(logging, [
'key-length', 'verbose',
'error-lines', 'message-lines',
'debug', 'element-format', 'message-format'
])
self.log_key_length = _yaml.node_get(logging, int, 'key-length')
self.log_debug = _yaml.node_get(logging, bool, 'debug')
self.log_verbose = _yaml.node_get(logging, bool, 'verbose')
self.log_error_lines = _yaml.node_get(logging, int, 'error-lines')
self.log_message_lines = _yaml.node_get(logging, int, 'message-lines')
self.log_element_format = _yaml.node_get(logging, str, 'element-format')
self.log_message_format = _yaml.node_get(logging, str, 'message-format')
# Load scheduler config
scheduler = _yaml.node_get(defaults, Mapping, 'scheduler')
_yaml.node_validate(scheduler, [
'on-error', 'fetchers', 'builders',
'pushers', 'network-retries'
])
self.sched_error_action = _yaml.node_get(scheduler, str, 'on-error')
self.sched_fetchers = _yaml.node_get(scheduler, int, 'fetchers')
self.sched_builders = _yaml.node_get(scheduler, int, 'builders')
self.sched_pushers = _yaml.node_get(scheduler, int, 'pushers')
self.sched_network_retries = _yaml.node_get(scheduler, int, 'network-retries')
# Load build config
build = _yaml.node_get(defaults, dict, 'build')
_yaml.node_validate(build, ['max-jobs'])
self.build_max_jobs = _yaml.node_get(build, int, 'max-jobs')
# Load per-projects overrides
self._project_overrides = _yaml.node_get(defaults, Mapping, 'projects', default_value={})
# Shallow validation of overrides, parts of buildstream which rely
# on the overrides are expected to validate elsewhere.
for _, overrides in _yaml.node_items(self._project_overrides):
_yaml.node_validate(overrides, ['artifacts', 'options', 'strict', 'default-mirror'])
profile_end(Topics.LOAD_CONTEXT, 'load')
valid_actions = ['continue', 'quit']
if self.sched_error_action not in valid_actions:
provenance = _yaml.node_get_provenance(scheduler, 'on-error')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: on-error should be one of: {}".format(
provenance, ", ".join(valid_actions)))
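# Example (illustrative sketch of how a frontend typically drives this;
# `my_handler` is a hypothetical callable with the message() signature):
#
#   context = Context()
#   context.load()                 # raises LoadError on bad configuration
#   context.set_message_handler(my_handler)
#   cache = context.artifactcache  # lazily constructed ArtifactCache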
@property
def artifactcache(self):
if not self._artifactcache:
self._artifactcache = ArtifactCache(self)
return self._artifactcache
# get_artifact_cache_usage()
#
# Fetches the current usage of the artifact cache
#
# Returns:
# (ArtifactCacheUsage): The current status
#
def get_artifact_cache_usage(self):
return ArtifactCacheUsage(self.artifactcache)
# add_project():
#
# Add a project to the context.
#
# Args:
# project (Project): The project to add
#
def add_project(self, project):
if not self._projects:
self._workspaces = Workspaces(project)
self._projects.append(project)
# get_projects():
#
# Return the list of projects in the context.
#
# Returns:
# (list): The list of projects
#
def get_projects(self):
return self._projects
# get_toplevel_project():
#
# Return the toplevel project, the one which BuildStream was
# invoked with as opposed to a junctioned subproject.
#
# Returns:
# (Project): The toplevel Project object
#
def get_toplevel_project(self):
return self._projects[0]
def get_workspaces(self):
return self._workspaces
# get_overrides():
#
# Fetch the override dictionary for the active project. This returns
# a node loaded from YAML and as such, values loaded from the returned
# node should be loaded using the _yaml.node_get() family of functions.
#
# Args:
# project_name (str): The project name
#
# Returns:
# (Mapping): The overrides dictionary for the specified project
#
def get_overrides(self, project_name):
return _yaml.node_get(self._project_overrides, Mapping, project_name, default_value={})
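# Example (illustrative sketch, the project name is hypothetical):
#
#   overrides = context.get_overrides('myproject')
#   strict = _yaml.node_get(overrides, bool, 'strict', default_value=True)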
# get_strict():
#
# Fetch whether we are strict or not
#
# Returns:
# (bool): Whether or not to use strict build plan
#
def get_strict(self):
# If it was set by the CLI, it overrides any config
if self._strict_build_plan is not None:
return self._strict_build_plan
toplevel = self.get_toplevel_project()
overrides = self.get_overrides(toplevel.name)
return _yaml.node_get(overrides, bool, 'strict', default_value=True)
# get_cache_key():
#
# Returns the cache key, calculating it if necessary
#
# Returns:
# (str): A hex digest cache key for the Context
#
def get_cache_key(self):
if self._cache_key is None:
# Anything that alters the build goes into the unique key
self._cache_key = _cachekey.generate_key({})
return self._cache_key
# set_message_handler()
#
# Sets the handler for any status messages propagated through
# the context.
#
# The message handler should have the same signature as
# the message() method
def set_message_handler(self, handler):
self._message_handler = handler
# silent_messages():
#
# Returns:
# (bool): Whether messages are currently being silenced
#
def silent_messages(self):
for silent in self._message_depth:
if silent:
return True
return False
# message():
#
# Proxies a message back to the caller, this is the central
# point through which all messages pass.
#
# Args:
# message: A Message object
#
def message(self, message):
# Tag message only once
if message.depth is None:
message.depth = len(list(self._message_depth))
# If we are recording messages, dump a copy into the open log file.
self._record_message(message)
# Send it off to the log handler (can be the frontend,
# or it can be the child task which will propagate
# to the frontend)
assert self._message_handler
self._message_handler(message, context=self)
# silence()
#
# A context manager to silence messages, this behaves in
# the same way as the `silent_nested` argument of the
# Context._timed_activity() context manager: especially
# important messages will not be silenced.
#
@contextmanager
def silence(self):
self._push_message_depth(True)
try:
yield
finally:
self._pop_message_depth()
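# Example (illustrative sketch; do_noisy_work() is a hypothetical helper):
#
#   with context.silence():
#       # Ordinary status messages emitted here are suppressed,
#       # unconditional messages still reach the frontend.
#       do_noisy_work()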
# timed_activity()
#
# Context manager for performing timed activities and logging those
#
# Args:
# unique_id (int): Optional unique id of the plugin this activity relates to
# activity_name (str): The name of the activity
# detail (str): An optional detailed message, can be multiline output
# silent_nested (bool): If specified, nested messages will be silenced
#
@contextmanager
def timed_activity(self, activity_name, *, unique_id=None, detail=None, silent_nested=False):
starttime = datetime.datetime.now()
stopped_time = None
def stop_time():
nonlocal stopped_time
stopped_time = datetime.datetime.now()
def resume_time():
nonlocal stopped_time
nonlocal starttime
sleep_time = datetime.datetime.now() - stopped_time
starttime += sleep_time
with _signals.suspendable(stop_time, resume_time):
try:
# Push activity depth for status messages
message = Message(unique_id, MessageType.START, activity_name, detail=detail)
self.message(message)
self._push_message_depth(silent_nested)
yield
except BstError:
# Note the failure in status messages and reraise, the scheduler
# expects an error when there is an error.
elapsed = datetime.datetime.now() - starttime
message = Message(unique_id, MessageType.FAIL, activity_name, elapsed=elapsed)
self._pop_message_depth()
self.message(message)
raise
elapsed = datetime.datetime.now() - starttime
message = Message(unique_id, MessageType.SUCCESS, activity_name, elapsed=elapsed)
self._pop_message_depth()
self.message(message)
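# Example (illustrative sketch; initialize_remotes() is a hypothetical helper):
#
#   with context.timed_activity("Initializing remote caches", silent_nested=True):
#       initialize_remotes()
#
#   # A START message is emitted on entry; a SUCCESS or FAIL message
#   # carrying the elapsed time is emitted when the block exits.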
# recorded_messages()
#
# Records all messages in a log file while the context manager
# is active.
#
# In addition to automatically writing all messages to the
# specified logging file, an open file handle for process stdout
# and stderr will be available via the Context.get_log_handle() API,
# and the full logfile path will be available via the
# Context.get_log_filename() API.
#
# Args:
# filename (str): A logging directory relative filename,
# the pid and .log extension will be automatically
# appended
#
# Yields:
# (str): The fully qualified log filename
#
@contextmanager
def recorded_messages(self, filename):
# We don't allow recursing in this context manager, and
# we also do not allow it in the main process.
assert self._log_handle is None
assert self._log_filename is None
assert not utils._is_main_process()
# Create the fully qualified logfile in the log directory,
# appending the pid and .log extension at the end.
self._log_filename = os.path.join(self.logdir,
'{}.{}.log'.format(filename, os.getpid()))
# Ensure the directory exists first
directory = os.path.dirname(self._log_filename)
os.makedirs(directory, exist_ok=True)
with open(self._log_filename, 'a', encoding='utf-8') as logfile:
# Write one last line to the log and flush it to disk
def flush_log():
# If the process currently had something happening in the I/O stack
# then trying to reenter the I/O stack will fire a runtime error.
#
# So just try to flush as well as we can at SIGTERM time
try:
logfile.write('\n\nForcefully terminated\n')
logfile.flush()
except RuntimeError:
os.fsync(logfile.fileno())
self._log_handle = logfile
with _signals.terminator(flush_log):
yield self._log_filename
self._log_handle = None
self._log_filename = None
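# Example (illustrative sketch, only valid inside a child task; the
# 'track/element-name' filename is hypothetical):
#
#   with context.recorded_messages('track/element-name') as filename:
#       # Messages are now mirrored to <logdir>/track/element-name.<pid>.log
#       context.message(Message(None, MessageType.INFO, "Tracking element"))
#       assert context.get_log_filename() == filename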
# get_log_handle()
#
# Fetches the active log handle, this will return the active
# log file handle when the Context.recorded_messages() context
# manager is active
#
# Returns:
# (file): The active logging file handle, or None
#
def get_log_handle(self):
return self._log_handle
# get_log_filename()
#
# Fetches the active log filename, this will return the active
# log filename when the Context.recorded_messages() context
# manager is active
#
# Returns:
# (str): The active logging filename, or None
#
def get_log_filename(self):
return self._log_filename
# _record_message()
#
# Records the message if recording is enabled
#
# Args:
# message (Message): The message to record
#
def _record_message(self, message):
if self._log_handle is None:
return
INDENT = " "
EMPTYTIME = "--:--:--"
template = "[{timecode: <8}] {type: <7}"
# If this message is associated with a plugin, print what
# we know about the plugin.
plugin_name = ""
if message.unique_id:
template += " {plugin}"
plugin = Plugin._lookup(message.unique_id)
plugin_name = plugin.name
template += ": {message}"
detail = ''
if message.detail is not None:
template += "\n\n{detail}"
detail = message.detail.rstrip('\n')
detail = INDENT + INDENT.join(detail.splitlines(True))
timecode = EMPTYTIME
if message.message_type in (MessageType.SUCCESS, MessageType.FAIL):
hours, remainder = divmod(int(message.elapsed.total_seconds()), 60**2)
minutes, seconds = divmod(remainder, 60)
timecode = "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)
text = template.format(timecode=timecode,
plugin=plugin_name,
type=message.message_type.upper(),
message=message.message,
detail=detail)
# Write to the open log file
self._log_handle.write('{}\n'.format(text))
self._log_handle.flush()
# _push_message_depth() / _pop_message_depth()
#
# For status messages, send the depth of timed
# activities inside a given task through the message
#
def _push_message_depth(self, silent_nested):
self._message_depth.appendleft(silent_nested)
def _pop_message_depth(self):
assert self._message_depth
self._message_depth.popleft()
# Force the resolved XDG variables into the environment,
# this is so that they can be used directly to specify
# preferred locations of things from user configuration
# files.
def _init_xdg(self):
if not os.environ.get('XDG_CACHE_HOME'):
os.environ['XDG_CACHE_HOME'] = os.path.expanduser('~/.cache')
if not os.environ.get('XDG_CONFIG_HOME'):
os.environ['XDG_CONFIG_HOME'] = os.path.expanduser('~/.config')
if not os.environ.get('XDG_DATA_HOME'):
os.environ['XDG_DATA_HOME'] = os.path.expanduser('~/.local/share')
buildstream-1.6.9/buildstream/_elementfactory.py 0000664 0000000 0000000 00000004522 14375152700 0022116 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
from . import _site
from ._plugincontext import PluginContext
from .element import Element
# A ElementFactory creates Element instances
# in the context of a given factory
#
# Args:
# plugin_base (PluginBase): The main PluginBase object to work with
# plugin_origins (list): Data used to search for external Element plugins
#
class ElementFactory(PluginContext):
def __init__(self, plugin_base, *,
format_versions=None,
plugin_origins=None):
if format_versions is None:
format_versions = {}
super().__init__(plugin_base, Element, [_site.element_plugins],
plugin_origins=plugin_origins,
format_versions=format_versions)
# create():
#
# Create an Element object, the pipeline uses this to create Element
# objects on demand for a given pipeline.
#
# Args:
# context (object): The Context object for processing
# project (object): The project object
# meta (object): The loaded MetaElement
#
# Returns: A newly created Element object of the appropriate kind
#
# Raises:
# PluginError (if the kind lookup failed)
# LoadError (if the element itself took issue with the config)
#
def create(self, context, project, meta):
element_type, default_config = self.lookup(meta.kind)
element = element_type(context, project, meta, default_config)
version = self._format_versions.get(meta.kind, 0)
self._assert_plugin_format(element, version)
return element
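# Example (illustrative sketch; plugin_base, plugin_origins and meta are
# assumed to be provided by the loading pipeline):
#
#   factory = ElementFactory(plugin_base, plugin_origins=plugin_origins)
#   element = factory.create(context, project, meta)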
buildstream-1.6.9/buildstream/_exceptions.py 0000664 0000000 0000000 00000020756 14375152700 0021265 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
# Tiago Gomes
from enum import Enum
# Disable pylint warnings for whole file here:
# pylint: disable=global-statement
# The last raised exception, this is used in test cases only
_last_task_error_domain = None
_last_task_error_reason = None
# get_last_task_error()
#
# Fetches the last exception from a task
#
# Used by regression tests
#
def get_last_task_error():
global _last_task_error_domain
global _last_task_error_reason
d = _last_task_error_domain
r = _last_task_error_reason
_last_task_error_domain = _last_task_error_reason = None
return (d, r)
# set_last_task_error()
#
# Sets the last exception of a task
#
# This is set by some internals to inform regression
# tests about how things failed in a machine readable way
#
def set_last_task_error(domain, reason):
global _last_task_error_domain
global _last_task_error_reason
_last_task_error_domain = domain
_last_task_error_reason = reason
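# Example (illustrative sketch of how a regression test might consume
# these; the domain and reason values shown are hypothetical):
#
#   set_last_task_error(ErrorDomain.SOURCE, 'fetch-failed')
#   domain, reason = get_last_task_error()
#   assert (domain, reason) == (ErrorDomain.SOURCE, 'fetch-failed')
#   # A second call returns (None, None): the stored error is cleared.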
class ErrorDomain(Enum):
PLUGIN = 1
LOAD = 2
IMPL = 3
PLATFORM = 4
SANDBOX = 5
ARTIFACT = 6
PIPELINE = 7
OSTREE = 8
UTIL = 9
PROG_NOT_FOUND = 12
SOURCE = 10
ELEMENT = 11
APP = 12
STREAM = 13
CAS = 15
# BstError is an internal base exception class for BuildSream
# exceptions.
#
# The sole purpose of using the base class is to add additional
# context to exceptions raised by plugins in child tasks, this
# context can then be communicated back to the main process.
#
class BstError(Exception):
def __init__(self, message, *, detail=None, domain=None, reason=None, temporary=False):
super().__init__(message)
# Additional error detail, these are used to construct detail
# portions of the logging messages when encountered.
#
self.detail = detail
# The build sandbox in which the error occurred, if the
# error occurred at element assembly time.
#
self.sandbox = None
# When this exception occurred during the handling of a job, indicate
# whether or not there is any point retrying the job.
#
self.temporary = temporary
# Error domain and reason
#
self.domain = domain
self.reason = reason
# PluginError
#
# Raised on plugin related errors.
#
# This exception is raised either by the plugin loading process,
# or by the base :class:`.Plugin` element itself.
#
class PluginError(BstError):
def __init__(self, message, *, detail=None, reason=None, temporary=False):
super().__init__(message, domain=ErrorDomain.PLUGIN, detail=detail, reason=reason, temporary=False)
# LoadErrorReason
#
# Describes the reason why a :class:`.LoadError` was raised.
#
class LoadErrorReason(Enum):
# A file was not found.
MISSING_FILE = 1
# The parsed data was not valid YAML.
INVALID_YAML = 2
# Data was malformed, a value was not of the expected type, etc
INVALID_DATA = 3
# An error occurred during YAML dictionary composition.
#
# This can happen by overriding a value with a new differently typed
# value, or by overwriting some named value when that was not allowed.
ILLEGAL_COMPOSITE = 4
# A circular dependency chain was detected
CIRCULAR_DEPENDENCY = 5
# A variable could not be resolved. This can happen if your project
# has cyclic dependencies in variable declarations, or, when substituting
# a string which refers to an undefined variable.
UNRESOLVED_VARIABLE = 6
# BuildStream does not support the required project format version
UNSUPPORTED_PROJECT = 7
# Project requires a newer version of a plugin than the one which was loaded
UNSUPPORTED_PLUGIN = 8
# A conditional expression failed to resolve
EXPRESSION_FAILED = 9
# An assertion was intentionally encoded into project YAML
USER_ASSERTION = 10
# A list composition directive did not apply to any underlying list
TRAILING_LIST_DIRECTIVE = 11
# Conflicting junctions in subprojects
CONFLICTING_JUNCTION = 12
# Failure to load a project from a specified junction
INVALID_JUNCTION = 13
# Subproject needs to be fetched
SUBPROJECT_FETCH_NEEDED = 14
# Subproject has no ref
SUBPROJECT_INCONSISTENT = 15
# An invalid symbol name was encountered
INVALID_SYMBOL_NAME = 16
# A project.conf file was missing
MISSING_PROJECT_CONF = 17
# Try to load a directory not a yaml file
LOADING_DIRECTORY = 18
# A project path leads outside of the project directory
PROJ_PATH_INVALID = 19
# A project path points to a file of the wrong kind (e.g. a socket)
PROJ_PATH_INVALID_KIND = 20
# A recursive include has been encountered.
RECURSIVE_INCLUDE = 21
# A recursive variable has been encountered
CIRCULAR_REFERENCE_VARIABLE = 22
# An attempt to set the value of a protected variable
PROTECTED_VARIABLE_REDEFINED = 23
# LoadError
#
# Raised while loading some YAML.
#
# Args:
# reason (LoadErrorReason): machine readable error reason
# message (str): human readable error explanation
#
# This exception is raised when loading or parsing YAML, or when
# interpreting project YAML
#
class LoadError(BstError):
def __init__(self, reason, message, *, detail=None):
super().__init__(message, detail=detail, domain=ErrorDomain.LOAD, reason=reason)
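# Example (illustrative sketch, the message text is hypothetical):
#
#   raise LoadError(LoadErrorReason.INVALID_DATA,
#                   "element.bst [line 4 column 2]: expected a list of strings")
#
#   # Note the argument order: unlike BstError, the machine readable
#   # reason comes first, followed by the human readable message.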
# ImplError
#
# Raised when a :class:`.Source` or :class:`.Element` plugin fails to
# implement a mandatory method
#
class ImplError(BstError):
def __init__(self, message, reason=None):
super().__init__(message, domain=ErrorDomain.IMPL, reason=reason)
# PlatformError
#
# Raised if the current platform is not supported.
class PlatformError(BstError):
def __init__(self, message, reason=None):
super().__init__(message, domain=ErrorDomain.PLATFORM, reason=reason)
# SandboxError
#
# Raised when errors are encountered by the sandbox implementation
#
class SandboxError(BstError):
def __init__(self, message, reason=None):
super().__init__(message, domain=ErrorDomain.SANDBOX, reason=reason)
# ArtifactError
#
# Raised when errors are encountered in the artifact caches
#
class ArtifactError(BstError):
def __init__(self, message, *, detail=None, reason=None, temporary=False):
super().__init__(message, detail=detail, domain=ErrorDomain.ARTIFACT, reason=reason, temporary=True)
# CASError
#
# Raised when errors are encountered in the CAS
#
class CASError(BstError):
def __init__(self, message, *, detail=None, reason=None, temporary=False):
super().__init__(message, detail=detail, domain=ErrorDomain.CAS, reason=reason, temporary=True)
# PipelineError
#
# Raised from pipeline operations
#
class PipelineError(BstError):
def __init__(self, message, *, detail=None, reason=None):
super().__init__(message, detail=detail, domain=ErrorDomain.PIPELINE, reason=reason)
# StreamError
#
# Raised when a stream operation fails
#
class StreamError(BstError):
def __init__(self, message=None, *, detail=None, reason=None, terminated=False):
# The empty string should never appear to a user,
# this only allows us to treat this internal error as
# a BstError from the frontend.
if message is None:
message = ""
super().__init__(message, detail=detail, domain=ErrorDomain.STREAM, reason=reason)
self.terminated = terminated
# AppError
#
# Raised from the frontend App directly
#
class AppError(BstError):
def __init__(self, message, detail=None, reason=None):
super().__init__(message, detail=detail, domain=ErrorDomain.APP, reason=reason)
# SkipJob
#
# Raised from a child process within a job when the job should be
# considered skipped by the parent process.
#
class SkipJob(Exception):
pass
buildstream-1.6.9/buildstream/_frontend/ 0000775 0000000 0000000 00000000000 14375152700 0020337 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_frontend/__init__.py 0000664 0000000 0000000 00000001672 14375152700 0022456 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
import os
from .cli import cli
if "_BST_COMPLETION" not in os.environ:
from .profile import Profile
from .status import Status
from .widget import LogLine
buildstream-1.6.9/buildstream/_frontend/app.py 0000664 0000000 0000000 00000110562 14375152700 0021476 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
import os
import sys
import resource
import traceback
import datetime
from enum import Enum
from textwrap import TextWrapper
from contextlib import contextmanager
import ujson
import click
from click import UsageError
# Import buildstream public symbols
from .. import Scope
# Import various buildstream internals
from .._context import Context
from .._platform import Platform
from .._project import Project
from .._exceptions import BstError, StreamError, LoadError, LoadErrorReason, AppError, get_last_task_error
from .._message import Message, MessageType, unconditional_messages
from .._stream import Stream
from .._versions import BST_FORMAT_VERSION
from .. import _yaml
from .._scheduler import ElementJob, JobStatus
# Import frontend assets
from . import Profile, LogLine, Status
# Indentation for all logging
INDENT = 4
# App()
#
# Main Application State
#
# Args:
# main_options (dict): The main CLI options of the `bst`
# command, before any subcommand
#
class App():
def __init__(self, main_options):
#
# Public members
#
self.context = None # The Context object
self.stream = None # The Stream object
self.project = None # The toplevel Project object
self.logger = None # The LogLine object
self.interactive = None # Whether we are running in interactive mode
self.colors = None # Whether to use colors in logging
#
# Private members
#
self._session_start = datetime.datetime.now()
self._session_name = None
self._main_options = main_options # Main CLI options, before any command
self._status = None # The Status object
self._fail_messages = {} # Failure messages by unique plugin id
self._interactive_failures = None # Whether to handle failures interactively
self._started = False # Whether a session has started
# UI Colors Profiles
self._content_profile = Profile(fg='yellow')
self._format_profile = Profile(fg='cyan', dim=True)
self._success_profile = Profile(fg='green')
self._error_profile = Profile(fg='red', dim=True)
self._detail_profile = Profile(dim=True)
#
# Early initialization
#
is_a_tty = sys.stdout.isatty() and sys.stderr.isatty()
# Enable interactive mode if we're attached to a tty
if main_options['no_interactive']:
self.interactive = False
else:
self.interactive = is_a_tty
# Handle errors interactively if we're in interactive mode
# and --on-error was not specified on the command line
if main_options.get('on_error') is not None:
self._interactive_failures = False
else:
self._interactive_failures = self.interactive
# Use color output if we're attached to a tty, unless
# otherwise specified on the command line
if main_options['colors'] is None:
self.colors = is_a_tty
elif main_options['colors']:
self.colors = True
else:
self.colors = False
# Increase the soft limit for open file descriptors to the maximum.
# SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
# Avoid hitting the limit too quickly.
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
if limits[0] != limits[1]:
# Set soft limit to hard limit
resource.setrlimit(resource.RLIMIT_NOFILE, (limits[1], limits[1]))
# create()
#
# Should be used instead of the regular constructor.
#
# This will select a platform specific App implementation
#
# Args:
# The same args as the App() constructor
#
@classmethod
def create(cls, *args, **kwargs):
if sys.platform.startswith('linux'):
# Use an App with linux specific features
from .linuxapp import LinuxApp # pylint: disable=import-outside-toplevel
return LinuxApp(*args, **kwargs)
else:
# The base App() class is default
return App(*args, **kwargs)
# initialized()
#
# Context manager to initialize the application and optionally run a session
# within the context manager.
#
# This context manager will take care of catching errors from within the
# context and report them consistently, so the CLI need not take care of
# reporting the errors and exiting with a consistent error status.
#
# Args:
# session_name (str): The name of the session, or None for no session
#
# Note that the except_ argument may have a subtly different meaning depending
# on the activity performed on the Pipeline. In normal circumstances the except_
# argument excludes elements from the `elements` list. In a build session, the
# except_ elements are excluded from the tracking plan.
#
# If a session_name is provided, we treat the block as a session, and print
# the session header and summary, and time the main session from startup time.
#
@contextmanager
def initialized(self, *, session_name=None):
directory = self._main_options['directory']
config = self._main_options['config']
self._session_name = session_name
#
# Load the Context
#
try:
self.context = Context()
self.context.load(config)
except BstError as e:
self._error_exit(e, "Error loading user configuration")
# Override things in the context from our command line options,
# the command line when used, trumps the config files.
#
override_map = {
'strict': '_strict_build_plan',
'debug': 'log_debug',
'verbose': 'log_verbose',
'error_lines': 'log_error_lines',
'message_lines': 'log_message_lines',
'on_error': 'sched_error_action',
'fetchers': 'sched_fetchers',
'builders': 'sched_builders',
'pushers': 'sched_pushers',
'max_jobs': 'build_max_jobs',
'network_retries': 'sched_network_retries'
}
for cli_option, context_attr in override_map.items():
option_value = self._main_options.get(cli_option)
if option_value is not None:
setattr(self.context, context_attr, option_value)
try:
Platform.get_platform()
except BstError as e:
self._error_exit(e, "Error instantiating platform")
# Create the logger right before setting the message handler
self.logger = LogLine(self.context,
self._content_profile,
self._format_profile,
self._success_profile,
self._error_profile,
self._detail_profile,
indent=INDENT)
# Propagate pipeline feedback to the user
self.context.set_message_handler(self._message_handler)
# Preflight the artifact cache after initializing logging,
# this can cause messages to be emitted.
try:
self.context.artifactcache.preflight()
except BstError as e:
self._error_exit(e, "Error instantiating artifact cache")
#
# Load the Project
#
try:
self.project = Project(directory, self.context, cli_options=self._main_options['option'],
default_mirror=self._main_options.get('default_mirror'))
except LoadError as e:
# Let's automatically start a `bst init` session in this case
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF and self.interactive:
click.echo("A project was not detected in the directory: {}".format(directory), err=True)
click.echo("", err=True)
if click.confirm("Would you like to create a new project here ?"):
self.init_project(None)
self._error_exit(e, "Error loading project")
except BstError as e:
self._error_exit(e, "Error loading project")
# Now that we have a logger and message handler,
# we can override the global exception hook.
sys.excepthook = self._global_exception_handler
# Create the stream right away, we'll need to pass it around
self.stream = Stream(self.context, self.project, self._session_start,
session_start_callback=self.session_start_cb,
interrupt_callback=self._interrupt_handler,
ticker_callback=self._tick,
job_start_callback=self._job_started,
job_complete_callback=self._job_completed)
# Create our status printer, only available in interactive
self._status = Status(self.context,
self._content_profile, self._format_profile,
self._success_profile, self._error_profile,
self.stream, colors=self.colors)
# Mark the beginning of the session
if session_name:
self._message(MessageType.START, session_name)
# Run the body of the session here, once everything is loaded
try:
yield
except BstError as e:
# Print a nice summary if this is a session
if session_name:
elapsed = self.stream.elapsed_time
if isinstance(e, StreamError) and e.terminated: # pylint: disable=no-member
self._message(MessageType.WARN, session_name + ' Terminated', elapsed=elapsed)
else:
self._message(MessageType.FAIL, session_name, elapsed=elapsed)
# Notify session failure
self._notify("{} failed".format(session_name), "{}".format(e))
if self._started:
self._print_summary()
# Exit with the error
self._error_exit(e)
except RecursionError:
click.echo("RecursionError: Dependency depth is too large. Maximum recursion depth exceeded.",
err=True)
sys.exit(-1)
else:
# No exceptions occurred, print session time and summary
if session_name:
self._message(MessageType.SUCCESS, session_name, elapsed=self.stream.elapsed_time)
if self._started:
self._print_summary()
# Notify session success
self._notify("{} succeeded".format(session_name), "")
# init_project()
#
# Initialize a new BuildStream project, either with the explicitly passed options,
# or by starting an interactive session if project_name is not specified and the
# application is running in interactive mode.
#
# Args:
# project_name (str): The project name, must be a valid symbol name
# format_version (int): The project format version, default is the latest version
# element_path (str): The subdirectory to store elements in, default is 'elements'
# force (bool): Allow overwriting an existing project.conf
#
def init_project(self, project_name, format_version=BST_FORMAT_VERSION, element_path='elements', force=False):
directory = self._main_options['directory']
directory = os.path.abspath(directory)
project_path = os.path.join(directory, 'project.conf')
elements_path = os.path.join(directory, element_path)
try:
# Abort if the project.conf already exists, unless `--force` was specified in `bst init`
if not force and os.path.exists(project_path):
raise AppError("A project.conf already exists at: {}".format(project_path),
reason='project-exists')
if project_name:
# If project name was specified, user interaction is not desired, just
# perform some validation and write the project.conf
_yaml.assert_symbol_name(None, project_name, 'project name')
self._assert_format_version(format_version)
self._assert_element_path(element_path)
elif not self.interactive:
raise AppError("Cannot initialize a new project without specifying the project name",
reason='unspecified-project-name')
else:
# Collect the parameters using an interactive session
project_name, format_version, element_path = \
self._init_project_interactive(project_name, format_version, element_path)
# Create the directory if it doesn't exist
try:
os.makedirs(directory, exist_ok=True)
except IOError as e:
raise AppError("Error creating project directory {}: {}".format(directory, e)) from e
# Create the elements sub-directory if it doesn't exist
try:
os.makedirs(elements_path, exist_ok=True)
except IOError as e:
raise AppError("Error creating elements sub-directory {}: {}"
.format(elements_path, e)) from e
# Don't use ruamel.yaml here, because it doesn't let
# us programmatically insert comments or whitespace at
# the toplevel.
try:
with open(project_path, 'w', encoding='utf-8') as f:
f.write("# Unique project name\n" +
"name: {}\n\n".format(project_name) +
"# Required BuildStream format version\n" +
"format-version: {}\n\n".format(format_version) +
"# Subdirectory where elements are stored\n" +
"element-path: {}\n".format(element_path))
except IOError as e:
raise AppError("Error writing {}: {}".format(project_path, e)) from e
except BstError as e:
self._error_exit(e)
click.echo("", err=True)
click.echo("Created project.conf at: {}".format(project_path), err=True)
sys.exit(0)
# shell_prompt():
#
# Creates a prompt for a shell environment, using ANSI color codes
# if they are available in the execution context.
#
# Args:
# element (Element): The Element object to resolve a prompt for
#
# Returns:
# (str): The formatted prompt to display in the shell
#
def shell_prompt(self, element):
_, key, dim = element._get_display_key()
element_name = element._get_full_name()
if self.colors:
prompt = self._format_profile.fmt('[') + \
self._content_profile.fmt(key, dim=dim) + \
self._format_profile.fmt('@') + \
self._content_profile.fmt(element_name) + \
self._format_profile.fmt(':') + \
self._content_profile.fmt('$PWD') + \
self._format_profile.fmt(']$') + ' '
else:
prompt = '[{}@{}:${{PWD}}]$ '.format(key, element_name)
return prompt
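# Example (illustrative output; the key and element name are hypothetical):
#
#   app.shell_prompt(element)
#   # Without color support this yields something like:
#   #   '[1a2b3c4d@base/alpine.bst:${PWD}]$ '
#   # With colors enabled the same fields are wrapped in ANSI codes.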
# cleanup()
#
# Cleans up application state
#
# This is called by Click at exit time
#
def cleanup(self):
if self.stream:
self.stream.cleanup()
############################################################
# Abstract Class Methods #
############################################################
# notify()
#
# Notify the user of something which occurred, this
# is intended to grab attention from the user.
#
# This is guaranteed to only be called in interactive mode
#
# Args:
# title (str): The notification title
# text (str): The notification text
#
def notify(self, title, text):
pass
############################################################
# Local Functions #
############################################################
# Local function for calling the notify() virtual method
#
def _notify(self, title, text):
if self.interactive:
self.notify(title, text)
# Local message propagator
#
def _message(self, message_type, message, **kwargs):
args = dict(kwargs)
self.context.message(
Message(None, message_type, message, **args))
# Exception handler
#
def _global_exception_handler(self, etype, value, tb):
# Print the regular BUG message
formatted = "".join(traceback.format_exception(etype, value, tb))
self._message(MessageType.BUG, str(value),
detail=formatted)
# If the scheduler has started, try to terminate all jobs gracefully,
# otherwise exit immediately.
if self.stream.running:
self.stream.terminate()
else:
sys.exit(-1)
#
# Render the status area, conditional on some internal state
#
def _maybe_render_status(self):
# If we're suspended or terminating, then don't render the status area
if self._status and self.stream and \
not (self.stream.suspended or self.stream.terminated):
self._status.render()
#
# Handle ^C SIGINT interruptions in the scheduling main loop
#
def _interrupt_handler(self):
# Only handle ^C interactively in interactive mode
if not self.interactive:
self._status.clear()
self.stream.terminate()
return
# Here we can give the user some choices, like whether they would
# like to continue, abort immediately, or only complete processing of
# the currently ongoing tasks. We can also print something more
# intelligent, like how many tasks remain to complete overall.
with self._interrupted():
click.echo("\nUser interrupted with ^C\n" +
"\n"
"Choose one of the following options:\n" +
" (c)ontinue - Continue queueing jobs as much as possible\n" +
" (q)uit - Exit after all ongoing jobs complete\n" +
" (t)erminate - Terminate any ongoing jobs and exit\n" +
"\n" +
"Pressing ^C again will terminate jobs and exit\n",
err=True)
try:
choice = click.prompt("Choice:",
value_proc=_prefix_choice_value_proc(['continue', 'quit', 'terminate']),
default='continue', err=True)
except (click.Abort, SystemError):
# In some cases, the readline buffer underlying the prompt gets corrupted on the second CTRL+C
# This throws a SystemError, which doesn't seem to be problematic for the rest of the program
# Ensure a newline after automatically printed '^C'
click.echo("", err=True)
choice = 'terminate'
if choice == 'terminate':
click.echo("\nTerminating all jobs at user request\n", err=True)
self.stream.terminate()
else:
if choice == 'quit':
click.echo("\nCompleting ongoing tasks before quitting\n", err=True)
self.stream.quit()
elif choice == 'continue':
click.echo("\nContinuing\n", err=True)
def _tick(self, elapsed):
self._maybe_render_status()
def _job_started(self, job):
self._status.add_job(job)
self._maybe_render_status()
def _job_completed(self, job, status):
self._status.remove_job(job)
self._maybe_render_status()
# Don't attempt to handle a failure if the user has already opted to
# terminate
if status == JobStatus.FAIL and not self.stream.terminated:
if isinstance(job, ElementJob):
element = job.element
queue = job.queue
# Get the last failure message for additional context
failure = self._fail_messages.get(element._unique_id)
# XXX This is dangerous, sometimes we get the job completed *before*
# the failure message reaches us ??
if not failure:
self._status.clear()
click.echo("\n\n\nBUG: Message handling out of sync, " +
"unable to retrieve failure message for element {}\n\n\n\n\n"
.format(element), err=True)
else:
self._handle_failure(element, queue, failure)
else:
click.echo("\nTerminating all jobs\n", err=True)
self.stream.terminate()
def _handle_failure(self, element, queue, failure):
# Handle non interactive mode setting of what to do when a job fails.
if not self._interactive_failures:
if self.context.sched_error_action == 'terminate':
self.stream.terminate()
elif self.context.sched_error_action == 'quit':
self.stream.quit()
elif self.context.sched_error_action == 'continue':
pass
return
# Interactive mode for element failures
with self._interrupted():
summary = ("\n{} failure on element: {}\n".format(failure.action_name, element.name) +
"\n" +
"Choose one of the following options:\n" +
" (c)ontinue - Continue queueing jobs as much as possible\n" +
" (q)uit - Exit after all ongoing jobs complete\n" +
" (t)erminate - Terminate any ongoing jobs and exit\n" +
" (r)etry - Retry this job\n")
if failure.logfile:
summary += " (l)og - View the full log file\n"
if failure.sandbox:
summary += " (s)hell - Drop into a shell in the failed build sandbox\n"
summary += "\nPressing ^C will terminate jobs and exit\n"
choices = ['continue', 'quit', 'terminate', 'retry']
if failure.logfile:
choices += ['log']
if failure.sandbox:
choices += ['shell']
choice = ''
while choice not in ['continue', 'quit', 'terminate', 'retry']:
click.echo(summary, err=True)
self._notify("BuildStream failure", "{} on element {}"
.format(failure.action_name, element.name))
try:
choice = click.prompt("Choice:", default='continue', err=True,
value_proc=_prefix_choice_value_proc(choices))
except (click.Abort, SystemError):
# In some cases, the readline buffer underlying the prompt gets corrupted on the second CTRL+C
# This throws a SystemError, which doesn't seem to be problematic for the rest of the program
# Ensure a newline after automatically printed '^C'
click.echo("", err=True)
choice = 'terminate'
# Handle choices which you can come back from
#
if choice == 'shell':
click.echo("\nDropping into an interactive shell in the failed build sandbox\n", err=True)
try:
prompt = self.shell_prompt(element)
self.stream.shell(element, Scope.BUILD, prompt, directory=failure.sandbox, isolate=True)
except BstError as e:
click.echo("Error while attempting to create interactive shell: {}".format(e), err=True)
elif choice == 'log':
with open(failure.logfile, 'r', encoding='utf-8') as logfile:
content = logfile.read()
click.echo_via_pager(content)
if choice == 'terminate':
click.echo("\nTerminating all jobs\n", err=True)
self.stream.terminate()
else:
if choice == 'quit':
click.echo("\nCompleting ongoing tasks before quitting\n", err=True)
self.stream.quit()
elif choice == 'continue':
click.echo("\nContinuing with other non failing elements\n", err=True)
elif choice == 'retry':
click.echo("\nRetrying failed job\n", err=True)
queue.failed_elements.remove(element)
queue.enqueue([element])
#
# Print the session heading if we've loaded a pipeline and there
# is going to be a session
#
def session_start_cb(self):
self._started = True
if self._session_name:
self.logger.print_heading(self.project,
self.stream,
log_file=self._main_options['log_file'],
styling=self.colors)
#
# Print a summary of the queues
#
def _print_summary(self):
click.echo("", err=True)
self.logger.print_summary(self.stream,
self._main_options['log_file'],
styling=self.colors)
# _error_exit()
#
# Exit with an error
#
# This will print the passed error to stderr and exit the program
# with -1 status
#
# Args:
# error (BstError): A BstError exception to print
# prefix (str): An optional string to prepend to the error message
#
def _error_exit(self, error, prefix=None):
click.echo("", err=True)
main_error = "{}".format(error)
if prefix is not None:
main_error = "{}: {}".format(prefix, main_error)
click.echo(main_error, err=True)
if error.detail:
indent = " " * INDENT
detail = '\n' + indent + indent.join(error.detail.splitlines(True))
click.echo("{}".format(detail), err=True)
# Record machine readable errors in a tempfile for the test harness to read back
if 'BST_TEST_ERROR_CODES' in os.environ:
task_error_domain, task_error_reason = get_last_task_error ()
error_codes = ujson.dumps ({
'main_error_domain': error.domain.value if error.domain else None,
'main_error_reason': error.reason.value if isinstance (error.reason, Enum) else error.reason,
'task_error_domain': task_error_domain.value if task_error_domain else None,
'task_error_reason': (
task_error_reason.value if isinstance (task_error_reason, Enum) else task_error_reason
)
})
with open (os.environ['BST_TEST_ERROR_CODES'], "w", encoding="utf-8") as f:
f.write (error_codes)
sys.exit(-1)
#
# Handle messages from the pipeline
#
def _message_handler(self, message, context):
# Drop status messages from the UI if not verbose, we'll still see
# info messages and status messages will still go to the log files.
if not context.log_verbose and message.message_type == MessageType.STATUS:
return
# Hold on to the failure messages
if message.message_type in [MessageType.FAIL, MessageType.BUG] and message.unique_id is not None:
self._fail_messages[message.unique_id] = message
# Send to frontend if appropriate
if self.context.silent_messages() and (message.message_type not in unconditional_messages):
return
if self._status:
self._status.clear()
text = self.logger.render(message)
click.echo(text, color=self.colors, nl=False, err=True)
# Maybe render the status area
self._maybe_render_status()
# Additionally log to a file
if self._main_options['log_file']:
click.echo(text, file=self._main_options['log_file'], color=False, nl=False)
@contextmanager
def _interrupted(self):
self._status.clear()
try:
with self.stream.suspend():
yield
finally:
self._maybe_render_status()
# Some validation routines for project initialization
#
def _assert_format_version(self, format_version):
message = "The version must be supported by this " + \
"version of buildstream (0 - {})\n".format(BST_FORMAT_VERSION)
# Validate that it is an integer
try:
number = int(format_version)
except ValueError as e:
raise AppError(message, reason='invalid-format-version') from e
# Validate that the specified version is supported
if number < 0 or number > BST_FORMAT_VERSION:
raise AppError(message, reason='invalid-format-version')
def _assert_element_path(self, element_path):
message = "The element path cannot be an absolute path or contain any '..' components\n"
# Validate the path is not absolute
if os.path.isabs(element_path):
raise AppError(message, reason='invalid-element-path')
# Validate that the path does not contain any '..' components
path = element_path
while path:
split = os.path.split(path)
path = split[0]
basename = split[1]
if basename == '..':
raise AppError(message, reason='invalid-element-path')
# _init_project_interactive()
#
# Collect the user input for an interactive session for App.init_project()
#
# Args:
# project_name (str): The project name, must be a valid symbol name
# format_version (int): The project format version, default is the latest version
# element_path (str): The subdirectory to store elements in, default is 'elements'
#
# Returns:
# project_name (str): The user selected project name
# format_version (int): The user selected format version
# element_path (str): The user selected element path
#
def _init_project_interactive(self, project_name, format_version=BST_FORMAT_VERSION, element_path='elements'):
def project_name_proc(user_input):
try:
_yaml.assert_symbol_name(None, user_input, 'project name')
except LoadError as e:
message = "{}\n\n{}\n".format(e, e.detail)
raise UsageError(message) from e
return user_input
def format_version_proc(user_input):
try:
self._assert_format_version(user_input)
except AppError as e:
raise UsageError(str(e)) from e
return user_input
def element_path_proc(user_input):
try:
self._assert_element_path(user_input)
except AppError as e:
raise UsageError(str(e)) from e
return user_input
w = TextWrapper(initial_indent=' ', subsequent_indent=' ', width=79)
# Collect project name
click.echo("", err=True)
click.echo(self._content_profile.fmt("Choose a unique name for your project"), err=True)
click.echo(self._format_profile.fmt("-------------------------------------"), err=True)
click.echo("", err=True)
click.echo(self._detail_profile.fmt(
w.fill("The project name is a unique symbol for your project and will be used "
"to distinguish your project from others in user preferences, namspaceing "
"of your project's artifacts in shared artifact caches, and in any case where "
"BuildStream needs to distinguish between multiple projects.")), err=True)
click.echo("", err=True)
click.echo(self._detail_profile.fmt(
w.fill("The project name must contain only alphanumeric characters, "
"may not start with a digit, and may contain dashes or underscores.")), err=True)
click.echo("", err=True)
project_name = click.prompt(self._content_profile.fmt("Project name"),
value_proc=project_name_proc, err=True)
click.echo("", err=True)
# Collect format version
click.echo(self._content_profile.fmt("Select the minimum required format version for your project"), err=True)
click.echo(self._format_profile.fmt("-----------------------------------------------------------"), err=True)
click.echo("", err=True)
click.echo(self._detail_profile.fmt(
w.fill("The format version is used to provide users who build your project "
"with a helpful error message in the case that they do not have a recent "
"enough version of BuildStream supporting all the features which your "
"project might use.")), err=True)
click.echo("", err=True)
click.echo(self._detail_profile.fmt(
w.fill("The lowest version allowed is 0, the currently installed version of BuildStream "
"supports up to format version {}.".format(BST_FORMAT_VERSION))), err=True)
click.echo("", err=True)
format_version = click.prompt(self._content_profile.fmt("Format version"),
value_proc=format_version_proc,
default=format_version, err=True)
click.echo("", err=True)
# Collect element path
click.echo(self._content_profile.fmt("Select the element path"), err=True)
click.echo(self._format_profile.fmt("-----------------------"), err=True)
click.echo("", err=True)
click.echo(self._detail_profile.fmt(
w.fill("The element path is a project subdirectory where element .bst files are stored "
"within your project.")), err=True)
click.echo("", err=True)
click.echo(self._detail_profile.fmt(
w.fill("Elements will be displayed in logs as filenames relative to "
"the element path, and similarly, dependencies must be expressed as filenames "
"relative to the element path.")), err=True)
click.echo("", err=True)
element_path = click.prompt(self._content_profile.fmt("Element path"),
value_proc=element_path_proc,
default=element_path, err=True)
return (project_name, format_version, element_path)
#
# Return a value processor for partial choice matching.
# The returned value processor will test the passed value against all the
# items in the 'choices' list. If the value is a prefix of exactly one of the
# 'choices' elements, that element is returned. If no element matches, or if
# several elements match the same input, a 'click.UsageError' exception is
# raised with a description of the error.
#
# Note that Click expects user input errors to be signaled by raising a
# 'click.UsageError' exception. That way, Click displays an error message and
# asks for new input.
#
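# Illustrative sketch of the expected behavior, using a hypothetical
# choice list:
#
#   proc = _prefix_choice_value_proc(['continue', 'quit', 'retry'])
#   proc('c')    # -> 'continue'
#   proc('re')   # -> 'retry'
#   proc('x')    # -> raises click.UsageError (no match)
#
# With overlapping choices such as ['continue', 'checkout'], an input of 'c'
# would also raise click.UsageError, since the prefix is ambiguous.
#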
def _prefix_choice_value_proc(choices):
def value_proc(user_input):
remaining_candidate = [choice for choice in choices if choice.startswith(user_input)]
if not remaining_candidate:
raise UsageError("Expected one of {}, got {}".format(choices, user_input))
if len(remaining_candidate) == 1:
return remaining_candidate[0]
raise UsageError("Ambiguous input. '{}' can refer to one of {}".format(user_input, remaining_candidate))
return value_proc
buildstream-1.6.9/buildstream/_frontend/cli.py 0000664 0000000 0000000 00000105565 14375152700 0021474 0 ustar 00root root 0000000 0000000 import os
import sys
import fcntl
import click
from .. import _yaml
from .._exceptions import BstError, LoadError, AppError
from .._versions import BST_FORMAT_VERSION
from .complete import main_bashcomplete, complete_path, CompleteUnhandled
##################################################################
# Override of click's main entry point #
##################################################################
# search_command()
#
# Helper function to get a command and context object
# for a given command.
#
# Args:
# commands (list): A list of command words following `bst` invocation
# context (click.Context): An existing toplevel context, or None
#
# Returns:
# context (click.Context): The context of the associated command, or None
#
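# Illustrative sketch (hypothetical command words):
#
#   command_ctx = search_command(['workspace', 'open'])
#
# would return the click.Context of the `bst workspace open` command, or
# None if any of the command words does not resolve to a command.
#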
def search_command(args, *, context=None):
if context is None:
context = cli.make_context('bst', args, resilient_parsing=True)
# Loop into the deepest command
command = cli
command_ctx = context
for cmd in args:
command = command_ctx.command.get_command(command_ctx, cmd)
if command is None:
return None
command_ctx = command.make_context(command.name, [command.name],
parent=command_ctx,
resilient_parsing=True)
return command_ctx
# Completion for completing command names as help arguments
def complete_commands(cmd, args, incomplete):
command_ctx = search_command(args[1:])
if command_ctx and command_ctx.command and isinstance(command_ctx.command, click.MultiCommand):
return [subcommand + " " for subcommand in command_ctx.command.list_commands(command_ctx)]
return []
# Special completion for completing the bst elements in a project dir
def complete_target(args, incomplete):
"""
:param args: full list of args typed before the incomplete arg
:param incomplete: the incomplete text to autocomplete
:return: all the possible user-specified completions for the param
"""
project_conf = 'project.conf'
def ensure_project_dir(directory):
directory = os.path.abspath(directory)
while not os.path.isfile(os.path.join(directory, project_conf)):
parent_dir = os.path.dirname(directory)
if directory == parent_dir:
break
directory = parent_dir
return directory
# First resolve the directory, in case there is an
# active --directory/-C option
#
base_directory = '.'
idx = -1
try:
idx = args.index('-C')
except ValueError:
try:
idx = args.index('--directory')
except ValueError:
pass
if idx >= 0 and len(args) > idx + 1:
base_directory = args[idx + 1]
else:
# Check if this directory or any of its parent directories
# contain a project config file
base_directory = ensure_project_dir(base_directory)
# Now parse the project.conf just to find the element path,
# this is unfortunately a bit heavy.
project_file = os.path.join(base_directory, project_conf)
try:
project = _yaml.load(project_file)
except LoadError:
# If there is no project directory in context, just don't
# even bother trying to complete anything.
return []
# The project is not required to have an element-path
element_directory = project.get('element-path')
# If a project was loaded, use its element-path to
# adjust our completion's base directory
if element_directory:
base_directory = os.path.join(base_directory, element_directory)
return complete_path("File", incomplete, base_directory=base_directory)
def override_completions(cmd, cmd_param, args, incomplete):
"""
:param cmd_param: command definition
:param args: full list of args typed before the incomplete arg
:param incomplete: the incomplete text to autocomplete
:return: all the possible user-specified completions for the param
"""
if cmd.name == 'help':
return complete_commands(cmd, args, incomplete)
# We can't easily extend click's data structures without
# modifying click itself, so just do some weak special casing
# right here and select which parameters we want to handle specially.
if isinstance(cmd_param.type, click.Path) and \
(cmd_param.name == 'elements' or
cmd_param.name == 'element' or
cmd_param.name == 'except_' or
cmd_param.opts == ['--track'] or
cmd_param.opts == ['--track-except']):
return complete_target(args, incomplete)
raise CompleteUnhandled()
def override_main(self, args=None, prog_name=None, complete_var=None,
standalone_mode=True, **extra):
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
if main_bashcomplete(self, prog_name, override_completions):
# If we're running tests we can't just go calling exit()
# from the main process.
#
# The below is a quicker exit path for the sake
# of making completions respond faster.
if 'BST_TEST_SUITE' not in os.environ:
sys.stdout.flush()
sys.stderr.flush()
os._exit(0)
# Regular client return for test cases
return
# Check output file descriptor at earliest opportunity, to
# provide a reasonable error message instead of a stack trace
# in the case that it is non-blocking
for stream in (sys.stdout, sys.stderr):
fileno = stream.fileno()
flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
if flags & os.O_NONBLOCK:
click.echo("{} is currently set to O_NONBLOCK, try opening a new shell"
.format(stream.name), err=True)
sys.exit(-1)
original_main(self, args=args, prog_name=prog_name, complete_var=None,
standalone_mode=standalone_mode, **extra)
original_main = click.BaseCommand.main
click.BaseCommand.main = override_main
##################################################################
# Main Options #
##################################################################
def print_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
from .. import __version__ # pylint: disable=import-outside-toplevel
click.echo(__version__)
ctx.exit()
@click.group(context_settings=dict(help_option_names=['-h', '--help']))
@click.option('--version', is_flag=True, callback=print_version,
expose_value=False, is_eager=True)
@click.option('--config', '-c',
type=click.Path(exists=True, dir_okay=False, readable=True),
help="Configuration file to use")
@click.option('--directory', '-C', default=os.getcwd(),
type=click.Path(file_okay=False, readable=True),
help="Project directory (default: current directory)")
@click.option('--on-error', default=None,
type=click.Choice(['continue', 'quit', 'terminate']),
help="What to do when an error is encountered")
@click.option('--fetchers', type=click.INT, default=None,
help="Maximum simultaneous download tasks")
@click.option('--builders', type=click.INT, default=None,
help="Maximum simultaneous build tasks")
@click.option('--pushers', type=click.INT, default=None,
help="Maximum simultaneous upload tasks")
@click.option('--max-jobs', type=click.INT, default=None,
help="Number of parallel jobs allowed for a given build task")
@click.option('--network-retries', type=click.INT, default=None,
help="Maximum retries for network tasks")
@click.option('--no-interactive', is_flag=True, default=False,
help="Force non interactive mode, otherwise this is automatically decided")
@click.option('--verbose/--no-verbose', default=None,
help="Be extra verbose")
@click.option('--debug/--no-debug', default=None,
help="Print debugging output")
@click.option('--error-lines', type=click.INT, default=None,
help="Maximum number of lines to show from a task log")
@click.option('--message-lines', type=click.INT, default=None,
help="Maximum number of lines to show in a detailed message")
@click.option('--log-file',
type=click.File(mode='w', encoding='UTF-8'),
help="A file to store the main log (allows storing the main log while in interactive mode)")
@click.option('--colors/--no-colors', default=None,
help="Force enable/disable ANSI color codes in output")
@click.option('--strict/--no-strict', default=None, is_flag=True,
help="Elements must be rebuilt when their dependencies have changed")
@click.option('--option', '-o', type=click.Tuple([str, str]), multiple=True, metavar='OPTION VALUE',
help="Specify a project option")
@click.option('--default-mirror', default=None,
help="The mirror to fetch from first, before attempting other mirrors")
@click.pass_context
def cli(context, **kwargs):
"""Build and manipulate BuildStream projects
Most of the main options override options in the
user preferences configuration file.
"""
from .app import App # pylint: disable=import-outside-toplevel
# Create the App, giving it the main arguments
context.obj = App.create(dict(kwargs))
context.call_on_close(context.obj.cleanup)
##################################################################
# Help Command #
##################################################################
@cli.command(name="help", short_help="Print usage information",
context_settings={"help_option_names": []})
@click.argument("command", nargs=-1, metavar='COMMAND')
@click.pass_context
def help_command(ctx, command):
"""Print usage information about a given command
"""
command_ctx = search_command(command, context=ctx.parent)
if not command_ctx:
click.echo("Not a valid command: '{} {}'"
.format(ctx.parent.info_name, " ".join(command)), err=True)
sys.exit(-1)
click.echo(command_ctx.command.get_help(command_ctx), err=True)
# Hint about available sub commands
if isinstance(command_ctx.command, click.MultiCommand):
detail = " "
if command:
detail = " {} ".format(" ".join(command))
click.echo("\nFor usage on a specific command: {} help{}COMMAND"
.format(ctx.parent.info_name, detail), err=True)
##################################################################
# Init Command #
##################################################################
@cli.command(short_help="Initialize a new BuildStream project")
@click.option('--project-name', type=click.STRING,
help="The project name to use")
@click.option('--format-version', type=click.INT, default=BST_FORMAT_VERSION,
help="The required format version (default: {})".format(BST_FORMAT_VERSION))
@click.option('--element-path', type=click.Path(), default="elements",
help="The subdirectory to store elements in (default: elements)")
@click.option('--force', '-f', default=False, is_flag=True,
help="Allow overwriting an existing project.conf")
@click.pass_obj
def init(app, project_name, format_version, element_path, force):
"""Initialize a new BuildStream project
Creates a new BuildStream project.conf in the project
directory.
Unless `--project-name` is specified, this will be an
interactive session.
"""
app.init_project(project_name, format_version, element_path, force)
##################################################################
# Build Command #
##################################################################
@cli.command(short_help="Build elements in a pipeline")
@click.option('--all', 'all_', default=False, is_flag=True,
help="Build elements that would not be needed for the current build plan")
@click.option('--track', 'track_', multiple=True,
type=click.Path(readable=False),
help="Specify elements to track during the build. Can be used "
"repeatedly to specify multiple elements")
@click.option('--track-all', default=False, is_flag=True,
help="Track all elements in the pipeline")
@click.option('--track-except', multiple=True,
type=click.Path(readable=False),
help="Except certain dependencies from tracking")
@click.option('--track-cross-junctions', '-J', default=False, is_flag=True,
help="Allow tracking to cross junction boundaries")
@click.option('--track-save', default=False, is_flag=True,
help="Deprecated: This is ignored")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def build(app, elements, all_, track_, track_save, track_all, track_except, track_cross_junctions):
"""Build elements in a pipeline"""
if (track_except or track_cross_junctions) and not (track_ or track_all):
click.echo("ERROR: The --track-except and --track-cross-junctions options "
"can only be used with --track or --track-all", err=True)
sys.exit(-1)
if track_save:
click.echo("WARNING: --track-save is deprecated, saving is now unconditional", err=True)
if track_all:
track_ = elements
with app.initialized(session_name="Build"):
app.stream.build(elements,
track_targets=track_,
track_except=track_except,
track_cross_junctions=track_cross_junctions,
build_all=all_)
##################################################################
# Fetch Command #
##################################################################
@cli.command(short_help="Fetch sources in a pipeline")
@click.option('--except', 'except_', multiple=True,
type=click.Path(readable=False),
help="Except certain dependencies from fetching")
@click.option('--deps', '-d', default='plan',
type=click.Choice(['none', 'plan', 'all']),
help='The dependencies to fetch (default: plan)')
@click.option('--track', 'track_', default=False, is_flag=True,
help="Track new source references before fetching")
@click.option('--track-cross-junctions', '-J', default=False, is_flag=True,
help="Allow tracking to cross junction boundaries")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def fetch(app, elements, deps, track_, except_, track_cross_junctions):
"""Fetch sources required to build the pipeline
By default this will only try to fetch sources which are
required for the build plan of the specified target element,
omitting sources for any elements which are already built
and available in the artifact cache.
Specify `--deps` to control which sources to fetch:
\b
none: No dependencies, just the element itself
plan: Only dependencies required for the build plan
all: All dependencies
"""
from .._pipeline import PipelineSelection # pylint: disable=import-outside-toplevel
if track_cross_junctions and not track_:
click.echo("ERROR: The --track-cross-junctions option can only be used with --track", err=True)
sys.exit(-1)
if track_ and deps == PipelineSelection.PLAN:
click.echo("WARNING: --track specified for tracking of a build plan\n\n"
"Since tracking modifies the build plan, all elements will be tracked.", err=True)
deps = PipelineSelection.ALL
with app.initialized(session_name="Fetch"):
app.stream.fetch(elements,
selection=deps,
except_targets=except_,
track_targets=track_,
track_cross_junctions=track_cross_junctions)
##################################################################
# Track Command #
##################################################################
@cli.command(short_help="Track new source references")
@click.option('--except', 'except_', multiple=True,
type=click.Path(readable=False),
help="Except certain dependencies from tracking")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependencies to track (default: none)')
@click.option('--cross-junctions', '-J', default=False, is_flag=True,
help="Allow crossing junction boundaries")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def track(app, elements, deps, except_, cross_junctions):
"""Consults the specified tracking branches for new versions available
to build and updates the project with any newly available references.
By default this will track just the specified element, but you can also
update a whole tree of dependencies in one go.
Specify `--deps` to control which sources to track:
\b
none: No dependencies, just the specified elements
all: All dependencies of all specified elements
"""
with app.initialized(session_name="Track"):
# Substitute 'none' for 'redirect' so that element redirections
# will be done
if deps == 'none':
deps = 'redirect'
app.stream.track(elements,
selection=deps,
except_targets=except_,
cross_junctions=cross_junctions)
##################################################################
# Pull Command #
##################################################################
@cli.command(short_help="Pull a built artifact")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependency artifacts to pull (default: none)')
@click.option('--remote', '-r',
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def pull(app, elements, deps, remote):
"""Pull a built artifact from the configured remote artifact cache.
By default the artifact will be pulled from one of the configured caches
if possible, following the usual priority order. If the `--remote` flag
is given, only the specified cache will be queried.
Specify `--deps` to control which artifacts to pull:
\b
none: No dependencies, just the element itself
all: All dependencies
"""
with app.initialized(session_name="Pull"):
app.stream.pull(elements, selection=deps, remote=remote)
##################################################################
# Push Command #
##################################################################
@cli.command(short_help="Push a built artifact")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependencies to push (default: none)')
@click.option('--remote', '-r', default=None,
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def push(app, elements, deps, remote):
"""Push a built artifact to a remote artifact cache.
The default destination is the highest priority configured cache. You can
override this by passing a different cache URL with the `--remote` flag.
Specify `--deps` to control which artifacts to push:
\b
none: No dependencies, just the element itself
all: All dependencies
"""
with app.initialized(session_name="Push"):
app.stream.push(elements, selection=deps, remote=remote)
##################################################################
# Show Command #
##################################################################
@cli.command(short_help="Show elements in the pipeline")
@click.option('--except', 'except_', multiple=True,
type=click.Path(readable=False),
help="Except certain dependencies")
@click.option('--deps', '-d', default='all',
type=click.Choice(['none', 'plan', 'run', 'build', 'all']),
help='The dependencies to show (default: all)')
@click.option('--order', default="stage",
type=click.Choice(['stage', 'alpha']),
help='Staging or alphabetic ordering of dependencies')
@click.option('--format', '-f', 'format_', metavar='FORMAT', default=None,
type=click.STRING,
help='Format string for each element')
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def show(app, elements, deps, except_, order, format_):
"""Show elements in the pipeline
By default this will show all of the dependencies of the
specified target element.
Specify `--deps` to control which elements to show:
\b
none: No dependencies, just the element itself
plan: Dependencies required for a build plan
run: Runtime dependencies, including the element itself
build: Build time dependencies, excluding the element itself
all: All dependencies
\b
FORMAT
~~~~~~
The --format option controls what should be printed for each element,
the following symbols can be used in the format string:
\b
%{name} The element name
%{key} The abbreviated cache key (if all sources are consistent)
%{full-key} The full cache key (if all sources are consistent)
%{state} cached, buildable, waiting or inconsistent
%{config} The element configuration
%{vars} Variable configuration
%{env} Environment settings
%{public} Public domain data
%{workspaced} If the element is workspaced
%{workspace-dirs} A list of workspace directories
The value of the %{symbol} without the leading '%' character is understood
as a pythonic formatting string, so python formatting features apply,
for example:
\b
bst show target.bst --format \\
'Name: %{name: ^20} Key: %{key: ^8} State: %{state}'
If you want to use a newline in a format string in bash, use the '$' modifier:
\b
bst show target.bst --format \\
$'---------- %{name} ----------\\n%{vars}'
"""
with app.initialized():
dependencies = app.stream.load_selection(elements,
selection=deps,
except_targets=except_)
if order == "alpha":
dependencies = sorted(dependencies)
if not format_:
format_ = app.context.log_element_format
report = app.logger.show_pipeline(dependencies, format_)
click.echo(report, color=app.colors)
##################################################################
# Shell Command #
##################################################################
@cli.command(short_help="Shell into an element's sandbox environment")
@click.option('--build', '-b', 'build_', is_flag=True, default=False,
help='Stage dependencies and sources to build')
@click.option('--sysroot', '-s', default=None,
type=click.Path(exists=True, file_okay=False, readable=True),
help="An existing sysroot")
@click.option('--mount', type=click.Tuple([click.Path(exists=True), str]), multiple=True,
metavar='HOSTPATH PATH',
help="Mount a file or directory into the sandbox")
@click.option('--isolate', is_flag=True, default=False,
help='Create an isolated build sandbox')
@click.argument('element',
type=click.Path(readable=False))
@click.argument('command', type=click.STRING, nargs=-1)
@click.pass_obj
def shell(app, element, sysroot, mount, isolate, build_, command):
"""Run a command in the target element's sandbox environment
This will stage a temporary sysroot for running the target
element, assuming it has already been built and all required
artifacts are in the local cache.
Use the --build option to create a temporary sysroot for
building the element instead.
Use the --sysroot option with an existing failed build
directory or with a checkout of the given target, in order
to use a specific sysroot.
If no COMMAND is specified, the default is to attempt
to run an interactive shell.
"""
# pylint: disable=import-outside-toplevel
from ..element import Scope
from .._project import HostMount
from .._pipeline import PipelineSelection
if build_:
scope = Scope.BUILD
else:
scope = Scope.RUN
with app.initialized():
dependencies = app.stream.load_selection((element,), selection=PipelineSelection.NONE)
element = dependencies[0]
prompt = app.shell_prompt(element)
mounts = [
HostMount(path, host_path)
for host_path, path in mount
]
try:
exitcode = app.stream.shell(element, scope, prompt,
directory=sysroot,
mounts=mounts,
isolate=isolate,
command=command)
except BstError as e:
raise AppError("Error launching shell: {}".format(e), detail=e.detail) from e
# If there were no errors, we return the shell's exit code here.
sys.exit(exitcode)
##################################################################
# Checkout Command #
##################################################################
@cli.command(short_help="Checkout a built artifact")
@click.option('--force', '-f', default=False, is_flag=True,
help="Allow files to be overwritten")
@click.option('--deps', '-d', default='run',
type=click.Choice(['run', 'none']),
help='The dependencies to checkout (default: run)')
@click.option('--integrate/--no-integrate', default=True, is_flag=True,
help="Whether to run integration commands")
@click.option('--hardlinks', default=False, is_flag=True,
help="Checkout hardlinks instead of copies (handle with care)")
@click.option('--tar', default=False, is_flag=True,
help="Create a tarball from the artifact contents instead "
"of a file tree. If LOCATION is '-', the tarball "
"will be dumped to the standard output.")
@click.argument('element',
type=click.Path(readable=False))
@click.argument('location', type=click.Path())
@click.pass_obj
def checkout(app, element, location, force, deps, integrate, hardlinks, tar):
"""Checkout a built artifact to the specified location
"""
if hardlinks and tar:
click.echo("ERROR: options --hardlinks and --tar conflict", err=True)
sys.exit(-1)
with app.initialized():
app.stream.checkout(element,
location=location,
force=force,
deps=deps,
integrate=integrate,
hardlinks=hardlinks,
tar=tar)
##################################################################
# Workspace Command #
##################################################################
@cli.group(short_help="Manipulate developer workspaces")
def workspace():
"""Manipulate developer workspaces"""
##################################################################
# Workspace Open Command #
##################################################################
@workspace.command(name='open', short_help="Open a new workspace")
@click.option('--no-checkout', default=False, is_flag=True,
help="Do not checkout the source, only link to the given directory")
@click.option('--force', '-f', default=False, is_flag=True,
help="Overwrite files existing in checkout directory")
@click.option('--track', 'track_', default=False, is_flag=True,
help="Track and fetch new source references before checking out the workspace")
@click.argument('element',
type=click.Path(readable=False))
@click.argument('directory', type=click.Path(file_okay=False))
@click.pass_obj
def workspace_open(app, no_checkout, force, track_, element, directory):
"""Open a workspace for manual source modification"""
if os.path.exists(directory):
if not os.path.isdir(directory):
click.echo("Checkout directory is not a directory: {}".format(directory), err=True)
sys.exit(-1)
if not (no_checkout or force) and os.listdir(directory):
click.echo("Checkout directory is not empty: {}".format(directory), err=True)
sys.exit(-1)
with app.initialized():
app.stream.workspace_open(element, directory,
no_checkout=no_checkout,
track_first=track_,
force=force)
##################################################################
# Workspace Close Command #
##################################################################
@workspace.command(name='close', short_help="Close workspaces")
@click.option('--remove-dir', default=False, is_flag=True,
help="Remove the path that contains the closed workspace")
@click.option('--all', '-a', 'all_', default=False, is_flag=True,
help="Close all open workspaces")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def workspace_close(app, remove_dir, all_, elements):
"""Close a workspace"""
if not (all_ or elements):
click.echo('ERROR: no elements specified', err=True)
sys.exit(-1)
with app.initialized():
# Early exit if we specified `all` and there are no workspaces
if all_ and not app.stream.workspace_exists():
click.echo('No open workspaces to close', err=True)
sys.exit(0)
if all_:
elements = [element_name for element_name, _ in app.context.get_workspaces().list()]
elements = app.stream.redirect_element_names(elements)
# Check that the workspaces in question exist
nonexisting = []
for element_name in elements:
if not app.stream.workspace_exists(element_name):
nonexisting.append(element_name)
if nonexisting:
raise AppError("Workspace does not exist", detail="\n".join(nonexisting))
if app.interactive and remove_dir:
if not click.confirm('This will remove all your changes, are you sure?'):
click.echo('Aborting', err=True)
sys.exit(-1)
for element_name in elements:
app.stream.workspace_close(element_name, remove_dir=remove_dir)
##################################################################
# Workspace Reset Command #
##################################################################
@workspace.command(name='reset', short_help="Reset a workspace to its original state")
@click.option('--soft', default=False, is_flag=True,
help="Reset workspace state without affecting its contents")
@click.option('--track', 'track_', default=False, is_flag=True,
help="Track and fetch the latest source before resetting")
@click.option('--all', '-a', 'all_', default=False, is_flag=True,
help="Reset all open workspaces")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def workspace_reset(app, soft, track_, all_, elements):
"""Reset a workspace to its original state"""
# Check that the workspaces in question exist
with app.initialized():
if not (all_ or elements):
raise AppError('No elements specified to reset')
if all_ and not app.stream.workspace_exists():
raise AppError("No open workspaces to reset")
if app.interactive and not soft:
if not click.confirm('This will remove all your changes, are you sure?'):
click.echo('Aborting', err=True)
sys.exit(-1)
if all_:
elements = tuple(element_name for element_name, _ in app.context.get_workspaces().list())
app.stream.workspace_reset(elements, soft=soft, track_first=track_)
##################################################################
# Workspace List Command #
##################################################################
@workspace.command(name='list', short_help="List open workspaces")
@click.pass_obj
def workspace_list(app):
"""List open workspaces"""
with app.initialized():
app.stream.workspace_list()
##################################################################
# Source Bundle Command #
##################################################################
@cli.command(name="source-bundle", short_help="Produce a build bundle to be manually executed")
@click.option('--except', 'except_', multiple=True,
type=click.Path(readable=False),
help="Elements to except from the tarball")
@click.option('--compression', default='gz',
type=click.Choice(['none', 'gz', 'bz2', 'xz']),
help="Compress the tar file using the given algorithm.")
@click.option('--track', 'track_', default=False, is_flag=True,
help="Track new source references before bundling")
@click.option('--force', '-f', default=False, is_flag=True,
help="Overwrite an existing tarball")
@click.option('--directory', default=os.getcwd(),
help="The directory to write the tarball to")
@click.argument('element',
type=click.Path(readable=False))
@click.pass_obj
def source_bundle(app, element, force, directory,
track_, compression, except_):
"""Produce a source bundle to be manually executed
"""
with app.initialized():
app.stream.source_bundle(element, directory,
track_first=track_,
force=force,
compression=compression,
except_targets=except_)
buildstream-1.6.9/buildstream/_frontend/complete.py 0000664 0000000 0000000 00000031305 14375152700 0022523 0 ustar 00root root 0000000 0000000 #
# Copyright (c) 2014 by Armin Ronacher.
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# This module was forked from the python click library. The original
# copyright notice from the Click library and the following disclaimer are
# included as per their LICENSE requirements.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections.abc
import copy
import os
import click
from click.core import MultiCommand, Option, Argument
from click.parser import split_arg_string
WORDBREAK = '='
COMPLETION_SCRIPT = '''
%(complete_func)s() {
local IFS=$'\n'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
COMP_CWORD=$COMP_CWORD \\
%(autocomplete_var)s=complete $1 ) )
return 0
}
complete -F %(complete_func)s -o nospace %(script_names)s
'''
# An exception for our custom completion handler to
# indicate that it does not want to handle completion
# for this parameter
#
class CompleteUnhandled(Exception):
pass
def complete_path(path_type, incomplete, base_directory='.'):
"""Helper method for implementing the completions() method
for File and Path parameter types.
"""
# Try listing the files in the relative or absolute path
# specified in `incomplete` minus the last path component,
# otherwise list files starting from the current working directory.
entries = []
base_path = ''
# This is getting a bit messy
listed_base_directory = False
if os.path.sep in incomplete:
split = incomplete.rsplit(os.path.sep, 1)
base_path = split[0]
# If there was nothing on the left of the last separator,
# we are completing files in the filesystem root
base_path = os.path.join(base_directory, base_path)
else:
incomplete_base_path = os.path.join(base_directory, incomplete)
if os.path.isdir(incomplete_base_path):
base_path = incomplete_base_path
try:
if base_path:
if os.path.isdir(base_path):
entries = [os.path.join(base_path, e) for e in os.listdir(base_path)]
else:
entries = os.listdir(base_directory)
listed_base_directory = True
except OSError:
# If for any reason the os reports an error from os.listdir(), just
# ignore this and avoid a stack trace
pass
base_directory_slash = base_directory
if not base_directory_slash.endswith(os.sep):
base_directory_slash += os.sep
base_directory_len = len(base_directory_slash)
def entry_is_dir(entry):
if listed_base_directory:
entry = os.path.join(base_directory, entry)
return os.path.isdir(entry)
def fix_path(path):
# Append slashes to any entries which are directories, or
# spaces for other files since they cannot be further completed
if entry_is_dir(path) and not path.endswith(os.sep):
path = path + os.sep
else:
path = path + " "
# Remove the artificial leading path portion which
# may have been prepended for search purposes.
if path.startswith(base_directory_slash):
path = path[base_directory_len:]
return path
return [
# Return an appropriate path for each entry
fix_path(e) for e in sorted(entries)
# Filter out non directory elements when searching for a directory,
# the opposite is fine, however.
if not (path_type == 'Directory' and not entry_is_dir(e))
]
# Instead of delegating completions to the param type,
# hard code all of buildstream's completions here.
#
# This whole module should be removed in favor of more
# generic code in click once this issue is resolved:
# https://github.com/pallets/click/issues/780
#
def get_param_type_completion(param_type, incomplete):
if isinstance(param_type, click.Choice):
return [c + " " for c in param_type.choices]
elif isinstance(param_type, click.File):
return complete_path("File", incomplete)
elif isinstance(param_type, click.Path):
# Workaround click 8.x API break:
#
# https://github.com/pallets/click/issues/2037
#
if param_type.file_okay and not param_type.dir_okay:
path_type = "File"
elif param_type.dir_okay and not param_type.file_okay:
path_type = "Directory"
else:
path_type = "Path"
return complete_path(path_type, incomplete)
return []
def resolve_ctx(cli, prog_name, args):
"""
Parse into a hierarchy of contexts. Contexts are connected through the parent variable.
:param cli: command definition
:param prog_name: the program that is running
:param args: full list of args typed before the incomplete arg
:return: the final context/command parsed
"""
ctx = cli.make_context(prog_name, args, resilient_parsing=True)
args_remaining = ctx.protected_args + ctx.args
while ctx is not None and args_remaining:
if isinstance(ctx.command, MultiCommand):
cmd = ctx.command.get_command(ctx, args_remaining[0])
if cmd is None:
return None
ctx = cmd.make_context(args_remaining[0], args_remaining[1:], parent=ctx, resilient_parsing=True)
args_remaining = ctx.protected_args + ctx.args
else:
ctx = ctx.parent
return ctx
def start_of_option(param_str):
"""
:param param_str: param_str to check
:return: whether or not this is the start of an option declaration (i.e. starts "-" or "--")
"""
return param_str and param_str[:1] == '-'
def is_incomplete_option(all_args, cmd_param):
"""
:param all_args: the full original list of args supplied
:param cmd_param: the current command parameter
:return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and
corresponds to this cmd_param. In other words whether this cmd_param option can still accept
values
"""
if cmd_param.is_flag:
return False
last_option = None
for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):
if index + 1 > cmd_param.nargs:
break
if start_of_option(arg_str):
last_option = arg_str
return last_option and last_option in cmd_param.opts
def is_incomplete_argument(current_params, cmd_param):
"""
:param current_params: the current params and values for this argument as already entered
:param cmd_param: the current command parameter
:return: whether or not the last argument is incomplete and corresponds to this cmd_param. In
other words whether or not this cmd_param argument can still accept
"""
current_param_values = current_params[cmd_param.name]
if current_param_values is None:
return True
if cmd_param.nargs == -1:
return True
if isinstance(current_param_values, collections.abc.Iterable) \
and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
return True
return False
def get_user_autocompletions(args, incomplete, cmd, cmd_param, override):
"""
:param args: full list of args typed before the incomplete arg
:param incomplete: the incomplete text of the arg to autocomplete
:param cmd_param: command definition
:param override: a callable (cmd_param, args, incomplete) that will be
called to override default completion based on parameter type. Should raise
'CompleteUnhandled' if it could not find a completion.
:return: all the possible user-specified completions for the param
"""
# Use the type specific default completions unless it was overridden
try:
return override(cmd=cmd,
cmd_param=cmd_param,
args=args,
incomplete=incomplete)
except CompleteUnhandled:
return get_param_type_completion(cmd_param.type, incomplete) or []
def get_choices(cli, prog_name, args, incomplete, override):
"""
:param cli: command definition
:param prog_name: the program that is running
:param args: full list of args typed before the incomplete arg
:param incomplete: the incomplete text of the arg to autocomplete
:param override: a callable (cmd_param, args, incomplete) that will be
called to override default completion based on parameter type. Should raise
'CompleteUnhandled' if it could not find a completion.
:return: all the possible completions for the incomplete
"""
all_args = copy.deepcopy(args)
ctx = resolve_ctx(cli, prog_name, args)
if ctx is None:
return
# In newer versions of bash long opts with '='s are partitioned, but it's easier to parse
# without the '='
if start_of_option(incomplete) and WORDBREAK in incomplete:
partition_incomplete = incomplete.partition(WORDBREAK)
all_args.append(partition_incomplete[0])
incomplete = partition_incomplete[2]
elif incomplete == WORDBREAK:
incomplete = ''
choices = []
found_param = False
if start_of_option(incomplete):
# completions for options
for param in ctx.command.params:
if isinstance(param, Option):
choices.extend([param_opt + " " for param_opt in param.opts + param.secondary_opts
if param_opt not in all_args or param.multiple])
found_param = True
if not found_param:
# completion for option values by choices
for cmd_param in ctx.command.params:
if isinstance(cmd_param, Option) and is_incomplete_option(all_args, cmd_param):
choices.extend(get_user_autocompletions(all_args, incomplete, ctx.command, cmd_param, override))
found_param = True
break
if not found_param:
# completion for argument values by choices
for cmd_param in ctx.command.params:
if isinstance(cmd_param, Argument) and is_incomplete_argument(ctx.params, cmd_param):
choices.extend(get_user_autocompletions(all_args, incomplete, ctx.command, cmd_param, override))
found_param = True
break
if not found_param and isinstance(ctx.command, MultiCommand):
# completion for any subcommands
choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)])
if not start_of_option(incomplete) and ctx.parent is not None \
and isinstance(ctx.parent.command, MultiCommand) and ctx.parent.command.chain:
# completion for chained commands
remaining_commands = set(ctx.parent.command.list_commands(ctx.parent)) - set(ctx.parent.protected_args)
choices.extend([cmd + " " for cmd in remaining_commands])
for item in choices:
if item.startswith(incomplete):
yield item
def do_complete(cli, prog_name, override):
cwords = split_arg_string(os.environ['COMP_WORDS'])
cword = int(os.environ['COMP_CWORD'])
args = cwords[1:cword]
try:
incomplete = cwords[cword]
except IndexError:
incomplete = ''
for item in get_choices(cli, prog_name, args, incomplete, override):
click.echo(item)
# Main function called from main.py at startup here
#
def main_bashcomplete(cmd, prog_name, override):
"""Internal handler for the bash completion support."""
if '_BST_COMPLETION' in os.environ:
do_complete(cmd, prog_name, override)
return True
return False
buildstream-1.6.9/buildstream/_frontend/linuxapp.py 0000664 0000000 0000000 00000003715 14375152700 0022557 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
import os
import click
from .app import App
# This trick is currently only supported on some terminals,
# avoid using it where it can cause garbage to be printed
# to the terminal.
#
def _osc_777_supported():
term = os.environ.get('TERM')
if term and (term.startswith('xterm') or term.startswith('vte')):
# Since vte version 4600, upstream silently ignores
# the OSC 777 without printing garbage to the terminal.
#
# For distros like Fedora who have patched vte, this
# will trigger a desktop notification and bring attention
# to the terminal.
#
vte_version = os.environ.get('VTE_VERSION')
try:
vte_version_int = int(vte_version)
except (ValueError, TypeError):
return False
if vte_version_int >= 4600:
return True
return False
# A linux specific App implementation
#
class LinuxApp(App):
def notify(self, title, text):
# Currently we only try this notification method
# of sending an escape sequence to the terminal
#
if _osc_777_supported():
click.echo("\033]777;notify;{};{}\007".format(title, text), err=True)
buildstream-1.6.9/buildstream/_frontend/profile.py 0000664 0000000 0000000 00000004730 14375152700 0022355 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import re
import copy
import click
# Profile()
#
# A class for formatting text with ansi color codes
#
# Kwargs:
# The same keyword arguments which can be used with click.style()
#
class Profile():
def __init__(self, **kwargs):
self._kwargs = dict(kwargs)
# fmt()
#
# Format some text with ansi color codes
#
# Args:
# text (str): The text to format
#
# Kwargs:
# Keyword arguments to apply on top of the base click.style()
# arguments
#
def fmt(self, text, **kwargs):
kwargs = dict(kwargs)
fmtargs = copy.copy(self._kwargs)
fmtargs.update(kwargs)
return click.style(text, **fmtargs)
# fmt_subst()
#
# Substitute a variable of the %{varname} form, formatting
# only the substituted text with the given click.style() configurations
#
# Args:
# text (str): The text to format, with possible variables
# varname (str): The variable name to substitute
# value (str): The value to substitute the variable with
#
# Kwargs:
# Keyword arguments to apply on top of the base click.style()
# arguments
#
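# Illustrative sketch (hypothetical values):
#
#   profile = Profile(fg='yellow')
#   profile.fmt_subst("Name: %{name: ^20}", "name", "base.bst")
#
# substitutes and colorizes only the "%{name: ^20}" token, centering the
# value in a 20 character field, while the surrounding "Name: " text is
# left unformatted.
#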
def fmt_subst(self, text, varname, value, **kwargs):
def subst_callback(match):
# Extract and format the "{(varname)...}" portion of the match
inner_token = match.group(1)
formatted = inner_token.format(**{varname: value})
# Colorize after the pythonic format formatting, which may have padding
return self.fmt(formatted, **kwargs)
# Lazy regex, after our word, match anything that does not have '%'
return re.sub(r"%(\{(" + varname + r")[^%]*\})", subst_callback, text)
buildstream-1.6.9/buildstream/_frontend/status.py 0000664 0000000 0000000 00000042704 14375152700 0022243 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
import sys
import curses
import shutil
import click
# Import a widget internal for formatting time codes
from .widget import TimeCode
from .._scheduler import ElementJob
# Status()
#
# A widget for formatting overall status.
#
# Note that the render() and clear() methods in this class are
# simply noops in the case that the application is not connected
# to a terminal, or if the terminal does not support ANSI escape codes.
#
# Args:
# context (Context): The Context
# content_profile (Profile): Formatting profile for content text
# format_profile (Profile): Formatting profile for formatting text
# success_profile (Profile): Formatting profile for success text
# error_profile (Profile): Formatting profile for error text
# stream (Stream): The Stream
# colors (bool): Whether to print the ANSI color codes in the output
#
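# Typical lifecycle sketch (hypothetical caller code):
#
#   status = Status(context, content_profile, format_profile,
#                   success_profile, error_profile, stream)
#   status.add_job(job)
#   status.render()          # draw the status area
#   ...
#   status.clear()           # before printing regular log lines
#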
class Status():
# Table of the terminal capabilities we require and use
_TERM_CAPABILITIES = {
'move_up': 'cuu1',
'move_x': 'hpa',
'clear_eol': 'el'
}
def __init__(self, context,
content_profile, format_profile,
success_profile, error_profile,
stream, colors=False):
self._context = context
self._content_profile = content_profile
self._format_profile = format_profile
self._success_profile = success_profile
self._error_profile = error_profile
self._stream = stream
self._jobs = []
self._last_lines = 0 # Number of status lines we last printed to console
self._spacing = 1
self._colors = colors
self._header = _StatusHeader(context,
content_profile, format_profile,
success_profile, error_profile,
stream)
self._term_width, _ = shutil.get_terminal_size()
self._alloc_lines = 0
self._alloc_columns = None
self._line_length = 0
self._need_alloc = True
self._term_caps = self._init_terminal()
# add_job()
#
# Adds a job to track in the status area
#
# Args:
# job (Job): The job to track in the status area
#
def add_job(self, job):
elapsed = self._stream.elapsed_time
job = _StatusJob(self._context, job, self._content_profile, self._format_profile, elapsed)
self._jobs.append(job)
self._need_alloc = True
# remove_job()
#
# Removes a job currently being tracked in the status area
#
# Args:
# job (Job): The job to remove from the status area
#
def remove_job(self, job):
action_name = job.action_name
if not isinstance(job, ElementJob):
element = None
else:
element = job.element
self._jobs = [
job for job in self._jobs
if not (job.element is element and
job.action_name == action_name)
]
self._need_alloc = True
# clear()
#
# Clear the status area, it is necessary to call
# this before printing anything to the console if
# a status area is in use.
#
# To print some logging to the output and then restore
# the status, use the following:
#
# status.clear()
# ... print something to console ...
# status.render()
#
def clear(self):
if not self._term_caps:
return
for _ in range(self._last_lines):
self._move_up()
self._clear_line()
self._last_lines = 0
# render()
#
# Render the status area.
#
# If you are not printing a line in addition to rendering
# the status area, for instance in a timeout, then it is
# not necessary to call clear().
def render(self):
if not self._term_caps:
return
elapsed = self._stream.elapsed_time
self.clear()
self._check_term_width()
self._allocate()
# Nothing to render, early return
if self._alloc_lines == 0:
return
# Before rendering the actual lines, we need to add some line
# feeds for the number of lines we intend to print first, and
# move cursor position back to the first line
for _ in range(self._alloc_lines + self._header.lines):
click.echo('', err=True)
for _ in range(self._alloc_lines + self._header.lines):
self._move_up()
# Render the one line header
text = self._header.render(self._term_width, elapsed)
click.echo(text, color=self._colors, err=True)
# Now we have the number of columns, and an allocation for
# alignment of each column
n_columns = len(self._alloc_columns)
for line in self._job_lines(n_columns):
text = ''
for job in line:
column = line.index(job)
text += job.render(self._alloc_columns[column] - job.size, elapsed)
# Add spacing between columns
if column < (n_columns - 1):
text += ' ' * self._spacing
# Print the line
click.echo(text, color=self._colors, err=True)
# Track what we printed last, for the next clear
self._last_lines = self._alloc_lines + self._header.lines
###################################################
# Private Methods #
###################################################
# _init_terminal()
#
# Initialize the terminal and return the resolved terminal
# capabilities dictionary.
#
# Returns:
# (dict|None): The resolved terminal capabilities dictionary,
# or None if the terminal does not support all
# of the required capabilities.
#
def _init_terminal(self):
# We need both output streams to be connected to a terminal
if not (sys.stdout.isatty() and sys.stderr.isatty()):
return None
# Initialize the terminal; curses might decide it doesn't
# support this terminal
try:
curses.setupterm(os.environ.get('TERM', 'dumb'))
except curses.error:
return None
term_caps = {}
# Resolve the string capability codes for each of the
# capability names we require.
#
for capname, capval in self._TERM_CAPABILITIES.items():
code = curses.tigetstr(capval)
# If any of the required capabilities resolve to empty strings
# or None, then we don't have the capabilities we need for a
# status bar on this terminal.
if not code:
return None
# Decode sequences as latin1, as they are always 8-bit bytes,
# so when b'\xff' is returned, this must be decoded to u'\xff'.
#
# This technique is employed by the python blessings library
# as well, and should provide better compatibility with most
# terminals.
#
term_caps[capname] = code.decode('latin1')
return term_caps
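# As an illustrative sketch of the lookup performed above (the capability
# name used here is hypothetical, see _TERM_CAPABILITIES for the real set):
#
#   import curses, os
#   curses.setupterm(os.environ.get('TERM', 'dumb'))
#   code = curses.tigetstr('cuu1')    # returns bytes, or None/b'' if missing
#   move_up = code.decode('latin1') if code else None
#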
def _check_term_width(self):
term_width, _ = shutil.get_terminal_size()
if self._term_width != term_width:
self._term_width = term_width
self._need_alloc = True
def _move_up(self):
assert self._term_caps is not None
# Explicitly move to beginning of line, fixes things up
# when there was a ^C or ^Z printed to the terminal.
move_x = curses.tparm(self._term_caps['move_x'].encode('latin1'), 0)
move_x = move_x.decode('latin1')
move_up = curses.tparm(self._term_caps['move_up'].encode('latin1'))
move_up = move_up.decode('latin1')
click.echo(move_x + move_up, nl=False, err=True)
def _clear_line(self):
assert self._term_caps is not None
clear_eol = curses.tparm(self._term_caps['clear_eol'].encode('latin1'))
clear_eol = clear_eol.decode('latin1')
click.echo(clear_eol, nl=False, err=True)
def _allocate(self):
if not self._need_alloc:
return
# State when there are no jobs to display
alloc_lines = 0
alloc_columns = []
line_length = 0
# Search for the largest number of columns for which the columnized jobs still fit
for columns in reversed(range(len(self._jobs))):
alloc_lines, alloc_columns = self._allocate_columns(columns + 1)
# If the sum of column widths with spacing in between
# fits into the terminal width, this is a good allocation.
line_length = sum(alloc_columns) + (columns * self._spacing)
if line_length < self._term_width:
break
self._alloc_lines = alloc_lines
self._alloc_columns = alloc_columns
self._line_length = line_length
self._need_alloc = False
def _job_lines(self, columns):
for i in range(0, len(self._jobs), columns):
yield self._jobs[i:i + columns]
# Returns an array of integers representing the maximum
# length in characters for each column, given the current
# list of jobs to render.
#
def _allocate_columns(self, columns):
column_widths = [0 for _ in range(columns)]
lines = 0
for line in self._job_lines(columns):
line_len = len(line)
lines += 1
for col in range(columns):
if col < line_len:
job = line[col]
column_widths[col] = max(column_widths[col], job.size)
return lines, column_widths
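# Illustrative example of the allocation above (made-up sizes): with five
# jobs whose rendered sizes are [12, 7, 9, 15, 8] and columns=2, the jobs
# are chunked into lines [12, 7], [9, 15] and [8], so this returns
# lines=3 and column widths [12, 15].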
# _StatusHeader()
#
# A delegate object for rendering the header part of the Status() widget
#
# Args:
# context (Context): The Context
# content_profile (Profile): Formatting profile for content text
# format_profile (Profile): Formatting profile for formatting text
# success_profile (Profile): Formatting profile for success text
# error_profile (Profile): Formatting profile for error text
# stream (Stream): The Stream
#
class _StatusHeader():
def __init__(self, context,
content_profile, format_profile,
success_profile, error_profile,
stream):
#
# Public members
#
self.lines = 3
#
# Private members
#
self._content_profile = content_profile
self._format_profile = format_profile
self._success_profile = success_profile
self._error_profile = error_profile
self._stream = stream
self._time_code = TimeCode(context, content_profile, format_profile)
self._context = context
def render(self, line_length, elapsed):
project = self._context.get_toplevel_project()
line_length = max(line_length, 80)
#
# Line 1: Session time, project name, session / total elements
#
# ========= 00:00:00 project-name (143/387) =========
#
session = str(len(self._stream.session_elements))
total = str(len(self._stream.total_elements))
size = 0
text = ''
size += len(total) + len(session) + 4 # Size for (N/N) with a leading space
size += 8 # Size of time code
size += len(project.name) + 1
text += self._time_code.render_time(elapsed)
text += ' ' + self._content_profile.fmt(project.name)
text += ' ' + self._format_profile.fmt('(') + \
self._content_profile.fmt(session) + \
self._format_profile.fmt('/') + \
self._content_profile.fmt(total) + \
self._format_profile.fmt(')')
line1 = self._centered(text, size, line_length, '=')
#
# Line 2: Dynamic list of queue status reports
#
# (Fetched:0 117 0)→ (Built:4 0 0)
#
size = 0
text = ''
# Format and calculate size for each queue progress
for queue in self._stream.queues:
# Add spacing
if self._stream.queues.index(queue) > 0:
size += 2
text += self._format_profile.fmt('→ ')
queue_text, queue_size = self._render_queue(queue)
size += queue_size
text += queue_text
line2 = self._centered(text, size, line_length, ' ')
#
# Line 3: Cache usage percentage report
#
# ~~~~~~ cache: 69% ~~~~~~
#
usage = self._context.get_artifact_cache_usage()
usage_percent = '{}%'.format(usage.used_percent)
size = 21
size += len(usage_percent)
if usage.used_percent >= 95:
formatted_usage_percent = self._error_profile.fmt(usage_percent)
elif usage.used_percent >= 80:
formatted_usage_percent = self._content_profile.fmt(usage_percent)
else:
formatted_usage_percent = self._success_profile.fmt(usage_percent)
text = self._format_profile.fmt("~~~~~~ ") + \
self._content_profile.fmt('cache') + \
self._format_profile.fmt(': ') + \
formatted_usage_percent + \
self._format_profile.fmt(' ~~~~~~')
line3 = self._centered(text, size, line_length, ' ')
return line1 + '\n' + line2 + '\n' + line3
###################################################
# Private Methods #
###################################################
def _render_queue(self, queue):
processed = str(len(queue.processed_elements))
skipped = str(len(queue.skipped_elements))
failed = str(len(queue.failed_elements))
size = 5 # Space for the formatting '(', ':', two spaces and ')'
size += len(queue.complete_name)
size += len(processed) + len(skipped) + len(failed)
text = self._format_profile.fmt("(") + \
self._content_profile.fmt(queue.complete_name) + \
self._format_profile.fmt(":") + \
self._success_profile.fmt(processed) + ' ' + \
self._content_profile.fmt(skipped) + ' ' + \
self._error_profile.fmt(failed) + \
self._format_profile.fmt(")")
return (text, size)
def _centered(self, text, size, line_length, fill):
remaining = line_length - size
remaining -= 2
final_text = self._format_profile.fmt(fill * (remaining // 2)) + ' '
final_text += text
final_text += ' ' + self._format_profile.fmt(fill * (remaining // 2))
return final_text
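# Worked example of the centering above (made-up numbers): with
# line_length=80, size=20 and fill '=', remaining is 80 - 20 - 2 = 58,
# so 29 '=' characters are printed on each side of the text.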
# _StatusJob()
#
# A delegate object for rendering a job in the status area
#
# Args:
# context (Context): The Context
# job (Job): The job being processed
# content_profile (Profile): Formatting profile for content text
# format_profile (Profile): Formatting profile for formatting text
# elapsed (datetime): The offset into the session when this job was created
#
class _StatusJob():
def __init__(self, context, job, content_profile, format_profile, elapsed):
action_name = job.action_name
if not isinstance(job, ElementJob):
element = None
else:
element = job.element
#
# Public members
#
self.element = element # The Element
self.action_name = action_name # The action name
self.size = None # The number of characters required to render
self.full_name = element._get_full_name() if element else action_name
#
# Private members
#
self._offset = elapsed
self._content_profile = content_profile
self._format_profile = format_profile
self._time_code = TimeCode(context, content_profile, format_profile)
# Calculate the size needed to display
self.size = 10 # Size of time code with brackets
self.size += len(action_name)
self.size += len(self.full_name)
self.size += 3 # '[' + ':' + ']'
# render()
#
# Render the Job, return a rendered string
#
# Args:
# padding (int): Amount of padding to print in order to align with columns
# elapsed (datetime): The session elapsed time offset
#
def render(self, padding, elapsed):
text = self._format_profile.fmt('[') + \
self._time_code.render_time(elapsed - self._offset) + \
self._format_profile.fmt(']')
# Add padding after the display name, before terminating ']'
name = self.full_name + (' ' * padding)
text += self._format_profile.fmt('[') + \
self._content_profile.fmt(self.action_name) + \
self._format_profile.fmt(':') + \
self._content_profile.fmt(name) + \
self._format_profile.fmt(']')
return text
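# For example (element name and timing are illustrative only), a job
# running the 'build' action on 'core/foo.bst' for five seconds renders as:
#
#   [00:00:05][build:core/foo.bst]
#
# plus any column padding requested by the caller.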
buildstream-1.6.9/buildstream/_frontend/widget.py 0000664 0000000 0000000 00000071517 14375152700 0022207 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import datetime
import os
from collections import defaultdict, OrderedDict
from contextlib import ExitStack
from mmap import mmap
import re
import textwrap
import click
from . import Profile
from .. import Element, Consistency
from .. import _yaml
from .. import __version__ as bst_version
from .._exceptions import ImplError
from .._message import MessageType
from ..plugin import Plugin
# These messages are printed a bit differently
ERROR_MESSAGES = [MessageType.FAIL, MessageType.ERROR, MessageType.BUG]
# Widget()
#
# An abstract class for printing output columns in our text UI.
#
# Args:
# context (Context): The Context
# content_profile (Profile): The profile to use for rendering content
# format_profile (Profile): The profile to use for rendering formatting
#
class Widget():
def __init__(self, context, content_profile, format_profile):
# The context
self.context = context
# The content profile
self.content_profile = content_profile
# The formatting profile
self.format_profile = format_profile
# render()
#
# Renders a string to be printed in the UI
#
# Args:
# message (Message): A message to print
#
# Returns:
# (str): The string this widget prints for the given message
#
def render(self, message):
raise ImplError("{} does not implement render()".format(type(self).__name__))
# Used to add spacing between columns
class Space(Widget):
def render(self, message):
return ' '
# Used to add fixed text between columns
class FixedText(Widget):
def __init__(self, context, text, content_profile, format_profile):
super().__init__(context, content_profile, format_profile)
self.text = text
def render(self, message):
return self.format_profile.fmt(self.text)
# Used to add the wallclock time this message was created at
class WallclockTime(Widget):
def render(self, message):
fields = [self.content_profile.fmt("{:02d}".format(x)) for x in
[message.creation_time.hour,
message.creation_time.minute,
message.creation_time.second]]
return self.format_profile.fmt(":").join(fields)
# A widget for rendering the debugging column
class Debug(Widget):
def render(self, message):
unique_id = 0 if message.unique_id is None else message.unique_id
text = self.format_profile.fmt('pid:')
text += self.content_profile.fmt("{: <5}".format(message.pid))
text += self.format_profile.fmt(" id:")
text += self.content_profile.fmt("{:0>3}".format(unique_id))
return text
# A widget for rendering the time codes
class TimeCode(Widget):
def __init__(self, context, content_profile, format_profile, microseconds=False):
self._microseconds = microseconds
super().__init__(context, content_profile, format_profile)
def render(self, message):
return self.render_time(message.elapsed)
def render_time(self, elapsed):
if elapsed is None:
fields = [
self.content_profile.fmt('--')
for i in range(3)
]
else:
hours, remainder = divmod(int(elapsed.total_seconds()), 60 * 60)
minutes, seconds = divmod(remainder, 60)
fields = [
self.content_profile.fmt("{0:02d}".format(field))
for field in [hours, minutes, seconds]
]
text = self.format_profile.fmt(':').join(fields)
if self._microseconds:
if elapsed is not None:
text += self.content_profile.fmt(".{0:06d}".format(elapsed.microseconds))
else:
text += self.content_profile.fmt(".------")
return text
# A widget for rendering the MessageType
class TypeName(Widget):
_action_colors = {
MessageType.DEBUG: "cyan",
MessageType.STATUS: "cyan",
MessageType.INFO: "magenta",
MessageType.WARN: "yellow",
MessageType.START: "blue",
MessageType.SUCCESS: "green",
MessageType.FAIL: "red",
MessageType.SKIPPED: "yellow",
MessageType.ERROR: "red",
MessageType.BUG: "red",
}
def render(self, message):
return self.content_profile.fmt("{: <7}"
.format(message.message_type.upper()),
bold=True, dim=True,
fg=self._action_colors[message.message_type])
# A widget for displaying the Element name
class ElementName(Widget):
def __init__(self, context, content_profile, format_profile):
super().__init__(context, content_profile, format_profile)
# Pre-initialization format string, used before we know the length of
# element names in the pipeline
self._fmt_string = '{: <30}'
def render(self, message):
element_id = message.task_id or message.unique_id
if element_id is None:
return ""
plugin = Plugin._lookup(element_id)
name = plugin._get_full_name()
# Sneak the action name in with the element name
action_name = message.action_name
if not action_name:
action_name = "Main"
return self.content_profile.fmt("{: >5}".format(action_name.lower())) + \
self.format_profile.fmt(':') + \
self.content_profile.fmt(self._fmt_string.format(name))
# A widget for displaying the primary message text
class MessageText(Widget):
def render(self, message):
return message.message
# A widget for formatting the element cache key
class CacheKey(Widget):
def __init__(self, context, content_profile, format_profile, err_profile):
super().__init__(context, content_profile, format_profile)
self._err_profile = err_profile
self._key_length = context.log_key_length
def render(self, message):
element_id = message.task_id or message.unique_id
if element_id is None or not self._key_length:
return ""
missing = False
key = ' ' * self._key_length
plugin = Plugin._lookup(element_id)
if isinstance(plugin, Element):
_, key, missing = plugin._get_display_key()
if message.message_type in ERROR_MESSAGES:
text = self._err_profile.fmt(key)
else:
text = self.content_profile.fmt(key, dim=missing)
return text
# A widget for formatting the log file
class LogFile(Widget):
def __init__(self, context, content_profile, format_profile, err_profile):
super().__init__(context, content_profile, format_profile)
self._err_profile = err_profile
self._logdir = context.logdir
def render(self, message, abbrev=True):
if message.logfile and message.scheduler:
logfile = message.logfile
if abbrev and self._logdir != "" and logfile.startswith(self._logdir):
logfile = logfile[len(self._logdir):]
logfile = logfile.lstrip(os.sep)
if message.message_type in ERROR_MESSAGES:
text = self._err_profile.fmt(logfile)
else:
text = self.content_profile.fmt(logfile, dim=True)
else:
text = ''
return text
# START and SUCCESS messages are expected to have no useful
# information in the message text, so we display the logfile name for
# these messages, and the message text for other types.
#
class MessageOrLogFile(Widget):
def __init__(self, context, content_profile, format_profile, err_profile):
super().__init__(context, content_profile, format_profile)
self._message_widget = MessageText(context, content_profile, format_profile)
self._logfile_widget = LogFile(context, content_profile, format_profile, err_profile)
def render(self, message):
# Show the log file only in the main start/success messages
if message.logfile and message.scheduler and \
message.message_type in [MessageType.START, MessageType.SUCCESS]:
text = self._logfile_widget.render(message)
else:
text = self._message_widget.render(message)
return text
# LogLine
#
# A widget for formatting a log line
#
# Args:
# context (Context): The Context
# content_profile (Profile): Formatting profile for content text
# format_profile (Profile): Formatting profile for formatting text
# success_profile (Profile): Formatting profile for success text
# error_profile (Profile): Formatting profile for error text
# detail_profile (Profile): Formatting profile for detail text
# indent (int): Number of spaces to use for general indentation
#
class LogLine(Widget):
def __init__(self, context,
content_profile,
format_profile,
success_profile,
err_profile,
detail_profile,
indent=4):
super().__init__(context, content_profile, format_profile)
self._columns = []
self._failure_messages = defaultdict(list)
self._success_profile = success_profile
self._err_profile = err_profile
self._detail_profile = detail_profile
self._indent = ' ' * indent
self._log_lines = context.log_error_lines
self._message_lines = context.log_message_lines
self._resolved_keys = None
self._space_widget = Space(context, content_profile, format_profile)
self._logfile_widget = LogFile(context, content_profile, format_profile, err_profile)
if context.log_debug:
self._columns.extend([
Debug(context, content_profile, format_profile)
])
self.logfile_variable_names = {
"elapsed": TimeCode(context, content_profile, format_profile, microseconds=False),
"elapsed-us": TimeCode(context, content_profile, format_profile, microseconds=True),
"wallclock": WallclockTime(context, content_profile, format_profile),
"key": CacheKey(context, content_profile, format_profile, err_profile),
"element": ElementName(context, content_profile, format_profile),
"action": TypeName(context, content_profile, format_profile),
"message": MessageOrLogFile(context, content_profile, format_profile, err_profile)
}
logfile_tokens = self._parse_logfile_format(context.log_message_format, content_profile, format_profile)
self._columns.extend(logfile_tokens)
# show_pipeline()
#
# Display a list of elements in the specified format.
#
# The formatting string is the one currently documented in `bst show`; it
# is used in pipeline session headings and also to implement `bst show`.
#
# Args:
# dependencies (list of Element): A list of Element objects
# format_: A formatting string, as specified by `bst show`
#
# Returns:
# (str): The formatted list of elements
#
def show_pipeline(self, dependencies, format_):
report = ''
p = Profile()
for element in dependencies:
line = format_
full_key, cache_key, dim_keys = element._get_display_key()
line = p.fmt_subst(line, 'name', element._get_full_name(), fg='blue', bold=True)
line = p.fmt_subst(line, 'key', cache_key, fg='yellow', dim=dim_keys)
line = p.fmt_subst(line, 'full-key', full_key, fg='yellow', dim=dim_keys)
consistency = element._get_consistency()
if consistency == Consistency.INCONSISTENT:
line = p.fmt_subst(line, 'state', "no reference", fg='red')
else:
if element._cached():
line = p.fmt_subst(line, 'state', "cached", fg='magenta')
elif consistency == Consistency.RESOLVED:
line = p.fmt_subst(line, 'state', "fetch needed", fg='red')
elif element._buildable():
line = p.fmt_subst(line, 'state', "buildable", fg='green')
else:
line = p.fmt_subst(line, 'state', "waiting", fg='blue')
# Element configuration
if "%{config" in format_:
config = _yaml.node_sanitize(element._Element__config)
line = p.fmt_subst(line, 'config', _yaml.dump_string(config))
# Variables
if "%{vars" in format_:
variables = dict(element._Element__variables)
line = p.fmt_subst(line, 'vars', _yaml.dump_string(variables))
# Environment
if "%{env" in format_:
environment = _yaml.node_sanitize(element._Element__environment)
line = p.fmt_subst(line, 'env', _yaml.dump_string(environment))
# Public
if "%{public" in format_:
environment = _yaml.node_sanitize(element._Element__public)
line = p.fmt_subst(line, 'public', _yaml.dump_string(environment))
# Workspaced
if "%{workspaced" in format_:
line = p.fmt_subst(
line, 'workspaced',
'(workspaced)' if element._get_workspace() else '', fg='yellow')
# Workspace-dirs
if "%{workspace-dirs" in format_:
workspace = element._get_workspace()
if workspace is not None:
path = workspace.get_absolute_path()
if path.startswith("~/"):
path = os.path.join(os.getenv('HOME', '/root'), path[2:])
line = p.fmt_subst(line, 'workspace-dirs', "Workspace: {}".format(path))
else:
line = p.fmt_subst(
line, 'workspace-dirs', '')
report += line + '\n'
return report.rstrip('\n')
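# For example (a sketch, assuming 'logline' is a LogLine instance and
# 'elements' is a list of Element objects):
#
#   report = logline.show_pipeline(elements, "%{name}: %{key} (%{state})")
#
# renders one line per element with its name, cache key and state.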
# print_heading()
#
# Prints a heading at program startup, summarizing the BuildStream
# version, user configuration, project options, loaded plugins and
# the pipeline state.
#
# Args:
# project (Project): The toplevel project we were invoked from
# stream (Stream): The stream
# log_file (file): An optional file handle for additional logging
# styling (bool): Whether to enable ansi escape codes in the output
#
def print_heading(self, project, stream, *, log_file, styling=False):
context = self.context
starttime = datetime.datetime.now()
text = ''
self._resolved_keys = {element: element._get_cache_key() for element in stream.session_elements}
# Main invocation context
text += '\n'
text += self.content_profile.fmt("BuildStream Version {}\n".format(bst_version), bold=True)
values = OrderedDict()
values["Session Start"] = starttime.strftime('%A, %d-%m-%Y at %H:%M:%S')
values["Project"] = "{} ({})".format(project.name, project.directory)
values["Targets"] = ", ".join([t.name for t in stream.targets])
values["Cache Usage"] = "{}".format(context.get_artifact_cache_usage())
text += self._format_values(values)
# User configurations
text += '\n'
text += self.content_profile.fmt("User Configuration\n", bold=True)
values = OrderedDict()
values["Configuration File"] = \
"Default Configuration" if not context.config_origin else context.config_origin
values["Log Files"] = context.logdir
values["Source Mirrors"] = context.sourcedir
values["Build Area"] = context.builddir
values["Artifact Cache"] = context.artifactdir
values["Strict Build Plan"] = "Yes" if context.get_strict() else "No"
values["Maximum Fetch Tasks"] = context.sched_fetchers
values["Maximum Build Tasks"] = context.sched_builders
values["Maximum Push Tasks"] = context.sched_pushers
values["Maximum Network Retries"] = context.sched_network_retries
text += self._format_values(values)
text += '\n'
# Project Options
values = OrderedDict()
project.options.printable_variables(values)
if values:
text += self.content_profile.fmt("Project Options\n", bold=True)
text += self._format_values(values)
text += '\n'
# Plugins
text += self._format_plugins(project.first_pass_config.element_factory.loaded_dependencies,
project.first_pass_config.source_factory.loaded_dependencies)
if project.config.element_factory and project.config.source_factory:
text += self._format_plugins(project.config.element_factory.loaded_dependencies,
project.config.source_factory.loaded_dependencies)
# Pipeline state
text += self.content_profile.fmt("Pipeline\n", bold=True)
text += self.show_pipeline(stream.total_elements, context.log_element_format)
text += '\n'
# Separator line before following output
text += self.format_profile.fmt("=" * 79 + '\n')
click.echo(text, color=styling, nl=False, err=True)
if log_file:
click.echo(text, file=log_file, color=False, nl=False)
# print_summary()
#
# Print a summary of activities at the end of a session
#
# Args:
# stream (Stream): The Stream
# log_file (file): An optional file handle for additional logging
# styling (bool): Whether to enable ansi escape codes in the output
#
def print_summary(self, stream, log_file, styling=False):
# Return early and silently if there are no queues; this can
# only happen when the stream itself returned early due to
# an inconsistent pipeline state.
if not stream.queues:
return
text = ''
assert self._resolved_keys is not None
elements = sorted(e for (e, k) in self._resolved_keys.items() if k != e._get_cache_key())
if elements:
text += self.content_profile.fmt("Resolved key Summary\n", bold=True)
text += self.show_pipeline(elements, self.context.log_element_format)
text += "\n\n"
if self._failure_messages:
values = OrderedDict()
for element, messages in sorted(self._failure_messages.items(), key=lambda x: x[0].name):
for queue in stream.queues:
if any(el.name == element.name for el in queue.failed_elements):
values[element.name] = ''.join(self._render(v) for v in messages)
if values:
text += self.content_profile.fmt("Failure Summary\n", bold=True)
text += self._format_values(values, style_value=False)
text += self.content_profile.fmt("Pipeline Summary\n", bold=True)
values = OrderedDict()
values['Total'] = self.content_profile.fmt(str(len(stream.total_elements)))
values['Session'] = self.content_profile.fmt(str(len(stream.session_elements)))
processed_maxlen = 1
skipped_maxlen = 1
failed_maxlen = 1
for queue in stream.queues:
processed_maxlen = max(len(str(len(queue.processed_elements))), processed_maxlen)
skipped_maxlen = max(len(str(len(queue.skipped_elements))), skipped_maxlen)
failed_maxlen = max(len(str(len(queue.failed_elements))), failed_maxlen)
for queue in stream.queues:
processed = str(len(queue.processed_elements))
skipped = str(len(queue.skipped_elements))
failed = str(len(queue.failed_elements))
processed_align = ' ' * (processed_maxlen - len(processed))
skipped_align = ' ' * (skipped_maxlen - len(skipped))
failed_align = ' ' * (failed_maxlen - len(failed))
status_text = self.content_profile.fmt("processed ") + \
self._success_profile.fmt(processed) + \
self.format_profile.fmt(', ') + processed_align
status_text += self.content_profile.fmt("skipped ") + \
self.content_profile.fmt(skipped) + \
self.format_profile.fmt(', ') + skipped_align
status_text += self.content_profile.fmt("failed ") + \
self._err_profile.fmt(failed) + ' ' + failed_align
values["{} Queue".format(queue.action_name)] = status_text
text += self._format_values(values, style_value=False)
click.echo(text, color=styling, nl=False, err=True)
if log_file:
click.echo(text, file=log_file, color=False, nl=False)
###################################################
# Widget Abstract Methods #
###################################################
def render(self, message):
# Track failure messages, for the failure summary printed at the end of the session
element_id = message.task_id or message.unique_id
if message.message_type in ERROR_MESSAGES and element_id is not None:
plugin = Plugin._lookup(element_id)
self._failure_messages[plugin].append(message)
return self._render(message)
###################################################
# Private Methods #
###################################################
def _parse_logfile_format(self, format_string, content_profile, format_profile):
logfile_tokens = []
while format_string:
if format_string.startswith("%%"):
logfile_tokens.append(FixedText(self.context, "%", content_profile, format_profile))
format_string = format_string[2:]
continue
m = re.search(r"^%\{([^\}]+)\}", format_string)
if m is not None:
variable = m.group(1)
format_string = format_string[m.end(0):]
if variable not in self.logfile_variable_names:
raise Exception("'{0}' is not a valid log variable name.".format(variable))
logfile_tokens.append(self.logfile_variable_names[variable])
else:
m = re.search("^[^%]+", format_string)
if m is not None:
text = FixedText(self.context, m.group(0), content_profile, format_profile)
format_string = format_string[m.end(0):]
logfile_tokens.append(text)
else:
# No idea what to do now
raise Exception("'{0}' could not be parsed into a valid logging format.".format(format_string))
return logfile_tokens
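# For example (an illustrative format string), parsing:
#
#   "[%{elapsed}][%{key}][%{element}] %{action} %{message}"
#
# yields a token list which alternates FixedText widgets for the literal
# parts ('[', ']', spaces) with the named widgets registered in
# self.logfile_variable_names above.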
def _render(self, message):
# Render the column widgets first
text = ''
for widget in self._columns:
text += widget.render(message)
text += '\n'
extra_nl = False
# Now add some custom things
if message.detail:
# Identify frontend messages; we never abbreviate these
frontend_message = not (message.task_id or message.unique_id)
# Split and truncate message detail down to message_lines lines
lines = message.detail.splitlines(True)
n_lines = len(lines)
abbrev = False
if message.message_type not in ERROR_MESSAGES \
and not frontend_message and n_lines > self._message_lines:
abbrev = True
lines = lines[0:self._message_lines]
else:
lines[n_lines - 1] = lines[n_lines - 1].rstrip('\n')
detail = self._indent + self._indent.join(lines)
text += '\n'
if message.message_type in ERROR_MESSAGES:
text += self._err_profile.fmt(detail, bold=True)
else:
text += self._detail_profile.fmt(detail)
if abbrev:
text += self._indent + \
self.content_profile.fmt('Message contains {} additional lines'
.format(n_lines - self._message_lines), dim=True)
text += '\n'
extra_nl = True
if message.sandbox is not None:
sandbox = self._indent + 'Sandbox directory: ' + message.sandbox
text += '\n'
if message.message_type == MessageType.FAIL:
text += self._err_profile.fmt(sandbox, bold=True)
else:
text += self._detail_profile.fmt(sandbox)
text += '\n'
extra_nl = True
if message.scheduler and message.message_type == MessageType.FAIL:
text += '\n'
if self.context is not None and not self.context.log_verbose:
text += self._indent + self._err_profile.fmt("Log file: ")
text += self._indent + self._logfile_widget.render(message) + '\n'
else:
text += self._indent + self._err_profile.fmt("Printing the last {} lines from log file:"
.format(self._log_lines)) + '\n'
text += self._indent + self._logfile_widget.render(message, abbrev=False) + '\n'
text += self._indent + self._err_profile.fmt("=" * 70) + '\n'
log_content = self._read_last_lines(message.logfile)
log_content = textwrap.indent(log_content, self._indent)
text += self._detail_profile.fmt(log_content)
text += '\n'
text += self._indent + self._err_profile.fmt("=" * 70) + '\n'
extra_nl = True
if extra_nl:
text += '\n'
return text
def _read_last_lines(self, logfile):
with ExitStack() as stack:
# mmap handles low-level memory details, allowing for
# faster searches
f = stack.enter_context(open(logfile, 'r+', encoding='utf-8'))
log = stack.enter_context(mmap(f.fileno(), os.path.getsize(f.name)))
count = 0
end = log.size() - 1
while count < self._log_lines and end >= 0:
location = log.rfind(b'\n', 0, end)
count += 1
# If location is -1 (none found), this will print the
# first character because of the later +1
end = location
# end+1 is correct whether or not a newline was found at
# that location. If end is -1 (seek before beginning of file)
# then we get the first character. If end is a newline position,
# we discard it and only want to print the beginning of the next
# line.
lines = log[(end + 1):].splitlines()
return '\n'.join([line.decode('utf-8') for line in lines]).rstrip()
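# Worked example of the backwards scan above (illustrative content): for a
# log of b"one\ntwo\nthree\n" with self._log_lines = 2, the rfind() loop
# starts searching just before the trailing newline, finds the newlines at
# offsets 7 and 3, stops with end = 3, and log[end + 1:] yields the final
# two lines "two" and "three".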
def _format_plugins(self, element_plugins, source_plugins):
text = ""
if not (element_plugins or source_plugins):
return text
text += self.content_profile.fmt("Loaded Plugins\n", bold=True)
if element_plugins:
text += self.format_profile.fmt(" Element Plugins\n")
for plugin in element_plugins:
text += self.content_profile.fmt(" - {}\n".format(plugin))
if source_plugins:
text += self.format_profile.fmt(" Source Plugins\n")
for plugin in source_plugins:
text += self.content_profile.fmt(" - {}\n".format(plugin))
text += '\n'
return text
# _format_values()
#
# Formats an indented dictionary of titles / values, ensuring
# the values are aligned.
#
# Args:
# values: A dictionary, usually an OrderedDict()
# style_value: Whether to use the content profile for the values
#
# Returns:
# (str): The formatted values
#
def _format_values(self, values, style_value=True):
text = ''
max_key_len = 0
for key, value in values.items():
max_key_len = max(len(key), max_key_len)
for key, value in values.items():
if isinstance(value, str) and '\n' in value:
text += self.format_profile.fmt(" {}:\n".format(key))
text += textwrap.indent(value, self._indent)
continue
text += self.format_profile.fmt(" {}: {}".format(key, ' ' * (max_key_len - len(key))))
if style_value:
text += self.content_profile.fmt(str(value))
else:
text += str(value)
text += '\n'
return text
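# For example (illustrative values), formatting an OrderedDict of
# {'Project': 'demo', 'Cache Usage': '12K'} produces:
#
#   Project:     demo
#   Cache Usage: 12K
#
# with every value aligned to the longest key.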
buildstream-1.6.9/buildstream/_fuse/ 0000775 0000000 0000000 00000000000 14375152700 0017462 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_fuse/__init__.py 0000664 0000000 0000000 00000001470 14375152700 0021575 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .hardlinks import SafeHardlinks
buildstream-1.6.9/buildstream/_fuse/fuse.py 0000664 0000000 0000000 00000076520 14375152700 0021010 0 ustar 00root root 0000000 0000000 # This is an embedded copy of fuse.py taken from the following upstream commit:
#
# https://github.com/terencehonles/fusepy/commit/0eafeb557e0e70926ed9450008ef17057d302391
#
# Our local modifications are recorded in the Git history of this repo.
# Copyright (c) 2012 Terence Honles (maintainer)
# Copyright (c) 2008 Giorgos Verigakis (author)
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# pylint: skip-file
from __future__ import print_function, absolute_import, division
from ctypes import *
from ctypes.util import find_library
from errno import *
from os import strerror
from platform import machine, system
from signal import signal, SIGINT, SIG_DFL
from stat import S_IFDIR
from traceback import print_exc
import logging
try:
from functools import partial
except ImportError:
# http://docs.python.org/library/functools.html#functools.partial
def partial(func, *args, **keywords):
def newfunc(*fargs, **fkeywords):
newkeywords = keywords.copy()
newkeywords.update(fkeywords)
return func(*(args + fargs), **newkeywords)
newfunc.func = func
newfunc.args = args
newfunc.keywords = keywords
return newfunc
try:
basestring
except NameError:
basestring = str
class c_timespec(Structure):
_fields_ = [('tv_sec', c_long), ('tv_nsec', c_long)]
class c_utimbuf(Structure):
_fields_ = [('actime', c_timespec), ('modtime', c_timespec)]
class c_stat(Structure):
pass # Platform dependent
_system = system()
_machine = machine()
if _system == 'Darwin':
_libiconv = CDLL(find_library('iconv'), RTLD_GLOBAL) # libfuse dependency
_libfuse_path = (find_library('fuse4x') or find_library('osxfuse') or
find_library('fuse'))
else:
_libfuse_path = find_library('fuse')
if not _libfuse_path:
raise EnvironmentError('Unable to find libfuse')
else:
_libfuse = CDLL(_libfuse_path)
if _system == 'Darwin' and hasattr(_libfuse, 'macfuse_version'):
_system = 'Darwin-MacFuse'
if _system in ('Darwin', 'Darwin-MacFuse', 'FreeBSD'):
ENOTSUP = 45
c_dev_t = c_int32
c_fsblkcnt_t = c_ulong
c_fsfilcnt_t = c_ulong
c_gid_t = c_uint32
c_mode_t = c_uint16
c_off_t = c_int64
c_pid_t = c_int32
c_uid_t = c_uint32
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int, c_uint32)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_uint32)
if _system == 'Darwin':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_ino', c_uint64),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_birthtimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32),
('st_flags', c_int32),
('st_gen', c_int32),
('st_lspare', c_int32),
('st_qspare', c_int64)]
else:
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_uint32),
('st_mode', c_mode_t),
('st_nlink', c_uint16),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_size', c_off_t),
('st_blocks', c_int64),
('st_blksize', c_int32)]
elif _system == 'Linux':
ENOTSUP = 95
c_dev_t = c_ulonglong
c_fsblkcnt_t = c_ulonglong
c_fsfilcnt_t = c_ulonglong
c_gid_t = c_uint
c_mode_t = c_uint
c_off_t = c_longlong
c_pid_t = c_int
c_uid_t = c_uint
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t)
if _machine == 'x86_64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad0', c_int),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'mips':
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1_1', c_ulong),
('__pad1_2', c_ulong),
('__pad1_3', c_ulong),
('st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_ulong),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2_1', c_ulong),
('__pad2_2', c_ulong),
('st_size', c_off_t),
('__pad3', c_ulong),
('st_atimespec', c_timespec),
('__pad4', c_ulong),
('st_mtimespec', c_timespec),
('__pad5', c_ulong),
('st_ctimespec', c_timespec),
('__pad6', c_ulong),
('st_blksize', c_long),
('st_blocks', c_long),
('__pad7_1', c_ulong),
('__pad7_2', c_ulong),
('__pad7_3', c_ulong),
('__pad7_4', c_ulong),
('__pad7_5', c_ulong),
('__pad7_6', c_ulong),
('__pad7_7', c_ulong),
('__pad7_8', c_ulong),
('__pad7_9', c_ulong),
('__pad7_10', c_ulong),
('__pad7_11', c_ulong),
('__pad7_12', c_ulong),
('__pad7_13', c_ulong),
('__pad7_14', c_ulong)]
elif _machine == 'ppc':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulonglong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'ppc64' or _machine == 'ppc64le':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_nlink', c_ulong),
('st_mode', c_mode_t),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('__pad', c_uint),
('st_rdev', c_dev_t),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
elif _machine == 'aarch64':
c_stat._fields_ = [
('st_dev', c_dev_t),
('st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad1', c_ulong),
('st_size', c_off_t),
('st_blksize', c_int),
('__pad2', c_int),
('st_blocks', c_long),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec)]
else:
# i686, use as fallback for everything else
c_stat._fields_ = [
('st_dev', c_dev_t),
('__pad1', c_ushort),
('__st_ino', c_ulong),
('st_mode', c_mode_t),
('st_nlink', c_uint),
('st_uid', c_uid_t),
('st_gid', c_gid_t),
('st_rdev', c_dev_t),
('__pad2', c_ushort),
('st_size', c_off_t),
('st_blksize', c_long),
('st_blocks', c_longlong),
('st_atimespec', c_timespec),
('st_mtimespec', c_timespec),
('st_ctimespec', c_timespec),
('st_ino', c_ulonglong)]
else:
raise NotImplementedError('{} is not supported.'.format(_system))
class c_statvfs(Structure):
_fields_ = [
('f_bsize', c_ulong),
('f_frsize', c_ulong),
('f_blocks', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_bavail', c_fsblkcnt_t),
('f_files', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_favail', c_fsfilcnt_t),
('f_fsid', c_ulong),
#('unused', c_int),
('f_flag', c_ulong),
('f_namemax', c_ulong)]
if _system == 'FreeBSD':
c_fsblkcnt_t = c_uint64
c_fsfilcnt_t = c_uint64
setxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t, c_int)
getxattr_t = CFUNCTYPE(c_int, c_char_p, c_char_p, POINTER(c_byte),
c_size_t)
class c_statvfs(Structure):
_fields_ = [
('f_bavail', c_fsblkcnt_t),
('f_bfree', c_fsblkcnt_t),
('f_blocks', c_fsblkcnt_t),
('f_favail', c_fsfilcnt_t),
('f_ffree', c_fsfilcnt_t),
('f_files', c_fsfilcnt_t),
('f_bsize', c_ulong),
('f_flag', c_ulong),
('f_frsize', c_ulong)]
class fuse_file_info(Structure):
_fields_ = [
('flags', c_int),
('fh_old', c_ulong),
('writepage', c_int),
('direct_io', c_uint, 1),
('keep_cache', c_uint, 1),
('flush', c_uint, 1),
('padding', c_uint, 29),
('fh', c_uint64),
('lock_owner', c_uint64)]
class fuse_context(Structure):
_fields_ = [
('fuse', c_voidp),
('uid', c_uid_t),
('gid', c_gid_t),
('pid', c_pid_t),
('private_data', c_voidp)]
_libfuse.fuse_get_context.restype = POINTER(fuse_context)
class fuse_operations(Structure):
_fields_ = [
('getattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat))),
('readlink', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('getdir', c_voidp), # Deprecated, use readdir
('mknod', CFUNCTYPE(c_int, c_char_p, c_mode_t, c_dev_t)),
('mkdir', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('unlink', CFUNCTYPE(c_int, c_char_p)),
('rmdir', CFUNCTYPE(c_int, c_char_p)),
('symlink', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('rename', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('link', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('chmod', CFUNCTYPE(c_int, c_char_p, c_mode_t)),
('chown', CFUNCTYPE(c_int, c_char_p, c_uid_t, c_gid_t)),
('truncate', CFUNCTYPE(c_int, c_char_p, c_off_t)),
('utime', c_voidp), # Deprecated, use utimens
('open', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('read', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
c_off_t, POINTER(fuse_file_info))),
('write', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t,
c_off_t, POINTER(fuse_file_info))),
('statfs', CFUNCTYPE(c_int, c_char_p, POINTER(c_statvfs))),
('flush', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('release', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsync', CFUNCTYPE(c_int, c_char_p, c_int, POINTER(fuse_file_info))),
('setxattr', setxattr_t),
('getxattr', getxattr_t),
('listxattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_byte), c_size_t)),
('removexattr', CFUNCTYPE(c_int, c_char_p, c_char_p)),
('opendir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('readdir', CFUNCTYPE(c_int, c_char_p, c_voidp,
CFUNCTYPE(c_int, c_voidp, c_char_p,
POINTER(c_stat), c_off_t),
c_off_t, POINTER(fuse_file_info))),
('releasedir', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info))),
('fsyncdir', CFUNCTYPE(c_int, c_char_p, c_int,
POINTER(fuse_file_info))),
('init', CFUNCTYPE(c_voidp, c_voidp)),
('destroy', CFUNCTYPE(c_voidp, c_voidp)),
('access', CFUNCTYPE(c_int, c_char_p, c_int)),
('create', CFUNCTYPE(c_int, c_char_p, c_mode_t,
POINTER(fuse_file_info))),
('ftruncate', CFUNCTYPE(c_int, c_char_p, c_off_t,
POINTER(fuse_file_info))),
('fgetattr', CFUNCTYPE(c_int, c_char_p, POINTER(c_stat),
POINTER(fuse_file_info))),
('lock', CFUNCTYPE(c_int, c_char_p, POINTER(fuse_file_info),
c_int, c_voidp)),
('utimens', CFUNCTYPE(c_int, c_char_p, POINTER(c_utimbuf))),
('bmap', CFUNCTYPE(c_int, c_char_p, c_size_t, POINTER(c_ulonglong))),
('flag_nullpath_ok', c_uint, 1),
('flag_nopath', c_uint, 1),
('flag_utime_omit_ok', c_uint, 1),
('flag_reserved', c_uint, 29),
]
def time_of_timespec(ts):
return ts.tv_sec + ts.tv_nsec / 10 ** 9
def set_st_attrs(st, attrs):
for key, val in attrs.items():
if key in ('st_atime', 'st_mtime', 'st_ctime', 'st_birthtime'):
timespec = getattr(st, key + 'spec', None)
if timespec is None:
continue
timespec.tv_sec = int(val)
timespec.tv_nsec = int((val - timespec.tv_sec) * 10 ** 9)
elif hasattr(st, key):
setattr(st, key, val)
def fuse_get_context():
'Returns a (uid, gid, pid) tuple'
ctxp = _libfuse.fuse_get_context()
ctx = ctxp.contents
return ctx.uid, ctx.gid, ctx.pid
class FuseOSError(OSError):
def __init__(self, errno):
super(FuseOSError, self).__init__(errno, strerror(errno))
class FUSE(object):
'''
This class is the lower level interface and should not be subclassed under
normal use. Its methods are called by fuse.
Assumes API version 2.6 or later.
'''
OPTIONS = (
('foreground', '-f'),
('debug', '-d'),
('nothreads', '-s'),
)
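# A minimal usage sketch (the Operations subclass and mountpoint are
# hypothetical); constructing FUSE() mounts the filesystem and blocks
# until it is unmounted:
#
#   ops = MyOperations()
#   FUSE(ops, '/mnt/example', foreground=True, nothreads=True)
#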
def __init__(self, operations, mountpoint, raw_fi=False, encoding='utf-8',
**kwargs):
'''
Setting raw_fi to True will cause FUSE to pass the fuse_file_info
class as is to Operations, instead of just the fh field.
This gives you access to direct_io, keep_cache, etc.
'''
self.operations = operations
self.raw_fi = raw_fi
self.encoding = encoding
args = ['fuse']
args.extend(flag for arg, flag in self.OPTIONS
if kwargs.pop(arg, False))
kwargs.setdefault('fsname', operations.__class__.__name__)
args.append('-o')
args.append(','.join(self._normalize_fuse_options(**kwargs)))
args.append(mountpoint)
args = [arg.encode(encoding) for arg in args]
argv = (c_char_p * len(args))(*args)
fuse_ops = fuse_operations()
for ent in fuse_operations._fields_:
name, prototype = ent[:2]
val = getattr(operations, name, None)
if val is None:
continue
# Function pointer members are tested for using the
# getattr(operations, name) above but are dynamically
# invoked using self.operations(name)
if hasattr(prototype, 'argtypes'):
val = prototype(partial(self._wrapper, getattr(self, name)))
setattr(fuse_ops, name, val)
try:
old_handler = signal(SIGINT, SIG_DFL)
except ValueError:
old_handler = SIG_DFL
err = _libfuse.fuse_main_real(len(args), argv, pointer(fuse_ops),
sizeof(fuse_ops), None)
try:
signal(SIGINT, old_handler)
except ValueError:
pass
del self.operations # Invoke the destructor
if err:
raise RuntimeError(err)
@staticmethod
def _normalize_fuse_options(**kargs):
for key, value in kargs.items():
if isinstance(value, bool):
if value is True: yield key
else:
yield '{}={}'.format(key, value)
@staticmethod
def _wrapper(func, *args, **kwargs):
'Decorator for the methods that follow'
try:
return func(*args, **kwargs) or 0
except OSError as e:
return -(e.errno or EFAULT)
except:
print_exc()
return -EFAULT
def _decode_optional_path(self, path):
# NB: this method is intended for fuse operations that
# allow the path argument to be NULL,
# *not* as a generic path decoding method
if path is None:
return None
return path.decode(self.encoding)
def getattr(self, path, buf):
return self.fgetattr(path, buf, None)
def readlink(self, path, buf, bufsize):
ret = self.operations('readlink', path.decode(self.encoding)) \
.encode(self.encoding)
# copies a string into the given buffer
# (null terminated and truncated if necessary)
data = create_string_buffer(ret[:bufsize - 1])
memmove(buf, data, len(data))
return 0
def mknod(self, path, mode, dev):
return self.operations('mknod', path.decode(self.encoding), mode, dev)
def mkdir(self, path, mode):
return self.operations('mkdir', path.decode(self.encoding), mode)
def unlink(self, path):
return self.operations('unlink', path.decode(self.encoding))
def rmdir(self, path):
return self.operations('rmdir', path.decode(self.encoding))
def symlink(self, source, target):
'creates a symlink `target -> source` (e.g. ln -s source target)'
return self.operations('symlink', target.decode(self.encoding),
source.decode(self.encoding))
def rename(self, old, new):
return self.operations('rename', old.decode(self.encoding),
new.decode(self.encoding))
def link(self, source, target):
'creates a hard link `target -> source` (e.g. ln source target)'
return self.operations('link', target.decode(self.encoding),
source.decode(self.encoding))
def chmod(self, path, mode):
return self.operations('chmod', path.decode(self.encoding), mode)
def chown(self, path, uid, gid):
# Check if any of the arguments is a -1 that has overflowed
if c_uid_t(uid + 1).value == 0:
uid = -1
if c_gid_t(gid + 1).value == 0:
gid = -1
return self.operations('chown', path.decode(self.encoding), uid, gid)
def truncate(self, path, length):
return self.operations('truncate', path.decode(self.encoding), length)
def open(self, path, fip):
fi = fip.contents
if self.raw_fi:
return self.operations('open', path.decode(self.encoding), fi)
else:
fi.fh = self.operations('open', path.decode(self.encoding),
fi.flags)
return 0
def read(self, path, buf, size, offset, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
ret = self.operations('read', self._decode_optional_path(path), size,
offset, fh)
if not ret: return 0
retsize = len(ret)
assert retsize <= size, \
'actual amount read {:d} greater than expected {:d}'.format(retsize, size)
data = create_string_buffer(ret, retsize)
memmove(buf, data, retsize)
return retsize
def write(self, path, buf, size, offset, fip):
data = string_at(buf, size)
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('write', self._decode_optional_path(path), data,
offset, fh)
def statfs(self, path, buf):
stv = buf.contents
attrs = self.operations('statfs', path.decode(self.encoding))
for key, val in attrs.items():
if hasattr(stv, key):
setattr(stv, key, val)
return 0
def flush(self, path, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('flush', self._decode_optional_path(path), fh)
def release(self, path, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('release', self._decode_optional_path(path), fh)
def fsync(self, path, datasync, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('fsync', self._decode_optional_path(path), datasync,
fh)
def setxattr(self, path, name, value, size, options, *args):
return self.operations('setxattr', path.decode(self.encoding),
name.decode(self.encoding),
string_at(value, size), options, *args)
def getxattr(self, path, name, value, size, *args):
ret = self.operations('getxattr', path.decode(self.encoding),
name.decode(self.encoding), *args)
retsize = len(ret)
# allow size queries
if not value: return retsize
# do not truncate
if retsize > size: return -ERANGE
buf = create_string_buffer(ret, retsize) # Does not add trailing 0
memmove(value, buf, retsize)
return retsize
def listxattr(self, path, namebuf, size):
attrs = self.operations('listxattr', path.decode(self.encoding)) or ''
ret = '\x00'.join(attrs).encode(self.encoding)
if len(ret) > 0:
ret += '\x00'.encode(self.encoding)
retsize = len(ret)
# allow size queries
if not namebuf: return retsize
# do not truncate
if retsize > size: return -ERANGE
buf = create_string_buffer(ret, retsize)
memmove(namebuf, buf, retsize)
return retsize
def removexattr(self, path, name):
return self.operations('removexattr', path.decode(self.encoding),
name.decode(self.encoding))
def opendir(self, path, fip):
# Ignore raw_fi
fip.contents.fh = self.operations('opendir',
path.decode(self.encoding))
return 0
def readdir(self, path, buf, filler, offset, fip):
# Ignore raw_fi
for item in self.operations('readdir', self._decode_optional_path(path),
fip.contents.fh):
if isinstance(item, basestring):
name, st, offset = item, None, 0
else:
name, attrs, offset = item
if attrs:
st = c_stat()
set_st_attrs(st, attrs)
else:
st = None
if filler(buf, name.encode(self.encoding), st, offset) != 0:
break
return 0
def releasedir(self, path, fip):
# Ignore raw_fi
return self.operations('releasedir', self._decode_optional_path(path),
fip.contents.fh)
def fsyncdir(self, path, datasync, fip):
# Ignore raw_fi
return self.operations('fsyncdir', self._decode_optional_path(path),
datasync, fip.contents.fh)
def init(self, conn):
return self.operations('init', '/')
def destroy(self, private_data):
return self.operations('destroy', '/')
def access(self, path, amode):
return self.operations('access', path.decode(self.encoding), amode)
def create(self, path, mode, fip):
fi = fip.contents
path = path.decode(self.encoding)
if self.raw_fi:
return self.operations('create', path, mode, fi)
else:
# This line is different from upstream to fix issues
# reading files opened with O_CREAT|O_RDWR.
# See issue #143.
fi.fh = self.operations('create', path, mode, fi.flags)
# END OF MODIFICATION
return 0
def ftruncate(self, path, length, fip):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('truncate', self._decode_optional_path(path),
length, fh)
def fgetattr(self, path, buf, fip):
memset(buf, 0, sizeof(c_stat))
st = buf.contents
if not fip:
fh = fip
elif self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
attrs = self.operations('getattr', self._decode_optional_path(path), fh)
set_st_attrs(st, attrs)
return 0
def lock(self, path, fip, cmd, lock):
if self.raw_fi:
fh = fip.contents
else:
fh = fip.contents.fh
return self.operations('lock', self._decode_optional_path(path), fh, cmd,
lock)
def utimens(self, path, buf):
if buf:
atime = time_of_timespec(buf.contents.actime)
mtime = time_of_timespec(buf.contents.modtime)
times = (atime, mtime)
else:
times = None
return self.operations('utimens', path.decode(self.encoding), times)
def bmap(self, path, blocksize, idx):
return self.operations('bmap', path.decode(self.encoding), blocksize,
idx)
class Operations(object):
'''
This class should be subclassed and passed as an argument to FUSE on
initialization. All operations should raise a FuseOSError exception on
error.
When in doubt of what an operation should do, check the FUSE header file
or the corresponding system call man page.
'''
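# A minimal subclass sketch (names and listing content are illustrative);
# a real filesystem would also override getattr(), read() etc. so that the
# listed entries can actually be stat'ed and read:
#
#   class ListingFS(Operations):
#       def readdir(self, path, fh):
#           return ['.', '..', 'hello']
#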
def __call__(self, op, *args):
if not hasattr(self, op):
raise FuseOSError(EFAULT)
return getattr(self, op)(*args)
def access(self, path, amode):
return 0
bmap = None
def chmod(self, path, mode):
raise FuseOSError(EROFS)
def chown(self, path, uid, gid):
raise FuseOSError(EROFS)
def create(self, path, mode, fi=None):
'''
When raw_fi is False (default case), fi is None and create should
return a numerical file handle.
When raw_fi is True the file handle should be set directly by create
and return 0.
'''
raise FuseOSError(EROFS)
def destroy(self, path):
'Called on filesystem destruction. Path is always /'
pass
def flush(self, path, fh):
return 0
def fsync(self, path, datasync, fh):
return 0
def fsyncdir(self, path, datasync, fh):
return 0
def getattr(self, path, fh=None):
'''
Returns a dictionary with keys identical to the stat C structure of
stat(2).
st_atime, st_mtime and st_ctime should be floats.
NOTE: There is an incompatibility between Linux and Mac OS X
concerning st_nlink of directories. Mac OS X counts all files inside
the directory, while Linux counts only the subdirectories.
'''
if path != '/':
raise FuseOSError(ENOENT)
return dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
def getxattr(self, path, name, position=0):
raise FuseOSError(ENOTSUP)
def init(self, path):
'''
Called on filesystem initialization. (Path is always /)
Use it instead of __init__ if you start threads on initialization.
'''
pass
def link(self, target, source):
'creates a hard link `target -> source` (e.g. ln source target)'
raise FuseOSError(EROFS)
def listxattr(self, path):
return []
lock = None
def mkdir(self, path, mode):
raise FuseOSError(EROFS)
def mknod(self, path, mode, dev):
raise FuseOSError(EROFS)
def open(self, path, flags):
'''
When raw_fi is False (default case), open should return a numerical
file handle.
When raw_fi is True the signature of open becomes:
open(self, path, fi)
and the file handle should be set directly.
'''
return 0
def opendir(self, path):
'Returns a numerical file handle.'
return 0
def read(self, path, size, offset, fh):
'Returns a string containing the data requested.'
raise FuseOSError(EIO)
def readdir(self, path, fh):
'''
Can return either a list of names, or a list of (name, attrs, offset)
tuples. attrs is a dict as in getattr.
'''
return ['.', '..']
def readlink(self, path):
raise FuseOSError(ENOENT)
def release(self, path, fh):
return 0
def releasedir(self, path, fh):
return 0
def removexattr(self, path, name):
raise FuseOSError(ENOTSUP)
def rename(self, old, new):
raise FuseOSError(EROFS)
def rmdir(self, path):
raise FuseOSError(EROFS)
def setxattr(self, path, name, value, options, position=0):
raise FuseOSError(ENOTSUP)
def statfs(self, path):
'''
Returns a dictionary with keys identical to the statvfs C structure of
statvfs(3).
On Mac OS X f_bsize and f_frsize must be a power of 2
(minimum 512).
'''
return {}
def symlink(self, target, source):
'creates a symlink `target -> source` (e.g. ln -s source target)'
raise FuseOSError(EROFS)
def truncate(self, path, length, fh=None):
raise FuseOSError(EROFS)
def unlink(self, path):
raise FuseOSError(EROFS)
def utimens(self, path, times=None):
'Times is a (atime, mtime) tuple. If None use current time.'
return 0
def write(self, path, data, offset, fh):
raise FuseOSError(EROFS)
class LoggingMixIn:
log = logging.getLogger('fuse.log-mixin')
def __call__(self, op, path, *args):
self.log.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
self.log.debug('<- %s %s', op, repr(ret))
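# A rough usage sketch for LoggingMixIn (class name hypothetical):
# listing it before Operations makes its __call__ intercept every
# operation, logging the call, its arguments and its result.
#
#     import logging
#     logging.basicConfig(level=logging.DEBUG)
#
#     class TracedFS(LoggingMixIn, Operations):
#         pass
#
#     # FUSE(TracedFS(), '/mnt/traced', foreground=True)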
buildstream-1.6.9/buildstream/_fuse/hardlinks.py 0000664 0000000 0000000 00000016307 14375152700 0022022 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Stavros Korokithakis
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
#
# The filesystem operations implementation here is based
# on some example code written by Stavros Korokithakis.
import errno
import os
import shutil
import stat
import tempfile
from .fuse import FuseOSError, Operations
from .mount import Mount
# SafeHardlinks()
#
# A FUSE mount which implements a copy on write hardlink experience.
#
# Args:
# root (str): The underlying filesystem path to mirror
# tmp (str): A directory on the same filesystem for creating temp files
#
class SafeHardlinks(Mount):
def __init__(self, directory, tempdir):
self.directory = directory
self.tempdir = tempdir
def create_operations(self):
return SafeHardlinkOps(self.directory, self.tempdir)
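# A rough usage sketch (paths hypothetical), assuming both directories
# live on the same filesystem, since the copy-on-write rename performed
# in SafeHardlinkOps._ensure_copy() below relies on that:
#
#     mount = SafeHardlinks('/path/to/artifact-dir', '/path/to/temp-dir')
#     with mount.mounted('/path/to/mountpoint'):
#         pass  # writes here break hardlinks instead of mutating originals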
# SafeHardlinkOps()
#
# The actual FUSE Operations implementation below.
#
class SafeHardlinkOps(Operations):
def __init__(self, root, tmp):
self.root = root
self.tmp = tmp
def _full_path(self, partial):
if partial.startswith("/"):
partial = partial[1:]
path = os.path.join(self.root, partial)
return path
def _ensure_copy(self, full_path, follow_symlinks=True):
try:
if follow_symlinks:
# Follow symbolic links manually here
real_path = os.path.realpath(full_path)
else:
real_path = full_path
file_stat = os.stat(real_path, follow_symlinks=False)
# Skip the file if it's not a hardlink
if file_stat.st_nlink <= 1:
return
# For some reason directories may have st_nlink > 1, but they
# cannot be hardlinked, so just ignore those.
#
if not stat.S_ISDIR(file_stat.st_mode):
with tempfile.TemporaryDirectory(dir=self.tmp) as tempdir:
basename = os.path.basename(real_path)
temp_path = os.path.join(tempdir, basename)
# First copy, then unlink origin and rename
shutil.copy2(real_path, temp_path, follow_symlinks=False)
os.unlink(real_path)
os.rename(temp_path, real_path)
except FileNotFoundError:
# This doesn't exist yet, assume we're about to create it
# so it's not a problem.
pass
###########################################################
# Fuse Methods #
###########################################################
def access(self, path, amode):
full_path = self._full_path(path)
if not os.access(full_path, amode):
raise FuseOSError(errno.EACCES)
def chmod(self, path, mode):
full_path = self._full_path(path)
# Ensure copies on chmod
self._ensure_copy(full_path)
return os.chmod(full_path, mode)
def chown(self, path, uid, gid):
full_path = self._full_path(path)
# Ensure copies on chown
self._ensure_copy(full_path, follow_symlinks=False)
return os.chown(full_path, uid, gid, follow_symlinks=False)
def getattr(self, path, fh=None):
full_path = self._full_path(path)
st = os.lstat(full_path)
return dict((key, getattr(st, key)) for key in (
'st_atime', 'st_ctime', 'st_gid', 'st_mode',
'st_mtime', 'st_nlink', 'st_size', 'st_uid',
'st_ino'))
def readdir(self, path, fh):
full_path = self._full_path(path)
dir_entries = ['.', '..']
if os.path.isdir(full_path):
dir_entries.extend(os.listdir(full_path))
for entry in dir_entries:
entry_full_path = os.path.join(full_path, entry)
st = os.stat(entry_full_path, follow_symlinks=False)
attrs = dict((key, getattr(st, key)) for key in (
'st_ino', 'st_mode'))
yield entry, attrs, 0
def readlink(self, path):
pathname = os.readlink(self._full_path(path))
if pathname.startswith("/"):
# Path name is absolute, sanitize it.
return os.path.relpath(pathname, self.root)
else:
return pathname
def mknod(self, path, mode, dev):
return os.mknod(self._full_path(path), mode, dev)
def rmdir(self, path):
full_path = self._full_path(path)
return os.rmdir(full_path)
def mkdir(self, path, mode):
return os.mkdir(self._full_path(path), mode)
def statfs(self, path):
full_path = self._full_path(path)
stv = os.statvfs(full_path)
return dict((key, getattr(stv, key)) for key in (
'f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax'))
def unlink(self, path):
return os.unlink(self._full_path(path))
def symlink(self, target, source):
return os.symlink(source, self._full_path(target))
def rename(self, old, new):
return os.rename(self._full_path(old), self._full_path(new))
def link(self, target, source):
# When creating a hard link here, should we ensure the original
# file is not a hardlink itself first ?
#
return os.link(self._full_path(source), self._full_path(target))
def utimens(self, path, times=None):
return os.utime(self._full_path(path), times)
def open(self, path, flags):
full_path = self._full_path(path)
# If we're opening for writing, ensure it's a copy first
if flags & os.O_WRONLY or flags & os.O_RDWR:
self._ensure_copy(full_path)
return os.open(full_path, flags)
def create(self, path, mode, fi=None):
full_path = self._full_path(path)
# If it already exists, ensure it's a copy first
self._ensure_copy(full_path)
return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)
def read(self, path, size, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.read(fh, size)
def write(self, path, data, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.write(fh, data)
def truncate(self, path, length, fh=None):
full_path = self._full_path(path)
with open(full_path, 'r+', encoding='utf-8') as f:
f.truncate(length)
def flush(self, path, fh):
return os.fsync(fh)
def release(self, path, fh):
return os.close(fh)
def fsync(self, path, datasync, fh):
return self.flush(path, fh)
buildstream-1.6.9/buildstream/_fuse/mount.py 0000664 0000000 0000000 00000014733 14375152700 0021206 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
import signal
import time
import sys
from contextlib import contextmanager
from multiprocessing import Process
from .fuse import FUSE
from .._exceptions import ImplError
from .. import _signals
# Just a custom exception to raise here, for identifying possible
# bugs with a fuse layer implementation
#
class FuseMountError(Exception):
pass
# This is a convenience class which takes care of synchronizing the
# startup of FUSE and shutting it down.
#
# The implementations / subclasses should:
#
# - Overload the instance initializer to add any parameters
# needed for their fuse Operations implementation
#
# - Implement create_operations() to create the Operations
# instance on behalf of the superclass, using any additional
# parameters collected in the initializer.
#
# Mount objects can be treated as contextmanagers, the volume
# will be mounted during the context.
#
# UGLY CODE NOTE:
#
# This is a horrible little piece of code. The problem we face
# here is that the highlevel libfuse API has fuse_main(), which
# will either block in the foreground, or become a full daemon.
#
# With the daemon approach, we know that the fuse is mounted right
# away when fuse_main() returns, then the daemon will go and handle
# requests on its own, but then we have no way to shut down the
# daemon.
#
# With the blocking approach, we still have it as a child process
# so we can tell it to gracefully terminate; but it's impossible
# to know when the mount is done; there is no callback for that.
#
# The solution we use here without digging too deep into the
# low level fuse API, is to fork a child process which will
# run the fuse loop in the foreground, and we block the parent
# process until the volume is mounted with a busy loop with timeouts.
#
class Mount():
# These are not really class data, they are
# just here for the sake of having None set up instead
# of missing attributes, since we do not provide any
# initializer and leave the initializer to the subclass.
#
__mountpoint = None
__operations = None
__process = None
################################################
# User Facing API #
################################################
# mount():
#
# User facing API for mounting a fuse subclass implementation
#
# Args:
# (str): Location to mount this fuse fs
#
def mount(self, mountpoint):
assert self.__process is None
self.__mountpoint = mountpoint
self.__process = Process(target=self.__run_fuse)
# Ensure the child fork() does not inherit our signal handlers, if the
# child wants to handle a signal then it will first set it's own
# handler, and then unblock it.
with _signals.blocked([signal.SIGTERM, signal.SIGTSTP, signal.SIGINT], ignore=False):
self.__process.start()
# This is horrible, we're going to wait until mountpoint is mounted and that's it.
while not os.path.ismount(mountpoint):
time.sleep(1 / 100)
# unmount():
#
# User facing API for unmounting a fuse subclass implementation
#
def unmount(self):
# Terminate child process and join
if self.__process is not None:
self.__process.terminate()
self.__process.join()
# Report an error if ever the underlying operations crashed for some reason.
if self.__process.exitcode != 0:
raise FuseMountError("{} reported exit code {} when unmounting"
.format(type(self).__name__, self.__process.exitcode))
self.__mountpoint = None
self.__process = None
# mounted():
#
# A context manager to run a code block with this fuse Mount
# mounted, this will take care of automatically unmounting
# in the case that the calling process is terminated.
#
# Args:
# (str): Location to mount this fuse fs
#
@contextmanager
def mounted(self, mountpoint):
self.mount(mountpoint)
try:
with _signals.terminator(self.unmount):
yield
finally:
self.unmount()
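# A rough usage sketch of this context manager, with a hypothetical
# Mount subclass that implements create_operations():
#
#     class MyMount(Mount):
#         def __init__(self, root):
#             self.root = root
#         def create_operations(self):
#             return MyOperations(self.root)  # some Operations subclass
#
#     with MyMount('/some/root').mounted('/some/mountpoint'):
#         pass  # the volume is mounted for the duration of this block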
################################################
# Abstract Methods #
################################################
# create_operations():
#
# Create an Operations class (from fusepy) and return it
#
# Returns:
# (Operations): A FUSE Operations implementation
def create_operations(self):
raise ImplError("Mount subclass '{}' did not implement create_operations()"
.format(type(self).__name__))
################################################
# Child Process #
################################################
def __run_fuse(self):
# First become session leader while signals are still blocked
#
# Then reset the SIGTERM handler to the default and finally
# unblock SIGTERM.
#
os.setsid()
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGTERM])
# Ask the subclass to give us an Operations object
#
self.__operations = self.create_operations()
# Run fuse in foreground in this child process, internally libfuse
# will handle SIGTERM and gracefully exit its own little main loop.
#
FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, use_ino=True)
# Explicit 0 exit code, if the operations crashed for some reason, the exit
# code will not be 0, and we want to know about it.
#
sys.exit(0)
buildstream-1.6.9/buildstream/_includes.py 0000664 0000000 0000000 00000016763 14375152700 0020715 0 ustar 00root root 0000000 0000000 import os
from collections.abc import Mapping
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason
# Includes()
#
# This takes care of processing include directives "(@)".
#
# Args:
# loader (Loader): The Loader object
# copy_tree (bool): Whether to make a copy of the tree in
# provenance. Should be true if intended to be
# serialized.
class Includes:
def __init__(self, loader, *, copy_tree=False):
self._loader = loader
self._loaded = {}
self._copy_tree = copy_tree
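# A rough sketch of the include directive this class processes, with
# hypothetical file names. In a project.conf or .bst fragment:
#
#     (@):
#     - includes/common.yml
#     - subproject-junction.bst:includes/defaults.yml
#
#     variables:
#       prefix: /usr
#
# Each listed file is loaded (the second form reads it from a
# junctioned subproject), composited into the including node, and
# the '(@)' key itself is removed.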
# process()
#
# Recursively process include directives in a YAML node.
#
# Args:
# node (dict): A YAML node
# only_local (bool): Whether to ignore junction files
# process_project_options (bool): Whether to process options from current project
#
def process(self, node, *, only_local=False, process_project_options=True):
self._process(node, only_local=only_local, process_project_options=process_project_options)
# _process()
#
# Recursively process include directives in a YAML node. This
# method is called recursively on nodes loaded from files.
#
# Args:
# node (dict): A YAML node
# included (set): Fail for recursion if trying to load any files in this set
# current_loader (Loader): Use alternative loader (for junction files)
# only_local (bool): Whether to ignore junction files
# process_project_options (bool): Whether to process options from current project
#
def _process(self, node, *, included=None, current_loader=None, only_local=False, process_project_options=True):
if included is None:
included = set()
if current_loader is None:
current_loader = self._loader
if process_project_options:
current_loader.project.options.process_node(node)
self._process_node(
node,
included=included,
only_local=only_local,
current_loader=current_loader,
process_project_options=process_project_options,
)
# _process_node()
#
# Recursively process include directives in a YAML node. This
# method is recursively called on all nodes.
#
# Args:
# node (dict): A YAML node
# included (set): Fail for recursion if trying to load any files in this set
# current_loader (Loader): Use alternative loader (for junction files)
# only_local (bool): Whether to ignore junction files
# process_project_options (bool): Whether to process options from current project
#
def _process_node(
self, node, *, included=None, current_loader=None, only_local=False, process_project_options=True
):
if included is None:
included = set()
if isinstance(node.get('(@)'), str):
includes = [_yaml.node_get(node, str, '(@)')]
else:
includes = _yaml.node_get(node, list, '(@)', default_value=None)
if '(@)' in node:
del node['(@)']
if includes:
for include in reversed(includes):
if only_local and ':' in include:
continue
include_node, file_path, sub_loader = self._include_file(include,
current_loader)
if file_path in included:
provenance = _yaml.node_get_provenance(node)
raise LoadError(LoadErrorReason.RECURSIVE_INCLUDE,
"{}: trying to recursively include {}". format(provenance,
file_path))
# Because the included node will be modified, we need
# to copy it so that we do not modify the toplevel
# node of the provenance.
include_node = _yaml.node_chain_copy(include_node)
try:
included.add(file_path)
self._process(
include_node, included=included,
current_loader=sub_loader,
only_local=only_local,
process_project_options=process_project_options or current_loader != sub_loader,
)
finally:
included.remove(file_path)
_yaml.composite(include_node, node)
to_delete = [key for key, _ in _yaml.node_items(node) if key not in include_node]
for key, value in include_node.items():
node[key] = value
for key in to_delete:
del node[key]
for _, value in _yaml.node_items(node):
self._process_value(
value,
included=included,
current_loader=current_loader,
only_local=only_local,
process_project_options=process_project_options,
)
# _include_file()
#
# Load an include YAML file with a loader.
#
# Args:
# include (str): file path relative to loader's project directory.
# Can be prefixed with a junction name.
# loader (Loader): Loader for the current project.
def _include_file(self, include, loader):
shortname = include
if ':' in include:
junction, include = include.split(':', 1)
junction_loader = loader._get_loader(junction, fetch_subprojects=True)
current_loader = junction_loader
current_loader.project.ensure_fully_loaded()
else:
current_loader = loader
project = current_loader.project
directory = project.directory
file_path = os.path.join(directory, include)
key = (current_loader, file_path)
if key not in self._loaded:
self._loaded[key] = _yaml.load(os.path.join(directory, include),
shortname=shortname,
project=project,
copy_tree=self._copy_tree)
return self._loaded[key], file_path, current_loader
# _process_value()
#
# Select processing for value that could be a list or a dictionary.
#
# Args:
# value: Value to process. Can be a list or a dictionary.
# included (set): Fail for recursion if trying to load any files in this set
# current_loader (Loader): Use alternative loader (for junction files)
# only_local (bool): Whether to ignore junction files
# process_project_options (bool): Whether to process options from current project
#
def _process_value(
self, value, *, included=None, current_loader=None, only_local=False, process_project_options=True
):
if included is None:
included = set()
if isinstance(value, Mapping):
self._process_node(
value,
included=included,
current_loader=current_loader,
only_local=only_local,
process_project_options=process_project_options,
)
elif isinstance(value, list):
for v in value:
self._process_value(
v,
included=included,
current_loader=current_loader,
only_local=only_local,
process_project_options=process_project_options,
)
buildstream-1.6.9/buildstream/_loader/ 0000775 0000000 0000000 00000000000 14375152700 0017766 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_loader/__init__.py 0000664 0000000 0000000 00000001566 14375152700 0022107 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .metasource import MetaSource
from .metaelement import MetaElement
from .loader import Loader
buildstream-1.6.9/buildstream/_loader/loadelement.py 0000664 0000000 0000000 00000011661 14375152700 0022636 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# BuildStream toplevel imports
from .. import _yaml
# Local package imports
from .types import Symbol, Dependency
# LoadElement():
#
# A transient object breaking down what is loaded allowing us to
# do complex operations in multiple passes.
#
# Args:
# node (dict): A YAML loaded dictionary
# name (str): The element name
# loader (Loader): The Loader object for this element
#
class LoadElement():
def __init__(self, node, filename, loader):
#
# Public members
#
self.node = node # The YAML node
self.name = filename # The element name
self.full_name = None # The element full name (with associated junction)
self.deps = None # The list of Dependency objects
#
# Private members
#
self._loader = loader # The Loader object
self._dep_cache = None # The dependency cache, to speed up depends()
#
# Initialization
#
if loader.project.junction:
# dependency is in subproject, qualify name
self.full_name = '{}:{}'.format(loader.project.junction.name, self.name)
else:
# dependency is in top-level project
self.full_name = self.name
# Ensure the root node is valid
_yaml.node_validate(self.node, [
'kind', 'depends', 'sources', 'sandbox',
'variables', 'environment', 'environment-nocache',
'config', 'public', 'description',
'build-depends', 'runtime-depends',
])
# Extract the Dependencies
self.deps = _extract_depends_from_node(self.node)
# depends():
#
# Checks if this element depends on another element, directly
# or indirectly.
#
# Args:
# other (LoadElement): Another LoadElement
#
# Returns:
# (bool): True if this LoadElement depends on 'other'
#
def depends(self, other):
self._ensure_depends_cache()
return self._dep_cache.get(other.full_name) is not None
###########################################
# Private Methods #
###########################################
def _ensure_depends_cache(self):
if self._dep_cache:
return
self._dep_cache = {}
for dep in self.deps:
elt = self._loader.get_element_for_dep(dep)
# Ensure the cache of the element we depend on
elt._ensure_depends_cache()
# We depend on this element
self._dep_cache[elt.full_name] = True
# And we depend on everything this element depends on
self._dep_cache.update(elt._dep_cache)
# _extract_depends_from_node():
#
# Creates an array of Dependency objects from a given dict node 'node',
# allows both strings and dicts for expressing the dependency and
# throws a comprehensive LoadError in the case that the node is malformed.
#
# After extracting depends, the symbol is deleted from the node
#
# Args:
# node (dict): A YAML loaded dictionary
#
# Returns:
# (list): a list of Dependency objects
#
def _extract_depends_from_node(node, *, key=None):
if key is None:
build_depends = _extract_depends_from_node(node, key=Symbol.BUILD_DEPENDS)
runtime_depends = _extract_depends_from_node(node, key=Symbol.RUNTIME_DEPENDS)
depends = _extract_depends_from_node(node, key=Symbol.DEPENDS)
return build_depends + runtime_depends + depends
elif key == Symbol.BUILD_DEPENDS:
default_dep_type = Symbol.BUILD
elif key == Symbol.RUNTIME_DEPENDS:
default_dep_type = Symbol.RUNTIME
elif key == Symbol.DEPENDS:
default_dep_type = None
else:
assert False, "Unexpected value of key '{}'".format(key)
depends = _yaml.node_get(node, list, key, default_value=[])
output_deps = []
for index, dep in enumerate(depends):
dep_provenance = _yaml.node_get_provenance(node, key=key, indices=[index])
dependency = Dependency(dep, dep_provenance, default_dep_type=default_dep_type)
output_deps.append(dependency)
# Now delete the field, we don't want it anymore
if key in node:
del node[key]
return output_deps
buildstream-1.6.9/buildstream/_loader/loader.py 0000664 0000000 0000000 00000061242 14375152700 0021613 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
from functools import cmp_to_key
from collections import namedtuple
from collections.abc import Mapping
import tempfile
import shutil
from .._exceptions import LoadError, LoadErrorReason
from .. import Consistency
from .. import _yaml
from ..element import Element
from .._profile import Topics, profile_start, profile_end
from .._includes import Includes
from .types import Symbol, Dependency
from .loadelement import LoadElement
from . import MetaElement
from . import MetaSource
# Loader():
#
# The Loader class does the heavy lifting of parsing target
# bst files and ultimately transforming them into a list of MetaElements
# with their own MetaSources, ready for instantiation by the core.
#
# Args:
# context (Context): The Context object
# project (Project): The toplevel Project object
# parent (Loader): A parent Loader object, in the case this is a junctioned Loader
# tempdir (str): A directory to cleanup with the Loader, given to the loader by a parent
# loader in the case that this loader is a subproject loader.
#
class Loader():
def __init__(self, context, project, *, parent=None, tempdir=None):
# Ensure we have an absolute path for the base directory
basedir = project.element_path
if not os.path.isabs(basedir):
basedir = os.path.abspath(basedir)
#
# Public members
#
self.project = project # The associated Project
#
# Private members
#
self._context = context
self._options = project.options # Project options (OptionPool)
self._basedir = basedir # Base project directory
self._first_pass_options = project.first_pass_config.options # Project options (OptionPool)
self._tempdir = tempdir # A directory to cleanup
self._parent = parent # The parent loader
self._meta_elements = {} # Dict of resolved meta elements by name
self._elements = {} # Dict of elements
self._loaders = {} # Dict of junction loaders
self._includes = Includes(self, copy_tree=True)
# load():
#
# Loads the project based on the parameters given to the constructor
#
# Args:
# rewritable (bool): Whether the loaded files should be rewritable
# this is a bit more expensive due to deep copies
# ticker (callable): An optional function for tracking load progress
# targets (list of str): Target, element-path relative bst filenames in the project
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Raises: LoadError
#
# Returns: The toplevel LoadElement
def load(self, targets, rewritable=False, ticker=None, fetch_subprojects=False):
for filename in targets:
if os.path.isabs(filename):
# XXX Should this just be an assertion ?
# Expect that the caller gives us the right thing at least ?
raise LoadError(LoadErrorReason.INVALID_DATA,
"Target '{}' was not specified as a relative "
"path to the base project directory: {}"
.format(filename, self._basedir))
# First pass, recursively load files and populate our table of LoadElements
#
deps = []
for target in targets:
profile_start(Topics.LOAD_PROJECT, target)
_, name, loader = self._parse_name(target, rewritable, ticker,
fetch_subprojects=fetch_subprojects)
loader._load_file(name, rewritable, ticker, fetch_subprojects)
deps.append(Dependency(target, provenance="[command line]"))
profile_end(Topics.LOAD_PROJECT, target)
#
# Now that we've resolved the dependencies, scan them for circular dependencies
#
# Set up a dummy element that depends on all top-level targets
# to resolve potential circular dependencies between them
DummyTarget = namedtuple('DummyTarget', ['name', 'full_name', 'deps'])
dummy = DummyTarget(name='', full_name='', deps=deps)
self._elements[''] = dummy
profile_key = "_".join(t for t in targets)
profile_start(Topics.CIRCULAR_CHECK, profile_key)
self._check_circular_deps('')
profile_end(Topics.CIRCULAR_CHECK, profile_key)
ret = []
#
# Sort direct dependencies of elements by their dependency ordering
#
for target in targets:
profile_start(Topics.SORT_DEPENDENCIES, target)
_, name, loader = self._parse_name(target, rewritable, ticker,
fetch_subprojects=fetch_subprojects)
loader._sort_dependencies(name)
profile_end(Topics.SORT_DEPENDENCIES, target)
# Finally, wrap what we have into LoadElements and return the target
#
ret.append(loader._collect_element(name))
return ret
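# A rough usage sketch (element names hypothetical); the returned
# MetaElements are what the core turns into Element instances:
#
#     loader = Loader(context, project)
#     meta_elements = loader.load(['app.bst', 'tests/app-tests.bst'],
#                                 fetch_subprojects=True)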
# cleanup():
#
# Remove temporary checkout directories of subprojects
#
def cleanup(self):
if self._parent and not self._tempdir:
# already done
return
# recurse
for loader in self._loaders.values():
# value may be None with nested junctions without overrides
if loader is not None:
loader.cleanup()
if not self._parent:
# basedir of top-level loader is never a temporary directory
return
# safe guard to not accidentally delete directories outside builddir
if self._tempdir.startswith(self._context.builddir + os.sep):
if os.path.exists(self._tempdir):
shutil.rmtree(self._tempdir)
# get_element_for_dep():
#
# Gets a cached LoadElement by Dependency object
#
# This is used by LoadElement
#
# Args:
# dep (Dependency): The dependency to search for
#
# Returns:
# (LoadElement): The cached LoadElement
#
def get_element_for_dep(self, dep):
loader = self._get_loader_for_dep(dep)
return loader._elements[dep.name]
###########################################
# Private Methods #
###########################################
# _load_file():
#
# Recursively load bst files
#
# Args:
# filename (str): The element-path relative bst file
# rewritable (bool): Whether we should load in round trippable mode
# ticker (callable): A callback to report loaded filenames to the frontend
# fetch_subprojects (bool): Whether to fetch subprojects while loading
# provenance (Provenance): The location from where the file was referred to, or None
#
# Returns:
# (LoadElement): A loaded LoadElement
#
def _load_file(self, filename, rewritable, ticker, fetch_subprojects, provenance=None):
# Silently ignore already loaded files
if filename in self._elements:
return self._elements[filename]
# Call the ticker
if ticker:
ticker(filename)
# Load the data and process any conditional statements therein
fullpath = os.path.join(self._basedir, filename)
try:
node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable, project=self.project)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_FILE:
if self.project.junction:
message = "Could not find element '{}' in project referred to by junction element '{}'" \
.format(filename, self.project.junction.name)
else:
message = "Could not find element '{}' in elements directory '{}'".format(filename, self._basedir)
if provenance:
message = "{}: {}".format(provenance, message)
# If we can't find the file, try to suggest plausible
# alternatives by stripping the element-path from the given
# filename, and verifying that it exists.
detail = None
elements_dir = os.path.relpath(self._basedir, self.project.directory)
element_relpath = os.path.relpath(filename, elements_dir)
if filename.startswith(elements_dir) and os.path.exists(os.path.join(self._basedir, element_relpath)):
detail = "Did you mean '{}'?".format(element_relpath)
raise LoadError(LoadErrorReason.MISSING_FILE,
message, detail=detail) from e
if e.reason == LoadErrorReason.LOADING_DIRECTORY:
# If a .bst file exists in the element path,
# let's suggest this as a plausible alternative.
message = str(e)
if provenance:
message = "{}: {}".format(provenance, message)
detail = None
if os.path.exists(os.path.join(self._basedir, filename + '.bst')):
element_name = filename + '.bst'
detail = "Did you mean '{}'?\n".format(element_name)
raise LoadError(LoadErrorReason.LOADING_DIRECTORY,
message, detail=detail) from e
# Raise the unmodified LoadError
raise
kind = _yaml.node_get(node, str, Symbol.KIND)
if kind == "junction":
self._first_pass_options.process_node(node)
else:
self.project.ensure_fully_loaded()
self._includes.process(node)
element = LoadElement(node, filename, self)
self._elements[filename] = element
# Load all dependency files for the new LoadElement
for dep in element.deps:
if dep.junction:
self._load_file(dep.junction, rewritable, ticker, fetch_subprojects, dep.provenance)
loader = self._get_loader(dep.junction, rewritable=rewritable, ticker=ticker,
fetch_subprojects=fetch_subprojects)
else:
loader = self
dep_element = loader._load_file(dep.name, rewritable, ticker,
fetch_subprojects, dep.provenance)
if _yaml.node_get(dep_element.node, str, Symbol.KIND) == 'junction':
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Cannot depend on junction"
.format(dep.provenance))
return element
# _check_circular_deps():
#
# Detect circular dependencies on LoadElements with
# dependencies already resolved.
#
# Args:
# element_name (str): The element-path relative element name to check
#
# Raises:
# (LoadError): In case there was a circular dependency error
#
def _check_circular_deps(self, element_name, check_elements=None, validated=None):
if check_elements is None:
check_elements = {}
if validated is None:
validated = {}
element = self._elements[element_name]
# element name must be unique across projects
# to be usable as key for the check_elements and validated dicts
element_name = element.full_name
# Skip already validated branches
if validated.get(element_name) is not None:
return
if check_elements.get(element_name) is not None:
raise LoadError(LoadErrorReason.CIRCULAR_DEPENDENCY,
"Circular dependency detected for element: {}"
.format(element.name))
# Push / Check each dependency / Pop
check_elements[element_name] = True
for dep in element.deps:
loader = self._get_loader_for_dep(dep)
loader._check_circular_deps(dep.name, check_elements, validated)
del check_elements[element_name]
# Eliminate duplicate paths
validated[element_name] = True
# _sort_dependencies():
#
# Sort dependencies of each element by their dependencies,
# so that direct dependencies which depend on other direct
# dependencies (directly or indirectly) appear later in the
# list.
#
# This avoids the need for performing multiple topological
# sorts throughout the build process.
#
# Args:
# element_name (str): The element-path relative element name to sort
#
def _sort_dependencies(self, element_name, visited=None):
if visited is None:
visited = {}
element = self._elements[element_name]
# element name must be unique across projects
# to be usable as key for the visited dict
element_name = element.full_name
if visited.get(element_name) is not None:
return
for dep in element.deps:
loader = self._get_loader_for_dep(dep)
loader._sort_dependencies(dep.name, visited=visited)
def dependency_cmp(dep_a, dep_b):
element_a = self.get_element_for_dep(dep_a)
element_b = self.get_element_for_dep(dep_b)
# Sort on inter element dependency first
if element_a.depends(element_b):
return 1
elif element_b.depends(element_a):
return -1
# If there are no inter element dependencies, place
# runtime only dependencies last
if dep_a.dep_type != dep_b.dep_type:
if dep_a.dep_type == Symbol.RUNTIME:
return 1
elif dep_b.dep_type == Symbol.RUNTIME:
return -1
# All things being equal, string comparison.
if dep_a.name > dep_b.name:
return 1
elif dep_a.name < dep_b.name:
return -1
# Sort local elements before junction elements
# and use string comparison between junction elements
if dep_a.junction and dep_b.junction:
if dep_a.junction > dep_b.junction:
return 1
elif dep_a.junction < dep_b.junction:
return -1
elif dep_a.junction:
return -1
elif dep_b.junction:
return 1
# This won't ever happen
return 0
# Now dependency sort, we ensure that if any direct dependency
# directly or indirectly depends on another direct dependency,
# it is found later in the list.
element.deps.sort(key=cmp_to_key(dependency_cmp))
visited[element_name] = True
# _collect_element()
#
# Collect the toplevel elements we have
#
# Args:
# element_name (str): The element-path relative element name to sort
#
# Returns:
# (MetaElement): A recursively loaded MetaElement
#
def _collect_element(self, element_name):
element = self._elements[element_name]
# Return the already built one, if we already built it
meta_element = self._meta_elements.get(element_name)
if meta_element:
return meta_element
node = element.node
elt_provenance = _yaml.node_get_provenance(node)
meta_sources = []
sources = _yaml.node_get(node, list, Symbol.SOURCES, default_value=[])
element_kind = _yaml.node_get(node, str, Symbol.KIND)
# Safe loop calling into _yaml.node_get() for each element ensures
# we have good error reporting
for i in range(len(sources)):
source = _yaml.node_get(node, Mapping, Symbol.SOURCES, indices=[i])
kind = _yaml.node_get(source, str, Symbol.KIND)
del source[Symbol.KIND]
# Directory is optional
directory = _yaml.node_get(source, str, Symbol.DIRECTORY, default_value=None)
if directory:
del source[Symbol.DIRECTORY]
index = sources.index(source)
meta_source = MetaSource(element_name, index, element_kind, kind, source, directory)
meta_sources.append(meta_source)
meta_element = MetaElement(self.project, element_name, element_kind,
elt_provenance, meta_sources,
_yaml.node_get(node, Mapping, Symbol.CONFIG, default_value={}),
_yaml.node_get(node, Mapping, Symbol.VARIABLES, default_value={}),
_yaml.node_get(node, Mapping, Symbol.ENVIRONMENT, default_value={}),
_yaml.node_get(node, list, Symbol.ENV_NOCACHE, default_value=[]),
_yaml.node_get(node, Mapping, Symbol.PUBLIC, default_value={}),
_yaml.node_get(node, Mapping, Symbol.SANDBOX, default_value={}),
element_kind == 'junction')
# Cache it now, make sure it's already there before recursing
self._meta_elements[element_name] = meta_element
# Descend
for dep in element.deps:
loader = self._get_loader_for_dep(dep)
meta_dep = loader._collect_element(dep.name)
if dep.dep_type != 'runtime':
meta_element.build_dependencies.append(meta_dep)
if dep.dep_type != 'build':
meta_element.dependencies.append(meta_dep)
if dep.strict:
meta_element.strict_dependencies.append(meta_dep)
return meta_element
# _get_loader():
#
# Return loader for specified junction
#
# Args:
# filename (str): Junction name
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Raises: LoadError
#
# Returns: A Loader or None if specified junction does not exist
def _get_loader(self, filename, *, rewritable=False, ticker=None, level=0, fetch_subprojects=False):
# return previously determined result
if filename in self._loaders:
loader = self._loaders[filename]
if loader is None:
# do not allow junctions with the same name in different
# subprojects
raise LoadError(LoadErrorReason.CONFLICTING_JUNCTION,
"Conflicting junction {} in subprojects, define junction in {}"
.format(filename, self.project.name))
return loader
if self._parent:
# junctions in the parent take precedence over junctions defined
# in subprojects
loader = self._parent._get_loader(filename, rewritable=rewritable, ticker=ticker,
level=level + 1, fetch_subprojects=fetch_subprojects)
if loader:
self._loaders[filename] = loader
return loader
try:
self._load_file(filename, rewritable, ticker, fetch_subprojects)
except LoadError as e:
if e.reason != LoadErrorReason.MISSING_FILE:
# other load error
raise
if level == 0:
# junction element not found in this or ancestor projects
raise
# mark junction as not available to allow detection of
# conflicting junctions in subprojects
self._loaders[filename] = None
return None
# meta junction element
meta_element = self._collect_element(filename)
if meta_element.kind != 'junction':
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Expected junction but element kind is {}".format(filename, meta_element.kind))
element = Element._new_from_meta(meta_element)
element._preflight()
element._update_state()
# Handle the case where a subproject needs to be fetched
#
if element._get_consistency() == Consistency.RESOLVED:
if fetch_subprojects:
sources = list(element.sources())
for idx, source in enumerate(sources):
if ticker:
ticker(filename, 'Fetching subproject from {} source'.format(source.get_kind()))
if source._get_consistency() != Consistency.CACHED:
source._fetch(sources[0:idx])
else:
detail = "Try fetching the project with `bst fetch {}`".format(filename)
raise LoadError(LoadErrorReason.SUBPROJECT_FETCH_NEEDED,
"Subproject fetch needed for junction: {}".format(filename),
detail=detail)
# Handle the case where a subproject has no ref
#
elif element._get_consistency() == Consistency.INCONSISTENT:
detail = "Try tracking the junction element with `bst track {}`".format(filename)
raise LoadError(LoadErrorReason.SUBPROJECT_INCONSISTENT,
"Subproject has no ref for junction: {}".format(filename),
detail=detail)
# Stage sources
os.makedirs(self._context.builddir, exist_ok=True)
basedir = tempfile.mkdtemp(prefix="{}-".format(element.normal_name), dir=self._context.builddir)
element._stage_sources_at(basedir, mount_workspaces=False)
# Load the project
project_dir = os.path.join(basedir, element.path)
try:
from .._project import Project # pylint: disable=import-outside-toplevel
project = Project(project_dir, self._context, junction=element,
parent_loader=self, tempdir=basedir)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
raise LoadError(reason=LoadErrorReason.INVALID_JUNCTION,
message="Could not find the project.conf file for {}. "
"Expecting a project at path '{}'"
.format(element, element.path or '.')) from e
raise
loader = project.loader
self._loaders[filename] = loader
return loader
# _get_loader_for_dep():
#
# Gets the appropriate Loader for a Dependency object
#
# Args:
# dep (Dependency): A Dependency object
#
# Returns:
# (Loader): The Loader object to use for this Dependency
#
def _get_loader_for_dep(self, dep):
if dep.junction:
# junction dependency, delegate to appropriate loader
return self._loaders[dep.junction]
else:
return self
# _parse_name():
#
# Get junction and base name of element along with loader for the sub-project
#
# Args:
# name (str): Name of target
# rewritable (bool): Whether the loaded files should be rewritable
# this is a bit more expensive due to deep copies
# ticker (callable): An optional function for tracking load progress
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Returns:
# (tuple): - (str): name of the junction element
# - (str): name of the element
# - (Loader): loader for sub-project
#
def _parse_name(self, name, rewritable, ticker, fetch_subprojects=False):
# We allow splitting only once since deep junction names are forbidden.
# Users who want to refer to elements in sub-sub-projects are required
# to create junctions on the top level project.
junction_path = name.rsplit(':', 1)
if len(junction_path) == 1:
return None, junction_path[-1], self
else:
self._load_file(junction_path[-2], rewritable, ticker, fetch_subprojects)
loader = self._get_loader(junction_path[-2], rewritable=rewritable, ticker=ticker,
fetch_subprojects=fetch_subprojects)
return junction_path[-2], junction_path[-1], loader
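# A rough sketch of how target names are split above, with
# hypothetical element and junction names:
#
#     self._parse_name('app.bst', ...)
#         -> (None, 'app.bst', self)
#     self._parse_name('sdk-junction.bst:components/glibc.bst', ...)
#         -> ('sdk-junction.bst', 'components/glibc.bst', <subproject Loader>)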
buildstream-1.6.9/buildstream/_loader/metaelement.py 0000664 0000000 0000000 00000004451 14375152700 0022644 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
class MetaElement():
# MetaElement()
#
# An abstract object holding data suitable for constructing an Element
#
# Args:
# project: The project that contains the element
# name: The resolved element name
# kind: The element kind
# provenance: The provenance of the element
# sources: An array of MetaSource objects
# config: The configuration data for the element
# variables: The variables declared or overridden on this element
# environment: The environment variables declared or overridden on this element
# env_nocache: List of environment vars which should not be considered in cache keys
# public: Public domain data dictionary
# sandbox: Configuration specific to the sandbox environment
# first_pass: The element is to be loaded with first pass configuration (junction)
#
def __init__(self, project, name, kind, provenance, sources, config,
variables, environment, env_nocache, public, sandbox,
first_pass):
self.project = project
self.name = name
self.kind = kind
self.provenance = provenance
self.sources = sources
self.config = config
self.variables = variables
self.environment = environment
self.env_nocache = env_nocache
self.public = public
self.sandbox = sandbox
self.build_dependencies = []
self.dependencies = []
self.strict_dependencies = []
self.first_pass = first_pass
buildstream-1.6.9/buildstream/_loader/metasource.py 0000664 0000000 0000000 00000003174 14375152700 0022514 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
class MetaSource():
# MetaSource()
#
# An abstract object holding data suitable for constructing a Source
#
# Args:
# element_name: The name of the owning element
# element_index: The index of the source in the owning element's source list
# element_kind: The kind of the owning element
# kind: The kind of the source
# config: The configuration data for the source
# directory: The optional directory to stage the source into, or None
# first_pass: This source will be used with first project pass configuration (used for junctions).
#
def __init__(self, element_name, element_index, element_kind, kind, config, directory):
self.element_name = element_name
self.element_index = element_index
self.element_kind = element_kind
self.kind = kind
self.config = config
self.directory = directory
self.first_pass = False
buildstream-1.6.9/buildstream/_loader/types.py 0000664 0000000 0000000 00000013355 14375152700 0021513 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from collections.abc import Mapping
from .._exceptions import LoadError, LoadErrorReason
from .. import _yaml
# Symbol():
#
# A simple object to denote the symbols we load with from YAML
#
class Symbol():
FILENAME = "filename"
KIND = "kind"
DEPENDS = "depends"
BUILD_DEPENDS = "build-depends"
RUNTIME_DEPENDS = "runtime-depends"
SOURCES = "sources"
CONFIG = "config"
VARIABLES = "variables"
ENVIRONMENT = "environment"
ENV_NOCACHE = "environment-nocache"
PUBLIC = "public"
TYPE = "type"
BUILD = "build"
RUNTIME = "runtime"
ALL = "all"
DIRECTORY = "directory"
JUNCTION = "junction"
SANDBOX = "sandbox"
STRICT = "strict"
# Dependency()
#
# A simple object describing a dependency
#
# Args:
# name (str): The element name
# dep_type (str): The type of dependency, can be
# Symbol.ALL, Symbol.BUILD, or Symbol.RUNTIME
# junction (str): The element name of the junction, or None
# provenance (Provenance): The YAML node provenance of where this
# dependency was declared
#
class Dependency():
def __init__(self, dep, provenance, default_dep_type=None):
self.provenance = provenance
if isinstance(dep, str):
self.name = dep
self.dep_type = default_dep_type
self.junction = None
self.strict = False
elif isinstance(dep, Mapping):
if default_dep_type:
_yaml.node_validate(dep, ['filename', 'junction', 'strict'])
dep_type = default_dep_type
else:
_yaml.node_validate(dep, ['filename', 'type', 'junction', 'strict'])
# Make type optional, for this we set it to None
dep_type = _yaml.node_get(dep, str, Symbol.TYPE, default_value=None)
if dep_type is None or dep_type == Symbol.ALL:
dep_type = None
elif dep_type not in [Symbol.BUILD, Symbol.RUNTIME]:
provenance = _yaml.node_get_provenance(dep, key=Symbol.TYPE)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Dependency type '{}' is not 'build', 'runtime' or 'all'"
.format(provenance, dep_type))
self.name = _yaml.node_get(dep, str, Symbol.FILENAME)
self.dep_type = dep_type
self.junction = _yaml.node_get(dep, str, Symbol.JUNCTION, default_value=None)
self.strict = _yaml.node_get(dep, bool, Symbol.STRICT, default_value=False)
# Here we disallow explicitly setting 'strict' to False.
#
# This is in order to keep the door open to allowing the project.conf
# set the default of dependency 'strict'-ness which might be useful
# for projects which use mostly static linking and the like, in which
# case we can later interpret explicitly non-strict dependencies
# as an override of the project default.
#
if self.strict is False and Symbol.STRICT in dep:
provenance = _yaml.node_get_provenance(dep, key=Symbol.STRICT)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Setting 'strict' to False is unsupported"
.format(provenance))
else:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Dependency is not specified as a string or a dictionary".format(provenance))
# Only build dependencies are allowed to be strict
#
if self.strict and self.dep_type == Symbol.RUNTIME:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Runtime dependency {} specified as `strict`.".format(self.provenance, self.name),
detail="Only dependencies required at build time may be declared `strict`.")
# `:` characters are not allowed in filename if a junction was
# explicitly specified
if self.junction and ':' in self.name:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Dependency {} contains `:` in its name. "
"`:` characters are not allowed in filename when "
"junction attribute is specified.".format(self.provenance, self.name))
# The name of the element should never contain more than one `:` character
if self.name.count(':') > 1:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Dependency {} contains multiple `:` in its name. "
"Recursive lookups for cross-junction elements is not "
"allowed.".format(self.provenance, self.name))
# Attempt to split name if no junction was specified explicitly
if not self.junction and self.name.count(':') == 1:
self.junction, self.name = self.name.split(':')
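# A rough sketch of the YAML forms accepted above, with hypothetical
# element names:
#
#     depends:
#     - base.bst                      # string form, type defaults to 'all'
#     - filename: compiler.bst        # dictionary form
#       type: build
#       strict: true
#     - filename: runtime-libs.bst
#       junction: subproject.bst      # cross-junction dependency
#
#     build-depends:
#     - tooling.bst                   # 'type' is implied and not allowed here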
buildstream-1.6.9/buildstream/_message.py 0000664 0000000 0000000 00000006220 14375152700 0020516 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import datetime
import os
# Types of status messages.
#
class MessageType():
DEBUG = "debug" # Debugging message
STATUS = "status" # Status message, verbose details
INFO = "info" # Informative messages
WARN = "warning" # Warning messages
ERROR = "error" # Error messages
BUG = "bug" # An unhandled exception was raised in a plugin
LOG = "log" # Messages for log files _only_, never in the frontend
# Timed Messages: SUCCESS and FAIL have duration timestamps
START = "start" # Status start message
SUCCESS = "success" # Successful status complete message
FAIL = "failure" # Failing status complete message
SKIPPED = "skipped"
# Messages which should be reported regardless of whether
# they are currently silenced or not
unconditional_messages = [
MessageType.INFO,
MessageType.WARN,
MessageType.FAIL,
MessageType.ERROR,
MessageType.BUG
]
# Message object
#
class Message():
def __init__(self, unique_id, message_type, message,
task_id=None,
detail=None,
action_name=None,
elapsed=None,
depth=None,
logfile=None,
sandbox=None,
scheduler=False):
self.message_type = message_type # Message type
self.message = message # The message string
self.detail = detail # An additional detail string
self.action_name = action_name # Name of the task queue (fetch, refresh, build, etc)
self.elapsed = elapsed # The elapsed time, in timed messages
self.depth = depth # The depth of a timed message
self.logfile = logfile # The log file path where commands took place
self.sandbox = sandbox # The sandbox directory where an error occurred (if any)
self.pid = os.getpid() # The process pid
self.unique_id = unique_id # The plugin object ID issuing the message
self.task_id = task_id # The plugin object ID of the task
self.scheduler = scheduler # Whether this is a scheduler level message
self.creation_time = datetime.datetime.now()
if message_type in (MessageType.SUCCESS, MessageType.FAIL):
assert elapsed is not None
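# A rough construction sketch (the unique_id and values below are
# hypothetical; in practice the core and plugins create messages
# through their messaging helpers rather than directly). Note the
# assertion above: SUCCESS and FAIL messages must carry 'elapsed'.
#
#     message = Message(unique_id=42,
#                       message_type=MessageType.SUCCESS,
#                       message="Staged sources",
#                       elapsed=datetime.timedelta(seconds=3))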
buildstream-1.6.9/buildstream/_options/ 0000775 0000000 0000000 00000000000 14375152700 0020213 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_options/__init__.py 0000664 0000000 0000000 00000001466 14375152700 0022333 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .optionpool import OptionPool
buildstream-1.6.9/buildstream/_options/option.py 0000664 0000000 0000000 00000006211 14375152700 0022075 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .. import _yaml
# Shared symbols for validation purposes
#
OPTION_SYMBOLS = [
'type',
'description',
'variable'
]
# Option()
#
# An abstract class representing a project option.
#
# Concrete classes must be created to handle option types;
# the loaded project options are a collection of typed Option
# instances.
#
class Option():
# Subclasses use this to specify the type name used
# for the yaml format and error messages
OPTION_TYPE = None
def __init__(self, name, definition, pool):
self.name = name
self.description = None
self.variable = None
self.value = None
self.pool = pool
self.load(definition)
# load()
#
# Loads the option attributes from the descriptions
# in the project.conf
#
# Args:
# node (dict): The loaded YAML dictionary describing
# the option
def load(self, node):
self.description = _yaml.node_get(node, str, 'description')
self.variable = _yaml.node_get(node, str, 'variable', default_value=None)
# Assert valid symbol name for variable name
if self.variable is not None:
p = _yaml.node_get_provenance(node, 'variable')
_yaml.assert_symbol_name(p, self.variable, 'variable name')
# load_value()
#
# Loads the value of the option in string form.
#
# Args:
# node (Mapping): The YAML loaded key/value dictionary
# to load the value from
# transform (callable): Transform function for variable substitution
#
def load_value(self, node, *, transform=None):
pass # pragma: nocover
# set_value()
#
# Sets the value of an option from a string passed
# to buildstream on the command line
#
# Args:
# value (str): The value in string form
#
def set_value(self, value):
pass # pragma: nocover
# get_value()
#
# Gets the value of an option in string form, this
# is for the purpose of exporting option values to
# variables which must be in string form.
#
# Returns:
# (str): The value in string form
#
def get_value(self):
pass # pragma: nocover
# resolve()
#
# Called on each option once, after all configuration
# and cli options have been passed.
#
def resolve(self):
pass # pragma: nocover
buildstream-1.6.9/buildstream/_options/optionarch.py 0000664 0000000 0000000 00000003561 14375152700 0022740 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
from .optionenum import OptionEnum
# OptionArch
#
# An enumeration project option which does not allow
# definition of a default value, but instead tries to set
# the default value to the machine architecture introspected
# using `uname`
#
# Note that when using OptionArch in a project, it will automatically
# bail out if the host machine `uname` reports a machine architecture
# not supported by the project, in the case that no value was
# explicitly specified
#
class OptionArch(OptionEnum):
OPTION_TYPE = 'arch'
def load(self, node):
super().load(node, allow_default_definition=False)
def load_default_value(self, node):
_, _, _, _, machine_arch = os.uname()
return machine_arch
def resolve(self):
# Validate that the default machine arch reported by uname() is
# explicitly supported by the project, only if it was not
# overridden by user configuration or cli.
#
# If the value is specified on the cli or user configuration,
# then it will already be valid.
#
self.validate(self.value)
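# Hedged example (assumed project.conf snippet, not from this repository):
# an 'arch' option typically declares only its permitted values, since the
# default is taken from the host via uname() as shown above:
#
#   options:
#     machine_arch:
#       type: arch
#       description: The machine architecture
#       values: [x86_64, aarch64]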
buildstream-1.6.9/buildstream/_options/optionbool.py 0000664 0000000 0000000 00000003502 14375152700 0022751 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .. import _yaml
from .._exceptions import LoadError, LoadErrorReason
from .option import Option, OPTION_SYMBOLS
# OptionBool
#
# A boolean project option
#
class OptionBool(Option):
OPTION_TYPE = 'bool'
def load(self, node):
super().load(node)
_yaml.node_validate(node, OPTION_SYMBOLS + ['default'])
self.value = _yaml.node_get(node, bool, 'default')
def load_value(self, node, *, transform=None):
if transform:
self.set_value(transform(_yaml.node_get(node, str, self.name)))
else:
self.value = _yaml.node_get(node, bool, self.name)
def set_value(self, value):
if value in ('True', 'true'):
self.value = True
elif value in ('False', 'false'):
self.value = False
else:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Invalid value for boolean option {}: {}".format(self.name, value))
def get_value(self):
if self.value:
return "1"
else:
return "0"
buildstream-1.6.9/buildstream/_options/optioneltmask.py 0000664 0000000 0000000 00000003135 14375152700 0023460 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .. import utils
from .optionflags import OptionFlags
# OptionEltMask
#
# A flags option which automatically only allows element
# names as values.
#
class OptionEltMask(OptionFlags):
OPTION_TYPE = 'element-mask'
def load(self, node):
# Ask the parent class to disallow value definitions;
# we define those automatically.
super().load(node, allow_value_definitions=False)
# Here we want all valid elements as possible values,
# but we'll settle for just the relative filenames
# of files ending with ".bst" in the project element directory
def load_valid_values(self, node):
values = []
for filename in utils.list_relative_paths(self.pool.element_path):
if filename.endswith('.bst'):
values.append(filename)
return values
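# Hedged example (assumed project.conf snippet): an element-mask option
# derives its valid values from the .bst files in the element path as
# shown above, so only a default needs to be declared:
#
#   options:
#     debug_elements:
#       type: element-mask
#       description: The elements to build in debug mode
#       default: []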
buildstream-1.6.9/buildstream/_options/optionenum.py 0000664 0000000 0000000 00000005332 14375152700 0022765 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .. import _yaml
from .._exceptions import LoadError, LoadErrorReason
from .option import Option, OPTION_SYMBOLS
# OptionEnum
#
# An enumeration project option
#
class OptionEnum(Option):
OPTION_TYPE = 'enum'
def load(self, node, allow_default_definition=True):
super().load(node)
valid_symbols = OPTION_SYMBOLS + ['values']
if allow_default_definition:
valid_symbols += ['default']
_yaml.node_validate(node, valid_symbols)
self.values = _yaml.node_get(node, list, 'values', default_value=[])
if not self.values:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: No values specified for {} option '{}'"
.format(_yaml.node_get_provenance(node), self.OPTION_TYPE, self.name))
# Allow subclass to define the default value
self.value = self.load_default_value(node)
def load_value(self, node, *, transform=None):
self.value = _yaml.node_get(node, str, self.name)
if transform:
self.value = transform(self.value)
self.validate(self.value, _yaml.node_get_provenance(node, self.name))
def set_value(self, value):
self.validate(value)
self.value = value
def get_value(self):
return self.value
def validate(self, value, provenance=None):
if value not in self.values:
prefix = ""
if provenance:
prefix = "{}: ".format(provenance)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}Invalid value for {} option '{}': {}\n"
.format(prefix, self.OPTION_TYPE, self.name, value) +
"Valid values: {}".format(", ".join(self.values)))
def load_default_value(self, node):
value = _yaml.node_get(node, str, 'default')
self.validate(value, _yaml.node_get_provenance(node, 'default'))
return value
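# Hedged example (assumed project.conf snippet): an enum option must list
# its permitted values and, unless a subclass disallows it, a default:
#
#   options:
#     sound:
#       type: enum
#       description: The sound server to use
#       values: [alsa, pulseaudio]
#       default: pulseaudio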
buildstream-1.6.9/buildstream/_options/optionflags.py 0000664 0000000 0000000 00000006244 14375152700 0023120 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .. import _yaml
from .._exceptions import LoadError, LoadErrorReason
from .option import Option, OPTION_SYMBOLS
# OptionFlags
#
# A flags project option
#
class OptionFlags(Option):
OPTION_TYPE = 'flags'
def load(self, node, allow_value_definitions=True):
super().load(node)
valid_symbols = OPTION_SYMBOLS + ['default']
if allow_value_definitions:
valid_symbols += ['values']
_yaml.node_validate(node, valid_symbols)
# Allow subclass to define the valid values
self.values = self.load_valid_values(node)
if not self.values:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: No values specified for {} option '{}'"
.format(_yaml.node_get_provenance(node), self.OPTION_TYPE, self.name))
self.value = _yaml.node_get(node, list, 'default', default_value=[])
self.validate(self.value, _yaml.node_get_provenance(node, 'default'))
def load_value(self, node, *, transform=None):
self.value = _yaml.node_get(node, list, self.name)
if transform:
self.value = [transform(x) for x in self.value]
self.value = sorted(self.value)
self.validate(self.value, _yaml.node_get_provenance(node, self.name))
def set_value(self, value):
# Strip out all whitespace, allowing: "value1, value2 , value3"
stripped = "".join(value.split())
# Get the comma separated values
list_value = stripped.split(',')
self.validate(list_value)
self.value = sorted(list_value)
def get_value(self):
return ",".join(self.value)
def validate(self, value, provenance=None):
for flag in value:
if flag not in self.values:
prefix = ""
if provenance:
prefix = "{}: ".format(provenance)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}Invalid value for flags option '{}': {}\n"
.format(prefix, self.name, value) +
"Valid values: {}".format(", ".join(self.values)))
def load_valid_values(self, node):
# Allow the more descriptive error to be raised when no values
# exist, rather than bailing out here (by specifying default_value)
return _yaml.node_get(node, list, 'values', default_value=[])
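# Hedged example (assumed usage): a flags option holds a subset of its
# declared values; on the command line the subset is passed as a comma
# separated string which set_value() splits, strips and sorts:
#
#   options:
#     features:
#       type: flags
#       description: Optional features to enable
#       values: [doc, tests, debug]
#       default: [tests]
#
# e.g. `bst --option features "doc, tests"` yields ['doc', 'tests'].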
buildstream-1.6.9/buildstream/_options/optionpool.py 0000664 0000000 0000000 00000024700 14375152700 0022772 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
#
from collections.abc import Mapping
import jinja2
from .. import _yaml
from .._exceptions import LoadError, LoadErrorReason
from .optionbool import OptionBool
from .optionenum import OptionEnum
from .optionflags import OptionFlags
from .optioneltmask import OptionEltMask
from .optionarch import OptionArch
_OPTION_TYPES = {
OptionBool.OPTION_TYPE: OptionBool,
OptionEnum.OPTION_TYPE: OptionEnum,
OptionFlags.OPTION_TYPE: OptionFlags,
OptionEltMask.OPTION_TYPE: OptionEltMask,
OptionArch.OPTION_TYPE: OptionArch,
}
class OptionPool():
def __init__(self, element_path):
# We hold on to the element path for the sake of OptionEltMask
self.element_path = element_path
#
# Private members
#
self._options = {} # The Options
self._variables = None # The Options resolved into typed variables
# jinja2 environment, with default globals cleared out of the way
self._environment = jinja2.Environment(undefined=jinja2.StrictUndefined)
self._environment.globals = []
# load()
#
# Loads the options described in the project.conf
#
# Args:
# node (dict): The loaded YAML options
#
def load(self, options):
for option_name, option_definition in _yaml.node_items(options):
# Assert that the option name is a valid symbol
p = _yaml.node_get_provenance(options, option_name)
_yaml.assert_symbol_name(p, option_name, "option name", allow_dashes=False)
opt_type_name = _yaml.node_get(option_definition, str, 'type')
try:
opt_type = _OPTION_TYPES[opt_type_name]
except KeyError as e:
p = _yaml.node_get_provenance(option_definition, 'type')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Invalid option type '{}'".format(p, opt_type_name)) from e
option = opt_type(option_name, option_definition, self)
self._options[option_name] = option
# load_yaml_values()
#
# Loads the option values specified in a key/value
# dictionary loaded from YAML
#
# Args:
# node (dict): The loaded YAML options
#
def load_yaml_values(self, node, *, transform=None):
for option_name, _ in _yaml.node_items(node):
try:
option = self._options[option_name]
except KeyError as e:
p = _yaml.node_get_provenance(node, option_name)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Unknown option '{}' specified"
.format(p, option_name)) from e
option.load_value(node, transform=transform)
# load_cli_values()
#
# Loads the option values specified in a list of tuples
# collected from the command line
#
# Args:
# cli_options (list): A list of (str, str) tuples
# ignore_unknown (bool): Whether to silently ignore unknown options.
#
def load_cli_values(self, cli_options, *, ignore_unknown=False):
for option_name, option_value in cli_options:
try:
option = self._options[option_name]
except KeyError as e:
if not ignore_unknown:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Unknown option '{}' specified on the command line"
.format(option_name)) from e
else:
option.set_value(option_value)
# resolve()
#
# Resolves the loaded options, this is just a step which must be
# performed after loading all options and their values, and before
# ever trying to evaluate an expression
#
def resolve(self):
self._variables = {}
for option_name, option in self._options.items():
# Delegate one more method for options to
# do some last minute validation once any
# overrides have been performed.
#
option.resolve()
self._variables[option_name] = option.value
# export_variables()
#
# Exports the option values which are declared
# to be exported, to the passed dictionary.
#
# Variable values are exported in string form
#
# Args:
# variables (dict): A variables dictionary
#
def export_variables(self, variables):
for _, option in self._options.items():
if option.variable:
variables[option.variable] = option.get_value()
# printable_variables()
#
# Exports all option names and string values
# to the passed dictionary in alphabetical order.
#
# Args:
# variables (dict): A variables dictionary
#
def printable_variables(self, variables):
for key in sorted(self._options):
variables[key] = self._options[key].get_value()
# process_node()
#
# Args:
# node (Mapping): A YAML Loaded dictionary
#
def process_node(self, node):
# A conditional will result in composition, which can
# in turn add new conditionals to the root.
#
# Keep processing conditionals on the root node until
# all directly nested conditionals are resolved.
#
while self._process_one_node(node):
pass
# Now recurse into nested dictionaries and lists
# and process any indirectly nested conditionals.
#
for _, value in _yaml.node_items(node):
if isinstance(value, Mapping):
self.process_node(value)
elif isinstance(value, list):
self._process_list(value)
#######################################################
# Private Methods #
#######################################################
# _evaluate()
#
# Evaluates a jinja2 style expression with the loaded options in context.
#
# Args:
# expression (str): The jinja2 style expression
#
# Returns:
# (bool): Whether the expression resolved to a truthy value or a falsy one.
#
# Raises:
# LoadError: If the expression failed to resolve for any reason
#
def _evaluate(self, expression):
#
# Variables must be resolved at this point.
#
try:
template_string = "{{% if {} %}} True {{% else %}} False {{% endif %}}".format(expression)
template = self._environment.from_string(template_string)
context = template.new_context(self._variables, shared=True)
result = template.root_render_func(context)
evaluated = jinja2.utils.concat(result)
val = evaluated.strip()
if val == "True":
return True
elif val == "False":
return False
else: # pragma: nocover
raise LoadError(LoadErrorReason.EXPRESSION_FAILED,
"Failed to evaluate expression: {}".format(expression))
except jinja2.exceptions.TemplateError as e:
raise LoadError(LoadErrorReason.EXPRESSION_FAILED,
"Failed to evaluate expression ({}): {}".format(expression, e)) from e
# Recursion helper for lists, in case there
# are lists of lists.
#
def _process_list(self, values):
for value in values:
if isinstance(value, Mapping):
self.process_node(value)
elif isinstance(value, list):
self._process_list(value)
# Process a single conditional, resulting in composition
# at the root level on the passed node
#
# Return true if a conditional was processed.
#
def _process_one_node(self, node):
conditions = _yaml.node_get(node, list, '(?)', default_value=None)
assertion = _yaml.node_get(node, str, '(!)', default_value=None)
# Process assertions first; we want to abort on the first encountered
# assertion in a given dictionary, and not lose an assertion due to
# it being overwritten by a later assertion which might also trigger.
if assertion is not None:
p = _yaml.node_get_provenance(node, '(!)')
raise LoadError(LoadErrorReason.USER_ASSERTION,
"{}: {}".format(p, assertion.strip()))
if conditions is not None:
# Collect provenance first, we need to delete the (?) key
# before any composition occurs.
provenance = [
_yaml.node_get_provenance(node, '(?)', indices=[i])
for i in range(len(conditions))
]
del node['(?)']
for condition, p in zip(conditions, provenance):
tuples = list(_yaml.node_items(condition))
if len(tuples) > 1:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Conditional statement has more than one key".format(p))
expression, value = tuples[0]
try:
apply_fragment = self._evaluate(expression)
except LoadError as e:
# Prepend the provenance of the error
raise LoadError(e.reason, "{}: {}".format(p, e)) from e
if not hasattr(value, 'get'):
raise LoadError(LoadErrorReason.ILLEGAL_COMPOSITE,
"{}: Only values of type 'dict' can be composed.".format(p))
# Apply the yaml fragment if its condition evaluates to true
if apply_fragment:
_yaml.composite(node, value)
return True
return False
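# Hedged example (assumed YAML fragment, simplified): a conditional as
# processed by process_node() above. Each (?) entry maps a jinja2-style
# expression to a dict which is composited onto the surrounding node when
# the expression evaluates to true:
#
#   variables:
#     flavour: release
#   (?):
#   - arch == "x86_64":
#       variables:
#         flavour: optimized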
buildstream-1.6.9/buildstream/_pipeline.py 0000664 0000000 0000000 00000043051 14375152700 0020702 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# Tristan Maat
import os
import itertools
from operator import itemgetter
from ._exceptions import PipelineError
from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end
from . import Scope, Consistency
from ._project import ProjectRefStorage
# PipelineSelection()
#
# Defines the kind of pipeline selection to make when the pipeline
# is provided a list of targets, for whichever purpose.
#
# These values correspond to the CLI `--deps` arguments for convenience.
#
class PipelineSelection():
# Select only the target elements in the associated targets
NONE = 'none'
# As NONE, but redirect elements that are capable of it
REDIRECT = 'redirect'
# Select elements which must be built for the associated targets to be built
PLAN = 'plan'
# All dependencies of all targets, including the targets
ALL = 'all'
# All direct build dependencies and their recursive runtime dependencies,
# excluding the targets
BUILD = 'build'
# All direct runtime dependencies and their recursive runtime dependencies,
# including the targets
RUN = 'run'
# Pipeline()
#
# Args:
# project (Project): The Project object
# context (Context): The Context object
# artifacts (Context): The ArtifactCache object
#
class Pipeline():
def __init__(self, context, project, artifacts):
self._context = context # The Context
self._project = project # The toplevel project
#
# Private members
#
self._artifacts = artifacts
# load()
#
# Loads elements from target names.
#
# This function is called with a list of lists, such that multiple
# target groups may be specified. Element names specified in `targets`
# are allowed to be redundant.
#
# Args:
# target_groups (list of lists): Groups of toplevel targets to load
# fetch_subprojects (bool): Whether we should fetch subprojects as a part of the
# loading process, if they are not yet locally cached
# rewritable (bool): Whether the loaded files should be rewritable
# this is a bit more expensive due to deep copies
#
# Returns:
# (tuple of lists): A tuple of grouped Element objects corresponding to target_groups
#
def load(self, target_groups, *,
fetch_subprojects=True,
rewritable=False):
# First concatenate all the lists for the loader's sake
targets = list(itertools.chain(*target_groups))
profile_start(Topics.LOAD_PIPELINE, "_".join(t.replace(os.sep, '-') for t in targets))
elements = self._project.load_elements(targets,
rewritable=rewritable,
fetch_subprojects=fetch_subprojects)
# Now create element groups to match the input target groups
elt_iter = iter(elements)
element_groups = [
[next(elt_iter) for i in range(len(group))]
for group in target_groups
]
profile_end(Topics.LOAD_PIPELINE, "_".join(t.replace(os.sep, '-') for t in targets))
return tuple(element_groups)
# resolve_elements()
#
# Resolve element state and cache keys.
#
# Args:
# targets (list of Element): The list of toplevel element targets
#
def resolve_elements(self, targets):
with self._context.timed_activity("Resolving cached state", silent_nested=True):
for element in self.dependencies(targets, Scope.ALL):
# Preflight
element._preflight()
# Determine initial element state.
element._update_state()
# dependencies()
#
# Generator function to iterate over elements and optionally
# also iterate over sources.
#
# Args:
# targets (list of Element): The target Elements to loop over
# scope (Scope): The scope to iterate over
# recurse (bool): Whether to recurse into dependencies
#
def dependencies(self, targets, scope, *, recurse=True):
# Keep track of 'visited' in this scope, so that all targets
# share the same context.
visited = {}
for target in targets:
for element in target.dependencies(scope, recurse=recurse, visited=visited):
yield element
# plan()
#
# Generator function to iterate over only the elements
# which are required to build the pipeline target, omitting
# cached elements. The elements are yielded in a depth sorted
# ordering for optimal build plans
#
# Args:
# elements (list of Element): List of target elements to plan
#
# Returns:
# (list of Element): A depth sorted list of the build plan
#
def plan(self, elements):
# Keep locally cached elements in the plan if remote artifact cache is used
# to allow pulling artifact with strict cache key, if available.
plan_cached = not self._context.get_strict() and self._artifacts.has_fetch_remotes()
return _Planner().plan(elements, plan_cached)
# get_selection()
#
# Gets a full list of elements based on a toplevel
# list of element targets
#
# Args:
# targets (list of Element): The target Elements
# mode (PipelineSelection): The PipelineSelection mode
#
# Various commands define a --deps option to specify what elements to
# use in the result; this function reports a list that is appropriate for
# the selected option.
#
def get_selection(self, targets, mode, *, silent=True):
elements = None
if mode == PipelineSelection.NONE:
elements = targets
elif mode == PipelineSelection.REDIRECT:
# Redirect and log if permitted
elements = []
for t in targets:
new_elm = t._get_source_element()
if new_elm != t and not silent:
self._message(MessageType.INFO, "Element '{}' redirected to '{}'"
.format(t.name, new_elm.name))
if new_elm not in elements:
elements.append(new_elm)
elif mode == PipelineSelection.PLAN:
elements = self.plan(targets)
else:
if mode == PipelineSelection.ALL:
scope = Scope.ALL
elif mode == PipelineSelection.BUILD:
scope = Scope.BUILD
elif mode == PipelineSelection.RUN:
scope = Scope.RUN
elements = list(self.dependencies(targets, scope))
return elements
# except_elements():
#
# Return what is left after removing the intersection between the
# excepted and target elements, along with their unique
# dependencies.
#
# Args:
# targets (list of Element): List of toplevel targeted elements
# elements (list of Element): The list to remove elements from
# except_targets (list of Element): List of toplevel except targets
#
# Returns:
# (list of Element): The elements list with the intersected
# exceptions removed
#
def except_elements(self, targets, elements, except_targets):
if not except_targets:
return elements
targeted = list(self.dependencies(targets, Scope.ALL))
visited = []
def find_intersection(element):
if element in visited:
return
visited.append(element)
# Intersection elements are those that are also in
# 'targeted', as long as we don't recurse into them.
if element in targeted:
yield element
else:
for dep in element.dependencies(Scope.ALL, recurse=False):
yield from find_intersection(dep)
# Build a list of 'intersection' elements, i.e. the set of
# elements that lie on the border closest to excepted elements
# between excepted and target elements.
intersection = list(itertools.chain.from_iterable(
find_intersection(element) for element in except_targets
))
# Now use this set of elements to traverse the targeted
# elements, except 'intersection' elements and their unique
# dependencies.
queue = []
visited = []
queue.extend(targets)
while queue:
element = queue.pop()
if element in visited or element in intersection:
continue
visited.append(element)
queue.extend(element.dependencies(Scope.ALL, recurse=False))
# That looks like a lot, but overall we only traverse (part
# of) the graph twice. This could be reduced to once if we
# kept track of parent elements, but is probably not
# significant.
# Ensure that we return elements in the same order they were
# in before.
return [element for element in elements if element in visited]
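# Hedged walk-through (illustrative only, hypothetical element names):
# with targets {A}, where A depends on B and B depends on C, and
# except_targets {B}, the intersection computed above is {B}; the
# traversal from A then stops at B, so only A is kept while B and C
# (a dependency unique to B) are dropped from the result.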
# targets_include()
#
# Checks whether the given targets are, or depend on, some elements
#
# Args:
# targets (list of Element): A list of targets
# elements (list of Element): List of elements to check
#
# Returns:
# (bool): True if all of `elements` are among the `targets`, or are
# somehow depended on by the `targets`.
#
def targets_include(self, targets, elements):
target_element_set = set(self.dependencies(targets, Scope.ALL))
element_set = set(elements)
return element_set.issubset(target_element_set)
# subtract_elements()
#
# Subtract a subset of elements
#
# Args:
# elements (list of Element): The element list
# subtract (list of Element): List of elements to subtract from elements
#
# Returns:
# (list): The original elements list, with elements in subtract removed
#
def subtract_elements(self, elements, subtract):
subtract_set = set(subtract)
return [
e for e in elements
if e not in subtract_set
]
# track_cross_junction_filter()
#
# Filters out elements which are across junction boundaries,
# otherwise asserts that there are no such elements.
#
# This is currently assumed to be only relevant for element
# lists targeted at tracking.
#
# Args:
# project (Project): Project used for cross_junction filtering.
# All elements are expected to belong to that project.
# elements (list of Element): The list of elements to filter
# cross_junction_requested (bool): Whether the user requested
# cross junction tracking
#
# Returns:
# (list of Element): The filtered or asserted result
#
def track_cross_junction_filter(self, project, elements, cross_junction_requested):
# Filter out cross junctioned elements
if not cross_junction_requested:
elements = self._filter_cross_junctions(project, elements)
self._assert_junction_tracking(elements)
return elements
# assert_consistent()
#
# Asserts that the given list of elements are in a consistent state, that
# is to say that all sources are consistent and can at least be fetched.
#
# Consequently it also means that cache keys can be resolved.
#
def assert_consistent(self, elements):
inconsistent = []
inconsistent_workspaced = []
with self._context.timed_activity("Checking sources"):
for element in elements:
if element._get_consistency() == Consistency.INCONSISTENT:
if element._get_workspace():
inconsistent_workspaced.append(element)
else:
inconsistent.append(element)
if inconsistent:
detail = "Exact versions are missing for the following elements:\n\n"
for element in inconsistent:
detail += " Element: {} is inconsistent\n".format(element._get_full_name())
for source in element.sources():
if source._get_consistency() == Consistency.INCONSISTENT:
detail += " Source {} is missing ref\n".format(source)
detail += '\n'
detail += "Try tracking these elements first with `bst track`\n"
raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline")
if inconsistent_workspaced:
detail = "Some workspaces do not exist but are not closed\n" + \
"Try closing them with `bst workspace close`\n\n"
for element in inconsistent_workspaced:
detail += " " + element._get_full_name() + "\n"
raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline-workspaced")
#############################################################
# Private Methods #
#############################################################
# _filter_cross_junction()
#
# Filters out cross junction elements from the elements
#
# Args:
# project (Project): The project on which elements are allowed
# elements (list of Element): The list of elements to be tracked
#
# Returns:
# (list): A filtered list of `elements` which does
# not contain any cross junction elements.
#
def _filter_cross_junctions(self, project, elements):
return [
element for element in elements
if element._get_project() is project
]
# _assert_junction_tracking()
#
# Raises an error if tracking is attempted on junctioned elements and
# a project.refs file is not enabled for the toplevel project.
#
# Args:
# elements (list of Element): The list of elements to be tracked
#
def _assert_junction_tracking(self, elements):
# We can track anything if the toplevel project uses project.refs
#
if self._project.ref_storage == ProjectRefStorage.PROJECT_REFS:
return
# Ideally, we would want to report every cross junction element but not
# their dependencies, unless those cross junction elements' dependencies
# were also explicitly requested on the command line.
#
# But this is too hard, let's shoot for a simple error.
for element in elements:
element_project = element._get_project()
if element_project is not self._project:
detail = "Requested to track sources across junction boundaries\n" + \
"in a project which does not use project.refs ref-storage."
raise PipelineError("Untrackable sources", detail=detail, reason="untrackable-sources")
# _message()
#
# Local message propagator
#
def _message(self, message_type, message, **kwargs):
args = dict(kwargs)
self._context.message(
Message(None, message_type, message, **args))
# _Planner()
#
# An internal object used for constructing a build plan
# from a given resolved toplevel element, while considering which
# parts need to be built depending on whether build-only dependencies
# are cached, and depth sorting for more efficient processing.
#
class _Planner():
def __init__(self):
self.depth_map = {}
self.visiting_elements = set()
# Here we want to traverse the same element more than once when
# it is reachable from multiple places, with the interest of finding
# the deepest occurrence of every element
def plan_element(self, element, depth):
if element in self.visiting_elements:
# circular dependency, already being processed
return
prev_depth = self.depth_map.get(element)
if prev_depth is not None and prev_depth >= depth:
# element and dependencies already processed at equal or greater depth
return
self.visiting_elements.add(element)
for dep in element.dependencies(Scope.RUN, recurse=False):
self.plan_element(dep, depth)
# Don't try to plan builds of elements that are cached already
if not element._cached():
for dep in element.dependencies(Scope.BUILD, recurse=False):
self.plan_element(dep, depth + 1)
self.depth_map[element] = depth
self.visiting_elements.remove(element)
def plan(self, roots, plan_cached):
for root in roots:
self.plan_element(root, 0)
depth_sorted = sorted(self.depth_map.items(), key=itemgetter(1), reverse=True)
return [item[0] for item in depth_sorted if plan_cached or not item[0]._cached()]
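# Hedged sketch (illustrative only, hypothetical element names): for a
# chain app -> lib -> toolchain, where each is a build dependency of the
# previous, the planner records depths {app: 0, lib: 1, toolchain: 2}
# and plan() yields [toolchain, lib, app], so deeper build-only
# dependencies are processed first; cached elements are filtered out
# unless plan_cached is set.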
buildstream-1.6.9/buildstream/_platform/ 0000775 0000000 0000000 00000000000 14375152700 0020344 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_platform/__init__.py 0000664 0000000 0000000 00000001447 14375152700 0022463 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
from .platform import Platform
buildstream-1.6.9/buildstream/_platform/linux.py 0000664 0000000 0000000 00000010653 14375152700 0022062 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
import os
import subprocess
from .. import _site
from .. import utils
from ..sandbox import SandboxBwrap, SandboxDummy
from . import Platform
class Linux(Platform):
ARCHITECTURES = {
'amd64': 'x86_64',
'arm64': 'aarch64',
'i386': 'i686',
'armhf': 'armv7l',
'ppc64el': 'ppc64le',
}
def __init__(self):
super().__init__()
self._uid = os.geteuid()
self._gid = os.getegid()
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
self._user_ns_available = self._check_user_ns_available()
def create_sandbox(self, *args, **kwargs):
# Inform the bubblewrap sandbox as to whether it can use user namespaces or not
kwargs['user_ns_available'] = self._user_ns_available
kwargs['die_with_parent_available'] = self._die_with_parent_available
kwargs['linux32'] = False
host_os, _, _, _, host_arch = os.uname()
config = kwargs['config']
# We can't do builds for another host OS
if config.build_os != host_os:
return SandboxDummy("Configured and host OS don't match.", *args, **kwargs)
if config.build_arch != host_arch:
try:
archtest = utils.get_host_tool('arch-test')
supported = subprocess.getoutput(archtest).splitlines()
supported_architectures = map(self.ARCHITECTURES.get, supported, supported)
except utils.ProgramNotFoundError:
supported_architectures = []
if host_arch == "x86_64":
supported_architectures = ["i686"]
elif host_arch == "aarch64":
supported_architectures = ["armv7l"]
if config.build_arch not in supported_architectures:
return SandboxDummy("Configured and host architecture don't match.", *args, **kwargs)
if ((config.build_arch == "i686" and host_arch == "x86_64") or
(config.build_arch == "armv7l" and host_arch == "aarch64")):
# check whether linux32 is available
try:
utils.get_host_tool('linux32')
kwargs['linux32'] = True
except utils.ProgramNotFoundError:
return SandboxDummy("Configured and host architecture don't match.", *args, **kwargs)
return SandboxBwrap(*args, **kwargs)
def check_sandbox_config(self, config):
if self._user_ns_available:
# User namespace support allows arbitrary build UID/GID settings.
return True
else:
# Without user namespace support, the UID/GID in the sandbox
# will match the host UID/GID.
return config.build_uid == self._uid and config.build_gid == self._gid
################################################
# Private Methods #
################################################
def _check_user_ns_available(self):
# Here, let's check if bwrap is able to create user namespaces,
# issue a warning if it's not available, and save the state
# locally so that we can inform the sandbox to not try it
# later on.
bwrap = utils.get_host_tool('bwrap')
whoami = utils.get_host_tool('whoami')
try:
output = subprocess.check_output([
bwrap,
'--ro-bind', '/', '/',
'--unshare-user',
'--uid', '0', '--gid', '0',
whoami,
])
output = output.decode('UTF-8').strip()
except subprocess.CalledProcessError:
output = ''
return output == 'root'
buildstream-1.6.9/buildstream/_platform/platform.py 0000664 0000000 0000000 00000005525 14375152700 0022551 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
import os
import sys
from .._exceptions import PlatformError, ImplError
class Platform():
_instance = None
# Platform()
#
# A class to manage platform-specific details. Currently holds the
# sandbox factory as well as platform helpers.
#
def __init__(self):
pass
@classmethod
def _create_instance(cls):
# pylint: disable=import-outside-toplevel
if sys.platform.startswith('linux'):
backend = 'linux'
else:
backend = 'unix'
# Meant for testing purposes and therefore hidden in the
# deepest corners of the source code. Try not to abuse this,
# please?
if os.getenv('BST_FORCE_BACKEND'):
backend = os.getenv('BST_FORCE_BACKEND')
if backend == 'linux':
from .linux import Linux as PlatformImpl
elif backend == 'unix':
from .unix import Unix as PlatformImpl
else:
raise PlatformError("No such platform: '{}'".format(backend))
cls._instance = PlatformImpl()
@classmethod
def get_platform(cls):
if not cls._instance:
cls._create_instance()
return cls._instance
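# Hedged usage note (assumed, not part of the original file): the backend
# is normally auto-detected from sys.platform, but for testing it can be
# forced via the environment variable checked above, e.g.:
#
#   BST_FORCE_BACKEND=unix bst build element.bst
#
# which selects the chroot-based Unix platform even on Linux.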
##################################################################
# Sandbox functions #
##################################################################
# create_sandbox():
#
# Create a build sandbox suitable for the environment
#
# Args:
# args (dict): The arguments to pass to the sandbox constructor
# kwargs (file): The keyword arguments to pass to the sandbox constructor
#
# Returns:
# (Sandbox) A sandbox
#
def create_sandbox(self, *args, **kwargs):
raise ImplError("Platform {platform} does not implement create_sandbox()"
.format(platform=type(self).__name__))
def check_sandbox_config(self, config):
raise ImplError("Platform {platform} does not implement check_sandbox_config()"
.format(platform=type(self).__name__))
buildstream-1.6.9/buildstream/_platform/unix.py 0000664 0000000 0000000 00000003021 14375152700 0021675 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
import os
from .._exceptions import PlatformError
from ..sandbox import SandboxChroot
from . import Platform
class Unix(Platform):
def __init__(self):
super().__init__()
self._uid = os.geteuid()
self._gid = os.getegid()
# Not necessarily 100% reliable, but we want to fail early.
if self._uid != 0:
raise PlatformError("Root privileges are required to run without bubblewrap.")
def create_sandbox(self, *args, **kwargs):
return SandboxChroot(*args, **kwargs)
def check_sandbox_config(self, config):
# With the chroot sandbox, the UID/GID in the sandbox
# will match the host UID/GID (typically 0/0).
return config.build_uid == self._uid and config.build_gid == self._gid
buildstream-1.6.9/buildstream/_plugincontext.py 0000664 0000000 0000000 00000025240 14375152700 0022000 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
import inspect
from ._exceptions import PluginError, LoadError, LoadErrorReason
from . import utils
from .utils import UtilError
# A Context for loading plugin types
#
# Args:
# plugin_base (PluginBase): The main PluginBase object to work with
# base_type (type): A base object type for this context
# site_plugin_path (str): Path to where buildstream keeps plugins
# plugin_origins (list): Data used to search for plugins
#
# Since multiple pipelines can be processed recursively
# within the same interpreter, it's important that we have
# one context associated with the processing of a given pipeline;
# this way, sources and element types which are particular to
# a given BuildStream project are isolated to their respective
# Pipelines.
#
class PluginContext():
def __init__(self, plugin_base, base_type, site_plugin_path, *,
plugin_origins=None, dependencies=None,
format_versions=None):
if format_versions is None:
format_versions = {}
# The plugin kinds which were loaded
self.loaded_dependencies = []
#
# Private members
#
self._dependencies = dependencies
self._base_type = base_type # The base class plugins derive from
self._types = {} # Plugin type lookup table by kind
self._plugin_origins = plugin_origins or []
# The PluginSource object
self._plugin_base = plugin_base
self._site_source = plugin_base.make_plugin_source(searchpath=site_plugin_path)
self._alternate_sources = {}
self._format_versions = format_versions
# lookup():
#
# Fetches a type loaded from a plugin in this plugin context
#
# Args:
# kind (str): The kind of Plugin to create
#
# Returns: the type associated with the given kind
#
# Raises: PluginError
#
def lookup(self, kind):
return self._ensure_plugin(kind)
def _get_local_plugin_source(self, path):
if ('local', path) not in self._alternate_sources:
# key by a tuple to avoid collision
source = self._plugin_base.make_plugin_source(searchpath=[path])
# Ensure that sources never get garbage collected,
# as they'll take the plugins with them.
self._alternate_sources[('local', path)] = source
else:
source = self._alternate_sources[('local', path)]
return source
def _get_pip_plugin_source(self, package_name, kind):
defaults = None
if ('pip', package_name) not in self._alternate_sources:
import pkg_resources # pylint: disable=import-outside-toplevel
# key by a tuple to avoid collision
try:
package = pkg_resources.get_entry_info(package_name,
'buildstream.plugins',
kind)
except pkg_resources.DistributionNotFound as e:
raise PluginError("Failed to load {} plugin '{}': {}"
.format(self._base_type.__name__, kind, e)) from e
if package is None:
raise PluginError("Pip package {} does not contain a plugin named '{}'"
.format(package_name, kind))
location = package.dist.get_resource_filename(
pkg_resources._manager,
package.module_name.replace('.', os.sep) + '.py'
)
# Also load the defaults - required since setuptools
# may need to extract the file.
try:
defaults = package.dist.get_resource_filename(
pkg_resources._manager,
package.module_name.replace('.', os.sep) + '.yaml'
)
except KeyError:
# The plugin didn't have an accompanying YAML file
defaults = None
source = self._plugin_base.make_plugin_source(searchpath=[os.path.dirname(location)])
self._alternate_sources[('pip', package_name)] = source
else:
source = self._alternate_sources[('pip', package_name)]
return source, defaults
def _ensure_plugin(self, kind):
if kind not in self._types:
# Check whether the plugin is specified in plugins
source = None
defaults = None
loaded_dependency = False
for origin in self._plugin_origins:
if kind not in origin['plugins']:
continue
if origin['origin'] == 'local':
source = self._get_local_plugin_source(origin['path'])
elif origin['origin'] == 'pip':
source, defaults = self._get_pip_plugin_source(origin['package-name'], kind)
else:
raise PluginError("Failed to load plugin '{}': "
"Unexpected plugin origin '{}'"
.format(kind, origin['origin']))
loaded_dependency = True
break
# Fall back to getting the source from site
if not source:
if kind not in self._site_source.list_plugins():
raise PluginError("No {} type registered for kind '{}'"
.format(self._base_type.__name__, kind))
source = self._site_source
self._types[kind] = self._load_plugin(source, kind, defaults)
if loaded_dependency:
self.loaded_dependencies.append(kind)
return self._types[kind]
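# Hedged example (assumed project.conf snippet, simplified): plugin
# origins are declared per project and translated into the origin dicts
# consulted by _ensure_plugin() above (each carrying 'origin', a 'path'
# or 'package-name', and the plugin kinds it provides under 'plugins'):
#
#   plugins:
#   - origin: local
#     path: plugins/sources
#     sources:
#       mysource: 0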
def _load_plugin(self, source, kind, defaults):
try:
plugin = source.load_plugin(kind)
if not defaults:
plugin_file = inspect.getfile(plugin)
plugin_dir = os.path.dirname(plugin_file)
plugin_conf_name = "{}.yaml".format(kind)
defaults = os.path.join(plugin_dir, plugin_conf_name)
except ImportError as e:
raise PluginError("Failed to load {} plugin '{}': {}"
.format(self._base_type.__name__, kind, e)) from e
try:
plugin_type = plugin.setup()
except AttributeError as e:
raise PluginError("{} plugin '{}' did not provide a setup() function"
.format(self._base_type.__name__, kind)) from e
except TypeError as e:
raise PluginError("setup symbol in {} plugin '{}' is not a function"
.format(self._base_type.__name__, kind)) from e
self._assert_plugin(kind, plugin_type)
self._assert_version(kind, plugin_type)
return (plugin_type, defaults)
def _assert_plugin(self, kind, plugin_type):
if kind in self._types:
raise PluginError("Tried to register {} plugin for existing kind '{}' "
"(already registered {})"
.format(self._base_type.__name__, kind, self._types[kind].__name__))
try:
if not issubclass(plugin_type, self._base_type):
raise PluginError("{} plugin '{}' returned type '{}', which is not a subclass of {}"
.format(self._base_type.__name__, kind,
plugin_type.__name__,
self._base_type.__name__))
except TypeError as e:
raise PluginError("{} plugin '{}' returned something that is not a type (expected subclass of {})"
.format(self._base_type.__name__, kind,
self._base_type.__name__)) from e
def _assert_version(self, kind, plugin_type):
# Now assert BuildStream version
bst_major, bst_minor = utils.get_bst_version()
if bst_major < plugin_type.BST_REQUIRED_VERSION_MAJOR or \
(bst_major == plugin_type.BST_REQUIRED_VERSION_MAJOR and
bst_minor < plugin_type.BST_REQUIRED_VERSION_MINOR):
raise PluginError("BuildStream {}.{} is too old for {} plugin '{}' (requires {}.{})"
.format(
bst_major, bst_minor,
self._base_type.__name__, kind,
plugin_type.BST_REQUIRED_VERSION_MAJOR,
plugin_type.BST_REQUIRED_VERSION_MINOR))
# If a BST_MIN_VERSION was specified, then we need to raise an error
# that we are loading a plugin which targets the wrong BuildStream version.
#
try:
min_version = plugin_type.BST_MIN_VERSION
except AttributeError:
return
# Handle malformed version string specified by plugin
#
try:
major, _ = utils._parse_version(min_version)
except UtilError as e:
raise PluginError(
"Loaded plugin '{}' is not a BuildStream 1 plugin".format(kind),
detail="Error parsing BST_MIN_VERSION: {}".format(e),
reason="plugin-version-mismatch"
) from e
raise PluginError(
"Loaded plugin '{}' is a BuildStream {} plugin".format(kind, major),
detail="You need to use BuildStream 1 plugins with BuildStream 1 projects",
reason="plugin-version-mismatch"
)
# _assert_plugin_format()
#
# Helper to raise a PluginError if the loaded plugin is of a lesser version than
# the required version for this plugin
#
def _assert_plugin_format(self, plugin, version):
if plugin.BST_FORMAT_VERSION < version:
raise LoadError(LoadErrorReason.UNSUPPORTED_PLUGIN,
"{}: Format version {} is too old for requested version {}"
.format(plugin, plugin.BST_FORMAT_VERSION, version))
buildstream-1.6.9/buildstream/_profile.py 0000664 0000000 0000000 00000010012 14375152700 0020524 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import cProfile
import pstats
import os
import datetime
import time
# Track what profile topics are active
active_topics = {}
active_profiles = {}
initialized = False
# Use the topic values here to decide what to profile
# by setting them in the BST_PROFILE environment variable.
#
# Multiple topics can be set with the ':' separator.
#
# E.g.:
#
# BST_PROFILE=circ-dep-check:sort-deps bst
#
# The special 'all' value will enable all profiles.
class Topics():
CIRCULAR_CHECK = 'circ-dep-check'
SORT_DEPENDENCIES = 'sort-deps'
LOAD_LOADER = 'load-loader'
LOAD_CONTEXT = 'load-context'
LOAD_PROJECT = 'load-project'
LOAD_PIPELINE = 'load-pipeline'
SHOW = 'show'
ARTIFACT_RECEIVE = 'artifact-receive'
ALL = 'all'
class Profile():
def __init__(self, topic, key, message):
self.message = message
self.key = topic + '-' + key
self.start = time.time()
self.profiler = cProfile.Profile()
self.profiler.enable()
def end(self):
self.profiler.disable()
filename = self.key.replace('/', '-')
filename = filename.replace('.', '-')
filename = os.path.join(os.getcwd(), 'profile-' + filename + '.log')
with open(filename, "a", encoding="utf-8") as f:
dt = datetime.datetime.fromtimestamp(self.start)
time_ = dt.strftime('%Y-%m-%d %H:%M:%S')
heading = '================================================================\n'
heading += 'Profile for key: {}\n'.format(self.key)
heading += 'Started at: {}\n'.format(time_)
if self.message:
heading += '\n {}'.format(self.message)
heading += '================================================================\n'
f.write(heading)
ps = pstats.Stats(self.profiler, stream=f).sort_stats('cumulative')
ps.print_stats()
# profile_start()
#
# Start profiling for a given topic.
#
# Args:
# topic (str): A topic name
# key (str): A key for this profile run
# message (str): An optional message to print in profile results
#
def profile_start(topic, key, message=None):
if not profile_enabled(topic):
return
# Start profiling and hold on to the key
profile = Profile(topic, key, message)
assert active_profiles.get(profile.key) is None
active_profiles[profile.key] = profile
# profile_end()
#
# Ends a profiling session previously
# started with profile_start()
#
# Args:
# topic (str): A topic name
# key (str): A key for this profile run
#
def profile_end(topic, key):
if not profile_enabled(topic):
return
topic_key = topic + '-' + key
profile = active_profiles.get(topic_key)
assert profile
profile.end()
del active_profiles[topic_key]
def profile_init():
global initialized # pylint: disable=global-statement
if not initialized:
setting = os.getenv('BST_PROFILE')
if setting:
topics = setting.split(':')
for topic in topics:
active_topics[topic] = True
initialized = True
def profile_enabled(topic):
profile_init()
if active_topics.get(topic):
return True
if active_topics.get(Topics.ALL):
return True
return False
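# Hedged usage sketch (illustrative only): a profiled region brackets the
# work between matching start/end calls that share a topic and key, e.g.:
#
#   profile_start(Topics.SHOW, 'my-target')
#   ...  # do the work to be measured
#   profile_end(Topics.SHOW, 'my-target')
#
# and is only active when BST_PROFILE includes that topic (or 'all').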
buildstream-1.6.9/buildstream/_project.py 0000664 0000000 0000000 00000076441 14375152700 0020554 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Tiago Gomes
import os
from collections import OrderedDict
from collections.abc import Mapping
from pluginbase import PluginBase
from . import utils
from . import _cachekey
from . import _site
from . import _yaml
from .utils import UtilError
from ._profile import Topics, profile_start, profile_end
from ._exceptions import LoadError, LoadErrorReason
from ._options import OptionPool
from ._artifactcache import ArtifactCache
from ._elementfactory import ElementFactory
from ._sourcefactory import SourceFactory
from .types import CoreWarnings
from ._projectrefs import ProjectRefs, ProjectRefStorage
from ._versions import BST_FORMAT_VERSION
from ._loader import Loader
from .element import Element
from ._message import Message, MessageType
from ._includes import Includes
# Project Configuration file
_PROJECT_CONF_FILE = 'project.conf'
# HostMount()
#
# A simple object describing the behavior of
# a host mount.
#
class HostMount():
def __init__(self, path, host_path=None, optional=False):
# Support environment variable expansion in host mounts
path = os.path.expandvars(path)
if host_path is not None:
host_path = os.path.expandvars(host_path)
self.path = path # Path inside the sandbox
self.host_path = host_path # Path on the host
self.optional = optional # Optional mounts do not incur warnings or errors
if self.host_path is None:
self.host_path = self.path
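# For illustration, a hypothetical 'shell' configuration in project.conf
# which the second load pass below turns into HostMount objects (the paths
# and values here are invented):
#
#   shell:
#     host-files:
#     - '/etc/resolv.conf'
#     - path: '/etc/machine-id'
#       host_path: '${HOME}/fake-machine-id'
#       optional: true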
# Represents project configuration that can have different values for junctions.
class ProjectConfig:
def __init__(self):
self.element_factory = None
self.source_factory = None
self.options = None # OptionPool
self.base_variables = {} # The base set of variables
self.element_overrides = {} # Element specific configurations
self.source_overrides = {} # Source specific configurations
self.mirrors = OrderedDict() # contains dicts of alias-mappings to URIs.
self.default_mirror = None # The name of the preferred mirror.
self._aliases = {} # Aliases dictionary
# Project()
#
# The Project Configuration
#
class Project():
def __init__(self, directory, context, *, junction=None, cli_options=None,
default_mirror=None, parent_loader=None, tempdir=None):
# The project name
self.name = None
# The project directory
self.directory = self._ensure_project_dir(directory)
# Absolute path to where elements are loaded from within the project
self.element_path = None
# ProjectRefs for the main refs and also for junctions
self.refs = ProjectRefs(self.directory, 'project.refs')
self.junction_refs = ProjectRefs(self.directory, 'junction.refs')
self.config = ProjectConfig()
self.first_pass_config = ProjectConfig()
self.junction = junction # The junction Element object, if this is a subproject
self.ref_storage = None # ProjectRefStorage setting
self.base_environment = {} # The base set of environment variables
self.base_env_nocache = None # The base nocache mask (list) for the environment
#
# Private Members
#
self._context = context # The invocation Context
self._default_mirror = default_mirror # The name of the preferred mirror.
self._cli_options = cli_options
self._cache_key = None
self._fatal_warnings = [] # A list of warnings which should trigger an error
self._shell_command = [] # The default interactive shell command
self._shell_environment = {} # Statically set environment vars
self._shell_host_files = [] # A list of HostMount objects
self.artifact_cache_specs = None
self._sandbox = None
self._splits = None
self._context.add_project(self)
self._partially_loaded = False
self._fully_loaded = False
self._project_includes = None
profile_start(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-'))
self._load(parent_loader=parent_loader, tempdir=tempdir)
profile_end(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-'))
self._partially_loaded = True
@property
def options(self):
return self.config.options
@property
def base_variables(self):
return self.config.base_variables
@property
def element_overrides(self):
return self.config.element_overrides
@property
def source_overrides(self):
return self.config.source_overrides
# translate_url():
#
# Translates the given url which may be specified with an alias
# into a fully qualified url.
#
# Args:
# url (str): A url, which may be using an alias
# first_pass (bool): Whether to use first pass configuration (for junctions)
#
# Returns:
# str: The fully qualified url, with aliases resolved
#
# This method is provided for :class:`.Source` objects to resolve
# fully qualified urls based on the shorthand which is allowed
# to be specified in the YAML
def translate_url(self, url, *, first_pass=False):
if first_pass:
config = self.first_pass_config
else:
config = self.config
if url and utils._ALIAS_SEPARATOR in url:
url_alias, url_body = url.split(utils._ALIAS_SEPARATOR, 1)
alias_url = config._aliases.get(url_alias)
if alias_url:
url = alias_url + url_body
return url
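# For illustration, a minimal sketch of the alias expansion performed by
# translate_url(), assuming the alias separator is ':' and using an
# invented alias mapping:
#
#   aliases = {'gnome': 'https://gitlab.gnome.org/GNOME/'}
#   url = 'gnome:glib.git'
#   alias, body = url.split(':', 1)
#   alias_url = aliases.get(alias)
#   resolved = alias_url + body if alias_url else url
#   # resolved == 'https://gitlab.gnome.org/GNOME/glib.git'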
# get_shell_config()
#
# Gets the project specified shell configuration
#
# Returns:
# (list): The shell command
# (dict): The shell environment
# (list): The list of HostMount objects
#
def get_shell_config(self):
return (self._shell_command, self._shell_environment, self._shell_host_files)
# get_cache_key():
#
# Returns the cache key, calculating it if necessary
#
# Returns:
# (str): A hex digest cache key for the Context
#
def get_cache_key(self):
if self._cache_key is None:
# Anything that alters the build goes into the unique key
# (currently nothing here)
self._cache_key = _cachekey.generate_key({})
return self._cache_key
# create_element()
#
# Instantiate and return an element
#
# Args:
# meta (MetaElement): The loaded MetaElement
# first_pass (bool): Whether to use first pass configuration (for junctions)
#
# Returns:
# (Element): A newly created Element object of the appropriate kind
#
def create_element(self, meta, *, first_pass=False):
if first_pass:
return self.first_pass_config.element_factory.create(self._context, self, meta)
else:
return self.config.element_factory.create(self._context, self, meta)
# create_source()
#
# Instantiate and return a Source
#
# Args:
# meta (MetaSource): The loaded MetaSource
# first_pass (bool): Whether to use first pass configuration (for junctions)
#
# Returns:
# (Source): A newly created Source object of the appropriate kind
#
def create_source(self, meta, *, first_pass=False):
if first_pass:
return self.first_pass_config.source_factory.create(self._context, self, meta)
else:
return self.config.source_factory.create(self._context, self, meta)
# get_alias_uri()
#
# Returns the URI for a given alias, if it exists
#
# Args:
# alias (str): The alias.
# first_pass (bool): Whether to use first pass configuration (for junctions)
#
# Returns:
# str: The URI for the given alias; or None: if there is no URI for
# that alias.
def get_alias_uri(self, alias, *, first_pass=False):
if first_pass:
config = self.first_pass_config
else:
config = self.config
return config._aliases.get(alias)
# get_alias_uris()
#
# Args:
# alias (str): The alias.
# first_pass (bool): Whether to use first pass configuration (for junctions)
#
# Returns:
# (list): A list of every URI to replace the alias with
def get_alias_uris(self, alias, *, first_pass=False):
if first_pass:
config = self.first_pass_config
else:
config = self.config
if not alias or alias not in config._aliases:
return [None]
mirror_list = []
for key, alias_mapping in config.mirrors.items():
if alias in alias_mapping:
if key == config.default_mirror:
mirror_list = alias_mapping[alias] + mirror_list
else:
mirror_list += alias_mapping[alias]
mirror_list.append(config._aliases[alias])
return mirror_list
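# For illustration, a hypothetical mirror configuration and the resulting
# lookup order (the alias, mirror name and URLs are invented):
#
#   aliases = {'gnome': 'https://gitlab.gnome.org/GNOME/'}
#   mirrors = {'middle-earth': {'gnome': ['https://mirror.example.com/gnome/']}}
#
# With 'middle-earth' as the default mirror, get_alias_uris('gnome') would
# return the mirror URIs first, followed by the original alias URI:
#
#   ['https://mirror.example.com/gnome/', 'https://gitlab.gnome.org/GNOME/']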
# load_elements()
#
# Loads elements from target names.
#
# Args:
# targets (list): Target names
# rewritable (bool): Whether the loaded files should be rewritable
# this is a bit more expensive due to deep copies
# fetch_subprojects (bool): Whether we should fetch subprojects as a part of the
# loading process, if they are not yet locally cached
#
# Returns:
# (list): A list of loaded Element
#
def load_elements(self, targets, *,
rewritable=False, fetch_subprojects=False):
with self._context.timed_activity("Loading elements", silent_nested=True):
meta_elements = self.loader.load(targets, rewritable=rewritable,
ticker=None,
fetch_subprojects=fetch_subprojects)
with self._context.timed_activity("Resolving elements"):
elements = [
Element._new_from_meta(meta)
for meta in meta_elements
]
# Now warn about any redundant source references which may have
# been discovered in the resolve() phase.
redundant_refs = Element._get_redundant_source_refs()
if redundant_refs:
detail = "The following inline specified source references will be ignored:\n\n"
lines = [
"{}:{}".format(source._get_provenance(), ref)
for source, ref in redundant_refs
]
detail += "\n".join(lines)
self._context.message(
Message(None, MessageType.WARN, "Ignoring redundant source references", detail=detail))
return elements
# ensure_fully_loaded()
#
# Ensure project has finished loading. At first initialization, a
# project can only load junction elements. Other elements require
# project to be fully loaded.
#
def ensure_fully_loaded(self):
if self._fully_loaded:
return
assert self._partially_loaded
self._fully_loaded = True
if self.junction:
self.junction._get_project().ensure_fully_loaded()
self._load_second_pass()
# cleanup()
#
# Cleans up resources used loading elements
#
def cleanup(self):
self.loader.cleanup()
# Reset the element loader state
Element._reset_load_state()
# _load():
#
# Loads the project configuration file in the project
# directory and processes the first pass.
#
# Raises: LoadError if there was a problem with the project.conf
#
def _load(self, parent_loader=None, tempdir=None):
# Load builtin default
projectfile = os.path.join(self.directory, _PROJECT_CONF_FILE)
self._default_config_node = _yaml.load(_site.default_project_config)
# Load project local config and override the builtin
try:
self._project_conf = _yaml.load(projectfile)
except LoadError as e:
# Raise a more specific error here
if e.reason == LoadErrorReason.MISSING_FILE:
raise LoadError(LoadErrorReason.MISSING_PROJECT_CONF, str(e)) from e
raise
pre_config_node = _yaml.node_copy(self._default_config_node)
_yaml.composite(pre_config_node, self._project_conf)
# Assert project's format version early, before validating toplevel keys
format_version = _yaml.node_get(pre_config_node, int, 'format-version')
if BST_FORMAT_VERSION < format_version:
major, minor = utils.get_bst_version()
raise LoadError(
LoadErrorReason.UNSUPPORTED_PROJECT,
"Project requested format version {}, but BuildStream {}.{} only supports up until format version {}"
.format(format_version, major, minor, BST_FORMAT_VERSION))
# Since BuildStream 2, project.conf is required to specify min-version.
#
# Detect this and raise an error, indicating which major version of BuildStream
# should be used for this project.
#
min_version = _yaml.node_get(pre_config_node, str, 'min-version', default_value=None)
if min_version:
# Handle case of malformed min-version
#
try:
major, minor = utils._parse_version(min_version)
except UtilError as e:
raise LoadError(
LoadErrorReason.UNSUPPORTED_PROJECT,
"This is not a BuildStream 1 project: {}".format(e)
) from e
# Raise a helpful error indicating what the user should do to
# use this project.
#
raise LoadError(
LoadErrorReason.UNSUPPORTED_PROJECT,
"Tried to load a BuildStream {} project with BuildStream 1".format(major),
# TODO: Include a link to the appropriate documentation for parallel
# installing different BuildStream versions.
#
detail="Please install at least BuildStream {}.{} to use this project".format(major, minor)
)
# FIXME:
#
# Performing this check manually in the absence
# of proper support from _yaml.node_get(), this should
# be removed in favor of a proper accessor function
# from the _yaml module when #591 is fixed.
#
if self._project_conf.get('name') is None:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: project.conf does not contain expected key '{}'".format(projectfile, 'name'))
# The project name, element path and option declarations
# are constant and cannot be overridden by option conditional statements
self.name = _yaml.node_get(pre_config_node, str, 'name')
# Validate that project name is a valid symbol name
_yaml.assert_symbol_name(_yaml.node_get_provenance(pre_config_node, 'name'),
self.name, "project name")
self.element_path = os.path.join(
self.directory,
_yaml.node_get_project_path(pre_config_node, 'element-path', self.directory,
check_is_dir=True)
)
self.config.options = OptionPool(self.element_path)
self.first_pass_config.options = OptionPool(self.element_path)
self.loader = Loader(self._context, self,
parent=parent_loader,
tempdir=tempdir)
self._project_includes = Includes(self.loader, copy_tree=False)
project_conf_first_pass = _yaml.node_copy(self._project_conf)
self._project_includes.process(project_conf_first_pass, only_local=True, process_project_options=False)
config_no_include = _yaml.node_copy(self._default_config_node)
_yaml.composite(config_no_include, project_conf_first_pass)
self._load_pass(config_no_include, self.first_pass_config,
ignore_unknown=True)
# Use separate file for storing source references
self.ref_storage = _yaml.node_get(pre_config_node, str, 'ref-storage')
if self.ref_storage not in [ProjectRefStorage.INLINE, ProjectRefStorage.PROJECT_REFS]:
p = _yaml.node_get_provenance(pre_config_node, 'ref-storage')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Invalid value '{}' specified for ref-storage"
.format(p, self.ref_storage))
if self.ref_storage == ProjectRefStorage.PROJECT_REFS:
self.junction_refs.load(self.first_pass_config.options)
# _load_second_pass()
#
# Process the second pass of loading the project configuration.
#
def _load_second_pass(self):
project_conf_second_pass = _yaml.node_copy(self._project_conf)
self._project_includes.process(project_conf_second_pass, process_project_options=False)
config = _yaml.node_copy(self._default_config_node)
_yaml.composite(config, project_conf_second_pass)
self._load_pass(config, self.config)
_yaml.node_validate(config, [
'format-version',
'element-path', 'variables',
'environment', 'environment-nocache',
'split-rules', 'elements', 'plugins',
'aliases', 'name',
'artifacts', 'options',
'fail-on-overlap', 'shell', 'fatal-warnings',
'ref-storage', 'sandbox', 'mirrors'
])
#
# Now all YAML composition is done, from here on we just load
# the values from our loaded configuration dictionary.
#
# Load artifacts pull/push configuration for this project
self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config, self.directory)
# Load sandbox environment variables
self.base_environment = _yaml.node_get(config, Mapping, 'environment')
self.base_env_nocache = _yaml.node_get(config, list, 'environment-nocache')
# Load sandbox configuration
self._sandbox = _yaml.node_get(config, Mapping, 'sandbox')
# Load project split rules
self._splits = _yaml.node_get(config, Mapping, 'split-rules')
# Fatal warnings
self._fatal_warnings = _yaml.node_get(config, list, 'fatal-warnings', default_value=[])
# Support backwards compatibility for fail-on-overlap
fail_on_overlap = _yaml.node_get(config, bool, 'fail-on-overlap', default_value=None)
if (CoreWarnings.OVERLAPS not in self._fatal_warnings) and fail_on_overlap:
self._fatal_warnings.append(CoreWarnings.OVERLAPS)
# Deprecation check
if fail_on_overlap is not None:
self._context.message(
Message(
None,
MessageType.WARN,
"Use of fail-on-overlap within project.conf " +
"is deprecated. Consider using fatal-warnings instead."
)
)
# Load project.refs if it exists, this may be ignored.
if self.ref_storage == ProjectRefStorage.PROJECT_REFS:
self.refs.load(self.options)
# Parse shell options
shell_options = _yaml.node_get(config, Mapping, 'shell')
_yaml.node_validate(shell_options, ['command', 'environment', 'host-files'])
self._shell_command = _yaml.node_get(shell_options, list, 'command')
# Perform environment expansion right away
shell_environment = _yaml.node_get(shell_options, Mapping, 'environment', default_value={})
for key, _ in _yaml.node_items(shell_environment):
value = _yaml.node_get(shell_environment, str, key)
self._shell_environment[key] = os.path.expandvars(value)
# Host files is parsed as a list for convenience
host_files = _yaml.node_get(shell_options, list, 'host-files', default_value=[])
for host_file in host_files:
if isinstance(host_file, str):
mount = HostMount(host_file)
else:
# Some validation
index = host_files.index(host_file)
host_file_desc = _yaml.node_get(shell_options, Mapping, 'host-files', indices=[index])
_yaml.node_validate(host_file_desc, ['path', 'host_path', 'optional'])
# Parse the host mount
path = _yaml.node_get(host_file_desc, str, 'path')
host_path = _yaml.node_get(host_file_desc, str, 'host_path', default_value=None)
optional = _yaml.node_get(host_file_desc, bool, 'optional', default_value=False)
mount = HostMount(path, host_path, optional)
self._shell_host_files.append(mount)
# _load_pass():
#
# Loads parts of the project configuration that are different
# for first and second pass configurations.
#
# Args:
# config (dict) - YAML node of the configuration file.
# output (ProjectConfig) - ProjectConfig to load configuration onto.
# ignore_unknown (bool) - Whether the option loader should ignore unknown options.
#
def _load_pass(self, config, output, *,
ignore_unknown=False):
self._load_plugin_factories(config, output)
# Load project options
options_node = _yaml.node_get(config, Mapping, 'options', default_value={})
output.options.load(options_node)
if self.junction:
# load before user configuration
output.options.load_yaml_values(self.junction.options, transform=self.junction._subst_string)
# Collect option values specified in the user configuration
overrides = self._context.get_overrides(self.name)
override_options = _yaml.node_get(overrides, Mapping, 'options', default_value={})
output.options.load_yaml_values(override_options)
if self._cli_options:
output.options.load_cli_values(self._cli_options, ignore_unknown=ignore_unknown)
# We're done modifying options, now we can use them for substitutions
output.options.resolve()
#
# Now resolve any conditionals in the remaining configuration;
# any conditionals specified for project option declarations,
# or for conditionally specifying the project name, will be ignored.
output.options.process_node(config)
# Element and Source type configurations will be composited later onto
# element/source types, so we delete it from here and run our final
# assertion after.
output.element_overrides = _yaml.node_get(config, Mapping, 'elements', default_value={})
output.source_overrides = _yaml.node_get(config, Mapping, 'sources', default_value={})
config.pop('elements', None)
config.pop('sources', None)
_yaml.node_final_assertions(config)
# Load base variables
output.base_variables = _yaml.node_get(config, Mapping, 'variables')
# Add the project name as a default variable
output.base_variables['project-name'] = self.name
# Extend variables with automatic variables and option exports
# Initialize it as a string, since all variables are processed as strings.
# Based on some testing (mainly on AWS), the maximum effective max-jobs
# value seems to be around 8-10 when enough cores are available; users
# should set explicit values based on their workload and build infrastructure.
if self._context.build_max_jobs == 0:
# User requested automatic max-jobs
output.base_variables['max-jobs'] = str(min(len(os.sched_getaffinity(0)), 8))
else:
# User requested explicit max-jobs setting
output.base_variables['max-jobs'] = str(self._context.build_max_jobs)
# Export options into variables, if that was requested
output.options.export_variables(output.base_variables)
# Override default_mirror if not set by command-line
output.default_mirror = self._default_mirror or _yaml.node_get(overrides, str,
'default-mirror', default_value=None)
mirrors = _yaml.node_get(config, list, 'mirrors', default_value=[])
for mirror in mirrors:
allowed_mirror_fields = [
'name', 'aliases'
]
_yaml.node_validate(mirror, allowed_mirror_fields)
mirror_name = _yaml.node_get(mirror, str, 'name')
alias_mappings = {}
for alias_mapping, uris in _yaml.node_items(mirror['aliases']):
assert isinstance(uris, list)
alias_mappings[alias_mapping] = list(uris)
output.mirrors[mirror_name] = alias_mappings
if not output.default_mirror:
output.default_mirror = mirror_name
# Source url aliases
output._aliases = _yaml.node_get(config, Mapping, 'aliases', default_value={})
# _ensure_project_dir()
#
# Returns the path of the project directory, if a configuration file is found
# in the given directory or any of its parent directories.
#
# Args:
# directory (str) - directory from where the command was invoked
#
# Raises:
# LoadError if project.conf is not found
#
def _ensure_project_dir(self, directory):
directory = os.path.abspath(directory)
while not os.path.isfile(os.path.join(directory, _PROJECT_CONF_FILE)):
parent_dir = os.path.dirname(directory)
if directory == parent_dir:
raise LoadError(
LoadErrorReason.MISSING_PROJECT_CONF,
'{} not found in current directory or any of its parent directories'
.format(_PROJECT_CONF_FILE))
directory = parent_dir
return directory
def _load_plugin_factories(self, config, output):
plugin_source_origins = [] # Origins of custom sources
plugin_element_origins = [] # Origins of custom elements
# Plugin origins and versions
origins = _yaml.node_get(config, list, 'plugins', default_value=[])
source_format_versions = {}
element_format_versions = {}
for origin in origins:
allowed_origin_fields = [
'origin', 'sources', 'elements',
'package-name', 'path',
]
allowed_origins = ['core', 'local', 'pip']
_yaml.node_validate(origin, allowed_origin_fields)
if origin['origin'] not in allowed_origins:
raise LoadError(
LoadErrorReason.INVALID_YAML,
"Origin '{}' is not one of the allowed types"
.format(origin['origin']))
# Store source versions for checking later
source_versions = _yaml.node_get(origin, Mapping, 'sources', default_value={})
for key, _ in _yaml.node_items(source_versions):
if key in source_format_versions:
raise LoadError(
LoadErrorReason.INVALID_YAML,
"Duplicate listing of source '{}'".format(key))
source_format_versions[key] = _yaml.node_get(source_versions, int, key)
# Store element versions for checking later
element_versions = _yaml.node_get(origin, Mapping, 'elements', default_value={})
for key, _ in _yaml.node_items(element_versions):
if key in element_format_versions:
raise LoadError(
LoadErrorReason.INVALID_YAML,
"Duplicate listing of element '{}'".format(key))
element_format_versions[key] = _yaml.node_get(element_versions, int, key)
# Store the origins if they're not 'core'.
# core elements are loaded by default, so storing is unnecessary.
if _yaml.node_get(origin, str, 'origin') != 'core':
self._store_origin(origin, 'sources', plugin_source_origins)
self._store_origin(origin, 'elements', plugin_element_origins)
pluginbase = PluginBase(package='buildstream.plugins')
output.element_factory = ElementFactory(pluginbase,
plugin_origins=plugin_element_origins,
format_versions=element_format_versions)
output.source_factory = SourceFactory(pluginbase,
plugin_origins=plugin_source_origins,
format_versions=source_format_versions)
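# For illustration, a hypothetical 'plugins' declaration in project.conf
# that _load_plugin_factories() would accept (the plugin name and path are
# invented; the integer is the plugin's format version):
#
#   plugins:
#   - origin: local
#     path: plugins/sources
#     sources:
#       mysource: 0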
# _store_origin()
#
# Helper function to store plugin origins
#
# Args:
# origin (dict) - a dictionary indicating the origin of a group of
# plugins.
# plugin_group (str) - The name of the type of plugin that is being
# loaded
# destination (list) - A list of dicts to store the origins in
#
# Raises:
# LoadError if 'origin' is an unexpected value
def _store_origin(self, origin, plugin_group, destination):
expected_groups = ['sources', 'elements']
if plugin_group not in expected_groups:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Unexpected plugin group: {}, expecting {}"
.format(plugin_group, expected_groups))
if plugin_group in origin:
origin_dict = _yaml.node_copy(origin)
plugins = _yaml.node_get(origin, Mapping, plugin_group, default_value={})
origin_dict['plugins'] = [k for k, _ in _yaml.node_items(plugins)]
for group in expected_groups:
if group in origin_dict:
del origin_dict[group]
if origin_dict['origin'] == 'local':
path = _yaml.node_get_project_path(origin, 'path',
self.directory,
check_is_dir=True)
# paths are passed in relative to the project, but must be absolute
origin_dict['path'] = os.path.join(self.directory, path)
destination.append(origin_dict)
# _warning_is_fatal():
#
# Returns true if the warning in question should be considered fatal based on
# the project configuration.
#
# Args:
# warning_str (str): The warning configuration string to check against
#
# Returns:
# (bool): True if the warning should be considered fatal and cause an error.
#
def _warning_is_fatal(self, warning_str):
return warning_str in self._fatal_warnings
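# For illustration, a hypothetical project.conf snippet that would make
# overlap warnings fatal according to _warning_is_fatal(), assuming the
# CoreWarnings.OVERLAPS value is 'overlaps':
#
#   fatal-warnings:
#   - overlaps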
buildstream-1.6.9/buildstream/_projectrefs.py 0000664 0000000 0000000 00000012152 14375152700 0021421 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason
# ProjectRefStorage()
#
# Indicates the type of ref storage
class ProjectRefStorage():
# Source references are stored inline
#
INLINE = 'inline'
# Source references are stored in a central project.refs file
#
PROJECT_REFS = 'project.refs'
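# For illustration, a project would opt into this storage mode with a
# hypothetical project.conf snippet:
#
#   ref-storage: project.refs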
# ProjectRefs()
#
# The project.refs file management
#
# Args:
# directory (str): The project directory
# base_name (str): The project.refs basename
#
class ProjectRefs():
def __init__(self, directory, base_name):
directory = os.path.abspath(directory)
self._fullpath = os.path.join(directory, base_name)
self._base_name = base_name
self._toplevel_node = None
self._toplevel_save = None
# load()
#
# Load the project.refs file
#
# Args:
# options (OptionPool): To resolve conditional statements
#
def load(self, options):
try:
self._toplevel_node = _yaml.load(self._fullpath, shortname=self._base_name, copy_tree=True)
provenance = _yaml.node_get_provenance(self._toplevel_node)
self._toplevel_save = provenance.toplevel
# Process any project options immediately
options.process_node(self._toplevel_node)
# Run any final assertions on the project.refs, just in case there
# are list composition directives or anything left unprocessed.
_yaml.node_final_assertions(self._toplevel_node)
except LoadError as e:
if e.reason != LoadErrorReason.MISSING_FILE:
raise
# Ignore failure if the file doesn't exist; it'll be created and
# for now just assumed to be empty
self._toplevel_node = {}
self._toplevel_save = self._toplevel_node
_yaml.node_validate(self._toplevel_node, ['projects'])
# Ensure we create our toplevel entry point on the fly here
for node in [self._toplevel_node, self._toplevel_save]:
if 'projects' not in node:
node['projects'] = {}
# save()
#
# Save the project.refs file with any local changes
#
def save(self):
_yaml.dump(self._toplevel_save, self._fullpath)
# lookup_ref()
#
# Fetch the ref node for a given Source. If the ref node does not
# exist and `write` is specified, it will be automatically created.
#
# Args:
# project (str): The project to lookup
# element (str): The element name to lookup
# source_index (int): The index of the Source in the specified element
# write (bool): Whether we want to read the node or write to it
#
# Returns:
# (node): The YAML dictionary where the ref is stored
#
def lookup_ref(self, project, element, source_index, *, write=False):
node = self._lookup(self._toplevel_node, project, element, source_index)
if write:
if node is not None:
provenance = _yaml.node_get_provenance(node)
if provenance:
node = provenance.node
# If we couldn't find the original, create a new one.
#
if node is None:
node = self._lookup(self._toplevel_save, project, element, source_index, ensure=True)
return node
# _lookup()
#
# Looks up a ref node in the project.refs file, creates one if ensure is True.
#
def _lookup(self, toplevel, project, element, source_index, *, ensure=False):
# Fetch the project
try:
project_node = toplevel['projects'][project]
except KeyError:
if not ensure:
return None
project_node = toplevel['projects'][project] = {}
# Fetch the element
try:
element_list = project_node[element]
except KeyError:
if not ensure:
return None
element_list = project_node[element] = []
# Fetch the source index
try:
node = element_list[source_index]
except IndexError:
if not ensure:
return None
# Pad the list with empty newly created dictionaries
element_list.extend({} for _ in range(len(element_list), source_index + 1))
node = element_list[source_index]
return node
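# For illustration, a hypothetical project.refs laid out in the structure
# that _lookup() walks (toplevel['projects'][project][element][source_index]);
# the project name, element name and ref value are invented:
#
#   projects:
#     example-project:
#       hello.bst:
#       - ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7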
buildstream-1.6.9/buildstream/_protos/ 0000775 0000000 0000000 00000000000 14375152700 0020046 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0022145 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/ 0000775 0000000 0000000 00000000000 14375152700 0021145 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0023244 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/ 0000775 0000000 0000000 00000000000 14375152700 0022242 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0024341 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/ 0000775 0000000 0000000 00000000000 14375152700 0023535 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0025634 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/ 0000775 0000000 0000000 00000000000 14375152700 0024654 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0026753 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/ 0000775 0000000 0000000 00000000000 14375152700 0025202 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0027301 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset.proto 0000664 0000000 0000000 00000052106 14375152700 0030765 0 ustar 00root root 0000000 0000000 // Copyright 2020 The Bazel Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package build.bazel.remote.asset.v1;
import "build/bazel/remote/execution/v2/remote_execution.proto";
import "google/api/annotations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
option csharp_namespace = "Build.Bazel.Remote.Asset.v1";
option go_package = "remoteasset";
option java_multiple_files = true;
option java_outer_classname = "RemoteAssetProto";
option java_package = "build.bazel.remote.asset.v1";
option objc_class_prefix = "RA";
// The Remote Asset API provides a mapping from a URI and Qualifiers to
// Digests.
//
// Multiple URIs may be used to refer to the same content. For example, the
// same tarball may exist at multiple mirrors and thus be retrievable from
// multiple URLs. When URLs are used, these should refer to actual content as
// Fetch service implementations may choose to fetch the content directly
// from the origin. For example, the HEAD of a git repository's active branch
// can be referred to as:
//
// uri: https://github.com/bazelbuild/remote-apis.git
//
// URNs may be used to strongly identify content, for instance by using the
// uuid namespace identifier: urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6.
// This is most applicable to named content that is Push'd, where the URN
// serves as an agreed-upon key, but carries no other inherent meaning.
//
// Service implementations may choose to support only URLs, only URNs for
// Push'd content, only other URIs for which the server and client agree upon
// semantics of, or any mixture of the above.
// Qualifiers are used to disambiguate or sub-select content that shares a URI.
// This may include specifying a particular commit or branch, in the case of
// URIs referencing a repository; they could also be used to specify a
// particular subdirectory of a repository or tarball. Qualifiers may also be
// used to ensure content matches what the client expects, even when there is
// no ambiguity to be had - for example, a qualifier specifying a checksum
// value.
//
// In cases where the semantics of the request are not immediately clear from
// the URL and/or qualifiers - e.g. dictated by URL scheme - it is recommended
// to use an additional qualifier to remove the ambiguity. The `resource_type`
// qualifier is recommended for this purpose.
//
// Qualifiers may be supplied in any order.
message Qualifier {
// The "name" of the qualifier, for example "resource_type".
// No separation is made between 'standard' and 'nonstandard'
// qualifiers, in accordance with https://tools.ietf.org/html/rfc6648,
// however implementers *SHOULD* take care to avoid ambiguity.
string name = 1;
// The "value" of the qualifier. Semantics will be dictated by the name.
string value = 2;
}
// The Fetch service resolves or fetches assets referenced by URI and
// Qualifiers, returning a Digest for the content in
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service Fetch {
// Resolve or fetch referenced assets, making them available to the caller and
// other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
//
// Servers *MAY* fetch content that they do not already have cached, for any
// URLs they support.
//
// Servers *SHOULD* ensure that referenced files are present in the CAS at the
// time of the response, and (if supported) that they will remain available
// for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
// be increased if necessary and applicable.
// In the event that a client receives a reference to content that is no
// longer present, it *MAY* re-issue the request with
// `oldest_content_accepted` set to a more recent timestamp than the original
// attempt, to induce a re-fetch from origin.
//
// Servers *MAY* cache fetched content and reuse it for subsequent requests,
// subject to `oldest_content_accepted`.
//
// Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push]
// API and allow content to be directly inserted for use in future fetch
// responses.
//
// Servers *MUST* ensure Fetch'd content matches all the specified
// qualifiers except in the case of previously Push'd resources, for which
// the server *MAY* trust the pushing client to have set the qualifiers
// correctly, without validation.
//
// Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push]
// API *MUST* reject requests containing qualifiers it does not support.
//
// Servers *MAY* transform assets as part of the fetch. For example a
// tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]
// might be unpacked, or a Git repository
// fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]
// might be passed through `git-archive`.
//
// Errors handling the requested assets will be returned as gRPC Status errors
// here; errors outside the server's control will be returned inline in the
// `status` field of the response (see comment there for details).
// The possible RPC errors include:
// * `INVALID_ARGUMENT`: One or more arguments were invalid, such as a
// qualifier that is not supported by the server.
// * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
// perform the requested operation. The client may retry after a delay.
// * `UNAVAILABLE`: Due to a transient condition the operation could not be
// completed. The client should retry.
// * `INTERNAL`: An internal error occurred while performing the operation.
// The client should retry.
// * `DEADLINE_EXCEEDED`: The fetch could not be completed within the given
// RPC deadline. The client should retry for at least as long as the value
// provided in `timeout` field of the request.
//
// In the case of unsupported qualifiers, the server *SHOULD* additionally
// send a [BadRequest][google.rpc.BadRequest] error detail where, for each
// unsupported qualifier, there is a `FieldViolation` with a `field` of
// `qualifiers.name` and a `description` of `"{qualifier}" not supported`
// indicating the name of the unsupported qualifier.
rpc FetchBlob(FetchBlobRequest) returns (FetchBlobResponse) {
option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchBlob" body: "*" };
}
rpc FetchDirectory(FetchDirectoryRequest) returns (FetchDirectoryResponse) {
option (google.api.http) = { post: "/v1/{instance_name=**}/assets:fetchDirectory" body: "*" };
}
}
// A request message for
// [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob].
message FetchBlobRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The timeout for the underlying fetch, if content needs to be retrieved from
// origin.
//
// If unset, the server *MAY* apply an implementation-defined timeout.
//
// If set, and the user-provided timeout exceeds the RPC deadline, the server
// *SHOULD* keep the fetch going after the RPC completes, to be made
// available for future Fetch calls. The server may also enforce (via clamping
// and/or an INVALID_ARGUMENT error) implementation-defined minimum and
// maximum timeout values.
//
// If this timeout is exceeded on an attempt to retrieve content from origin
// the client will receive DEADLINE_EXCEEDED in [FetchBlobResponse.status].
google.protobuf.Duration timeout = 2;
// The oldest content the client is willing to accept, as measured from the
// time it was Push'd or when the underlying retrieval from origin was
// started.
// Upon retries of Fetch requests that cannot be completed within a single
// RPC, clients *SHOULD* provide the same value for subsequent requests as the
// original, to simplify combining the request with the previous attempt.
//
// If unset, the client *SHOULD* accept content of any age.
google.protobuf.Timestamp oldest_content_accepted = 3;
// The URI(s) of the content to fetch. These may be resources that the server
// can directly fetch from origin, in which case multiple URIs *SHOULD*
// represent the same content available at different locations (such as an
// origin and secondary mirrors). These may also be URIs for content known to
// the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push]
// service.
//
// Clients *MUST* supply at least one URI. Servers *MAY* match any one of the
// supplied URIs.
repeated string uris = 4;
// Qualifiers sub-specifying the content to fetch - see comments on
// [Qualifier][build.bazel.remote.asset.v1.Qualifier].
// The same qualifiers apply to all URIs.
//
// Specified qualifier names *MUST* be unique.
repeated Qualifier qualifiers = 5;
}
// A response message for
// [Fetch.FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob].
message FetchBlobResponse {
// If the status has a code other than `OK`, it indicates that the operation
// was unable to be completed for reasons outside the servers' control.
// The possible fetch errors include:
// * `DEADLINE_EXCEEDED`: The operation could not be completed within the
// specified timeout.
// * `NOT_FOUND`: The requested asset was not found at the specified location.
// * `PERMISSION_DENIED`: The request was rejected by a remote server, or
// requested an asset from a disallowed origin.
// * `ABORTED`: The operation could not be completed, typically due to a
// failed consistency check.
google.rpc.Status status = 1;
// The uri from the request that resulted in a successful retrieval, or from
// which the error indicated in `status` was obtained.
string uri = 2;
// Any qualifiers known to the server and of interest to clients.
repeated Qualifier qualifiers = 3;
// A minimum timestamp the content is expected to be available through.
// Servers *MAY* omit this field, if not known with confidence.
google.protobuf.Timestamp expires_at = 4;
// The result of the fetch, if the status had code `OK`.
// The digest of the file's contents, available for download through the CAS.
build.bazel.remote.execution.v2.Digest blob_digest = 5;
}
// A request message for
// [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory].
message FetchDirectoryRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The timeout for the underlying fetch, if content needs to be retrieved from
// origin. This value is allowed to exceed the RPC deadline, in which case the
// server *SHOULD* keep the fetch going after the RPC completes, to be made
// available for future Fetch calls.
//
// If this timeout is exceeded on an attempt to retrieve content from origin
// the client will receive DEADLINE_EXCEEDED in [FetchDirectoryResponse.status].
google.protobuf.Duration timeout = 2;
// The oldest content the client is willing to accept, as measured from the
// time it was Push'd or when the underlying retrieval from origin was
// started.
// Upon retries of Fetch requests that cannot be completed within a single
// RPC, clients *SHOULD* provide the same value for subsequent requests as the
// original, to simplify combining the request with the previous attempt.
//
// If unset, the client *SHOULD* accept content of any age.
google.protobuf.Timestamp oldest_content_accepted = 3;
// The URI(s) of the content to fetch. These may be resources that the server
// can directly fetch from origin, in which case multiple URIs *SHOULD*
// represent the same content available at different locations (such as an
// origin and secondary mirrors). These may also be URIs for content known to
// the server through other mechanisms, e.g. pushed via the [Push][build.bazel.remote.asset.v1.Push]
// service.
//
// Clients *MUST* supply at least one URI. Servers *MAY* match any one of the
// supplied URIs.
repeated string uris = 4;
// Qualifiers sub-specifying the content to fetch - see comments on
// [Qualifier][build.bazel.remote.asset.v1.Qualifier].
// The same qualifiers apply to all URIs.
//
// Specified qualifier names *MUST* be unique.
repeated Qualifier qualifiers = 5;
}
// A response message for
// [Fetch.FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory].
message FetchDirectoryResponse {
// If the status has a code other than `OK`, it indicates that the operation
// was unable to be completed for reasons outside the servers' control.
// The possible fetch errors include:
// * `DEADLINE_EXCEEDED`: The operation could not be completed within the
// specified timeout.
// * `NOT_FOUND`: The requested asset was not found at the specified location.
// * `PERMISSION_DENIED`: The request was rejected by a remote server, or
// requested an asset from a disallowed origin.
// * `ABORTED`: The operation could not be completed, typically due to a
// failed consistency check.
google.rpc.Status status = 1;
// The uri from the request that resulted in a successful retrieval, or from
// which the error indicated in `status` was obtained.
string uri = 2;
// Any qualifiers known to the server and of interest to clients.
repeated Qualifier qualifiers = 3;
// A minimum timestamp the content is expected to be available through.
// Servers *MAY* omit this field, if not known with confidence.
google.protobuf.Timestamp expires_at = 4;
// The result of the fetch, if the status had code `OK`.
// the root digest of a directory tree, suitable for fetching via
// [ContentAddressableStorage.GetTree].
build.bazel.remote.execution.v2.Digest root_directory_digest = 5;
}
// The Push service is complementary to the Fetch, and allows for
// associating contents of URLs to be returned in future Fetch API calls.
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service Push {
// These APIs associate the identifying information of a resource, as
// indicated by URI and optionally Qualifiers, with content available in the
// CAS. For example, associating a repository url and a commit id with a
// Directory Digest.
//
// Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
// only allow certain URIs to be pushed.
//
// Clients *MUST* ensure associated content is available in CAS prior to
// pushing.
//
// Clients *MUST* ensure the Qualifiers listed correctly match the contents,
// and Servers *MAY* trust these values without validation.
// Fetch servers *MAY* require exact match of all qualifiers when returning
// content previously pushed, or allow fetching content with only a subset of
// the qualifiers specified on Push.
//
// Clients can specify expiration information that the server *SHOULD*
// respect. Subsequent requests can be used to alter the expiration time.
//
// A minimal compliant Fetch implementation may support only Push'd content
// and return `NOT_FOUND` for any resource that was not pushed first.
// Alternatively, a compliant implementation may choose to not support Push
// and only return resources that can be Fetch'd from origin.
//
// Errors will be returned as gRPC Status errors.
// The possible RPC errors include:
// * `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid.
// * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
// perform the requested operation. The client may retry after a delay.
// * `UNAVAILABLE`: Due to a transient condition the operation could not be
// completed. The client should retry.
// * `INTERNAL`: An internal error occurred while performing the operation.
// The client should retry.
rpc PushBlob(PushBlobRequest) returns (PushBlobResponse) {
option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushBlob" body: "*" };
}
rpc PushDirectory(PushDirectoryRequest) returns (PushDirectoryResponse) {
option (google.api.http) = { post: "/v1/{instance_name=**}/assets:pushDirectory" body: "*" };
}
}
// A request message for
// [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob].
message PushBlobRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The URI(s) of the content to associate. If multiple URIs are specified, the
// pushed content will be available to fetch by specifying any of them.
repeated string uris = 2;
// Qualifiers sub-specifying the content that is being pushed - see comments
// on [Qualifier][build.bazel.remote.asset.v1.Qualifier].
// The same qualifiers apply to all URIs.
repeated Qualifier qualifiers = 3;
// A time after which this content should stop being returned via [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob].
// Servers *MAY* expire content early, e.g. due to storage pressure.
google.protobuf.Timestamp expire_at = 4;
// The blob to associate.
build.bazel.remote.execution.v2.Digest blob_digest = 5;
// Referenced blobs or directories that need to not expire before expiration
// of this association, in addition to `blob_digest` itself.
// These fields are hints - clients *MAY* omit them, and servers *SHOULD*
// respect them, at the risk of increased incidents of Fetch responses
// indirectly referencing unavailable blobs.
repeated build.bazel.remote.execution.v2.Digest references_blobs = 6;
repeated build.bazel.remote.execution.v2.Digest references_directories = 7;
}
// A response message for
// [Push.PushBlob][build.bazel.remote.asset.v1.Push.PushBlob].
message PushBlobResponse { /* empty */ }
// A request message for
// [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory].
message PushDirectoryRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The URI(s) of the content to associate. If multiple URIs are specified, the
// pushed content will be available to fetch by specifying any of them.
repeated string uris = 2;
// Qualifiers sub-specifying the content that is being pushed - see comments
// on [Qualifier][build.bazel.remote.asset.v1.Qualifier].
// The same qualifiers apply to all URIs.
repeated Qualifier qualifiers = 3;
// A time after which this content should stop being returned via
// [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory].
// Servers *MAY* expire content early, e.g. due to storage pressure.
google.protobuf.Timestamp expire_at = 4;
// Directory to associate
build.bazel.remote.execution.v2.Digest root_directory_digest = 5;
// Referenced blobs or directories that need to not expire before expiration
// of this association, in addition to `root_directory_digest` itself.
// These fields are hints - clients *MAY* omit them, and servers *SHOULD*
// respect them, at the risk of increased incidents of Fetch responses
// indirectly referencing unavailable blobs.
repeated build.bazel.remote.execution.v2.Digest references_blobs = 6;
repeated build.bazel.remote.execution.v2.Digest references_directories = 7;
}
// A response message for
// [Push.PushDirectory][build.bazel.remote.asset.v1.Push.PushDirectory].
message PushDirectoryResponse { /* empty */ }
buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2.py 0000664 0000000 0000000 00000025337 14375152700 0031023 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: build/bazel/remote/asset/v1/remote_asset.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n.build/bazel/remote/asset/v1/remote_asset.proto\x12\x1b\x62uild.bazel.remote.asset.v1\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"(\n\tQualifier\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xdc\x01\n\x10\x46\x65tchBlobRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x17oldest_content_accepted\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04uris\x18\x04 \x03(\t\x12:\n\nqualifiers\x18\x05 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\"\xee\x01\n\x11\x46\x65tchBlobResponse\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12.\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12<\n\x0b\x62lob_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe1\x01\n\x15\x46\x65tchDirectoryRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12*\n\x07timeout\x18\x02 \x01(\x0b\x32\x19.google.protobuf.Duration\x12;\n\x17oldest_content_accepted\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04uris\x18\x04 \x03(\t\x12:\n\nqualifiers\x18\x05 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\"\xfd\x01\n\x16\x46\x65tchDirectoryResponse\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x0b\n\x03uri\x18\x02 \x01(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12.\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x15root_directory_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xeb\x02\n\x0fPushBlobRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04uris\x18\x02 \x03(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12-\n\texpire_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12<\n\x0b\x62lob_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x41\n\x10references_blobs\x18\x06 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12G\n\x16references_directories\x18\x07 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x12\n\x10PushBlobResponse\"\xfa\x02\n\x14PushDirectoryRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04uris\x18\x02 \x03(\t\x12:\n\nqualifiers\x18\x03 \x03(\x0b\x32&.build.bazel.remote.asset.v1.Qualifier\x12-\n\texpire_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x46\n\x15root_directory_digest\x18\x05 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x41\n\x10references_blobs\x18\x06 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12G\n\x16references_directories\x18\x07 
\x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x17\n\x15PushDirectoryResponse2\xdd\x02\n\x05\x46\x65tch\x12\x9e\x01\n\tFetchBlob\x12-.build.bazel.remote.asset.v1.FetchBlobRequest\x1a..build.bazel.remote.asset.v1.FetchBlobResponse\"2\x82\xd3\xe4\x93\x02,\"\'/v1/{instance_name=**}/assets:fetchBlob:\x01*\x12\xb2\x01\n\x0e\x46\x65tchDirectory\x12\x32.build.bazel.remote.asset.v1.FetchDirectoryRequest\x1a\x33.build.bazel.remote.asset.v1.FetchDirectoryResponse\"7\x82\xd3\xe4\x93\x02\x31\",/v1/{instance_name=**}/assets:fetchDirectory:\x01*2\xd4\x02\n\x04Push\x12\x9a\x01\n\x08PushBlob\x12,.build.bazel.remote.asset.v1.PushBlobRequest\x1a-.build.bazel.remote.asset.v1.PushBlobResponse\"1\x82\xd3\xe4\x93\x02+\"&/v1/{instance_name=**}/assets:pushBlob:\x01*\x12\xae\x01\n\rPushDirectory\x12\x31.build.bazel.remote.asset.v1.PushDirectoryRequest\x1a\x32.build.bazel.remote.asset.v1.PushDirectoryResponse\"6\x82\xd3\xe4\x93\x02\x30\"+/v1/{instance_name=**}/assets:pushDirectory:\x01*Ba\n\x1b\x62uild.bazel.remote.asset.v1B\x10RemoteAssetProtoP\x01Z\x0bremoteasset\xa2\x02\x02RA\xaa\x02\x1b\x42uild.Bazel.Remote.Asset.v1b\x06proto3')
_QUALIFIER = DESCRIPTOR.message_types_by_name['Qualifier']
_FETCHBLOBREQUEST = DESCRIPTOR.message_types_by_name['FetchBlobRequest']
_FETCHBLOBRESPONSE = DESCRIPTOR.message_types_by_name['FetchBlobResponse']
_FETCHDIRECTORYREQUEST = DESCRIPTOR.message_types_by_name['FetchDirectoryRequest']
_FETCHDIRECTORYRESPONSE = DESCRIPTOR.message_types_by_name['FetchDirectoryResponse']
_PUSHBLOBREQUEST = DESCRIPTOR.message_types_by_name['PushBlobRequest']
_PUSHBLOBRESPONSE = DESCRIPTOR.message_types_by_name['PushBlobResponse']
_PUSHDIRECTORYREQUEST = DESCRIPTOR.message_types_by_name['PushDirectoryRequest']
_PUSHDIRECTORYRESPONSE = DESCRIPTOR.message_types_by_name['PushDirectoryResponse']
Qualifier = _reflection.GeneratedProtocolMessageType('Qualifier', (_message.Message,), {
'DESCRIPTOR' : _QUALIFIER,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.Qualifier)
})
_sym_db.RegisterMessage(Qualifier)
FetchBlobRequest = _reflection.GeneratedProtocolMessageType('FetchBlobRequest', (_message.Message,), {
'DESCRIPTOR' : _FETCHBLOBREQUEST,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchBlobRequest)
})
_sym_db.RegisterMessage(FetchBlobRequest)
FetchBlobResponse = _reflection.GeneratedProtocolMessageType('FetchBlobResponse', (_message.Message,), {
'DESCRIPTOR' : _FETCHBLOBRESPONSE,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchBlobResponse)
})
_sym_db.RegisterMessage(FetchBlobResponse)
FetchDirectoryRequest = _reflection.GeneratedProtocolMessageType('FetchDirectoryRequest', (_message.Message,), {
'DESCRIPTOR' : _FETCHDIRECTORYREQUEST,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchDirectoryRequest)
})
_sym_db.RegisterMessage(FetchDirectoryRequest)
FetchDirectoryResponse = _reflection.GeneratedProtocolMessageType('FetchDirectoryResponse', (_message.Message,), {
'DESCRIPTOR' : _FETCHDIRECTORYRESPONSE,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.FetchDirectoryResponse)
})
_sym_db.RegisterMessage(FetchDirectoryResponse)
PushBlobRequest = _reflection.GeneratedProtocolMessageType('PushBlobRequest', (_message.Message,), {
'DESCRIPTOR' : _PUSHBLOBREQUEST,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushBlobRequest)
})
_sym_db.RegisterMessage(PushBlobRequest)
PushBlobResponse = _reflection.GeneratedProtocolMessageType('PushBlobResponse', (_message.Message,), {
'DESCRIPTOR' : _PUSHBLOBRESPONSE,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushBlobResponse)
})
_sym_db.RegisterMessage(PushBlobResponse)
PushDirectoryRequest = _reflection.GeneratedProtocolMessageType('PushDirectoryRequest', (_message.Message,), {
'DESCRIPTOR' : _PUSHDIRECTORYREQUEST,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushDirectoryRequest)
})
_sym_db.RegisterMessage(PushDirectoryRequest)
PushDirectoryResponse = _reflection.GeneratedProtocolMessageType('PushDirectoryResponse', (_message.Message,), {
'DESCRIPTOR' : _PUSHDIRECTORYRESPONSE,
'__module__' : 'build.bazel.remote.asset.v1.remote_asset_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.asset.v1.PushDirectoryResponse)
})
_sym_db.RegisterMessage(PushDirectoryResponse)
_FETCH = DESCRIPTOR.services_by_name['Fetch']
_PUSH = DESCRIPTOR.services_by_name['Push']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\033build.bazel.remote.asset.v1B\020RemoteAssetProtoP\001Z\013remoteasset\242\002\002RA\252\002\033Build.Bazel.Remote.Asset.v1'
_FETCH.methods_by_name['FetchBlob']._options = None
_FETCH.methods_by_name['FetchBlob']._serialized_options = b'\202\323\344\223\002,\"\'/v1/{instance_name=**}/assets:fetchBlob:\001*'
_FETCH.methods_by_name['FetchDirectory']._options = None
_FETCH.methods_by_name['FetchDirectory']._serialized_options = b'\202\323\344\223\0021\",/v1/{instance_name=**}/assets:fetchDirectory:\001*'
_PUSH.methods_by_name['PushBlob']._options = None
_PUSH.methods_by_name['PushBlob']._serialized_options = b'\202\323\344\223\002+\"&/v1/{instance_name=**}/assets:pushBlob:\001*'
_PUSH.methods_by_name['PushDirectory']._options = None
_PUSH.methods_by_name['PushDirectory']._serialized_options = b'\202\323\344\223\0020\"+/v1/{instance_name=**}/assets:pushDirectory:\001*'
_QUALIFIER._serialized_start=255
_QUALIFIER._serialized_end=295
_FETCHBLOBREQUEST._serialized_start=298
_FETCHBLOBREQUEST._serialized_end=518
_FETCHBLOBRESPONSE._serialized_start=521
_FETCHBLOBRESPONSE._serialized_end=759
_FETCHDIRECTORYREQUEST._serialized_start=762
_FETCHDIRECTORYREQUEST._serialized_end=987
_FETCHDIRECTORYRESPONSE._serialized_start=990
_FETCHDIRECTORYRESPONSE._serialized_end=1243
_PUSHBLOBREQUEST._serialized_start=1246
_PUSHBLOBREQUEST._serialized_end=1609
_PUSHBLOBRESPONSE._serialized_start=1611
_PUSHBLOBRESPONSE._serialized_end=1629
_PUSHDIRECTORYREQUEST._serialized_start=1632
_PUSHDIRECTORYREQUEST._serialized_end=2010
_PUSHDIRECTORYRESPONSE._serialized_start=2012
_PUSHDIRECTORYRESPONSE._serialized_end=2035
_FETCH._serialized_start=2038
_FETCH._serialized_end=2387
_PUSH._serialized_start=2390
_PUSH._serialized_end=2730
# @@protoc_insertion_point(module_scope)
buildstream-1.6.9/buildstream/_protos/build/bazel/remote/asset/v1/remote_asset_pb2_grpc.py 0000664 0000000 0000000 00000040617 14375152700 0032034 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.build.bazel.remote.asset.v1 import remote_asset_pb2 as build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2
class FetchStub(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.FetchBlob = channel.unary_unary(
'/build.bazel.remote.asset.v1.Fetch/FetchBlob',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
)
self.FetchDirectory = channel.unary_unary(
'/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
)
class FetchServicer(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def FetchBlob(self, request, context):
"""Resolve or fetch referenced assets, making them available to the caller and
other consumers in the [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
Servers *MAY* fetch content that they do not already have cached, for any
URLs they support.
Servers *SHOULD* ensure that referenced files are present in the CAS at the
time of the response, and (if supported) that they will remain available
for a reasonable period of time. The TTLs of the referenced blobs *SHOULD*
be increased if necessary and applicable.
In the event that a client receives a reference to content that is no
longer present, it *MAY* re-issue the request with
`oldest_content_accepted` set to a more recent timestamp than the original
attempt, to induce a re-fetch from origin.
Servers *MAY* cache fetched content and reuse it for subsequent requests,
subject to `oldest_content_accepted`.
Servers *MAY* support the complementary [Push][build.bazel.remote.asset.v1.Push]
API and allow content to be directly inserted for use in future fetch
responses.
Servers *MUST* ensure Fetch'd content matches all the specified
qualifiers except in the case of previously Push'd resources, for which
the server *MAY* trust the pushing client to have set the qualifiers
correctly, without validation.
Servers not implementing the complementary [Push][build.bazel.remote.asset.v1.Push]
API *MUST* reject requests containing qualifiers they do not support.
Servers *MAY* transform assets as part of the fetch. For example a
tarball fetched by [FetchDirectory][build.bazel.remote.asset.v1.Fetch.FetchDirectory]
might be unpacked, or a Git repository
fetched by [FetchBlob][build.bazel.remote.asset.v1.Fetch.FetchBlob]
might be passed through `git-archive`.
Errors handling the requested assets will be returned as gRPC Status errors
here; errors outside the server's control will be returned inline in the
`status` field of the response (see comment there for details).
The possible RPC errors include:
* `INVALID_ARGUMENT`: One or more arguments were invalid, such as a
qualifier that is not supported by the server.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
perform the requested operation. The client may retry after a delay.
* `UNAVAILABLE`: Due to a transient condition the operation could not be
completed. The client should retry.
* `INTERNAL`: An internal error occurred while performing the operation.
The client should retry.
* `DEADLINE_EXCEEDED`: The fetch could not be completed within the given
RPC deadline. The client should retry for at least as long as the value
provided in `timeout` field of the request.
In the case of unsupported qualifiers, the server *SHOULD* additionally
send a [BadRequest][google.rpc.BadRequest] error detail where, for each
unsupported qualifier, there is a `FieldViolation` with a `field` of
`qualifiers.name` and a `description` of `"{qualifier}" not supported`
indicating the name of the unsupported qualifier.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def FetchDirectory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FetchServicer_to_server(servicer, server):
rpc_method_handlers = {
'FetchBlob': grpc.unary_unary_rpc_method_handler(
servicer.FetchBlob,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.SerializeToString,
),
'FetchDirectory': grpc.unary_unary_rpc_method_handler(
servicer.FetchDirectory,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.asset.v1.Fetch', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Fetch(object):
"""The Fetch service resolves or fetches assets referenced by URI and
Qualifiers, returning a Digest for the content in
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def FetchBlob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchBlob',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchBlobResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def FetchDirectory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Fetch/FetchDirectory',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.FetchDirectoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
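# Illustrative usage sketch (not part of the generated API): how a client
# might call the Fetch service through FetchStub. The endpoint, instance name,
# URI and qualifier value below are hypothetical placeholders; the sketch is
# kept entirely in comments so that this generated module is left unchanged.
#
#   import grpc
#   from buildstream._protos.build.bazel.remote.asset.v1 import (
#       remote_asset_pb2, remote_asset_pb2_grpc)
#
#   channel = grpc.insecure_channel('localhost:50051')   # hypothetical server
#   fetch = remote_asset_pb2_grpc.FetchStub(channel)
#   request = remote_asset_pb2.FetchBlobRequest(
#       instance_name='example',                         # hypothetical instance
#       uris=['https://example.com/source.tar.gz'],      # hypothetical URI
#       qualifiers=[remote_asset_pb2.Qualifier(name='checksum.sri',
#                                              value='sha256-AAAA')])
#   response = fetch.FetchBlob(request)
#   # On success, response.blob_digest names the fetched content in the CAS.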
class PushStub(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.PushBlob = channel.unary_unary(
'/build.bazel.remote.asset.v1.Push/PushBlob',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
)
self.PushDirectory = channel.unary_unary(
'/build.bazel.remote.asset.v1.Push/PushDirectory',
request_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
)
class PushServicer(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def PushBlob(self, request, context):
"""These APIs associate the identifying information of a resource, as
indicated by URI and optionally Qualifiers, with content available in the
CAS. For example, associating a repository URL and a commit ID with a
Directory Digest.
Servers *SHOULD* only allow trusted clients to associate content, and *MAY*
only allow certain URIs to be pushed.
Clients *MUST* ensure associated content is available in CAS prior to
pushing.
Clients *MUST* ensure the Qualifiers listed correctly match the contents,
and Servers *MAY* trust these values without validation.
Fetch servers *MAY* require exact match of all qualifiers when returning
content previously pushed, or allow fetching content with only a subset of
the qualifiers specified on Push.
Clients can specify expiration information that the server *SHOULD*
respect. Subsequent requests can be used to alter the expiration time.
A minimal compliant Fetch implementation may support only Push'd content
and return `NOT_FOUND` for any resource that was not pushed first.
Alternatively, a compliant implementation may choose to not support Push
and only return resources that can be Fetch'd from origin.
Errors will be returned as gRPC Status errors.
The possible RPC errors include:
* `INVALID_ARGUMENT`: One or more arguments to the RPC were invalid.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to
perform the requested operation. The client may retry after a delay.
* `UNAVAILABLE`: Due to a transient condition the operation could not be
completed. The client should retry.
* `INTERNAL`: An internal error occurred while performing the operation.
The client should retry.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def PushDirectory(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PushServicer_to_server(servicer, server):
rpc_method_handlers = {
'PushBlob': grpc.unary_unary_rpc_method_handler(
servicer.PushBlob,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.SerializeToString,
),
'PushDirectory': grpc.unary_unary_rpc_method_handler(
servicer.PushDirectory,
request_deserializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.asset.v1.Push', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Push(object):
"""The Push service is complementary to the Fetch, and allows for
associating contents of URLs to be returned in future Fetch API calls.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def PushBlob(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushBlob',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushBlobResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def PushDirectory(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.asset.v1.Push/PushDirectory',
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_asset_dot_v1_dot_remote__asset__pb2.PushDirectoryResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
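# Illustrative usage sketch (not part of the generated API): the matching Push
# side, assuming the referenced blob is already present in the CAS. The digest
# below is the SHA-256 of the empty blob and serves only as a placeholder; the
# channel and names mirror the hypothetical Fetch example above.
#
#   from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
#
#   push = remote_asset_pb2_grpc.PushStub(channel)
#   push.PushBlob(remote_asset_pb2.PushBlobRequest(
#       instance_name='example',                         # hypothetical instance
#       uris=['https://example.com/source.tar.gz'],      # hypothetical URI
#       blob_digest=remote_execution_pb2.Digest(
#           hash='e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855',
#           size_bytes=0)))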
buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/ 0000775 0000000 0000000 00000000000 14375152700 0025540 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0027637 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/ 0000775 0000000 0000000 00000000000 14375152700 0026067 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0030166 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution.proto 0000664 0000000 0000000 00000161214 14375152700 0032537 0 ustar 00root root 0000000 0000000 // Copyright 2018 The Bazel Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package build.bazel.remote.execution.v2;
import "build/bazel/semver/semver.proto";
import "google/api/annotations.proto";
import "google/longrunning/operations.proto";
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/rpc/status.proto";
option csharp_namespace = "Build.Bazel.Remote.Execution.V2";
option go_package = "remoteexecution";
option java_multiple_files = true;
option java_outer_classname = "RemoteExecutionProto";
option java_package = "build.bazel.remote.execution.v2";
option objc_class_prefix = "REX";
// The Remote Execution API is used to execute an
// [Action][build.bazel.remote.execution.v2.Action] on the remote
// workers.
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service Execution {
// Execute an action remotely.
//
// In order to execute an action, the client must first upload all of the
// inputs, the
// [Command][build.bazel.remote.execution.v2.Command] to run, and the
// [Action][build.bazel.remote.execution.v2.Action] into the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
// It then calls `Execute` with an `action_digest` referring to them. The
// server will run the action and eventually return the result.
//
// The input `Action`'s fields MUST meet the various canonicalization
// requirements specified in the documentation for their types so that it has
// the same digest as other logically equivalent `Action`s. The server MAY
// enforce the requirements and return errors if a non-canonical input is
// received. It MAY also proceed without verifying some or all of the
// requirements, such as for performance reasons. If the server does not
// verify the requirement, then it will treat the `Action` as distinct from
// another logically equivalent action if they hash differently.
//
// Returns a stream of
// [google.longrunning.Operation][google.longrunning.Operation] messages
// describing the resulting execution, with eventual `response`
// [ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
// `metadata` on the operation is of type
// [ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
//
// If the client remains connected after the first response is returned by
// the server, then updates are streamed as if the client had called
// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
// until the execution completes or the request reaches an error. The
// operation can also be queried using [Operations
// API][google.longrunning.Operations.GetOperation].
//
// The server NEED NOT implement other methods or functionality of the
// Operations API.
//
// Errors discovered during creation of the `Operation` will be reported
// as gRPC Status errors, while errors that occurred while running the
// action will be reported in the `status` field of the `ExecuteResponse`. The
// server MUST NOT set the `error` field of the `Operation` proto.
// The possible errors include:
// * `INVALID_ARGUMENT`: One or more arguments are invalid.
// * `FAILED_PRECONDITION`: One or more errors occurred in setting up the
// action requested, such as a missing input or command or no worker being
// available. The client may be able to fix the errors and retry.
// * `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
// the action.
// * `UNAVAILABLE`: Due to a transient condition, such as all workers being
// occupied (and the server does not support a queue), the action could not
// be started. The client should retry.
// * `INTERNAL`: An internal error occurred in the execution engine or the
// worker.
// * `DEADLINE_EXCEEDED`: The execution timed out.
//
// In the case of a missing input or command, the server SHOULD additionally
// send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
// where, for each requested blob not present in the CAS, there is a
// `Violation` with a `type` of `MISSING` and a `subject` of
// `"blobs/{hash}/{size}"` indicating the digest of the missing blob.
rpc Execute(ExecuteRequest) returns (stream google.longrunning.Operation) {
option (google.api.http) = { post: "/v2/{instance_name=**}/actions:execute" body: "*" };
}
// Wait for an execution operation to complete. When the client initially
// makes the request, the server immediately responds with the current status
// of the execution. The server will leave the request stream open until the
// operation completes, and then respond with the completed operation. The
// server MAY choose to stream additional updates as execution progresses,
// such as to provide an update as to the state of the execution.
rpc WaitExecution(WaitExecutionRequest) returns (stream google.longrunning.Operation) {
option (google.api.http) = { post: "/v2/{name=operations/**}:waitExecution" body: "*" };
}
}
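// Example (non-normative): a minimal sketch of the client-side flow described
// above, using the Python modules generated from this file. The endpoint,
// instance name and action digest are hypothetical, and the Action, Command
// and input root are assumed to have been uploaded to the CAS already.
//
// ```python
// import grpc
// from buildstream._protos.build.bazel.remote.execution.v2 import (
//     remote_execution_pb2, remote_execution_pb2_grpc)
//
// channel = grpc.insecure_channel('localhost:50051')
// execution = remote_execution_pb2_grpc.ExecutionStub(channel)
// request = remote_execution_pb2.ExecuteRequest(
//     instance_name='example',
//     action_digest=remote_execution_pb2.Digest(
//         hash='a' * 64,          # placeholder for the SHA-256 of the Action
//         size_bytes=142))        # placeholder size
// for operation in execution.Execute(request):
//     if operation.done:
//         # operation.response is a google.protobuf.Any wrapping ExecuteResponse.
//         break
// ```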
// The action cache API is used to query whether a given action has already been
// performed and, if so, retrieve its result. Unlike the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
// which addresses blobs by their own content, the action cache addresses the
// [ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
// digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
// which produced them.
//
// The lifetime of entries in the action cache is implementation-specific, but
// the server SHOULD assume that more recently used entries are more likely to
// be used again. Additionally, action cache implementations SHOULD ensure that
// any blobs referenced in the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
// are still valid when returning a result.
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service ActionCache {
// Retrieve a cached execution result.
//
// Errors:
// * `NOT_FOUND`: The requested `ActionResult` is not in the cache.
rpc GetActionResult(GetActionResultRequest) returns (ActionResult) {
option (google.api.http) = { get: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" };
}
// Upload a new execution result.
//
// This method is intended for servers which implement the distributed cache
// independently of the
// [Execution][build.bazel.remote.execution.v2.Execution] API. As a
// result, it is OPTIONAL for servers to implement.
//
// In order to allow the server to perform access control based on the type of
// action, and to assist with client debugging, the client MUST first upload
// the [Action][build.bazel.remote.execution.v2.Action] that produced the
// result, along with its
// [Command][build.bazel.remote.execution.v2.Command], into the
// `ContentAddressableStorage`.
//
// Errors:
// * `NOT_IMPLEMENTED`: This method is not supported by the server.
// * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
// entry to the cache.
rpc UpdateActionResult(UpdateActionResultRequest) returns (ActionResult) {
option (google.api.http) = { put: "/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}" body: "action_result" };
}
}
// The CAS (content-addressable storage) is used to store the inputs to and
// outputs from the execution service. Each piece of content is addressed by the
// digest of its binary data.
//
// Most of the binary data stored in the CAS is opaque to the execution engine,
// and is only used as a communication medium. In order to build an
// [Action][build.bazel.remote.execution.v2.Action],
// however, the client will need to also upload the
// [Command][build.bazel.remote.execution.v2.Command] and input root
// [Directory][build.bazel.remote.execution.v2.Directory] for the Action.
// The Command and Directory messages must be marshalled to wire format and then
// uploaded under the hash as with any other piece of content. In practice, the
// input root directory is likely to refer to other Directories in its
// hierarchy, which must also each be uploaded on their own.
//
// For small file uploads the client should group them together and call
// [BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
// on chunks of no more than 10 MiB. For large uploads, the client must use the
// [Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
// `resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
// where `instance_name` is as described in the next paragraph, `uuid` is a
// version 4 UUID generated by the client, and `hash` and `size` are the
// [Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
// `uuid` is used only to avoid collisions when multiple clients try to upload
// the same file (or the same client tries to upload the file multiple times at
// once on different threads), so the client MAY reuse the `uuid` for uploading
// different blobs. The `resource_name` may optionally have a trailing filename
// (or other metadata) for a client to use if it is storing URLs, as in
// `{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
// after the `size` is ignored.
//
// A single server MAY support multiple instances of the execution system, each
// with their own workers, storage, cache, etc. The exact relationship between
// instances is up to the server. If the server does, then the `instance_name`
// is an identifier, possibly containing multiple path segments, used to
// distinguish between the various instances on the server, in a manner defined
// by the server. For servers which do not support multiple instances, then the
// `instance_name` is the empty path and the leading slash is omitted, so that
// the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
//
// When attempting an upload, if another client has already completed the upload
// (which may occur in the middle of a single upload if another client uploads
// the same blob concurrently), the request will terminate immediately with
// a response whose `committed_size` is the full size of the uploaded file
// (regardless of how much data was transmitted by the client). If the client
// completes the upload but the
// [Digest][build.bazel.remote.execution.v2.Digest] does not match, an
// `INVALID_ARGUMENT` error will be returned. In either case, the client should
// not attempt to retry the upload.
//
// For downloading blobs, the client must use the
// [Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
// a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
// `instance_name` is the instance name (see above), and `hash` and `size` are
// the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
//
// The lifetime of entries in the CAS is implementation specific, but it SHOULD
// be long enough to allow for newly-added and recently looked-up entries to be
// used in subsequent calls (e.g. to
// [Execute][build.bazel.remote.execution.v2.Execution.Execute]).
//
// As with other services in the Remote Execution API, any call may return an
// error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
// information about when the client should retry the request; clients SHOULD
// respect the information provided.
service ContentAddressableStorage {
// Determine if blobs are present in the CAS.
//
// Clients can use this API before uploading blobs to determine which ones are
// already present in the CAS and do not need to be uploaded again.
//
// There are no method-specific errors.
rpc FindMissingBlobs(FindMissingBlobsRequest) returns (FindMissingBlobsResponse) {
option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:findMissing" body: "*" };
}
// Upload many blobs at once.
//
// The server may enforce a limit of the combined total size of blobs
// to be uploaded using this API. This limit may be obtained using the
// [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
// Requests exceeding the limit should either be split into smaller
// chunks or uploaded using the
// [ByteStream API][google.bytestream.ByteStream], as appropriate.
//
// This request is equivalent to calling a Bytestream `Write` request
// on each individual blob, in parallel. The requests may succeed or fail
// independently.
//
// Errors:
// * `INVALID_ARGUMENT`: The client attempted to upload more than the
// server supported limit.
//
// Individual requests may return the following errors, additionally:
// * `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
// * `INVALID_ARGUMENT`: The
// [Digest][build.bazel.remote.execution.v2.Digest] does not match the
// provided data.
rpc BatchUpdateBlobs(BatchUpdateBlobsRequest) returns (BatchUpdateBlobsResponse) {
option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchUpdate" body: "*" };
}
// Download many blobs at once.
//
// The server may enforce a limit of the combined total size of blobs
// to be downloaded using this API. This limit may be obtained using the
// [Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
// Requests exceeding the limit should either be split into smaller
// chunks or downloaded using the
// [ByteStream API][google.bytestream.ByteStream], as appropriate.
//
// This request is equivalent to calling a Bytestream `Read` request
// on each individual blob, in parallel. The requests may succeed or fail
// independently.
//
// Errors:
// * `INVALID_ARGUMENT`: The client attempted to read more than the
// server supported limit.
//
// Every error on individual read will be returned in the corresponding digest
// status.
rpc BatchReadBlobs(BatchReadBlobsRequest) returns (BatchReadBlobsResponse) {
option (google.api.http) = { post: "/v2/{instance_name=**}/blobs:batchRead" body: "*" };
}
// Fetch the entire directory tree rooted at a node.
//
// This request must be targeted at a
// [Directory][build.bazel.remote.execution.v2.Directory] stored in the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
// (CAS). The server will enumerate the `Directory` tree recursively and
// return every node descended from the root.
//
// The GetTreeRequest.page_token parameter can be used to skip ahead in
// the stream (e.g. when retrying a partially completed and aborted request),
// by setting it to a value taken from GetTreeResponse.next_page_token of the
// last successfully processed GetTreeResponse.
//
// The exact traversal order is unspecified and, unless retrieving subsequent
// pages from an earlier request, is not guaranteed to be stable across
// multiple invocations of `GetTree`.
//
// If part of the tree is missing from the CAS, the server will return the
// portion present and omit the rest.
//
// * `NOT_FOUND`: The requested tree root is not present in the CAS.
rpc GetTree(GetTreeRequest) returns (stream GetTreeResponse) {
option (google.api.http) = { get: "/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree" };
}
}
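// Example (non-normative): constructing the ByteStream upload and download
// resource names described in the comment preceding this service. The
// instance name is hypothetical; the hash and size are those of the empty
// blob and serve only as placeholders.
//
// ```python
// import uuid
//
// instance_name = 'example'
// blob_hash = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
// blob_size = 0
//
// upload_resource = '{}/uploads/{}/blobs/{}/{}'.format(
//     instance_name, uuid.uuid4(), blob_hash, blob_size)
// download_resource = '{}/blobs/{}/{}'.format(instance_name, blob_hash, blob_size)
// ```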
// The Capabilities service may be used by remote execution clients to query
// various server properties, in order to self-configure or return meaningful
// error messages.
//
// The query may include a particular `instance_name`, in which case the values
// returned will pertain to that instance.
service Capabilities {
// GetCapabilities returns the server capabilities configuration.
rpc GetCapabilities(GetCapabilitiesRequest) returns (ServerCapabilities) {
option (google.api.http) = {
get: "/v2/{instance_name=**}/capabilities"
};
}
}
// An `Action` captures all the information about an execution which is required
// to reproduce it.
//
// `Action`s are the core component of the [Execution] service. A single
// `Action` represents a repeatable action that can be performed by the
// execution service. `Action`s can be succinctly identified by the digest of
// their wire format encoding and, once an `Action` has been executed, will be
// cached in the action cache. Future requests can then use the cached result
// rather than needing to run afresh.
//
// When a server completes execution of an
// [Action][build.bazel.remote.execution.v2.Action], it MAY choose to
// cache the [result][build.bazel.remote.execution.v2.ActionResult] in
// the [ActionCache][build.bazel.remote.execution.v2.ActionCache] unless
// `do_not_cache` is `true`. Clients SHOULD expect the server to do so. By
// default, future calls to
// [Execute][build.bazel.remote.execution.v2.Execution.Execute] the same
// `Action` will also serve their results from the cache. Clients must take care
// to understand the caching behaviour. Ideally, all `Action`s will be
// reproducible so that serving a result from cache is always desirable and
// correct.
message Action {
// The digest of the [Command][build.bazel.remote.execution.v2.Command]
// to run, which MUST be present in the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
Digest command_digest = 1;
// The digest of the root
// [Directory][build.bazel.remote.execution.v2.Directory] for the input
// files. The files in the directory tree are available in the correct
// location on the build machine before the command is executed. The root
// directory, as well as every subdirectory and content blob referred to, MUST
// be in the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
Digest input_root_digest = 2;
reserved 3 to 5; // Used for fields moved to [Command][build.bazel.remote.execution.v2.Command].
// A timeout after which the execution should be killed. If the timeout is
// absent, then the client is specifying that the execution should continue
// as long as the server will let it. The server SHOULD impose a timeout if
// the client does not specify one, however, if the client does specify a
// timeout that is longer than the server's maximum timeout, the server MUST
// reject the request.
//
// The timeout is a part of the
// [Action][build.bazel.remote.execution.v2.Action] message, and
// therefore two `Actions` with different timeouts are different, even if they
// are otherwise identical. This is because, if they were not, running an
// `Action` with a lower timeout than is required might result in a cache hit
// from an execution run with a longer timeout, hiding the fact that the
// timeout is too short. By encoding it directly in the `Action`, a lower
// timeout will result in a cache miss and the execution will time out and fail
// immediately, rather than whenever the cache entry gets evicted.
google.protobuf.Duration timeout = 6;
// If true, then the `Action`'s result cannot be cached.
bool do_not_cache = 7;
}
// A `Command` is the actual command executed by a worker running an
// [Action][build.bazel.remote.execution.v2.Action] and specifications of its
// environment.
//
// Except as otherwise required, the environment (such as which system
// libraries or binaries are available, and what filesystems are mounted where)
// is defined by and specific to the implementation of the remote execution API.
message Command {
// An `EnvironmentVariable` is one variable to set in the running program's
// environment.
message EnvironmentVariable {
// The variable name.
string name = 1;
// The variable value.
string value = 2;
}
// The arguments to the command. The first argument must be the path to the
// executable, which must be either a relative path, in which case it is
// evaluated with respect to the input root, or an absolute path.
repeated string arguments = 1;
// The environment variables to set when running the program. The worker may
// provide its own default environment variables; these defaults can be
// overridden using this field. Additional variables can also be specified.
//
// In order to ensure that equivalent `Command`s always hash to the same
// value, the environment variables MUST be lexicographically sorted by name.
// Sorting of strings is done by code point, equivalently, by the UTF-8 bytes.
repeated EnvironmentVariable environment_variables = 2;
// A list of the output files that the client expects to retrieve from the
// action. Only the listed files, as well as directories listed in
// `output_directories`, will be returned to the client as output.
// Other files that may be created during command execution are discarded.
//
// The paths are relative to the working directory of the action execution.
// The paths are specified using a single forward slash (`/`) as a path
// separator, even if the execution platform natively uses a different
// separator. The path MUST NOT include a trailing slash, nor a leading slash,
// being a relative path.
//
// In order to ensure consistent hashing of the same Action, the output paths
// MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
// bytes).
//
// An output file cannot be duplicated, be a parent of another output file, be
// a child of a listed output directory, or have the same path as any of the
// listed output directories.
repeated string output_files = 3;
// A list of the output directories that the client expects to retrieve from
// the action. Only the contents of the indicated directories (recursively
// including the contents of their subdirectories) will be
// returned, as well as files listed in `output_files`. Other files that may
// be created during command execution are discarded.
//
// The paths are relative to the working directory of the action execution.
// The paths are specified using a single forward slash (`/`) as a path
// separator, even if the execution platform natively uses a different
// separator. The path MUST NOT include a trailing slash, nor a leading slash,
// being a relative path. The special value of empty string is allowed,
// although not recommended, and can be used to capture the entire working
// directory tree, including inputs.
//
// In order to ensure consistent hashing of the same Action, the output paths
// MUST be sorted lexicographically by code point (or, equivalently, by UTF-8
// bytes).
//
// An output directory cannot be duplicated, be a parent of another output
// directory, be a parent of a listed output file, or have the same path as
// any of the listed output files.
repeated string output_directories = 4;
// The platform requirements for the execution environment. The server MAY
// choose to execute the action on any worker satisfying the requirements, so
// the client SHOULD ensure that running the action on any such worker will
// have the same result.
Platform platform = 5;
// The working directory, relative to the input root, for the command to run
// in. It must be a directory which exists in the input tree. If it is left
// empty, then the action is run in the input root.
string working_directory = 6;
}
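// Example (non-normative): building a `Command` that satisfies the sorting
// requirements above, using the Python module generated from this file. The
// arguments, environment variables and output paths are hypothetical.
//
// ```python
// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
//
// env = {'PATH': '/usr/bin', 'LANG': 'C'}
// command = remote_execution_pb2.Command(
//     arguments=['sh', '-c', 'make install'],
//     environment_variables=[
//         remote_execution_pb2.Command.EnvironmentVariable(name=name, value=value)
//         for name, value in sorted(env.items())],   # sorted by name, by code point
//     output_files=sorted(['etc/app.conf', 'usr/bin/app']),
//     output_directories=sorted(['usr/share/app']))
// ```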
// A `Platform` is a set of requirements, such as hardware, operating system, or
// compiler toolchain, for an
// [Action][build.bazel.remote.execution.v2.Action]'s execution
// environment. A `Platform` is represented as a series of key-value pairs
// representing the properties that are required of the platform.
message Platform {
// A single property for the environment. The server is responsible for
// specifying the property `name`s that it accepts. If an unknown `name` is
// provided in the requirements for an
// [Action][build.bazel.remote.execution.v2.Action], the server SHOULD
// reject the execution request. If permitted by the server, the same `name`
// may occur multiple times.
//
// The server is also responsible for specifying the interpretation of
// property `value`s. For instance, a property describing how much RAM must be
// available may be interpreted as allowing a worker with 16GB to fulfill a
// request for 8GB, while a property describing the OS environment on which
// the action must be performed may require an exact match with the worker's
// OS.
//
// The server MAY use the `value` of one or more properties to determine how
// it sets up the execution environment, such as by making specific system
// files available to the worker.
message Property {
// The property name.
string name = 1;
// The property value.
string value = 2;
}
// The properties that make up this platform. In order to ensure that
// equivalent `Platform`s always hash to the same value, the properties MUST
// be lexicographically sorted by name, and then by value. Sorting of strings
// is done by code point, equivalently, by the UTF-8 bytes.
repeated Property properties = 1;
}
// A `Directory` represents a directory node in a file tree, containing zero or
// more children [FileNodes][build.bazel.remote.execution.v2.FileNode],
// [DirectoryNodes][build.bazel.remote.execution.v2.DirectoryNode] and
// [SymlinkNodes][build.bazel.remote.execution.v2.SymlinkNode].
// Each `Node` contains its name in the directory, either the digest of its
// content (either a file blob or a `Directory` proto) or a symlink target, as
// well as possibly some metadata about the file or directory.
//
// In order to ensure that two equivalent directory trees hash to the same
// value, the following restrictions MUST be obeyed when constructing a
// `Directory`:
// - Every child in the directory must have a path of exactly one segment.
// Multiple levels of directory hierarchy may not be collapsed.
// - Each child in the directory must have a unique path segment (file name).
// - The files, directories and symlinks in the directory must each be sorted
// in lexicographical order by path. The path strings must be sorted by code
// point, equivalently, by UTF-8 bytes.
//
// A `Directory` that obeys the restrictions is said to be in canonical form.
//
// As an example, the following could be used for a file named `bar` and a
// directory named `foo` with an executable file named `baz` (hashes shortened
// for readability):
//
// ```json
// // (Directory proto)
// {
// files: [
// {
// name: "bar",
// digest: {
// hash: "4a73bc9d03...",
// size: 65534
// }
// }
// ],
// directories: [
// {
// name: "foo",
// digest: {
// hash: "4cf2eda940...",
// size: 43
// }
// }
// ]
// }
//
// // (Directory proto with hash "4cf2eda940..." and size 43)
// {
// files: [
// {
// name: "baz",
// digest: {
// hash: "b2c941073e...",
// size: 1294,
// },
// is_executable: true
// }
// ]
// }
// ```
message Directory {
// The files in the directory.
repeated FileNode files = 1;
// The subdirectories in the directory.
repeated DirectoryNode directories = 2;
// The symlinks in the directory.
repeated SymlinkNode symlinks = 3;
}
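// Example (non-normative): assembling a `Directory` in the canonical form
// described above with the Python module generated from this file. The file
// names are hypothetical and the digests are placeholders.
//
// ```python
// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
//
// bar_digest = remote_execution_pb2.Digest(hash='a' * 64, size_bytes=65534)
// baz_digest = remote_execution_pb2.Digest(hash='b' * 64, size_bytes=1294)
// directory = remote_execution_pb2.Directory(
//     files=sorted(
//         [remote_execution_pb2.FileNode(name='baz', digest=baz_digest,
//                                        is_executable=True),
//          remote_execution_pb2.FileNode(name='bar', digest=bar_digest)],
//         key=lambda node: node.name))   # canonical form: children sorted by name
// ```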
// A `FileNode` represents a single file and associated metadata.
message FileNode {
// The name of the file.
string name = 1;
// The digest of the file's content.
Digest digest = 2;
reserved 3; // Reserved to ensure wire-compatibility with `OutputFile`.
// True if file is executable, false otherwise.
bool is_executable = 4;
}
// A `DirectoryNode` represents a child of a
// [Directory][build.bazel.remote.execution.v2.Directory] which is itself
// a `Directory` and its associated metadata.
message DirectoryNode {
// The name of the directory.
string name = 1;
// The digest of the
// [Directory][build.bazel.remote.execution.v2.Directory] object
// represented. See [Digest][build.bazel.remote.execution.v2.Digest]
// for information about how to take the digest of a proto message.
Digest digest = 2;
}
// A `SymlinkNode` represents a symbolic link.
message SymlinkNode {
// The name of the symlink.
string name = 1;
// The target path of the symlink. The path separator is a forward slash `/`.
// The target path can be relative to the parent directory of the symlink or
// it can be an absolute path starting with `/`. Support for absolute paths
// can be checked using the [Capabilities][build.bazel.remote.execution.v2.Capabilities]
// API. The canonical form forbids the substrings `/./` and `//` in the target
// path. `..` components are allowed anywhere in the target path.
string target = 2;
}
// A content digest. A digest for a given blob consists of the size of the blob
// and its hash. The hash algorithm to use is defined by the server, but servers
// SHOULD use SHA-256.
//
// The size is considered to be an integral part of the digest and cannot be
// separated. That is, even if the `hash` field is correctly specified but
// `size_bytes` is not, the server MUST reject the request.
//
// The reason for including the size in the digest is as follows: in a great
// many cases, the server needs to know the size of the blob it is about to work
// with prior to starting an operation with it, such as flattening Merkle tree
// structures or streaming it to a worker. Technically, the server could
// implement a separate metadata store, but this results in a significantly more
// complicated implementation as opposed to having the client specify the size
// up-front (or storing the size along with the digest in every message where
// digests are embedded). This does mean that the API leaks some implementation
// details of (what we consider to be) a reasonable server implementation, but
// we consider this to be a worthwhile tradeoff.
//
// When a `Digest` is used to refer to a proto message, it always refers to the
// message in binary encoded form. To ensure consistent hashing, clients and
// servers MUST ensure that they serialize messages according to the following
// rules, even if there are alternate valid encodings for the same message.
// - Fields are serialized in tag order.
// - There are no unknown fields.
// - There are no duplicate fields.
// - Fields are serialized according to the default semantics for their type.
//
// Most protocol buffer implementations will always follow these rules when
// serializing, but care should be taken to avoid shortcuts. For instance,
// concatenating two messages to merge them may produce duplicate fields.
message Digest {
// The hash. In the case of SHA-256, it will always be a lowercase hex string
// exactly 64 characters long.
string hash = 1;
// The size of the blob, in bytes.
int64 size_bytes = 2;
}
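// Example (non-normative): computing the `Digest` of a serialized proto
// message with SHA-256, as described above, using the Python module generated
// from this file.
//
// ```python
// import hashlib
// from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
//
// command = remote_execution_pb2.Command(arguments=['echo', 'hello'])
// data = command.SerializeToString()
// digest = remote_execution_pb2.Digest(
//     hash=hashlib.sha256(data).hexdigest(),   # lowercase hex, 64 characters
//     size_bytes=len(data))
// ```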
// ExecutedActionMetadata contains details about a completed execution.
message ExecutedActionMetadata {
// The name of the worker which ran the execution.
string worker = 1;
// When was the action added to the queue.
google.protobuf.Timestamp queued_timestamp = 2;
// When the worker received the action.
google.protobuf.Timestamp worker_start_timestamp = 3;
// When the worker completed the action, including all stages.
google.protobuf.Timestamp worker_completed_timestamp = 4;
// When the worker started fetching action inputs.
google.protobuf.Timestamp input_fetch_start_timestamp = 5;
// When the worker finished fetching action inputs.
google.protobuf.Timestamp input_fetch_completed_timestamp = 6;
// When the worker started executing the action command.
google.protobuf.Timestamp execution_start_timestamp = 7;
// When the worker completed executing the action command.
google.protobuf.Timestamp execution_completed_timestamp = 8;
// When the worker started uploading action outputs.
google.protobuf.Timestamp output_upload_start_timestamp = 9;
// When the worker finished uploading action outputs.
google.protobuf.Timestamp output_upload_completed_timestamp = 10;
}
// An ActionResult represents the result of an
// [Action][build.bazel.remote.execution.v2.Action] being run.
message ActionResult {
reserved 1; // Reserved for use as the resource name.
// The output files of the action. For each output file requested in the
// `output_files` field of the Action, if the corresponding file existed after
// the action completed, a single entry will be present in the output list.
//
// If the action does not produce the requested output, or produces a
// directory where a regular file is expected or vice versa, then that output
// will be omitted from the list. The server is free to arrange the output
// list as desired; clients MUST NOT assume that the output list is sorted.
repeated OutputFile output_files = 2;
// The output directories of the action. For each output directory requested
// in the `output_directories` field of the Action, if the corresponding
// directory existed after the action completed, a single entry will be
// present in the output list, which will contain the digest of a
// [Tree][build.bazel.remote.execution.v2.Tree] message containing the
// directory tree, and the path equal exactly to the corresponding Action
// output_directories member.
//
// As an example, suppose the Action had an output directory `a/b/dir` and the
// execution produced the following contents in `a/b/dir`: a file named `bar`
// and a directory named `foo` with an executable file named `baz`. Then,
// output_directory will contain (hashes shortened for readability):
//
// ```json
// // OutputDirectory proto:
// {
// path: "a/b/dir"
// tree_digest: {
// hash: "4a73bc9d03...",
// size: 55
// }
// }
// // Tree proto with hash "4a73bc9d03..." and size 55:
// {
// root: {
// files: [
// {
// name: "bar",
// digest: {
// hash: "4a73bc9d03...",
// size: 65534
// }
// }
// ],
// directories: [
// {
// name: "foo",
// digest: {
// hash: "4cf2eda940...",
// size: 43
// }
// }
// ]
// }
// children : {
// // (Directory proto with hash "4cf2eda940..." and size 43)
// files: [
// {
// name: "baz",
// digest: {
// hash: "b2c941073e...",
// size: 1294,
// },
// is_executable: true
// }
// ]
// }
// }
// ```
repeated OutputDirectory output_directories = 3;
// The exit code of the command.
int32 exit_code = 4;
// The standard output buffer of the action. The server will determine, based
// on the size of the buffer, whether to return it in raw form or to return
// a digest in `stdout_digest` that points to the buffer. If neither is set,
// then the buffer is empty. The client SHOULD NOT assume it will get one of
// the raw buffer or a digest on any given request and should be prepared to
// handle either.
bytes stdout_raw = 5;
// The digest for a blob containing the standard output of the action, which
// can be retrieved from the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
// See `stdout_raw` for when this will be set.
Digest stdout_digest = 6;
// The standard error buffer of the action. The server will determine, based
// on the size of the buffer, whether to return it in raw form or to return
// a digest in `stderr_digest` that points to the buffer. If neither is set,
// then the buffer is empty. The client SHOULD NOT assume it will get one of
// the raw buffer or a digest on any given request and should be prepared to
// handle either.
bytes stderr_raw = 7;
// The digest for a blob containing the standard error of the action, which
// can be retrieved from the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
// See `stderr_raw` for when this will be set.
Digest stderr_digest = 8;
// The details of the execution that originally produced this result.
ExecutedActionMetadata execution_metadata = 9;
}
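// As an illustrative sketch only, in the same informal notation as the Tree
// example above (placeholder hashes, digest sizes written as `size`), a
// successful ActionResult whose stdout was too large to inline might look
// like:
//
// ```json
// // ActionResult proto:
// {
//   output_files: [
//     {
//       path: "a/b/output.bin",
//       digest: { hash: "f1d2d2f9...", size: 4096 },
//       is_executable: false
//     }
//   ],
//   exit_code: 0,
//   stdout_digest: { hash: "9a0364b9...", size: 70000 }
// }
// ```
//
// Here the server chose to return `stdout_digest` rather than `stdout_raw`,
// so a client that wants the log would fetch it from the CAS.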
// An `OutputFile` is similar to a
// [FileNode][build.bazel.remote.execution.v2.FileNode], but it is used as an
// output in an `ActionResult`. It allows a full file path rather than
// only a name.
//
// `OutputFile` is binary-compatible with `FileNode`.
message OutputFile {
// The full path of the file relative to the input root, including the
// filename. The path separator is a forward slash `/`. Since this is a
// relative path, it MUST NOT begin with a leading forward slash.
string path = 1;
// The digest of the file's content.
Digest digest = 2;
reserved 3; // Used for a removed field in an earlier version of the API.
// True if file is executable, false otherwise.
bool is_executable = 4;
}
// A `Tree` contains all the
// [Directory][build.bazel.remote.execution.v2.Directory] protos in a
// single directory Merkle tree, compressed into one message.
message Tree {
// The root directory in the tree.
Directory root = 1;
// All the child directories: the directories referred to by the root and,
// recursively, all its children. In order to reconstruct the directory tree,
// the client must take the digests of each of the child directories and then
// build up a tree starting from the `root`.
repeated Directory children = 2;
}
// An `OutputDirectory` is the output in an `ActionResult` corresponding to a
// directory's full contents rather than a single file.
message OutputDirectory {
// The full path of the directory relative to the working directory. The path
// separator is a forward slash `/`. Since this is a relative path, it MUST
// NOT begin with a leading forward slash. The empty string value is allowed,
// and it denotes the entire working directory.
string path = 1;
reserved 2; // Used for a removed field in an earlier version of the API.
// The digest of the encoded
// [Tree][build.bazel.remote.execution.v2.Tree] proto containing the
// directory's contents.
Digest tree_digest = 3;
}
// An `ExecutionPolicy` can be used to control the scheduling of the action.
message ExecutionPolicy {
// The priority (relative importance) of this action. Generally, a lower value
// means that the action should be run sooner than actions having a greater
// priority value, but the interpretation of a given value is server-
// dependent. A priority of 0 means the *default* priority. Priorities may be
// positive or negative, and such actions should run later or sooner than
// actions having the default priority, respectively. The particular semantics
// of this field is up to the server. In particular, every server will have
// its own supported range of priorities, and will decide how these map into
// scheduling policy.
int32 priority = 1;
}
// A `ResultsCachePolicy` is used for fine-grained control over how action
// outputs are stored in the CAS and Action Cache.
message ResultsCachePolicy {
// The priority (relative importance) of this content in the overall cache.
// Generally, a lower value means a longer retention time or other advantage,
// but the interpretation of a given value is server-dependent. A priority of
// 0 means a *default* value, decided by the server.
//
// The particular semantics of this field is up to the server. In particular,
// every server will have its own supported range of priorities, and will
// decide how these map into retention/eviction policy.
int32 priority = 1;
}
// A request message for
// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute].
message ExecuteRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// If true, the action will be executed anew even if its result was already
// present in the cache. If false, the result may be served from the
// [ActionCache][build.bazel.remote.execution.v2.ActionCache].
bool skip_cache_lookup = 3;
reserved 2, 4, 5; // Used for removed fields in an earlier version of the API.
// The digest of the [Action][build.bazel.remote.execution.v2.Action] to
// execute.
Digest action_digest = 6;
// An optional policy for execution of the action.
// The server will have a default policy if this is not provided.
ExecutionPolicy execution_policy = 7;
// An optional policy for the results of this execution in the remote cache.
// The server will have a default policy if this is not provided.
// This may be applied to both the ActionResult and the associated blobs.
ResultsCachePolicy results_cache_policy = 8;
}
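// For illustration only (the instance name and digest below are
// placeholders), a minimal ExecuteRequest might look like:
//
// ```json
// // ExecuteRequest proto:
// {
//   instance_name: "main",
//   skip_cache_lookup: false,
//   action_digest: { hash: "b5bb9d80...", size: 147 }
// }
// ```
//
// Omitting `execution_policy` and `results_cache_policy` leaves the server's
// default policies in effect.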
// A `LogFile` is a log stored in the CAS.
message LogFile {
// The digest of the log contents.
Digest digest = 1;
// This is a hint as to the purpose of the log, and is set to true if the log
// is human-readable text that can be usefully displayed to a user, and false
// otherwise. For instance, if a command-line client wishes to print the
// server logs to the terminal for a failed action, this allows it to avoid
// displaying a binary file.
bool human_readable = 2;
}
// The response message for
// [Execution.Execute][build.bazel.remote.execution.v2.Execution.Execute],
// which will be contained in the [response
// field][google.longrunning.Operation.response] of the
// [Operation][google.longrunning.Operation].
message ExecuteResponse {
// The result of the action.
ActionResult result = 1;
// True if the result was served from cache, false if it was executed.
bool cached_result = 2;
// If the status has a code other than `OK`, it indicates that the action did
// not finish execution. For example, if the operation times out during
// execution, the status will have a `DEADLINE_EXCEEDED` code. Servers MUST
// use this field for errors in execution, rather than the error field on the
// `Operation` object.
//
// If the status code is other than `OK`, then the result MUST NOT be cached.
// For an error status, the `result` field is optional; the server may
// populate the output-, stdout-, and stderr-related fields if it has any
// information available, such as the stdout and stderr of a timed-out action.
google.rpc.Status status = 3;
// An optional list of additional log outputs the server wishes to provide. A
// server can use this to return execution-specific logs however it wishes.
// This is intended primarily to make it easier for users to debug issues that
// may be outside of the actual job execution, such as by identifying the
// worker executing the action or by providing logs from the worker's setup
// phase. The keys SHOULD be human readable so that a client can display them
// to a user.
map<string, LogFile> server_logs = 4;
}
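// As a hedged sketch (digests and the log key below are invented
// placeholders), a completed execution that missed the cache might produce an
// ExecuteResponse such as:
//
// ```json
// // ExecuteResponse proto:
// {
//   result: {
//     exit_code: 0,
//     stdout_digest: { hash: "9a0364b9...", size: 70000 }
//   },
//   cached_result: false,
//   status: { code: 0 },
//   server_logs: {
//     "worker-setup.log": {
//       digest: { hash: "4cf2eda9...", size: 512 },
//       human_readable: true
//     }
//   }
// }
// ```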
// Metadata about an ongoing
// [execution][build.bazel.remote.execution.v2.Execution.Execute], which
// will be contained in the [metadata
// field][google.longrunning.Operation.metadata] of the
// [Operation][google.longrunning.Operation].
message ExecuteOperationMetadata {
// The current stage of execution.
enum Stage {
UNKNOWN = 0;
// Checking the result against the cache.
CACHE_CHECK = 1;
// Currently idle, awaiting a free machine to execute.
QUEUED = 2;
// Currently being executed by a worker.
EXECUTING = 3;
// Finished execution.
COMPLETED = 4;
}
Stage stage = 1;
// The digest of the [Action][build.bazel.remote.execution.v2.Action]
// being executed.
Digest action_digest = 2;
// If set, the client can use this name with
// [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
// standard output.
string stdout_stream_name = 3;
// If set, the client can use this name with
// [ByteStream.Read][google.bytestream.ByteStream.Read] to stream the
// standard error.
string stderr_stream_name = 4;
}
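// For example (the stream name below is a placeholder; its exact form is
// chosen by the server), the metadata for an action currently running on a
// worker could look like:
//
// ```json
// // ExecuteOperationMetadata proto:
// {
//   stage: EXECUTING,
//   action_digest: { hash: "b5bb9d80...", size: 147 },
//   stdout_stream_name: "operations/abc123/streams/stdout"
// }
// ```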
// A request message for
// [WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution].
message WaitExecutionRequest {
// The name of the [Operation][google.longrunning.Operation]
// returned by [Execute][build.bazel.remote.execution.v2.Execution.Execute].
string name = 1;
}
// A request message for
// [ActionCache.GetActionResult][build.bazel.remote.execution.v2.ActionCache.GetActionResult].
message GetActionResultRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The digest of the [Action][build.bazel.remote.execution.v2.Action]
// whose result is requested.
Digest action_digest = 2;
}
// A request message for
// [ActionCache.UpdateActionResult][build.bazel.remote.execution.v2.ActionCache.UpdateActionResult].
message UpdateActionResultRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The digest of the [Action][build.bazel.remote.execution.v2.Action]
// whose result is being uploaded.
Digest action_digest = 2;
// The [ActionResult][build.bazel.remote.execution.v2.ActionResult]
// to store in the cache.
ActionResult action_result = 3;
// An optional policy for the results of this execution in the remote cache.
// The server will have a default policy if this is not provided.
// This may be applied to both the ActionResult and the associated blobs.
ResultsCachePolicy results_cache_policy = 4;
}
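// A hedged sketch of the action-cache round trip, using placeholder values:
// a client first issues a GetActionResultRequest for the action digest; if
// the server reports no cached result, the client runs the action itself and
// may then upload the outcome with an UpdateActionResultRequest like:
//
// ```json
// // UpdateActionResultRequest proto:
// {
//   instance_name: "main",
//   action_digest: { hash: "b5bb9d80...", size: 147 },
//   action_result: { exit_code: 0 }
// }
// ```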
// A request message for
// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
message FindMissingBlobsRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// A list of the blobs to check.
repeated Digest blob_digests = 2;
}
// A response message for
// [ContentAddressableStorage.FindMissingBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.FindMissingBlobs].
message FindMissingBlobsResponse {
// A list of the requested blobs that are *not* present in the storage.
repeated Digest missing_blob_digests = 2;
}
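// As an illustration with placeholder digests: a request asking about two
// blobs, only one of which the server is missing, and the matching response:
//
// ```json
// // FindMissingBlobsRequest proto:
// {
//   instance_name: "main",
//   blob_digests: [
//     { hash: "f1d2d2f9...", size: 4096 },
//     { hash: "4cf2eda9...", size: 512 }
//   ]
// }
// // FindMissingBlobsResponse proto:
// {
//   missing_blob_digests: [
//     { hash: "4cf2eda9...", size: 512 }
//   ]
// }
// ```
//
// The client would then upload only the missing blob, for example with
// BatchUpdateBlobs below.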
// A request message for
// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
message BatchUpdateBlobsRequest {
// A request corresponding to a single blob that the client wants to upload.
message Request {
// The digest of the blob. This MUST be the digest of `data`.
Digest digest = 1;
// The raw binary data.
bytes data = 2;
}
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The individual upload requests.
repeated Request requests = 2;
}
// A response message for
// [ContentAddressableStorage.BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs].
message BatchUpdateBlobsResponse {
// A response corresponding to a single blob that the client tried to upload.
message Response {
// The blob digest to which this response corresponds.
Digest digest = 1;
// The result of attempting to upload that blob.
google.rpc.Status status = 2;
}
// The responses to the requests.
repeated Response responses = 1;
}
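// For illustration (the data and its digest below are placeholders, not a
// real hash of the bytes), uploading a single small blob and receiving a
// per-blob status back:
//
// ```json
// // BatchUpdateBlobsRequest proto:
// {
//   instance_name: "main",
//   requests: [
//     { digest: { hash: "f1d2d2f9...", size: 12 }, data: "hello world\n" }
//   ]
// }
// // BatchUpdateBlobsResponse proto:
// {
//   responses: [
//     { digest: { hash: "f1d2d2f9...", size: 12 }, status: { code: 0 } }
//   ]
// }
// ```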
// A request message for
// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
message BatchReadBlobsRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The individual blob digests.
repeated Digest digests = 2;
}
// A response message for
// [ContentAddressableStorage.BatchReadBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchReadBlobs].
message BatchReadBlobsResponse {
// A response corresponding to a single blob that the client tried to download.
message Response {
// The digest to which this response corresponds.
Digest digest = 1;
// The raw binary data.
bytes data = 2;
// The result of attempting to download that blob.
google.rpc.Status status = 3;
}
// The responses to the requests.
repeated Response responses = 1;
}
// A request message for
// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
message GetTreeRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The digest of the root, which must be an encoded
// [Directory][build.bazel.remote.execution.v2.Directory] message
// stored in the
// [ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
Digest root_digest = 2;
// A maximum page size to request. If present, the server will return no more
// than this many items. Regardless of whether a page size is specified, the
// server may place its own limit on the number of items to be returned and
// require the client to retrieve more items using a subsequent request.
int32 page_size = 3;
// A page token, which must be a value received in a previous
// [GetTreeResponse][build.bazel.remote.execution.v2.GetTreeResponse].
// If present, the server will use it to return the following page of results.
string page_token = 4;
}
// A response message for
// [ContentAddressableStorage.GetTree][build.bazel.remote.execution.v2.ContentAddressableStorage.GetTree].
message GetTreeResponse {
// The directories descended from the requested root.
repeated Directory directories = 1;
// If present, signifies that there are more results which the client can
// retrieve by passing this as the page_token in a subsequent
// [request][build.bazel.remote.execution.v2.GetTreeRequest].
// If empty, signifies that this is the last page of results.
string next_page_token = 2;
}
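// A hedged sketch of the paging flow, with placeholder values: the client
// requests the tree below some Directory digest and keeps following
// `next_page_token` until it comes back empty:
//
// ```json
// // First GetTreeRequest proto:
// { instance_name: "main", root_digest: { hash: "4a73bc9d...", size: 55 }, page_size: 100 }
// // GetTreeResponse proto (more results remain):
// { directories: [ ... ], next_page_token: "opaque-token-1" }
// // Follow-up GetTreeRequest proto:
// { instance_name: "main", root_digest: { hash: "4a73bc9d...", size: 55 }, page_token: "opaque-token-1" }
// ```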
// A request message for
// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
message GetCapabilitiesRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
}
// A response message for
// [Capabilities.GetCapabilities][build.bazel.remote.execution.v2.Capabilities.GetCapabilities].
message ServerCapabilities {
// Capabilities of the remote cache system.
CacheCapabilities cache_capabilities = 1;
// Capabilities of the remote execution system.
ExecutionCapabilities execution_capabilities = 2;
// Earliest RE API version supported, including deprecated versions.
build.bazel.semver.SemVer deprecated_api_version = 3;
// Earliest non-deprecated RE API version supported.
build.bazel.semver.SemVer low_api_version = 4;
// Latest RE API version supported.
build.bazel.semver.SemVer high_api_version = 5;
}
// The digest function used for converting values into keys for CAS and Action
// Cache.
enum DigestFunction {
UNKNOWN = 0;
SHA256 = 1;
SHA1 = 2;
MD5 = 3;
}
// Describes the server/instance capabilities for updating the action cache.
message ActionCacheUpdateCapabilities {
bool update_enabled = 1;
}
// Allowed values for priority in
// [ResultsCachePolicy][build.bazel.remote.execution.v2.ResultsCachePolicy].
// Used for querying the valid priority ranges for both caching and execution.
message PriorityCapabilities {
// Supported range of priorities, including boundaries.
message PriorityRange {
int32 min_priority = 1;
int32 max_priority = 2;
}
repeated PriorityRange priorities = 1;
}
// Capabilities of the remote cache system.
message CacheCapabilities {
// Describes how the server treats absolute symlink targets.
enum SymlinkAbsolutePathStrategy {
UNKNOWN = 0;
// Server will return an INVALID_ARGUMENT on input symlinks with absolute targets.
// If an action tries to create an output symlink with an absolute target, a
// FAILED_PRECONDITION will be returned.
DISALLOWED = 1;
// Server will allow symlink targets to escape the input root tree, possibly
// resulting in non-hermetic builds.
ALLOWED = 2;
}
// All the digest functions supported by the remote cache.
// Remote cache may support multiple digest functions simultaneously.
repeated DigestFunction digest_function = 1;
// Capabilities for updating the action cache.
ActionCacheUpdateCapabilities action_cache_update_capabilities = 2;
// Supported cache priority range for both CAS and ActionCache.
PriorityCapabilities cache_priority_capabilities = 3;
// Maximum total size of blobs to be uploaded/downloaded using
// batch methods. A value of 0 means no limit is set, although
// in practice there will always be a message size limitation
// of the protocol in use, e.g. GRPC.
int64 max_batch_total_size_bytes = 4;
// Whether absolute symlink targets are supported.
SymlinkAbsolutePathStrategy symlink_absolute_path_strategy = 5;
}
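// As an example shape only (every value here is server-specific, not a
// recommendation), a cache that supports SHA-256, allows action cache
// updates and caps batch transfers at 4 MiB might advertise:
//
// ```json
// // CacheCapabilities proto:
// {
//   digest_function: [SHA256],
//   action_cache_update_capabilities: { update_enabled: true },
//   cache_priority_capabilities: {
//     priorities: [ { min_priority: -100, max_priority: 100 } ]
//   },
//   max_batch_total_size_bytes: 4194304,
//   symlink_absolute_path_strategy: DISALLOWED
// }
// ```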
// Capabilities of the remote execution system.
message ExecutionCapabilities {
// Remote execution may only support a single digest function.
DigestFunction digest_function = 1;
// Whether remote execution is enabled for the particular server/instance.
bool exec_enabled = 2;
// Supported execution priority range.
PriorityCapabilities execution_priority_capabilities = 3;
}
// Details for the tool used to call the API.
message ToolDetails {
// Name of the tool, e.g. bazel.
string tool_name = 1;
// Version of the tool used for the request, e.g. 5.0.3.
string tool_version = 2;
}
// Optional metadata that a client may attach to any RPC request to tell the server about an
// external context of the request. The server may use this for logging or other
// purposes. To use it, the client attaches the header to the call using the
// canonical proto serialization:
// name: build.bazel.remote.execution.v2.requestmetadata-bin
// contents: the base64 encoded binary RequestMetadata message.
message RequestMetadata {
// The details for the tool invoking the requests.
ToolDetails tool_details = 1;
// An identifier that ties multiple requests to the same action.
// For example, multiple requests to the CAS, Action Cache, and Execution
// API are used in order to compile foo.cc.
string action_id = 2;
// An identifier that ties multiple actions together to a final result.
// For example, multiple actions are required to build and run foo_test.
string tool_invocation_id = 3;
// An identifier to tie multiple tool invocations together. For example,
// runs of foo_test, bar_test and baz_test on a post-submit of a given patch.
string correlated_invocations_id = 4;
}
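// For example (the identifiers below are placeholders chosen by the client;
// the tool name and version echo the examples above), a RequestMetadata
// message serialized into the
// `build.bazel.remote.execution.v2.requestmetadata-bin` header could carry:
//
// ```json
// // RequestMetadata proto:
// {
//   tool_details: { tool_name: "bazel", tool_version: "5.0.3" },
//   action_id: "compile-foo-cc",
//   tool_invocation_id: "build-foo-test-1",
//   correlated_invocations_id: "postsubmit-patch-42"
// }
// ```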
buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2.py 0000664 0000000 0000000 00000121610 14375152700 0032563 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: build/bazel/remote/execution/v2/remote_execution.proto
"""Generated protocol buffer code."""
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from buildstream._protos.build.bazel.semver import semver_pb2 as build_dot_bazel_dot_semver_dot_semver__pb2
from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2
from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n6build/bazel/remote/execution/v2/remote_execution.proto\x12\x1f\x62uild.bazel.remote.execution.v2\x1a\x1f\x62uild/bazel/semver/semver.proto\x1a\x1cgoogle/api/annotations.proto\x1a#google/longrunning/operations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17google/rpc/status.proto\"\xd5\x01\n\x06\x41\x63tion\x12?\n\x0e\x63ommand_digest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x42\n\x11input_root_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12*\n\x07timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\x12\x14\n\x0c\x64o_not_cache\x18\x07 \x01(\x08J\x04\x08\x03\x10\x06\"\xb7\x02\n\x07\x43ommand\x12\x11\n\targuments\x18\x01 \x03(\t\x12[\n\x15\x65nvironment_variables\x18\x02 \x03(\x0b\x32<.build.bazel.remote.execution.v2.Command.EnvironmentVariable\x12\x14\n\x0coutput_files\x18\x03 \x03(\t\x12\x1a\n\x12output_directories\x18\x04 \x03(\t\x12;\n\x08platform\x18\x05 \x01(\x0b\x32).build.bazel.remote.execution.v2.Platform\x12\x19\n\x11working_directory\x18\x06 \x01(\t\x1a\x32\n\x13\x45nvironmentVariable\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"{\n\x08Platform\x12\x46\n\nproperties\x18\x01 \x03(\x0b\x32\x32.build.bazel.remote.execution.v2.Platform.Property\x1a\'\n\x08Property\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\xca\x01\n\tDirectory\x12\x38\n\x05\x66iles\x18\x01 \x03(\x0b\x32).build.bazel.remote.execution.v2.FileNode\x12\x43\n\x0b\x64irectories\x18\x02 \x03(\x0b\x32..build.bazel.remote.execution.v2.DirectoryNode\x12>\n\x08symlinks\x18\x03 \x03(\x0b\x32,.build.bazel.remote.execution.v2.SymlinkNode\"n\n\x08\x46ileNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"V\n\rDirectoryNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"+\n\x0bSymlinkNode\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\"*\n\x06\x44igest\x12\x0c\n\x04hash\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\"\xec\x04\n\x16\x45xecutedActionMetadata\x12\x0e\n\x06worker\x18\x01 \x01(\t\x12\x34\n\x10queued_timestamp\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12:\n\x16worker_start_timestamp\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12>\n\x1aworker_completed_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12?\n\x1binput_fetch_start_timestamp\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x43\n\x1finput_fetch_completed_timestamp\x18\x06 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12=\n\x19\x65xecution_start_timestamp\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1d\x65xecution_completed_timestamp\x18\x08 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x41\n\x1doutput_upload_start_timestamp\x18\t \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x45\n!output_upload_completed_timestamp\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"\xb5\x03\n\x0c\x41\x63tionResult\x12\x41\n\x0coutput_files\x18\x02 \x03(\x0b\x32+.build.bazel.remote.execution.v2.OutputFile\x12L\n\x12output_directories\x18\x03 \x03(\x0b\x32\x30.build.bazel.remote.execution.v2.OutputDirectory\x12\x11\n\texit_code\x18\x04 \x01(\x05\x12\x12\n\nstdout_raw\x18\x05 \x01(\x0c\x12>\n\rstdout_digest\x18\x06 
\x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x12\n\nstderr_raw\x18\x07 \x01(\x0c\x12>\n\rstderr_digest\x18\x08 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12S\n\x12\x65xecution_metadata\x18\t \x01(\x0b\x32\x37.build.bazel.remote.execution.v2.ExecutedActionMetadataJ\x04\x08\x01\x10\x02\"p\n\nOutputFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x37\n\x06\x64igest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x15\n\ris_executable\x18\x04 \x01(\x08J\x04\x08\x03\x10\x04\"~\n\x04Tree\x12\x38\n\x04root\x18\x01 \x01(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12<\n\x08\x63hildren\x18\x02 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\"c\n\x0fOutputDirectory\x12\x0c\n\x04path\x18\x01 \x01(\t\x12<\n\x0btree_digest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.DigestJ\x04\x08\x02\x10\x03\"#\n\x0f\x45xecutionPolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"&\n\x12ResultsCachePolicy\x12\x10\n\x08priority\x18\x01 \x01(\x05\"\xb3\x02\n\x0e\x45xecuteRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x19\n\x11skip_cache_lookup\x18\x03 \x01(\x08\x12>\n\raction_digest\x18\x06 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12J\n\x10\x65xecution_policy\x18\x07 \x01(\x0b\x32\x30.build.bazel.remote.execution.v2.ExecutionPolicy\x12Q\n\x14results_cache_policy\x18\x08 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicyJ\x04\x08\x02\x10\x03J\x04\x08\x04\x10\x05J\x04\x08\x05\x10\x06\"Z\n\x07LogFile\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x16\n\x0ehuman_readable\x18\x02 \x01(\x08\"\xbf\x02\n\x0f\x45xecuteResponse\x12=\n\x06result\x18\x01 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12\x15\n\rcached_result\x18\x02 \x01(\x08\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12U\n\x0bserver_logs\x18\x04 \x03(\x0b\x32@.build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry\x1a[\n\x0fServerLogsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.build.bazel.remote.execution.v2.LogFile:\x02\x38\x01\"\xb3\x02\n\x18\x45xecuteOperationMetadata\x12N\n\x05stage\x18\x01 \x01(\x0e\x32?.build.bazel.remote.execution.v2.ExecuteOperationMetadata.Stage\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x1a\n\x12stdout_stream_name\x18\x03 \x01(\t\x12\x1a\n\x12stderr_stream_name\x18\x04 \x01(\t\"O\n\x05Stage\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0f\n\x0b\x43\x41\x43HE_CHECK\x10\x01\x12\n\n\x06QUEUED\x10\x02\x12\r\n\tEXECUTING\x10\x03\x12\r\n\tCOMPLETED\x10\x04\"$\n\x14WaitExecutionRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"o\n\x16GetActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x8b\x02\n\x19UpdateActionResultRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12>\n\raction_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x44\n\raction_result\x18\x03 \x01(\x0b\x32-.build.bazel.remote.execution.v2.ActionResult\x12Q\n\x14results_cache_policy\x18\x04 \x01(\x0b\x32\x33.build.bazel.remote.execution.v2.ResultsCachePolicy\"o\n\x17\x46indMissingBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12=\n\x0c\x62lob_digests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"a\n\x18\x46indMissingBlobsResponse\x12\x45\n\x14missing_blob_digests\x18\x02 
\x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xd6\x01\n\x17\x42\x61tchUpdateBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12R\n\x08requests\x18\x02 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request\x1aP\n\x07Request\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"\xda\x01\n\x18\x42\x61tchUpdateBlobsResponse\x12U\n\tresponses\x18\x01 \x03(\x0b\x32\x42.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response\x1ag\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"h\n\x15\x42\x61tchReadBlobsRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x38\n\x07\x64igests\x18\x02 \x03(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\xe4\x01\n\x16\x42\x61tchReadBlobsResponse\x12S\n\tresponses\x18\x01 \x03(\x0b\x32@.build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response\x1au\n\x08Response\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\"\n\x06status\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\"\x8c\x01\n\x0eGetTreeRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12<\n\x0broot_digest\x18\x02 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"k\n\x0fGetTreeResponse\x12?\n\x0b\x64irectories\x18\x01 \x03(\x0b\x32*.build.bazel.remote.execution.v2.Directory\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"/\n\x16GetCapabilitiesRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\xe3\x02\n\x12ServerCapabilities\x12N\n\x12\x63\x61\x63he_capabilities\x18\x01 \x01(\x0b\x32\x32.build.bazel.remote.execution.v2.CacheCapabilities\x12V\n\x16\x65xecution_capabilities\x18\x02 \x01(\x0b\x32\x36.build.bazel.remote.execution.v2.ExecutionCapabilities\x12:\n\x16\x64\x65precated_api_version\x18\x03 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x33\n\x0flow_api_version\x18\x04 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\x12\x34\n\x10high_api_version\x18\x05 \x01(\x0b\x32\x1a.build.bazel.semver.SemVer\"7\n\x1d\x41\x63tionCacheUpdateCapabilities\x12\x16\n\x0eupdate_enabled\x18\x01 \x01(\x08\"\xac\x01\n\x14PriorityCapabilities\x12W\n\npriorities\x18\x01 \x03(\x0b\x32\x43.build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange\x1a;\n\rPriorityRange\x12\x14\n\x0cmin_priority\x18\x01 \x01(\x05\x12\x14\n\x0cmax_priority\x18\x02 \x01(\x05\"\x88\x04\n\x11\x43\x61\x63heCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x03(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12h\n action_cache_update_capabilities\x18\x02 \x01(\x0b\x32>.build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities\x12Z\n\x1b\x63\x61\x63he_priority_capabilities\x18\x03 \x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\x12\"\n\x1amax_batch_total_size_bytes\x18\x04 \x01(\x03\x12v\n\x1esymlink_absolute_path_strategy\x18\x05 \x01(\x0e\x32N.build.bazel.remote.execution.v2.CacheCapabilities.SymlinkAbsolutePathStrategy\"G\n\x1bSymlinkAbsolutePathStrategy\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x0e\n\nDISALLOWED\x10\x01\x12\x0b\n\x07\x41LLOWED\x10\x02\"\xd7\x01\n\x15\x45xecutionCapabilities\x12H\n\x0f\x64igest_function\x18\x01 \x01(\x0e\x32/.build.bazel.remote.execution.v2.DigestFunction\x12\x14\n\x0c\x65xec_enabled\x18\x02 \x01(\x08\x12^\n\x1f\x65xecution_priority_capabilities\x18\x03 
\x01(\x0b\x32\x35.build.bazel.remote.execution.v2.PriorityCapabilities\"6\n\x0bToolDetails\x12\x11\n\ttool_name\x18\x01 \x01(\t\x12\x14\n\x0ctool_version\x18\x02 \x01(\t\"\xa7\x01\n\x0fRequestMetadata\x12\x42\n\x0ctool_details\x18\x01 \x01(\x0b\x32,.build.bazel.remote.execution.v2.ToolDetails\x12\x11\n\taction_id\x18\x02 \x01(\t\x12\x1a\n\x12tool_invocation_id\x18\x03 \x01(\t\x12!\n\x19\x63orrelated_invocations_id\x18\x04 \x01(\t*<\n\x0e\x44igestFunction\x12\x0b\n\x07UNKNOWN\x10\x00\x12\n\n\x06SHA256\x10\x01\x12\x08\n\x04SHA1\x10\x02\x12\x07\n\x03MD5\x10\x03\x32\xb9\x02\n\tExecution\x12\x8e\x01\n\x07\x45xecute\x12/.build.bazel.remote.execution.v2.ExecuteRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/actions:execute:\x01*0\x01\x12\x9a\x01\n\rWaitExecution\x12\x35.build.bazel.remote.execution.v2.WaitExecutionRequest\x1a\x1d.google.longrunning.Operation\"1\x82\xd3\xe4\x93\x02+\"&/v2/{name=operations/**}:waitExecution:\x01*0\x01\x32\xd6\x03\n\x0b\x41\x63tionCache\x12\xd7\x01\n\x0fGetActionResult\x12\x37.build.bazel.remote.execution.v2.GetActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"\\\x82\xd3\xe4\x93\x02V\x12T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}\x12\xec\x01\n\x12UpdateActionResult\x12:.build.bazel.remote.execution.v2.UpdateActionResultRequest\x1a-.build.bazel.remote.execution.v2.ActionResult\"k\x82\xd3\xe4\x93\x02\x65\x1aT/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result2\x9b\x06\n\x19\x43ontentAddressableStorage\x12\xbc\x01\n\x10\x46indMissingBlobs\x12\x38.build.bazel.remote.execution.v2.FindMissingBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.FindMissingBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:findMissing:\x01*\x12\xbc\x01\n\x10\x42\x61tchUpdateBlobs\x12\x38.build.bazel.remote.execution.v2.BatchUpdateBlobsRequest\x1a\x39.build.bazel.remote.execution.v2.BatchUpdateBlobsResponse\"3\x82\xd3\xe4\x93\x02-\"(/v2/{instance_name=**}/blobs:batchUpdate:\x01*\x12\xb4\x01\n\x0e\x42\x61tchReadBlobs\x12\x36.build.bazel.remote.execution.v2.BatchReadBlobsRequest\x1a\x37.build.bazel.remote.execution.v2.BatchReadBlobsResponse\"1\x82\xd3\xe4\x93\x02+\"&/v2/{instance_name=**}/blobs:batchRead:\x01*\x12\xc8\x01\n\x07GetTree\x12/.build.bazel.remote.execution.v2.GetTreeRequest\x1a\x30.build.bazel.remote.execution.v2.GetTreeResponse\"X\x82\xd3\xe4\x93\x02R\x12P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree0\x01\x32\xbd\x01\n\x0c\x43\x61pabilities\x12\xac\x01\n\x0fGetCapabilities\x12\x37.build.bazel.remote.execution.v2.GetCapabilitiesRequest\x1a\x33.build.bazel.remote.execution.v2.ServerCapabilities\"+\x82\xd3\xe4\x93\x02%\x12#/v2/{instance_name=**}/capabilitiesBr\n\x1f\x62uild.bazel.remote.execution.v2B\x14RemoteExecutionProtoP\x01Z\x0fremoteexecution\xa2\x02\x03REX\xaa\x02\x1f\x42uild.Bazel.Remote.Execution.V2b\x06proto3')
_DIGESTFUNCTION = DESCRIPTOR.enum_types_by_name['DigestFunction']
DigestFunction = enum_type_wrapper.EnumTypeWrapper(_DIGESTFUNCTION)
UNKNOWN = 0
SHA256 = 1
SHA1 = 2
MD5 = 3
_ACTION = DESCRIPTOR.message_types_by_name['Action']
_COMMAND = DESCRIPTOR.message_types_by_name['Command']
_COMMAND_ENVIRONMENTVARIABLE = _COMMAND.nested_types_by_name['EnvironmentVariable']
_PLATFORM = DESCRIPTOR.message_types_by_name['Platform']
_PLATFORM_PROPERTY = _PLATFORM.nested_types_by_name['Property']
_DIRECTORY = DESCRIPTOR.message_types_by_name['Directory']
_FILENODE = DESCRIPTOR.message_types_by_name['FileNode']
_DIRECTORYNODE = DESCRIPTOR.message_types_by_name['DirectoryNode']
_SYMLINKNODE = DESCRIPTOR.message_types_by_name['SymlinkNode']
_DIGEST = DESCRIPTOR.message_types_by_name['Digest']
_EXECUTEDACTIONMETADATA = DESCRIPTOR.message_types_by_name['ExecutedActionMetadata']
_ACTIONRESULT = DESCRIPTOR.message_types_by_name['ActionResult']
_OUTPUTFILE = DESCRIPTOR.message_types_by_name['OutputFile']
_TREE = DESCRIPTOR.message_types_by_name['Tree']
_OUTPUTDIRECTORY = DESCRIPTOR.message_types_by_name['OutputDirectory']
_EXECUTIONPOLICY = DESCRIPTOR.message_types_by_name['ExecutionPolicy']
_RESULTSCACHEPOLICY = DESCRIPTOR.message_types_by_name['ResultsCachePolicy']
_EXECUTEREQUEST = DESCRIPTOR.message_types_by_name['ExecuteRequest']
_LOGFILE = DESCRIPTOR.message_types_by_name['LogFile']
_EXECUTERESPONSE = DESCRIPTOR.message_types_by_name['ExecuteResponse']
_EXECUTERESPONSE_SERVERLOGSENTRY = _EXECUTERESPONSE.nested_types_by_name['ServerLogsEntry']
_EXECUTEOPERATIONMETADATA = DESCRIPTOR.message_types_by_name['ExecuteOperationMetadata']
_WAITEXECUTIONREQUEST = DESCRIPTOR.message_types_by_name['WaitExecutionRequest']
_GETACTIONRESULTREQUEST = DESCRIPTOR.message_types_by_name['GetActionResultRequest']
_UPDATEACTIONRESULTREQUEST = DESCRIPTOR.message_types_by_name['UpdateActionResultRequest']
_FINDMISSINGBLOBSREQUEST = DESCRIPTOR.message_types_by_name['FindMissingBlobsRequest']
_FINDMISSINGBLOBSRESPONSE = DESCRIPTOR.message_types_by_name['FindMissingBlobsResponse']
_BATCHUPDATEBLOBSREQUEST = DESCRIPTOR.message_types_by_name['BatchUpdateBlobsRequest']
_BATCHUPDATEBLOBSREQUEST_REQUEST = _BATCHUPDATEBLOBSREQUEST.nested_types_by_name['Request']
_BATCHUPDATEBLOBSRESPONSE = DESCRIPTOR.message_types_by_name['BatchUpdateBlobsResponse']
_BATCHUPDATEBLOBSRESPONSE_RESPONSE = _BATCHUPDATEBLOBSRESPONSE.nested_types_by_name['Response']
_BATCHREADBLOBSREQUEST = DESCRIPTOR.message_types_by_name['BatchReadBlobsRequest']
_BATCHREADBLOBSRESPONSE = DESCRIPTOR.message_types_by_name['BatchReadBlobsResponse']
_BATCHREADBLOBSRESPONSE_RESPONSE = _BATCHREADBLOBSRESPONSE.nested_types_by_name['Response']
_GETTREEREQUEST = DESCRIPTOR.message_types_by_name['GetTreeRequest']
_GETTREERESPONSE = DESCRIPTOR.message_types_by_name['GetTreeResponse']
_GETCAPABILITIESREQUEST = DESCRIPTOR.message_types_by_name['GetCapabilitiesRequest']
_SERVERCAPABILITIES = DESCRIPTOR.message_types_by_name['ServerCapabilities']
_ACTIONCACHEUPDATECAPABILITIES = DESCRIPTOR.message_types_by_name['ActionCacheUpdateCapabilities']
_PRIORITYCAPABILITIES = DESCRIPTOR.message_types_by_name['PriorityCapabilities']
_PRIORITYCAPABILITIES_PRIORITYRANGE = _PRIORITYCAPABILITIES.nested_types_by_name['PriorityRange']
_CACHECAPABILITIES = DESCRIPTOR.message_types_by_name['CacheCapabilities']
_EXECUTIONCAPABILITIES = DESCRIPTOR.message_types_by_name['ExecutionCapabilities']
_TOOLDETAILS = DESCRIPTOR.message_types_by_name['ToolDetails']
_REQUESTMETADATA = DESCRIPTOR.message_types_by_name['RequestMetadata']
_EXECUTEOPERATIONMETADATA_STAGE = _EXECUTEOPERATIONMETADATA.enum_types_by_name['Stage']
_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY = _CACHECAPABILITIES.enum_types_by_name['SymlinkAbsolutePathStrategy']
Action = _reflection.GeneratedProtocolMessageType('Action', (_message.Message,), {
'DESCRIPTOR' : _ACTION,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Action)
})
_sym_db.RegisterMessage(Action)
Command = _reflection.GeneratedProtocolMessageType('Command', (_message.Message,), {
'EnvironmentVariable' : _reflection.GeneratedProtocolMessageType('EnvironmentVariable', (_message.Message,), {
'DESCRIPTOR' : _COMMAND_ENVIRONMENTVARIABLE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command.EnvironmentVariable)
})
,
'DESCRIPTOR' : _COMMAND,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Command)
})
_sym_db.RegisterMessage(Command)
_sym_db.RegisterMessage(Command.EnvironmentVariable)
Platform = _reflection.GeneratedProtocolMessageType('Platform', (_message.Message,), {
'Property' : _reflection.GeneratedProtocolMessageType('Property', (_message.Message,), {
'DESCRIPTOR' : _PLATFORM_PROPERTY,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform.Property)
})
,
'DESCRIPTOR' : _PLATFORM,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Platform)
})
_sym_db.RegisterMessage(Platform)
_sym_db.RegisterMessage(Platform.Property)
Directory = _reflection.GeneratedProtocolMessageType('Directory', (_message.Message,), {
'DESCRIPTOR' : _DIRECTORY,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Directory)
})
_sym_db.RegisterMessage(Directory)
FileNode = _reflection.GeneratedProtocolMessageType('FileNode', (_message.Message,), {
'DESCRIPTOR' : _FILENODE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FileNode)
})
_sym_db.RegisterMessage(FileNode)
DirectoryNode = _reflection.GeneratedProtocolMessageType('DirectoryNode', (_message.Message,), {
'DESCRIPTOR' : _DIRECTORYNODE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.DirectoryNode)
})
_sym_db.RegisterMessage(DirectoryNode)
SymlinkNode = _reflection.GeneratedProtocolMessageType('SymlinkNode', (_message.Message,), {
'DESCRIPTOR' : _SYMLINKNODE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.SymlinkNode)
})
_sym_db.RegisterMessage(SymlinkNode)
Digest = _reflection.GeneratedProtocolMessageType('Digest', (_message.Message,), {
'DESCRIPTOR' : _DIGEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Digest)
})
_sym_db.RegisterMessage(Digest)
ExecutedActionMetadata = _reflection.GeneratedProtocolMessageType('ExecutedActionMetadata', (_message.Message,), {
'DESCRIPTOR' : _EXECUTEDACTIONMETADATA,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutedActionMetadata)
})
_sym_db.RegisterMessage(ExecutedActionMetadata)
ActionResult = _reflection.GeneratedProtocolMessageType('ActionResult', (_message.Message,), {
'DESCRIPTOR' : _ACTIONRESULT,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionResult)
})
_sym_db.RegisterMessage(ActionResult)
OutputFile = _reflection.GeneratedProtocolMessageType('OutputFile', (_message.Message,), {
'DESCRIPTOR' : _OUTPUTFILE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputFile)
})
_sym_db.RegisterMessage(OutputFile)
Tree = _reflection.GeneratedProtocolMessageType('Tree', (_message.Message,), {
'DESCRIPTOR' : _TREE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.Tree)
})
_sym_db.RegisterMessage(Tree)
OutputDirectory = _reflection.GeneratedProtocolMessageType('OutputDirectory', (_message.Message,), {
'DESCRIPTOR' : _OUTPUTDIRECTORY,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.OutputDirectory)
})
_sym_db.RegisterMessage(OutputDirectory)
ExecutionPolicy = _reflection.GeneratedProtocolMessageType('ExecutionPolicy', (_message.Message,), {
'DESCRIPTOR' : _EXECUTIONPOLICY,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionPolicy)
})
_sym_db.RegisterMessage(ExecutionPolicy)
ResultsCachePolicy = _reflection.GeneratedProtocolMessageType('ResultsCachePolicy', (_message.Message,), {
'DESCRIPTOR' : _RESULTSCACHEPOLICY,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ResultsCachePolicy)
})
_sym_db.RegisterMessage(ResultsCachePolicy)
ExecuteRequest = _reflection.GeneratedProtocolMessageType('ExecuteRequest', (_message.Message,), {
'DESCRIPTOR' : _EXECUTEREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteRequest)
})
_sym_db.RegisterMessage(ExecuteRequest)
LogFile = _reflection.GeneratedProtocolMessageType('LogFile', (_message.Message,), {
'DESCRIPTOR' : _LOGFILE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.LogFile)
})
_sym_db.RegisterMessage(LogFile)
ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), {
'ServerLogsEntry' : _reflection.GeneratedProtocolMessageType('ServerLogsEntry', (_message.Message,), {
'DESCRIPTOR' : _EXECUTERESPONSE_SERVERLOGSENTRY,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse.ServerLogsEntry)
})
,
'DESCRIPTOR' : _EXECUTERESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteResponse)
})
_sym_db.RegisterMessage(ExecuteResponse)
_sym_db.RegisterMessage(ExecuteResponse.ServerLogsEntry)
ExecuteOperationMetadata = _reflection.GeneratedProtocolMessageType('ExecuteOperationMetadata', (_message.Message,), {
'DESCRIPTOR' : _EXECUTEOPERATIONMETADATA,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecuteOperationMetadata)
})
_sym_db.RegisterMessage(ExecuteOperationMetadata)
WaitExecutionRequest = _reflection.GeneratedProtocolMessageType('WaitExecutionRequest', (_message.Message,), {
'DESCRIPTOR' : _WAITEXECUTIONREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.WaitExecutionRequest)
})
_sym_db.RegisterMessage(WaitExecutionRequest)
GetActionResultRequest = _reflection.GeneratedProtocolMessageType('GetActionResultRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACTIONRESULTREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetActionResultRequest)
})
_sym_db.RegisterMessage(GetActionResultRequest)
UpdateActionResultRequest = _reflection.GeneratedProtocolMessageType('UpdateActionResultRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEACTIONRESULTREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.UpdateActionResultRequest)
})
_sym_db.RegisterMessage(UpdateActionResultRequest)
FindMissingBlobsRequest = _reflection.GeneratedProtocolMessageType('FindMissingBlobsRequest', (_message.Message,), {
'DESCRIPTOR' : _FINDMISSINGBLOBSREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsRequest)
})
_sym_db.RegisterMessage(FindMissingBlobsRequest)
FindMissingBlobsResponse = _reflection.GeneratedProtocolMessageType('FindMissingBlobsResponse', (_message.Message,), {
'DESCRIPTOR' : _FINDMISSINGBLOBSRESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.FindMissingBlobsResponse)
})
_sym_db.RegisterMessage(FindMissingBlobsResponse)
BatchUpdateBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsRequest', (_message.Message,), {
'Request' : _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _BATCHUPDATEBLOBSREQUEST_REQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest.Request)
})
,
'DESCRIPTOR' : _BATCHUPDATEBLOBSREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsRequest)
})
_sym_db.RegisterMessage(BatchUpdateBlobsRequest)
_sym_db.RegisterMessage(BatchUpdateBlobsRequest.Request)
BatchUpdateBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchUpdateBlobsResponse', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _BATCHUPDATEBLOBSRESPONSE_RESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse.Response)
})
,
'DESCRIPTOR' : _BATCHUPDATEBLOBSRESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchUpdateBlobsResponse)
})
_sym_db.RegisterMessage(BatchUpdateBlobsResponse)
_sym_db.RegisterMessage(BatchUpdateBlobsResponse.Response)
BatchReadBlobsRequest = _reflection.GeneratedProtocolMessageType('BatchReadBlobsRequest', (_message.Message,), {
'DESCRIPTOR' : _BATCHREADBLOBSREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsRequest)
})
_sym_db.RegisterMessage(BatchReadBlobsRequest)
BatchReadBlobsResponse = _reflection.GeneratedProtocolMessageType('BatchReadBlobsResponse', (_message.Message,), {
'Response' : _reflection.GeneratedProtocolMessageType('Response', (_message.Message,), {
'DESCRIPTOR' : _BATCHREADBLOBSRESPONSE_RESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsResponse.Response)
})
,
'DESCRIPTOR' : _BATCHREADBLOBSRESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.BatchReadBlobsResponse)
})
_sym_db.RegisterMessage(BatchReadBlobsResponse)
_sym_db.RegisterMessage(BatchReadBlobsResponse.Response)
GetTreeRequest = _reflection.GeneratedProtocolMessageType('GetTreeRequest', (_message.Message,), {
'DESCRIPTOR' : _GETTREEREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeRequest)
})
_sym_db.RegisterMessage(GetTreeRequest)
GetTreeResponse = _reflection.GeneratedProtocolMessageType('GetTreeResponse', (_message.Message,), {
'DESCRIPTOR' : _GETTREERESPONSE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetTreeResponse)
})
_sym_db.RegisterMessage(GetTreeResponse)
GetCapabilitiesRequest = _reflection.GeneratedProtocolMessageType('GetCapabilitiesRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCAPABILITIESREQUEST,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.GetCapabilitiesRequest)
})
_sym_db.RegisterMessage(GetCapabilitiesRequest)
ServerCapabilities = _reflection.GeneratedProtocolMessageType('ServerCapabilities', (_message.Message,), {
'DESCRIPTOR' : _SERVERCAPABILITIES,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ServerCapabilities)
})
_sym_db.RegisterMessage(ServerCapabilities)
ActionCacheUpdateCapabilities = _reflection.GeneratedProtocolMessageType('ActionCacheUpdateCapabilities', (_message.Message,), {
'DESCRIPTOR' : _ACTIONCACHEUPDATECAPABILITIES,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ActionCacheUpdateCapabilities)
})
_sym_db.RegisterMessage(ActionCacheUpdateCapabilities)
PriorityCapabilities = _reflection.GeneratedProtocolMessageType('PriorityCapabilities', (_message.Message,), {
'PriorityRange' : _reflection.GeneratedProtocolMessageType('PriorityRange', (_message.Message,), {
'DESCRIPTOR' : _PRIORITYCAPABILITIES_PRIORITYRANGE,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities.PriorityRange)
})
,
'DESCRIPTOR' : _PRIORITYCAPABILITIES,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.PriorityCapabilities)
})
_sym_db.RegisterMessage(PriorityCapabilities)
_sym_db.RegisterMessage(PriorityCapabilities.PriorityRange)
CacheCapabilities = _reflection.GeneratedProtocolMessageType('CacheCapabilities', (_message.Message,), {
'DESCRIPTOR' : _CACHECAPABILITIES,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.CacheCapabilities)
})
_sym_db.RegisterMessage(CacheCapabilities)
ExecutionCapabilities = _reflection.GeneratedProtocolMessageType('ExecutionCapabilities', (_message.Message,), {
'DESCRIPTOR' : _EXECUTIONCAPABILITIES,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ExecutionCapabilities)
})
_sym_db.RegisterMessage(ExecutionCapabilities)
ToolDetails = _reflection.GeneratedProtocolMessageType('ToolDetails', (_message.Message,), {
'DESCRIPTOR' : _TOOLDETAILS,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.ToolDetails)
})
_sym_db.RegisterMessage(ToolDetails)
RequestMetadata = _reflection.GeneratedProtocolMessageType('RequestMetadata', (_message.Message,), {
'DESCRIPTOR' : _REQUESTMETADATA,
'__module__' : 'build.bazel.remote.execution.v2.remote_execution_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.remote.execution.v2.RequestMetadata)
})
_sym_db.RegisterMessage(RequestMetadata)
_EXECUTION = DESCRIPTOR.services_by_name['Execution']
_ACTIONCACHE = DESCRIPTOR.services_by_name['ActionCache']
_CONTENTADDRESSABLESTORAGE = DESCRIPTOR.services_by_name['ContentAddressableStorage']
_CAPABILITIES = DESCRIPTOR.services_by_name['Capabilities']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\037build.bazel.remote.execution.v2B\024RemoteExecutionProtoP\001Z\017remoteexecution\242\002\003REX\252\002\037Build.Bazel.Remote.Execution.V2'
_EXECUTERESPONSE_SERVERLOGSENTRY._options = None
_EXECUTERESPONSE_SERVERLOGSENTRY._serialized_options = b'8\001'
_EXECUTION.methods_by_name['Execute']._options = None
_EXECUTION.methods_by_name['Execute']._serialized_options = b'\202\323\344\223\002+\"&/v2/{instance_name=**}/actions:execute:\001*'
_EXECUTION.methods_by_name['WaitExecution']._options = None
_EXECUTION.methods_by_name['WaitExecution']._serialized_options = b'\202\323\344\223\002+\"&/v2/{name=operations/**}:waitExecution:\001*'
_ACTIONCACHE.methods_by_name['GetActionResult']._options = None
_ACTIONCACHE.methods_by_name['GetActionResult']._serialized_options = b'\202\323\344\223\002V\022T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}'
_ACTIONCACHE.methods_by_name['UpdateActionResult']._options = None
_ACTIONCACHE.methods_by_name['UpdateActionResult']._serialized_options = b'\202\323\344\223\002e\032T/v2/{instance_name=**}/actionResults/{action_digest.hash}/{action_digest.size_bytes}:\raction_result'
_CONTENTADDRESSABLESTORAGE.methods_by_name['FindMissingBlobs']._options = None
_CONTENTADDRESSABLESTORAGE.methods_by_name['FindMissingBlobs']._serialized_options = b'\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:findMissing:\001*'
_CONTENTADDRESSABLESTORAGE.methods_by_name['BatchUpdateBlobs']._options = None
_CONTENTADDRESSABLESTORAGE.methods_by_name['BatchUpdateBlobs']._serialized_options = b'\202\323\344\223\002-\"(/v2/{instance_name=**}/blobs:batchUpdate:\001*'
_CONTENTADDRESSABLESTORAGE.methods_by_name['BatchReadBlobs']._options = None
_CONTENTADDRESSABLESTORAGE.methods_by_name['BatchReadBlobs']._serialized_options = b'\202\323\344\223\002+\"&/v2/{instance_name=**}/blobs:batchRead:\001*'
_CONTENTADDRESSABLESTORAGE.methods_by_name['GetTree']._options = None
_CONTENTADDRESSABLESTORAGE.methods_by_name['GetTree']._serialized_options = b'\202\323\344\223\002R\022P/v2/{instance_name=**}/blobs/{root_digest.hash}/{root_digest.size_bytes}:getTree'
_CAPABILITIES.methods_by_name['GetCapabilities']._options = None
_CAPABILITIES.methods_by_name['GetCapabilities']._serialized_options = b'\202\323\344\223\002%\022#/v2/{instance_name=**}/capabilities'
_DIGESTFUNCTION._serialized_start=7213
_DIGESTFUNCTION._serialized_end=7273
_ACTION._serialized_start=282
_ACTION._serialized_end=495
_COMMAND._serialized_start=498
_COMMAND._serialized_end=809
_COMMAND_ENVIRONMENTVARIABLE._serialized_start=759
_COMMAND_ENVIRONMENTVARIABLE._serialized_end=809
_PLATFORM._serialized_start=811
_PLATFORM._serialized_end=934
_PLATFORM_PROPERTY._serialized_start=895
_PLATFORM_PROPERTY._serialized_end=934
_DIRECTORY._serialized_start=937
_DIRECTORY._serialized_end=1139
_FILENODE._serialized_start=1141
_FILENODE._serialized_end=1251
_DIRECTORYNODE._serialized_start=1253
_DIRECTORYNODE._serialized_end=1339
_SYMLINKNODE._serialized_start=1341
_SYMLINKNODE._serialized_end=1384
_DIGEST._serialized_start=1386
_DIGEST._serialized_end=1428
_EXECUTEDACTIONMETADATA._serialized_start=1431
_EXECUTEDACTIONMETADATA._serialized_end=2051
_ACTIONRESULT._serialized_start=2054
_ACTIONRESULT._serialized_end=2491
_OUTPUTFILE._serialized_start=2493
_OUTPUTFILE._serialized_end=2605
_TREE._serialized_start=2607
_TREE._serialized_end=2733
_OUTPUTDIRECTORY._serialized_start=2735
_OUTPUTDIRECTORY._serialized_end=2834
_EXECUTIONPOLICY._serialized_start=2836
_EXECUTIONPOLICY._serialized_end=2871
_RESULTSCACHEPOLICY._serialized_start=2873
_RESULTSCACHEPOLICY._serialized_end=2911
_EXECUTEREQUEST._serialized_start=2914
_EXECUTEREQUEST._serialized_end=3221
_LOGFILE._serialized_start=3223
_LOGFILE._serialized_end=3313
_EXECUTERESPONSE._serialized_start=3316
_EXECUTERESPONSE._serialized_end=3635
_EXECUTERESPONSE_SERVERLOGSENTRY._serialized_start=3544
_EXECUTERESPONSE_SERVERLOGSENTRY._serialized_end=3635
_EXECUTEOPERATIONMETADATA._serialized_start=3638
_EXECUTEOPERATIONMETADATA._serialized_end=3945
_EXECUTEOPERATIONMETADATA_STAGE._serialized_start=3866
_EXECUTEOPERATIONMETADATA_STAGE._serialized_end=3945
_WAITEXECUTIONREQUEST._serialized_start=3947
_WAITEXECUTIONREQUEST._serialized_end=3983
_GETACTIONRESULTREQUEST._serialized_start=3985
_GETACTIONRESULTREQUEST._serialized_end=4096
_UPDATEACTIONRESULTREQUEST._serialized_start=4099
_UPDATEACTIONRESULTREQUEST._serialized_end=4366
_FINDMISSINGBLOBSREQUEST._serialized_start=4368
_FINDMISSINGBLOBSREQUEST._serialized_end=4479
_FINDMISSINGBLOBSRESPONSE._serialized_start=4481
_FINDMISSINGBLOBSRESPONSE._serialized_end=4578
_BATCHUPDATEBLOBSREQUEST._serialized_start=4581
_BATCHUPDATEBLOBSREQUEST._serialized_end=4795
_BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_start=4715
_BATCHUPDATEBLOBSREQUEST_REQUEST._serialized_end=4795
_BATCHUPDATEBLOBSRESPONSE._serialized_start=4798
_BATCHUPDATEBLOBSRESPONSE._serialized_end=5016
_BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_start=4913
_BATCHUPDATEBLOBSRESPONSE_RESPONSE._serialized_end=5016
_BATCHREADBLOBSREQUEST._serialized_start=5018
_BATCHREADBLOBSREQUEST._serialized_end=5122
_BATCHREADBLOBSRESPONSE._serialized_start=5125
_BATCHREADBLOBSRESPONSE._serialized_end=5353
_BATCHREADBLOBSRESPONSE_RESPONSE._serialized_start=5236
_BATCHREADBLOBSRESPONSE_RESPONSE._serialized_end=5353
_GETTREEREQUEST._serialized_start=5356
_GETTREEREQUEST._serialized_end=5496
_GETTREERESPONSE._serialized_start=5498
_GETTREERESPONSE._serialized_end=5605
_GETCAPABILITIESREQUEST._serialized_start=5607
_GETCAPABILITIESREQUEST._serialized_end=5654
_SERVERCAPABILITIES._serialized_start=5657
_SERVERCAPABILITIES._serialized_end=6012
_ACTIONCACHEUPDATECAPABILITIES._serialized_start=6014
_ACTIONCACHEUPDATECAPABILITIES._serialized_end=6069
_PRIORITYCAPABILITIES._serialized_start=6072
_PRIORITYCAPABILITIES._serialized_end=6244
_PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_start=6185
_PRIORITYCAPABILITIES_PRIORITYRANGE._serialized_end=6244
_CACHECAPABILITIES._serialized_start=6247
_CACHECAPABILITIES._serialized_end=6767
_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY._serialized_start=6696
_CACHECAPABILITIES_SYMLINKABSOLUTEPATHSTRATEGY._serialized_end=6767
_EXECUTIONCAPABILITIES._serialized_start=6770
_EXECUTIONCAPABILITIES._serialized_end=6985
_TOOLDETAILS._serialized_start=6987
_TOOLDETAILS._serialized_end=7041
_REQUESTMETADATA._serialized_start=7044
_REQUESTMETADATA._serialized_end=7211
_EXECUTION._serialized_start=7276
_EXECUTION._serialized_end=7589
_ACTIONCACHE._serialized_start=7592
_ACTIONCACHE._serialized_end=8062
_CONTENTADDRESSABLESTORAGE._serialized_start=8065
_CONTENTADDRESSABLESTORAGE._serialized_end=8860
_CAPABILITIES._serialized_start=8863
_CAPABILITIES._serialized_end=9052
# @@protoc_insertion_point(module_scope)
buildstream-1.6.9/buildstream/_protos/build/bazel/remote/execution/v2/remote_execution_pb2_grpc.py 0000664 0000000 0000000 00000132553 14375152700 0033606 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
class ExecutionStub(object):
"""The Remote Execution API is used to execute an
[Action][build.bazel.remote.execution.v2.Action] on the remote
workers.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Execute = channel.unary_stream(
'/build.bazel.remote.execution.v2.Execution/Execute',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.WaitExecution = channel.unary_stream(
'/build.bazel.remote.execution.v2.Execution/WaitExecution',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
class ExecutionServicer(object):
"""The Remote Execution API is used to execute an
[Action][build.bazel.remote.execution.v2.Action] on the remote
workers.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def Execute(self, request, context):
"""Execute an action remotely.
In order to execute an action, the client must first upload all of the
inputs, the
[Command][build.bazel.remote.execution.v2.Command] to run, and the
[Action][build.bazel.remote.execution.v2.Action] into the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage].
It then calls `Execute` with an `action_digest` referring to them. The
server will run the action and eventually return the result.
The input `Action`'s fields MUST meet the various canonicalization
requirements specified in the documentation for their types so that it has
the same digest as other logically equivalent `Action`s. The server MAY
enforce the requirements and return errors if a non-canonical input is
received. It MAY also proceed without verifying some or all of the
requirements, such as for performance reasons. If the server does not
verify the requirement, then it will treat the `Action` as distinct from
another logically equivalent action if they hash differently.
Returns a stream of
[google.longrunning.Operation][google.longrunning.Operation] messages
describing the resulting execution, with eventual `response`
[ExecuteResponse][build.bazel.remote.execution.v2.ExecuteResponse]. The
`metadata` on the operation is of type
[ExecuteOperationMetadata][build.bazel.remote.execution.v2.ExecuteOperationMetadata].
If the client remains connected after the first response is returned by
the server, then updates are streamed as if the client had called
[WaitExecution][build.bazel.remote.execution.v2.Execution.WaitExecution]
until the execution completes or the request reaches an error. The
operation can also be queried using [Operations
API][google.longrunning.Operations.GetOperation].
The server NEED NOT implement other methods or functionality of the
Operations API.
Errors discovered during creation of the `Operation` will be reported
as gRPC Status errors, while errors that occurred while running the
action will be reported in the `status` field of the `ExecuteResponse`. The
server MUST NOT set the `error` field of the `Operation` proto.
The possible errors include:
* `INVALID_ARGUMENT`: One or more arguments are invalid.
* `FAILED_PRECONDITION`: One or more errors occurred in setting up the
action requested, such as a missing input or command or no worker being
available. The client may be able to fix the errors and retry.
* `RESOURCE_EXHAUSTED`: There is insufficient quota of some resource to run
the action.
* `UNAVAILABLE`: Due to a transient condition, such as all workers being
occupied (and the server does not support a queue), the action could not
be started. The client should retry.
* `INTERNAL`: An internal error occurred in the execution engine or the
worker.
* `DEADLINE_EXCEEDED`: The execution timed out.
In the case of a missing input or command, the server SHOULD additionally
send a [PreconditionFailure][google.rpc.PreconditionFailure] error detail
where, for each requested blob not present in the CAS, there is a
`Violation` with a `type` of `MISSING` and a `subject` of
`"blobs/{hash}/{size}"` indicating the digest of the missing blob.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WaitExecution(self, request, context):
"""Wait for an execution operation to complete. When the client initially
makes the request, the server immediately responds with the current status
of the execution. The server will leave the request stream open until the
operation completes, and then respond with the completed operation. The
server MAY choose to stream additional updates as execution progresses,
such as to provide an update as to the state of the execution.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ExecutionServicer_to_server(servicer, server):
rpc_method_handlers = {
'Execute': grpc.unary_stream_rpc_method_handler(
servicer.Execute,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'WaitExecution': grpc.unary_stream_rpc_method_handler(
servicer.WaitExecution,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.execution.v2.Execution', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
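# --- Illustrative sketch (not part of the generated module) ---------------
# Shows how the add_ExecutionServicer_to_server() helper above is typically
# used to expose an Execution servicer. The subclass name, worker count and
# port below are arbitrary placeholders; a real servicer would override
# Execute() and WaitExecution() rather than inherit the UNIMPLEMENTED stubs.
def _example_serve_execution(port=50051):
    from concurrent import futures

    class _NoopExecutionServicer(ExecutionServicer):
        """Placeholder servicer; inherits the UNIMPLEMENTED methods above."""

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_ExecutionServicer_to_server(_NoopExecutionServicer(), server)
    server.add_insecure_port('[::]:{}'.format(port))
    server.start()
    return server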
# This class is part of an EXPERIMENTAL API.
class Execution(object):
"""The Remote Execution API is used to execute an
[Action][build.bazel.remote.execution.v2.Action] on the remote
workers.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def Execute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.Execution/Execute',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ExecuteRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def WaitExecution(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.Execution/WaitExecution',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.WaitExecutionRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
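# --- Illustrative sketch (not part of the generated module) ---------------
# Client-side use of ExecutionStub, following the flow described in the
# Execute() docstring above: the Action, its Command and its inputs are
# assumed to be in CAS already, identified here by a placeholder digest.
# The endpoint and instance name are likewise placeholders.
def _example_execute(endpoint, action_hash, action_size_bytes, instance_name=''):
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    stub = ExecutionStub(grpc.insecure_channel(endpoint))
    request = remote_execution_pb2.ExecuteRequest(
        instance_name=instance_name,
        action_digest=remote_execution_pb2.Digest(
            hash=action_hash, size_bytes=action_size_bytes),
        skip_cache_lookup=False)
    # Execute() streams google.longrunning.Operation messages; the final one
    # has done=True and carries the ExecuteResponse in its `response` field.
    operation = None
    for operation in stub.Execute(request):
        pass
    return operation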
class ActionCacheStub(object):
"""The action cache API is used to query whether a given action has already been
performed and, if so, retrieve its result. Unlike the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
which addresses blobs by their own content, the action cache addresses the
[ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
which produced them.
The lifetime of entries in the action cache is implementation-specific, but
the server SHOULD assume that more recently used entries are more likely to
be used again. Additionally, action cache implementations SHOULD ensure that
any blobs referenced in the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
are still valid when returning a result.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetActionResult = channel.unary_unary(
'/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
)
self.UpdateActionResult = channel.unary_unary(
'/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
)
class ActionCacheServicer(object):
"""The action cache API is used to query whether a given action has already been
performed and, if so, retrieve its result. Unlike the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
which addresses blobs by their own content, the action cache addresses the
[ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
which produced them.
The lifetime of entries in the action cache is implementation-specific, but
the server SHOULD assume that more recently used entries are more likely to
be used again. Additionally, action cache implementations SHOULD ensure that
any blobs referenced in the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
are still valid when returning a result.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def GetActionResult(self, request, context):
"""Retrieve a cached execution result.
Errors:
* `NOT_FOUND`: The requested `ActionResult` is not in the cache.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateActionResult(self, request, context):
"""Upload a new execution result.
This method is intended for servers which implement the distributed cache
independently of the
[Execution][build.bazel.remote.execution.v2.Execution] API. As a
result, it is OPTIONAL for servers to implement.
In order to allow the server to perform access control based on the type of
action, and to assist with client debugging, the client MUST first upload
the [Action][build.bazel.remote.execution.v2.Action] that produced the
result, along with its
[Command][build.bazel.remote.execution.v2.Command], into the
`ContentAddressableStorage`.
Errors:
* `NOT_IMPLEMENTED`: This method is not supported by the server.
* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
entry to the cache.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ActionCacheServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetActionResult': grpc.unary_unary_rpc_method_handler(
servicer.GetActionResult,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
),
'UpdateActionResult': grpc.unary_unary_rpc_method_handler(
servicer.UpdateActionResult,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.execution.v2.ActionCache', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ActionCache(object):
"""The action cache API is used to query whether a given action has already been
performed and, if so, retrieve its result. Unlike the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage],
which addresses blobs by their own content, the action cache addresses the
[ActionResult][build.bazel.remote.execution.v2.ActionResult] by a
digest of the encoded [Action][build.bazel.remote.execution.v2.Action]
which produced them.
The lifetime of entries in the action cache is implementation-specific, but
the server SHOULD assume that more recently used entries are more likely to
be used again. Additionally, action cache implementations SHOULD ensure that
any blobs referenced in the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
are still valid when returning a result.
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def GetActionResult(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ActionCache/GetActionResult',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetActionResultRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateActionResult(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ActionCache/UpdateActionResult',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.UpdateActionResultRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ActionResult.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
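# --- Illustrative sketch (not part of the generated module) ---------------
# Querying the action cache with ActionCacheStub as described in the
# GetActionResult() docstring above; a NOT_FOUND status simply means the
# action has not been cached. Endpoint and digest values are placeholders.
def _example_lookup_action_result(endpoint, action_hash, action_size_bytes, instance_name=''):
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    stub = ActionCacheStub(grpc.insecure_channel(endpoint))
    request = remote_execution_pb2.GetActionResultRequest(
        instance_name=instance_name,
        action_digest=remote_execution_pb2.Digest(
            hash=action_hash, size_bytes=action_size_bytes))
    try:
        return stub.GetActionResult(request)  # ActionResult on a cache hit
    except grpc.RpcError as e:
        if e.code() == grpc.StatusCode.NOT_FOUND:
            return None                       # cache miss
        raise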
class ContentAddressableStorageStub(object):
"""The CAS (content-addressable storage) is used to store the inputs to and
outputs from the execution service. Each piece of content is addressed by the
digest of its binary data.
Most of the binary data stored in the CAS is opaque to the execution engine,
and is only used as a communication medium. In order to build an
[Action][build.bazel.remote.execution.v2.Action],
however, the client will need to also upload the
[Command][build.bazel.remote.execution.v2.Command] and input root
[Directory][build.bazel.remote.execution.v2.Directory] for the Action.
The Command and Directory messages must be marshalled to wire format and then
uploaded under the hash as with any other piece of content. In practice, the
input root directory is likely to refer to other Directories in its
hierarchy, which must also each be uploaded on their own.
For small file uploads the client should group them together and call
[BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
on chunks of no more than 10 MiB. For large uploads, the client must use the
[Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
`resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
where `instance_name` is as described in the next paragraph, `uuid` is a
version 4 UUID generated by the client, and `hash` and `size` are the
[Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
`uuid` is used only to avoid collisions when multiple clients try to upload
the same file (or the same client tries to upload the file multiple times at
once on different threads), so the client MAY reuse the `uuid` for uploading
different blobs. The `resource_name` may optionally have a trailing filename
(or other metadata) for a client to use if it is storing URLs, as in
`{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
after the `size` is ignored.
A single server MAY support multiple instances of the execution system, each
with their own workers, storage, cache, etc. The exact relationship between
instances is up to the server. If the server does support multiple instances,
the `instance_name` is an identifier, possibly containing multiple path
segments, used to distinguish between the various instances on the server, in
a manner defined by the server. For servers which do not support multiple
instances, the `instance_name` is the empty path and the leading slash is
omitted, so that
the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
When attempting an upload, if another client has already completed the upload
(which may occur in the middle of a single upload if another client uploads
the same blob concurrently), the request will terminate immediately with
a response whose `committed_size` is the full size of the uploaded file
(regardless of how much data was transmitted by the client). If the client
completes the upload but the
[Digest][build.bazel.remote.execution.v2.Digest] does not match, an
`INVALID_ARGUMENT` error will be returned. In either case, the client should
not attempt to retry the upload.
For downloading blobs, the client must use the
[Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
`instance_name` is the instance name (see above), and `hash` and `size` are
the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
The lifetime of entries in the CAS is implementation specific, but it SHOULD
be long enough to allow for newly-added and recently looked-up entries to be
used in subsequent calls (e.g. to
[Execute][build.bazel.remote.execution.v2.Execution.Execute]).
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.FindMissingBlobs = channel.unary_unary(
'/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
)
self.BatchUpdateBlobs = channel.unary_unary(
'/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
)
self.BatchReadBlobs = channel.unary_unary(
'/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString,
)
self.GetTree = channel.unary_stream(
'/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
)
class ContentAddressableStorageServicer(object):
"""The CAS (content-addressable storage) is used to store the inputs to and
outputs from the execution service. Each piece of content is addressed by the
digest of its binary data.
Most of the binary data stored in the CAS is opaque to the execution engine,
and is only used as a communication medium. In order to build an
[Action][build.bazel.remote.execution.v2.Action],
however, the client will need to also upload the
[Command][build.bazel.remote.execution.v2.Command] and input root
[Directory][build.bazel.remote.execution.v2.Directory] for the Action.
The Command and Directory messages must be marshalled to wire format and then
uploaded under the hash as with any other piece of content. In practice, the
input root directory is likely to refer to other Directories in its
hierarchy, which must also each be uploaded on their own.
For small file uploads the client should group them together and call
[BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
on chunks of no more than 10 MiB. For large uploads, the client must use the
[Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
`resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
where `instance_name` is as described in the next paragraph, `uuid` is a
version 4 UUID generated by the client, and `hash` and `size` are the
[Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
`uuid` is used only to avoid collisions when multiple clients try to upload
the same file (or the same client tries to upload the file multiple times at
once on different threads), so the client MAY reuse the `uuid` for uploading
different blobs. The `resource_name` may optionally have a trailing filename
(or other metadata) for a client to use if it is storing URLs, as in
`{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
after the `size` is ignored.
A single server MAY support multiple instances of the execution system, each
with their own workers, storage, cache, etc. The exact relationship between
instances is up to the server. If the server does support multiple instances,
the `instance_name` is an identifier, possibly containing multiple path
segments, used to distinguish between the various instances on the server, in
a manner defined by the server. For servers which do not support multiple
instances, the `instance_name` is the empty path and the leading slash is
omitted, so that
the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
When attempting an upload, if another client has already completed the upload
(which may occur in the middle of a single upload if another client uploads
the same blob concurrently), the request will terminate immediately with
a response whose `committed_size` is the full size of the uploaded file
(regardless of how much data was transmitted by the client). If the client
completes the upload but the
[Digest][build.bazel.remote.execution.v2.Digest] does not match, an
`INVALID_ARGUMENT` error will be returned. In either case, the client should
not attempt to retry the upload.
For downloading blobs, the client must use the
[Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
`instance_name` is the instance name (see above), and `hash` and `size` are
the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
The lifetime of entries in the CAS is implementation specific, but it SHOULD
be long enough to allow for newly-added and recently looked-up entries to be
used in subsequent calls (e.g. to
[Execute][build.bazel.remote.execution.v2.Execution.Execute]).
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
def FindMissingBlobs(self, request, context):
"""Determine if blobs are present in the CAS.
Clients can use this API before uploading blobs to determine which ones are
already present in the CAS and do not need to be uploaded again.
There are no method-specific errors.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchUpdateBlobs(self, request, context):
"""Upload many blobs at once.
The server may enforce a limit of the combined total size of blobs
to be uploaded using this API. This limit may be obtained using the
[Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
Requests exceeding the limit should either be split into smaller
chunks or uploaded using the
[ByteStream API][google.bytestream.ByteStream], as appropriate.
This request is equivalent to calling a ByteStream `Write` request
on each individual blob, in parallel. The requests may succeed or fail
independently.
Errors:
* `INVALID_ARGUMENT`: The client attempted to upload more than the
server supported limit.
Individual requests may return the following errors, additionally:
* `RESOURCE_EXHAUSTED`: There is insufficient disk quota to store the blob.
* `INVALID_ARGUMENT`: The
[Digest][build.bazel.remote.execution.v2.Digest] does not match the
provided data.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def BatchReadBlobs(self, request, context):
"""Download many blobs at once.
The server may enforce a limit of the combined total size of blobs
to be downloaded using this API. This limit may be obtained using the
[Capabilities][build.bazel.remote.execution.v2.Capabilities] API.
Requests exceeding the limit should either be split into smaller
chunks or downloaded using the
[ByteStream API][google.bytestream.ByteStream], as appropriate.
This request is equivalent to calling a ByteStream `Read` request
on each individual blob, in parallel. The requests may succeed or fail
independently.
Errors:
* `INVALID_ARGUMENT`: The client attempted to read more than the
server supported limit.
Every error on individual read will be returned in the corresponding digest
status.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTree(self, request, context):
"""Fetch the entire directory tree rooted at a node.
This request must be targeted at a
[Directory][build.bazel.remote.execution.v2.Directory] stored in the
[ContentAddressableStorage][build.bazel.remote.execution.v2.ContentAddressableStorage]
(CAS). The server will enumerate the `Directory` tree recursively and
return every node descended from the root.
The GetTreeRequest.page_token parameter can be used to skip ahead in
the stream (e.g. when retrying a partially completed and aborted request),
by setting it to a value taken from GetTreeResponse.next_page_token of the
last successfully processed GetTreeResponse.
The exact traversal order is unspecified and, unless retrieving subsequent
pages from an earlier request, is not guaranteed to be stable across
multiple invocations of `GetTree`.
If part of the tree is missing from the CAS, the server will return the
portion present and omit the rest.
* `NOT_FOUND`: The requested tree root is not present in the CAS.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ContentAddressableStorageServicer_to_server(servicer, server):
rpc_method_handlers = {
'FindMissingBlobs': grpc.unary_unary_rpc_method_handler(
servicer.FindMissingBlobs,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.SerializeToString,
),
'BatchUpdateBlobs': grpc.unary_unary_rpc_method_handler(
servicer.BatchUpdateBlobs,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.SerializeToString,
),
'BatchReadBlobs': grpc.unary_unary_rpc_method_handler(
servicer.BatchReadBlobs,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.SerializeToString,
),
'GetTree': grpc.unary_stream_rpc_method_handler(
servicer.GetTree,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.execution.v2.ContentAddressableStorage', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ContentAddressableStorage(object):
"""The CAS (content-addressable storage) is used to store the inputs to and
outputs from the execution service. Each piece of content is addressed by the
digest of its binary data.
Most of the binary data stored in the CAS is opaque to the execution engine,
and is only used as a communication medium. In order to build an
[Action][build.bazel.remote.execution.v2.Action],
however, the client will need to also upload the
[Command][build.bazel.remote.execution.v2.Command] and input root
[Directory][build.bazel.remote.execution.v2.Directory] for the Action.
The Command and Directory messages must be marshalled to wire format and then
uploaded under the hash as with any other piece of content. In practice, the
input root directory is likely to refer to other Directories in its
hierarchy, which must also each be uploaded on their own.
For small file uploads the client should group them together and call
[BatchUpdateBlobs][build.bazel.remote.execution.v2.ContentAddressableStorage.BatchUpdateBlobs]
on chunks of no more than 10 MiB. For large uploads, the client must use the
[Write method][google.bytestream.ByteStream.Write] of the ByteStream API. The
`resource_name` is `{instance_name}/uploads/{uuid}/blobs/{hash}/{size}`,
where `instance_name` is as described in the next paragraph, `uuid` is a
version 4 UUID generated by the client, and `hash` and `size` are the
[Digest][build.bazel.remote.execution.v2.Digest] of the blob. The
`uuid` is used only to avoid collisions when multiple clients try to upload
the same file (or the same client tries to upload the file multiple times at
once on different threads), so the client MAY reuse the `uuid` for uploading
different blobs. The `resource_name` may optionally have a trailing filename
(or other metadata) for a client to use if it is storing URLs, as in
`{instance}/uploads/{uuid}/blobs/{hash}/{size}/foo/bar/baz.cc`. Anything
after the `size` is ignored.
A single server MAY support multiple instances of the execution system, each
with their own workers, storage, cache, etc. The exact relationship between
instances is up to the server. If the server does support multiple instances,
the `instance_name` is an identifier, possibly containing multiple path
segments, used to distinguish between the various instances on the server, in
a manner defined by the server. For servers which do not support multiple
instances, the `instance_name` is the empty path and the leading slash is
omitted, so that
the `resource_name` becomes `uploads/{uuid}/blobs/{hash}/{size}`.
When attempting an upload, if another client has already completed the upload
(which may occur in the middle of a single upload if another client uploads
the same blob concurrently), the request will terminate immediately with
a response whose `committed_size` is the full size of the uploaded file
(regardless of how much data was transmitted by the client). If the client
completes the upload but the
[Digest][build.bazel.remote.execution.v2.Digest] does not match, an
`INVALID_ARGUMENT` error will be returned. In either case, the client should
not attempt to retry the upload.
For downloading blobs, the client must use the
[Read method][google.bytestream.ByteStream.Read] of the ByteStream API, with
a `resource_name` of `"{instance_name}/blobs/{hash}/{size}"`, where
`instance_name` is the instance name (see above), and `hash` and `size` are
the [Digest][build.bazel.remote.execution.v2.Digest] of the blob.
The lifetime of entries in the CAS is implementation specific, but it SHOULD
be long enough to allow for newly-added and recently looked-up entries to be
used in subsequent calls (e.g. to
[Execute][build.bazel.remote.execution.v2.Execution.Execute]).
As with other services in the Remote Execution API, any call may return an
error with a [RetryInfo][google.rpc.RetryInfo] error detail providing
information about when the client should retry the request; clients SHOULD
respect the information provided.
"""
@staticmethod
def FindMissingBlobs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/FindMissingBlobs',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.FindMissingBlobsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BatchUpdateBlobs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchUpdateBlobs',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchUpdateBlobsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def BatchReadBlobs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/BatchReadBlobs',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.BatchReadBlobsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetTree(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/build.bazel.remote.execution.v2.ContentAddressableStorage/GetTree',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetTreeResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
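# --- Illustrative sketch (not part of the generated module) ---------------
# The small-blob upload flow described in the docstrings above: digest the
# data, ask FindMissingBlobs() which digests the server lacks, then push any
# missing blob with BatchUpdateBlobs(). SHA-256 is assumed as the digest
# function (the common default; the Capabilities service is authoritative).
# Endpoint and instance name are placeholders.
def _example_upload_blob(endpoint, data, instance_name=''):
    import hashlib
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    stub = ContentAddressableStorageStub(grpc.insecure_channel(endpoint))
    digest = remote_execution_pb2.Digest(
        hash=hashlib.sha256(data).hexdigest(), size_bytes=len(data))
    missing = stub.FindMissingBlobs(remote_execution_pb2.FindMissingBlobsRequest(
        instance_name=instance_name, blob_digests=[digest]))
    if any(d == digest for d in missing.missing_blob_digests):
        stub.BatchUpdateBlobs(remote_execution_pb2.BatchUpdateBlobsRequest(
            instance_name=instance_name,
            requests=[remote_execution_pb2.BatchUpdateBlobsRequest.Request(
                digest=digest, data=data)]))
    return digest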
class CapabilitiesStub(object):
"""The Capabilities service may be used by remote execution clients to query
various server properties, in order to self-configure or return meaningful
error messages.
The query may include a particular `instance_name`, in which case the values
returned will pertain to that instance.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCapabilities = channel.unary_unary(
'/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
request_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
response_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
)
class CapabilitiesServicer(object):
"""The Capabilities service may be used by remote execution clients to query
various server properties, in order to self-configure or return meaningful
error messages.
The query may include a particular `instance_name`, in which case the values
returned will pertain to that instance.
"""
def GetCapabilities(self, request, context):
"""GetCapabilities returns the server capabilities configuration.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_CapabilitiesServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCapabilities': grpc.unary_unary_rpc_method_handler(
servicer.GetCapabilities,
request_deserializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.FromString,
response_serializer=build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'build.bazel.remote.execution.v2.Capabilities', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Capabilities(object):
"""The Capabilities service may be used by remote execution clients to query
various server properties, in order to self-configure or return meaningful
error messages.
The query may include a particular `instance_name`, in which case the values
returned will pertain to that instance.
"""
@staticmethod
def GetCapabilities(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/build.bazel.remote.execution.v2.Capabilities/GetCapabilities',
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.GetCapabilitiesRequest.SerializeToString,
build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2.ServerCapabilities.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
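# --- Illustrative sketch (not part of the generated module) ---------------
# Querying server capabilities with CapabilitiesStub, e.g. to learn the batch
# size limit referred to in the CAS docstrings above. Endpoint and instance
# name are placeholders.
def _example_batch_size_limit(endpoint, instance_name=''):
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    stub = CapabilitiesStub(grpc.insecure_channel(endpoint))
    capabilities = stub.GetCapabilities(
        remote_execution_pb2.GetCapabilitiesRequest(instance_name=instance_name))
    # A value of 0 means the server advertises no explicit batch size limit.
    return capabilities.cache_capabilities.max_batch_total_size_bytes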
buildstream-1.6.9/buildstream/_protos/build/bazel/semver/ 0000775 0000000 0000000 00000000000 14375152700 0023543 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/semver/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0025642 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/build/bazel/semver/semver.proto 0000664 0000000 0000000 00000001352 14375152700 0026132 0 ustar 00root root 0000000 0000000 // Copyright 2018 The Bazel Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package build.bazel.semver;
message SemVer {
int32 major = 1;
int32 minor = 2;
int32 patch = 3;
string prerelease = 4;
}
buildstream-1.6.9/buildstream/_protos/build/bazel/semver/semver_pb2.py 0000664 0000000 0000000 00000002452 14375152700 0026164 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: build/bazel/semver/semver.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1f\x62uild/bazel/semver/semver.proto\x12\x12\x62uild.bazel.semver\"I\n\x06SemVer\x12\r\n\x05major\x18\x01 \x01(\x05\x12\r\n\x05minor\x18\x02 \x01(\x05\x12\r\n\x05patch\x18\x03 \x01(\x05\x12\x12\n\nprerelease\x18\x04 \x01(\tb\x06proto3')
_SEMVER = DESCRIPTOR.message_types_by_name['SemVer']
SemVer = _reflection.GeneratedProtocolMessageType('SemVer', (_message.Message,), {
'DESCRIPTOR' : _SEMVER,
'__module__' : 'build.bazel.semver.semver_pb2'
# @@protoc_insertion_point(class_scope:build.bazel.semver.SemVer)
})
_sym_db.RegisterMessage(SemVer)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_SEMVER._serialized_start=55
_SEMVER._serialized_end=128
# @@protoc_insertion_point(module_scope)
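# --- Illustrative sketch (not part of the generated module) ---------------
# Round-tripping the SemVer message generated above; the version numbers are
# arbitrary example values.
def _example_semver_roundtrip():
    version = SemVer(major=2, minor=0, patch=0)
    wire = version.SerializeToString()
    return SemVer.FromString(wire)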
buildstream-1.6.9/buildstream/_protos/build/bazel/semver/semver_pb2_grpc.py 0000664 0000000 0000000 00000000237 14375152700 0027176 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
buildstream-1.6.9/buildstream/_protos/buildstream/ 0000775 0000000 0000000 00000000000 14375152700 0022361 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/buildstream/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0024460 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/buildstream/v2/ 0000775 0000000 0000000 00000000000 14375152700 0022710 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/buildstream/v2/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0025007 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/buildstream/v2/buildstream.proto 0000664 0000000 0000000 00000006566 14375152700 0026325 0 ustar 00root root 0000000 0000000 // Copyright 2018 Codethink Limited
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package buildstream.v2;
import "build/bazel/remote/execution/v2/remote_execution.proto";
import "google/api/annotations.proto";
service ReferenceStorage {
// Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory]
// digest by name.
//
// Errors:
// * `NOT_FOUND`: The requested reference is not in the cache.
rpc GetReference(GetReferenceRequest) returns (GetReferenceResponse) {
option (google.api.http) = { get: "/v2/{instance_name=**}/buildstream/refs/{key}" };
}
// Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory]
// digest.
//
// Errors:
// * `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
// entry to the cache.
rpc UpdateReference(UpdateReferenceRequest) returns (UpdateReferenceResponse) {
option (google.api.http) = { put: "/v2/{instance_name=**}/buildstream/refs/{key}" body: "digest" };
}
rpc Status(StatusRequest) returns (StatusResponse) {
option (google.api.http) = { put: "/v2/{instance_name=**}/buildstream/refs:status" };
}
}
message GetReferenceRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The name of the reference.
string key = 2;
}
message GetReferenceResponse {
// The digest of the CAS [Directory][build.bazel.remote.execution.v2.Directory].
build.bazel.remote.execution.v2.Digest digest = 1;
}
message UpdateReferenceRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
// The names of the references.
repeated string keys = 2;
// The digest of the CAS [Directory][build.bazel.remote.execution.v2.Directory]
// to store in the cache.
build.bazel.remote.execution.v2.Digest digest = 3;
}
message UpdateReferenceResponse {
}
message StatusRequest {
// The instance of the execution system to operate against. A server may
// support multiple instances of the execution system (with their own workers,
// storage, caches, etc.). The server MAY require use of this field to select
// between them in an implementation-defined fashion, otherwise it can be
// omitted.
string instance_name = 1;
}
message StatusResponse {
// Whether reference updates are allowed for the connected client.
bool allow_updates = 1;
}
buildstream-1.6.9/buildstream/_protos/buildstream/v2/buildstream_pb2.py 0000664 0000000 0000000 00000013646 14375152700 0026352 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: buildstream/v2/buildstream.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2 as build_dot_bazel_dot_remote_dot_execution_dot_v2_dot_remote__execution__pb2
from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n buildstream/v2/buildstream.proto\x12\x0e\x62uildstream.v2\x1a\x36\x62uild/bazel/remote/execution/v2/remote_execution.proto\x1a\x1cgoogle/api/annotations.proto\"9\n\x13GetReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"O\n\x14GetReferenceResponse\x12\x37\n\x06\x64igest\x18\x01 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"v\n\x16UpdateReferenceRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\x12\x0c\n\x04keys\x18\x02 \x03(\t\x12\x37\n\x06\x64igest\x18\x03 \x01(\x0b\x32\'.build.bazel.remote.execution.v2.Digest\"\x19\n\x17UpdateReferenceResponse\"&\n\rStatusRequest\x12\x15\n\rinstance_name\x18\x01 \x01(\t\"\'\n\x0eStatusResponse\x12\x15\n\rallow_updates\x18\x01 \x01(\x08\x32\xca\x03\n\x10ReferenceStorage\x12\x90\x01\n\x0cGetReference\x12#.buildstream.v2.GetReferenceRequest\x1a$.buildstream.v2.GetReferenceResponse\"5\x82\xd3\xe4\x93\x02/\x12-/v2/{instance_name=**}/buildstream/refs/{key}\x12\xa1\x01\n\x0fUpdateReference\x12&.buildstream.v2.UpdateReferenceRequest\x1a\'.buildstream.v2.UpdateReferenceResponse\"=\x82\xd3\xe4\x93\x02\x37\x1a-/v2/{instance_name=**}/buildstream/refs/{key}:\x06\x64igest\x12\x7f\n\x06Status\x12\x1d.buildstream.v2.StatusRequest\x1a\x1e.buildstream.v2.StatusResponse\"6\x82\xd3\xe4\x93\x02\x30\x1a./v2/{instance_name=**}/buildstream/refs:statusb\x06proto3')
_GETREFERENCEREQUEST = DESCRIPTOR.message_types_by_name['GetReferenceRequest']
_GETREFERENCERESPONSE = DESCRIPTOR.message_types_by_name['GetReferenceResponse']
_UPDATEREFERENCEREQUEST = DESCRIPTOR.message_types_by_name['UpdateReferenceRequest']
_UPDATEREFERENCERESPONSE = DESCRIPTOR.message_types_by_name['UpdateReferenceResponse']
_STATUSREQUEST = DESCRIPTOR.message_types_by_name['StatusRequest']
_STATUSRESPONSE = DESCRIPTOR.message_types_by_name['StatusResponse']
GetReferenceRequest = _reflection.GeneratedProtocolMessageType('GetReferenceRequest', (_message.Message,), {
'DESCRIPTOR' : _GETREFERENCEREQUEST,
'__module__' : 'buildstream.v2.buildstream_pb2'
# @@protoc_insertion_point(class_scope:buildstream.v2.GetReferenceRequest)
})
_sym_db.RegisterMessage(GetReferenceRequest)
GetReferenceResponse = _reflection.GeneratedProtocolMessageType('GetReferenceResponse', (_message.Message,), {
'DESCRIPTOR' : _GETREFERENCERESPONSE,
'__module__' : 'buildstream.v2.buildstream_pb2'
# @@protoc_insertion_point(class_scope:buildstream.v2.GetReferenceResponse)
})
_sym_db.RegisterMessage(GetReferenceResponse)
UpdateReferenceRequest = _reflection.GeneratedProtocolMessageType('UpdateReferenceRequest', (_message.Message,), {
'DESCRIPTOR' : _UPDATEREFERENCEREQUEST,
'__module__' : 'buildstream.v2.buildstream_pb2'
# @@protoc_insertion_point(class_scope:buildstream.v2.UpdateReferenceRequest)
})
_sym_db.RegisterMessage(UpdateReferenceRequest)
UpdateReferenceResponse = _reflection.GeneratedProtocolMessageType('UpdateReferenceResponse', (_message.Message,), {
'DESCRIPTOR' : _UPDATEREFERENCERESPONSE,
'__module__' : 'buildstream.v2.buildstream_pb2'
# @@protoc_insertion_point(class_scope:buildstream.v2.UpdateReferenceResponse)
})
_sym_db.RegisterMessage(UpdateReferenceResponse)
StatusRequest = _reflection.GeneratedProtocolMessageType('StatusRequest', (_message.Message,), {
'DESCRIPTOR' : _STATUSREQUEST,
'__module__' : 'buildstream.v2.buildstream_pb2'
# @@protoc_insertion_point(class_scope:buildstream.v2.StatusRequest)
})
_sym_db.RegisterMessage(StatusRequest)
StatusResponse = _reflection.GeneratedProtocolMessageType('StatusResponse', (_message.Message,), {
'DESCRIPTOR' : _STATUSRESPONSE,
'__module__' : 'buildstream.v2.buildstream_pb2'
# @@protoc_insertion_point(class_scope:buildstream.v2.StatusResponse)
})
_sym_db.RegisterMessage(StatusResponse)
_REFERENCESTORAGE = DESCRIPTOR.services_by_name['ReferenceStorage']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_REFERENCESTORAGE.methods_by_name['GetReference']._options = None
_REFERENCESTORAGE.methods_by_name['GetReference']._serialized_options = b'\202\323\344\223\002/\022-/v2/{instance_name=**}/buildstream/refs/{key}'
_REFERENCESTORAGE.methods_by_name['UpdateReference']._options = None
_REFERENCESTORAGE.methods_by_name['UpdateReference']._serialized_options = b'\202\323\344\223\0027\032-/v2/{instance_name=**}/buildstream/refs/{key}:\006digest'
_REFERENCESTORAGE.methods_by_name['Status']._options = None
_REFERENCESTORAGE.methods_by_name['Status']._serialized_options = b'\202\323\344\223\0020\032./v2/{instance_name=**}/buildstream/refs:status'
_GETREFERENCEREQUEST._serialized_start=138
_GETREFERENCEREQUEST._serialized_end=195
_GETREFERENCERESPONSE._serialized_start=197
_GETREFERENCERESPONSE._serialized_end=276
_UPDATEREFERENCEREQUEST._serialized_start=278
_UPDATEREFERENCEREQUEST._serialized_end=396
_UPDATEREFERENCERESPONSE._serialized_start=398
_UPDATEREFERENCERESPONSE._serialized_end=423
_STATUSREQUEST._serialized_start=425
_STATUSREQUEST._serialized_end=463
_STATUSRESPONSE._serialized_start=465
_STATUSRESPONSE._serialized_end=504
_REFERENCESTORAGE._serialized_start=507
_REFERENCESTORAGE._serialized_end=965
# @@protoc_insertion_point(module_scope)
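# --- Illustrative sketch, not part of the generated code above -------------
# A minimal example of how the generated message classes might be used to
# describe a cached artifact reference. The instance name, key and digest
# values are made up for illustration, and the Digest type is assumed to come
# from the vendored remote_execution_pb2 module that this proto depends on.
def _example_update_reference_request():
    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
    # A CAS Directory digest: content hash plus size in bytes (placeholder values).
    digest = remote_execution_pb2.Digest(hash='0' * 64, size_bytes=142)
    # Associate one or more reference keys with that digest.
    return UpdateReferenceRequest(
        instance_name='main',                      # hypothetical instance name
        keys=['example/element/cache-key'],        # hypothetical reference key
        digest=digest,
    )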
buildstream-1.6.9/buildstream/_protos/buildstream/v2/buildstream_pb2_grpc.py 0000664 0000000 0000000 00000014532 14375152700 0027360 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.buildstream.v2 import buildstream_pb2 as buildstream_dot_v2_dot_buildstream__pb2
class ReferenceStorageStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetReference = channel.unary_unary(
'/buildstream.v2.ReferenceStorage/GetReference',
request_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString,
response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString,
)
self.UpdateReference = channel.unary_unary(
'/buildstream.v2.ReferenceStorage/UpdateReference',
request_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString,
response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString,
)
self.Status = channel.unary_unary(
'/buildstream.v2.ReferenceStorage/Status',
request_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString,
response_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString,
)
class ReferenceStorageServicer(object):
"""Missing associated documentation comment in .proto file."""
def GetReference(self, request, context):
"""Retrieve a CAS [Directory][build.bazel.remote.execution.v2.Directory]
digest by name.
Errors:
* `NOT_FOUND`: The requested reference is not in the cache.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateReference(self, request, context):
"""Associate a name with a CAS [Directory][build.bazel.remote.execution.v2.Directory]
digest.
Errors:
* `RESOURCE_EXHAUSTED`: There is insufficient storage space to add the
entry to the cache.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Status(self, request, context):
"""Missing associated documentation comment in .proto file."""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ReferenceStorageServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetReference': grpc.unary_unary_rpc_method_handler(
servicer.GetReference,
request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.FromString,
response_serializer=buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.SerializeToString,
),
'UpdateReference': grpc.unary_unary_rpc_method_handler(
servicer.UpdateReference,
request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.FromString,
response_serializer=buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.SerializeToString,
),
'Status': grpc.unary_unary_rpc_method_handler(
servicer.Status,
request_deserializer=buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.FromString,
response_serializer=buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'buildstream.v2.ReferenceStorage', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ReferenceStorage(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def GetReference(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/GetReference',
buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest.SerializeToString,
buildstream_dot_v2_dot_buildstream__pb2.GetReferenceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateReference(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/UpdateReference',
buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceRequest.SerializeToString,
buildstream_dot_v2_dot_buildstream__pb2.UpdateReferenceResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Status(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/buildstream.v2.ReferenceStorage/Status',
buildstream_dot_v2_dot_buildstream__pb2.StatusRequest.SerializeToString,
buildstream_dot_v2_dot_buildstream__pb2.StatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
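# --- Illustrative sketch, not part of the generated code above -------------
# A rough example of driving ReferenceStorageStub from a client: query whether
# the remote accepts updates, then resolve a reference key to a CAS Directory
# digest. The endpoint, instance name and key are hypothetical values.
def _example_query_reference(endpoint='cache.example.com:11001',
                             instance_name='main',
                             key='example/element/cache-key'):
    channel = grpc.insecure_channel(endpoint)
    stub = ReferenceStorageStub(channel)
    # Status() reports whether the remote allows UpdateReference() calls.
    status = stub.Status(
        buildstream_dot_v2_dot_buildstream__pb2.StatusRequest(
            instance_name=instance_name))
    try:
        response = stub.GetReference(
            buildstream_dot_v2_dot_buildstream__pb2.GetReferenceRequest(
                instance_name=instance_name, key=key))
        digest = response.digest
    except grpc.RpcError as error:
        if error.code() == grpc.StatusCode.NOT_FOUND:
            digest = None          # the requested reference is not in the cache
        else:
            raise
    return status.allow_updates, digest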
buildstream-1.6.9/buildstream/_protos/google/ 0000775 0000000 0000000 00000000000 14375152700 0021322 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0023421 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/api/ 0000775 0000000 0000000 00000000000 14375152700 0022073 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/api/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0024172 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/api/annotations.proto 0000664 0000000 0000000 00000002033 14375152700 0025513 0 ustar 00root root 0000000 0000000 // Copyright (c) 2015, Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.api;
import "google/api/http.proto";
import "google/protobuf/descriptor.proto";
option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "AnnotationsProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";
extend google.protobuf.MethodOptions {
// See `HttpRule`.
HttpRule http = 72295728;
}
buildstream-1.6.9/buildstream/_protos/google/api/annotations_pb2.py 0000664 0000000 0000000 00000003035 14375152700 0025546 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/annotations.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from buildstream._protos.google.api import http_pb2 as google_dot_api_dot_http__pb2
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1cgoogle/api/annotations.proto\x12\ngoogle.api\x1a\x15google/api/http.proto\x1a google/protobuf/descriptor.proto:E\n\x04http\x12\x1e.google.protobuf.MethodOptions\x18\xb0\xca\xbc\" \x01(\x0b\x32\x14.google.api.HttpRuleBn\n\x0e\x63om.google.apiB\x10\x41nnotationsProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xa2\x02\x04GAPIb\x06proto3')
HTTP_FIELD_NUMBER = 72295728
http = DESCRIPTOR.extensions_by_name['http']
if _descriptor._USE_C_DESCRIPTORS == False:
google_dot_protobuf_dot_descriptor__pb2.MethodOptions.RegisterExtension(http)
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\016com.google.apiB\020AnnotationsProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\242\002\004GAPI'
# @@protoc_insertion_point(module_scope)
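# --- Illustrative sketch, not part of the generated code above -------------
# The `http` extension registered above carries the REST mapping of annotated
# RPC methods. As a rough example, the HttpRule attached to
# ReferenceStorage.GetReference in buildstream.proto could be read like this,
# assuming the vendored buildstream_pb2 module is importable:
def _example_read_http_rule():
    from buildstream._protos.buildstream.v2 import buildstream_pb2
    method = (buildstream_pb2.DESCRIPTOR
              .services_by_name['ReferenceStorage']
              .methods_by_name['GetReference'])
    # Returns a google.api.HttpRule message; its `get` field holds the URL
    # template declared in the proto annotation.
    return method.GetOptions().Extensions[http]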
buildstream-1.6.9/buildstream/_protos/google/api/annotations_pb2_grpc.py 0000664 0000000 0000000 00000000237 14375152700 0026562 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
buildstream-1.6.9/buildstream/_protos/google/api/http.proto 0000664 0000000 0000000 00000027114 14375152700 0024144 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.api;
option cc_enable_arenas = true;
option go_package = "google.golang.org/genproto/googleapis/api/annotations;annotations";
option java_multiple_files = true;
option java_outer_classname = "HttpProto";
option java_package = "com.google.api";
option objc_class_prefix = "GAPI";
// Defines the HTTP configuration for an API service. It contains a list of
// [HttpRule][google.api.HttpRule], each specifying the mapping of an RPC method
// to one or more HTTP REST API methods.
message Http {
// A list of HTTP configuration rules that apply to individual API methods.
//
// **NOTE:** All service configuration rules follow "last one wins" order.
repeated HttpRule rules = 1;
// When set to true, URL path parameters will be fully URI-decoded except in
// cases of single segment matches in reserved expansion, where "%2F" will be
// left encoded.
//
// The default behavior is to not decode RFC 6570 reserved characters in multi
// segment matches.
bool fully_decode_reserved_expansion = 2;
}
// `HttpRule` defines the mapping of an RPC method to one or more HTTP
// REST API methods. The mapping specifies how different portions of the RPC
// request message are mapped to URL path, URL query parameters, and
// HTTP request body. The mapping is typically specified as an
// `google.api.http` annotation on the RPC method,
// see "google/api/annotations.proto" for details.
//
// The mapping consists of a field specifying the path template and
// method kind. The path template can refer to fields in the request
// message, as in the example below which describes a REST GET
// operation on a resource collection of messages:
//
//
// service Messaging {
// rpc GetMessage(GetMessageRequest) returns (Message) {
// option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}";
// }
// }
// message GetMessageRequest {
// message SubMessage {
// string subfield = 1;
// }
// string message_id = 1; // mapped to the URL
// SubMessage sub = 2; // `sub.subfield` is url-mapped
// }
// message Message {
// string text = 1; // content of the resource
// }
//
// The same http annotation can alternatively be expressed inside the
// `GRPC API Configuration` YAML file.
//
// http:
// rules:
// - selector: .Messaging.GetMessage
// get: /v1/messages/{message_id}/{sub.subfield}
//
// This definition enables an automatic, bidirectional mapping of HTTP
// JSON to RPC. Example:
//
// HTTP | RPC
// -----|-----
// `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))`
//
// In general, not only fields but also field paths can be referenced
// from a path pattern. Fields mapped to the path pattern cannot be
// repeated and must have a primitive (non-message) type.
//
// Any fields in the request message which are not bound by the path
// pattern automatically become (optional) HTTP query
// parameters. Assume the following definition of the request message:
//
//
// service Messaging {
// rpc GetMessage(GetMessageRequest) returns (Message) {
// option (google.api.http).get = "/v1/messages/{message_id}";
// }
// }
// message GetMessageRequest {
// message SubMessage {
// string subfield = 1;
// }
// string message_id = 1; // mapped to the URL
// int64 revision = 2; // becomes a parameter
// SubMessage sub = 3; // `sub.subfield` becomes a parameter
// }
//
//
// This enables an HTTP JSON to RPC mapping as below:
//
// HTTP | RPC
// -----|-----
// `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))`
//
// Note that fields which are mapped to HTTP parameters must have a
// primitive type or a repeated primitive type. Message types are not
// allowed. In the case of a repeated type, the parameter can be
// repeated in the URL, as in `...?param=A&param=B`.
//
// For HTTP method kinds which allow a request body, the `body` field
// specifies the mapping. Consider a REST update method on the
// message resource collection:
//
//
// service Messaging {
// rpc UpdateMessage(UpdateMessageRequest) returns (Message) {
// option (google.api.http) = {
// put: "/v1/messages/{message_id}"
// body: "message"
// };
// }
// }
// message UpdateMessageRequest {
// string message_id = 1; // mapped to the URL
// Message message = 2; // mapped to the body
// }
//
//
// The following HTTP JSON to RPC mapping is enabled, where the
// representation of the JSON in the request body is determined by
// protos JSON encoding:
//
// HTTP | RPC
// -----|-----
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })`
//
// The special name `*` can be used in the body mapping to define that
// every field not bound by the path template should be mapped to the
// request body. This enables the following alternative definition of
// the update method:
//
// service Messaging {
// rpc UpdateMessage(Message) returns (Message) {
// option (google.api.http) = {
// put: "/v1/messages/{message_id}"
// body: "*"
// };
// }
// }
// message Message {
// string message_id = 1;
// string text = 2;
// }
//
//
// The following HTTP JSON to RPC mapping is enabled:
//
// HTTP | RPC
// -----|-----
// `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")`
//
// Note that when using `*` in the body mapping, it is not possible to
// have HTTP parameters, as all fields not bound by the path end in
// the body. This makes this option rarely used in practice when defining
// REST APIs. The common usage of `*` is in custom methods
// which don't use the URL at all for transferring data.
//
// It is possible to define multiple HTTP methods for one RPC by using
// the `additional_bindings` option. Example:
//
// service Messaging {
// rpc GetMessage(GetMessageRequest) returns (Message) {
// option (google.api.http) = {
// get: "/v1/messages/{message_id}"
// additional_bindings {
// get: "/v1/users/{user_id}/messages/{message_id}"
// }
// };
// }
// }
// message GetMessageRequest {
// string message_id = 1;
// string user_id = 2;
// }
//
//
// This enables the following two alternative HTTP JSON to RPC
// mappings:
//
// HTTP | RPC
// -----|-----
// `GET /v1/messages/123456` | `GetMessage(message_id: "123456")`
// `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")`
//
// # Rules for HTTP mapping
//
// The rules for mapping HTTP path, query parameters, and body fields
// to the request message are as follows:
//
// 1. The `body` field specifies either `*` or a field path, or is
// omitted. If omitted, it indicates there is no HTTP request body.
// 2. Leaf fields (recursive expansion of nested messages in the
// request) can be classified into three types:
// (a) Matched in the URL template.
// (b) Covered by body (if body is `*`, everything except (a) fields;
// else everything under the body field)
// (c) All other fields.
// 3. URL query parameters found in the HTTP request are mapped to (c) fields.
// 4. Any body sent with an HTTP request can contain only (b) fields.
//
// The syntax of the path template is as follows:
//
// Template = "/" Segments [ Verb ] ;
// Segments = Segment { "/" Segment } ;
// Segment = "*" | "**" | LITERAL | Variable ;
// Variable = "{" FieldPath [ "=" Segments ] "}" ;
// FieldPath = IDENT { "." IDENT } ;
// Verb = ":" LITERAL ;
//
// The syntax `*` matches a single path segment. The syntax `**` matches zero
// or more path segments, which must be the last part of the path except the
// `Verb`. The syntax `LITERAL` matches literal text in the path.
//
// The syntax `Variable` matches part of the URL path as specified by its
// template. A variable template must not contain other variables. If a variable
// matches a single path segment, its template may be omitted, e.g. `{var}`
// is equivalent to `{var=*}`.
//
// If a variable contains exactly one path segment, such as `"{var}"` or
// `"{var=*}"`, when such a variable is expanded into a URL path, all characters
// except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the
// Discovery Document as `{var}`.
//
// If a variable contains one or more path segments, such as `"{var=foo/*}"`
// or `"{var=**}"`, when such a variable is expanded into a URL path, all
// characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables
// show up in the Discovery Document as `{+var}`.
//
// NOTE: While the single segment variable matches the semantics of
// [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2
// Simple String Expansion, the multi segment variable **does not** match
// RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion
// does not expand special characters like `?` and `#`, which would lead
// to invalid URLs.
//
// NOTE: the field paths in variables and in the `body` must not refer to
// repeated fields or map fields.
message HttpRule {
// Selects methods to which this rule applies.
//
// Refer to [selector][google.api.DocumentationRule.selector] for syntax details.
string selector = 1;
// Determines the URL pattern that is matched by this rule. This pattern can be
// used with any of the {get|put|post|delete|patch} methods. A custom method
// can be defined using the 'custom' field.
oneof pattern {
// Used for listing and getting information about resources.
string get = 2;
// Used for updating a resource.
string put = 3;
// Used for creating a resource.
string post = 4;
// Used for deleting a resource.
string delete = 5;
// Used for updating a resource.
string patch = 6;
// The custom pattern is used for specifying an HTTP method that is not
// included in the `pattern` field, such as HEAD, or "*" to leave the
// HTTP method unspecified for this rule. The wild-card rule is useful
// for services that provide content to Web (HTML) clients.
CustomHttpPattern custom = 8;
}
// The name of the request field whose value is mapped to the HTTP body, or
// `*` for mapping all fields not captured by the path pattern to the HTTP
// body. NOTE: the referred field must not be a repeated field and must be
// present at the top-level of request message type.
string body = 7;
// Additional HTTP bindings for the selector. Nested bindings must
// not contain an `additional_bindings` field themselves (that is,
// the nesting may only be one level deep).
repeated HttpRule additional_bindings = 11;
}
// A custom pattern is used for defining a custom HTTP verb.
message CustomHttpPattern {
// The name of this custom HTTP verb.
string kind = 1;
// The path matched by this custom verb.
string path = 2;
}
buildstream-1.6.9/buildstream/_protos/google/api/http_pb2.py 0000664 0000000 0000000 00000005614 14375152700 0024175 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/http.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x15google/api/http.proto\x12\ngoogle.api\"T\n\x04Http\x12#\n\x05rules\x18\x01 \x03(\x0b\x32\x14.google.api.HttpRule\x12\'\n\x1f\x66ully_decode_reserved_expansion\x18\x02 \x01(\x08\"\xea\x01\n\x08HttpRule\x12\x10\n\x08selector\x18\x01 \x01(\t\x12\r\n\x03get\x18\x02 \x01(\tH\x00\x12\r\n\x03put\x18\x03 \x01(\tH\x00\x12\x0e\n\x04post\x18\x04 \x01(\tH\x00\x12\x10\n\x06\x64\x65lete\x18\x05 \x01(\tH\x00\x12\x0f\n\x05patch\x18\x06 \x01(\tH\x00\x12/\n\x06\x63ustom\x18\x08 \x01(\x0b\x32\x1d.google.api.CustomHttpPatternH\x00\x12\x0c\n\x04\x62ody\x18\x07 \x01(\t\x12\x31\n\x13\x61\x64\x64itional_bindings\x18\x0b \x03(\x0b\x32\x14.google.api.HttpRuleB\t\n\x07pattern\"/\n\x11\x43ustomHttpPattern\x12\x0c\n\x04kind\x18\x01 \x01(\t\x12\x0c\n\x04path\x18\x02 \x01(\tBj\n\x0e\x63om.google.apiB\tHttpProtoP\x01ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\xf8\x01\x01\xa2\x02\x04GAPIb\x06proto3')
_HTTP = DESCRIPTOR.message_types_by_name['Http']
_HTTPRULE = DESCRIPTOR.message_types_by_name['HttpRule']
_CUSTOMHTTPPATTERN = DESCRIPTOR.message_types_by_name['CustomHttpPattern']
Http = _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), {
'DESCRIPTOR' : _HTTP,
'__module__' : 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.Http)
})
_sym_db.RegisterMessage(Http)
HttpRule = _reflection.GeneratedProtocolMessageType('HttpRule', (_message.Message,), {
'DESCRIPTOR' : _HTTPRULE,
'__module__' : 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.HttpRule)
})
_sym_db.RegisterMessage(HttpRule)
CustomHttpPattern = _reflection.GeneratedProtocolMessageType('CustomHttpPattern', (_message.Message,), {
'DESCRIPTOR' : _CUSTOMHTTPPATTERN,
'__module__' : 'google.api.http_pb2'
# @@protoc_insertion_point(class_scope:google.api.CustomHttpPattern)
})
_sym_db.RegisterMessage(CustomHttpPattern)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\016com.google.apiB\tHttpProtoP\001ZAgoogle.golang.org/genproto/googleapis/api/annotations;annotations\370\001\001\242\002\004GAPI'
_HTTP._serialized_start=37
_HTTP._serialized_end=121
_HTTPRULE._serialized_start=124
_HTTPRULE._serialized_end=358
_CUSTOMHTTPPATTERN._serialized_start=360
_CUSTOMHTTPPATTERN._serialized_end=407
# @@protoc_insertion_point(module_scope)
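# --- Illustrative sketch, not part of the generated code above -------------
# The HttpRule class generated above describes how a single RPC maps onto a
# REST endpoint. A hand-built rule in the style of the buildstream.v2 mapping
# might look like this; the selector and paths are made-up example values:
def _example_build_http_rule():
    rule = HttpRule(
        selector='buildstream.v2.ReferenceStorage.GetReference',
        get='/v2/{instance_name=**}/buildstream/refs/{key}',
    )
    # An additional binding exposes the same RPC on an alternative path.
    rule.additional_bindings.add(get='/v2/buildstream/refs/{key}')
    return rule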
buildstream-1.6.9/buildstream/_protos/google/api/http_pb2_grpc.py 0000664 0000000 0000000 00000000237 14375152700 0025204 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
buildstream-1.6.9/buildstream/_protos/google/bytestream/ 0000775 0000000 0000000 00000000000 14375152700 0023501 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/bytestream/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0025600 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/bytestream/bytestream.proto 0000664 0000000 0000000 00000016661 14375152700 0026757 0 ustar 00root root 0000000 0000000 // Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bytestream;
import "google/api/annotations.proto";
import "google/protobuf/wrappers.proto";
option go_package = "google.golang.org/genproto/googleapis/bytestream;bytestream";
option java_outer_classname = "ByteStreamProto";
option java_package = "com.google.bytestream";
// #### Introduction
//
// The Byte Stream API enables a client to read and write a stream of bytes to
// and from a resource. Resources have names, and these names are supplied in
// the API calls below to identify the resource that is being read from or
// written to.
//
// All implementations of the Byte Stream API export the interface defined here:
//
// * `Read()`: Reads the contents of a resource.
//
// * `Write()`: Writes the contents of a resource. The client can call `Write()`
// multiple times with the same resource and can check the status of the write
// by calling `QueryWriteStatus()`.
//
// #### Service parameters and metadata
//
// The ByteStream API provides no direct way to access/modify any metadata
// associated with the resource.
//
// #### Errors
//
// The errors returned by the service are in the Google canonical error space.
service ByteStream {
// `Read()` is used to retrieve the contents of a resource as a sequence
// of bytes. The bytes are returned in a sequence of responses, and the
// responses are delivered as the results of a server-side streaming RPC.
rpc Read(ReadRequest) returns (stream ReadResponse);
// `Write()` is used to send the contents of a resource as a sequence of
// bytes. The bytes are sent in a sequence of request protos of a client-side
// streaming RPC.
//
// A `Write()` action is resumable. If there is an error or the connection is
// broken during the `Write()`, the client should check the status of the
// `Write()` by calling `QueryWriteStatus()` and continue writing from the
// returned `committed_size`. This may be less than the amount of data the
// client previously sent.
//
// Calling `Write()` on a resource name that was previously written and
// finalized could cause an error, depending on whether the underlying service
// allows over-writing of previously written resources.
//
// When the client closes the request channel, the service will respond with
// a `WriteResponse`. The service will not view the resource as `complete`
// until the client has sent a `WriteRequest` with `finish_write` set to
// `true`. Sending any requests on a stream after sending a request with
// `finish_write` set to `true` will cause an error. The client **should**
// check the `WriteResponse` it receives to determine how much data the
// service was able to commit and whether the service views the resource as
// `complete` or not.
rpc Write(stream WriteRequest) returns (WriteResponse);
// `QueryWriteStatus()` is used to find the `committed_size` for a resource
// that is being written, which can then be used as the `write_offset` for
// the next `Write()` call.
//
// If the resource does not exist (i.e., the resource has been deleted, or the
// first `Write()` has not yet reached the service), this method returns the
// error `NOT_FOUND`.
//
// The client **may** call `QueryWriteStatus()` at any time to determine how
// much data has been processed for this resource. This is useful if the
// client is buffering data and needs to know which data can be safely
// evicted. For any sequence of `QueryWriteStatus()` calls for a given
// resource name, the sequence of returned `committed_size` values will be
// non-decreasing.
rpc QueryWriteStatus(QueryWriteStatusRequest) returns (QueryWriteStatusResponse);
}
// Request object for ByteStream.Read.
message ReadRequest {
// The name of the resource to read.
string resource_name = 1;
// The offset for the first byte to return in the read, relative to the start
// of the resource.
//
// A `read_offset` that is negative or greater than the size of the resource
// will cause an `OUT_OF_RANGE` error.
int64 read_offset = 2;
// The maximum number of `data` bytes the server is allowed to return in the
// sum of all `ReadResponse` messages. A `read_limit` of zero indicates that
// there is no limit, and a negative `read_limit` will cause an error.
//
// If the stream returns fewer bytes than allowed by the `read_limit` and no
// error occurred, the stream includes all data from the `read_offset` to the
// end of the resource.
int64 read_limit = 3;
}
// Response object for ByteStream.Read.
message ReadResponse {
// A portion of the data for the resource. The service **may** leave `data`
// empty for any given `ReadResponse`. This enables the service to inform the
// client that the request is still live while it is running an operation to
// generate more data.
bytes data = 10;
}
// Request object for ByteStream.Write.
message WriteRequest {
// The name of the resource to write. This **must** be set on the first
// `WriteRequest` of each `Write()` action. If it is set on subsequent calls,
// it **must** match the value of the first request.
string resource_name = 1;
// The offset from the beginning of the resource at which the data should be
// written. It is required on all `WriteRequest`s.
//
// In the first `WriteRequest` of a `Write()` action, it indicates
// the initial offset for the `Write()` call. The value **must** be equal to
// the `committed_size` that a call to `QueryWriteStatus()` would return.
//
// On subsequent calls, this value **must** be set and **must** be equal to
// the sum of the first `write_offset` and the sizes of all `data` bundles
// sent previously on this stream.
//
// An incorrect value will cause an error.
int64 write_offset = 2;
// If `true`, this indicates that the write is complete. Sending any
// `WriteRequest`s subsequent to one in which `finish_write` is `true` will
// cause an error.
bool finish_write = 3;
// A portion of the data for the resource. The client **may** leave `data`
// empty for any given `WriteRequest`. This enables the client to inform the
// service that the request is still live while it is running an operation to
// generate more data.
bytes data = 10;
}
// Response object for ByteStream.Write.
message WriteResponse {
// The number of bytes that have been processed for the given resource.
int64 committed_size = 1;
}
// Request object for ByteStream.QueryWriteStatus.
message QueryWriteStatusRequest {
// The name of the resource whose write status is being requested.
string resource_name = 1;
}
// Response object for ByteStream.QueryWriteStatus.
message QueryWriteStatusResponse {
// The number of bytes that have been processed for the given resource.
int64 committed_size = 1;
// `complete` is `true` only if the client has sent a `WriteRequest` with
// `finish_write` set to true, and the server has processed that request.
bool complete = 2;
}
buildstream-1.6.9/buildstream/_protos/google/bytestream/bytestream_pb2.py 0000664 0000000 0000000 00000012206 14375152700 0026776 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/bytestream/bytestream.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"google/bytestream/bytestream.proto\x12\x11google.bytestream\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/wrappers.proto\"M\n\x0bReadRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x13\n\x0bread_offset\x18\x02 \x01(\x03\x12\x12\n\nread_limit\x18\x03 \x01(\x03\"\x1c\n\x0cReadResponse\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"_\n\x0cWriteRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\x14\n\x0cwrite_offset\x18\x02 \x01(\x03\x12\x14\n\x0c\x66inish_write\x18\x03 \x01(\x08\x12\x0c\n\x04\x64\x61ta\x18\n \x01(\x0c\"\'\n\rWriteResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\"0\n\x17QueryWriteStatusRequest\x12\x15\n\rresource_name\x18\x01 \x01(\t\"D\n\x18QueryWriteStatusResponse\x12\x16\n\x0e\x63ommitted_size\x18\x01 \x01(\x03\x12\x10\n\x08\x63omplete\x18\x02 \x01(\x08\x32\x92\x02\n\nByteStream\x12I\n\x04Read\x12\x1e.google.bytestream.ReadRequest\x1a\x1f.google.bytestream.ReadResponse0\x01\x12L\n\x05Write\x12\x1f.google.bytestream.WriteRequest\x1a .google.bytestream.WriteResponse(\x01\x12k\n\x10QueryWriteStatus\x12*.google.bytestream.QueryWriteStatusRequest\x1a+.google.bytestream.QueryWriteStatusResponseBe\n\x15\x63om.google.bytestreamB\x0f\x42yteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestreamb\x06proto3')
_READREQUEST = DESCRIPTOR.message_types_by_name['ReadRequest']
_READRESPONSE = DESCRIPTOR.message_types_by_name['ReadResponse']
_WRITEREQUEST = DESCRIPTOR.message_types_by_name['WriteRequest']
_WRITERESPONSE = DESCRIPTOR.message_types_by_name['WriteResponse']
_QUERYWRITESTATUSREQUEST = DESCRIPTOR.message_types_by_name['QueryWriteStatusRequest']
_QUERYWRITESTATUSRESPONSE = DESCRIPTOR.message_types_by_name['QueryWriteStatusResponse']
ReadRequest = _reflection.GeneratedProtocolMessageType('ReadRequest', (_message.Message,), {
'DESCRIPTOR' : _READREQUEST,
'__module__' : 'google.bytestream.bytestream_pb2'
# @@protoc_insertion_point(class_scope:google.bytestream.ReadRequest)
})
_sym_db.RegisterMessage(ReadRequest)
ReadResponse = _reflection.GeneratedProtocolMessageType('ReadResponse', (_message.Message,), {
'DESCRIPTOR' : _READRESPONSE,
'__module__' : 'google.bytestream.bytestream_pb2'
# @@protoc_insertion_point(class_scope:google.bytestream.ReadResponse)
})
_sym_db.RegisterMessage(ReadResponse)
WriteRequest = _reflection.GeneratedProtocolMessageType('WriteRequest', (_message.Message,), {
'DESCRIPTOR' : _WRITEREQUEST,
'__module__' : 'google.bytestream.bytestream_pb2'
# @@protoc_insertion_point(class_scope:google.bytestream.WriteRequest)
})
_sym_db.RegisterMessage(WriteRequest)
WriteResponse = _reflection.GeneratedProtocolMessageType('WriteResponse', (_message.Message,), {
'DESCRIPTOR' : _WRITERESPONSE,
'__module__' : 'google.bytestream.bytestream_pb2'
# @@protoc_insertion_point(class_scope:google.bytestream.WriteResponse)
})
_sym_db.RegisterMessage(WriteResponse)
QueryWriteStatusRequest = _reflection.GeneratedProtocolMessageType('QueryWriteStatusRequest', (_message.Message,), {
'DESCRIPTOR' : _QUERYWRITESTATUSREQUEST,
'__module__' : 'google.bytestream.bytestream_pb2'
# @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusRequest)
})
_sym_db.RegisterMessage(QueryWriteStatusRequest)
QueryWriteStatusResponse = _reflection.GeneratedProtocolMessageType('QueryWriteStatusResponse', (_message.Message,), {
'DESCRIPTOR' : _QUERYWRITESTATUSRESPONSE,
'__module__' : 'google.bytestream.bytestream_pb2'
# @@protoc_insertion_point(class_scope:google.bytestream.QueryWriteStatusResponse)
})
_sym_db.RegisterMessage(QueryWriteStatusResponse)
_BYTESTREAM = DESCRIPTOR.services_by_name['ByteStream']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\025com.google.bytestreamB\017ByteStreamProtoZ;google.golang.org/genproto/googleapis/bytestream;bytestream'
_READREQUEST._serialized_start=119
_READREQUEST._serialized_end=196
_READRESPONSE._serialized_start=198
_READRESPONSE._serialized_end=226
_WRITEREQUEST._serialized_start=228
_WRITEREQUEST._serialized_end=323
_WRITERESPONSE._serialized_start=325
_WRITERESPONSE._serialized_end=364
_QUERYWRITESTATUSREQUEST._serialized_start=366
_QUERYWRITESTATUSREQUEST._serialized_end=414
_QUERYWRITESTATUSRESPONSE._serialized_start=416
_QUERYWRITESTATUSRESPONSE._serialized_end=484
_BYTESTREAM._serialized_start=487
_BYTESTREAM._serialized_end=761
# @@protoc_insertion_point(module_scope)
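# --- Illustrative sketch, not part of the generated code above -------------
# ByteStream.Write() is a client-streaming RPC, so an upload is expressed as a
# sequence of WriteRequest messages. A rough helper that chunks an in-memory
# blob into such a sequence could look like this; the chunk size is an
# arbitrary choice for illustration:
def _example_write_requests(resource_name, data, chunk_size=64 * 1024):
    offset = 0
    while True:
        chunk = data[offset:offset + chunk_size]
        last = offset + len(chunk) >= len(data)
        yield WriteRequest(
            # resource_name is only required on the first request of a Write().
            resource_name=resource_name if offset == 0 else '',
            write_offset=offset,
            data=chunk,
            finish_write=last,
        )
        if last:
            return
        offset += len(chunk)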
buildstream-1.6.9/buildstream/_protos/google/bytestream/bytestream_pb2_grpc.py 0000664 0000000 0000000 00000024756 14375152700 0030026 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.google.bytestream import bytestream_pb2 as google_dot_bytestream_dot_bytestream__pb2
class ByteStreamStub(object):
"""#### Introduction
The Byte Stream API enables a client to read and write a stream of bytes to
and from a resource. Resources have names, and these names are supplied in
the API calls below to identify the resource that is being read from or
written to.
All implementations of the Byte Stream API export the interface defined here:
* `Read()`: Reads the contents of a resource.
* `Write()`: Writes the contents of a resource. The client can call `Write()`
multiple times with the same resource and can check the status of the write
by calling `QueryWriteStatus()`.
#### Service parameters and metadata
The ByteStream API provides no direct way to access/modify any metadata
associated with the resource.
#### Errors
The errors returned by the service are in the Google canonical error space.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Read = channel.unary_stream(
'/google.bytestream.ByteStream/Read',
request_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
response_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
)
self.Write = channel.stream_unary(
'/google.bytestream.ByteStream/Write',
request_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
response_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
)
self.QueryWriteStatus = channel.unary_unary(
'/google.bytestream.ByteStream/QueryWriteStatus',
request_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
response_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
)
class ByteStreamServicer(object):
"""#### Introduction
The Byte Stream API enables a client to read and write a stream of bytes to
and from a resource. Resources have names, and these names are supplied in
the API calls below to identify the resource that is being read from or
written to.
All implementations of the Byte Stream API export the interface defined here:
* `Read()`: Reads the contents of a resource.
* `Write()`: Writes the contents of a resource. The client can call `Write()`
multiple times with the same resource and can check the status of the write
by calling `QueryWriteStatus()`.
#### Service parameters and metadata
The ByteStream API provides no direct way to access/modify any metadata
associated with the resource.
#### Errors
The errors returned by the service are in the Google canonical error space.
"""
def Read(self, request, context):
"""`Read()` is used to retrieve the contents of a resource as a sequence
of bytes. The bytes are returned in a sequence of responses, and the
responses are delivered as the results of a server-side streaming RPC.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Write(self, request_iterator, context):
"""`Write()` is used to send the contents of a resource as a sequence of
bytes. The bytes are sent in a sequence of request protos of a client-side
streaming RPC.
A `Write()` action is resumable. If there is an error or the connection is
broken during the `Write()`, the client should check the status of the
`Write()` by calling `QueryWriteStatus()` and continue writing from the
returned `committed_size`. This may be less than the amount of data the
client previously sent.
Calling `Write()` on a resource name that was previously written and
finalized could cause an error, depending on whether the underlying service
allows over-writing of previously written resources.
When the client closes the request channel, the service will respond with
a `WriteResponse`. The service will not view the resource as `complete`
until the client has sent a `WriteRequest` with `finish_write` set to
`true`. Sending any requests on a stream after sending a request with
`finish_write` set to `true` will cause an error. The client **should**
check the `WriteResponse` it receives to determine how much data the
service was able to commit and whether the service views the resource as
`complete` or not.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def QueryWriteStatus(self, request, context):
"""`QueryWriteStatus()` is used to find the `committed_size` for a resource
that is being written, which can then be used as the `write_offset` for
the next `Write()` call.
If the resource does not exist (i.e., the resource has been deleted, or the
first `Write()` has not yet reached the service), this method returns the
error `NOT_FOUND`.
The client **may** call `QueryWriteStatus()` at any time to determine how
much data has been processed for this resource. This is useful if the
client is buffering data and needs to know which data can be safely
evicted. For any sequence of `QueryWriteStatus()` calls for a given
resource name, the sequence of returned `committed_size` values will be
non-decreasing.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ByteStreamServicer_to_server(servicer, server):
rpc_method_handlers = {
'Read': grpc.unary_stream_rpc_method_handler(
servicer.Read,
request_deserializer=google_dot_bytestream_dot_bytestream__pb2.ReadRequest.FromString,
response_serializer=google_dot_bytestream_dot_bytestream__pb2.ReadResponse.SerializeToString,
),
'Write': grpc.stream_unary_rpc_method_handler(
servicer.Write,
request_deserializer=google_dot_bytestream_dot_bytestream__pb2.WriteRequest.FromString,
response_serializer=google_dot_bytestream_dot_bytestream__pb2.WriteResponse.SerializeToString,
),
'QueryWriteStatus': grpc.unary_unary_rpc_method_handler(
servicer.QueryWriteStatus,
request_deserializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.FromString,
response_serializer=google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.bytestream.ByteStream', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class ByteStream(object):
"""#### Introduction
The Byte Stream API enables a client to read and write a stream of bytes to
and from a resource. Resources have names, and these names are supplied in
the API calls below to identify the resource that is being read from or
written to.
All implementations of the Byte Stream API export the interface defined here:
* `Read()`: Reads the contents of a resource.
* `Write()`: Writes the contents of a resource. The client can call `Write()`
multiple times with the same resource and can check the status of the write
by calling `QueryWriteStatus()`.
#### Service parameters and metadata
The ByteStream API provides no direct way to access/modify any metadata
associated with the resource.
#### Errors
The errors returned by the service are in the Google canonical error space.
"""
@staticmethod
def Read(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/google.bytestream.ByteStream/Read',
google_dot_bytestream_dot_bytestream__pb2.ReadRequest.SerializeToString,
google_dot_bytestream_dot_bytestream__pb2.ReadResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Write(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/google.bytestream.ByteStream/Write',
google_dot_bytestream_dot_bytestream__pb2.WriteRequest.SerializeToString,
google_dot_bytestream_dot_bytestream__pb2.WriteResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def QueryWriteStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.bytestream.ByteStream/QueryWriteStatus',
google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest.SerializeToString,
google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
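# --- Illustrative sketch, not part of the generated code above -------------
# A rough example of the resumable upload flow described in bytestream.proto:
# stream WriteRequests through ByteStreamStub.Write() and, if the stream
# fails, ask QueryWriteStatus() how much was committed before retrying from
# that offset. The endpoint, resource name and retry limit are hypothetical.
def _example_resumable_write(endpoint, resource_name, data, chunk_size=64 * 1024):
    channel = grpc.insecure_channel(endpoint)
    stub = ByteStreamStub(channel)
    def requests(start):
        offset = start
        while True:
            chunk = data[offset:offset + chunk_size]
            last = offset + len(chunk) >= len(data)
            yield google_dot_bytestream_dot_bytestream__pb2.WriteRequest(
                # The resource name is only set on the first request; the
                # first write_offset must equal the server's committed_size.
                resource_name=resource_name if offset == start else '',
                write_offset=offset,
                data=chunk,
                finish_write=last,
            )
            if last:
                return
            offset += len(chunk)
    committed = 0
    for _attempt in range(3):      # bounded retries, just for the sketch
        try:
            return stub.Write(requests(committed)).committed_size
        except grpc.RpcError:
            status = stub.QueryWriteStatus(
                google_dot_bytestream_dot_bytestream__pb2.QueryWriteStatusRequest(
                    resource_name=resource_name))
            if status.complete:
                return status.committed_size
            committed = status.committed_size
    raise RuntimeError('upload did not complete')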
buildstream-1.6.9/buildstream/_protos/google/longrunning/ 0000775 0000000 0000000 00000000000 14375152700 0023662 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/longrunning/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0025761 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/longrunning/operations.proto 0000664 0000000 0000000 00000015457 14375152700 0027146 0 ustar 00root root 0000000 0000000 // Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.longrunning;
import "google/api/annotations.proto";
import "google/protobuf/any.proto";
import "google/protobuf/empty.proto";
import "google/rpc/status.proto";
option csharp_namespace = "Google.LongRunning";
option go_package = "google.golang.org/genproto/googleapis/longrunning;longrunning";
option java_multiple_files = true;
option java_outer_classname = "OperationsProto";
option java_package = "com.google.longrunning";
option php_namespace = "Google\\LongRunning";
// Manages long-running operations with an API service.
//
// When an API method normally takes a long time to complete, it can be designed
// to return [Operation][google.longrunning.Operation] to the client, and the client can use this
// interface to receive the real response asynchronously by polling the
// operation resource, or pass the operation resource to another API (such as
// Google Cloud Pub/Sub API) to receive the response. Any API service that
// returns long-running operations should implement the `Operations` interface
// so developers can have a consistent client experience.
service Operations {
// Lists operations that match the specified filter in the request. If the
// server doesn't support this method, it returns `UNIMPLEMENTED`.
//
// NOTE: the `name` binding below allows API services to override the binding
// to use different resource name schemes, such as `users/*/operations`.
rpc ListOperations(ListOperationsRequest) returns (ListOperationsResponse) {
option (google.api.http) = { get: "/v1/{name=operations}" };
}
// Gets the latest state of a long-running operation. Clients can use this
// method to poll the operation result at intervals as recommended by the API
// service.
rpc GetOperation(GetOperationRequest) returns (Operation) {
option (google.api.http) = { get: "/v1/{name=operations/**}" };
}
// Deletes a long-running operation. This method indicates that the client is
// no longer interested in the operation result. It does not cancel the
// operation. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`.
rpc DeleteOperation(DeleteOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { delete: "/v1/{name=operations/**}" };
}
// Starts asynchronous cancellation on a long-running operation. The server
// makes a best effort to cancel the operation, but success is not
// guaranteed. If the server doesn't support this method, it returns
// `google.rpc.Code.UNIMPLEMENTED`. Clients can use
// [Operations.GetOperation][google.longrunning.Operations.GetOperation] or
// other methods to check whether the cancellation succeeded or whether the
// operation completed despite cancellation. On successful cancellation,
// the operation is not deleted; instead, it becomes an operation with
// an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
// corresponding to `Code.CANCELLED`.
rpc CancelOperation(CancelOperationRequest) returns (google.protobuf.Empty) {
option (google.api.http) = { post: "/v1/{name=operations/**}:cancel" body: "*" };
}
}
// This resource represents a long-running operation that is the result of a
// network API call.
message Operation {
// The server-assigned name, which is only unique within the same service that
// originally returns it. If you use the default HTTP mapping, the
// `name` should have the format of `operations/some/unique/name`.
string name = 1;
// Service-specific metadata associated with the operation. It typically
// contains progress information and common metadata such as create time.
// Some services might not provide such metadata. Any method that returns a
// long-running operation should document the metadata type, if any.
google.protobuf.Any metadata = 2;
// If the value is `false`, it means the operation is still in progress.
// If true, the operation is completed, and either `error` or `response` is
// available.
bool done = 3;
// The operation result, which can be either an `error` or a valid `response`.
// If `done` == `false`, neither `error` nor `response` is set.
// If `done` == `true`, exactly one of `error` or `response` is set.
oneof result {
// The error result of the operation in case of failure or cancellation.
google.rpc.Status error = 4;
// The normal response of the operation in case of success. If the original
// method returns no data on success, such as `Delete`, the response is
// `google.protobuf.Empty`. If the original method is standard
// `Get`/`Create`/`Update`, the response should be the resource. For other
// methods, the response should have the type `XxxResponse`, where `Xxx`
// is the original method name. For example, if the original method name
// is `TakeSnapshot()`, the inferred response type is
// `TakeSnapshotResponse`.
google.protobuf.Any response = 5;
}
}
// The request message for [Operations.GetOperation][google.longrunning.Operations.GetOperation].
message GetOperationRequest {
// The name of the operation resource.
string name = 1;
}
// The request message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsRequest {
// The name of the operation collection.
string name = 4;
// The standard list filter.
string filter = 1;
// The standard list page size.
int32 page_size = 2;
// The standard list page token.
string page_token = 3;
}
// The response message for [Operations.ListOperations][google.longrunning.Operations.ListOperations].
message ListOperationsResponse {
// A list of operations that matches the specified filter in the request.
repeated Operation operations = 1;
// The standard List next-page token.
string next_page_token = 2;
}
// The request message for [Operations.CancelOperation][google.longrunning.Operations.CancelOperation].
message CancelOperationRequest {
// The name of the operation resource to be cancelled.
string name = 1;
}
// The request message for [Operations.DeleteOperation][google.longrunning.Operations.DeleteOperation].
message DeleteOperationRequest {
// The name of the operation resource to be deleted.
string name = 1;
}
buildstream-1.6.9/buildstream/_protos/google/longrunning/operations_pb2.py 0000664 0000000 0000000 00000015755 14375152700 0027177 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/longrunning/operations.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from buildstream._protos.google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from buildstream._protos.google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n#google/longrunning/operations.proto\x12\x12google.longrunning\x1a\x1cgoogle/api/annotations.proto\x1a\x19google/protobuf/any.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x17google/rpc/status.proto\"\xa8\x01\n\tOperation\x12\x0c\n\x04name\x18\x01 \x01(\t\x12&\n\x08metadata\x18\x02 \x01(\x0b\x32\x14.google.protobuf.Any\x12\x0c\n\x04\x64one\x18\x03 \x01(\x08\x12#\n\x05\x65rror\x18\x04 \x01(\x0b\x32\x12.google.rpc.StatusH\x00\x12(\n\x08response\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x08\n\x06result\"#\n\x13GetOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\\\n\x15ListOperationsRequest\x12\x0c\n\x04name\x18\x04 \x01(\t\x12\x0e\n\x06\x66ilter\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x16ListOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"&\n\x16\x43\x61ncelOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"&\n\x16\x44\x65leteOperationRequest\x12\x0c\n\x04name\x18\x01 \x01(\t2\x8c\x04\n\nOperations\x12\x86\x01\n\x0eListOperations\x12).google.longrunning.ListOperationsRequest\x1a*.google.longrunning.ListOperationsResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/v1/{name=operations}\x12x\n\x0cGetOperation\x12\'.google.longrunning.GetOperationRequest\x1a\x1d.google.longrunning.Operation\" \x82\xd3\xe4\x93\x02\x1a\x12\x18/v1/{name=operations/**}\x12w\n\x0f\x44\x65leteOperation\x12*.google.longrunning.DeleteOperationRequest\x1a\x16.google.protobuf.Empty\" \x82\xd3\xe4\x93\x02\x1a*\x18/v1/{name=operations/**}\x12\x81\x01\n\x0f\x43\x61ncelOperation\x12*.google.longrunning.CancelOperationRequest\x1a\x16.google.protobuf.Empty\"*\x82\xd3\xe4\x93\x02$\"\x1f/v1/{name=operations/**}:cancel:\x01*B\x94\x01\n\x16\x63om.google.longrunningB\x0fOperationsProtoP\x01Z=google.golang.org/genproto/googleapis/longrunning;longrunning\xaa\x02\x12Google.LongRunning\xca\x02\x12Google\\LongRunningb\x06proto3')
_OPERATION = DESCRIPTOR.message_types_by_name['Operation']
_GETOPERATIONREQUEST = DESCRIPTOR.message_types_by_name['GetOperationRequest']
_LISTOPERATIONSREQUEST = DESCRIPTOR.message_types_by_name['ListOperationsRequest']
_LISTOPERATIONSRESPONSE = DESCRIPTOR.message_types_by_name['ListOperationsResponse']
_CANCELOPERATIONREQUEST = DESCRIPTOR.message_types_by_name['CancelOperationRequest']
_DELETEOPERATIONREQUEST = DESCRIPTOR.message_types_by_name['DeleteOperationRequest']
Operation = _reflection.GeneratedProtocolMessageType('Operation', (_message.Message,), {
'DESCRIPTOR' : _OPERATION,
'__module__' : 'google.longrunning.operations_pb2'
# @@protoc_insertion_point(class_scope:google.longrunning.Operation)
})
_sym_db.RegisterMessage(Operation)
GetOperationRequest = _reflection.GeneratedProtocolMessageType('GetOperationRequest', (_message.Message,), {
'DESCRIPTOR' : _GETOPERATIONREQUEST,
'__module__' : 'google.longrunning.operations_pb2'
# @@protoc_insertion_point(class_scope:google.longrunning.GetOperationRequest)
})
_sym_db.RegisterMessage(GetOperationRequest)
ListOperationsRequest = _reflection.GeneratedProtocolMessageType('ListOperationsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTOPERATIONSREQUEST,
'__module__' : 'google.longrunning.operations_pb2'
# @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsRequest)
})
_sym_db.RegisterMessage(ListOperationsRequest)
ListOperationsResponse = _reflection.GeneratedProtocolMessageType('ListOperationsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTOPERATIONSRESPONSE,
'__module__' : 'google.longrunning.operations_pb2'
# @@protoc_insertion_point(class_scope:google.longrunning.ListOperationsResponse)
})
_sym_db.RegisterMessage(ListOperationsResponse)
CancelOperationRequest = _reflection.GeneratedProtocolMessageType('CancelOperationRequest', (_message.Message,), {
'DESCRIPTOR' : _CANCELOPERATIONREQUEST,
'__module__' : 'google.longrunning.operations_pb2'
# @@protoc_insertion_point(class_scope:google.longrunning.CancelOperationRequest)
})
_sym_db.RegisterMessage(CancelOperationRequest)
DeleteOperationRequest = _reflection.GeneratedProtocolMessageType('DeleteOperationRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETEOPERATIONREQUEST,
'__module__' : 'google.longrunning.operations_pb2'
# @@protoc_insertion_point(class_scope:google.longrunning.DeleteOperationRequest)
})
_sym_db.RegisterMessage(DeleteOperationRequest)
_OPERATIONS = DESCRIPTOR.services_by_name['Operations']
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\026com.google.longrunningB\017OperationsProtoP\001Z=google.golang.org/genproto/googleapis/longrunning;longrunning\252\002\022Google.LongRunning\312\002\022Google\\LongRunning'
_OPERATIONS.methods_by_name['ListOperations']._options = None
_OPERATIONS.methods_by_name['ListOperations']._serialized_options = b'\202\323\344\223\002\027\022\025/v1/{name=operations}'
_OPERATIONS.methods_by_name['GetOperation']._options = None
_OPERATIONS.methods_by_name['GetOperation']._serialized_options = b'\202\323\344\223\002\032\022\030/v1/{name=operations/**}'
_OPERATIONS.methods_by_name['DeleteOperation']._options = None
_OPERATIONS.methods_by_name['DeleteOperation']._serialized_options = b'\202\323\344\223\002\032*\030/v1/{name=operations/**}'
_OPERATIONS.methods_by_name['CancelOperation']._options = None
_OPERATIONS.methods_by_name['CancelOperation']._serialized_options = b'\202\323\344\223\002$\"\037/v1/{name=operations/**}:cancel:\001*'
_OPERATION._serialized_start=171
_OPERATION._serialized_end=339
_GETOPERATIONREQUEST._serialized_start=341
_GETOPERATIONREQUEST._serialized_end=376
_LISTOPERATIONSREQUEST._serialized_start=378
_LISTOPERATIONSREQUEST._serialized_end=470
_LISTOPERATIONSRESPONSE._serialized_start=472
_LISTOPERATIONSRESPONSE._serialized_end=572
_CANCELOPERATIONREQUEST._serialized_start=574
_CANCELOPERATIONREQUEST._serialized_end=612
_DELETEOPERATIONREQUEST._serialized_start=614
_DELETEOPERATIONREQUEST._serialized_end=652
_OPERATIONS._serialized_start=655
_OPERATIONS._serialized_end=1179
# @@protoc_insertion_point(module_scope)
buildstream-1.6.9/buildstream/_protos/google/longrunning/operations_pb2_grpc.py 0000664 0000000 0000000 00000025310 14375152700 0030176 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from buildstream._protos.google.longrunning import operations_pb2 as google_dot_longrunning_dot_operations__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class OperationsStub(object):
"""Manages long-running operations with an API service.
When an API method normally takes a long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or pass the operation resource to another API (such as
Google Cloud Pub/Sub API) to receive the response. Any API service that
returns long-running operations should implement the `Operations` interface
so developers can have a consistent client experience.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.ListOperations = channel.unary_unary(
'/google.longrunning.Operations/ListOperations',
request_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
)
self.GetOperation = channel.unary_unary(
'/google.longrunning.Operations/GetOperation',
request_serializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
)
self.DeleteOperation = channel.unary_unary(
'/google.longrunning.Operations/DeleteOperation',
request_serializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.CancelOperation = channel.unary_unary(
'/google.longrunning.Operations/CancelOperation',
request_serializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
class OperationsServicer(object):
"""Manages long-running operations with an API service.
When an API method normally takes a long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or pass the operation resource to another API (such as
Google Cloud Pub/Sub API) to receive the response. Any API service that
returns long-running operations should implement the `Operations` interface
so developers can have a consistent client experience.
"""
def ListOperations(self, request, context):
"""Lists operations that match the specified filter in the request. If the
server doesn't support this method, it returns `UNIMPLEMENTED`.
NOTE: the `name` binding below allows API services to override the binding
to use different resource name schemes, such as `users/*/operations`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOperation(self, request, context):
"""Gets the latest state of a long-running operation. Clients can use this
method to poll the operation result at intervals as recommended by the API
service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeleteOperation(self, request, context):
"""Deletes a long-running operation. This method indicates that the client is
no longer interested in the operation result. It does not cancel the
operation. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CancelOperation(self, request, context):
"""Starts asynchronous cancellation on a long-running operation. The server
makes a best effort to cancel the operation, but success is not
guaranteed. If the server doesn't support this method, it returns
`google.rpc.Code.UNIMPLEMENTED`. Clients can use
[Operations.GetOperation][google.longrunning.Operations.GetOperation] or
other methods to check whether the cancellation succeeded or whether the
operation completed despite cancellation. On successful cancellation,
the operation is not deleted; instead, it becomes an operation with
an [Operation.error][google.longrunning.Operation.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
corresponding to `Code.CANCELLED`.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_OperationsServicer_to_server(servicer, server):
rpc_method_handlers = {
'ListOperations': grpc.unary_unary_rpc_method_handler(
servicer.ListOperations,
request_deserializer=google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.SerializeToString,
),
'GetOperation': grpc.unary_unary_rpc_method_handler(
servicer.GetOperation,
request_deserializer=google_dot_longrunning_dot_operations__pb2.GetOperationRequest.FromString,
response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
),
'DeleteOperation': grpc.unary_unary_rpc_method_handler(
servicer.DeleteOperation,
request_deserializer=google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'CancelOperation': grpc.unary_unary_rpc_method_handler(
servicer.CancelOperation,
request_deserializer=google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'google.longrunning.Operations', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Operations(object):
"""Manages long-running operations with an API service.
When an API method normally takes a long time to complete, it can be designed
to return [Operation][google.longrunning.Operation] to the client, and the client can use this
interface to receive the real response asynchronously by polling the
operation resource, or pass the operation resource to another API (such as
Google Cloud Pub/Sub API) to receive the response. Any API service that
returns long-running operations should implement the `Operations` interface
so developers can have a consistent client experience.
"""
@staticmethod
def ListOperations(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/ListOperations',
google_dot_longrunning_dot_operations__pb2.ListOperationsRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.ListOperationsResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/GetOperation',
google_dot_longrunning_dot_operations__pb2.GetOperationRequest.SerializeToString,
google_dot_longrunning_dot_operations__pb2.Operation.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeleteOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/DeleteOperation',
google_dot_longrunning_dot_operations__pb2.DeleteOperationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def CancelOperation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/google.longrunning.Operations/CancelOperation',
google_dot_longrunning_dot_operations__pb2.CancelOperationRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
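# A brief usage sketch of this generated stub for polling a long-running
# operation; the channel address and operation name below are illustrative
# assumptions only, not values used anywhere by BuildStream itself:
#
#   import grpc
#   from buildstream._protos.google.longrunning import operations_pb2
#   from buildstream._protos.google.longrunning import operations_pb2_grpc
#
#   channel = grpc.insecure_channel('localhost:50051')  # hypothetical server address
#   stub = operations_pb2_grpc.OperationsStub(channel)
#   request = operations_pb2.GetOperationRequest(name='operations/example')  # hypothetical name
#   operation = stub.GetOperation(request)
#   if operation.done:
#       print('operation has completed')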
buildstream-1.6.9/buildstream/_protos/google/rpc/ 0000775 0000000 0000000 00000000000 14375152700 0022106 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/rpc/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0024205 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_protos/google/rpc/status.proto 0000664 0000000 0000000 00000007717 14375152700 0024532 0 ustar 00root root 0000000 0000000 // Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.rpc;
import "google/protobuf/any.proto";
option go_package = "google.golang.org/genproto/googleapis/rpc/status;status";
option java_multiple_files = true;
option java_outer_classname = "StatusProto";
option java_package = "com.google.rpc";
option objc_class_prefix = "RPC";
// The `Status` type defines a logical error model that is suitable for different
// programming environments, including REST APIs and RPC APIs. It is used by
// [gRPC](https://github.com/grpc). The error model is designed to be:
//
// - Simple to use and understand for most users
// - Flexible enough to meet unexpected needs
//
// # Overview
//
// The `Status` message contains three pieces of data: error code, error message,
// and error details. The error code should be an enum value of
// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The
// error message should be a developer-facing English message that helps
// developers *understand* and *resolve* the error. If a localized user-facing
// error message is needed, put the localized message in the error details or
// localize it in the client. The optional error details may contain arbitrary
// information about the error. There is a predefined set of error detail types
// in the package `google.rpc` that can be used for common error conditions.
//
// # Language mapping
//
// The `Status` message is the logical representation of the error model, but it
// is not necessarily the actual wire format. When the `Status` message is
// exposed in different client libraries and different wire protocols, it can be
// mapped differently. For example, it will likely be mapped to some exceptions
// in Java, but more likely mapped to some error codes in C.
//
// # Other uses
//
// The error model and the `Status` message can be used in a variety of
// environments, either with or without APIs, to provide a
// consistent developer experience across different environments.
//
// Example uses of this error model include:
//
// - Partial errors. If a service needs to return partial errors to the client,
// it may embed the `Status` in the normal response to indicate the partial
// errors.
//
// - Workflow errors. A typical workflow has multiple steps. Each step may
// have a `Status` message for error reporting.
//
// - Batch operations. If a client uses batch request and batch response, the
// `Status` message should be used directly inside batch response, one for
// each error sub-response.
//
// - Asynchronous operations. If an API call embeds asynchronous operation
// results in its response, the status of those operations should be
// represented directly using the `Status` message.
//
// - Logging. If some API errors are stored in logs, the message `Status` could
// be used directly after any stripping needed for security/privacy reasons.
message Status {
// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
int32 code = 1;
// A developer-facing error message, which should be in English. Any
// user-facing error message should be localized and sent in the
// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
string message = 2;
// A list of messages that carry the error details. There is a common set of
// message types for APIs to use.
repeated google.protobuf.Any details = 3;
}
buildstream-1.6.9/buildstream/_protos/google/rpc/status_pb2.py 0000664 0000000 0000000 00000003164 14375152700 0024552 0 ustar 00root root 0000000 0000000 # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/rpc/status.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17google/rpc/status.proto\x12\ngoogle.rpc\x1a\x19google/protobuf/any.proto\"N\n\x06Status\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x0f\n\x07message\x18\x02 \x01(\t\x12%\n\x07\x64\x65tails\x18\x03 \x03(\x0b\x32\x14.google.protobuf.AnyB^\n\x0e\x63om.google.rpcB\x0bStatusProtoP\x01Z7google.golang.org/genproto/googleapis/rpc/status;status\xa2\x02\x03RPCb\x06proto3')
_STATUS = DESCRIPTOR.message_types_by_name['Status']
Status = _reflection.GeneratedProtocolMessageType('Status', (_message.Message,), {
'DESCRIPTOR' : _STATUS,
'__module__' : 'google.rpc.status_pb2'
# @@protoc_insertion_point(class_scope:google.rpc.Status)
})
_sym_db.RegisterMessage(Status)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\016com.google.rpcB\013StatusProtoP\001Z7google.golang.org/genproto/googleapis/rpc/status;status\242\002\003RPC'
_STATUS._serialized_start=66
_STATUS._serialized_end=144
# @@protoc_insertion_point(module_scope)
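# A brief sketch of constructing and round-tripping the generated Status
# message; the code value (5, i.e. NOT_FOUND) and the message text are
# illustrative assumptions only:
#
#   from buildstream._protos.google.rpc import status_pb2
#
#   status = status_pb2.Status(code=5, message='blob not found')
#   data = status.SerializeToString()
#   assert status_pb2.Status.FromString(data).code == 5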
buildstream-1.6.9/buildstream/_protos/google/rpc/status_pb2_grpc.py 0000664 0000000 0000000 00000000237 14375152700 0025563 0 ustar 00root root 0000000 0000000 # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
buildstream-1.6.9/buildstream/_scheduler/ 0000775 0000000 0000000 00000000000 14375152700 0020476 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_scheduler/__init__.py 0000664 0000000 0000000 00000002140 14375152700 0022604 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from .queues import Queue, QueueStatus
from .queues.fetchqueue import FetchQueue
from .queues.trackqueue import TrackQueue
from .queues.buildqueue import BuildQueue
from .queues.pushqueue import PushQueue
from .queues.pullqueue import PullQueue
from .scheduler import Scheduler, SchedStatus
from .jobs import ElementJob, JobStatus
buildstream-1.6.9/buildstream/_scheduler/_multiprocessing.py 0000664 0000000 0000000 00000006133 14375152700 0024441 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# TLDR:
# ALWAYS use `.AsyncioSafeProcess` when you have an asyncio event loop running and need a `multiprocessing.Process`
#
#
# The upstream asyncio library doesn't play well with forking subprocesses while an event loop is running.
#
# The main problem that affects us is that the parent and the child will share some file descriptors.
# The most important one for us is the sig_handler_fd, which the loop uses to buffer signals received
# by the app so that the asyncio loop can handle them afterwards.
#
# This sharing means that when we send a signal to the child, the signal handler in the child will write
# it back to the parent's sig_handler_fd, forcing the parent to handle it too.
# This is a problem, for example, when we SIGTERM the process: the scheduler sends SIGTERM to all its children,
# which in turn makes the scheduler receive N SIGTERMs (one per child), which in turn makes it send SIGTERM to
# the children again...
#
# We therefore provide an `AsyncioSafeProcess`, derived from multiprocessing.Process, that automatically
# tries to clean up the loop and never calls `waitpid` on the child process itself, since doing so would
# break our child watchers.
#
#
# Relevant issues:
# - Asyncio: support fork (https://bugs.python.org/issue21998)
# - Asyncio: support multiprocessing (support fork) (https://bugs.python.org/issue22087)
# - Signal delivered to a subprocess triggers parent's handler (https://bugs.python.org/issue31489)
#
#
import multiprocessing
import signal
import sys
from asyncio import set_event_loop_policy
# _AsyncioSafeForkAwareProcess()
#
# Process class that doesn't call waitpid on its own.
# This prevents conflicts with the asyncio child watcher.
#
# Also automatically close any running asyncio loop before calling
# the actual run target
#
class _AsyncioSafeForkAwareProcess(multiprocessing.Process):
# pylint: disable=attribute-defined-outside-init
def start(self):
self._popen = self._Popen(self)
self._sentinel = self._popen.sentinel
def run(self):
signal.set_wakeup_fd(-1)
set_event_loop_policy(None)
super().run()
if sys.platform != "win32":
# Set the default event loop policy to automatically close our asyncio loop in child processes
AsyncioSafeProcess = _AsyncioSafeForkAwareProcess
else:
# Windows doesn't support ChildWatcher that way anyways, we'll need another
# implementation if we want it
AsyncioSafeProcess = multiprocessing.Process
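# A rough usage sketch, mirroring how Job.spawn() drives this class; the
# worker function and the no-op child handler are illustrative assumptions:
#
#   import asyncio
#   from buildstream._scheduler import _multiprocessing
#
#   def work():
#       print('running in the child process')
#
#   async def main():
#       process = _multiprocessing.AsyncioSafeProcess(target=work)
#       with asyncio.get_child_watcher() as watcher:
#           process.start()
#           watcher.add_child_handler(process.pid, lambda pid, returncode: None)
#
#   asyncio.get_event_loop().run_until_complete(main())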
buildstream-1.6.9/buildstream/_scheduler/jobs/ 0000775 0000000 0000000 00000000000 14375152700 0021433 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_scheduler/jobs/__init__.py 0000664 0000000 0000000 00000000210 14375152700 0023535 0 ustar 00root root 0000000 0000000 from .elementjob import ElementJob
from .cachesizejob import CacheSizeJob
from .cleanupjob import CleanupJob
from .job import JobStatus
buildstream-1.6.9/buildstream/_scheduler/jobs/cachesizejob.py 0000664 0000000 0000000 00000002606 14375152700 0024442 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# Tristan Daniël Maat
#
from .job import Job, JobStatus
class CacheSizeJob(Job):
def __init__(self, *args, complete_cb, **kwargs):
super().__init__(*args, **kwargs)
self._complete_cb = complete_cb
context = self._scheduler.context
self._artifacts = context.artifactcache
def child_process(self):
return self._artifacts.compute_cache_size()
def parent_complete(self, status, result):
if status == JobStatus.OK:
self._artifacts.set_cache_size(result)
if self._complete_cb:
self._complete_cb(status, result)
def child_process_data(self):
return {}
buildstream-1.6.9/buildstream/_scheduler/jobs/cleanupjob.py 0000664 0000000 0000000 00000003417 14375152700 0024134 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# Tristan Daniël Maat
#
from .job import Job, JobStatus
class CleanupJob(Job):
def __init__(self, *args, complete_cb, **kwargs):
super().__init__(*args, **kwargs)
self._complete_cb = complete_cb
context = self._scheduler.context
self._artifacts = context.artifactcache
def child_process(self):
def progress():
self.send_message('update-cache-size',
self._artifacts.get_cache_size())
return self._artifacts.clean(progress)
def handle_message(self, message_type, message):
# Update the cache size in the main process as we go,
# this provides better feedback in the UI.
if message_type == 'update-cache-size':
self._artifacts.set_cache_size(message)
return True
return False
def parent_complete(self, status, result):
if status == JobStatus.OK:
self._artifacts.set_cache_size(result)
if self._complete_cb:
self._complete_cb(status, result)
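# A rough sketch of how the scheduler might create and spawn one of these
# jobs; `scheduler` is assumed to be the running Scheduler instance, and the
# action name, log file template and callback are illustrative assumptions:
#
#   def cleanup_completed(status, result):
#       print('cleanup finished with', status)
#
#   job = CleanupJob(scheduler, 'cleanup', 'cleanup.{pid}.log',
#                    complete_cb=cleanup_completed)
#   job.spawn()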
buildstream-1.6.9/buildstream/_scheduler/jobs/elementjob.py 0000664 0000000 0000000 00000007743 14375152700 0024144 0 ustar 00root root 0000000 0000000 # Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Author:
# Tristan Daniël Maat
#
from ruamel import yaml
from ..._message import Message, MessageType
from .job import Job
# ElementJob()
#
# A job to run an element's commands. When this job is spawned
# `action_cb` will be called, and when it completes `complete_cb` will
# be called.
#
# Args:
# scheduler (Scheduler): The scheduler
# action_name (str): The queue action name
# max_retries (int): The maximum number of retries
# action_cb (callable): The function to execute on the child
# complete_cb (callable): The function to execute when the job completes
# element (Element): The element to work on
# kwargs: Remaining Job() constructor arguments
#
# Here is the calling signature of the action_cb:
#
# action_cb():
#
# This function will be called in the child task
#
# Args:
# element (Element): The element passed to the Job() constructor
#
# Returns:
# (object): Any simple Python object, including a string, int,
# bool, list or dict; this must be a simple serializable object.
#
# Here is the calling signature of the complete_cb:
#
# complete_cb():
#
# This function will be called when the child task completes
#
# Args:
# job (Job): The job object which completed
# element (Element): The element passed to the Job() constructor
# status (JobStatus): The status of whether the workload raised an exception
# result (object): The deserialized object returned by the `action_cb`, or None
# if `status` is not `JobStatus.OK`
#
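# A rough construction sketch (illustrative only); `scheduler`, `element`,
# `queue`, the action name, retry count and log file template are assumptions:
#
#   def assemble(element):
#       return element._assemble()
#
#   def assembled(job, element, status, result):
#       pass
#
#   job = ElementJob(scheduler, 'build', 'build.{pid}.log',
#                    max_retries=3,
#                    element=element,
#                    queue=queue,
#                    action_cb=assemble,
#                    complete_cb=assembled)
#   job.spawn()
#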
class ElementJob(Job):
def __init__(self, *args, element, queue, action_cb, complete_cb, **kwargs):
super().__init__(*args, **kwargs)
self.queue = queue
self._element = element
self._action_cb = action_cb # The action callable function
self._complete_cb = complete_cb # The complete callable function
# Set the task wide ID for logging purposes
self.set_task_id(element._unique_id)
@property
def element(self):
return self._element
def child_process(self):
# Print the element's environment at the beginning of any element's log file.
#
# This should probably be omitted for non-build tasks but it's harmless here
elt_env = self._element.get_environment()
env_dump = yaml.round_trip_dump(elt_env, default_flow_style=False, allow_unicode=True)
self.message(MessageType.LOG,
"Build environment for element {}".format(self._element.name),
detail=env_dump)
# Run the action
return self._action_cb(self._element)
def parent_complete(self, status, result):
self._complete_cb(self, self._element, status, self._result)
def message(self, message_type, message, **kwargs):
args = dict(kwargs)
args['scheduler'] = True
self._scheduler.context.message(
Message(self._element._unique_id,
message_type,
message,
**args))
def child_process_data(self):
data = {}
workspace = self._element._get_workspace()
if workspace is not None:
data['workspace'] = workspace.to_dict()
return data
buildstream-1.6.9/buildstream/_scheduler/jobs/job.py 0000664 0000000 0000000 00000054543 14375152700 0022572 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# Tristan Maat
# System imports
import os
import sys
import signal
import datetime
import traceback
import asyncio
import multiprocessing
# BuildStream toplevel imports
from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob
from ..._message import Message, MessageType, unconditional_messages
from ... import _signals, utils
from .. import _multiprocessing
# Return code values for the shutdown of job handling child processes
#
RC_OK = 0
RC_FAIL = 1
RC_PERM_FAIL = 2
RC_SKIPPED = 3
# JobStatus:
#
# The job completion status, passed back through the
# complete callbacks.
#
class JobStatus():
# Job succeeded
OK = 0
# A temporary BstError was raised
FAIL = 1
# A SkipJob was raised
SKIPPED = 3
# Used to distinguish between status messages and return values
class _Envelope():
def __init__(self, message_type, message):
self.message_type = message_type
self.message = message
# Job()
#
# The Job object represents a parallel task. When Job.spawn() is called,
# the given `Job.child_process()` will be called in a separate process, and
# `Job.parent_complete()` will be called with the action result in the
# calling process when the job completes.
#
# Args:
# scheduler (Scheduler): The scheduler
# action_name (str): The queue action name
# logfile (str): A template string that points to the logfile
# that should be used - should contain {pid}.
# max_retries (int): The maximum number of retries
#
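# A minimal subclass sketch (illustrative only) showing the methods a
# concrete job provides and the send_message()/handle_message() protocol;
# the 'progress' message type, the returned result and the constructor
# arguments are assumptions:
#
#   class ExampleJob(Job):
#
#       def child_process(self):
#           # Runs in the child process and may report back as it goes
#           self.send_message('progress', 50)
#           return {'answer': 42}
#
#       def handle_message(self, message_type, message):
#           # Runs in the parent for messages sent with send_message()
#           if message_type == 'progress':
#               return True
#           return False
#
#       def parent_complete(self, status, result):
#           # Runs in the parent once the child has exited
#           pass
#
#   job = ExampleJob(scheduler, 'example', 'example.{pid}.log')
#   job.spawn()
#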
class Job():
def __init__(self, scheduler, action_name, logfile, *, max_retries=0):
#
# Public members
#
self.action_name = action_name # The action name for the Queue
self.child_data = None # Data to be sent to the main process
#
# Private members
#
self._scheduler = scheduler # The scheduler
self._queue = multiprocessing.Queue() # A message passing queue
self._process = None # The Process object
self._watcher = None # Child process watcher
self._listening = False # Whether the parent is currently listening
self._suspended = False # Whether this job is currently suspended
self._max_retries = max_retries # Maximum number of automatic retries
self._result = None # Return value of child action in the parent
self._tries = 0 # Try count, for retryable jobs
self._terminated = False # Whether this job has been explicitly terminated
# If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
#
self._retry_flag = True
self._logfile = logfile
self._task_id = None
# spawn()
#
# Spawns the job.
#
def spawn(self):
self._tries += 1
self._parent_start_listening()
# Spawn the process
self._process = _multiprocessing.AsyncioSafeProcess(target=self._child_action, args=[self._queue])
# Block signals which are handled in the main process such that
# the child process does not inherit the parent's state, but the main
# process will be notified of any signal after we launch the child.
#
with _signals.blocked([signal.SIGINT, signal.SIGTSTP, signal.SIGTERM], ignore=False):
with asyncio.get_child_watcher() as watcher:
self._process.start()
# Register the process to call `_parent_child_completed` once it is done
# Here we delay the call to the next loop tick. This is in order to be running
# in the main thread, as the callback itself must be thread safe.
def on_completion(pid, returncode):
asyncio.get_event_loop().call_soon(self._parent_child_completed, pid, returncode)
watcher.add_child_handler(self._process.pid, on_completion)
# terminate()
#
# Politely request that an ongoing job terminate soon.
#
# This will send a SIGTERM signal to the Job process.
#
def terminate(self):
# First resume the job if it's suspended
self.resume(silent=True)
self.message(MessageType.STATUS, "{} terminating".format(self.action_name))
# Make sure there is no garbage on the queue
self._parent_stop_listening()
# Terminate the process using multiprocessing API pathway
self._process.terminate()
self._terminated = True
# get_terminated()
#
# Check if a job has been terminated.
#
# Returns:
# (bool): True in the main process if Job.terminate() was called.
#
def get_terminated(self):
return self._terminated
# kill()
#
# Forcefully kill the process, and any children it might have.
#
def kill(self):
# Force kill
self.message(MessageType.WARN,
"{} did not terminate gracefully, killing".format(self.action_name))
utils._kill_process_tree(self._process.pid)
# suspend()
#
# Suspend this job.
#
def suspend(self):
if not self._suspended:
self.message(MessageType.STATUS,
"{} suspending".format(self.action_name))
try:
# Use SIGTSTP so that child processes may handle and propagate
# it to processes they spawn that become session leaders
os.kill(self._process.pid, signal.SIGTSTP)
# For some reason we receive exactly one suspend event for every
# SIGTSTP we send to the forked child, even though the child forks
# call setsid(). We keep a count of these so we can ignore them
# in our event loop's suspend_event()
self._scheduler.internal_stops += 1
self._suspended = True
except ProcessLookupError:
# ignore, process has already exited
pass
# resume()
#
# Resume this suspended job.
#
def resume(self, silent=False):
if self._suspended:
if not silent and not self._scheduler.terminated:
self.message(MessageType.STATUS,
"{} resuming".format(self.action_name))
os.kill(self._process.pid, signal.SIGCONT)
self._suspended = False
# set_task_id()
#
# This is called by Job subclasses to set a plugin ID
# associated with the task at large (if any element is related
# to the task).
#
# The task ID helps keep messages in the frontend coherent
# in the case that multiple plugins log in the context of
# a single task (e.g. running integration commands should appear
# in the frontend for the element being built, not the element
# running the integration commands).
#
# Args:
# task_id (int): The plugin identifier for this task
#
def set_task_id(self, task_id):
self._task_id = task_id
# send_message()
#
# To be called from inside Job.child_process() implementations
# to send messages to the main process during processing.
#
# These messages will be processed by the class's Job.handle_message()
# implementation.
#
def send_message(self, message_type, message):
self._queue.put(_Envelope(message_type, message))
#######################################################
# Abstract Methods #
#######################################################
# handle_message()
#
# Handle a custom message. This will be called in the main process in
# response to any messages sent to the main process using the
# Job.send_message() API from inside a Job.child_process() implementation
#
# Args:
# message_type (str): A string to identify the message type
# message (any): A simple serializable object
#
# Returns:
# (bool): Should return a truthy value if message_type is handled.
#
def handle_message(self, message_type, message):
return False
# parent_complete()
#
# This will be executed after the job finishes, and is expected to
# pass the result to the main thread.
#
# Args:
# status (JobStatus): The job exit status
# result (any): The result returned by child_process().
#
def parent_complete(self, status, result):
raise ImplError("Job '{kind}' does not implement parent_complete()"
.format(kind=type(self).__name__))
# child_process()
#
# This will be executed after fork(), and is intended to perform
# the job's task.
#
# Returns:
# (any): A (simple!) object to be returned to the main thread
# as the result.
#
def child_process(self):
raise ImplError("Job '{kind}' does not implement child_process()"
.format(kind=type(self).__name__))
# message():
#
# Logs a message, this will be logged in the task's logfile and
# conditionally also be sent to the frontend.
#
# Args:
# message_type (MessageType): The type of message to send
# message (str): The message
# kwargs: Remaining Message() constructor arguments
#
def message(self, message_type, message, **kwargs):
args = dict(kwargs)
args['scheduler'] = True
self._scheduler.context.message(Message(None, message_type, message, **args))
# child_process_data()
#
# Abstract method to retrieve additional data that should be
# returned to the parent process. Note that the job result is
# retrieved independently.
#
# Values can later be retrieved in Job.child_data.
#
# Returns:
# (dict) A dict containing values to be reported to the main process
#
def child_process_data(self):
return {}
#######################################################
# Local Private Methods #
#######################################################
#
# Methods prefixed with the word 'child' take place in the child process
#
# Methods prefixed with the word 'parent' take place in the parent process
#
# Other methods can be called in both child or parent processes
#
#######################################################
# _child_action()
#
# Perform the action in the child process, this calls the action_cb.
#
# Args:
# queue (multiprocessing.Queue): The message queue for IPC
#
def _child_action(self, queue):
# This prevents some SIGTSTP signals from grandchildren
# being propagated up to the main process
os.setsid()
# First set back to the default signal handlers for the signals
# we handle, and then clear their blocked state.
#
signal_list = [signal.SIGTSTP, signal.SIGTERM]
for sig in signal_list:
signal.signal(sig, signal.SIG_DFL)
signal.pthread_sigmask(signal.SIG_UNBLOCK, signal_list)
# Assign the queue we passed across the process boundaries
#
# Set the global message handler in this child
# process to forward messages to the parent process
self._queue = queue
self._scheduler.context.set_message_handler(self._child_message_handler)
starttime = datetime.datetime.now()
stopped_time = None
def stop_time():
nonlocal stopped_time
stopped_time = datetime.datetime.now()
def resume_time():
nonlocal stopped_time
nonlocal starttime
starttime += (datetime.datetime.now() - stopped_time)
# Time, log and run the action function
#
with _signals.suspendable(stop_time, resume_time), \
self._scheduler.context.recorded_messages(self._logfile) as filename:
self.message(MessageType.START, self.action_name, logfile=filename)
try:
# Try the task action
result = self.child_process()
except SkipJob as e:
elapsed = datetime.datetime.now() - starttime
self.message(MessageType.SKIPPED, str(e),
elapsed=elapsed, logfile=filename)
# Alert parent of skip by return code
self._child_shutdown(RC_SKIPPED)
except BstError as e:
elapsed = datetime.datetime.now() - starttime
self._retry_flag = e.temporary
if self._retry_flag and (self._tries <= self._max_retries):
self.message(MessageType.FAIL,
"Try #{} failed, retrying".format(self._tries),
elapsed=elapsed, logfile=filename)
else:
self.message(MessageType.FAIL, str(e),
elapsed=elapsed, detail=e.detail,
logfile=filename, sandbox=e.sandbox)
self._queue.put(_Envelope('child_data', self.child_process_data()))
# Report the exception to the parent (for internal testing purposes)
self._child_send_error(e)
# Set return code based on whether or not the error was temporary.
#
self._child_shutdown(RC_FAIL if self._retry_flag else RC_PERM_FAIL)
except Exception as e: # pylint: disable=broad-except
# If an unhandled exception (not normalized to BstError) occurs, that's a bug,
# send the traceback and formatted exception back to the frontend
# and print it to the log file.
#
elapsed = datetime.datetime.now() - starttime
detail = "An unhandled exception occured:\n\n{}".format(traceback.format_exc())
self.message(MessageType.BUG, self.action_name,
elapsed=elapsed, detail=detail,
logfile=filename)
# Unhandled exceptions should permanently fail
self._child_shutdown(RC_PERM_FAIL)
else:
# No exception occurred in the action
self._queue.put(_Envelope('child_data', self.child_process_data()))
self._child_send_result(result)
elapsed = datetime.datetime.now() - starttime
self.message(MessageType.SUCCESS, self.action_name, elapsed=elapsed,
logfile=filename)
# Shutdown needs to stay outside of the above context manager;
# make sure we don't try to handle SIGTERM while the process
# is already busy in sys.exit()
self._child_shutdown(RC_OK)
# _child_send_error()
#
# Sends an error to the main process through the message queue
#
# Args:
# e (Exception): The error to send
#
def _child_send_error(self, e):
domain = None
reason = None
if isinstance(e, BstError):
domain = e.domain
reason = e.reason
envelope = _Envelope('error', {
'domain': domain,
'reason': reason
})
self._queue.put(envelope)
# _child_send_result()
#
# Sends the serialized result to the main process through the message queue
#
# Args:
# result (object): A simple serializable object, or None
#
# Note: If None is passed here, nothing needs to be sent; the
# result member in the parent process will simply remain None.
#
def _child_send_result(self, result):
if result is not None:
envelope = _Envelope('result', result)
self._queue.put(envelope)
# _child_shutdown()
#
# Shuts down the child process by cleaning up and exiting the process
#
# Args:
# exit_code (int): The exit code to exit with
#
def _child_shutdown(self, exit_code):
self._queue.close()
sys.exit(exit_code)
# _child_message_handler()
#
# A Context delegate for handling messages, this replaces the
# frontend's main message handler in the context of a child task
# and performs local logging to the local log file before sending
# the message back to the parent process for further propagation.
#
# Args:
# message (Message): The message to log
# context (Context): The context object delegating this message
#
def _child_message_handler(self, message, context):
message.action_name = self.action_name
message.task_id = self._task_id
# Send to frontend if appropriate
if context.silent_messages() and (message.message_type not in unconditional_messages):
return
if message.message_type == MessageType.LOG:
return
self._queue.put(_Envelope('message', message))
# _parent_shutdown()
#
# Shuts down the Job on the parent side by reading any remaining
# messages on the message queue and cleaning up any resources.
#
def _parent_shutdown(self):
# Make sure we've read everything we need and then stop listening
self._parent_process_queue()
self._parent_stop_listening()
# _parent_child_completed()
#
# Called in the main process courtesy of asyncio's ChildWatcher.add_child_handler()
#
# Args:
# pid (int): The PID of the child which completed
# returncode (int): The return code of the child process
#
def _parent_child_completed(self, pid, returncode):
self._parent_shutdown()
# We don't want to retry if we got OK or a permanent fail.
# This is set in _child_action but must also be set for the parent.
#
self._retry_flag = returncode == RC_FAIL
if self._retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated:
self.spawn()
return
# Resolve the outward facing overall job completion status
#
if returncode == RC_OK:
status = JobStatus.OK
elif returncode == RC_SKIPPED:
status = JobStatus.SKIPPED
elif returncode in (RC_FAIL, RC_PERM_FAIL):
status = JobStatus.FAIL
else:
status = JobStatus.FAIL
self.parent_complete(status, self._result)
self._scheduler.job_completed(self, status)
# _parent_process_envelope()
#
# Processes a message Envelope deserialized from the message queue.
#
# This will have the side effect of assigning some local state
# on the Job in the parent process for later inspection when the
# child process completes.
#
# Args:
# envelope (Envelope): The message envelope
#
def _parent_process_envelope(self, envelope):
if not self._listening:
return
if envelope.message_type == 'message':
# Propagate received messages from children
# back through the context.
self._scheduler.context.message(envelope.message)
elif envelope.message_type == 'error':
# For regression tests only, save the last error domain / reason
# reported from a child task in the main process, this global state
# is currently managed in _exceptions.py
set_last_task_error(envelope.message['domain'],
envelope.message['reason'])
elif envelope.message_type == 'result':
assert self._result is None
self._result = envelope.message
elif envelope.message_type == 'child_data':
# If we retry a job, we assign a new value to this
self.child_data = envelope.message
# Try Job subclass specific messages now
elif not self.handle_message(envelope.message_type,
envelope.message):
assert 0, "Unhandled message type '{}': {}" \
.format(envelope.message_type, envelope.message)
# _parent_process_queue()
#
# Reads back message envelopes from the message queue
# in the parent process.
#
def _parent_process_queue(self):
while not self._queue.empty():
envelope = self._queue.get_nowait()
self._parent_process_envelope(envelope)
# _parent_recv()
#
# A callback to handle I/O events from the message
# queue file descriptor in the main process message loop
#
def _parent_recv(self, *args):
self._parent_process_queue()
# _parent_start_listening()
#
# Starts listening on the message queue
#
def _parent_start_listening(self):
# Warning: Platform specific code up ahead
#
# The multiprocessing.Queue object does not tell us how
# to receive io events in the receiving process, so we
# need to sneak in and get its file descriptor.
#
# The _reader member of the Queue is currently private
# but well known, perhaps it will become public:
#
# http://bugs.python.org/issue3831
#
if not self._listening:
self._scheduler.loop.add_reader(
self._queue._reader.fileno(), self._parent_recv)
self._listening = True
# _parent_stop_listening()
#
# Stops listening on the message queue
#
def _parent_stop_listening(self):
if self._listening:
self._scheduler.loop.remove_reader(self._queue._reader.fileno())
self._listening = False
buildstream-1.6.9/buildstream/_scheduler/queues/ 0000775 0000000 0000000 00000000000 14375152700 0022005 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/_scheduler/queues/__init__.py 0000664 0000000 0000000 00000000046 14375152700 0024116 0 ustar 00root root 0000000 0000000 from .queue import Queue, QueueStatus
buildstream-1.6.9/buildstream/_scheduler/queues/buildqueue.py 0000664 0000000 0000000 00000005010 14375152700 0024517 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
from . import Queue, QueueStatus
from ..jobs import JobStatus
from ..resources import ResourceType
# A queue which assembles elements
#
class BuildQueue(Queue):
action_name = "Build"
complete_name = "Built"
resources = [ResourceType.PROCESS, ResourceType.CACHE]
def process(self, element):
return element._assemble()
def status(self, element):
if not element._is_required():
# Artifact is not currently required but it may be requested later.
# Keep it in the queue.
return QueueStatus.WAIT
if element._cached():
return QueueStatus.SKIP
if not element._buildable():
return QueueStatus.WAIT
return QueueStatus.READY
def _check_cache_size(self, job, element, artifact_size):
# After completing a build job, add the artifact size
# as returned from Element._assemble() to the estimated
# artifact cache size
#
context = self._scheduler.context
artifacts = context.artifactcache
artifacts.add_artifact_size(artifact_size)
# If the estimated size outgrows the quota, ask the scheduler
# to queue a job to actually check the real cache size.
#
if artifacts.has_quota_exceeded():
self._scheduler.check_cache_size()
def done(self, job, element, result, status):
if status == JobStatus.OK:
# Inform element in main process that assembly is done
element._assemble_done()
# This has to be done after _assemble_done, such that the
# element may register its cache key as required
self._check_cache_size(job, element, result)
buildstream-1.6.9/buildstream/_scheduler/queues/fetchqueue.py 0000664 0000000 0000000 00000004640 14375152700 0024521 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# BuildStream toplevel imports
from ... import Consistency
# Local imports
from . import Queue, QueueStatus
from ..resources import ResourceType
from ..jobs import JobStatus
# A queue which fetches element sources
#
class FetchQueue(Queue):
action_name = "Fetch"
complete_name = "Fetched"
resources = [ResourceType.DOWNLOAD]
def __init__(self, scheduler, skip_cached=False):
super().__init__(scheduler)
self._skip_cached = skip_cached
def process(self, element):
previous_sources = []
for source in element.sources():
source._fetch(previous_sources)
previous_sources.append(source)
def status(self, element):
if not element._is_required():
# Artifact is not currently required but it may be requested later.
# Keep it in the queue.
return QueueStatus.WAIT
# Optionally skip elements that are already in the artifact cache
if self._skip_cached:
if not element._can_query_cache():
return QueueStatus.WAIT
if element._cached():
return QueueStatus.SKIP
# This will automatically skip elements which
# have no sources.
if element._get_consistency() == Consistency.CACHED:
return QueueStatus.SKIP
return QueueStatus.READY
def done(self, _, element, result, status):
if status == JobStatus.FAIL:
return
element._fetch_done()
# Successful fetch, we must be CACHED now
assert element._get_consistency() == Consistency.CACHED
buildstream-1.6.9/buildstream/_scheduler/queues/pullqueue.py 0000664 0000000 0000000 00000004171 14375152700 0024403 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# Local imports
from . import Queue, QueueStatus
from ..resources import ResourceType
from ..jobs import JobStatus
from ..._exceptions import SkipJob
# A queue which pulls element artifacts
#
class PullQueue(Queue):
action_name = "Pull"
complete_name = "Pulled"
resources = [ResourceType.DOWNLOAD, ResourceType.CACHE]
def process(self, element):
# returns whether an artifact was downloaded or not
if not element._pull():
raise SkipJob(self.action_name)
def status(self, element):
if not element._is_required():
# Artifact is not currently required but it may be requested later.
# Keep it in the queue.
return QueueStatus.WAIT
if not element._can_query_cache():
return QueueStatus.WAIT
if element._pull_pending():
return QueueStatus.READY
else:
return QueueStatus.SKIP
def done(self, _, element, result, status):
if status == JobStatus.FAIL:
return
element._pull_done()
# Build jobs will check the "approximate" size first. Since we
# do not get an artifact size from pull jobs, we have to
# actually check the cache size.
if status == JobStatus.OK:
self._scheduler.check_cache_size()
buildstream-1.6.9/buildstream/_scheduler/queues/pushqueue.py 0000664 0000000 0000000 00000002625 14375152700 0024410 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# Local imports
from . import Queue, QueueStatus
from ..resources import ResourceType
from ..._exceptions import SkipJob
# A queue which pushes element artifacts
#
class PushQueue(Queue):
action_name = "Push"
complete_name = "Pushed"
resources = [ResourceType.UPLOAD]
def process(self, element):
# returns whether an artifact was uploaded or not
if not element._push():
raise SkipJob(self.action_name)
def status(self, element):
if element._skip_push():
return QueueStatus.SKIP
return QueueStatus.READY
buildstream-1.6.9/buildstream/_scheduler/queues/queue.py 0000664 0000000 0000000 00000025402 14375152700 0023506 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# System imports
import os
from collections import deque
from enum import Enum
import traceback
# Local imports
from ..jobs import ElementJob, JobStatus
from ..resources import ResourceType
# BuildStream toplevel imports
from ..._exceptions import BstError, set_last_task_error
from ..._message import Message, MessageType
# Queue status for a given element
#
#
class QueueStatus(Enum):
# The element is waiting for dependencies.
WAIT = 1
# The element can skip this queue.
SKIP = 2
# The element is ready for processing in this queue.
READY = 3
# Queue()
#
# Args:
# scheduler (Scheduler): The Scheduler
#
class Queue():
# These should be overridden on class data of concrete Queue implementations
action_name = None
complete_name = None
resources = [] # Resources this queue's jobs want
def __init__(self, scheduler):
#
# Public members
#
self.failed_elements = [] # List of failed elements, for the frontend
self.processed_elements = [] # List of processed elements, for the frontend
self.skipped_elements = [] # List of skipped elements, for the frontend
#
# Private members
#
self._scheduler = scheduler
self._resources = scheduler.resources # Shared resource pool
self._wait_queue = deque() # Ready / Waiting elements
self._done_queue = deque() # Processed / Skipped elements
self._max_retries = 0
# Assert the subclass has setup class data
assert self.action_name is not None
assert self.complete_name is not None
if ResourceType.UPLOAD in self.resources or ResourceType.DOWNLOAD in self.resources:
self._max_retries = scheduler.context.sched_network_retries
#####################################################
# Abstract Methods for Queue implementations #
#####################################################
# process()
#
# Abstract method for processing an element
#
# Args:
# element (Element): An element to process
#
# Returns:
# (any): An optional value to be returned
# for every element successfully processed
#
#
def process(self, element):
pass
# status()
#
# Abstract method for reporting the status of an element.
#
# Args:
# element (Element): An element to process
#
# Returns:
# (QueueStatus): The element status
#
def status(self, element):
return QueueStatus.READY
# done()
#
# Abstract method for handling a successful job completion.
#
# Args:
# job (Job): The job which completed processing
# element (Element): The element which completed processing
# result (any): The return value of the process() implementation
# status (JobStatus): The return status of the Job
#
def done(self, job, element, result, status):
pass
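# As an illustrative sketch (an editorial example, not a queue that
# ships with BuildStream), a concrete queue overriding the three
# hooks above might look roughly like:
#
#   class ExampleQueue(Queue):
#       action_name = "Example"
#       complete_name = "Examples"
#       resources = [ResourceType.PROCESS]
#
#       def process(self, element):
#           return element.name          # handed back to done() as `result`
#
#       def status(self, element):
#           return QueueStatus.READY
#
#       def done(self, job, element, result, status):
#           pass
#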
#####################################################
# Scheduler / Pipeline facing APIs #
#####################################################
# enqueue()
#
# Enqueues some elements
#
# Args:
# elts (list): A list of Elements
#
def enqueue(self, elts):
if not elts:
return
# Place skipped elements on the done queue right away.
#
# The remaining ready and waiting elements must remain in the
# same queue, and ready status must be determined at the moment
# which the scheduler is asking for the next job.
#
skip = [elt for elt in elts if self.status(elt) == QueueStatus.SKIP]
wait = [elt for elt in elts if elt not in skip]
self.skipped_elements.extend(skip) # Public record of skipped elements
self._done_queue.extend(skip) # Skipped elements are already done, ready to be dequeued
self._wait_queue.extend(wait) # Remaining elements wait to be processed
# dequeue()
#
# A generator which dequeues the elements which
# are ready to exit the queue.
#
# Yields:
# (Element): Elements being dequeued
#
def dequeue(self):
while self._done_queue:
yield self._done_queue.popleft()
# dequeue_ready()
#
# Reports whether any elements can be promoted to other queues
#
# Returns:
# (bool): Whether there are elements ready
#
def dequeue_ready(self):
return any(self._done_queue)
# harvest_jobs()
#
# Process elements in the queue, moving skipped elements into the
# dequeue pool and creating as many jobs as resources can be
# reserved for.
#
# Returns:
# ([Job]): A list of jobs which can be run now
#
def harvest_jobs(self):
unready = []
ready = []
while self._wait_queue:
if not self._resources.reserve(self.resources, peek=True):
break
element = self._wait_queue.popleft()
status = self.status(element)
if status == QueueStatus.WAIT:
unready.append(element)
elif status == QueueStatus.SKIP:
self._done_queue.append(element)
self.skipped_elements.append(element)
else:
reserved = self._resources.reserve(self.resources)
assert reserved
ready.append(element)
self._wait_queue.extendleft(unready)
return [
ElementJob(self._scheduler, self.action_name,
self._element_log_path(element),
element=element, queue=self,
action_cb=self.process,
complete_cb=self._job_done,
max_retries=self._max_retries)
for element in ready
]
#####################################################
# Private Methods #
#####################################################
# _update_workspaces()
#
# Updates and possibly saves the workspaces in the
# main data model in the main process after a job completes.
#
# Args:
# element (Element): The element which completed
# job (Job): The job which completed
#
def _update_workspaces(self, element, job):
workspace_dict = None
if job.child_data:
workspace_dict = job.child_data.get('workspace', None)
# Handle any workspace modifications now
#
if workspace_dict:
context = element._get_context()
workspaces = context.get_workspaces()
if workspaces.update_workspace(element._get_full_name(), workspace_dict):
try:
workspaces.save_config()
except BstError as e:
self._message(element, MessageType.ERROR, "Error saving workspaces", detail=str(e))
except Exception: # pylint: disable=broad-except
self._message(element, MessageType.BUG,
"Unhandled exception while saving workspaces",
detail=traceback.format_exc())
# _job_done()
#
# A callback reported by the Job() when a job completes
#
# This will call the Queue implementation specific Queue.done()
# implementation and trigger the scheduler to reschedule.
#
# See the Job object for an explanation of the call signature
#
def _job_done(self, job, element, status, result):
# Now release the resources we reserved
#
self._resources.release(self.resources)
# Update values that need to be synchronized in the main task
# before calling any queue implementation
self._update_workspaces(element, job)
# Give the result of the job to the Queue implementor,
# and determine if it should be considered as processed
# or skipped.
try:
self.done(job, element, result, status)
except BstError as e:
# Report error and mark as failed
#
self._message(element, MessageType.ERROR, "Post processing error", detail=str(e))
self.failed_elements.append(element)
# Treat this as a task error as it's related to a task
# even though it did not occur in the task context
#
# This just gives us stronger testing capability
#
set_last_task_error(e.domain, e.reason)
except Exception: # pylint: disable=broad-except
# Report unhandled exceptions and mark as failed
#
self._message(element, MessageType.BUG,
"Unhandled exception in post processing",
detail=traceback.format_exc())
self.failed_elements.append(element)
else:
# All elements get placed on the done queue for later processing.
self._done_queue.append(element)
# These lists are for bookkeeping purposes for the UI and logging.
if status == JobStatus.SKIPPED:
self.skipped_elements.append(element)
elif status == JobStatus.OK:
self.processed_elements.append(element)
else:
self.failed_elements.append(element)
# Convenience wrapper for Queue implementations to send
# a message for the element they are processing
def _message(self, element, message_type, brief, **kwargs):
context = element._get_context()
message = Message(element._unique_id, message_type, brief, **kwargs)
context.message(message)
def _element_log_path(self, element):
project = element._get_project()
key = element._get_display_key()[1]
action = self.action_name.lower()
logfile = "{key}-{action}".format(key=key, action=action)
return os.path.join(project.name, element.normal_name, logfile)
buildstream-1.6.9/buildstream/_scheduler/queues/trackqueue.py 0000664 0000000 0000000 00000003622 14375152700 0024533 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# BuildStream toplevel imports
from ...plugin import Plugin
# Local imports
from . import Queue, QueueStatus
from ..resources import ResourceType
from ..jobs import JobStatus
# A queue which tracks sources
#
class TrackQueue(Queue):
action_name = "Track"
complete_name = "Tracked"
resources = [ResourceType.DOWNLOAD]
def process(self, element):
return element._track()
def status(self, element):
# We can skip elements entirely if they have no sources.
if not list(element.sources()):
# But we still have to mark them as tracked
element._tracking_done()
return QueueStatus.SKIP
return QueueStatus.READY
def done(self, _, element, result, status):
if status == JobStatus.FAIL:
return
# Set the new refs in the main process one by one as they complete,
# writing to bst files this time
for unique_id, new_ref in result:
source = Plugin._lookup(unique_id)
source._set_ref(new_ref, save=True)
element._tracking_done()
buildstream-1.6.9/buildstream/_scheduler/resources.py 0000664 0000000 0000000 00000013542 14375152700 0023067 0 ustar 00root root 0000000 0000000 class ResourceType():
CACHE = 0
DOWNLOAD = 1
PROCESS = 2
UPLOAD = 3
class Resources():
def __init__(self, num_builders, num_fetchers, num_pushers):
self._max_resources = {
ResourceType.CACHE: 0,
ResourceType.DOWNLOAD: num_fetchers,
ResourceType.PROCESS: num_builders,
ResourceType.UPLOAD: num_pushers
}
# Resources jobs are currently using.
self._used_resources = {
ResourceType.CACHE: 0,
ResourceType.DOWNLOAD: 0,
ResourceType.PROCESS: 0,
ResourceType.UPLOAD: 0
}
# Resources jobs currently want exclusive access to. The set
# of jobs that have asked for exclusive access is the value -
# this is so that we can avoid scheduling any other jobs until
# *all* exclusive jobs that "register interest" have finished
# - which avoids starving them of scheduling time.
self._exclusive_resources = {
ResourceType.CACHE: set(),
ResourceType.DOWNLOAD: set(),
ResourceType.PROCESS: set(),
ResourceType.UPLOAD: set()
}
# reserve()
#
# Reserves a set of resources
#
# Args:
# resources (set): A set of ResourceTypes
# exclusive (set): Another set of ResourceTypes
# peek (bool): Whether to only peek at whether the resource is available
#
# Returns:
# (bool): True if the resources could be reserved
#
def reserve(self, resources, exclusive=None, *, peek=False):
if exclusive is None:
exclusive = set()
resources = set(resources)
exclusive = set(exclusive)
# First, we check if the job wants to access a resource that
# another job wants exclusive access to. If so, it cannot be
# scheduled.
#
# Note that if *both* jobs want this exclusively, we don't
# fail yet.
#
# FIXME: I *think* we can deadlock if two jobs want disjoint
# sets of exclusive and non-exclusive resources. This
# is currently not possible, but may be worth thinking
# about.
#
for resource in resources - exclusive:
# If our job wants this resource exclusively, we never
# check this, so we can get away with not (temporarily)
# removing it from the set.
if self._exclusive_resources[resource]:
return False
# Now we check if anything is currently using any resources
# this job wants exclusively. If so, the job cannot be
# scheduled.
#
# Since jobs that use a resource exclusively are also using
# it, this means only one exclusive job can ever be scheduled
# at a time, despite being allowed to be part of the exclusive
# set.
#
for resource in exclusive:
if self._used_resources[resource] != 0:
return False
# Finally, we check if we have enough of each resource
# available. If we don't have enough, the job cannot be
# scheduled.
for resource in resources:
if (self._max_resources[resource] > 0 and
self._used_resources[resource] >= self._max_resources[resource]):
return False
# Now we register the fact that our job is using the resources
# it asked for, and tell the scheduler that it is allowed to
# continue.
if not peek:
for resource in resources:
self._used_resources[resource] += 1
return True
# release()
#
# Release resources previously reserved with Resources.reserve()
#
# Args:
# resources (set): A set of resources to release
#
def release(self, resources):
for resource in resources:
assert self._used_resources[resource] > 0, "Scheduler resource imbalance"
self._used_resources[resource] -= 1
# register_exclusive_interest()
#
# Inform the resources pool that `source` has an interest in
# reserving this resource exclusively.
#
# The source parameter is used to identify the caller; it
# must be unique for as long as the interest is registered.
#
# This function may be called multiple times, and subsequent
# calls will simply have no effect until clear_exclusive_interest()
# is used to clear the interest.
#
# This must be called in advance of reserve()
#
# Args:
# resources (set): Set of resources to reserve exclusively
# source (any): Source identifier, to be used again when unregistering
# the interest.
#
def register_exclusive_interest(self, resources, source):
# The very first thing we do is to register any exclusive
# resources this job may want. Even if the job is not yet
# allowed to run (because another job is holding the resource
# it wants), we can still set this - it just means that any
# job *currently* using these resources has to finish first,
# and no new jobs wanting these can be launched (except other
# exclusive-access jobs).
#
for resource in resources:
self._exclusive_resources[resource].add(source)
# unregister_exclusive_interest()
#
# Clear the exclusive interest in these resources.
#
# This should be called by the given source which registered
# an exclusive interest.
#
# Args:
# resources (set): Set of resources to reserve exclusively
# source (str): The same source identifier that was used when
# registering the interest.
#
def unregister_exclusive_interest(self, resources, source):
for resource in resources:
self._exclusive_resources[resource].remove(source)
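# Illustrative usage sketch (editorial example, mirroring how the
# scheduler uses this class for its exclusive 'cache-cleanup' job;
# `resources` is an assumed Resources instance):
#
#   resources.register_exclusive_interest([ResourceType.CACHE], 'cache-cleanup')
#   if resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
#                        [ResourceType.CACHE]):
#       ...  # run the exclusive job
#       resources.release([ResourceType.CACHE, ResourceType.PROCESS])
#       resources.unregister_exclusive_interest([ResourceType.CACHE], 'cache-cleanup')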
buildstream-1.6.9/buildstream/_scheduler/scheduler.py 0000664 0000000 0000000 00000043433 14375152700 0023035 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# System imports
import os
import asyncio
from itertools import chain
import signal
import datetime
from contextlib import contextmanager
# Local imports
from .resources import Resources, ResourceType
from .jobs import JobStatus, CacheSizeJob, CleanupJob
# A decent return code for Scheduler.run()
class SchedStatus():
SUCCESS = 0
ERROR = -1
TERMINATED = 1
# Some action names for the internal jobs we launch
#
_ACTION_NAME_CLEANUP = 'cleanup'
_ACTION_NAME_CACHE_SIZE = 'cache_size'
# Scheduler()
#
# The scheduler operates on a list of queues, each of which is meant to accomplish
# a specific task. Elements enter the first queue when Scheduler.run() is called
# and into the next queue when complete. Scheduler.run() returns when all of the
# elements have been traversed or when an error occurs.
#
# Using the scheduler is a matter of:
# a.) Deriving the Queue class and implementing its abstract methods
# b.) Instantiating a Scheduler with one or more queues
# c.) Calling Scheduler.run(elements) with a list of elements
# d.) Fetching results from your queues
#
# Args:
# context: The Context in the parent scheduling process
# start_time: The time at which the session started
# interrupt_callback: A callback to handle ^C
# ticker_callback: A callback called once per second
# job_start_callback: A callback called when each job starts
# job_complete_callback: A callback called when each job completes
#
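# As an illustrative sketch of the steps above (editorial example;
# `context`, `start_time` and `elements` are assumed to be provided
# by the frontend, and FetchQueue comes from this package):
#
#   scheduler = Scheduler(context, start_time)
#   fetch = FetchQueue(scheduler)
#   fetch.enqueue(elements)
#   elapsed, status = scheduler.run([fetch])
#   if status == SchedStatus.SUCCESS:
#       ...  # inspect fetch.processed_elements / fetch.skipped_elements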
class Scheduler():
def __init__(self, context,
start_time,
interrupt_callback=None,
ticker_callback=None,
job_start_callback=None,
job_complete_callback=None):
#
# Public members
#
self.queues = None # Exposed for the frontend to print summaries
self.context = context # The Context object shared with Queues
self.terminated = False # Whether the scheduler was asked to terminate or has terminated
self.suspended = False # Whether the scheduler is currently suspended
# These are shared with the Job, but should probably be removed or made private in some way.
self.loop = None # Shared for Job access to observe the message queue
self.internal_stops = 0 # Number of SIGTSTP signals we've introduced; this is shared with job.py
#
# Private members
#
self._active_jobs = [] # Jobs currently being run in the scheduler
self._starttime = start_time # Initial application start time
self._suspendtime = None # Session time compensation for suspended state
self._queue_jobs = True # Whether we should continue to queue jobs
# State of cache management related jobs
self._cache_size_scheduled = False # Whether we have a cache size job scheduled
self._cache_size_running = None # A running CacheSizeJob, or None
self._cleanup_scheduled = False # Whether we have a cleanup job scheduled
self._cleanup_running = None # A running CleanupJob, or None
# Callbacks to report back to the Scheduler owner
self._interrupt_callback = interrupt_callback
self._ticker_callback = ticker_callback
self._job_start_callback = job_start_callback
self._job_complete_callback = job_complete_callback
# Whether our exclusive jobs, like 'cleanup' are currently already
# waiting or active.
#
# This is just a bit quicker than scanning the wait queue and active
# queue and comparing job action names.
#
self._exclusive_waiting = set()
self._exclusive_active = set()
self.resources = Resources(context.sched_builders,
context.sched_fetchers,
context.sched_pushers)
# run()
#
# Args:
# queues (list): A list of Queue objects
#
# Returns:
# (SchedStatus): How the scheduling terminated
#
# Elements in the 'plan' will be processed by each
# queue in order. Processing will complete when all
# elements have been processed by each queue or when
# an error arises
#
def run(self, queues):
# Hold on to the queues to process
self.queues = queues
# NOTE: Enforce use of `SafeChildWatcher` as we generally don't want
# background threads.
# In Python 3.8+, `ThreadedChildWatcher` is the default watcher, and
# not `SafeChildWatcher`.
asyncio.set_child_watcher(asyncio.SafeChildWatcher())
# Ensure that we have a fresh new event loop, in case we want
# to run another test in this thread.
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(self.loop)
# Add timeouts
if self._ticker_callback:
self.loop.call_later(1, self._tick)
# Handle unix signals while running
self._connect_signals()
# Run the queues
self._sched()
self.loop.run_forever()
self.loop.close()
# Stop handling unix signals
self._disconnect_signals()
failed = any(any(queue.failed_elements) for queue in self.queues)
self.loop = None
if failed:
status = SchedStatus.ERROR
elif self.terminated:
status = SchedStatus.TERMINATED
else:
status = SchedStatus.SUCCESS
return self.elapsed_time(), status
# terminate_jobs()
#
# Forcefully terminates all ongoing jobs.
#
# For this to be effective, one needs to return to
# the scheduler loop first and allow the scheduler
# to complete gracefully.
#
# NOTE: This will block SIGINT so that graceful process
# termination is not interrupted, and SIGINT will
# remain blocked after Scheduler.run() returns.
#
def terminate_jobs(self):
# Set this right away, the frontend will check this
# attribute to decide whether or not to print status info
# etc and the following code block will trigger some callbacks.
self.terminated = True
self.loop.call_soon(self._terminate_jobs_real)
# Block this until we're finished terminating jobs,
# this will remain blocked forever.
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGINT])
# jobs_suspended()
#
# A context manager for running with jobs suspended
#
@contextmanager
def jobs_suspended(self):
self._disconnect_signals()
self._suspend_jobs()
yield
self._resume_jobs()
self._connect_signals()
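# Illustrative usage sketch (editorial example; `prompt_user` is an
# assumed frontend helper): wrap an interactive prompt so that running
# jobs are paused while the user is being asked a question:
#
#   with scheduler.jobs_suspended():
#       answer = prompt_user()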
# stop_queueing()
#
# Stop queueing additional jobs, causes Scheduler.run()
# to return once all currently processing jobs are finished.
#
def stop_queueing(self):
self._queue_jobs = False
# elapsed_time()
#
# Fetches the current session elapsed time
#
# Returns:
# (datetime): The amount of time since the start of the session,
# discounting any time spent while jobs were suspended.
#
def elapsed_time(self):
timenow = datetime.datetime.now()
starttime = self._starttime
if not starttime:
starttime = timenow
return timenow - starttime
# job_completed():
#
# Called when a Job completes
#
# Args:
# queue (Queue): The Queue holding a complete job
# job (Job): The completed Job
# status (JobStatus): The status of the completed job
#
def job_completed(self, job, status):
# Remove from the active jobs list
self._active_jobs.remove(job)
# Scheduler owner facing callback
self._job_complete_callback(job, status)
# Now check for more jobs
self._sched()
# check_cache_size():
#
# Queues a cache size calculation job, after the cache
# size is calculated, a cleanup job will be run automatically
# if needed.
#
def check_cache_size(self):
# Here we assume we are called in response to a job
# completion callback, or before entering the scheduler.
#
# As such there is no need to call `_sched()` from here,
# and we prefer to run it once at the last moment.
#
self._cache_size_scheduled = True
#######################################################
# Local Private Methods #
#######################################################
# _spawn_job()
#
# Spawns a job
#
# Args:
# job (Job): The job to spawn
#
def _spawn_job(self, job):
job.spawn()
self._active_jobs.append(job)
if self._job_start_callback:
self._job_start_callback(job)
# Callback for the cache size job
def _cache_size_job_complete(self, status, cache_size):
context = self.context
artifacts = context.artifactcache
# Deallocate cache size job resources
self._cache_size_running = None
self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
# Schedule a cleanup job if we've hit the threshold
if status != JobStatus.OK:
return
if artifacts.has_quota_exceeded():
self._cleanup_scheduled = True
# Callback for the cleanup job
def _cleanup_job_complete(self, status, cache_size):
# Deallocate cleanup job resources
self._cleanup_running = None
self.resources.release([ResourceType.CACHE, ResourceType.PROCESS])
# Unregister the exclusive interest when we're done with it
if not self._cleanup_scheduled:
self.resources.unregister_exclusive_interest(
[ResourceType.CACHE], 'cache-cleanup'
)
# _sched_cleanup_job()
#
# Runs a cleanup job if one is scheduled to run now and
# sufficient resources are available.
#
def _sched_cleanup_job(self):
if self._cleanup_scheduled and self._cleanup_running is None:
# Ensure we have an exclusive interest in the resources
self.resources.register_exclusive_interest(
[ResourceType.CACHE], 'cache-cleanup'
)
if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS],
[ResourceType.CACHE]):
# Update state and launch
self._cleanup_scheduled = False
self._cleanup_running = \
CleanupJob(self, _ACTION_NAME_CLEANUP, 'cleanup/cleanup',
complete_cb=self._cleanup_job_complete)
self._spawn_job(self._cleanup_running)
# _sched_cache_size_job()
#
# Runs a cache size job if one is scheduled to run now and
# sufficient resources are available.
#
def _sched_cache_size_job(self):
if self._cache_size_scheduled and not self._cache_size_running:
if self.resources.reserve([ResourceType.CACHE, ResourceType.PROCESS]):
self._cache_size_scheduled = False
self._cache_size_running = \
CacheSizeJob(self, _ACTION_NAME_CACHE_SIZE,
'cache_size/cache_size',
complete_cb=self._cache_size_job_complete)
self._spawn_job(self._cache_size_running)
# _sched_queue_jobs()
#
# Ask the queues what jobs they want to schedule and schedule
# them. This is done here so we can ask for new jobs when jobs
# from previous queues become available.
#
# This will process the Queues, pull elements through the Queues
# and process anything that is ready.
#
def _sched_queue_jobs(self):
ready = []
process_queues = True
while self._queue_jobs and process_queues:
# Pull elements forward through queues
elements = []
for queue in self.queues:
queue.enqueue(elements)
elements = list(queue.dequeue())
# Kickoff whatever processes can be processed at this time
#
# We start by queuing from the last queue first, because
# we want to give priority to queues later in the
# scheduling process in the case that multiple queues
# share the same token type.
#
# This avoids starvation situations where we don't move on
# to fetch tasks for elements which failed to pull, and
# thus need all the pulls to complete before ever starting
# a build
ready.extend(chain.from_iterable(
q.harvest_jobs() for q in reversed(self.queues)
))
# harvest_jobs() may have decided to skip some jobs, making
# them eligible for promotion to the next queue as a side effect.
#
# If that happens, do another round.
process_queues = any(q.dequeue_ready() for q in self.queues)
# Spawn the jobs
#
for job in ready:
self._spawn_job(job)
# _sched()
#
# Run any jobs which are ready to run, or quit the main loop
# when nothing is running or is ready to run.
#
# This is the main driving function of the scheduler; it is called
# initially when we enter Scheduler.run(), and again whenever
# any job completes, after any business logic has occurred and before
# going back to sleep.
#
def _sched(self):
if not self.terminated:
#
# Try the cache management jobs
#
self._sched_cleanup_job()
self._sched_cache_size_job()
#
# Run as many jobs as the queues can handle for the
# available resources
#
self._sched_queue_jobs()
#
# If nothing is ticking then bail out
#
if not self._active_jobs:
self.loop.stop()
# _suspend_jobs()
#
# Suspend all ongoing jobs.
#
def _suspend_jobs(self):
if not self.suspended:
self._suspendtime = datetime.datetime.now()
self.suspended = True
for job in self._active_jobs:
job.suspend()
# _resume_jobs()
#
# Resume suspended jobs.
#
def _resume_jobs(self):
if self.suspended:
for job in self._active_jobs:
job.resume()
self.suspended = False
self._starttime += (datetime.datetime.now() - self._suspendtime)
self._suspendtime = None
# _interrupt_event():
#
# A loop registered event callback for keyboard interrupts
#
def _interrupt_event(self):
# FIXME: This should not be needed, but for some reason we receive an
# additional SIGINT event when the user hits ^C a second time
# to inform us that they really intend to terminate; even though
# we have disconnected our handlers at this time.
#
if self.terminated:
return
# Leave this to the frontend to decide, if no
# interrupt callback was specified, then just terminate.
if self._interrupt_callback:
self._interrupt_callback()
else:
# Default without a frontend is just terminate
self.terminate_jobs()
# _terminate_event():
#
# A loop registered event callback for SIGTERM
#
def _terminate_event(self):
self.terminate_jobs()
# _suspend_event():
#
# A loop registered event callback for SIGTSTP
#
def _suspend_event(self):
# Ignore the feedback signals from Job.suspend()
if self.internal_stops:
self.internal_stops -= 1
return
# No need to care if jobs were suspended or not, we _only_ handle this
# while we know jobs are not suspended.
self._suspend_jobs()
os.kill(os.getpid(), signal.SIGSTOP)
self._resume_jobs()
# _connect_signals():
#
# Connects our signal handler event callbacks to the mainloop
#
def _connect_signals(self):
self.loop.add_signal_handler(signal.SIGINT, self._interrupt_event)
self.loop.add_signal_handler(signal.SIGTERM, self._terminate_event)
self.loop.add_signal_handler(signal.SIGTSTP, self._suspend_event)
def _disconnect_signals(self):
self.loop.remove_signal_handler(signal.SIGINT)
self.loop.remove_signal_handler(signal.SIGTSTP)
self.loop.remove_signal_handler(signal.SIGTERM)
def _terminate_jobs_real(self):
def kill_jobs():
for job_ in self._active_jobs:
job_.kill()
# Schedule all jobs to be killed if they have not exited in 20 sec
self.loop.call_later(20, kill_jobs)
for job in self._active_jobs:
job.terminate()
# Regular timeout for driving status in the UI
def _tick(self):
elapsed = self.elapsed_time()
self._ticker_callback(elapsed)
self.loop.call_later(1, self._tick)
buildstream-1.6.9/buildstream/_signals.py 0000664 0000000 0000000 00000014365 14375152700 0020543 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager, ExitStack
from collections import deque
# Global per process state for handling of sigterm/sigtstp/sigcont,
# note that it is expected that this only ever be used by processes
# the scheduler forks off, not the main process
terminator_stack = deque()
suspendable_stack = deque()
# Per process SIGTERM handler
def terminator_handler(signal_, frame):
while terminator_stack:
terminator_ = terminator_stack.pop()
try:
terminator_()
except: # pylint: disable=bare-except
# Ensure we print something if there's an exception raised when
# processing the handlers. Note that the default exception
# handler won't be called because we call os._exit() next, so we must
# catch all possible exceptions with the unqualified 'except'
# clause.
traceback.print_exc(file=sys.stderr)
print('Error encountered in BuildStream while processing custom SIGTERM handler:',
terminator_,
file=sys.stderr)
# Use the special exit here to terminate immediately; this is
# recommended for precisely this situation where child forks are terminated.
os._exit(-1)
# terminator()
#
# A context manager for interruptible tasks; it guarantees
# that while the code block is running, the supplied function
# will be called upon process termination.
#
# Note that after handlers are called, the termination will be handled by
# terminating immediately with os._exit(). This means that SystemExit will not
# be raised and 'finally' clauses will not be executed.
#
# Args:
# terminate_func (callable): A function to call when aborting
# the nested code block.
#
@contextmanager
def terminator(terminate_func):
# Signal handling only works in the main thread
if threading.current_thread() != threading.main_thread():
yield
return
outermost = not terminator_stack
terminator_stack.append(terminate_func)
if outermost:
original_handler = signal.signal(signal.SIGTERM, terminator_handler)
try:
yield
finally:
if outermost:
signal.signal(signal.SIGTERM, original_handler)
terminator_stack.pop()
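# Illustrative usage sketch (editorial example; `proc` is an assumed
# subprocess.Popen handle): ensure a spawned child is killed if this
# process receives SIGTERM while waiting on it:
#
#   with terminator(proc.kill):
#       proc.wait()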
# Just a simple object for holding on to two callbacks
class Suspender():
def __init__(self, suspend_callback, resume_callback):
self.suspend = suspend_callback
self.resume = resume_callback
# Per process SIGTSTP handler
def suspend_handler(sig, frame):
# Suspend callbacks from innermost frame first
for suspender in reversed(suspendable_stack):
suspender.suspend()
# Use SIGSTOP directly on self now, don't introduce more SIGTSTP
#
# Here the process sleeps until SIGCONT, which we simply
# don't handle. We know we'll pick up execution right here
# when we wake up.
os.kill(os.getpid(), signal.SIGSTOP)
# Resume callbacks from outermost frame inwards
for suspender in suspendable_stack:
suspender.resume()
# suspendable()
#
# A context manager for handling process suspending and resuming
#
# Args:
# suspend_callback (callable): A function to call at process suspend time.
# resume_callback (callable): A function to call at process resume time.
#
# This must be used in code blocks which spawn processes that become
# their own session leader. In these cases, SIGSTOP and SIGCONT need
# to be propagated to the child process group.
#
# This context manager can also be used recursively, so multiple
# things can happen at suspend/resume time (such as tracking timers
# and ensuring durations do not count suspended time).
#
@contextmanager
def suspendable(suspend_callback, resume_callback):
outermost = not suspendable_stack
suspender = Suspender(suspend_callback, resume_callback)
suspendable_stack.append(suspender)
if outermost:
original_stop = signal.signal(signal.SIGTSTP, suspend_handler)
try:
yield
finally:
if outermost:
signal.signal(signal.SIGTSTP, original_stop)
suspendable_stack.pop()
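# Illustrative usage sketch (editorial example; `group_pid` and
# `wait_for_child` are assumed to belong to the calling code):
# propagate suspend/resume to the child process group as described above:
#
#   with suspendable(lambda: os.killpg(group_pid, signal.SIGSTOP),
#                    lambda: os.killpg(group_pid, signal.SIGCONT)):
#       wait_for_child()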
# blocked()
#
# A context manager for running a code block with blocked signals
#
# Args:
# signals (list): A list of unix signals to block
# ignore (bool): Whether to ignore entirely the signals which were
# received and pending while the process had blocked them
#
@contextmanager
def blocked(signal_list, ignore=True):
with ExitStack() as stack:
# Optionally add the ignored() context manager to this context
if ignore:
stack.enter_context(ignored(signal_list))
# Set and save the sigprocmask
blocked_signals = signal.pthread_sigmask(signal.SIG_BLOCK, signal_list)
try:
yield
finally:
# If we have discarded the signals completely, this line will cause
# the discard_handler() to trigger for each signal in the list
signal.pthread_sigmask(signal.SIG_SETMASK, blocked_signals)
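# Illustrative usage sketch (editorial example; `do_critical_work` is
# an assumed helper): hold back and discard SIGTERM around a critical
# section:
#
#   with blocked([signal.SIGTERM], ignore=True):
#       do_critical_work()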
# ignored()
#
# A context manager for running a code block with ignored signals
#
# Args:
# signals (list): A list of unix signals to ignore
#
@contextmanager
def ignored(signal_list):
orig_handlers = {}
for sig in signal_list:
orig_handlers[sig] = signal.signal(sig, signal.SIG_IGN)
try:
yield
finally:
for sig in signal_list:
signal.signal(sig, orig_handlers[sig])
buildstream-1.6.9/buildstream/_site.py 0000664 0000000 0000000 00000005646 14375152700 0020051 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
import shutil
import subprocess
#
# Private module declaring some info about where buildstream
# is installed, so we can look up package-relative resources easily
#
# The package root, wherever we are running the package from
root = os.path.dirname(os.path.abspath(__file__))
# The Element plugin directory
element_plugins = os.path.join(root, 'plugins', 'elements')
# The Source plugin directory
source_plugins = os.path.join(root, 'plugins', 'sources')
# Default user configuration
default_user_config = os.path.join(root, 'data', 'userconfig.yaml')
# Default project configuration
default_project_config = os.path.join(root, 'data', 'projectconfig.yaml')
# Script template to call module building scripts
build_all_template = os.path.join(root, 'data', 'build-all.sh.in')
# Module building script template
build_module_template = os.path.join(root, 'data', 'build-module.sh.in')
# Cached bwrap version
_bwrap_major = None
_bwrap_minor = None
_bwrap_patch = None
# check_bwrap_version()
#
# Checks the version of installed bwrap against the requested version
#
# Args:
# major (int): The required major version
# minor (int): The required minor version
# patch (int): The required patch level
#
# Returns:
# (bool): Whether installed bwrap meets the requirements
#
def check_bwrap_version(major, minor, patch):
# pylint: disable=global-statement
global _bwrap_major
global _bwrap_minor
global _bwrap_patch
# Parse bwrap version and save into cache, if not already cached
if _bwrap_major is None:
bwrap_path = shutil.which('bwrap')
if not bwrap_path:
return False
cmd = [bwrap_path, "--version"]
version = str(subprocess.check_output(cmd).split()[1], "utf-8")
_bwrap_major, _bwrap_minor, _bwrap_patch = map(int, version.split("."))
# Check whether the installed version meets the requirements
if _bwrap_major > major:
return True
elif _bwrap_major < major:
return False
else:
if _bwrap_minor > minor:
return True
elif _bwrap_minor < minor:
return False
else:
return _bwrap_patch >= patch
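# Illustrative usage sketch (editorial example; the version numbers
# here are arbitrary, not a requirement stated by this module):
#
#   if not check_bwrap_version(0, 1, 8):
#       ...  # fall back, or report that the sandbox feature is unavailable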
buildstream-1.6.9/buildstream/_sourcefactory.py 0000664 0000000 0000000 00000004453 14375152700 0021770 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
from . import _site
from ._plugincontext import PluginContext
from .source import Source
# A SourceFactory creates Source instances
# in the context of a given factory
#
# Args:
# plugin_base (PluginBase): The main PluginBase object to work with
# plugin_origins (list): Data used to search for external Source plugins
#
class SourceFactory(PluginContext):
def __init__(self, plugin_base, *,
format_versions=None,
plugin_origins=None):
if format_versions is None:
format_versions = {}
super().__init__(plugin_base, Source, [_site.source_plugins],
format_versions=format_versions,
plugin_origins=plugin_origins)
# create():
#
# Create a Source object, the pipeline uses this to create Source
# objects on demand for a given pipeline.
#
# Args:
# context (object): The Context object for processing
# project (object): The project object
# meta (object): The loaded MetaSource
#
# Returns:
# A newly created Source object of the appropriate kind
#
# Raises:
# PluginError (if the kind lookup failed)
# LoadError (if the source itself took issue with the config)
#
def create(self, context, project, meta):
source_type, _ = self.lookup(meta.kind)
source = source_type(context, project, meta)
version = self._format_versions.get(meta.kind, 0)
self._assert_plugin_format(source, version)
return source
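# Illustrative usage sketch (editorial example; `plugin_base`, `context`,
# `project` and `meta` are assumed to be supplied by the core as
# documented above, and the format_versions mapping is arbitrary):
#
#   factory = SourceFactory(plugin_base, format_versions={'git': 1})
#   source = factory.create(context, project, meta)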
buildstream-1.6.9/buildstream/_stream.py 0000664 0000000 0000000 00000127045 14375152700 0020376 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jürg Billeter
# Tristan Maat
import os
import sys
import stat
import shlex
import shutil
import tarfile
from contextlib import contextmanager
from tempfile import TemporaryDirectory
from ._exceptions import StreamError, ImplError, BstError
from ._message import Message, MessageType
from ._scheduler import Scheduler, SchedStatus, TrackQueue, FetchQueue, BuildQueue, PullQueue, PushQueue
from ._pipeline import Pipeline, PipelineSelection
from . import utils, _yaml, _site
from . import Scope, Consistency
# Stream()
#
# This is the main, toplevel calling interface in BuildStream core.
#
# Args:
# context (Context): The Context object
# project (Project): The Project object
# session_start (datetime): The time when the session started
# session_start_callback (callable): A callback to invoke when the session starts
# interrupt_callback (callable): A callback to invoke when we get interrupted
# ticker_callback (callable): Invoked every second while running the scheduler
# job_start_callback (callable): Called when a job starts
# job_complete_callback (callable): Called when a job completes
#
class Stream():
def __init__(self, context, project, session_start, *,
session_start_callback=None,
interrupt_callback=None,
ticker_callback=None,
job_start_callback=None,
job_complete_callback=None):
#
# Public members
#
self.targets = [] # Resolved target elements
self.session_elements = [] # List of elements being processed this session
self.total_elements = [] # Total list of elements based on targets
self.queues = [] # Queue objects
#
# Private members
#
self._artifacts = context.artifactcache
self._context = context
self._project = project
self._pipeline = Pipeline(context, project, self._artifacts)
self._scheduler = Scheduler(context, session_start,
interrupt_callback=interrupt_callback,
ticker_callback=ticker_callback,
job_start_callback=job_start_callback,
job_complete_callback=job_complete_callback)
self._first_non_track_queue = None
self._session_start_callback = session_start_callback
# cleanup()
#
# Cleans up application state
#
def cleanup(self):
if self._project:
self._project.cleanup()
# load_selection()
#
# An all-purpose method for loading a selection of elements; this
# is primarily useful for the frontend to implement `bst show`
# and `bst shell`.
#
# Args:
# targets (list of str): Targets to pull
# selection (PipelineSelection): The selection mode for the specified targets
# except_targets (list of str): Specified targets to except from fetching
#
# Returns:
# (list of Element): The selected elements
def load_selection(self, targets, *,
selection=PipelineSelection.NONE,
except_targets=()):
elements, _ = self._load(targets, (),
selection=selection,
except_targets=except_targets,
fetch_subprojects=False)
return elements
# shell()
#
# Run a shell
#
# Args:
# element (Element): An Element object to run the shell for
# scope (Scope): The scope for the shell (Scope.BUILD or Scope.RUN)
# prompt (str): The prompt to display in the shell
# directory (str): A directory where an existing prestaged sysroot is expected, or None
# mounts (list of HostMount): Additional directories to mount into the sandbox
# isolate (bool): Whether to isolate the environment like we do in builds
# command (list): An argv to launch in the sandbox, or None
#
# Returns:
# (int): The exit code of the launched shell
#
def shell(self, element, scope, prompt, *,
directory=None,
mounts=None,
isolate=False,
command=None):
# Assert we have everything we need built, unless the directory is specified
# in which case we just blindly trust the directory, using the element
# definitions to control the execution environment only.
if directory is None:
missing_deps = [
dep._get_full_name()
for dep in self._pipeline.dependencies([element], scope)
if not dep._cached()
]
if missing_deps:
raise StreamError("Elements need to be built or downloaded before staging a shell environment",
detail="\n".join(missing_deps))
return element._shell(scope, directory, mounts=mounts, isolate=isolate, prompt=prompt, command=command)
# build()
#
# Builds (assembles) elements in the pipeline.
#
# Args:
# targets (list of str): Targets to build
# track_targets (list of str): Specified targets for tracking
# track_except (list of str): Specified targets to except from tracking
# track_cross_junctions (bool): Whether tracking should cross junction boundaries
# build_all (bool): Whether to build all elements, or only those
# which are required to build the target.
#
def build(self, targets, *,
track_targets=None,
track_except=None,
track_cross_junctions=False,
build_all=False):
if build_all:
selection = PipelineSelection.ALL
else:
selection = PipelineSelection.PLAN
elements, track_elements = \
self._load(targets, track_targets,
selection=selection, track_selection=PipelineSelection.ALL,
track_except_targets=track_except,
track_cross_junctions=track_cross_junctions,
use_artifact_config=True,
fetch_subprojects=True,
dynamic_plan=True)
# Remove the tracking elements from the main targets
elements = self._pipeline.subtract_elements(elements, track_elements)
# Assert that the elements we're not going to track are consistent
self._pipeline.assert_consistent(elements)
# Now construct the queues
#
track_queue = None
if track_elements:
track_queue = TrackQueue(self._scheduler)
self._add_queue(track_queue, track=True)
if self._artifacts.has_fetch_remotes():
self._add_queue(PullQueue(self._scheduler))
self._add_queue(FetchQueue(self._scheduler, skip_cached=True))
self._add_queue(BuildQueue(self._scheduler))
if self._artifacts.has_push_remotes():
self._add_queue(PushQueue(self._scheduler))
# Enqueue elements
#
if track_elements:
self._enqueue_plan(track_elements, queue=track_queue)
self._enqueue_plan(elements)
self._run()
# fetch()
#
# Fetches sources on the pipeline.
#
# Args:
# targets (list of str): Targets to fetch
# selection (PipelineSelection): The selection mode for the specified targets
# except_targets (list of str): Specified targets to except from fetching
# track_targets (bool): Whether to track selected targets in addition to fetching
# track_cross_junctions (bool): Whether tracking should cross junction boundaries
#
def fetch(self, targets, *,
selection=PipelineSelection.PLAN,
except_targets=None,
track_targets=False,
track_cross_junctions=False):
if track_targets:
track_targets = targets
track_selection = selection
track_except_targets = except_targets
else:
track_targets = ()
track_selection = PipelineSelection.NONE
track_except_targets = ()
elements, track_elements = \
self._load(targets, track_targets,
selection=selection, track_selection=track_selection,
except_targets=except_targets,
track_except_targets=track_except_targets,
track_cross_junctions=track_cross_junctions,
fetch_subprojects=True)
# Delegated to a shared fetch method
self._fetch(elements, track_elements=track_elements)
# track()
#
# Tracks all the sources of the selected elements.
#
# Args:
# targets (list of str): Targets to track
# selection (PipelineSelection): The selection mode for the specified targets
# except_targets (list of str): Specified targets to except from tracking
# cross_junctions (bool): Whether tracking should cross junction boundaries
#
# If no error is encountered while tracking, then the project files
# are rewritten inline.
#
def track(self, targets, *,
selection=PipelineSelection.REDIRECT,
except_targets=None,
cross_junctions=False):
# We pass no targets to build, only to track. Passing build targets
# would fully load the project configuration, which might not be
# possible before tracking is done.
_, elements = \
self._load([], targets,
selection=selection, track_selection=selection,
except_targets=except_targets,
track_except_targets=except_targets,
track_cross_junctions=cross_junctions,
fetch_subprojects=True)
track_queue = TrackQueue(self._scheduler)
self._add_queue(track_queue, track=True)
self._enqueue_plan(elements, queue=track_queue)
self._run()
# pull()
#
# Pulls artifacts from remote artifact server(s)
#
# Args:
# targets (list of str): Targets to pull
# selection (PipelineSelection): The selection mode for the specified targets
# remote (str): The URL of a specific remote server to pull from, or None
#
# If `remote` specified as None, then regular configuration will be used
# to determine where to pull artifacts from.
#
def pull(self, targets, *,
selection=PipelineSelection.NONE,
remote=None):
use_config = True
if remote:
use_config = False
elements, _ = self._load(targets, (),
selection=selection,
use_artifact_config=use_config,
artifact_remote_url=remote,
fetch_subprojects=True)
if not self._artifacts.has_fetch_remotes():
raise StreamError("No artifact caches available for pulling artifacts")
self._pipeline.assert_consistent(elements)
self._add_queue(PullQueue(self._scheduler))
self._enqueue_plan(elements)
self._run()
# push()
#
# Pushes artifacts to remote artifact server(s)
#
# Args:
# targets (list of str): Targets to push
# selection (PipelineSelection): The selection mode for the specified targets
# remote (str): The URL of a specific remote server to push to, or None
#
# If `remote` specified as None, then regular configuration will be used
# to determine where to push artifacts to.
#
def push(self, targets, *,
selection=PipelineSelection.NONE,
remote=None):
use_config = True
if remote:
use_config = False
elements, _ = self._load(targets, (),
selection=selection,
use_artifact_config=use_config,
artifact_remote_url=remote,
fetch_subprojects=True)
if not self._artifacts.has_push_remotes():
raise StreamError("No artifact caches available for pushing artifacts")
# Mark all dependencies of all selected elements as "pulled" before
# trying to push.
#
# In non-strict mode, elements which are cached by their weak keys
# will attempt to pull a remote artifact by its strict key and prefer
# a strict key artifact; however, pull does not occur when running
# a `bst push` session.
#
# Marking the elements as pulled is a workaround which ensures that
# the cache keys are resolved before pushing.
#
for element in elements:
element._pull_done()
self._pipeline.assert_consistent(elements)
self._add_queue(PushQueue(self._scheduler))
self._enqueue_plan(elements)
self._run()
# checkout()
#
# Checkout target artifact to the specified location
#
# Args:
# target (str): Target to checkout
# location (str): Location to checkout the artifact to
# force (bool): Whether files can be overwritten if necessary
# deps (str): The dependencies to checkout
# integrate (bool): Whether to run integration commands
# hardlinks (bool): Whether checking out files hardlinked to
# their artifacts is acceptable
# tar (bool): If true, a tarball from the artifact contents will
# be created, otherwise the file tree of the artifact
# will be placed at the given location. If true and
# location is '-', the tarball will be dumped on the
# standard output.
#
def checkout(self, target, *,
location=None,
force=False,
deps='run',
integrate=True,
hardlinks=False,
tar=False):
# We only have one target in a checkout command
elements, _ = self._load((target,), (), fetch_subprojects=True)
target = elements[0]
if not tar:
try:
os.makedirs(location, exist_ok=True)
except OSError as e:
raise StreamError("Failed to create checkout directory: '{}'"
.format(e)) from e
if not tar:
if not os.access(location, os.W_OK):
raise StreamError("Checkout directory '{}' not writable"
.format(location))
if not force and os.listdir(location):
raise StreamError("Checkout directory '{}' not empty"
.format(location))
elif os.path.exists(location) and location != '-':
if not os.access(location, os.W_OK):
raise StreamError("Output file '{}' not writable"
.format(location))
if not force and os.path.exists(location):
raise StreamError("Output file '{}' already exists"
.format(location))
# Stage deps into a temporary sandbox first
try:
with target._prepare_sandbox(Scope.RUN, None, deps=deps,
integrate=integrate) as sandbox:
# Copy or move the sandbox to the target directory
sandbox_root = sandbox.get_directory()
if not tar:
with target.timed_activity("Checking out files in '{}'"
.format(location)):
try:
if hardlinks:
self._checkout_hardlinks(sandbox_root, location)
else:
utils.copy_files(sandbox_root, location)
except OSError as e:
raise StreamError("Failed to checkout files: '{}'"
.format(e)) from e
else:
if location == '-':
with target.timed_activity("Creating tarball"):
with os.fdopen(sys.stdout.fileno(), 'wb') as fo:
with tarfile.open(fileobj=fo, mode="w|") as tf:
Stream._add_directory_to_tarfile(
tf, sandbox_root, '.')
else:
with target.timed_activity("Creating tarball '{}'"
.format(location)):
with tarfile.open(location, "w:") as tf:
Stream._add_directory_to_tarfile(
tf, sandbox_root, '.')
except BstError as e:
raise StreamError("Error while staging dependencies into a sandbox"
": '{}'".format(e), detail=e.detail, reason=e.reason) from e
# workspace_open
#
# Open a project workspace
#
# Args:
# target (str): The target element to open the workspace for
# directory (str): The directory to stage the source in
# no_checkout (bool): Whether to skip checking out the source
# track_first (bool): Whether to track and fetch first
# force (bool): Whether to ignore contents in an existing directory
#
def workspace_open(self, target, directory, *,
no_checkout,
track_first,
force):
if track_first:
track_targets = (target,)
else:
track_targets = ()
elements, track_elements = self._load((target,), track_targets,
selection=PipelineSelection.REDIRECT,
track_selection=PipelineSelection.REDIRECT)
target = elements[0]
directory = os.path.abspath(directory)
if not list(target.sources()):
build_depends = [x.name for x in target.dependencies(Scope.BUILD, recurse=False)]
if not build_depends:
raise StreamError("The given element has no sources")
detail = "Try opening a workspace on one of its dependencies instead:\n"
detail += " \n".join(build_depends)
raise StreamError("The given element has no sources", detail=detail)
workspaces = self._context.get_workspaces()
# Check for workspace config
workspace = workspaces.get_workspace(target._get_full_name())
if workspace and not force:
raise StreamError("Workspace '{}' is already defined at: {}"
.format(target.name, workspace.get_absolute_path()))
# If we're going to checkout, we need at least a fetch,
# if we were asked to track first, we're going to fetch anyway.
#
if not no_checkout or track_first:
track_elements = []
if track_first:
track_elements = elements
self._fetch(elements, track_elements=track_elements)
if not no_checkout and target._get_consistency() != Consistency.CACHED:
raise StreamError("Could not stage uncached source. " +
"Use `--track` to track and " +
"fetch the latest version of the " +
"source.")
if workspace:
workspaces.delete_workspace(target._get_full_name())
workspaces.save_config()
shutil.rmtree(directory)
try:
os.makedirs(directory, exist_ok=True)
except OSError as e:
raise StreamError("Failed to create workspace directory: {}".format(e)) from e
workspaces.create_workspace(target._get_full_name(), directory)
if not no_checkout:
with target.timed_activity("Staging sources to {}".format(directory)):
target._open_workspace()
workspaces.save_config()
self._message(MessageType.INFO, "Saved workspace configuration")
# workspace_close
#
# Close a project workspace
#
# Args:
# element_name (str): The element name to close the workspace for
# remove_dir (bool): Whether to remove the associated directory
#
def workspace_close(self, element_name, *, remove_dir):
workspaces = self._context.get_workspaces()
workspace = workspaces.get_workspace(element_name)
# Remove workspace directory if prompted
if remove_dir:
with self._context.timed_activity("Removing workspace directory {}"
.format(workspace.get_absolute_path())):
try:
shutil.rmtree(workspace.get_absolute_path())
except OSError as e:
raise StreamError("Could not remove '{}': {}"
.format(workspace.get_absolute_path(), e)) from e
# Delete the workspace and save the configuration
workspaces.delete_workspace(element_name)
workspaces.save_config()
self._message(MessageType.INFO, "Closed workspace for {}".format(element_name))
# workspace_reset
#
# Reset a workspace to its original state, discarding any user
# changes.
#
# Args:
# targets (list of str): The target elements to reset the workspace for
# soft (bool): Only reset workspace state
# track_first (bool): Whether to also track the sources first
#
def workspace_reset(self, targets, *, soft, track_first):
if track_first:
track_targets = targets
else:
track_targets = ()
elements, track_elements = self._load(targets, track_targets,
selection=PipelineSelection.REDIRECT,
track_selection=PipelineSelection.REDIRECT)
nonexisting = []
for element in elements:
if not self.workspace_exists(element.name):
nonexisting.append(element.name)
if nonexisting:
raise StreamError("Workspace does not exist", detail="\n".join(nonexisting))
# Do the tracking first
if track_first:
self._fetch(elements, track_elements=track_elements)
workspaces = self._context.get_workspaces()
for element in elements:
workspace = workspaces.get_workspace(element._get_full_name())
workspace_path = workspace.get_absolute_path()
if soft:
workspace.prepared = False
self._message(MessageType.INFO, "Reset workspace state for {} at: {}"
.format(element.name, workspace_path))
continue
with element.timed_activity("Removing workspace directory {}"
.format(workspace_path)):
try:
shutil.rmtree(workspace_path)
except OSError as e:
raise StreamError("Could not remove '{}': {}"
.format(workspace_path, e)) from e
workspaces.delete_workspace(element._get_full_name())
workspaces.create_workspace(element._get_full_name(), workspace_path)
with element.timed_activity("Staging sources to {}".format(workspace_path)):
element._open_workspace()
self._message(MessageType.INFO,
"Reset workspace for {} at: {}".format(element.name,
workspace_path))
workspaces.save_config()
# workspace_exists
#
# Check if a workspace exists
#
# Args:
# element_name (str): The element name to check the workspace for, or None
#
# Returns:
# (bool): True if the workspace exists
#
# If None is specified for `element_name`, then this will return
# True if there are any existing workspaces.
#
def workspace_exists(self, element_name=None):
workspaces = self._context.get_workspaces()
if element_name:
workspace = workspaces.get_workspace(element_name)
if workspace:
return True
elif any(workspaces.list()):
return True
return False
# workspace_list
#
# Serializes the workspaces and dumps them in YAML to stdout.
#
def workspace_list(self):
workspaces = []
for element_name, workspace_ in self._context.get_workspaces().list():
workspace_detail = {
'element': element_name,
'directory': workspace_.get_absolute_path(),
}
workspaces.append(workspace_detail)
_yaml.dump({
'workspaces': workspaces
})
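# Illustrative sketch of the YAML emitted above (the element name and
# directory are hypothetical, and the exact layout depends on the YAML dumper):
#
#   workspaces:
#   - element: hello.bst
#     directory: /home/user/workspaces/hello
#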
# source_bundle()
#
# Create a host buildable tarball bundle for the given target.
#
# Args:
# target (str): The target element to bundle
# directory (str): The directory to output the tarball
# track_first (bool): Track new source references before bundling
# compression (str): The compression type to use
# force (bool): Overwrite an existing tarball
#
def source_bundle(self, target, directory, *,
track_first=False,
force=False,
compression="gz",
except_targets=()):
if track_first:
track_targets = (target,)
else:
track_targets = ()
elements, track_elements = self._load((target,), track_targets,
selection=PipelineSelection.ALL,
except_targets=except_targets,
track_selection=PipelineSelection.ALL,
fetch_subprojects=True)
# source-bundle only supports one target
target = self.targets[0]
self._message(MessageType.INFO, "Bundling sources for target {}".format(target.name))
# Find the correct filename for the compression algorithm
tar_location = os.path.join(directory, target.normal_name + ".tar")
if compression != "none":
tar_location += "." + compression
# Attempt writing a file to generate a good error message
# early
#
# FIXME: A bit hackish
try:
with open(tar_location, mode="x") as _: # pylint: disable=unspecified-encoding
pass
os.remove(tar_location)
except IOError as e:
raise StreamError("Cannot write to {0}: {1}"
.format(tar_location, e)) from e
# Fetch and possibly track first
#
self._fetch(elements, track_elements=track_elements)
# We don't use the scheduler for this as it is almost entirely IO
# bound.
# Create a temporary directory to build the source tree in
builddir = self._context.builddir
prefix = "{}-".format(target.normal_name)
with TemporaryDirectory(prefix=prefix, dir=builddir) as tempdir:
source_directory = os.path.join(tempdir, 'source')
try:
os.makedirs(source_directory)
except OSError as e:
raise StreamError("Failed to create directory: {}"
.format(e)) from e
# Any elements that don't implement _write_script
# should not be included in the later stages.
elements = [
element for element in elements
if self._write_element_script(source_directory, element)
]
self._write_element_sources(tempdir, elements)
self._write_build_script(tempdir, elements)
self._collect_sources(tempdir, tar_location,
target.normal_name, compression)
# redirect_element_names()
#
# Takes a list of element names and returns a list where elements have been
# redirected to their source elements if the element file exists, and just
# the name, if not.
#
# Args:
# elements (list of str): The element names to redirect
#
# Returns:
# (list of str): The element names after redirecting
#
def redirect_element_names(self, elements):
element_dir = self._project.element_path
load_elements = []
output_elements = set()
for e in elements:
element_path = os.path.join(element_dir, e)
if os.path.exists(element_path):
load_elements.append(e)
else:
output_elements.add(e)
if load_elements:
loaded_elements, _ = self._load(load_elements, (),
selection=PipelineSelection.REDIRECT,
track_selection=PipelineSelection.REDIRECT)
for e in loaded_elements:
output_elements.add(e.name)
return list(output_elements)
#############################################################
# Scheduler API forwarding #
#############################################################
# running
#
# Whether the scheduler is running
#
@property
def running(self):
return self._scheduler.loop is not None
# suspended
#
# Whether the scheduler is currently suspended
#
@property
def suspended(self):
return self._scheduler.suspended
# terminated
#
# Whether the scheduler is currently terminated
#
@property
def terminated(self):
return self._scheduler.terminated
# elapsed_time
#
# Elapsed time since the session start
#
@property
def elapsed_time(self):
return self._scheduler.elapsed_time()
# terminate()
#
# Terminate jobs
#
def terminate(self):
self._scheduler.terminate_jobs()
# quit()
#
# Quit the session, this will continue with any ongoing
# jobs, use Stream.terminate() instead for cancellation
# of ongoing jobs
#
def quit(self):
self._scheduler.stop_queueing()
# suspend()
#
# Context manager to suspend ongoing jobs
#
@contextmanager
def suspend(self):
with self._scheduler.jobs_suspended():
yield
#############################################################
# Private Methods #
#############################################################
# _load()
#
# A convenience method for loading element lists
#
# If `targets` is not empty, the used project configuration will be
# fully loaded. If `targets` is empty, tracking will still be
# resolved for elements in `track_targets`, but no build pipeline
# will be resolved. This behavior is important for track(), so that
# it does not trigger full loading of the project configuration.
#
# Args:
# targets (list of str): Main targets to load
# track_targets (list of str): Tracking targets
# selection (PipelineSelection): The selection mode for the specified targets
# track_selection (PipelineSelection): The selection mode for the specified tracking targets
# except_targets (list of str): Specified targets to except from fetching
# track_except_targets (list of str): Specified targets to except from tracking
# track_cross_junctions (bool): Whether tracking should cross junction boundaries
# use_artifact_config (bool): Whether to initialize artifacts with the config
# artifact_remote_url (str): A remote url for initializing the artifacts
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Returns:
# (list of Element): The primary element selection
# (list of Element): The tracking element selection
#
def _load(self, targets, track_targets, *,
selection=PipelineSelection.NONE,
track_selection=PipelineSelection.NONE,
except_targets=(),
track_except_targets=(),
track_cross_junctions=False,
use_artifact_config=False,
artifact_remote_url=None,
fetch_subprojects=False,
dynamic_plan=False):
# Load rewritable if we have any tracking selection to make
rewritable = False
if track_targets:
rewritable = True
# Load all targets
elements, except_elements, track_elements, track_except_elements = \
self._pipeline.load([targets, except_targets, track_targets, track_except_targets],
rewritable=rewritable,
fetch_subprojects=fetch_subprojects)
# Hold on to the targets
self.targets = elements
# Here we should raise an error if the track_elements targets
# are not dependencies of the primary targets, this is not
# supported.
#
# This can happen with `bst build --track`
#
if targets and not self._pipeline.targets_include(elements, track_elements):
raise StreamError("Specified tracking targets that are not "
"within the scope of primary targets")
# First take care of marking tracking elements, this must be
# done before resolving element states.
#
assert track_selection != PipelineSelection.PLAN
# Tracked elements are split by owner projects in order to
# filter cross junctions tracking dependencies on their
# respective project.
track_projects = {}
for element in track_elements:
project = element._get_project()
if project not in track_projects:
track_projects[project] = [element]
else:
track_projects[project].append(element)
track_selected = []
for project, project_elements in track_projects.items():
selected = self._pipeline.get_selection(project_elements, track_selection)
selected = self._pipeline.track_cross_junction_filter(project,
selected,
track_cross_junctions)
track_selected.extend(selected)
track_selected = self._pipeline.except_elements(track_elements,
track_selected,
track_except_elements)
for element in track_selected:
element._schedule_tracking()
if not targets:
self._pipeline.resolve_elements(track_selected)
return [], track_selected
# ArtifactCache.setup_remotes expects all projects to be fully loaded
for project in self._context.get_projects():
project.ensure_fully_loaded()
# Connect to remote caches, this needs to be done before resolving element state
self._artifacts.setup_remotes(use_config=use_artifact_config, remote_url=artifact_remote_url)
# Now move on to loading primary selection.
#
self._pipeline.resolve_elements(elements)
selected = self._pipeline.get_selection(elements, selection, silent=False)
selected = self._pipeline.except_elements(elements,
selected,
except_elements)
# Set the "required" artifacts that should not be removed
# while this pipeline is active
#
# It must include all the artifacts which are required by the
# final product. Note that this is a superset of the build plan.
#
self._artifacts.mark_required_elements(self._pipeline.dependencies(elements, Scope.ALL))
if selection == PipelineSelection.PLAN and dynamic_plan:
# We use a dynamic build plan, only request artifacts of top-level targets,
# others are requested dynamically as needed.
# This avoids pulling, fetching, or building unneeded build-only dependencies.
for element in elements:
element._set_required()
else:
for element in selected:
element._set_required()
return selected, track_selected
# _message()
#
# Local message propagator
#
def _message(self, message_type, message, **kwargs):
args = dict(kwargs)
self._context.message(
Message(None, message_type, message, **args))
# _add_queue()
#
# Adds a queue to the stream
#
# Args:
# queue (Queue): Queue to add to the pipeline
# track (bool): Whether this is the tracking queue
#
def _add_queue(self, queue, *, track=False):
self.queues.append(queue)
if not (track or self._first_non_track_queue):
self._first_non_track_queue = queue
# _enqueue_plan()
#
# Enqueues planned elements to the specified queue.
#
# Args:
# plan (list of Element): The list of elements to be enqueued
# queue (Queue): The target queue, defaults to the first non-track queue
#
def _enqueue_plan(self, plan, *, queue=None):
queue = queue or self._first_non_track_queue
queue.enqueue(plan)
self.session_elements += plan
# _run()
#
# Common function for running the scheduler
#
def _run(self):
# Inform the frontend of the full list of elements
# and the list of elements which will be processed in this run
#
self.total_elements = list(self._pipeline.dependencies(self.targets, Scope.ALL))
if self._session_start_callback is not None:
self._session_start_callback()
_, status = self._scheduler.run(self.queues)
if status == SchedStatus.ERROR:
raise StreamError()
if status == SchedStatus.TERMINATED:
raise StreamError(terminated=True)
# _fetch()
#
# Performs the fetch job, the body of this function is here because
# it is shared between a few internals.
#
# Args:
# elements (list of Element): Elements to fetch
# track_elements (list of Element): Elements to track
#
def _fetch(self, elements, *, track_elements=None):
if track_elements is None:
track_elements = []
# Subtract the track elements from the fetch elements, they will be added separately
fetch_plan = self._pipeline.subtract_elements(elements, track_elements)
# Assert consistency for the fetch elements
self._pipeline.assert_consistent(fetch_plan)
# Filter out elements with cached sources, only from the fetch plan
# let the track plan resolve new refs.
cached = [elt for elt in fetch_plan if elt._get_consistency() == Consistency.CACHED]
fetch_plan = self._pipeline.subtract_elements(fetch_plan, cached)
# Construct queues, enqueue and run
#
track_queue = None
if track_elements:
track_queue = TrackQueue(self._scheduler)
self._add_queue(track_queue, track=True)
self._add_queue(FetchQueue(self._scheduler))
if track_elements:
self._enqueue_plan(track_elements, queue=track_queue)
self._enqueue_plan(fetch_plan)
self._run()
# Helper function for checkout()
#
def _checkout_hardlinks(self, sandbox_root, directory):
try:
removed = utils.safe_remove(directory)
except OSError as e:
raise StreamError("Failed to remove checkout directory: {}".format(e)) from e
if removed:
# Try a simple rename of the sandbox root; if that
# doesn't cut it, then do the regular link files code path
try:
os.rename(sandbox_root, directory)
except OSError:
os.makedirs(directory, exist_ok=True)
utils.link_files(sandbox_root, directory)
else:
utils.link_files(sandbox_root, directory)
# Add a directory entry deterministically to a tar file
#
# This function takes extra steps to ensure the output is deterministic.
# First, it sorts the results of os.listdir() to ensure the ordering of
# the files in the archive is the same. Second, it sets a fixed
# timestamp for each entry. See also https://bugs.python.org/issue24465.
@staticmethod
def _add_directory_to_tarfile(tf, dir_name, dir_arcname, mtime=0):
for filename in sorted(os.listdir(dir_name)):
name = os.path.join(dir_name, filename)
arcname = os.path.join(dir_arcname, filename)
tarinfo = tf.gettarinfo(name, arcname)
tarinfo.mtime = mtime
if tarinfo.isreg():
with open(name, "rb") as f:
tf.addfile(tarinfo, f)
elif tarinfo.isdir():
tf.addfile(tarinfo)
Stream._add_directory_to_tarfile(tf, name, arcname, mtime)
else:
tf.addfile(tarinfo)
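# Illustrative sketch (not executed) of how this helper is used, with a
# hypothetical directory path:
#
#   with tarfile.open('sources.tar', 'w:') as tf:
#       Stream._add_directory_to_tarfile(tf, '/path/to/tree', '.')
#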
# Write the element build script to the given directory
def _write_element_script(self, directory, element):
try:
element._write_script(directory)
except ImplError:
return False
return True
# Write all source elements to the given directory
def _write_element_sources(self, directory, elements):
for element in elements:
source_dir = os.path.join(directory, "source")
element_source_dir = os.path.join(source_dir, element.normal_name)
element._stage_sources_at(element_source_dir)
# Write a master build script to the sandbox
def _write_build_script(self, directory, elements):
module_string = ""
for element in elements:
module_string += shlex.quote(element.normal_name) + " "
script_path = os.path.join(directory, "build.sh")
with open(_site.build_all_template, "r", encoding="utf-8") as f:
script_template = f.read()
with utils.save_file_atomic(script_path, "w") as script:
script.write(script_template.format(modules=module_string))
os.chmod(script_path, stat.S_IEXEC | stat.S_IREAD)
# Collect the sources in the given sandbox into a tarfile
def _collect_sources(self, directory, tar_name, element_name, compression):
with self._context.timed_activity("Creating tarball {}".format(tar_name)):
if compression == "none":
permissions = "w:"
else:
permissions = "w:" + compression
with tarfile.open(tar_name, permissions) as tar:
tar.add(directory, arcname=element_name)
buildstream-1.6.9/buildstream/_variables.py 0000664 0000000 0000000 00000054576 14375152700 0021063 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2020 Codethink Limited
# Copyright (C) 2019 Bloomberg L.P.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Daniel Silverstone
# Benjamin Schubert
import re
import sys
from ._exceptions import LoadError, LoadErrorReason
from . import _yaml
########################################################
# Understanding Value Expressions #
########################################################
#
# This code uses the term "value expression" a lot to refer to `str` objects
# which have references to variables in them, and also to `list` objects which
# are effectively broken down strings.
#
# Ideally we would have a ValueExpression type in order to make this more
# comprehensive, but this would unfortunately introduce unnecessary overhead,
# making the code measurably slower.
#
# Value Expression Strings
# ------------------------
# Strings which contain variables in them, such as:
#
# "My name is %{username}, good day."
#
#
# Parsed Value Expression Lists
# -----------------------------
# Using `re.split()` from python's regular expression implementation, we
# parse the list using our locally defined VALUE_EXPRESSION_REGEX, which
# breaks down the string into a list of "literal" and "variable" components.
#
# The "literal" components are literal portions of the string which need
# no substitution, while the "variable" components represent variable names
# which need to be substituted with their corresponding resolved values.
#
# The parsed variable expressions have the following properties:
#
# * They are sparse, some of the "literal" values contain zero length
# strings which can be ignored.
#
# * Literal values are found only at even indices of the parsed
# variable expression
#
# * Variable names are found only at odd indices
#
# The above example "My name is %{username}, good day." is broken down
# into a parsed value expression as follows:
#
# [
# "My name is ", # <- Index 0, literal value
# "username", # <- Index 1, variable name, '%{ ... }' discarded
# ", good day." # <- Index 2, literal value
# ]
#
# Maximum recursion depth using the fast (recursive) variable resolution
# algorithm.
#
MAX_RECURSION_DEPTH = 200
# Regular expression used to parse %{variables} in value expressions
#
# Note that variables are allowed to have dashes
#
VALUE_EXPRESSION_REGEX = re.compile(r"\%\{([a-zA-Z][a-zA-Z0-9_-]*)\}")
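# For example (illustrative, following the regex above): variable names must
# start with a letter and may contain letters, digits, underscores and dashes,
# so '%{max-jobs}' and '%{install-root}' are matched, while something like
# '%{1foo}' is left untouched as literal text.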
# Cache for the parsed expansion strings.
#
VALUE_EXPRESSION_CACHE = {
# Prime the cache with the empty string, since otherwise it can
# cause issues with the parser, and working around that would slow things down
"": [""],
}
# Variables()
#
# The Variables object resolves the variable references in the given MappingNode,
# expecting that any dictionary values which contain variable references can be
# resolved from the same dictionary.
#
# Each Element creates its own Variables instance to track the configured
# variable settings for the element.
#
# Notably, this object is delegated the responsibility of expanding
# variables in yaml Node hierarchies and substituting variables in strings
# in the context of a given Element's variable configuration.
#
# Args:
# node (dict): A node loaded and composited with yaml tools
#
# Raises:
# LoadError, if unresolved variables, or cycles in resolution, occur.
#
class Variables:
#################################################################
# Dunder Methods #
#################################################################
def __init__(self, node):
# The original MappingNode, we need to keep this
# around for proper error reporting.
#
self._original = node
# The value map, this dictionary contains either unresolved
# value expressions, or resolved values.
#
# Each mapping value is a list, in the case that the value
# is resolved, then the list is only 1 element long.
#
self._values = self._init_values(node)
# __getitem__()
#
# Fetches a resolved variable by its name, allows
# addressing the Variables instance like a dictionary.
#
# Args:
# name (str): The name of the variable
#
# Returns:
# (str): The resolved variable value
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def __getitem__(self, name):
if name not in self._values:
raise KeyError(name)
return self._expand_var(name)
# __contains__()
#
# Checks whether a given variable exists, allows
# supporting `if 'foo' in variables` expressions.
#
# Args:
# name (str): The name of the variable to check for
#
# Returns:
# (bool): True if `name` is a valid variable
#
def __contains__(self, name):
return name in self._values
# __iter__()
#
# Provide an iterator over all variables' effective values
#
# Returns:
# (Iterator[Tuple[str, str]])
#
def __iter__(self):
return _VariablesIterator(self)
#################################################################
# Public API #
#################################################################
# check()
#
# Assert that all variables declared on this Variables
# instance have been resolved properly, and reports errors
# for undefined references and circular references.
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def check(self):
# Just resolve all variables.
for key in self._values:
self._expand_var(key)
# get()
#
# Expand definition of variable by name. If the variable is not
# defined, it will return None instead of failing.
#
# Args:
# name (str): Name of the variable to expand
#
# Returns:
# (str|None): The expanded value for the variable, or None if the variable was not defined.
#
def get(self, name):
if name not in self._values:
return None
return self[name]
# subst():
#
# Substitutes any variables in 'string' and returns the result.
#
# Args:
# string (str): The string to substitute
# provenance (Provenance): The provenance of the string
#
# Returns:
# (str): The new string with any substitutions made
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def subst(self, string, provenance):
value_expression = _parse_value_expression(string)
return self._expand_value_expression(value_expression, provenance)
#################################################################
# Private API #
#################################################################
# _init_values()
#
# Initialize the table of values.
#
# The value table is a dictionary keyed by the variable names where
# the values are value expressions (lists) which are initially unresolved.
#
# Value expressions are later resolved on demand and replaced in this
# table with single element lists.
#
# Args:
# node (dict): The original variables mapping node
#
# Returns:
# (dict): A dictionary of value expressions (lists)
#
def _init_values(self, node):
# Special case, if notparallel is specified in the variables for this
# element, then override max-jobs to be 1.
# Initialize it as a string as all variables are processed as strings.
#
if _yaml.node_get(node, bool, 'notparallel', default_value=False):
node['max-jobs'] = str(1)
ret = {}
for key in node.keys():
value = _yaml.node_get(node, str, key)
ret[sys.intern(key)] = _parse_value_expression(value)
return ret
# _expand_var()
#
# Expand and cache a variable definition.
#
# This will try the fast, recursive path first and fallback to
# the slower iterative codepath.
#
# Args:
# name (str): Name of the variable to expand
#
# Returns:
# (str): The expanded value of variable
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def _expand_var(self, name):
try:
return self._fast_expand_var(name)
except (KeyError, RecursionError):
return self._slow_expand_var(name)
# _expand_value_expression()
#
# Expands a value expression
#
# This will try the fast, recursive path first and fallback to
# the slower iterative codepath.
#
# Args:
# value_expression (list): The parsed value expression to be expanded
# provenance (Provenance): The provenance of the value expression
#
# Returns:
# (str): The expanded value expression
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def _expand_value_expression(self, value_expression, provenance):
try:
return self._fast_expand_value_expression(value_expression)
except (KeyError, RecursionError):
return self._slow_expand_value_expression(None, value_expression, provenance)
#################################################################
# Resolution algorithm: fast path #
#################################################################
# _fast_expand_var()
#
# Fast, recursive path for variable expansion
#
# Args:
# name (str): Name of the variable to expand
# counter (int): Number of recursion cycles (used only in recursion)
#
# Returns:
# (str): The expanded value of variable
#
# Raises:
# (KeyError): If a reference to an undefined variable is encountered
# (RecursionError): If MAX_RECURSION_DEPTH recursion cycles are exceeded
#
def _fast_expand_var(self, name, counter=0):
value_expression = self._values[name]
if len(value_expression) > 1:
sub = self._fast_expand_value_expression(value_expression, counter)
value_expression = [sys.intern(sub)]
self._values[name] = value_expression
return value_expression[0]
# _fast_expand_value_expression()
#
# Fast, recursive path for value expression expansion.
#
# Args:
# value_expression (list): The parsed value expression to be expanded
# counter (int): Number of recursion cycles (used only in recursion)
#
# Returns:
# (str): The expanded value expression
#
# Raises:
# (KeyError): If a reference to an undefined variable is encountered
# (RecursionError): If MAX_RECURSION_DEPTH recursion cycles are exceeded
#
def _fast_expand_value_expression(self, value_expression, counter=0):
if counter > MAX_RECURSION_DEPTH:
raise RecursionError()
acc = []
for idx, value in enumerate(value_expression):
if (idx % 2) == 0:
acc.append(value)
else:
acc.append(self._fast_expand_var(value, counter + 1))
return "".join(acc)
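# Illustrative sketch (not executed): given a value table such as
#
#   self._values = {
#       "prefix": ["/usr"],                  # already resolved
#       "bindir": ["", "prefix", "/bin"],    # parsed from "%{prefix}/bin"
#   }
#
# _fast_expand_var("bindir") recursively resolves "prefix" and joins the
# literal parts, yielding "/usr/bin" and caching it back as ["/usr/bin"].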
#################################################################
# Resolution algorithm: slow path #
#################################################################
# _slow_expand_var()
#
# Slow, iterative path for variable expansion with full error reporting
#
# Args:
# name (str): Name of the variable to expand
#
# Returns:
# (str): The expanded value of variable
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def _slow_expand_var(self, name):
value_expression = self._get_checked_value_expression(name, None, None)
if len(value_expression) > 1:
expanded = self._slow_expand_value_expression(name, value_expression, None)
value_expression = [sys.intern(expanded)]
self._values[name] = value_expression
return value_expression[0]
# _slow_expand_value_expression()
#
# Slow, iterative path for value expression expansion with full error reporting
#
# Note that either `varname` or `node` must be provided, these are used to
# identify the provenance of this value expression (which might be the value
# of a variable, or a value expression found elsewhere in project YAML which
# needs to be substituted).
#
# Args:
# varname (str|None): The variable name associated with this value expression, if any
# value_expression (list): The parsed value expression to be expanded
# provenance (Provenance): The provenance who is asking for an expansion
#
# Returns:
# (str): The expanded value expression
#
# Raises:
# (LoadError): In the case of an undefined variable or
# a cyclic variable reference
#
def _slow_expand_value_expression(self, varname, value_expression, provenance):
idx = 0
resolved_value = None
# We will collect the varnames and value expressions which need
# to be resolved in the loop, sorted by dependency, and then
# finally reverse through them resolving them one at a time
#
resolved_varnames = []
resolved_values = []
step = ResolutionStep(varname, value_expression, None)
while step:
# Keep a hold of the current overall step
this_step = step
step = step.prev
# Check for circular dependencies
this_step.check_circular(self._original)
for idx, value in enumerate(this_step.value_expression):
# Skip literal parts of the value expression
if (idx % 2) == 0:
continue
iter_value_expression = self._get_checked_value_expression(value, this_step.referee, provenance)
# Queue up this value.
#
# Even if the value was already resolved, we need it in context to resolve
# previously enqueued variables
resolved_values.append(iter_value_expression)
resolved_varnames.append(value)
# Queue up the values dependencies.
#
if len(iter_value_expression) > 1:
new_step = ResolutionStep(value, iter_value_expression, this_step)
# Link it to the end of the stack
new_step.prev = step
step = new_step
# We've now constructed the dependencies queue such that
# later dependencies are on the right; we can now safely work
# backwards, and the last (leftmost) resolved value is the one
# we want to return.
#
for iter_value_expression, resolved_varname in zip(reversed(resolved_values), reversed(resolved_varnames)):
# Resolve as needed
#
if len(iter_value_expression) > 1:
resolved_value = self._resolve_value_expression(iter_value_expression)
iter_value_expression = [resolved_value]
if resolved_varname is not None:
self._values[resolved_varname] = iter_value_expression
return resolved_value
# _get_checked_value_expression()
#
# Fetches a value expression from the value table and raises a user
# facing error if the value is undefined.
#
# Args:
# varname (str): The variable name to fetch
# referee (str): The variable name referring to `varname`, or None
# provenance (Provenance): The provenance for which we need to resolve `varname`
#
# Returns:
# (list): The value expression for varname
#
# Raises:
# (LoadError): An appropriate error in case of undefined variables
#
def _get_checked_value_expression(self, varname, referee=None, provenance=None):
#
# Fetch the value and detect undefined references
#
try:
return self._values[varname]
except KeyError as e:
# Either the provenance is the toplevel calling provenance,
# or it is the provenance of the direct referee
if referee:
p = _yaml.node_get_provenance(self._original, referee)
else:
p = provenance
error_message = "Reference to undefined variable '{}'".format(varname)
if p:
error_message = "{}: {}".format(p, error_message)
raise LoadError(LoadErrorReason.UNRESOLVED_VARIABLE, error_message) from e
# _resolve_value_expression()
#
# Resolves a value expression with the expectation that all
# variables within this value expression have already been
# resolved and updated in the Variables._values table.
#
# This is used as a part of the iterative resolution codepath,
# where value expressions are first sorted by dependency before
# being resolved in one go.
#
# Args:
# value_expression (list): The value expression to resolve
#
# Returns:
# (str): The resolved value expression
#
def _resolve_value_expression(self, value_expression):
acc = []
for idx, value in enumerate(value_expression):
if (idx % 2) == 0:
acc.append(value)
else:
acc.append(self._values[value][0])
return "".join(acc)
# ResolutionStep()
#
# The context for a single iteration in variable resolution.
#
# Args:
# referee (str): The name of the referring variable
# value_expression (list): The parsed value expression to be expanded
# parent (ResolutionStep): The parent ResolutionStep
#
class ResolutionStep:
def __init__(self, referee, value_expression, parent):
self.referee = referee
self.value_expression = value_expression
self.parent = parent
self.prev = None
# check_circular()
#
# Check for circular references in this step.
#
# Args:
# original_values (MappingNode): The original MappingNode for the Variables
#
# Raises:
# (LoadError): Will raise a user facing LoadError with
# LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE in case
# circular references were encountered.
#
def check_circular(self, original_values):
step = self.parent
while step:
if self.referee is step.referee:
self._raise_circular_reference_error(step, original_values)
step = step.parent
# _raise_circular_reference_error()
#
# Helper function to construct a full report and raise the LoadError
# with LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE.
#
# Args:
# conflict (ResolutionStep): The resolution step which conflicts with this step
# original_values (MappingNode): The original node to extract provenances from
#
# Raises:
# (LoadError): Unconditionally
#
def _raise_circular_reference_error(self, conflict, original_values):
error_lines = []
step = self
while step is not conflict:
if step.parent:
referee = step.parent.referee
else:
referee = self.referee
provenance = _yaml.node_get_provenance(original_values, referee)
error_lines.append("{}: Variable '{}' refers to variable '{}'".format(provenance, referee, step.referee))
step = step.parent
raise LoadError(LoadErrorReason.CIRCULAR_REFERENCE_VARIABLE,
"Circular dependency detected on variable '{}'".format(self.referee),
detail="\n".join(reversed(error_lines)))
# _parse_value_expression()
#
# Tries to fetch the parsed value expression from the cache, parsing and
# caching value expressions on demand and returns the parsed value expression.
#
# Args:
# value_expression (str): The value expression in string form to parse
#
# Returns:
# (list): The parsed value expression in list form.
#
def _parse_value_expression(value_expression):
try:
return VALUE_EXPRESSION_CACHE[value_expression]
except KeyError:
# This use of the regex turns a string like "foo %{bar} baz" into
# a list ["foo ", "bar", " baz"]
#
# The result is a parsed value expression, where even indices
# contain literal parts of the value and odd indices contain
# variable names which need to be replaced by resolved variables.
#
splits = VALUE_EXPRESSION_REGEX.split(value_expression)
# Optimize later routines by discarding any unnecessary trailing
# empty strings.
#
if splits[-1] == '':
del splits[-1]
# We intern the string parts to try and reduce the memory impact
# of the cache.
#
ret = [sys.intern(s) for s in splits]
# Cache and return the value expression
#
VALUE_EXPRESSION_CACHE[value_expression] = ret
return ret
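# Illustrative sketch (not executed) of the parsing performed above:
#
#   _parse_value_expression("install %{bindir}")
#   # -> ["install ", "bindir"]      (trailing empty literal discarded)
#
#   _parse_value_expression("plain string")
#   # -> ["plain string"]            (no variables, a single literal)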
# Iterator for all flatten variables.
# Used by Variables.__iter__
class _VariablesIterator:
def __init__(self, variables):
self._variables = variables
self._iter = iter(variables._values)
def __iter__(self):
return self
def __next__(self):
name = next(self._iter)
return name, self._variables._expand_var(name)
buildstream-1.6.9/buildstream/_version.py 0000664 0000000 0000000 00000044247 14375152700 0020572 0 ustar 00root root 0000000 0000000 # pylint: skip-file
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = " (tag: 1.6.9, bst-1)"
git_full = "4abd1f3e1b5e5d128bc24e45ec9a37d61723be87"
git_date = "2023-02-21 23:31:28 +0900"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.tag_regex = "*.*.*"
cfg.parentdir_prefix = "BuildStream-"
cfg.versionfile_source = "buildstream/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, tag_regex, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s%s" % (tag_prefix, tag_regex)],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
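# Illustrative sketch (not executed), using hypothetical version pieces:
#
#   render_pep440({"closest-tag": "1.6.9", "distance": 0,
#                  "short": "4abd1f3", "dirty": False})   # -> "1.6.9"
#   render_pep440({"closest-tag": "1.6.9", "distance": 3,
#                  "short": "4abd1f3", "dirty": True})    # -> "1.6.9+3.g4abd1f3.dirty"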
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, cfg.tag_regex, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
buildstream-1.6.9/buildstream/_versions.py 0000664 0000000 0000000 00000002417 14375152700 0020746 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# The base BuildStream format version
#
# This version is bumped whenever enhancements are made
# to the `project.conf` format or the core element format.
#
BST_FORMAT_VERSION = 18
# The base BuildStream artifact version
#
# The artifact version changes whenever the cache key
# calculation algorithm changes in an incompatible way
# or if buildstream was changed in a way which can cause
# the same cache key to produce something that is no longer
# the same.
BST_CORE_ARTIFACT_VERSION = ('bst-1.2', 5)
buildstream-1.6.9/buildstream/_workspaces.py 0000664 0000000 0000000 00000033016 14375152700 0021256 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
import os
from . import utils
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason
BST_WORKSPACE_FORMAT_VERSION = 3
# Workspace()
#
# An object to contain various helper functions and data required for
# workspaces.
#
# last_successful, path and running_files are intended to be public
# properties, but may be best accessed using this class's helper
# methods.
#
# Args:
# toplevel_project (Project): Top project. Will be used for resolving relative workspace paths.
# path (str): The path that should host this workspace
# last_successful (str): The key of the last successful build of this workspace
# running_files (dict): A dict mapping dependency elements to files
# changed between failed builds. Should be
# made obsolete with failed build artifacts.
#
class Workspace():
def __init__(self, toplevel_project, *, last_successful=None, path=None, prepared=False, running_files=None):
self.prepared = prepared
self.last_successful = last_successful
self._path = path
self.running_files = running_files if running_files is not None else {}
self._toplevel_project = toplevel_project
self._key = None
# to_dict()
#
# Convert the members which get serialized into a dict for serialization purposes
#
# Returns:
# (dict) A dict representation of the workspace
#
def to_dict(self):
ret = {
'prepared': self.prepared,
'path': self._path,
'running_files': self.running_files
}
if self.last_successful is not None:
ret["last_successful"] = self.last_successful
return ret
# from_dict():
#
# Loads a new workspace from a simple dictionary, the dictionary
# is expected to be generated from Workspace.to_dict(), or manually
# when loading from a YAML file.
#
# Args:
# toplevel_project (Project): Top project. Will be used for resolving relative workspace paths.
# dictionary: A simple dictionary object
#
# Returns:
# (Workspace): A newly instantiated Workspace
#
@classmethod
def from_dict(cls, toplevel_project, dictionary):
# Just pass the dictionary as kwargs
return cls(toplevel_project, **dictionary)
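#
# Illustrative round-trip sketch (assumes `project` is a Project whose
# `directory` attribute points at the project root; values are examples):
#
#   workspace = Workspace(project, path="workspace_dir", prepared=True)
#   serialized = workspace.to_dict()
#   restored = Workspace.from_dict(project, serialized)
#   assert not restored.differs(workspace)
#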
# differs()
#
# Checks if two workspaces are different in any way.
#
# Args:
# other (Workspace): Another workspace instance
#
# Returns:
# True if the workspace differs from 'other', otherwise False
#
def differs(self, other):
return self.to_dict() != other.to_dict()
# invalidate_key()
#
# Invalidate the workspace key, forcing a recalculation next time
# it is accessed.
#
def invalidate_key(self):
self._key = None
# stage()
#
# Stage the workspace to the given directory.
#
# Args:
# directory (str) - The directory into which to stage this workspace
#
def stage(self, directory):
fullpath = self.get_absolute_path()
if os.path.isdir(fullpath):
utils.copy_files(fullpath, directory)
else:
destfile = os.path.join(directory, os.path.basename(self.get_absolute_path()))
utils.safe_copy(fullpath, destfile)
# add_running_files()
#
# Append a list of files to the running_files for the given
# dependency. Duplicate files will be ignored.
#
# Args:
# dep_name (str) - The dependency name whose files to append to
# files (list) - A list of files to append
#
def add_running_files(self, dep_name, files):
if dep_name in self.running_files:
# ruamel.py cannot serialize sets in python3.4
to_add = set(files) - set(self.running_files[dep_name])
self.running_files[dep_name].extend(to_add)
else:
self.running_files[dep_name] = list(files)
# clear_running_files()
#
# Clear all running files associated with this workspace.
#
def clear_running_files(self):
self.running_files = {}
# get_key()
#
# Get a unique key for this workspace.
#
# Args:
# recalculate (bool) - Whether to recalculate the key
#
# Returns:
# (str) A unique key for this workspace
#
def get_key(self, recalculate=False):
def unique_key(filename):
try:
stat = os.lstat(filename)
except OSError as e:
raise LoadError(LoadErrorReason.MISSING_FILE,
"Failed to stat file in workspace: {}".format(e)) from e
# Use the mtime of any file with sub second precision
return stat.st_mtime_ns
if recalculate or self._key is None:
fullpath = self.get_absolute_path()
# Get a list of tuples of the project relative paths and fullpaths
if os.path.isdir(fullpath):
filelist = utils.list_relative_paths(fullpath)
filelist = [(relpath, os.path.join(fullpath, relpath)) for relpath in filelist]
else:
filelist = [(self.get_absolute_path(), fullpath)]
self._key = [(relpath, unique_key(fullpath)) for relpath, fullpath in filelist]
return self._key
# get_absolute_path():
#
# Returns: The absolute path of the element's workspace.
#
def get_absolute_path(self):
return os.path.join(self._toplevel_project.directory, self._path)
# Workspaces()
#
# A class to manage Workspaces for multiple elements.
#
# Args:
# toplevel_project (Project): Top project used to resolve paths.
#
class Workspaces():
def __init__(self, toplevel_project):
self._toplevel_project = toplevel_project
self._bst_directory = os.path.join(toplevel_project.directory, ".bst")
self._workspaces = self._load_config()
# list()
#
# Generator function to enumerate workspaces.
#
# Yields:
# A tuple in the following format: (str, Workspace), where the
# first element is the name of the workspaced element.
def list(self):
for element, _ in _yaml.node_items(self._workspaces):
yield (element, self._workspaces[element])
# create_workspace()
#
# Create a workspace in the given path for the given element.
#
# Args:
# element_name (str) - The element name to create a workspace for
# path (str) - The path in which the workspace should be kept
#
def create_workspace(self, element_name, path):
if path.startswith(self._toplevel_project.directory):
path = os.path.relpath(path, self._toplevel_project.directory)
self._workspaces[element_name] = Workspace(self._toplevel_project, path=path)
return self._workspaces[element_name]
# get_workspace()
#
# Get the workspace associated with the given element, if any
#
# Args:
# element_name (str) - The element name whose workspace to return
#
# Returns:
# (None|Workspace)
#
def get_workspace(self, element_name):
if element_name not in self._workspaces:
return None
return self._workspaces[element_name]
# update_workspace()
#
# Update the datamodel with a new Workspace instance
#
# Args:
# element_name (str): The name of the element to update a workspace for
# workspace_dict (Workspace): A serialized workspace dictionary
#
# Returns:
# (bool): Whether the workspace has changed as a result
#
def update_workspace(self, element_name, workspace_dict):
assert element_name in self._workspaces
workspace = Workspace.from_dict(self._toplevel_project, workspace_dict)
if self._workspaces[element_name].differs(workspace):
self._workspaces[element_name] = workspace
return True
return False
# delete_workspace()
#
# Remove the workspace for the given element. Note that this
# does *not* remove the workspace from the stored yaml
# configuration; call save_config() afterwards to persist the change.
#
# Args:
# element_name (str) - The element name whose workspace to delete
#
def delete_workspace(self, element_name):
del self._workspaces[element_name]
# save_config()
#
# Dump the current workspace configuration to the workspaces file.
# This makes any changes performed with delete_workspace() or
# create_workspace() permanent
#
def save_config(self):
assert utils._is_main_process()
config = {
'format-version': BST_WORKSPACE_FORMAT_VERSION,
'workspaces': {
element: workspace.to_dict()
for element, workspace in _yaml.node_items(self._workspaces)
}
}
os.makedirs(self._bst_directory, exist_ok=True)
_yaml.dump(_yaml.node_sanitize(config),
self._get_filename())
# _load_config()
#
# Loads and parses the workspace configuration
#
# Returns:
# (dict) The extracted workspaces
#
# Raises: LoadError if there was a problem with the workspace config
#
def _load_config(self):
workspace_file = self._get_filename()
try:
node = _yaml.load(workspace_file)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_FILE:
# Return an empty dict if there was no workspace file
return {}
raise
return self._parse_workspace_config(node)
# _parse_workspace_config()
#
# If workspace config is in old-style format, i.e. it is using
# source-specific workspaces, try to convert it to element-specific
# workspaces.
#
# Args:
# workspaces (dict): current workspace config, usually the output of _load_config()
#
# Returns:
# (dict) The extracted workspaces
#
# Raises: LoadError if there was a problem with the workspace config
#
def _parse_workspace_config(self, workspaces):
version = _yaml.node_get(workspaces, int, "format-version", default_value=0)
if version == 0:
# Pre-versioning format can be of two forms
for element, config in _yaml.node_items(workspaces):
if isinstance(config, str):
pass
elif isinstance(config, dict):
sources = list(_yaml.node_items(config))
if len(sources) > 1:
detail = "There are multiple workspaces open for '{}'.\n" + \
"This is not supported anymore.\n" + \
"Please remove this element from '{}'."
raise LoadError(LoadErrorReason.INVALID_DATA,
detail.format(element, self._get_filename()))
workspaces[element] = sources[0][1]
else:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Workspace config is in unexpected format.")
res = {
element: Workspace(self._toplevel_project, path=config)
for element, config in _yaml.node_items(workspaces)
}
elif version >= 1 and version <= BST_WORKSPACE_FORMAT_VERSION:
workspaces = _yaml.node_get(workspaces, dict, "workspaces", default_value={})
res = {element: self._load_workspace(node)
for element, node in _yaml.node_items(workspaces)}
else:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Workspace configuration format version {} not supported."
"Your version of buildstream may be too old. Max supported version: {}"
.format(version, BST_WORKSPACE_FORMAT_VERSION))
return res
# _load_workspace():
#
# Loads a new workspace from a YAML node
#
# Args:
# node: A YAML Node
#
# Returns:
# (Workspace): A newly instantiated Workspace
#
def _load_workspace(self, node):
dictionary = {
'prepared': _yaml.node_get(node, bool, 'prepared', default_value=False),
'path': _yaml.node_get(node, str, 'path'),
'last_successful': _yaml.node_get(node, str, 'last_successful', default_value=None),
'running_files': _yaml.node_get(node, dict, 'running_files', default_value=None),
}
return Workspace.from_dict(self._toplevel_project, dictionary)
# _get_filename():
#
# Get the workspaces.yml file path.
#
# Returns:
# (str): The path to workspaces.yml file.
def _get_filename(self):
return os.path.join(self._bst_directory, "workspaces.yml")
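#
# Typical usage sketch (assumes `project` is a loaded Project object;
# the element name and path below are purely illustrative):
#
#   workspaces = Workspaces(project)
#   workspaces.create_workspace('hello.bst', '/path/to/workspace')
#   workspaces.save_config()
#   for element_name, workspace in workspaces.list():
#       print(element_name, workspace.get_absolute_path())
#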
buildstream-1.6.9/buildstream/_yaml.py 0000664 0000000 0000000 00000122322 14375152700 0020036 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import sys
import collections
import string
from io import StringIO
from copy import deepcopy
from contextlib import ExitStack
from pathlib import Path
from ruamel import yaml
from ruamel.yaml.representer import SafeRepresenter, RoundTripRepresenter
from ruamel.yaml.constructor import RoundTripConstructor
from ._exceptions import LoadError, LoadErrorReason
# SanitizedDict is an OrderedDict that is dumped as unordered mapping.
# This provides deterministic output for unordered mappings.
#
class SanitizedDict(collections.OrderedDict):
pass
# This overrides the ruamel constructor to treat everything as a string
RoundTripConstructor.add_constructor('tag:yaml.org,2002:int', RoundTripConstructor.construct_yaml_str)
RoundTripConstructor.add_constructor('tag:yaml.org,2002:float', RoundTripConstructor.construct_yaml_str)
RoundTripConstructor.add_constructor('tag:yaml.org,2002:null', RoundTripConstructor.construct_yaml_str)
# Represent simple types as strings
def represent_as_str(self, value):
return self.represent_str(str(value))
RoundTripRepresenter.add_representer(SanitizedDict, SafeRepresenter.represent_dict)
RoundTripRepresenter.add_representer(type(None), represent_as_str)
RoundTripRepresenter.add_representer(int, represent_as_str)
RoundTripRepresenter.add_representer(float, represent_as_str)
# We store information in the loaded yaml on a DictProvenance
# stored in all dictionaries under this key
PROVENANCE_KEY = '__bst_provenance_info'
# Provides information about file for provenance
#
# Args:
# name (str): Full path to the file
# shortname (str): Relative path to the file
# project (Project): Project where the shortname is relative from
class ProvenanceFile():
def __init__(self, name, shortname, project):
self.name = name
self.shortname = shortname
self.project = project
# Provenance tracks the origin of a given node in the parsed dictionary.
#
# Args:
# node (dict, list, value): A binding to the originally parsed value
# filename (string): The filename the node was loaded from
# toplevel (dict): The toplevel of the loaded file, suitable for later dumps
# line (int): The line number where node was parsed
# col (int): The column number where node was parsed
#
class Provenance():
def __init__(self, filename, node, toplevel, line=0, col=0):
self.filename = filename
self.node = node
self.toplevel = toplevel
self.line = line
self.col = col
# Convert a Provenance to a string for error reporting
def __str__(self):
filename = self.filename.shortname
if self.filename.project and self.filename.project.junction:
filename = "{}:{}".format(self.filename.project.junction.name, self.filename.shortname)
return "{} [line {:d} column {:d}]".format(filename, self.line, self.col)
# Abstract method
def clone(self):
pass # pragma: nocover
# A Provenance for dictionaries, these are stored in the copy of the
# loaded YAML tree and track the provenance of all members
#
class DictProvenance(Provenance):
def __init__(self, filename, node, toplevel, line=None, col=None):
if line is None or col is None:
# Special case for loading an empty dict
if hasattr(node, 'lc'):
line = node.lc.line + 1
col = node.lc.col
else:
line = 1
col = 0
super().__init__(filename, node, toplevel, line=line, col=col)
self.members = {}
def clone(self):
provenance = DictProvenance(self.filename, self.node, self.toplevel,
line=self.line, col=self.col)
provenance.members = {
member_name: member.clone()
for member_name, member in self.members.items()
}
return provenance
# A Provenance for dict members
#
class MemberProvenance(Provenance):
def __init__(self, filename, parent_dict, member_name, toplevel,
node=None, line=None, col=None):
if parent_dict is not None:
node = parent_dict[member_name]
line, col = parent_dict.lc.value(member_name)
line += 1
super().__init__(filename, node, toplevel, line=line, col=col)
# Only used if member is a list
self.elements = []
def clone(self):
provenance = MemberProvenance(self.filename, None, None, self.toplevel,
node=self.node, line=self.line, col=self.col)
provenance.elements = [e.clone() for e in self.elements]
return provenance
# A Provenance for list elements
#
class ElementProvenance(Provenance):
def __init__(self, filename, parent_list, index, toplevel,
node=None, line=None, col=None):
if parent_list is not None:
node = parent_list[index]
line, col = parent_list.lc.item(index)
line += 1
super().__init__(filename, node, toplevel, line=line, col=col)
# Only used if element is a list
self.elements = []
def clone(self):
provenance = ElementProvenance(self.filename, None, None, self.toplevel,
node=self.node, line=self.line, col=self.col)
provenance.elements = [e.clone() for e in self.elements]
return provenance
# These exceptions are intended to be caught entirely within
# the BuildStream framework, hence they do not reside in the
# public exceptions.py
class CompositeError(Exception):
def __init__(self, path, message):
super().__init__(message)
self.path = path
class CompositeTypeError(CompositeError):
def __init__(self, path, expected_type, actual_type):
super().__init__(
path,
"Error compositing dictionary key '{}', expected source type '{}' "
"but received type '{}'"
.format(path, expected_type.__name__, actual_type.__name__))
self.expected_type = expected_type
self.actual_type = actual_type
# Loads a dictionary from some YAML
#
# Args:
# filename (str): The YAML file to load
# shortname (str): The filename in shorthand for error reporting (or None)
# copy_tree (bool): Whether to make a copy, preserving the original toplevels
# for later serialization
#
# Returns (dict): A loaded copy of the YAML file with provenance information
#
# Raises: LoadError
#
def load(filename, shortname=None, copy_tree=False, *, project=None):
if not shortname:
shortname = filename
file = ProvenanceFile(filename, shortname, project)
try:
with open(filename, encoding="utf-8") as f:
return load_data(f, file, copy_tree=copy_tree)
except FileNotFoundError as e:
raise LoadError(LoadErrorReason.MISSING_FILE,
"Could not find file at {}".format(filename)) from e
except IsADirectoryError as e:
raise LoadError(LoadErrorReason.LOADING_DIRECTORY,
"{} is a directory. bst command expects a .bst file."
.format(filename)) from e
# A function to get the roundtrip yaml handle
#
# Args:
# write (bool): Whether we intend to write
#
def prepare_roundtrip_yaml(write=False):
yml = yaml.YAML()
yml.preserve_quotes = True
# For each of YAML 1.1 and 1.2, force everything to be a plain string
for version in [(1, 1), (1, 2), None]:
yml.resolver.add_version_implicit_resolver(
version,
'tag:yaml.org,2002:str',
yaml.util.RegExp(r'.*'),
None)
# When writing, we want to represent boolean as strings
if write:
yml.representer.add_representer(bool, represent_as_str)
return yml
# Like load(), but doesn't require the data to be in a file
#
def load_data(data, file=None, copy_tree=False):
yml = prepare_roundtrip_yaml()
try:
contents = yml.load(data)
except (yaml.scanner.ScannerError, yaml.composer.ComposerError, yaml.parser.ParserError) as e:
raise LoadError(LoadErrorReason.INVALID_YAML,
"Malformed YAML:\n\n{}\n\n{}\n".format(e.problem, e.problem_mark)) from e
if not isinstance(contents, dict):
# Special case allowance for None, when the loaded file has only comments in it.
if contents is None:
contents = {}
else:
raise LoadError(LoadErrorReason.INVALID_YAML,
"YAML file has content of type '{}' instead of expected type 'dict': {}"
.format(type(contents).__name__, file.name))
return node_decorated_copy(file, contents, copy_tree=copy_tree)
# Dumps a previously loaded YAML node to a file handle
#
def dump_file_handle(node, fh):
yml = prepare_roundtrip_yaml(write=True)
yml.dump(node, fh)
# Dumps a previously loaded YAML node to a file
#
# Args:
# node (dict): A node previously loaded with _yaml.load() above
#
# Returns:
# (str): The generated string
#
def dump_string(node):
with StringIO() as f:
dump_file_handle(node, f)
return f.getvalue()
# Dumps a previously loaded YAML node to a file
#
# Args:
# node (dict): A node previously loaded with _yaml.load() above
# filename (str): The YAML file to load
#
def dump(node, filename=None):
with ExitStack() as stack:
if filename:
from . import utils # pylint: disable=import-outside-toplevel
f = stack.enter_context(utils.save_file_atomic(filename, 'w'))
else:
f = sys.stdout
dump_file_handle(node, f)
# node_decorated_copy()
#
# Create a copy of a loaded dict tree decorated with Provenance
# information, used directly after loading yaml
#
# Args:
# filename (str): The filename
# toplevel (node): The toplevel dictionary node
# copy_tree (bool): Whether to load a copy and preserve the original
#
# Returns: A copy of the toplevel decorated with Provenance
#
def node_decorated_copy(filename, toplevel, copy_tree=False):
if copy_tree:
result = deepcopy(toplevel)
else:
result = toplevel
node_decorate_dict(filename, result, toplevel, toplevel)
return result
def node_decorate_dict(filename, target, source, toplevel):
provenance = DictProvenance(filename, source, toplevel)
target[PROVENANCE_KEY] = provenance
for key, value in node_items(source):
member = MemberProvenance(filename, source, key, toplevel)
provenance.members[key] = member
target_value = target.get(key)
if isinstance(value, collections.abc.Mapping):
node_decorate_dict(filename, target_value, value, toplevel)
elif isinstance(value, list):
member.elements = node_decorate_list(filename, target_value, value, toplevel)
def node_decorate_list(filename, target, source, toplevel):
elements = []
for idx, item in enumerate(source):
target_item = target[idx]
element = ElementProvenance(filename, source, idx, toplevel)
if isinstance(item, collections.abc.Mapping):
node_decorate_dict(filename, target_item, item, toplevel)
elif isinstance(item, list):
element.elements = node_decorate_list(filename, target_item, item, toplevel)
elements.append(element)
return elements
# node_get_provenance()
#
# Gets the provenance for a node
#
# Args:
# node (dict): a dictionary
# key (str): key in the dictionary
# indices (list of indexes): Index path, in the case of list values
#
# Returns: The Provenance of the dict, member or list element
#
def node_get_provenance(node, key=None, indices=None):
provenance = node.get(PROVENANCE_KEY)
if provenance and key:
provenance = provenance.members.get(key)
if provenance and indices is not None:
for index in indices:
provenance = provenance.elements[index]
return provenance
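#
# Typical error-reporting sketch (the key and message shown here are
# illustrative only):
#
#   provenance = node_get_provenance(node, key='depends', indices=[0])
#   raise LoadError(LoadErrorReason.INVALID_DATA,
#                   "{}: Invalid dependency".format(provenance))
#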
# Helper to use utils.sentinel without unconditional utils import,
# which causes issues for completion.
#
# Local private, but defined here because sphinx appears to break if
# it's not defined before any functions calling it in default kwarg
# values.
#
def _get_sentinel():
from .utils import _sentinel # pylint: disable=import-outside-toplevel
return _sentinel
# node_get()
#
# Fetches a value from a dictionary node and checks it for
# an expected value. Use default_value when parsing a value
# which is only optionally supplied.
#
# Args:
# node (dict): The dictionary node
# expected_type (type): The expected type for the value being searched
# key (str): The key to get a value for in node
# indices (list of ints): Optionally descend into lists of lists
#
# Returns:
# The value if found in node, otherwise default_value is returned
#
# Raises:
# LoadError, when the value found is not of the expected type
#
# Note:
# Returned strings are stripped of leading and trailing whitespace
#
def node_get(node, expected_type, key, indices=None, default_value=_get_sentinel()):
value = node.get(key, default_value)
provenance = node_get_provenance(node)
if value is _get_sentinel():
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Dictionary did not contain expected key '{}'".format(provenance, key))
path = key
if indices is not None:
# Implied type check of the element itself
value = node_get(node, list, key)
for index in indices:
value = value[index]
path += '[{:d}]'.format(index)
# We want to allow None as a valid value for any type
if value is None:
return None
if not isinstance(value, expected_type):
# Attempt basic conversions if possible, typically we want to
# be able to specify numeric values and convert them to strings,
# but we don't want to try converting dicts/lists
try:
if (expected_type == bool and isinstance(value, str)):
# Don't coerce strings to booleans with bool(), since that would make "False" strings evaluate to True
if value in ('true', 'True'):
value = True
elif value in ('false', 'False'):
value = False
else:
raise ValueError()
elif not (expected_type == list or
expected_type == dict or
isinstance(value, (list, dict))):
value = expected_type(value)
else:
raise ValueError()
except (ValueError, TypeError) as e:
provenance = node_get_provenance(node, key=key, indices=indices)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Value of '{}' is not of the expected type '{}'"
.format(provenance, path, expected_type.__name__)) from e
# Trim it at the bud: all strings loaded from YAML are stripped of leading and trailing whitespace
if isinstance(value, str):
value = value.strip()
return value
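#
# Usage sketch (assumes `node` was produced by load() above; the keys
# shown are illustrative):
#
#   kind = node_get(node, str, 'kind')
#   depends = node_get(node, list, 'depends', default_value=[])
#   strict = node_get(node, bool, 'strict', default_value=False)
#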
# node_get_project_path()
#
# Fetches a project path from a dictionary node and validates it
#
# Paths are asserted to never lead to a directory outside of the project
# directory. In addition, paths can not point to symbolic links, fifos,
# sockets and block/character devices.
#
# The `check_is_file` and `check_is_dir` parameters can be used to
# perform additional validations on the path. Note that an exception
# will always be raised if both parameters are set to ``True``.
#
# Args:
# node (dict): A dictionary loaded from YAML
# key (str): The key whose value contains a path to validate
# project_dir (str): The project directory
# check_is_file (bool): If ``True`` an error will also be raised
# if path does not point to a regular file.
# Defaults to ``False``
# check_is_dir (bool): If ``True`` an error will be also raised
# if path does not point to a directory.
# Defaults to ``False``
# Returns:
# (str): The project path
#
# Raises:
# (LoadError): In case that the project path is not valid or does not
# exist
#
def node_get_project_path(node, key, project_dir, *,
check_is_file=False, check_is_dir=False):
path_str = node_get(node, str, key)
path = Path(path_str)
project_dir_path = Path(project_dir)
provenance = node_get_provenance(node, key=key)
if (project_dir_path / path).is_symlink():
raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
"{}: Specified path '{}' must not point to "
"symbolic links "
.format(provenance, path_str))
if path.parts and path.parts[0] == '..':
raise LoadError(LoadErrorReason.PROJ_PATH_INVALID,
"{}: Specified path '{}' first component must "
"not be '..'"
.format(provenance, path_str))
try:
if sys.version_info[0] == 3 and sys.version_info[1] < 6:
full_resolved_path = (project_dir_path / path).resolve()
else:
full_resolved_path = (project_dir_path / path).resolve(strict=True)
except FileNotFoundError as e:
raise LoadError(LoadErrorReason.MISSING_FILE,
"{}: Specified path '{}' does not exist"
.format(provenance, path_str)) from e
is_inside = project_dir_path.resolve() in full_resolved_path.parents or (
full_resolved_path == project_dir_path)
if path.is_absolute() or not is_inside:
raise LoadError(LoadErrorReason.PROJ_PATH_INVALID,
"{}: Specified path '{}' must not lead outside of the "
"project directory"
.format(provenance, path_str))
if full_resolved_path.is_socket() or (
full_resolved_path.is_fifo() or
full_resolved_path.is_block_device()):
raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
"{}: Specified path '{}' points to an unsupported "
"file kind"
.format(provenance, path_str))
if check_is_file and not full_resolved_path.is_file():
raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
"{}: Specified path '{}' is not a regular file"
.format(provenance, path_str))
if check_is_dir and not full_resolved_path.is_dir():
raise LoadError(LoadErrorReason.PROJ_PATH_INVALID_KIND,
"{}: Specified path '{}' is not a directory"
.format(provenance, path_str))
return path_str
# node_items()
#
# A convenience generator for iterating over loaded key/value
# tuples in a dictionary loaded from project YAML.
#
# Args:
# node (dict): The dictionary node
#
# Yields:
# (str): The key name
# (anything): The value for the key
#
def node_items(node):
for key, value in node.items():
if key == PROVENANCE_KEY:
continue
yield (key, value)
# Gives a node a dummy provenance, in case of compositing dictionaries
# where the target is an empty {}
def ensure_provenance(node):
provenance = node.get(PROVENANCE_KEY)
if not provenance:
provenance = DictProvenance(ProvenanceFile('', '', None), node, node)
node[PROVENANCE_KEY] = provenance
return provenance
# is_ruamel_str():
#
# Args:
# value: A value loaded from ruamel
#
# This returns True if the value is "stringish". Since ruamel
# has some complex types to represent strings, this is needed
# to avoid compositing exceptions and to allow the various
# string types to be interchangeable and acceptable
#
def is_ruamel_str(value):
if isinstance(value, str):
return True
elif isinstance(value, yaml.scalarstring.ScalarString):
return True
return False
# is_composite_list
#
# Checks if the given node is a Mapping with array composition
# directives.
#
# Args:
# node (value): Any node
#
# Returns:
# (bool): True if node was a Mapping containing only
# list composition directives
#
# Raises:
# (LoadError): If node was a mapping and contained a mix of
# list composition directives and other keys
#
def is_composite_list(node):
if isinstance(node, collections.abc.Mapping):
has_directives = False
has_keys = False
for key, _ in node_items(node):
if key in ['(>)', '(<)', '(=)']: # pylint: disable=simplifiable-if-statement
has_directives = True
else:
has_keys = True
if has_keys and has_directives:
provenance = node_get_provenance(node)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Dictionary contains array composition directives and arbitrary keys"
.format(provenance))
return has_directives
return False
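#
# In YAML, a composite list looks roughly like this (an illustrative
# element configuration fragment, not taken from a real project):
#
#   config:
#     install-commands:
#       (<):
#       - echo "before the default install commands"
#       (>):
#       - rm -f %{install-root}%{bindir}/unwanted
#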
# composite_list_prepend
#
# Internal helper for list composition
#
# Args:
# target_node (dict): A simple dictionary
# target_key (dict): The key indicating a literal array to prepend to
# source_node (dict): Another simple dictionary
# source_key (str): The key indicating an array to prepend to the target
#
# Returns:
# (bool): True if a source list was found and compositing occurred
#
def composite_list_prepend(target_node, target_key, source_node, source_key):
source_list = node_get(source_node, list, source_key, default_value=[])
if not source_list:
return False
target_provenance = node_get_provenance(target_node)
source_provenance = node_get_provenance(source_node)
if target_node.get(target_key) is None:
target_node[target_key] = []
source_list = list_chain_copy(source_list)
target_list = target_node[target_key]
for element in reversed(source_list):
target_list.insert(0, element)
if not target_provenance.members.get(target_key):
target_provenance.members[target_key] = source_provenance.members[source_key].clone()
else:
for p in reversed(source_provenance.members[source_key].elements):
target_provenance.members[target_key].elements.insert(0, p.clone())
return True
# composite_list_append
#
# Internal helper for list composition
#
# Args:
# target_node (dict): A simple dictionary
# target_key (dict): The key indicating a literal array to append to
# source_node (dict): Another simple dictionary
# source_key (str): The key indicating an array to append to the target
#
# Returns:
# (bool): True if a source list was found and compositing occurred
#
def composite_list_append(target_node, target_key, source_node, source_key):
source_list = node_get(source_node, list, source_key, default_value=[])
if not source_list:
return False
target_provenance = node_get_provenance(target_node)
source_provenance = node_get_provenance(source_node)
if target_node.get(target_key) is None:
target_node[target_key] = []
source_list = list_chain_copy(source_list)
target_list = target_node[target_key]
target_list.extend(source_list)
if not target_provenance.members.get(target_key):
target_provenance.members[target_key] = source_provenance.members[source_key].clone()
else:
target_provenance.members[target_key].elements.extend([
p.clone() for p in source_provenance.members[source_key].elements
])
return True
# composite_list_overwrite
#
# Internal helper for list composition
#
# Args:
# target_node (dict): A simple dictionary
# target_key (dict): The key indicating a literal array to overwrite
# source_node (dict): Another simple dictionary
# source_key (str): The key indicating an array to overwrite the target with
#
# Returns:
# (bool): True if a source list was found and compositing occurred
#
def composite_list_overwrite(target_node, target_key, source_node, source_key):
# We need to handle the legitimate case of overwriting a list with an empty
# list, hence the slightly odd default_value of [None] rather than [].
source_list = node_get(source_node, list, source_key, default_value=[None])
if source_list == [None]:
return False
target_provenance = node_get_provenance(target_node)
source_provenance = node_get_provenance(source_node)
target_node[target_key] = list_chain_copy(source_list)
target_provenance.members[target_key] = source_provenance.members[source_key].clone()
return True
# composite_list():
#
# Composite the source value onto the target value, if either
# sides are lists, or dictionaries containing list compositing directives
#
# Args:
# target_node (dict): A simple dictionary
# source_node (dict): Another simple dictionary
# key (str): The key to compose on
#
# Returns:
# (bool): True if both sides were logical lists
#
# Raises:
# (LoadError): If one side was a logical list and the other was not
#
def composite_list(target_node, source_node, key):
target_value = target_node.get(key)
source_value = source_node[key]
target_key_provenance = node_get_provenance(target_node, key)
source_key_provenance = node_get_provenance(source_node, key)
# Whenever a literal list is encountered in the source, it
# overwrites the target values and provenance completely.
#
if isinstance(source_value, list):
source_provenance = node_get_provenance(source_node)
target_provenance = node_get_provenance(target_node)
# Assert target type
if not (target_value is None or
isinstance(target_value, list) or
is_composite_list(target_value)):
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: List cannot overwrite value at: {}"
.format(source_key_provenance, target_key_provenance))
# Special case: The `project.conf` in some cases needs to composite
# include files before having resolved options, so there can be
# conditionals that need to be merged at this point.
#
# This unconditionally appends conditional statements to a matching
# conditional in the target so as to preserve them. The precedence
# of include files is preserved regardless due to the order in which
# included dictionaries are composited.
#
if key == "(?)":
composite_list_append(target_node, key, source_node, key)
else:
composite_list_overwrite(target_node, key, source_node, key)
return True
# When a composite list is encountered in the source, then
# multiple outcomes can occur...
#
elif is_composite_list(source_value):
# If there is nothing there, then the composite list
# is copied in its entirety as is, and preserved
# for later composition
#
if target_value is None:
source_provenance = node_get_provenance(source_node)
target_provenance = node_get_provenance(target_node)
target_node[key] = node_chain_copy(source_value)
target_provenance.members[key] = source_provenance.members[key].clone()
# If the target is a literal list, then composition
# occurs directly onto that target, leaving the target
# as a literal list to overwrite anything in later composition
#
elif isinstance(target_value, list):
composite_list_overwrite(target_node, key, source_value, '(=)')
composite_list_prepend(target_node, key, source_value, '(<)')
composite_list_append(target_node, key, source_value, '(>)')
# If the target is a composite list, then composition
# occurs in the target composite list, and the composite
# target list is preserved in dictionary form for further
# composition.
#
elif is_composite_list(target_value):
if composite_list_overwrite(target_value, '(=)', source_value, '(=)'):
# When overwriting a target with composition directives, remove any
# existing prepend/append directives in the target before adding our own
target_provenance = node_get_provenance(target_value)
for directive in ['(<)', '(>)']:
try:
del target_value[directive]
del target_provenance.members[directive]
except KeyError:
# Ignore errors from deletion of non-existing keys
pass
# Prepend to the target prepend array, and append to the append array
composite_list_prepend(target_value, '(<)', source_value, '(<)')
composite_list_append(target_value, '(>)', source_value, '(>)')
else:
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: List cannot overwrite value at: {}"
.format(source_key_provenance, target_key_provenance))
# We handled list composition in some way
return True
# Source value was not a logical list
return False
# composite_dict():
#
# Composites values in target with values from source
#
# Args:
# target (dict): A simple dictionary
# source (dict): Another simple dictionary
#
# Raises: CompositeError
#
# Unlike the dictionary update() method, nested values in source
# will not obsolete entire subdictionaries in target, instead both
# dictionaries will be recursed and a composition of both will result
#
# This is useful for overriding configuration files and element
# configurations.
#
def composite_dict(target, source, path=None):
target_provenance = ensure_provenance(target)
source_provenance = ensure_provenance(source)
for key, source_value in node_items(source):
# Track the full path of keys, only for raising CompositeError
if path:
thispath = path + '.' + key
else:
thispath = key
# Handle list composition separately
if composite_list(target, source, key):
continue
target_value = target.get(key)
if isinstance(source_value, collections.abc.Mapping):
# Handle creating new dicts on target side
if target_value is None:
target_value = {}
target[key] = target_value
# Give the new dict provenance
value_provenance = source_value.get(PROVENANCE_KEY)
if value_provenance:
target_value[PROVENANCE_KEY] = value_provenance.clone()
# Add a new provenance member element to the containing dict
target_provenance.members[key] = source_provenance.members[key]
if not isinstance(target_value, collections.abc.Mapping):
raise CompositeTypeError(thispath, type(target_value), type(source_value))
# Recurse into matching dictionary
composite_dict(target_value, source_value, path=thispath)
else:
if target_value is not None:
# Exception here: depending on how strings were declared ruamel may
# use a different type, but for our purposes, any stringish type will do.
if not (is_ruamel_str(source_value) and is_ruamel_str(target_value)) \
and not isinstance(source_value, type(target_value)):
raise CompositeTypeError(thispath, type(target_value), type(source_value))
# Overwrite simple values, lists and mappings have already been handled
target_provenance.members[key] = source_provenance.members[key].clone()
target[key] = source_value
# Like composite_dict(), but raises an all purpose LoadError for convenience
#
def composite(target, source):
assert hasattr(source, 'get')
source_provenance = node_get_provenance(source)
try:
composite_dict(target, source)
except CompositeTypeError as e:
error_prefix = ""
if source_provenance:
error_prefix = "{}: ".format(source_provenance)
raise LoadError(LoadErrorReason.ILLEGAL_COMPOSITE,
"{}Expected '{}' type for configuration '{}', instead received '{}'"
.format(error_prefix,
e.expected_type.__name__,
e.path,
e.actual_type.__name__)) from e
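#
# Minimal compositing sketch (filenames are illustrative; copy_tree is
# only needed if the original trees must be preserved for later dumps):
#
#   base = load('base.yml', copy_tree=True)
#   overrides = load('overrides.yml')
#   composite(base, overrides)   # overrides win, nested dicts are merged
#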
# node_sanitize()
#
# Returns an alphabetically ordered recursive copy
# of the source node with internal provenance information stripped.
#
# Only dicts are ordered, list elements are left in order.
#
def node_sanitize(node):
if isinstance(node, collections.abc.Mapping):
result = SanitizedDict()
key_list = [key for key, _ in node_items(node)]
for key in sorted(key_list):
result[key] = node_sanitize(node[key])
return result
elif isinstance(node, list):
return [node_sanitize(elt) for elt in node]
return node
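#
# Typically used just before dumping, so that output ordering is stable
# (config and filename here are illustrative):
#
#   dump(node_sanitize(config), filename)
#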
# node_validate()
#
# Validate the node so as to ensure the user has not specified
# any keys which are unrecognized by buildstream (usually this
# means a typo which would otherwise not trigger an error).
#
# Args:
# node (dict): A dictionary loaded from YAML
# valid_keys (list): A list of valid keys for the specified node
#
# Raises:
# LoadError: In the case that the specified node contained
# one or more invalid keys
#
def node_validate(node, valid_keys):
# Probably the fastest way to do this: https://stackoverflow.com/a/23062482
valid_keys = set(valid_keys)
valid_keys.add(PROVENANCE_KEY)
invalid = next((key for key in node if key not in valid_keys), None)
if invalid:
provenance = node_get_provenance(node, key=invalid)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Unexpected key: {}".format(provenance, invalid))
# ChainMap
#
# This is a derivative of collections.ChainMap(), but supports
# explicit deletions of keys.
#
# The purpose of this is to create a virtual copy-on-write
# copy of a dictionary, so that mutating it in any way does
# not affect the underlying dictionaries.
#
# collections.ChainMap covers this already mostly, but fails
# to record internal state so as to hide keys which have been
# explicitly deleted.
#
class ChainMap(collections.ChainMap):
def __init__(self, *maps):
super().__init__(*maps)
self.__deletions = set()
def __getitem__(self, key):
# Honor deletion state of 'key'
if key in self.__deletions:
return self.__missing__(key)
return super().__getitem__(key)
def __len__(self):
return len(set().union(*self.maps) - self.__deletions)
def __iter__(self):
return iter(set().union(*self.maps) - self.__deletions)
def __contains__(self, key):
if key in self.__deletions:
return False
return any(key in m for m in self.maps)
def __bool__(self):
# Attempt to preserve 'any' optimization
any_keys = any(self.maps)
# Something existed, try again with deletions subtracted
if any_keys:
return any(set().union(*self.maps) - self.__deletions)
return False
def __setitem__(self, key, value):
self.__deletions.discard(key)
super().__setitem__(key, value)
def __delitem__(self, key):
if key in self.__deletions:
raise KeyError('Key was already deleted from this mapping: {!r}'.format(key))
# Ignore KeyError if it's not in the first map, just save the deletion state
try:
super().__delitem__(key)
except KeyError:
pass
# Store deleted state
self.__deletions.add(key)
def popitem(self):
poppable = set().union(*self.maps) - self.__deletions
for key in poppable:
return self.pop(key)
raise KeyError('No keys found.')
__marker = object()
def pop(self, key, default=__marker):
# Reimplement MutableMapping's behavior here
try:
value = self[key]
except KeyError:
if default is self.__marker:
raise
return default
else:
del self[key]
return value
def clear(self):
clearable = set().union(*self.maps) - self.__deletions
for key in clearable:
del self[key]
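#
# Copy-on-write behavior sketch: mutations and deletions are recorded in
# the chained copy and never touch the backing dictionary:
#
#   backing = {'a': 1, 'b': 2}
#   snapshot = ChainMap({}, backing)
#   snapshot['a'] = 10
#   del snapshot['b']
#   assert backing == {'a': 1, 'b': 2}
#   assert 'b' not in snapshot
#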
def node_chain_copy(source):
copy = ChainMap({}, source)
for key, value in source.items():
if isinstance(value, collections.abc.Mapping):
copy[key] = node_chain_copy(value)
elif isinstance(value, list):
copy[key] = list_chain_copy(value)
elif isinstance(value, Provenance):
copy[key] = value.clone()
return copy
def list_chain_copy(source):
copy = []
for item in source:
if isinstance(item, collections.abc.Mapping):
copy.append(node_chain_copy(item))
elif isinstance(item, list):
copy.append(list_chain_copy(item))
elif isinstance(item, Provenance):
copy.append(item.clone())
else:
copy.append(item)
return copy
def node_copy(source):
copy = {}
for key, value in source.items():
if isinstance(value, collections.abc.Mapping):
copy[key] = node_copy(value)
elif isinstance(value, list):
copy[key] = list_copy(value)
elif isinstance(value, Provenance):
copy[key] = value.clone()
else:
copy[key] = value
ensure_provenance(copy)
return copy
def list_copy(source):
copy = []
for item in source:
if isinstance(item, collections.abc.Mapping):
copy.append(node_copy(item))
elif isinstance(item, list):
copy.append(list_copy(item))
elif isinstance(item, Provenance):
copy.append(item.clone())
else:
copy.append(item)
return copy
# node_final_assertions()
#
# This must be called on a fully loaded and composited node,
# after all composition has completed.
#
# Args:
# node (Mapping): The final composited node
#
# Raises:
# (LoadError): If any assertions fail
#
def node_final_assertions(node):
for key, value in node_items(node):
# Assert that list composition directives don't remain; this
# indicates that the user intended to override a list which
# never existed in the underlying data
#
if key in ['(>)', '(<)', '(=)']:
provenance = node_get_provenance(node, key)
raise LoadError(LoadErrorReason.TRAILING_LIST_DIRECTIVE,
"{}: Attempt to override non-existing list".format(provenance))
if isinstance(value, collections.abc.Mapping):
node_final_assertions(value)
elif isinstance(value, list):
list_final_assertions(value)
def list_final_assertions(values):
for value in values:
if isinstance(value, collections.abc.Mapping):
node_final_assertions(value)
elif isinstance(value, list):
list_final_assertions(value)
# assert_symbol_name()
#
# A helper function to check if a loaded string is a valid symbol
# name and to raise a consistent LoadError if not. For strings which
# are required to be symbols.
#
# Args:
# provenance (Provenance): The provenance of the loaded symbol, or None
# symbol_name (str): The loaded symbol name
# purpose (str): The purpose of the string, for an error message
# allow_dashes (bool): Whether dashes are allowed for this symbol
#
# Raises:
# LoadError: If the symbol_name is invalid
#
# Note that dashes are generally preferred for variable names and
# usage in YAML, but things such as option names which will be
# evaluated with jinja2 cannot use dashes.
#
def assert_symbol_name(provenance, symbol_name, purpose, *, allow_dashes=True):
valid_chars = string.digits + string.ascii_letters + '_'
if allow_dashes:
valid_chars += '-'
valid = True
if not symbol_name:
valid = False
elif any(x not in valid_chars for x in symbol_name):
valid = False
elif symbol_name[0] in string.digits:
valid = False
if not valid:
detail = "Symbol names must contain only alphanumeric characters, " + \
"may not start with a digit, and may contain underscores"
if allow_dashes:
detail += " or dashes"
message = "Invalid symbol name for {}: '{}'".format(purpose, symbol_name)
if provenance is not None:
message = "{}: {}".format(provenance, message)
raise LoadError(LoadErrorReason.INVALID_SYMBOL_NAME,
message, detail=detail)
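#
# Usage sketch (the names are illustrative; provenance may be None when
# there is no YAML context to report):
#
#   assert_symbol_name(None, 'my-element', 'element name')
#   assert_symbol_name(None, 'my_option', 'option name', allow_dashes=False)
#   assert_symbol_name(None, '9lives', 'element name')  # raises LoadError
#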
buildstream-1.6.9/buildstream/buildelement.py 0000664 0000000 0000000 00000024002 14375152700 0021402 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
# Copyright (C) 2018 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
BuildElement - Abstract class for build elements
================================================
The BuildElement class is a convenience element one can derive from for
implementing the most common case of element.
Abstract method implementations
-------------------------------
Element.configure_sandbox()
~~~~~~~~~~~~~~~~~~~~~~~~~~~
In :func:`Element.configure_sandbox() `,
the BuildElement will ensure that the sandbox locations described by the ``%{build-root}``
and ``%{install-root}`` variables are marked and will be mounted read-write for the
:func:`assemble phase`.
The working directory for the sandbox will be configured to be the ``%{build-root}``,
unless the ``%{command-subdir}`` variable is specified for the element in question,
in which case the working directory will be configured as ``%{build-root}/%{command-subdir}``.
Element.stage()
~~~~~~~~~~~~~~~
In :func:`Element.stage() `, the BuildElement
will do the following operations:
* Stage all the dependencies in the :func:`Scope.BUILD `
scope into the sandbox root.
* Run the integration commands for all staged dependencies using
:func:`Element.integrate() `
* Stage any Source on the given element to the ``%{build-root}`` location
inside the sandbox, using
:func:`Element.stage_sources() `
Element.prepare()
~~~~~~~~~~~~~~~~~
In :func:`Element.prepare() `,
the BuildElement will run ``configure-commands``, which are used to
run one-off preparations that should not be repeated for a single
build directory.
Element.assemble()
~~~~~~~~~~~~~~~~~~
In :func:`Element.assemble() `, the
BuildElement will proceed to run sandboxed commands which are expected to be
found in the element configuration.
Commands are run in the following order:
* ``configure-commands``: Commands to configure the element before building
* ``build-commands``: Commands to build the element
* ``install-commands``: Commands to install the results into ``%{install-root}``
* ``strip-commands``: Commands to strip debugging symbols from installed binaries
The result of the build is expected to end up in ``%{install-root}``, and
as such, the Element.assemble() method will return the ``%{install-root}`` for
artifact collection purposes.
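**Example of command configuration** (an illustrative sketch only; the
exact commands depend entirely on the project being built):

.. code:: yaml

   kind: manual

   config:
     build-commands:
     - make
     install-commands:
     - make -j1 DESTDIR="%{install-root}" install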
In addition to the command lists specified above, build elements support
specifying the ``create-dev-shm`` boolean parameter. If configured, this
parameter causes the sandbox to mount a tmpfs filesystem at ``/dev/shm``.
**Example of create-dev-shm**:
.. code:: yaml
kind: manual
config:
# Enable /dev/shm
create-dev-shm: true
"""
import os
from . import Element, Scope, ElementError
from . import SandboxFlags
# This list is preserved because of an unfortunate situation: we
# need to remove these older commands, which were secret and never
# documented, without breaking the cache keys.
_legacy_command_steps = ['bootstrap-commands',
'configure-commands',
'build-commands',
'test-commands',
'install-commands',
'strip-commands']
_command_steps = ['configure-commands',
'build-commands',
'install-commands',
'strip-commands']
class BuildElement(Element):
# pylint: disable=attribute-defined-outside-init
#############################################################
# Abstract Method Implementations #
#############################################################
def configure(self, node):
self.__commands = {}
self.__create_dev_shm = False
# FIXME: Currently this forcefully validates configurations
# for all BuildElement subclasses so they are unable to
# extend the configuration
self.node_validate(node, _command_steps + ["create-dev-shm"])
self.__create_dev_shm = self.node_get_member(node, bool, "create-dev-shm", False)
for command_name in _legacy_command_steps:
if command_name in _command_steps:
self.__commands[command_name] = self.__get_commands(node, command_name)
else:
self.__commands[command_name] = []
def preflight(self):
pass
def get_unique_key(self):
dictionary = {}
for command_name, command_list in self.__commands.items():
dictionary[command_name] = command_list
# Specifying notparallel for a given element affects the
# cache key, while having the side effect of setting max-jobs to 1,
# which is normally automatically resolved and does not affect
# the cache key.
if self.get_variable('notparallel'):
dictionary['notparallel'] = True
return dictionary
def configure_sandbox(self, sandbox):
build_root = self.get_variable('build-root')
install_root = self.get_variable('install-root')
# Tell the sandbox to mount the build root and install root
sandbox.mark_directory(build_root)
sandbox.mark_directory(install_root)
# Allow running all commands in a specified subdirectory
command_subdir = self.get_variable('command-subdir')
if command_subdir:
command_dir = os.path.join(build_root, command_subdir)
else:
command_dir = build_root
sandbox.set_work_directory(command_dir)
# Setup environment
sandbox.set_environment(self.get_environment())
def stage(self, sandbox):
# Stage deps in the sandbox root
with self.timed_activity("Staging dependencies", silent_nested=True):
self.stage_dependency_artifacts(sandbox, Scope.BUILD)
# Run any integration commands provided by the dependencies
# once they are all staged and ready
with self.timed_activity("Integrating sandbox"):
for dep in self.dependencies(Scope.BUILD):
dep.integrate(sandbox)
# Stage sources in the build root
self.stage_sources(sandbox, self.get_variable('build-root'))
def assemble(self, sandbox):
# Run commands
for command_name in _command_steps:
commands = self.__commands[command_name]
if not commands or command_name == 'configure-commands':
continue
with self.timed_activity("Running {}".format(command_name)):
for cmd in commands:
self.__run_command(sandbox, cmd, command_name)
# %{install-root}/%{build-root} should normally not be written
# to - if an element later attempts to stage to a location
# that is not empty, we abort the build - in this case this
# will almost certainly happen.
staged_build = os.path.join(self.get_variable('install-root'),
self.get_variable('build-root'))
if os.path.isdir(staged_build) and os.listdir(staged_build):
self.warn("Writing to %{install-root}/%{build-root}.",
detail="Writing to this directory will almost " +
"certainly cause an error, since later elements " +
"will not be allowed to stage to %{build-root}.")
# Return the payload, this is configurable but is generally
# always the /buildstream-install directory
return self.get_variable('install-root')
def prepare(self, sandbox):
commands = self.__commands['configure-commands']
if commands:
with self.timed_activity("Running configure-commands"):
for cmd in commands:
self.__run_command(sandbox, cmd, 'configure-commands')
def generate_script(self):
script = ""
for command_name in _command_steps:
commands = self.__commands[command_name]
for cmd in commands:
script += "(set -ex; {}\n) || exit 1\n".format(cmd)
return script
#############################################################
# Private Local Methods #
#############################################################
def __get_commands(self, node, name):
list_node = self.node_get_member(node, list, name, [])
commands = []
for i in range(len(list_node)):
command = self.node_subst_list_element(node, name, [i])
commands.append(command)
return commands
def __run_command(self, sandbox, cmd, cmd_name):
self.status("Running {}".format(cmd_name), detail=cmd)
if self.__create_dev_shm:
flags = SandboxFlags.ROOT_READ_ONLY | SandboxFlags.CREATE_DEV_SHM
else:
flags = SandboxFlags.ROOT_READ_ONLY
# Note the -e switch to 'sh' means to exit with an error
# if any untested command fails.
#
exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'], flags)
if exitcode != 0:
raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode))
buildstream-1.6.9/buildstream/data/ 0000775 0000000 0000000 00000000000 14375152700 0017272 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/data/bst 0000664 0000000 0000000 00000001167 14375152700 0020012 0 ustar 00root root 0000000 0000000 # BuildStream bash completion scriptlet.
#
# On systems which use the bash-completion module for
# completion discovery with bash, this can be installed at:
#
# pkg-config --variable=completionsdir bash-completion
#
# If BuildStream is not installed system wide, you can
# simply source this script to enable completions or append
# this script to your ~/.bash_completion file.
#
_bst_completion() {
local IFS=$'
'
COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \
COMP_CWORD=$COMP_CWORD \
_BST_COMPLETION=complete $1 ) )
return 0
}
complete -F _bst_completion -o nospace bst;
buildstream-1.6.9/buildstream/data/build-all.sh.in 0000664 0000000 0000000 00000001401 14375152700 0022074 0 ustar 00root root 0000000 0000000 #!/bin/sh
#
# DO NOT EDIT THIS FILE
#
# This is a build script generated by
# [BuildStream](https://wiki.gnome.org/Projects/BuildStream/).
#
# Builds all given modules using their respective scripts.
set -eu
echo "Buildstream native bootstrap script"
export PATH='/usr/bin:/usr/sbin/:/sbin:/bin:/tools/bin:/tools/sbin'
export SRCDIR='./source'
SUCCESS=false
CURRENT_MODULE='None'
echo 'Setting up build environment...'
except() {{
if [ "$SUCCESS" = true ]; then
echo "Done!"
else
echo "Error building module ${{CURRENT_MODULE}}."
fi
}}
trap "except" EXIT
for module in {modules}; do
CURRENT_MODULE="$module"
"$SRCDIR/build-$module"
if [ -e /sbin/ldconfig ]; then
/sbin/ldconfig || true;
fi
done
SUCCESS=true
buildstream-1.6.9/buildstream/data/build-module.sh.in 0000664 0000000 0000000 00000001305 14375152700 0022614 0 ustar 00root root 0000000 0000000 #!/bin/sh
#
# DO NOT EDIT THIS FILE
#
# This is a build script generated by
# [BuildStream](https://wiki.gnome.org/Projects/BuildStream/).
#
# Builds the module {name}.
set -e
# Prepare the build environment
echo 'Building {name}'
if [ -d '{build_root}' ]; then
rm -rf '{build_root}'
fi
if [ -d '{install_root}' ]; then
rm -rf '{install_root}'
fi
mkdir -p '{build_root}'
mkdir -p '{install_root}'
if [ -d "$SRCDIR/{name}/" ]; then
cp -a "$SRCDIR/{name}/." '{build_root}'
fi
cd '{build_root}'
export PREFIX='{install_root}'
export {variables}
# Build the module
{commands}
rm -rf '{build_root}'
# Install the module
echo 'Installing {name}'
(cd '{install_root}'; find . | cpio -umdp /)
buildstream-1.6.9/buildstream/data/projectconfig.yaml 0000664 0000000 0000000 00000010464 14375152700 0023017 0 ustar 00root root 0000000 0000000 # Default BuildStream project configuration.
# General configuration defaults
#
# Require format version 0
format-version: 0
# Elements are found at the project root
element-path: .
# Store source references in element files
ref-storage: inline
# Variable Configuration
#
variables:
# Path configuration, to be used in build instructions.
prefix: "/usr"
exec_prefix: "%{prefix}"
bindir: "%{exec_prefix}/bin"
sbindir: "%{exec_prefix}/sbin"
libexecdir: "%{exec_prefix}/libexec"
datadir: "%{prefix}/share"
sysconfdir: "/etc"
sharedstatedir: "%{prefix}/com"
localstatedir: "/var"
lib: "lib"
libdir: "%{prefix}/%{lib}"
debugdir: "%{libdir}/debug"
includedir: "%{prefix}/include"
docdir: "%{datadir}/doc"
infodir: "%{datadir}/info"
mandir: "%{datadir}/man"
# Indicates the default build directory where input is
# normally staged
build-root: /buildstream/%{project-name}/%{element-name}
# Indicates the build installation directory in the sandbox
install-root: /buildstream-install
# Arguments for tooling used when stripping debug symbols
objcopy-link-args: --add-gnu-debuglink
objcopy-extract-args: |
--only-keep-debug --compress-debug-sections
strip-args: |
--remove-section=.comment --remove-section=.note --strip-unneeded
# Generic implementation for stripping debugging symbols
strip-binaries: |
cd "%{install-root}" && find -type f \
'(' -perm -111 -o -name '*.so*' \
-o -name '*.cmxs' -o -name '*.node' ')' \
-exec sh -ec \
'read -n4 hdr <"$1" # check for elf header
if [ "$hdr" != "$(printf \\x7fELF)" ]; then
exit 0
fi
debugfile="%{install-root}%{debugdir}/$1"
mkdir -p "$(dirname "$debugfile")"
objcopy %{objcopy-extract-args} "$1" "$debugfile"
chmod 644 "$debugfile"
strip %{strip-args} "$1"
objcopy %{objcopy-link-args} "$debugfile" "$1"' - {} ';'
# Generic implementation for reproducible python builds
fix-pyc-timestamps: |
find "%{install-root}" -name '*.pyc' -exec \
dd if=/dev/zero of={} bs=1 count=4 seek=4 conv=notrunc ';'
# Base sandbox environment, can be overridden by plugins
environment:
PATH: /usr/bin:/bin:/usr/sbin:/sbin
SHELL: /bin/sh
TERM: dumb
USER: tomjon
USERNAME: tomjon
LOGNAME: tomjon
LC_ALL: C
HOME: /tmp
TZ: UTC
# For reproducible builds we use 2011-11-11 as a constant
SOURCE_DATE_EPOCH: 1320937200
# List of environment variables which should not be taken into
# account when calculating a cache key for a given element.
#
environment-nocache: []
# Configuration for the sandbox other than environment variables
# should go in 'sandbox'. This just contains the UID and GID that
# the user in the sandbox will have. Not all sandboxes will support
# changing the values.
sandbox:
build-uid: 0
build-gid: 0
# Defaults for the 'split-rules' public data found on elements
# in the 'bst' domain.
#
split-rules:
# The runtime domain includes whatever is needed for the
# built element to run, this includes stripped executables
# and shared libraries by default.
runtime:
- |
%{bindir}/*
- |
%{sbindir}/*
- |
%{libexecdir}/*
- |
%{libdir}/lib*.so*
# The devel domain includes additional things which
# you may need for development.
#
# By default this includes header files, static libraries
# and other metadata such as pkgconfig files, m4 macros and
# libtool archives.
devel:
- |
%{includedir}
- |
%{includedir}/**
- |
%{libdir}/lib*.a
- |
%{libdir}/lib*.la
- |
%{libdir}/pkgconfig/*.pc
- |
%{datadir}/pkgconfig/*.pc
- |
%{datadir}/aclocal/*.m4
# The debug domain includes debugging information stripped
# away from libraries and executables
debug:
- |
%{debugdir}
- |
%{debugdir}/**
# The doc domain includes documentation
doc:
- |
%{docdir}
- |
%{docdir}/**
- |
%{infodir}
- |
%{infodir}/**
- |
%{mandir}
- |
%{mandir}/**
# The locale domain includes translations etc
locale:
- |
%{datadir}/locale
- |
%{datadir}/locale/**
- |
%{datadir}/i18n
- |
%{datadir}/i18n/**
- |
%{datadir}/zoneinfo
- |
%{datadir}/zoneinfo/**
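# For illustration only (not part of these defaults), an element may
# append to a split domain through its 'bst' public data, e.g.:
#
#   public:
#     bst:
#       split-rules:
#         devel:
#           (>):
#           - |
#             %{libdir}/cmake/**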
# Default behavior for `bst shell`
#
shell:
# Command to run when `bst shell` does not provide a command
#
command: [ 'sh', '-i' ]
buildstream-1.6.9/buildstream/data/userconfig.yaml 0000664 0000000 0000000 00000005234 14375152700 0022326 0 ustar 00root root 0000000 0000000 # Default BuildStream user configuration.
#
# Work Directories
#
#
# Note that BuildStream forces the XDG Base Directory names
# into the environment if they are not already set, and allows
# expansion of '~' and environment variables when specifying
# paths.
#
# Location to store sources
sourcedir: ${XDG_CACHE_HOME}/buildstream/sources
# Location to perform builds
builddir: ${XDG_CACHE_HOME}/buildstream/build
# Location to store local binary artifacts
artifactdir: ${XDG_CACHE_HOME}/buildstream/artifacts
# Location to store build logs
logdir: ${XDG_CACHE_HOME}/buildstream/logs
#
# Cache
#
cache:
# Size of the artifact cache in bytes - BuildStream will attempt to keep the
# artifact cache within this size.
# If the value is suffixed with K, M, G or T, the specified memory size is
# parsed as Kilobytes, Megabytes, Gigabytes, or Terabytes (with the base
# 1024), respectively.
# Alternatively, a percentage value may be specified, which is taken relative
# to the size of the file system containing the cache.
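#
# For illustration (not the default below), values such as the following
# would be accepted:
#
#   quota: 20G   # keep the artifact cache under 20 gigabytes
#   quota: 10%   # keep it under 10% of the containing file system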
quota: infinity
#
# Scheduler
#
scheduler:
# Maximum number of simultaneous downloading tasks.
fetchers: 10
# Maximum number of simultaneous build tasks.
builders: 4
# Maximum number of simultaneous uploading tasks.
pushers: 4
# Maximum number of retries for network tasks.
network-retries: 2
# What to do when an element fails, if not running in
# interactive mode:
#
# continue - Continue queueing jobs as much as possible
# quit - Exit after all ongoing jobs complete
# terminate - Terminate any ongoing jobs and exit
#
on-error: quit
#
# Build related configuration
#
build:
#
# Maximum number of jobs to run per build task.
#
# The default behavior when this is set to 0 is to use the
# maximum number of threads available, with a maximum of 8.
#
max-jobs: 0
#
# Logging
#
logging:
# The abbreviated cache key length to display in the UI
key-length: 8
# Whether to show extra detailed messages
verbose: True
# Maximum number of lines to print from the
# end of a failing build log
error-lines: 20
# Maximum number of lines to print in a detailed
# message on the console or in the master log (the full
# messages are always recorded in the individual build
# logs)
message-lines: 20
# Whether to enable debugging messages
debug: False
# Format string for printing the pipeline at startup, this
# also determines the default display format for `bst show`
element-format: |
%{state: >12} %{full-key} %{name} %{workspace-dirs}
# Format string for all log messages.
message-format: |
[%{elapsed}][%{key}][%{element}] %{action} %{message}
buildstream-1.6.9/buildstream/element.py 0000664 0000000 0000000 00000304124 14375152700 0020370 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016-2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
"""
Element - Base element class
============================
.. _core_element_abstract_methods:
Abstract Methods
----------------
For loading and configuration purposes, Elements must implement the
:ref:`Plugin base class abstract methods `.
.. _core_element_build_phase:
Build Phase
~~~~~~~~~~~
The following methods are the foundation of the element's *build
phase*; they must be implemented by all Element classes unless
explicitly stated otherwise.
* :func:`Element.configure_sandbox() `
Configures the :class:`.Sandbox`. This is called before anything else
* :func:`Element.stage() `
Stage dependencies and :class:`Sources ` into
the sandbox.
* :func:`Element.prepare() `
Call preparation methods that should only be performed once in the
lifetime of a build directory (e.g. autotools' ./configure).
**Optional**: If left unimplemented, this step will be skipped.
* :func:`Element.assemble() `
Perform the actual assembly of the element
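Taken together, a minimal element might implement the build phase roughly
as follows. This is an illustrative sketch only, not a shipped plugin; the
build command, sandbox flags and variable names used here are assumptions:

.. code:: python

   from buildstream import Element, ElementError, Scope, SandboxFlags

   class ExampleElement(Element):

       def configure_sandbox(self, sandbox):
           # Mark the directories we intend to use inside the sandbox
           sandbox.mark_directory(self.get_variable('build-root'))
           sandbox.mark_directory(self.get_variable('install-root'))

       def stage(self, sandbox):
           # Stage and integrate build dependencies, then stage the sources
           self.stage_dependency_artifacts(sandbox, Scope.BUILD)
           for dep in self.dependencies(Scope.BUILD):
               dep.integrate(sandbox)
           self.stage_sources(sandbox, self.get_variable('build-root'))

       def assemble(self, sandbox):
           # Run an assumed build command and collect the install root
           exitcode = sandbox.run(['sh', '-e', '-c', 'make install'],
                                  SandboxFlags.ROOT_READ_ONLY,
                                  cwd=self.get_variable('build-root'))
           if exitcode != 0:
               raise ElementError("Build failed with exit code {}".format(exitcode))
           return self.get_variable('install-root')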
Miscellaneous
~~~~~~~~~~~~~
Miscellaneous abstract methods also exist:
* :func:`Element.generate_script() `
For the purpose of ``bst source bundle``, an Element may optionally implement this.
Class Reference
---------------
"""
import os
import re
import stat
import copy
from collections import OrderedDict
from collections.abc import Mapping
from contextlib import contextmanager
from enum import Enum
import tempfile
import time
import shutil
from . import _yaml
from ._variables import Variables
from ._versions import BST_CORE_ARTIFACT_VERSION
from ._exceptions import BstError, LoadError, LoadErrorReason, ImplError, ErrorDomain
from .utils import UtilError
from .types import _UniquePriorityQueue
from . import Plugin, Consistency
from . import SandboxFlags
from . import utils
from . import _cachekey
from . import _signals
from . import _site
from ._platform import Platform
from .sandbox._config import SandboxConfig
from .types import _KeyStrength, CoreWarnings
class Scope(Enum):
"""Types of scope for a given element"""
ALL = 1
"""All elements which the given element depends on, following
all elements required for building. Including the element itself.
"""
BUILD = 2
"""All elements required for building the element, including their
respective run dependencies. Not including the given element itself.
"""
RUN = 3
"""All elements required for running the element. Including the element
itself.
"""
class ElementError(BstError):
"""This exception should be raised by :class:`.Element` implementations
to report errors to the user.
Args:
message (str): The error message to report to the user
detail (str): A possibly multiline, more detailed error message
reason (str): An optional machine readable reason string, used for test cases
temporary (bool): An indicator of whether the error is temporary and may not occur if the operation is run again. (*Since: 1.2*)
"""
def __init__(self, message, *, detail=None, reason=None, temporary=False):
super().__init__(message, detail=detail, domain=ErrorDomain.ELEMENT, reason=reason, temporary=temporary)
class Element(Plugin):
"""Element()
Base Element class.
All elements derive from this class; this interface defines how
the core will interact with Elements.
"""
__defaults = {} # The defaults from the yaml file and project
__defaults_set = False # Flag, in case there are no defaults at all
__instantiated_elements = {} # A hash of Element by MetaElement
__redundant_source_refs = [] # A list of (source, ref) tuples which were redundantly specified
BST_ARTIFACT_VERSION = 0
"""The element plugin's artifact version
Elements must first set this to 1 if they change their unique key
structure in a way that would produce a different key for the
same input, or introduce a change in the build output for the
same unique key. Further changes of this nature require bumping the
artifact version.
"""
BST_STRICT_REBUILD = False
"""Whether to rebuild this element in non strict mode if
any of the dependencies have changed.
"""
BST_FORBID_RDEPENDS = False
"""Whether to raise exceptions if an element has runtime dependencies.
*Since: 1.2*
"""
BST_FORBID_BDEPENDS = False
"""Whether to raise exceptions if an element has build dependencies.
*Since: 1.2*
"""
BST_FORBID_SOURCES = False
"""Whether to raise exceptions if an element has sources.
*Since: 1.2*
"""
def __init__(self, context, project, meta, plugin_conf):
self.__cache_key_dict = None # Dict for cache key calculation
self.__cache_key = None # Our cached cache key
super().__init__(meta.name, context, project, meta.provenance, "element")
self.__is_junction = meta.kind == "junction"
if not self.__is_junction:
project.ensure_fully_loaded()
self.normal_name = os.path.splitext(self.name.replace(os.sep, '-'))[0]
"""A normalized element name
This is the original element name without path separators or
the extension; it's used mainly for composing log file names
and creating directory names and such.
"""
self.__runtime_dependencies = [] # Direct runtime dependency Elements
self.__build_dependencies = [] # Direct build dependency Elements
self.__strict_dependencies = [] # Direct build dependency subset which require strict rebuilds
self.__reverse_dependencies = set() # Direct reverse dependency Elements
self.__ready_for_runtime = False # Whether the element has all its dependencies ready and has a cache key
self.__sources = [] # List of Sources
self.__weak_cache_key = None # Our cached weak cache key
self.__strict_cache_key = None # Our cached cache key for strict builds
self.__artifacts = context.artifactcache # Artifact cache
self.__consistency = Consistency.INCONSISTENT # Cached overall consistency state
self.__cached = None # Whether we have a cached artifact
self.__strong_cached = None # Whether we have a cached artifact
self.__assemble_scheduled = False # Element is scheduled to be assembled
self.__assemble_done = False # Element is assembled
self.__tracking_scheduled = False # Sources are scheduled to be tracked
self.__tracking_done = False # Sources have been tracked
self.__pull_done = False # Whether pull was attempted
self.__splits = None # Resolved regex objects for computing split domains
self.__whitelist_regex = None # Resolved regex object to check if file is allowed to overlap
self.__staged_sources_directory = None # Location where Element.stage_sources() was called
self.__tainted = None # Whether the artifact is tainted and should not be shared
self.__required = False # Whether the artifact is required in the current session
# hash tables of loaded artifact metadata, hashed by key
self.__metadata_keys = {} # Strong and weak keys for this key
self.__metadata_dependencies = {} # Dictionary of dependency strong keys
self.__metadata_workspaced = {} # Boolean of whether it's workspaced
self.__metadata_workspaced_dependencies = {} # List of which dependencies are workspaced
# Ensure we have loaded this class's defaults
self.__init_defaults(plugin_conf)
# Collect the composited variables and resolve them
variables = self.__extract_variables(meta)
variables['element-name'] = self.name
self.__variables = Variables(variables)
if not self.__is_junction:
self.__variables.check()
# Collect the composited environment now that we have variables
env = self.__extract_environment(meta)
self.__environment = env
# Collect the environment nocache blacklist list
nocache = self.__extract_env_nocache(meta)
self.__env_nocache = nocache
# Grab public domain data declared for this instance
self.__public = self.__extract_public(meta)
self.__dynamic_public = None
# Collect the composited element configuration and
# ask the element to configure itself.
self.__config = self.__extract_config(meta)
self._configure(self.__config)
# Extract Sandbox config
self.__sandbox_config = self.__extract_sandbox_config(meta)
self.__sandbox_config_supported = True
platform = Platform.get_platform()
if not platform.check_sandbox_config(self.__sandbox_config):
# Local sandbox does not fully support specified sandbox config.
# This will taint the artifact, disable pushing.
self.__sandbox_config_supported = False
def __lt__(self, other):
return self.name < other.name
#############################################################
# Abstract Methods #
#############################################################
def configure_sandbox(self, sandbox):
"""Configures the the sandbox for execution
Args:
sandbox (:class:`.Sandbox`): The build sandbox
Raises:
(:class:`.ElementError`): When the element raises an error
Elements must implement this method to configure the sandbox object
for execution.
"""
raise ImplError("element plugin '{kind}' does not implement configure_sandbox()".format(
kind=self.get_kind()))
def stage(self, sandbox):
"""Stage inputs into the sandbox directories
Args:
sandbox (:class:`.Sandbox`): The build sandbox
Raises:
(:class:`.ElementError`): When the element raises an error
Elements must implement this method to populate the sandbox
directory with data. This is done either by staging :class:`.Source`
objects, by staging the artifacts of the elements this element depends
on, or both.
"""
raise ImplError("element plugin '{kind}' does not implement stage()".format(
kind=self.get_kind()))
def prepare(self, sandbox):
"""Run one-off preparation commands.
This is run before assemble(), but is guaranteed to run only
the first time if we build incrementally - this makes it
possible to run configure-like commands without causing the
entire element to rebuild.
Args:
sandbox (:class:`.Sandbox`): The build sandbox
Raises:
(:class:`.ElementError`): When the element raises an error
By default, this method does nothing, but may be overridden to
allow configure-like commands.
*Since: 1.2*
"""
def assemble(self, sandbox):
"""Assemble the output artifact
Args:
sandbox (:class:`.Sandbox`): The build sandbox
Returns:
(str): An absolute path within the sandbox to collect the artifact from
Raises:
(:class:`.ElementError`): When the element raises an error
Elements must implement this method to create an output
artifact from its sources and dependencies.
"""
raise ImplError("element plugin '{kind}' does not implement assemble()".format(
kind=self.get_kind()))
def generate_script(self):
"""Generate a build (sh) script to build this element
Returns:
(str): A string containing the shell commands required to build the element
BuildStream guarantees the following environment when the
generated script is run:
- All element variables have been exported.
- The cwd is `self.get_variable('build_root')/self.normal_name`.
- $PREFIX is set to `self.get_variable('install_root')`.
- The directory indicated by $PREFIX is an empty directory.
Files are expected to be installed to $PREFIX.
If the script fails, it is expected to return with an exit
code != 0.
"""
raise ImplError("element plugin '{kind}' does not implement write_script()".format(
kind=self.get_kind()))
#############################################################
# Public Methods #
#############################################################
def sources(self):
"""A generator function to enumerate the element sources
Yields:
(:class:`.Source`): The sources of this element
"""
for source in self.__sources:
yield source
def dependencies(self, scope, *, recurse=True, visited=None, recursed=False):
"""dependencies(scope, *, recurse=True)
A generator function which yields the dependencies of the given element.
If `recurse` is specified (the default), the full dependencies will be listed
in deterministic staging order, starting with the basemost elements in the
given `scope`. Otherwise, if `recurse` is not specified then only the direct
dependencies in the given `scope` will be traversed, and the element itself
will be omitted.
Args:
scope (:class:`.Scope`): The scope to iterate in
recurse (bool): Whether to recurse
Yields:
(:class:`.Element`): The dependencies in `scope`, in deterministic staging order
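**Example:**

.. code:: python

   # Iterate over only the direct build dependencies of this element
   # (illustrative; self.status() is used here just to report the names)
   for dep in self.dependencies(Scope.BUILD, recurse=False):
       self.status("Direct build dependency", detail=dep.name)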
"""
if visited is None:
visited = {}
full_name = self._get_full_name()
scope_set = set((Scope.BUILD, Scope.RUN)) if scope == Scope.ALL else set((scope,))
if full_name in visited and scope_set.issubset(visited[full_name]):
return
should_yield = False
if full_name not in visited:
visited[full_name] = scope_set
should_yield = True
else:
visited[full_name] |= scope_set
if recurse or not recursed:
if scope == Scope.ALL:
for dep in self.__build_dependencies:
yield from dep.dependencies(Scope.ALL, recurse=recurse,
visited=visited, recursed=True)
for dep in self.__runtime_dependencies:
if dep not in self.__build_dependencies:
yield from dep.dependencies(Scope.ALL, recurse=recurse,
visited=visited, recursed=True)
elif scope == Scope.BUILD:
for dep in self.__build_dependencies:
yield from dep.dependencies(Scope.RUN, recurse=recurse,
visited=visited, recursed=True)
elif scope == Scope.RUN:
for dep in self.__runtime_dependencies:
yield from dep.dependencies(Scope.RUN, recurse=recurse,
visited=visited, recursed=True)
# Yield self only at the end, after anything needed has been traversed
if should_yield and (recurse or recursed) and scope != Scope.BUILD:
yield self
def search(self, scope, name):
"""Search for a dependency by name
Args:
scope (:class:`.Scope`): The scope to search
name (str): The dependency to search for
Returns:
(:class:`.Element`): The dependency element, or None if not found.
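**Example:**

.. code:: python

   # Look up a runtime dependency by its element name (name is illustrative)
   dep = self.search(Scope.RUN, 'base/base-system.bst')
   if dep is None:
       self.warn("Expected dependency not found")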
"""
for dep in self.dependencies(scope):
if dep.name == name:
return dep
return None
def node_subst_member(self, node, member_name, default=utils._sentinel):
"""Fetch the value of a string node member, substituting any variables
in the loaded value with the element contextual variables.
Args:
node (dict): A dictionary loaded from YAML
member_name (str): The name of the member to fetch
default (str): A value to return when *member_name* is not specified in *node*
Returns:
The value of *member_name* in *node*, otherwise *default*
Raises:
:class:`.LoadError`: When *member_name* is not found and no *default* was provided
This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_member`
except that it assumes the expected type is a string and will also perform variable
substitutions.
**Example:**
.. code:: python
# Expect a string 'name' in 'node', substituting any
# variables in the returned string
name = self.node_subst_member(node, 'name')
"""
value = self.node_get_member(node, str, member_name, default)
provenance = _yaml.node_get_provenance(node, key=member_name)
try:
return self.__variables.subst(value, provenance)
except LoadError as e:
raise LoadError(e.reason, '{}: {}'.format(provenance, str(e))) from e
def node_subst_list(self, node, member_name):
"""Fetch a list from a node member, substituting any variables in the list
Args:
node (dict): A dictionary loaded from YAML
member_name (str): The name of the member to fetch (a list)
Returns:
The list in *member_name*
Raises:
:class:`.LoadError`
This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_member`
except that it assumes the expected type is a list of strings and will also
perform variable substitutions.
"""
value = self.node_get_member(node, list, member_name)
ret = []
for index, x in enumerate(value):
provenance = _yaml.node_get_provenance(node, key=member_name, indices=[index])
try:
ret.append(self.__variables.subst(x, provenance))
except LoadError as e:
raise LoadError(e.reason, '{}: {}'.format(provenance, str(e))) from e
return ret
def node_subst_list_element(self, node, member_name, indices):
"""Fetch the value of a list element from a node member, substituting any variables
in the loaded value with the element contextual variables.
Args:
node (dict): A dictionary loaded from YAML
member_name (str): The name of the member to fetch
indices (list of int): List of indices to search, in case of nested lists
Returns:
The value of the list element in *member_name* at the specified *indices*
Raises:
:class:`.LoadError`
This is essentially the same as :func:`~buildstream.plugin.Plugin.node_get_list_element`
except that it assumes the expected type is a string and will also perform variable
substitutions.
**Example:**
.. code:: python
# Fetch the list itself
strings = self.node_get_member(node, list, 'strings')
# Iterate over the list indices
for i in range(len(strings)):
# Fetch the strings in this list, substituting content
# with our element's variables if needed
string = self.node_subst_list_element(
node, 'strings', [ i ])
"""
value = self.node_get_list_element(node, str, member_name, indices)
provenance = _yaml.node_get_provenance(node, key=member_name, indices=indices)
try:
return self.__variables.subst(value, provenance)
except LoadError as e:
raise LoadError(e.reason, '{}: {}'.format(provenance, str(e))) from e
def compute_manifest(self, *, include=None, exclude=None, orphans=True):
"""Compute and return this element's selective manifest
The manifest consists on the list of file paths in the
artifact. The files in the manifest are selected according to
`include`, `exclude` and `orphans` parameters. If `include` is
not specified then all files spoken for by any domain are
included unless explicitly excluded with an `exclude` domain.
Args:
include (list): An optional list of domains to include files from
exclude (list): An optional list of domains to exclude files from
orphans (bool): Whether to include files not spoken for by split domains
Yields:
(str): The paths of the files in manifest
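**Example:**

.. code:: python

   # List the files selected by the 'devel' split domain
   # (illustrative; assumes this element's artifact is cached)
   for path in self.compute_manifest(include=['devel'], orphans=False):
       self.status("devel file", detail=path)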
"""
self.__assert_cached()
return self.__compute_splits(include, exclude, orphans)
def stage_artifact(self, sandbox, *, path=None, include=None, exclude=None, orphans=True, update_mtimes=None):
"""Stage this element's output artifact in the sandbox
This will stage the files from the artifact to the sandbox at specified location.
The files are selected for staging according to the `include`, `exclude` and `orphans`
parameters; if `include` is not specified then all files spoken for by any domain
are included unless explicitly excluded with an `exclude` domain.
Args:
sandbox (:class:`.Sandbox`): The build sandbox
path (str): An optional sandbox relative path
include (list): An optional list of domains to include files from
exclude (list): An optional list of domains to exclude files from
orphans (bool): Whether to include files not spoken for by split domains
update_mtimes (list): An optional list of files whose mtimes to set to the current time.
Raises:
(:class:`.ElementError`): If the element has not yet produced an artifact.
Returns:
(:class:`~.utils.FileListResult`): The result describing what happened while staging
.. note::
Directories in `dest` are replaced with files from `src`,
unless the existing directory in `dest` is not empty in
which case the path will be reported in the return value.
**Example:**
.. code:: python
# Stage the dependencies for a build of 'self'
for dep in self.dependencies(Scope.BUILD):
dep.stage_artifact(sandbox)
"""
if not self._cached():
detail = "No artifacts have been cached yet for that element\n" + \
"Try building the element first with `bst build`\n"
raise ElementError("No artifacts to stage",
detail=detail, reason="uncached-checkout-attempt")
if update_mtimes is None:
update_mtimes = []
# Time to use the artifact, check once more that it's there
self.__assert_cached()
with self.timed_activity("Staging {}/{}".format(self.name, self._get_brief_display_key())):
# Get the extracted artifact
artifact_base, _ = self.__extract()
artifact = os.path.join(artifact_base, 'files')
# Hard link it into the staging area
#
basedir = sandbox.get_directory()
stagedir = basedir \
if path is None \
else os.path.join(basedir, path.lstrip(os.sep))
files = list(self.__compute_splits(include, exclude, orphans))
# We must not hardlink files whose mtimes we want to update
if update_mtimes:
link_files = [f for f in files if f not in update_mtimes]
copy_files = [f for f in files if f in update_mtimes]
else:
link_files = files
copy_files = []
link_result = utils.link_files(artifact, stagedir, files=link_files,
report_written=True)
copy_result = utils.copy_files(artifact, stagedir, files=copy_files,
report_written=True)
cur_time = time.time()
for f in copy_result.files_written:
os.utime(os.path.join(stagedir, f), times=(cur_time, cur_time))
return link_result.combine(copy_result)
def stage_dependency_artifacts(self, sandbox, scope, *, path=None,
include=None, exclude=None, orphans=True):
"""Stage element dependencies in scope
This is primarily a convenience wrapper around
:func:`Element.stage_artifact() `
which takes care of staging all the dependencies in `scope` and issuing the
appropriate warnings.
Args:
sandbox (:class:`.Sandbox`): The build sandbox
scope (:class:`.Scope`): The scope to stage dependencies in
path (str): An optional sandbox relative path
include (list): An optional list of domains to include files from
exclude (list): An optional list of domains to exclude files from
orphans (bool): Whether to include files not spoken for by split domains
Raises:
(:class:`.ElementError`): If any of the dependencies in `scope` have not
yet produced artifacts, or if forbidden overlaps
occur.
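**Example:**

.. code:: python

   # Stage the runtime dependencies of 'self' under an alternate prefix
   # (illustrative; '/opt' is an arbitrary path)
   self.stage_dependency_artifacts(sandbox, Scope.RUN, path='/opt')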
"""
ignored = {}
overlaps = OrderedDict()
files_written = {}
old_dep_keys = {}
workspace = self._get_workspace()
context = self._get_context()
if self.__can_build_incrementally() and workspace.last_successful:
# Try to perform an incremental build if the last successful
# build is still in the artifact cache
#
if self.__artifacts.contains(self, workspace.last_successful):
old_dep_keys = self.__get_artifact_metadata_dependencies(workspace.last_successful)
else:
# Last successful build is no longer in the artifact cache,
# so let's reset it and perform a full build now.
workspace.prepared = False
workspace.last_successful = None
self.info("Resetting workspace state, last successful build is no longer in the cache")
# In case we are staging in the main process
if utils._is_main_process():
context.get_workspaces().save_config()
for dep in self.dependencies(scope):
# If we are workspaced, and we therefore perform an
# incremental build, we must ensure that we update the mtimes
# of any files created by our dependencies since the last
# successful build.
to_update = None
if workspace and old_dep_keys:
dep.__assert_cached()
if dep.name in old_dep_keys:
key_new = dep._get_cache_key()
key_old = old_dep_keys[dep.name]
# We only need to worry about modified and added
# files, since removed files will be picked up by
# build systems anyway.
to_update, _, added = self.__artifacts.diff(dep, key_old, key_new, subdir='files')
workspace.add_running_files(dep.name, to_update + added)
to_update.extend(workspace.running_files[dep.name])
# In case we are running `bst shell`, this happens in the
# main process and we need to update the workspace config
if utils._is_main_process():
context.get_workspaces().save_config()
result = dep.stage_artifact(sandbox,
path=path,
include=include,
exclude=exclude,
orphans=orphans,
update_mtimes=to_update)
if result.overwritten:
for overwrite in result.overwritten:
# Completely new overwrite
if overwrite not in overlaps:
# Find the overwritten element by checking where we've
# written the element before
for elm, contents in files_written.items():
if overwrite in contents:
overlaps[overwrite] = [elm, dep.name]
else:
overlaps[overwrite].append(dep.name)
files_written[dep.name] = result.files_written
if result.ignored:
ignored[dep.name] = result.ignored
if overlaps:
overlap_warning = False
warning_detail = "Staged files overwrite existing files in staging area:\n"
for f, elements in overlaps.items():
overlap_warning_elements = []
# The bottom item overlaps nothing
overlapping_elements = elements[1:]
for elm in overlapping_elements:
element = self.search(scope, elm)
if not element.__file_is_whitelisted(f):
overlap_warning_elements.append(elm)
overlap_warning = True
warning_detail += _overlap_error_detail(f, overlap_warning_elements, elements)
if overlap_warning:
self.warn("Non-whitelisted overlaps detected", detail=warning_detail,
warning_token=CoreWarnings.OVERLAPS)
if ignored:
detail = "Not staging files which would replace non-empty directories:\n"
for key, value in ignored.items():
detail += "\nFrom {}:\n".format(key)
detail += " " + " ".join(["/" + f + "\n" for f in value])
self.warn("Ignored files", detail=detail)
def integrate(self, sandbox):
"""Integrate currently staged filesystem against this artifact.
Args:
sandbox (:class:`.Sandbox`): The build sandbox
This modifies the sysroot staged inside the sandbox so that
the sysroot is *integrated*. Only an *integrated* sandbox
may be trusted for running the software therein, as the integration
commands will create and update important system cache files
required for running the installed software (such as the ld.so.cache).
"""
bstdata = self.get_public_data('bst')
environment = self.get_environment()
if bstdata is not None:
commands = self.node_get_member(bstdata, list, 'integration-commands', [])
for i in range(len(commands)):
cmd = self.node_subst_list_element(bstdata, 'integration-commands', [i])
self.status("Running integration command", detail=cmd)
exitcode = sandbox.run(['sh', '-e', '-c', cmd], 0, env=environment, cwd='/')
if exitcode != 0:
raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode))
def stage_sources(self, sandbox, directory):
"""Stage this element's sources to a directory in the sandbox
Args:
sandbox (:class:`.Sandbox`): The build sandbox
directory (str): An absolute path within the sandbox to stage the sources at
"""
# Hold on to the location where a plugin decided to stage sources,
# this will be used to reconstruct the failed sysroot properly
# after a failed build.
#
assert self.__staged_sources_directory is None
self.__staged_sources_directory = directory
self._stage_sources_in_sandbox(sandbox, directory)
def get_public_data(self, domain):
"""Fetch public data on this element
Args:
domain (str): A public domain name to fetch data for
Returns:
(dict): The public data dictionary for the given domain
.. note::
This can only be called in the abstract methods which are
called as a part of the :ref:`build phase `
and never before.
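**Example:**

.. code:: python

   # Read the integration commands declared in the 'bst' domain,
   # mirroring what integrate() does (shown for illustration only)
   bstdata = self.get_public_data('bst')
   if bstdata is not None:
       commands = self.node_get_member(bstdata, list, 'integration-commands', [])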
"""
if self.__dynamic_public is None:
self.__load_public_data()
data = self.__dynamic_public.get(domain)
if data is not None:
data = _yaml.node_copy(data)
return data
def set_public_data(self, domain, data):
"""Set public data on this element
Args:
domain (str): A public domain name to set data for
data (dict): The public data dictionary for the given domain
This allows an element to dynamically mutate public data of
elements or add new domains as a result of successful completion
of the :func:`Element.assemble() `
method.
"""
if self.__dynamic_public is None:
self.__load_public_data()
if data is not None:
data = _yaml.node_copy(data)
self.__dynamic_public[domain] = data
def get_environment(self):
"""Fetch the environment suitable for running in the sandbox
Returns:
(dict): A dictionary of string key/values suitable for passing
to :func:`Sandbox.run() `
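**Example:**

.. code:: python

   # Run a command in the sandbox with the element's resolved environment
   # (the command itself is illustrative)
   exitcode = sandbox.run(['sh', '-c', 'ls'], 0,
                          env=self.get_environment(), cwd='/')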
"""
return _yaml.node_sanitize(self.__environment)
def get_variable(self, varname):
"""Fetch the value of a variable resolved for this element.
Args:
varname (str): The name of the variable to fetch
Returns:
(str): The resolved value for *varname*, or None if no
variable was declared with the given name.
"""
return self.__variables.get(varname)
#############################################################
# Private Methods used in BuildStream #
#############################################################
# _new_from_meta():
#
# Recursively instantiate a new Element instance, its sources
# and its dependencies from a meta element.
#
# Args:
# meta (MetaElement): The meta element
#
# Returns:
# (Element): A newly created Element instance
#
@classmethod
def _new_from_meta(cls, meta):
if not meta.first_pass:
meta.project.ensure_fully_loaded()
if meta in cls.__instantiated_elements:
return cls.__instantiated_elements[meta]
element = meta.project.create_element(meta, first_pass=meta.first_pass)
cls.__instantiated_elements[meta] = element
# Instantiate sources
for meta_source in meta.sources:
meta_source.first_pass = meta.kind == "junction"
source = meta.project.create_source(meta_source,
first_pass=meta.first_pass)
redundant_ref = source._load_ref()
element.__sources.append(source)
# Collect redundant refs which occurred at load time
if redundant_ref is not None:
cls.__redundant_source_refs.append((source, redundant_ref))
# Instantiate dependencies
for meta_dep in meta.dependencies:
dependency = Element._new_from_meta(meta_dep)
element.__runtime_dependencies.append(dependency)
dependency.__reverse_dependencies.add(element)
for meta_dep in meta.build_dependencies:
dependency = Element._new_from_meta(meta_dep)
element.__build_dependencies.append(dependency)
dependency.__reverse_dependencies.add(element)
if meta_dep in meta.strict_dependencies:
element.__strict_dependencies.append(dependency)
return element
# _get_redundant_source_refs()
#
# Fetches a list of (Source, ref) tuples of all the Sources
# which were loaded with a ref specified in the element declaration
# for projects which use project.refs ref-storage.
#
# This is used to produce a warning
@classmethod
def _get_redundant_source_refs(cls):
return cls.__redundant_source_refs
# _reset_load_state()
#
# This is called by Pipeline.cleanup() and is used to
# reset the loader state between multiple sessions.
#
@classmethod
def _reset_load_state(cls):
cls.__instantiated_elements = {}
cls.__redundant_source_refs = []
# _get_consistency()
#
# Returns cached consistency state
#
def _get_consistency(self):
return self.__consistency
# _cached():
#
# Returns:
# (bool): Whether this element is already present in
# the artifact cache
#
def _cached(self):
return self.__cached
# _buildable():
#
# Returns:
# (bool): Whether this element can currently be built
#
def _buildable(self):
if self._get_consistency() != Consistency.CACHED:
return False
for dependency in self.dependencies(Scope.BUILD):
# In non-strict mode an element's strong cache key may not be available yet
# even though an artifact is available in the local cache. This can happen
# if the pull job is still pending as the remote cache may have an artifact
# that matches the strict cache key, which is preferred over a locally
# cached artifact with a weak cache key match.
if not dependency._cached() or not dependency._get_cache_key(strength=_KeyStrength.STRONG):
return False
if not self.__assemble_scheduled:
return False
return True
# _get_cache_key():
#
# Returns the cache key
#
# Args:
# strength (_KeyStrength): Either STRONG or WEAK key strength
#
# Returns:
# (str): A hex digest cache key for this Element, or None
#
# None is returned if information for the cache key is missing.
#
def _get_cache_key(self, strength=_KeyStrength.STRONG):
if strength == _KeyStrength.STRONG:
return self.__cache_key
else:
return self.__weak_cache_key
# _can_query_cache():
#
# Returns whether the cache key required for cache queries is available.
#
# Returns:
# (bool): True if cache can be queried
#
def _can_query_cache(self):
# If build has already been scheduled, we know that the element is
# not cached and thus can allow cache query even if the strict cache key
# is not available yet.
# This special case is required for workspaced elements to prevent
# them from getting blocked in the pull queue.
if self.__assemble_scheduled:
return True
# cache cannot be queried until strict cache key is available
return self.__strict_cache_key is not None
# _update_state()
#
# Keep track of element state. Calculate cache keys if possible and
# check whether artifacts are cached.
#
# This must be called whenever the state of an element may have changed.
#
def _update_state(self):
context = self._get_context()
# Compute and determine consistency of sources
self.__update_source_state()
if self._get_consistency() == Consistency.INCONSISTENT:
# Tracking may still be pending
return
if self._get_workspace() and self.__assemble_scheduled:
# If we have an active workspace and are going to build, then
# discard current cache key values as their correct values can only
# be calculated once the build is complete
self.__cache_key_dict = None
self.__cache_key = None
self.__weak_cache_key = None
self.__strict_cache_key = None
self.__strong_cached = None
return
if self.__weak_cache_key is None:
# Calculate weak cache key
# Weak cache key includes names of direct build dependencies
# so as to only trigger rebuilds when the shape of the
# dependencies change.
#
# Some conditions cause dependencies to be strict, such
# that this element will be rebuilt anyway if the dependency
# changes even in non strict mode, for these cases we just
# encode the dependency's weak cache key instead of its name.
#
dependencies = [
e._get_cache_key(strength=_KeyStrength.WEAK)
if self.BST_STRICT_REBUILD or e in self.__strict_dependencies
else e.name
for e in self.dependencies(Scope.BUILD)
]
self.__weak_cache_key = self.__calculate_cache_key(dependencies)
if self.__weak_cache_key is None:
# Weak cache key could not be calculated yet
return
if not context.get_strict():
# Full cache query in non-strict mode requires both the weak and
# strict cache keys. However, we need to determine as early as
# possible whether a build is pending to discard unstable cache keys
# for workspaced elements. For this cache check the weak cache keys
# are sufficient. However, don't update the `cached` attributes
# until the full cache query below.
cached = self.__artifacts.contains(self, self.__weak_cache_key)
if (not self.__assemble_scheduled and not self.__assemble_done and
not cached and not self._pull_pending()):
# For uncached workspaced elements, assemble is required
# even if we only need the cache key
if self._is_required() or self._get_workspace():
self._schedule_assemble()
return
if self.__strict_cache_key is None:
dependencies = [
e.__strict_cache_key for e in self.dependencies(Scope.BUILD)
]
self.__strict_cache_key = self.__calculate_cache_key(dependencies)
if self.__strict_cache_key is None:
# Strict cache key could not be calculated yet
return
# Query caches now that the weak and strict cache keys are available
key_for_cache_lookup = self.__strict_cache_key if context.get_strict() else self.__weak_cache_key
if not self.__cached:
self.__cached = self.__artifacts.contains(self, key_for_cache_lookup)
if not self.__strong_cached:
self.__strong_cached = self.__artifacts.contains(self, self.__strict_cache_key)
if (not self.__assemble_scheduled and not self.__assemble_done and
not self.__cached and not self._pull_pending()):
# Workspaced sources are considered unstable if a build is pending
# as the build will modify the contents of the workspace.
# Determine as early as possible if a build is pending to discard
# unstable cache keys.
# For uncached workspaced elements, assemble is required
# even if we only need the cache key
if self._is_required() or self._get_workspace():
self._schedule_assemble()
return
if self.__cache_key is None:
# Calculate strong cache key
if context.get_strict():
self.__cache_key = self.__strict_cache_key
elif self._pull_pending():
# Effective strong cache key is unknown until after the pull
pass
elif self._cached():
# Load the strong cache key from the artifact
strong_key, _ = self.__get_artifact_metadata_keys()
self.__cache_key = strong_key
elif self.__assemble_scheduled or self.__assemble_done:
# Artifact will or has been built, not downloaded
dependencies = [
e._get_cache_key() for e in self.dependencies(Scope.BUILD)
]
self.__cache_key = self.__calculate_cache_key(dependencies)
if self.__cache_key is None:
# Strong cache key could not be calculated yet
return
if not self.__ready_for_runtime and self.__cache_key is not None:
self.__ready_for_runtime = all(
dep.__ready_for_runtime for dep in self.__runtime_dependencies)
# _get_display_key():
#
# Returns cache keys for display purposes
#
# Returns:
# (str): A full hex digest cache key for this Element
# (str): An abbreviated hex digest cache key for this Element
# (bool): True if key should be shown as dim, False otherwise
#
# Question marks are returned if information for the cache key is missing.
#
def _get_display_key(self):
context = self._get_context()
dim_key = True
cache_key = self._get_cache_key()
if not cache_key:
cache_key = "{:?<64}".format('')
elif self._get_cache_key() == self.__strict_cache_key:
# Strong cache key used in this session matches cache key
# that would be used in strict build mode
dim_key = False
length = min(len(cache_key), context.log_key_length)
return (cache_key, cache_key[0:length], dim_key)
# _get_brief_display_key()
#
# Returns an abbreviated cache key for display purposes
#
# Returns:
# (str): An abbreviated hex digest cache key for this Element
#
# Question marks are returned if information for the cache key is missing.
#
def _get_brief_display_key(self):
_, display_key, _ = self._get_display_key()
return display_key
# _preflight():
#
# A wrapper for calling the abstract preflight() method on
# the element and its sources.
#
def _preflight(self):
if self.BST_FORBID_RDEPENDS and self.BST_FORBID_BDEPENDS:
if any(self.dependencies(Scope.RUN, recurse=False)) or any(self.dependencies(Scope.BUILD, recurse=False)):
raise ElementError("{}: Dependencies are forbidden for '{}' elements"
.format(self, self.get_kind()), reason="element-forbidden-depends")
if self.BST_FORBID_RDEPENDS:
if any(self.dependencies(Scope.RUN, recurse=False)):
raise ElementError("{}: Runtime dependencies are forbidden for '{}' elements"
.format(self, self.get_kind()), reason="element-forbidden-rdepends")
if self.BST_FORBID_BDEPENDS:
if any(self.dependencies(Scope.BUILD, recurse=False)):
raise ElementError("{}: Build dependencies are forbidden for '{}' elements"
.format(self, self.get_kind()), reason="element-forbidden-bdepends")
if self.BST_FORBID_SOURCES:
if any(self.sources()):
raise ElementError("{}: Sources are forbidden for '{}' elements"
.format(self, self.get_kind()), reason="element-forbidden-sources")
try:
self.preflight()
except BstError as e:
# Prepend provenance to the error
raise ElementError("{}: {}".format(self, e), reason=e.reason) from e
# Ensure that the first source does not need access to previous sources
if self.__sources and self.__sources[0]._requires_previous_sources():
raise ElementError("{}: {} cannot be the first source of an element "
"as it requires access to previous sources"
.format(self, self.__sources[0]))
# Preflight the sources
for source in self.sources():
source._preflight()
# _schedule_tracking():
#
# Force the element state to be inconsistent; any sources will appear to be
# inconsistent.
#
# This is used across the pipeline in sessions where the
# elements in question are going to be tracked, causing the
# pipeline to rebuild safely by ensuring cache key recalculation
# and reinterrogation of element state after tracking of elements
# succeeds.
#
def _schedule_tracking(self):
self.__tracking_scheduled = True
self._update_state()
# _tracking_done():
#
# This is called in the main process after the element has been tracked
#
def _tracking_done(self):
assert self.__tracking_scheduled
self.__tracking_scheduled = False
self.__tracking_done = True
self.__update_state_recursively()
# _track():
#
# Calls track() on the Element sources
#
# Raises:
# SourceError: If one of the element sources has an error
#
# Returns:
# (list): A list of Source object ids and their new references
#
def _track(self):
refs = []
for index, source in enumerate(self.__sources):
old_ref = source.get_ref()
new_ref = source._track(self.__sources[0:index])
refs.append((source._unique_id, new_ref))
# Complementary warning that the new ref will be unused.
if old_ref != new_ref and self._get_workspace():
detail = "This source has an open workspace.\n" \
+ "To start using the new reference, please close the existing workspace."
source.warn("Updated reference will be ignored as source has open workspace", detail=detail)
return refs
# _prepare_sandbox():
#
# This stages things for either _shell() (below) or also
# is used to stage things by the `bst checkout` codepath
#
@contextmanager
def _prepare_sandbox(self, scope, directory, deps='run', integrate=True):
with self.__sandbox(directory, config=self.__sandbox_config) as sandbox:
# Configure always comes first, and we need it.
self.configure_sandbox(sandbox)
# Stage something if we need it
if not directory:
if scope == Scope.BUILD:
self.stage(sandbox)
elif scope == Scope.RUN:
if deps == 'run':
dependency_scope = Scope.RUN
else:
dependency_scope = None
# Stage deps in the sandbox root
with self.timed_activity("Staging dependencies", silent_nested=True):
self.stage_dependency_artifacts(sandbox, dependency_scope)
# Run any integration commands provided by the dependencies
# once they are all staged and ready
if integrate:
with self.timed_activity("Integrating sandbox"):
for dep in self.dependencies(dependency_scope):
dep.integrate(sandbox)
yield sandbox
# _stage_sources_in_sandbox():
#
# Stage this element's sources to a directory inside sandbox
#
# Args:
# sandbox (:class:`.Sandbox`): The build sandbox
# directory (str): An absolute path to stage the sources at
# mount_workspaces (bool): mount workspaces if True, copy otherwise
#
def _stage_sources_in_sandbox(self, sandbox, directory, mount_workspaces=True):
# Only artifact caches that implement diff() are allowed to
# perform incremental builds.
if mount_workspaces and self.__can_build_incrementally():
workspace = self._get_workspace()
sandbox.mark_directory(directory)
sandbox._set_mount_source(directory, workspace.get_absolute_path())
# Stage all sources that need to be copied
sandbox_root = sandbox.get_directory()
host_directory = os.path.join(sandbox_root, directory.lstrip(os.sep))
self._stage_sources_at(host_directory, mount_workspaces=mount_workspaces)
# _stage_sources_at():
#
# Stage this element's sources to a directory
#
# Args:
# directory (str): An absolute path to stage the sources at
# mount_workspaces (bool): mount workspaces if True, copy otherwise
#
def _stage_sources_at(self, directory, mount_workspaces=True):
with self.timed_activity("Staging sources", silent_nested=True):
if os.path.isdir(directory) and os.listdir(directory):
raise ElementError("Staging directory '{}' is not empty".format(directory))
workspace = self._get_workspace()
if workspace:
# If mount_workspaces is set and we're doing incremental builds,
# the workspace is already mounted into the sandbox.
if not (mount_workspaces and self.__can_build_incrementally()):
with self.timed_activity("Staging local files at {}"
.format(workspace.get_absolute_path())):
workspace.stage(directory)
else:
# No workspace, stage directly
for source in self.sources():
source._stage(directory)
# Ensure deterministic mtime of sources at build time
utils._set_deterministic_mtime(directory)
# Ensure deterministic owners of sources at build time
utils._set_deterministic_user(directory)
# _set_required():
#
# Mark this element and its runtime dependencies as required.
# This unblocks pull/fetch/build.
#
def _set_required(self):
if self.__required:
# Already done
return
self.__required = True
# Request artifacts of runtime dependencies
for dep in self.dependencies(Scope.RUN, recurse=False):
dep._set_required()
self._update_state()
# _is_required():
#
# Returns whether this element has been marked as required.
#
def _is_required(self):
return self.__required
# _schedule_assemble():
#
# This is called in the main process before the element is assembled
# in a subprocess.
#
def _schedule_assemble(self):
assert not self.__assemble_scheduled
self.__assemble_scheduled = True
# Requests artifacts of build dependencies
for dep in self.dependencies(Scope.BUILD, recurse=False):
dep._set_required()
self._set_required()
# Invalidate workspace key as the build modifies the workspace directory
workspace = self._get_workspace()
if workspace:
workspace.invalidate_key()
self._update_state()
# _assemble_done():
#
# This is called in the main process after the element has been assembled
# and in a subprocess after assembly completes.
#
# This will result in updating the element state.
#
def _assemble_done(self):
assert self.__assemble_scheduled
self.__assemble_scheduled = False
self.__assemble_done = True
self.__update_state_recursively()
if self._get_workspace() and self._cached():
#
# Note that this block can only happen in the
# main process, since `self._cached()` cannot
# be true when assembly is completed in the task.
#
# For this reason, it is safe to update and
# save the workspaces configuration
#
key = self._get_cache_key()
workspace = self._get_workspace()
workspace.last_successful = key
workspace.clear_running_files()
self._get_context().get_workspaces().save_config()
# This element will have already been marked as
# required, but we bump the atime again, in case
# we did not know the cache key until now.
#
# FIXME: This is not exactly correct, we should be
# doing this at the time which we have discovered
# a new cache key, this just happens to be the
# last place where that can happen.
#
# Ultimately, we should be refactoring
# Element._update_state() such that we know
# when a cache key is actually discovered.
#
self.__artifacts.mark_required_elements([self])
# _assemble():
#
# Internal method for running the entire build phase.
#
# This will:
# - Prepare a sandbox for the build
# - Call the public abstract methods for the build phase
# - Cache the resulting artifact
#
# Returns:
# (int): The size of the newly cached artifact
#
def _assemble(self):
# Assert call ordering
assert not self._cached()
context = self._get_context()
with self._output_file() as output_file:
if not self.__sandbox_config_supported:
self.warn("Sandbox configuration is not supported by the platform.",
detail="Falling back to UID {} GID {}. Artifact will not be pushed."
.format(self.__sandbox_config.build_uid, self.__sandbox_config.build_gid))
# Explicitly clean it up, keep the build dir around if exceptions are raised
os.makedirs(context.builddir, exist_ok=True)
rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
# Cleanup the build directory on explicit SIGTERM
def cleanup_rootdir():
utils._force_rmtree(rootdir)
with _signals.terminator(cleanup_rootdir), \
self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox: # nopep8
sandbox_root = sandbox.get_directory()
# By default, the dynamic public data is the same as the static public data.
# The plugin's assemble() method may modify this, though.
self.__dynamic_public = _yaml.node_copy(self.__public)
# Call the abstract plugin methods
try:
# Step 1 - Configure
self.configure_sandbox(sandbox)
# Step 2 - Stage
self.stage(sandbox)
# Step 3 - Prepare
self.__prepare(sandbox)
# Step 4 - Assemble
collect = self.assemble(sandbox)
except BstError as e:
# If an error occurred assembling an element in a sandbox,
# then tack on the sandbox directory to the error
e.sandbox = rootdir
# If there is a workspace open on this element, it will have
# been mounted for sandbox invocations instead of being staged.
#
# In order to preserve the correct failure state, we need to
# copy over the workspace files into the appropriate directory
# in the sandbox.
#
workspace = self._get_workspace()
if workspace and self.__staged_sources_directory:
sandbox_root = sandbox.get_directory()
sandbox_path = os.path.join(sandbox_root,
self.__staged_sources_directory.lstrip(os.sep))
try:
utils.copy_files(workspace.get_absolute_path(), sandbox_path)
except UtilError as err:
self.warn("Failed to preserve workspace state for failed build sysroot: {}"
.format(err))
raise
collectdir = os.path.join(sandbox_root, collect.lstrip(os.sep))
if not os.path.exists(collectdir):
raise ElementError(
"Directory '{}' was not found inside the sandbox, "
"unable to collect artifact contents"
.format(collect))
# At this point, we expect an exception was raised leading to
# an error message, or we have good output to collect.
# Create artifact directory structure
assembledir = os.path.join(rootdir, 'artifact')
filesdir = os.path.join(assembledir, 'files')
logsdir = os.path.join(assembledir, 'logs')
metadir = os.path.join(assembledir, 'meta')
os.mkdir(assembledir)
os.mkdir(filesdir)
os.mkdir(logsdir)
os.mkdir(metadir)
# Hard link files from collect dir to files directory
utils.link_files(collectdir, filesdir)
# Copy build log
log_filename = context.get_log_filename()
if log_filename:
shutil.copyfile(log_filename, os.path.join(logsdir, 'build.log'))
# Store public data
_yaml.dump(_yaml.node_sanitize(self.__dynamic_public), os.path.join(metadir, 'public.yaml'))
# ensure we have cache keys
self._assemble_done()
# Store keys.yaml
_yaml.dump(_yaml.node_sanitize({
'strong': self._get_cache_key(),
'weak': self._get_cache_key(_KeyStrength.WEAK),
}), os.path.join(metadir, 'keys.yaml'))
# Store dependencies.yaml
_yaml.dump(_yaml.node_sanitize({
e.name: e._get_cache_key() for e in self.dependencies(Scope.BUILD)
}), os.path.join(metadir, 'dependencies.yaml'))
# Store workspaced.yaml
_yaml.dump(_yaml.node_sanitize({
'workspaced': bool(self._get_workspace())
}), os.path.join(metadir, 'workspaced.yaml'))
# Store workspaced-dependencies.yaml
_yaml.dump(_yaml.node_sanitize({
'workspaced-dependencies': [
e.name for e in self.dependencies(Scope.BUILD)
if e._get_workspace()
]
}), os.path.join(metadir, 'workspaced-dependencies.yaml'))
with self.timed_activity("Caching artifact"):
artifact_size = utils._get_dir_size(assembledir)
self.__artifacts.commit(self, assembledir, self.__get_cache_keys_for_commit())
# Finally cleanup the build dir
cleanup_rootdir()
return artifact_size
# _fetch_done()
#
# Indicates that fetching the sources for this element has been done.
#
def _fetch_done(self):
# We are not updating the state recursively here since fetching can
# never end up in updating them.
self._update_state()
# _pull_pending()
#
# Check whether the artifact will be pulled.
#
# Returns:
# (bool): Whether a pull operation is pending
#
def _pull_pending(self):
if self._get_workspace():
# Workspace builds are never pushed to artifact servers
return False
if self.__strong_cached:
# Artifact already in local cache
return False
# Pull is pending if artifact remote server available
# and pull has not been attempted yet
return self.__artifacts.has_fetch_remotes(element=self) and not self.__pull_done
# _pull_done()
#
# Indicate that pull was attempted.
#
# This needs to be called in the main process after a pull
# succeeds or fails so that we properly update the main
# process data model
#
# This will result in updating the element state.
#
def _pull_done(self):
self.__pull_done = True
self.__update_state_recursively()
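# _pull_strong():
#
# Attempt to pull the artifact using the strict cache key; on success
# the weak cache key is linked to the same fetched artifact.
#
# Returns: True if the artifact was pulled, False otherwise
#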
def _pull_strong(self, *, progress=None):
weak_key = self._get_cache_key(strength=_KeyStrength.WEAK)
key = self.__strict_cache_key
if not self.__artifacts.pull(self, key, progress=progress):
return False
# update weak ref by pointing it to this newly fetched artifact
self.__artifacts.link_key(self, key, weak_key)
return True
def _pull_weak(self, *, progress=None):
weak_key = self._get_cache_key(strength=_KeyStrength.WEAK)
if not self.__artifacts.pull(self, weak_key, progress=progress):
return False
# extract strong cache key from this newly fetched artifact
self._pull_done()
# create tag for strong cache key
key = self._get_cache_key(strength=_KeyStrength.STRONG)
self.__artifacts.link_key(self, weak_key, key)
return True
# _pull():
#
# Pull artifact from remote artifact repository into local artifact cache.
#
# Returns: True if the artifact has been downloaded, False otherwise
#
def _pull(self):
context = self._get_context()
def progress(percent, message):
self.status(message)
# Attempt to pull artifact without knowing whether it's available
pulled = self._pull_strong(progress=progress)
if not pulled and not self._cached() and not context.get_strict():
pulled = self._pull_weak(progress=progress)
if not pulled:
return False
# Notify successful download
return True
# _skip_push():
#
# Determine whether we should create a push job for this element.
#
# Returns:
# (bool): True if this element does not need a push job to be created
#
def _skip_push(self):
if not self.__artifacts.has_push_remotes(element=self):
# No push remotes for this element's project
return True
if not self._cached():
return True
# Do not push tainted artifact
if self.__get_tainted():
return True
return False
# _push():
#
# Push locally cached artifact to remote artifact repository.
#
# Returns:
# (bool): True if the remote was updated, False if it already existed
# and no update was required
#
def _push(self):
self.__assert_cached()
if self.__get_tainted():
self.warn("Not pushing tainted artifact.")
return False
# Push all keys used for local commit
pushed = self.__artifacts.push(self, self.__get_cache_keys_for_commit())
if not pushed:
return False
# Notify successful upload
return True
# _shell():
#
# Connects the terminal with a shell running in a staged
# environment
#
# Args:
# scope (Scope): Either BUILD or RUN scopes are valid, or None
# directory (str): A directory to an existing sandbox, or None
# mounts (list): A list of (str, str) tuples, representing host/target paths to mount
# isolate (bool): Whether to isolate the environment like we do in builds
# prompt (str): A suitable prompt string for PS1
# command (list): An argv to launch in the sandbox
#
# Returns: Exit code
#
# If directory is not specified, one will be staged using scope
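#
# Example (illustrative only; the argument values are hypothetical):
#
#   exit_code = element._shell(Scope.BUILD, prompt='[build] $ ',
#                              command=['sh', '-i'])
#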
def _shell(self, scope=None, directory=None, *, mounts=None, isolate=False, prompt=None, command=None):
with self._prepare_sandbox(scope, directory) as sandbox:
environment = self.get_environment()
environment = copy.copy(environment)
flags = SandboxFlags.INTERACTIVE | SandboxFlags.ROOT_READ_ONLY
# Fetch the main toplevel project, in case this is a junctioned
# subproject, we want to use the rules defined by the main one.
context = self._get_context()
project = context.get_toplevel_project()
shell_command, shell_environment, shell_host_files = project.get_shell_config()
if prompt is not None:
environment['PS1'] = prompt
# Special configurations for non-isolated sandboxes
if not isolate:
# Open the network, and reuse calling uid/gid
#
flags |= SandboxFlags.NETWORK_ENABLED | SandboxFlags.INHERIT_UID
# Apply project defined environment vars to set for a shell
for key, value in _yaml.node_items(shell_environment):
environment[key] = value
# Setup any requested bind mounts
if mounts is None:
mounts = []
for mount in shell_host_files + mounts:
if not os.path.exists(mount.host_path):
if not mount.optional:
self.warn("Not mounting non-existing host file: {}".format(mount.host_path))
else:
sandbox.mark_directory(mount.path)
sandbox._set_mount_source(mount.path, mount.host_path)
if command:
argv = list(command)
else:
argv = shell_command
self.status("Running command", detail=" ".join(argv))
# Run shells with network enabled and readonly root.
return sandbox.run(argv, flags, env=environment)
# _open_workspace():
#
# "Open" a workspace for this element
#
# This requires that a workspace already be created in
# the workspaces metadata first.
#
def _open_workspace(self):
context = self._get_context()
workspace = self._get_workspace()
assert workspace is not None
# First lets get a temp dir in our build directory
# and stage there, then link the files over to the desired
# path.
#
# We do this so that force-opening a workspace, which overwrites
# files in the target directory, actually works without any
# additional support from Source implementations.
#
os.makedirs(context.builddir, exist_ok=True)
with utils._tempdir(dir=context.builddir, prefix='workspace-{}'
.format(self.normal_name)) as temp:
for source in self.sources():
source._init_workspace(temp)
# Now hardlink the files into the workspace target.
utils.link_files(temp, workspace.get_absolute_path())
# _get_workspace():
#
# Returns:
# (Workspace|None): A workspace associated with this element
#
def _get_workspace(self):
workspaces = self._get_context().get_workspaces()
return workspaces.get_workspace(self._get_full_name())
# _write_script():
#
# Writes a script to the given directory.
def _write_script(self, directory):
with open(_site.build_module_template, "r", encoding="utf-8") as f:
script_template = f.read()
variable_string = ""
for var, val in self.get_environment().items():
variable_string += "{0}={1} ".format(var, val)
script = script_template.format(
name=self.normal_name,
build_root=self.get_variable('build-root'),
install_root=self.get_variable('install-root'),
variables=variable_string,
commands=self.generate_script()
)
os.makedirs(directory, exist_ok=True)
script_path = os.path.join(directory, "build-" + self.normal_name)
with self.timed_activity("Writing build script", silent_nested=True):
with utils.save_file_atomic(script_path, "w") as script_file:
script_file.write(script)
os.chmod(script_path, stat.S_IEXEC | stat.S_IREAD)
# _subst_string()
#
# Substitute a string; this is an internal function related
# to how junctions are loaded and needs to be more generic
# than the public node_subst_member()
#
# Args:
# value (str): A string value
#
# Returns:
# (str): The string after substitutions have occurred
#
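# Example (illustrative): _subst_string("%{project-name}") would
# return the value of the 'project-name' variable.
#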
def _subst_string(self, value):
return self.__variables.subst(value, None)
# Returns the element whose sources this element is ultimately derived from.
#
# This is intended for being used to redirect commands that operate on an
# element to the element whose sources it is ultimately derived from.
#
# For example, element A is a build element depending on source foo,
# element B is a filter element that depends on element A. The source
# element of B is A, since B depends on A, and A has sources.
#
def _get_source_element(self):
return self
#############################################################
# Private Local Methods #
#############################################################
# __update_source_state()
#
# Updates source consistency state
#
def __update_source_state(self):
# Cannot resolve source state until tracked
if self.__tracking_scheduled:
return
self.__consistency = Consistency.CACHED
workspace = self._get_workspace()
# Special case for workspaces
if workspace:
# A workspace is considered inconsistent in the case
# that its directory went missing
#
fullpath = workspace.get_absolute_path()
if not os.path.exists(fullpath):
self.__consistency = Consistency.INCONSISTENT
else:
# Determine overall consistency of the element
for source in self.__sources:
source._update_state()
source_consistency = source._get_consistency()
self.__consistency = min(self.__consistency, source_consistency)
# __calculate_cache_key():
#
# Calculates the cache key
#
# Returns:
# (str): A hex digest cache key for this Element, or None
#
# None is returned if information for the cache key is missing.
#
def __calculate_cache_key(self, dependencies):
# No cache keys for dependencies which have no cache keys
if None in dependencies:
return None
# Generate dict that is used as base for all cache keys
if self.__cache_key_dict is None:
# Filter out nocache variables from the element's environment
cache_env = {
key: value
for key, value in self.node_items(self.__environment)
if key not in self.__env_nocache
}
context = self._get_context()
project = self._get_project()
workspace = self._get_workspace()
self.__cache_key_dict = {
'artifact-version': "{}.{}".format(BST_CORE_ARTIFACT_VERSION,
self.BST_ARTIFACT_VERSION),
'context': context.get_cache_key(),
'project': project.get_cache_key(),
'element': self.get_unique_key(),
'execution-environment': self.__sandbox_config.get_unique_key(),
'environment': cache_env,
'sources': [s._get_unique_key(workspace is None) for s in self.__sources],
'workspace': '' if workspace is None else workspace.get_key(self._get_project()),
'public': self.__public,
'cache': 'CASCache'
}
self.__cache_key_dict['fatal-warnings'] = sorted(project._fatal_warnings)
cache_key_dict = self.__cache_key_dict.copy()
cache_key_dict['dependencies'] = dependencies
return _cachekey.generate_key(cache_key_dict)
# __can_build_incrementally()
#
# Check if the element can be built incrementally; this
# is used to decide how to stage things
#
# Returns:
# (bool): Whether this element can be built incrementally
#
def __can_build_incrementally(self):
return bool(self._get_workspace())
# __prepare():
#
# Internal method for calling public abstract prepare() method.
#
def __prepare(self, sandbox):
workspace = self._get_workspace()
# We need to ensure that the prepare() method is only called
# once in workspaces, because the changes will persist across
# incremental builds - not desirable, for example, in the case
# of autotools' `./configure`.
if not (workspace and workspace.prepared):
self.prepare(sandbox)
if workspace:
workspace.prepared = True
# __assert_cached()
#
# Raises an error if the artifact is not cached.
#
def __assert_cached(self):
assert self._cached(), "{}: Missing artifact {}".format(self, self._get_brief_display_key())
# __get_tainted():
#
# Checks whether this artifact should be pushed to an artifact cache.
#
# Args:
# recalculate (bool) - Whether to force recalculation
#
# Returns:
# (bool) False if this artifact should be excluded from pushing.
#
# Note:
# This method should only be called after the element's
# artifact is present in the local artifact cache.
#
def __get_tainted(self, recalculate=False):
if recalculate or self.__tainted is None:
# Whether this artifact has a workspace
workspaced = self.__get_artifact_metadata_workspaced()
# Whether this artifact's dependencies have workspaces
workspaced_dependencies = self.__get_artifact_metadata_workspaced_dependencies()
# Other conditions should be or-ed
self.__tainted = (workspaced or workspaced_dependencies or
not self.__sandbox_config_supported)
return self.__tainted
# __sandbox():
#
# A context manager to prepare a Sandbox object at the specified directory;
# if the directory is None, then a directory will be chosen automatically
# in the configured build directory.
#
# Args:
# directory (str): The local directory where the sandbox will live, or None
# stdout (fileobject): The stream for stdout for the sandbox
# stderr (fileobject): The stream for stderr for the sandbox
# config (SandboxConfig): The SandboxConfig object
#
# Yields:
# (Sandbox): A usable sandbox
#
@contextmanager
def __sandbox(self, directory, stdout=None, stderr=None, config=None):
context = self._get_context()
project = self._get_project()
platform = Platform.get_platform()
if directory is not None and os.path.exists(directory):
sandbox = platform.create_sandbox(context, project,
directory,
stdout=stdout,
stderr=stderr,
config=config)
yield sandbox
else:
os.makedirs(context.builddir, exist_ok=True)
rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
# Recursive contextmanager...
with self.__sandbox(rootdir, stdout=stdout, stderr=stderr, config=config) as sandbox:
yield sandbox
# Cleanup the build dir
utils._force_rmtree(rootdir)
def __compose_default_splits(self, defaults):
project = self._get_project()
element_public = _yaml.node_get(defaults, Mapping, 'public', default_value={})
element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={})
element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={})
if self.__is_junction:
splits = _yaml.node_chain_copy(element_splits)
else:
assert project._splits is not None
splits = _yaml.node_chain_copy(project._splits)
# Extend project wide split rules with any split rules defined by the element
_yaml.composite(splits, element_splits)
element_bst['split-rules'] = splits
element_public['bst'] = element_bst
defaults['public'] = element_public
def __init_defaults(self, plugin_conf):
# Defaults are loaded once per class and then reused
#
if not self.__defaults_set:
# Load the plugin's accompanying .yaml file if one was provided
defaults = {}
try:
defaults = _yaml.load(plugin_conf, os.path.basename(plugin_conf))
except LoadError as e:
if e.reason != LoadErrorReason.MISSING_FILE:
raise e
# Special case; compose any element-wide split-rules declarations
self.__compose_default_splits(defaults)
# Override the element's defaults with element specific
# overrides from the project.conf
project = self._get_project()
if self.__is_junction:
elements = project.first_pass_config.element_overrides
else:
elements = project.element_overrides
overrides = elements.get(self.get_kind())
if overrides:
_yaml.composite(defaults, overrides)
# Set the data class wide
type(self).__defaults = defaults
type(self).__defaults_set = True
# This will resolve the final environment to be used when
# creating sandboxes for this element
#
def __extract_environment(self, meta):
default_env = _yaml.node_get(self.__defaults, Mapping, 'environment', default_value={})
if self.__is_junction:
environment = {}
else:
project = self._get_project()
environment = _yaml.node_chain_copy(project.base_environment)
_yaml.composite(environment, default_env)
_yaml.composite(environment, meta.environment)
_yaml.node_final_assertions(environment)
# Resolve variables in environment value strings
final_env = {}
for key, _ in self.node_items(environment):
final_env[key] = self.node_subst_member(environment, key)
return final_env
def __extract_env_nocache(self, meta):
if self.__is_junction:
project_nocache = []
else:
project = self._get_project()
project.ensure_fully_loaded()
project_nocache = project.base_env_nocache
default_nocache = _yaml.node_get(self.__defaults, list, 'environment-nocache', default_value=[])
element_nocache = meta.env_nocache
# Accumulate values from the element default, the project and the element
# itself to form a complete list of nocache env vars.
env_nocache = set(project_nocache + default_nocache + element_nocache)
# Convert back to list now we know they're unique
return list(env_nocache)
# This will resolve the final variables to be used when
# substituting command strings to be run in the sandbox
#
def __extract_variables(self, meta):
default_vars = _yaml.node_get(self.__defaults, Mapping, 'variables',
default_value={})
project = self._get_project()
if self.__is_junction:
variables = _yaml.node_chain_copy(project.first_pass_config.base_variables)
else:
project.ensure_fully_loaded()
variables = _yaml.node_chain_copy(project.base_variables)
_yaml.composite(variables, default_vars)
_yaml.composite(variables, meta.variables)
_yaml.node_final_assertions(variables)
for var in ('project-name', 'element-name', 'max-jobs'):
provenance = _yaml.node_get_provenance(variables, var)
if provenance and provenance.filename != '':
raise LoadError(LoadErrorReason.PROTECTED_VARIABLE_REDEFINED,
"{}: invalid redefinition of protected variable '{}'"
.format(provenance, var))
return variables
# This will resolve the final configuration to be handed
# off to element.configure()
#
def __extract_config(self, meta):
# The default config is already composited with the project overrides
config = _yaml.node_get(self.__defaults, Mapping, 'config', default_value={})
config = _yaml.node_chain_copy(config)
_yaml.composite(config, meta.config)
_yaml.node_final_assertions(config)
return config
# Sandbox-specific configuration data, to be passed to the sandbox's constructor.
#
def __extract_sandbox_config(self, meta):
if self.__is_junction:
sandbox_config = {'build-uid': 0,
'build-gid': 0}
else:
project = self._get_project()
project.ensure_fully_loaded()
sandbox_config = _yaml.node_chain_copy(project._sandbox)
host_os, _, _, _, host_arch = os.uname()
# The default config is already composited with the project overrides
sandbox_defaults = _yaml.node_get(self.__defaults, Mapping, 'sandbox', default_value={})
sandbox_defaults = _yaml.node_chain_copy(sandbox_defaults)
_yaml.composite(sandbox_config, sandbox_defaults)
_yaml.composite(sandbox_config, meta.sandbox)
_yaml.node_final_assertions(sandbox_config)
# Sandbox config, unlike others, has fixed members so we should validate them
_yaml.node_validate(sandbox_config, ['build-uid', 'build-gid', 'build-os', 'build-arch'])
return SandboxConfig(
int(self.node_subst_member(sandbox_config, 'build-uid')),
int(self.node_subst_member(sandbox_config, 'build-gid')),
self.node_subst_member(sandbox_config, 'build-os', default=host_os),
self.node_subst_member(sandbox_config, 'build-arch', default=host_arch))
# This makes a special exception for the split rules, which
# elements may extend but whose defaults are defined in the project.
#
def __extract_public(self, meta):
base_public = _yaml.node_get(self.__defaults, Mapping, 'public', default_value={})
base_public = _yaml.node_chain_copy(base_public)
base_bst = _yaml.node_get(base_public, Mapping, 'bst', default_value={})
base_splits = _yaml.node_get(base_bst, Mapping, 'split-rules', default_value={})
element_public = _yaml.node_chain_copy(meta.public)
element_bst = _yaml.node_get(element_public, Mapping, 'bst', default_value={})
element_splits = _yaml.node_get(element_bst, Mapping, 'split-rules', default_value={})
# Allow elements to extend the default splits defined in their project or
# element specific defaults
_yaml.composite(base_splits, element_splits)
element_bst['split-rules'] = base_splits
element_public['bst'] = element_bst
_yaml.node_final_assertions(element_public)
# Also, resolve any variables in the public split rules directly
for domain, splits in self.node_items(base_splits):
new_splits = []
for index, split in enumerate(splits):
provenance = _yaml.node_get_provenance(base_splits, key=domain, indices=[index])
new_splits.append(
self.__variables.subst(split.strip(), provenance)
)
base_splits[domain] = new_splits
return element_public
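# __init_splits():
#
# Lazily compiles the split-rules from the 'bst' public data into one
# anchored regular expression per domain.
#
# As a rough example, split-rules of the following form:
#
#   split-rules:
#     runtime:
#     - |
#       %{bindir}/*
#
# result in the 'runtime' domain's expression matching any file
# under %{bindir}.
#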
def __init_splits(self):
bstdata = self.get_public_data('bst')
splits = bstdata.get('split-rules')
self.__splits = {
domain: re.compile(
"^(?:" + "|".join([utils._glob2re(r) for r in rules]) + ")$", re.MULTILINE | re.DOTALL
)
for domain, rules in self.node_items(splits)
}
def __compute_splits(self, include=None, exclude=None, orphans=True):
artifact_base, _ = self.__extract()
basedir = os.path.join(artifact_base, 'files')
# No splitting requested, just report complete artifact
if orphans and not (include or exclude):
for filename in utils.list_relative_paths(basedir):
yield filename
return
if not self.__splits:
self.__init_splits()
element_domains = list(self.__splits.keys())
if not include:
include = element_domains
if not exclude:
exclude = []
# Ignore domains that don't apply to this element
#
include = [domain for domain in include if domain in element_domains]
exclude = [domain for domain in exclude if domain in element_domains]
# FIXME: Instead of listing the paths in an extracted artifact,
# we should be using a manifest loaded from the artifact
# metadata.
#
element_files = [
os.path.join(os.sep, filename)
for filename in utils.list_relative_paths(basedir)
]
for filename in element_files:
include_file = False
exclude_file = False
claimed_file = False
for domain in element_domains:
if self.__splits[domain].match(filename):
claimed_file = True
if domain in include:
include_file = True
if domain in exclude:
exclude_file = True
if orphans and not claimed_file:
include_file = True
if include_file and not exclude_file:
yield filename.lstrip(os.sep)
def __file_is_whitelisted(self, path):
# Considered storing the whitelist regex for re-use, but public data
# can be altered mid-build.
# Public data is not guaranteed to stay the same for the duration of
# the build, but I can think of no reason to change it mid-build.
# If this ever changes, things will go wrong unexpectedly.
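#
# As an illustrative example, public data along these lines would
# whitelist overlaps on everything under %{sysconfdir}:
#
#   public:
#     bst:
#       overlap-whitelist:
#       - |
#         %{sysconfdir}/*
#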
if not self.__whitelist_regex:
bstdata = self.get_public_data('bst')
whitelist = _yaml.node_get(bstdata, list, 'overlap-whitelist', default_value=[])
whitelist_expressions = [
utils._glob2re(
self.__variables.subst(
exp.strip(),
_yaml.node_get_provenance(bstdata, key='overlap-whitelist', indices=[index])
)
)
for index, exp in enumerate(whitelist)
]
expression = ('^(?:' + '|'.join(whitelist_expressions) + ')$')
self.__whitelist_regex = re.compile(expression, re.MULTILINE | re.DOTALL)
return self.__whitelist_regex.match(path) or self.__whitelist_regex.match(os.path.join(os.sep, path))
# __extract():
#
# Extract an artifact and return the directory
#
# Args:
# key (str): The key for the artifact to extract,
# or None for the default key
#
# Returns:
# (str): The path to the extracted artifact
# (str): The chosen key
#
def __extract(self, key=None):
if key is None:
context = self._get_context()
key = self.__strict_cache_key
# Use weak cache key, if artifact is missing for strong cache key
# and the context allows use of weak cache keys
if not context.get_strict() and not self.__artifacts.contains(self, key):
key = self._get_cache_key(strength=_KeyStrength.WEAK)
return (self.__artifacts.extract(self, key), key)
# __get_artifact_metadata_keys():
#
# Retrieve the strong and weak keys from the given artifact.
#
# Args:
# key (str): The artifact key, or None for the default key
#
# Returns:
# (str): The strong key
# (str): The weak key
#
def __get_artifact_metadata_keys(self, key=None):
# Now extract it and possibly derive the key
artifact_base, key = self.__extract(key)
# Now try the cache, once we're sure about the key
if key in self.__metadata_keys:
return (self.__metadata_keys[key]['strong'],
self.__metadata_keys[key]['weak'])
# Parse the expensive yaml now and cache the result
meta_file = os.path.join(artifact_base, 'meta', 'keys.yaml')
meta = _yaml.load(meta_file)
strong_key = meta['strong']
weak_key = meta['weak']
assert key in (strong_key, weak_key)
self.__metadata_keys[strong_key] = meta
self.__metadata_keys[weak_key] = meta
return (strong_key, weak_key)
# __get_artifact_metadata_dependencies():
#
# Retrieve the hash of dependency strong keys from the given artifact.
#
# Args:
# key (str): The artifact key, or None for the default key
#
# Returns:
# (dict): A dictionary of element names and their strong keys
#
def __get_artifact_metadata_dependencies(self, key=None):
# Extract it and possibly derive the key
artifact_base, key = self.__extract(key)
# Now try the cache, once we're sure about the key
if key in self.__metadata_dependencies:
return self.__metadata_dependencies[key]
# Parse the expensive yaml now and cache the result
meta_file = os.path.join(artifact_base, 'meta', 'dependencies.yaml')
meta = _yaml.load(meta_file)
# Cache it under both strong and weak keys
strong_key, weak_key = self.__get_artifact_metadata_keys(key)
self.__metadata_dependencies[strong_key] = meta
self.__metadata_dependencies[weak_key] = meta
return meta
# __get_artifact_metadata_workspaced():
#
# Retrieve the hash of dependency strong keys from the given artifact.
#
# Args:
# key (str): The artifact key, or None for the default key
#
# Returns:
# (bool): Whether the given artifact was workspaced
#
def __get_artifact_metadata_workspaced(self, key=None):
# Extract it and possibly derive the key
artifact_base, key = self.__extract(key)
# Now try the cache, once we're sure about the key
if key in self.__metadata_workspaced:
return self.__metadata_workspaced[key]
# Parse the expensive yaml now and cache the result
meta_file = os.path.join(artifact_base, 'meta', 'workspaced.yaml')
meta = _yaml.load(meta_file)
workspaced = _yaml.node_get(meta, bool, 'workspaced')
# Cache it under both strong and weak keys
strong_key, weak_key = self.__get_artifact_metadata_keys(key)
self.__metadata_workspaced[strong_key] = workspaced
self.__metadata_workspaced[weak_key] = workspaced
return workspaced
# __get_artifact_metadata_workspaced_dependencies():
#
# Retrieve the hash of dependency strong keys from the given artifact.
#
# Args:
# key (str): The artifact key, or None for the default key
#
# Returns:
# (list): List of which dependencies are workspaced
#
def __get_artifact_metadata_workspaced_dependencies(self, key=None):
# Extract it and possibly derive the key
artifact_base, key = self.__extract(key)
# Now try the cache, once we're sure about the key
if key in self.__metadata_workspaced_dependencies:
return self.__metadata_workspaced_dependencies[key]
# Parse the expensive yaml now and cache the result
meta_file = os.path.join(artifact_base, 'meta', 'workspaced-dependencies.yaml')
meta = _yaml.load(meta_file)
workspaced = _yaml.node_get(meta, list, 'workspaced-dependencies')
# Cache it under both strong and weak keys
strong_key, weak_key = self.__get_artifact_metadata_keys(key)
self.__metadata_workspaced_dependencies[strong_key] = workspaced
self.__metadata_workspaced_dependencies[weak_key] = workspaced
return workspaced
# __load_public_data():
#
# Loads the public data from the cached artifact
#
def __load_public_data(self):
self.__assert_cached()
assert self.__dynamic_public is None
# Load the public data from the artifact
artifact_base, _ = self.__extract()
metadir = os.path.join(artifact_base, 'meta')
self.__dynamic_public = _yaml.load(os.path.join(metadir, 'public.yaml'))
def __get_cache_keys_for_commit(self):
keys = []
# tag with strong cache key based on dependency versions used for the build
keys.append(self._get_cache_key(strength=_KeyStrength.STRONG))
# also store under weak cache key
keys.append(self._get_cache_key(strength=_KeyStrength.WEAK))
return utils._deduplicate(keys)
# __update_state_recursively()
#
# Update the state of all reverse dependencies, recursively.
#
def __update_state_recursively(self):
queue = _UniquePriorityQueue()
queue.push(self._unique_id, self)
while queue:
element = queue.pop()
old_ready_for_runtime = element.__ready_for_runtime
old_strict_cache_key = element.__strict_cache_key
element._update_state()
if element.__ready_for_runtime != old_ready_for_runtime or \
element.__strict_cache_key != old_strict_cache_key:
for rdep in element.__reverse_dependencies:
queue.push(rdep._unique_id, rdep)
def _overlap_error_detail(f, forbidden_overlap_elements, elements):
if forbidden_overlap_elements:
return ("/{}: {} {} not permitted to overlap other elements, order {} \n"
.format(f, " and ".join(forbidden_overlap_elements),
"is" if len(forbidden_overlap_elements) == 1 else "are",
" above ".join(reversed(elements))))
else:
return ""
buildstream-1.6.9/buildstream/plugin.py 0000664 0000000 0000000 00000074754 14375152700 0020252 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
Plugin - Base plugin class
==========================
BuildStream supports third party plugins to define additional kinds of
:mod:`Elements ` and :mod:`Sources `.
The common API is documented here, along with some information on how
external plugin packages are structured.
.. _core_plugin_abstract_methods:
Abstract Methods
----------------
For both :mod:`Elements ` and :mod:`Sources `,
it is mandatory to implement the following abstract methods:
* :func:`Plugin.configure() `
Loads the user provided configuration YAML for the given source or element
* :func:`Plugin.preflight() `
Early preflight checks allow plugins to bail out early with an error
in the case that it can predict that failure is inevitable.
* :func:`Plugin.get_unique_key() `
Once all configuration has been loaded and preflight checks have passed,
this method is used to inform the core of a plugin's unique configuration.
Configurable Warnings
---------------------
Warnings raised through calling :func:`Plugin.warn() ` can provide an optional
parameter ``warning_token``, this will raise a :class:`PluginError` if the warning is configured as fatal within
the project configuration.
Configurable warnings will be prefixed with :func:`Plugin.get_kind() `
within buildstream and must be prefixed as such in project configurations. For more detail on project configuration
see :ref:`Configurable Warnings `.
It is important to document these warnings in your plugin documentation to allow users to make full use of them
while configuring their projects.
Example
~~~~~~~
If the :class:`git ` plugin uses the warning ``"inconsistent-submodule"``
then it could be referenced in project configuration as ``"git:inconsistent-submodule"``.
Plugin Structure
----------------
A plugin should consist of a `setuptools package
`_ that
advertises contained plugins using `entry points
`_.
A plugin entry point must be a module that extends a class in the
:ref:`core_framework` to be discovered by BuildStream. A YAML file
defining plugin default settings with the same name as the module can
also be defined in the same directory as the plugin module.
.. note::
BuildStream does not support function/class entry points.
A sample plugin could be structured as such:
.. code-block:: text
.
├── elements
│ ├── autotools.py
│ ├── autotools.yaml
│ └── __init__.py
├── MANIFEST.in
└── setup.py
The setuptools configuration should then contain at least:
setup.py:
.. literalinclude:: ../source/sample_plugin/setup.py
:language: python
MANIFEST.in:
.. literalinclude:: ../source/sample_plugin/MANIFEST.in
:language: text
Class Reference
---------------
"""
import itertools
import os
import subprocess
from contextlib import contextmanager
from weakref import WeakValueDictionary
from . import _yaml
from . import utils
from ._exceptions import PluginError, ImplError
from ._message import Message, MessageType
from .types import CoreWarnings
class Plugin():
"""Plugin()
Base Plugin class.
Some common features to both Sources and Elements are found
in this class.
.. note::
Derivation of plugins is not supported. Plugins may only
derive from the base :mod:`Source ` and
:mod:`Element ` types, and any convenience
subclasses (like :mod:`BuildElement `)
which are included in the buildstream namespace.
"""
BST_REQUIRED_VERSION_MAJOR = 0
"""Minimum required major version"""
BST_REQUIRED_VERSION_MINOR = 0
"""Minimum required minor version"""
BST_FORMAT_VERSION = 0
"""The plugin's YAML format version
This should be set to ``1`` the first time any new configuration
is understood by your :func:`Plugin.configure() `
implementation and subsequently bumped every time your
configuration is enhanced.
.. note::
Plugins are expected to maintain backward compatibility
in the format and configurations they expose. The versioning
is intended to track availability of new features only.
For convenience, the format version for plugins maintained and
distributed with BuildStream are revisioned with BuildStream's
core format version :ref:`core format version `.
"""
# Unique id generator for Plugins
#
# Each plugin gets a unique id at creation.
#
# Ids are a monotonically increasing integer which
# starts as 1 (a falsy plugin ID is considered unset
# in various parts of the codebase).
#
__id_generator = itertools.count(1)
# Hold on to a lookup table by counter of all instantiated plugins.
# We use this to send the id back from child processes so we can lookup
# corresponding element/source in the master process.
#
# Use WeakValueDictionary() so the map we use to lookup objects does not
# keep the plugins alive after pipeline destruction.
#
# Note that Plugins can only be instantiated in the main process before
# scheduling tasks.
__TABLE = WeakValueDictionary()
def __init__(self, name, context, project, provenance, type_tag, unique_id=None):
self.name = name
"""The plugin name
For elements, this is the project relative bst filename,
for sources this is the owning element's name with a suffix
indicating its index on the owning element.
For sources this is for display purposes only.
"""
# Unique ID
#
# This id allows to uniquely identify a plugin.
#
# /!\ the unique id must be an increasing value /!\
# This is because we are depending on it in buildstream.element.Element
# to give us a topological sort over all elements.
# Modifying how we handle ids here will modify the behavior of the
# Element's state handling.
if unique_id is None:
# Register ourself in the table containing all existing plugins
self._unique_id = next(self.__id_generator)
self.__TABLE[self._unique_id] = self
else:
# If the unique ID is passed in the constructor, then it is a cloned
# plugin in a subprocess and should use the same ID.
self._unique_id = unique_id
self.__context = context # The Context object
self.__project = project # The Project object
self.__provenance = provenance # The Provenance information
self.__type_tag = type_tag # The type of plugin (element or source)
self.__configuring = False # Whether we are currently configuring
# Infer the kind identifier
modulename = type(self).__module__
self.__kind = modulename.rsplit('.', maxsplit=1)[-1]
self.debug("Created: {}".format(self))
def __del__(self):
# Don't send anything through the Message() pipeline at destruction time,
# any subsequent lookup of plugin by unique id would raise KeyError.
if self.__context.log_debug:
print("DEBUG: Destroyed: {}".format(self))
def __str__(self):
return "{kind} {typetag} at {provenance}".format(
kind=self.__kind,
typetag=self.__type_tag,
provenance=self.__provenance)
#############################################################
# Abstract Methods #
#############################################################
def configure(self, node):
"""Configure the Plugin from loaded configuration data
Args:
node (dict): The loaded configuration dictionary
Raises:
:class:`.SourceError`: If it's a :class:`.Source` implementation
:class:`.ElementError`: If it's an :class:`.Element` implementation
Plugin implementors should implement this method to read configuration
data and store it.
Plugins should use the :func:`Plugin.node_get_member() `
and :func:`Plugin.node_get_list_element() `
methods to fetch values from the passed `node`. This will ensure that a nice human readable error
message will be raised if the expected configuration is not found, indicating the filename,
line and column numbers.
Further the :func:`Plugin.node_validate() ` method
should be used to ensure that the user has not specified keys in `node` which are unsupported
by the plugin.
.. note::
For Elements, when variable substitution is desirable, the
:func:`Element.node_subst_member() `
and :func:`Element.node_subst_list_element() `
methods can be used.
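**Example (an illustrative sketch; the 'path' and 'level' keys are hypothetical):**
.. code:: python
def configure(self, node):
# Assert supported keys, then store the configuration
self.node_validate(node, ['path', 'level'])
self.path = self.node_get_member(node, str, 'path')
self.level = self.node_get_member(node, int, 'level', -1)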
"""
raise ImplError("{tag} plugin '{kind}' does not implement configure()".format(
tag=self.__type_tag, kind=self.get_kind()))
def preflight(self):
"""Preflight Check
Raises:
:class:`.SourceError`: If it's a :class:`.Source` implementation
:class:`.ElementError`: If it's an :class:`.Element` implementation
This method is run after :func:`Plugin.configure() `
and after the pipeline is fully constructed.
Implementors should simply raise :class:`.SourceError` or :class:`.ElementError`
with an informative message in the case that the host environment is
unsuitable for operation.
Plugins which require host tools (only sources usually) should obtain
them with :func:`utils.get_host_tool() ` which
will raise an error automatically informing the user that a host tool is needed.
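**Example (an illustrative sketch):**
.. code:: python
def preflight(self):
# Fail early if the host tool we depend on is missing
self.host_git = utils.get_host_tool('git')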
"""
raise ImplError("{tag} plugin '{kind}' does not implement preflight()".format(
tag=self.__type_tag, kind=self.get_kind()))
def get_unique_key(self):
"""Return something which uniquely identifies the plugin input
Returns:
A string, list or dictionary which uniquely identifies the input
This is used to construct unique cache keys for elements and sources;
sources should return something which uniquely identifies the payload,
such as a sha256 sum of a tarball's content.
Elements and Sources should implement this by collecting any configurations
which could possibly affect the output and return a dictionary of these settings.
For Sources, this is guaranteed to only be called if
:func:`Source.get_consistency() `
has not returned :func:`Consistency.INCONSISTENT `
which is to say that the Source is expected to have an exact *ref* indicating
exactly what source is going to be staged.
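**Example (an illustrative sketch; the 'url' and 'ref' attributes are hypothetical):**
.. code:: python
def get_unique_key(self):
# Everything which can affect the staged output belongs here
return {
'url': self.url,
'ref': self.ref
}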
"""
raise ImplError("{tag} plugin '{kind}' does not implement get_unique_key()".format(
tag=self.__type_tag, kind=self.get_kind()))
#############################################################
# Public Methods #
#############################################################
def get_kind(self):
"""Fetches the kind of this plugin
Returns:
(str): The kind of this plugin
"""
return self.__kind
def node_items(self, node):
"""Iterate over a dictionary loaded from YAML
Args:
node (dict): The YAML loaded dictionary object
Returns:
list: List of key/value tuples to iterate over
BuildStream holds some private data in dictionaries loaded from
the YAML in order to preserve information to report in errors.
This convenience function should be used instead of the dict.items()
builtin function provided by python.
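**Example:**
.. code:: python
# Iterate over a dictionary loaded from YAML
for key, value in self.node_items(node):
self.status("{}: {}".format(key, value))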
"""
yield from _yaml.node_items(node)
def node_provenance(self, node, member_name=None):
"""Gets the provenance for `node` and `member_name`
This reports a string with file, line and column information suitable
for reporting an error or warning.
Args:
node (dict): The YAML loaded dictionary object
member_name (str): The name of the member to check, or None for the node itself
Returns:
(str): A string describing the provenance of the node and member
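**Example (illustrative; the 'path' key is hypothetical):**
.. code:: python
# Report where a member was declared
provenance = self.node_provenance(node, 'path')
self.warn("{}: The 'path' option is deprecated".format(provenance))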
"""
provenance = _yaml.node_get_provenance(node, key=member_name)
return str(provenance)
def node_get_member(self, node, expected_type, member_name, default=utils._sentinel):
"""Fetch the value of a node member, raising an error if the value is
missing or incorrectly typed.
Args:
node (dict): A dictionary loaded from YAML
expected_type (type): The expected type of the node member
member_name (str): The name of the member to fetch
default (expected_type): A value to return when *member_name* is not specified in *node*
Returns:
The value of *member_name* in *node*, otherwise *default*
Raises:
:class:`.LoadError`: When *member_name* is not found and no *default* was provided
Note:
Returned strings are stripped of leading and trailing whitespace
**Example:**
.. code:: python
# Expect a string 'name' in 'node'
name = self.node_get_member(node, str, 'name')
# Fetch an optional integer
level = self.node_get_member(node, int, 'level', -1)
"""
return _yaml.node_get(node, expected_type, member_name, default_value=default)
def node_get_project_path(self, node, key, *,
check_is_file=False, check_is_dir=False):
"""Fetches a project path from a dictionary node and validates it
Paths are asserted to never lead to a directory outside of the
project directory. In addition, paths can not point to symbolic
links, fifos, sockets and block/character devices.
The `check_is_file` and `check_is_dir` parameters can be used to
perform additional validations on the path. Note that an
exception will always be raised if both parameters are set to
``True``.
Args:
node (dict): A dictionary loaded from YAML
key (str): The key whose value contains a path to validate
check_is_file (bool): If ``True`` an error will also be raised
if path does not point to a regular file.
Defaults to ``False``
check_is_dir (bool): If ``True`` an error will also be raised
if path does not point to a directory.
Defaults to ``False``
Returns:
(str): The project path
Raises:
:class:`.LoadError`: In the case that the project path is not
valid or does not exist
*Since: 1.2*
**Example:**
.. code:: python
path = self.node_get_project_path(node, 'path')
"""
return _yaml.node_get_project_path(node, key,
self.__project.directory,
check_is_file=check_is_file,
check_is_dir=check_is_dir)
def node_validate(self, node, valid_keys):
"""This should be used in :func:`~buildstream.plugin.Plugin.configure`
implementations to assert that users have only entered
valid configuration keys.
Args:
node (dict): A dictionary loaded from YAML
valid_keys (iterable): A list of valid keys for the node
Raises:
:class:`.LoadError`: When an invalid key is found
**Example:**
.. code:: python
# Ensure our node only contains valid autotools config keys
self.node_validate(node, [
'configure-commands', 'build-commands',
'install-commands', 'strip-commands'
])
"""
_yaml.node_validate(node, valid_keys)
def node_get_list_element(self, node, expected_type, member_name, indices):
"""Fetch the value of a list element from a node member, raising an error if the
value is incorrectly typed.
Args:
node (dict): A dictionary loaded from YAML
expected_type (type): The expected type of the node member
member_name (str): The name of the member to fetch
indices (list of int): List of indices to search, in case of nested lists
Returns:
The value of the list element in *member_name* at the specified *indices*
Raises:
:class:`.LoadError`
Note:
Returned strings are stripped of leading and trailing whitespace
**Example:**
.. code:: python
# Fetch the list itself
things = self.node_get_member(node, list, 'things')
# Iterate over the list indices
for i in range(len(things)):
# Fetch dict things
thing = self.node_get_list_element(
node, dict, 'things', [ i ])
"""
return _yaml.node_get(node, expected_type, member_name, indices=indices)
def debug(self, brief, *, detail=None):
"""Print a debugging message
Args:
brief (str): The brief message
detail (str): An optional detailed message, can be multiline output
"""
if self.__context.log_debug:
self.__message(MessageType.DEBUG, brief, detail=detail)
def status(self, brief, *, detail=None):
"""Print a status message
Args:
brief (str): The brief message
detail (str): An optional detailed message, can be multiline output
Note: Status messages tell about what a plugin is currently doing
"""
self.__message(MessageType.STATUS, brief, detail=detail)
def info(self, brief, *, detail=None):
"""Print an informative message
Args:
brief (str): The brief message
detail (str): An optional detailed message, can be multiline output
Note: Informative messages tell the user something they might want
to know, like if refreshing an element caused it to change.
"""
self.__message(MessageType.INFO, brief, detail=detail)
def warn(self, brief, *, detail=None, warning_token=None):
"""Print a warning message, checks warning_token against project configuration
Args:
brief (str): The brief message
detail (str): An optional detailed message, can be multiline output
warning_token (str): An optional configurable warning associated with this warning,
this will cause PluginError to be raised if this warning is configured as fatal.
(*Since 1.4*)
Raises:
(:class:`.PluginError`): When warning_token is considered fatal by the project configuration
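**Example (illustrative; the warning token shown is hypothetical):**
.. code:: python
# Raise a configurable warning, fatal only if the project says so
self.warn("Ref is not in the expected format",
warning_token="bad-ref-format")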
"""
if warning_token:
warning_token = _prefix_warning(self, warning_token)
brief = "[{}]: {}".format(warning_token, brief)
project = self._get_project()
if project._warning_is_fatal(warning_token):
detail = detail if detail else ""
raise PluginError(message="{}\n{}".format(brief, detail), reason=warning_token)
self.__message(MessageType.WARN, brief=brief, detail=detail)
def log(self, brief, *, detail=None):
"""Log a message into the plugin's log file
The message will not be shown in the master log at all (so it will not
be displayed to the user on the console).
Args:
brief (str): The brief message
detail (str): An optional detailed message, can be multiline output
"""
self.__message(MessageType.LOG, brief, detail=detail)
@contextmanager
def timed_activity(self, activity_name, *, detail=None, silent_nested=False):
"""Context manager for performing timed activities in plugins
Args:
activity_name (str): The name of the activity
detail (str): An optional detailed message, can be multiline output
silent_nested (bool): If specified, nested messages will be silenced
This function lets you perform timed tasks in your plugin,
the core will take care of timing the duration of your
task and printing start / fail / success messages.
**Example**
.. code:: python
# Activity will be logged and timed
with self.timed_activity("Mirroring {}".format(self.url)):
# This will raise SourceError on its own
self.call(... command which takes time ...)
"""
with self.__context.timed_activity(activity_name,
unique_id=self._unique_id,
detail=detail,
silent_nested=silent_nested):
yield
def call(self, *popenargs, fail=None, fail_temporarily=False, **kwargs):
"""A wrapper for subprocess.call()
Args:
popenargs (list): Popen() arguments
fail (str): A message to display if the process returns
a non zero exit code
fail_temporarily (bool): Whether any exceptions should
be raised as temporary. (*Since: 1.2*)
rest_of_args (kwargs): Remaining arguments to subprocess.call()
Returns:
(int): The process exit code.
Raises:
(:class:`.PluginError`): If a non-zero return code is received and *fail* is specified
Note: If *fail* is not specified, then the return value of subprocess.call()
is returned even on error, and no exception is automatically raised.
**Example**
.. code:: python
# Call some host tool
self.tool = utils.get_host_tool('toolname')
self.call(
[self.tool, '--download-ponies', self.mirror_directory],
"Failed to download ponies from {}".format(
self.mirror_directory))
"""
exit_code, _ = self.__call(*popenargs, fail=fail, fail_temporarily=fail_temporarily, **kwargs)
return exit_code
def check_output(self, *popenargs, fail=None, fail_temporarily=False, **kwargs):
"""A wrapper for subprocess.check_output()
Args:
popenargs (list): Popen() arguments
fail (str): A message to display if the process returns
a non zero exit code
fail_temporarily (bool): Whether any exceptions should
be raised as temporary. (*Since: 1.2*)
rest_of_args (kwargs): Remaining arguments to subprocess.call()
Returns:
(int): The process exit code
(str): The process standard output
Raises:
(:class:`.PluginError`): If a non-zero return code is received and *fail* is specified
Note: If *fail* is not specified, then the return value of subprocess.check_output()
is returned even on error, and no exception is automatically raised.
**Example**
.. code:: python
# Get the tool at preflight time
self.tool = utils.get_host_tool('toolname')
# Call the tool, automatically raise an error
_, output = self.check_output(
[self.tool, '--print-ponies'],
"Failed to print the ponies in {}".format(
self.mirror_directory),
cwd=self.mirror_directory)
# Call the tool, inspect exit code
exit_code, output = self.check_output(
[self.tool, 'get-ref', tracking],
cwd=self.mirror_directory)
if exit_code == 128:
return
elif exit_code != 0:
fmt = "{plugin}: Failed to get ref for tracking: {track}"
raise SourceError(
fmt.format(plugin=self, track=tracking))
"""
return self.__call(*popenargs, collect_stdout=True, fail=fail, fail_temporarily=fail_temporarily, **kwargs)
#############################################################
# Private Methods used in BuildStream #
#############################################################
# _lookup():
#
# Fetch a plugin in the current process by its
# unique identifier
#
# Args:
# unique_id: The unique identifier as returned by
# plugin._unique_id
#
# Returns:
# (Plugin): The plugin for the given ID, or None
#
@classmethod
def _lookup(cls, unique_id):
assert unique_id != 0, "Looking up invalid plugin ID 0, ID counter starts at 1"
assert unique_id in cls.__TABLE, "Could not find plugin with ID {}".format(unique_id)
return cls.__TABLE[unique_id]
# _get_context()
#
# Fetches the invocation context
#
def _get_context(self):
return self.__context
# _get_project()
#
# Fetches the project object associated with this plugin
#
def _get_project(self):
return self.__project
# _get_provenance():
#
# Fetch bst file, line and column of the entity
#
def _get_provenance(self):
return self.__provenance
# Context manager for getting the open file handle to this
# plugin's log. Used in the child context to add stuff to
# a log.
#
@contextmanager
def _output_file(self):
log = self.__context.get_log_handle()
if log is None:
with open(os.devnull, "w", encoding="utf-8") as output:
yield output
else:
yield log
# _configure():
#
# Calls configure() for the plugin, this must be called by
# the core instead of configure() directly, so that the
# _get_configuring() state is up to date.
#
# Args:
# node (dict): The loaded configuration dictionary
#
def _configure(self, node):
self.__configuring = True
self.configure(node)
self.__configuring = False
# _get_configuring():
#
# Checks whether the plugin is in the middle of having
# its Plugin.configure() method called
#
# Returns:
# (bool): Whether we are currently configuring
def _get_configuring(self):
return self.__configuring
# _preflight():
#
# Calls preflight() for the plugin, and allows generic preflight
# checks to be added
#
# Raises:
# SourceError: If it's a Source implementation
# ElementError: If it's an Element implementation
# ProgramNotFoundError: If a required host tool is not found
#
def _preflight(self):
self.preflight()
#############################################################
# Local Private Methods #
#############################################################
# Internal subprocess implementation for the call() and check_output() APIs
#
def __call(self, *popenargs, collect_stdout=False, fail=None, fail_temporarily=False, **kwargs):
with self._output_file() as output_file:
if 'stdout' not in kwargs:
kwargs['stdout'] = output_file
if 'stderr' not in kwargs:
kwargs['stderr'] = output_file
if collect_stdout:
kwargs['stdout'] = subprocess.PIPE
self.__note_command(output_file, *popenargs, **kwargs)
exit_code, output = utils._call(*popenargs, **kwargs)
if fail and exit_code:
raise PluginError("{plugin}: {message}".format(plugin=self, message=fail),
temporary=fail_temporarily)
return (exit_code, output)
def __message(self, message_type, brief, **kwargs):
message = Message(self._unique_id, message_type, brief, **kwargs)
self.__context.message(message)
def __note_command(self, output, *popenargs, **kwargs):
workdir = os.getcwd()
workdir = kwargs.get('cwd', workdir)
command = " ".join(popenargs[0])
output.write('Running host command {}: {}\n'.format(workdir, command))
output.flush()
self.status('Running host command', detail=command)
def _get_full_name(self):
project = self.__project
if project.junction:
return '{}:{}'.format(project.junction.name, self.name)
else:
return self.name
# A local table for _prefix_warning()
#
__CORE_WARNINGS = [
value
for name, value in CoreWarnings.__dict__.items()
if not name.startswith("__")
]
# _prefix_warning():
#
# Prefix a warning with the plugin kind. CoreWarnings are not prefixed.
#
# Args:
# plugin (Plugin): The plugin which raised the warning
# warning (str): The warning to prefix
#
# Returns:
# (str): A prefixed warning
#
def _prefix_warning(plugin, warning):
if any((warning is core_warning for core_warning in __CORE_WARNINGS)):
return warning
return "{}:{}".format(plugin.get_kind(), warning)
buildstream-1.6.9/buildstream/plugins/ 0000775 0000000 0000000 00000000000 14375152700 0020042 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/plugins/elements/ 0000775 0000000 0000000 00000000000 14375152700 0021656 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/plugins/elements/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0023755 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/plugins/elements/autotools.py 0000664 0000000 0000000 00000003727 14375152700 0024272 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016, 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see .
#
# Authors:
# Tristan Van Berkom
"""
autotools - Autotools build element
===================================
This is a :mod:`BuildElement ` implementation for
using Autotools build scripts (also known as the `GNU Build System
`_).
You will often want to pass additional arguments to ``configure``. This should
be done on a per-element basis by setting the ``conf-local`` variable. Here is
an example:
.. code:: yaml
variables:
conf-local: |
--disable-foo --enable-bar
If you want to pass extra options to ``configure`` for every element in your
project, set the ``conf-global`` variable in your project.conf file. Here is
an example of that:
.. code:: yaml
elements:
autotools:
variables:
conf-global: |
--disable-gtk-doc --disable-static
Here is the default configuration for the ``autotools`` element in full:
.. literalinclude:: ../../../buildstream/plugins/elements/autotools.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'autotools' kind.
class AutotoolsElement(BuildElement):
pass
# Plugin entry point
def setup():
return AutotoolsElement
buildstream-1.6.9/buildstream/plugins/elements/autotools.yaml 0000664 0000000 0000000 00000003614 14375152700 0024577 0 ustar 00root root 0000000 0000000 # Autotools default configurations
variables:
autogen: |
export NOCONFIGURE=1;
if [ -x %{conf-cmd} ]; then true;
elif [ -x autogen ]; then ./autogen;
elif [ -x autogen.sh ]; then ./autogen.sh;
elif [ -x bootstrap ]; then ./bootstrap;
elif [ -x bootstrap.sh ]; then ./bootstrap.sh;
else autoreconf -ivf;
fi
# Project-wide extra arguments to be passed to `configure`
conf-global: ''
# Element-specific extra arguments to be passed to `configure`.
conf-local: ''
# For backwards compatibility only, do not use.
conf-extra: ''
conf-cmd: ./configure
conf-args: |
--prefix=%{prefix} \
--exec-prefix=%{exec_prefix} \
--bindir=%{bindir} \
--sbindir=%{sbindir} \
--sysconfdir=%{sysconfdir} \
--datadir=%{datadir} \
--includedir=%{includedir} \
--libdir=%{libdir} \
--libexecdir=%{libexecdir} \
--localstatedir=%{localstatedir} \
--sharedstatedir=%{sharedstatedir} \
--mandir=%{mandir} \
--infodir=%{infodir} %{conf-extra} %{conf-global} %{conf-local}
configure: |
%{conf-cmd} %{conf-args}
make: make
make-install: make -j1 DESTDIR="%{install-root}" install
# Set this if the sources cannot handle parallelization.
#
# notparallel: True
config:
# Commands for configuring the software
#
configure-commands:
- |
%{autogen}
- |
%{configure}
# Commands for building the software
#
build-commands:
- |
%{make}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{make-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
# Use max-jobs CPUs for building and enable verbosity
environment:
MAKEFLAGS: -j%{max-jobs}
V: 1
# And don't consider MAKEFLAGS or V as something which may
# affect build output.
environment-nocache:
- MAKEFLAGS
- V
buildstream-1.6.9/buildstream/plugins/elements/cmake.py 0000664 0000000 0000000 00000003616 14375152700 0023316 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016, 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
cmake - CMake build element
===========================
This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
using the `CMake <https://cmake.org/>`_ build system.
You will often want to pass additional arguments to the ``cmake`` program for
specific configuration options. This should be done on a per-element basis by
setting the ``cmake-local`` variable. Here is an example:
.. code:: yaml
variables:
cmake-local: |
-DCMAKE_BUILD_TYPE=Debug
If you want to pass extra options to ``cmake`` for every element in your
project, set the ``cmake-global`` variable in your project.conf file. Here is
an example of that:
.. code:: yaml
elements:
cmake:
variables:
cmake-global: |
-DCMAKE_BUILD_TYPE=Release
Here is the default configuration for the ``cmake`` element in full:
.. literalinclude:: ../../../buildstream/plugins/elements/cmake.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'cmake' kind.
class CMakeElement(BuildElement):
pass
# Plugin entry point
def setup():
return CMakeElement
buildstream-1.6.9/buildstream/plugins/elements/cmake.yaml 0000664 0000000 0000000 00000002633 14375152700 0023626 0 ustar 00root root 0000000 0000000 # CMake default configuration
variables:
build-dir: _builddir
# Project-wide extra arguments to be passed to `cmake`
cmake-global: ''
# Element-specific extra arguments to be passed to `cmake`.
cmake-local: ''
# For backwards compatibility only, do not use.
cmake-extra: ''
# The cmake generator to use
generator: Unix Makefiles
cmake-args: |
-DCMAKE_INSTALL_PREFIX:PATH="%{prefix}" \
-DCMAKE_INSTALL_LIBDIR:PATH="%{lib}" %{cmake-extra} %{cmake-global} %{cmake-local}
cmake: |
cmake -B%{build-dir} -H. -G"%{generator}" %{cmake-args}
make: cmake --build %{build-dir} -- ${JOBS}
make-install: env DESTDIR="%{install-root}" cmake --build %{build-dir} --target install
# Set this if the sources cannot handle parallelization.
#
# notparallel: True
config:
# Commands for configuring the software
#
configure-commands:
- |
%{cmake}
# Commands for building the software
#
build-commands:
- |
%{make}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{make-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
# Use max-jobs CPUs for building and enable verbosity
environment:
JOBS: -j%{max-jobs}
V: 1
# And don't consider JOBS or V as something which may
# affect build output.
environment-nocache:
- JOBS
- V
buildstream-1.6.9/buildstream/plugins/elements/compose.py 0000664 0000000 0000000 00000017304 14375152700 0023702 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
compose - Compose the output of multiple elements
=================================================
This element creates a selective composition of its dependencies.
This is normally used near the end of a pipeline to prepare
something for later deployment.
Since this element's output includes its dependencies, it may only
depend on elements as `build` type dependencies.
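As a sketch, a compose element which keeps only the files belonging to a
``runtime`` split-rule domain from a hypothetical ``system.bst`` dependency
might be declared as follows (the element and domain names are illustrative):

.. code:: yaml

   kind: compose

   depends:
   - filename: system.bst
     type: build

   config:
     # Only include files from the 'runtime' domain
     include:
     - runtime

     # Leave out files which are not claimed by any domain
     include-orphans: False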
The default configuration and possible options are as such:
.. literalinclude:: ../../../buildstream/plugins/elements/compose.yaml
:language: yaml
"""
import os
from buildstream import utils
from buildstream import Element, Scope
# Element implementation for the 'compose' kind.
class ComposeElement(Element):
# pylint: disable=attribute-defined-outside-init
# The compose element's output is its dependencies, so
# we must rebuild if the dependencies change even when
# not in strict build plans.
#
BST_STRICT_REBUILD = True
# Compose artifacts must never have indirect dependencies,
# so runtime dependencies are forbidden.
BST_FORBID_RDEPENDS = True
# This element ignores sources, so we should forbid them from being
# added, to reduce the potential for confusion
BST_FORBID_SOURCES = True
def configure(self, node):
self.node_validate(node, [
'integrate', 'include', 'exclude', 'include-orphans'
])
# We name this variable 'integration' only to avoid
# collision with the Element.integrate() method.
self.integration = self.node_get_member(node, bool, 'integrate')
self.include = self.node_get_member(node, list, 'include')
self.exclude = self.node_get_member(node, list, 'exclude')
self.include_orphans = self.node_get_member(node, bool, 'include-orphans')
def preflight(self):
pass
def get_unique_key(self):
key = {'integrate': self.integration,
'include': sorted(self.include),
'orphans': self.include_orphans}
if self.exclude:
key['exclude'] = sorted(self.exclude)
return key
def configure_sandbox(self, sandbox):
pass
def stage(self, sandbox):
pass
def assemble(self, sandbox):
require_split = self.include or self.exclude or not self.include_orphans
# Stage deps in the sandbox root
with self.timed_activity("Staging dependencies", silent_nested=True):
self.stage_dependency_artifacts(sandbox, Scope.BUILD)
manifest = set()
if require_split:
with self.timed_activity("Computing split", silent_nested=True):
for dep in self.dependencies(Scope.BUILD):
files = dep.compute_manifest(include=self.include,
exclude=self.exclude,
orphans=self.include_orphans)
manifest.update(files)
basedir = sandbox.get_directory()
modified_files = set()
removed_files = set()
added_files = set()
# Run any integration commands provided by the dependencies
# once they are all staged and ready
if self.integration:
with self.timed_activity("Integrating sandbox"):
if require_split:
# Make a snapshot of all the files before integration-commands are run.
snapshot = {
f: getmtime(os.path.join(basedir, f))
for f in utils.list_relative_paths(basedir)
}
for dep in self.dependencies(Scope.BUILD):
dep.integrate(sandbox)
if require_split:
# Calculate added, modified and removed files
basedir_contents = set(utils.list_relative_paths(basedir))
for path in manifest:
if path in basedir_contents:
if path in snapshot:
preintegration_mtime = snapshot[path]
if preintegration_mtime != getmtime(os.path.join(basedir, path)):
modified_files.add(path)
else:
# If the path appears in the manifest but not the initial snapshot,
# it may be a file staged inside a directory symlink. In this case
# the path we got from the manifest won't show up in the snapshot
# because utils.list_relative_paths() doesn't recurse into symlink
# directories.
pass
elif path in snapshot:
removed_files.add(path)
for path in basedir_contents:
if path not in snapshot:
added_files.add(path)
self.info("Integration modified {}, added {} and removed {} files"
.format(len(modified_files), len(added_files), len(removed_files)))
# The remainder of this is expensive, make an early exit if
# we're not being selective about what is to be included.
if not require_split:
return '/'
# Do we want to force include files which were modified by
# the integration commands, even if they were not added ?
#
manifest.update(added_files)
manifest.difference_update(removed_files)
# XXX We should be moving things outside of the build sandbox
# instead of into a subdir. The element assemble() method should
# support this in some way.
#
installdir = os.path.join(basedir, 'buildstream', 'install')
os.makedirs(installdir, exist_ok=True)
# We already saved the manifest for created files in the integration phase,
# now collect the rest of the manifest.
#
lines = []
if self.include:
lines.append("Including files from domains: " + ", ".join(self.include))
else:
lines.append("Including files from all domains")
if self.exclude:
lines.append("Excluding files from domains: " + ", ".join(self.exclude))
if self.include_orphans:
lines.append("Including orphaned files")
else:
lines.append("Excluding orphaned files")
detail = "\n".join(lines)
with self.timed_activity("Creating composition", detail=detail, silent_nested=True):
self.info("Composing {} files".format(len(manifest)))
utils.link_files(basedir, installdir, files=manifest)
# And we're done
return os.path.join(os.sep, 'buildstream', 'install')
# Like os.path.getmtime(), but doesnt explode on symlinks
#
def getmtime(path):
stat = os.lstat(path)
return stat.st_mtime
# Plugin entry point
def setup():
return ComposeElement
buildstream-1.6.9/buildstream/plugins/elements/compose.yaml 0000664 0000000 0000000 00000001670 14375152700 0024213 0 ustar 00root root 0000000 0000000
# Compose element configuration
config:
# Whether to run the integration commands for the
# staged dependencies.
#
integrate: True
# A list of domains to include from each artifact, as
# they were defined in the element's 'split-rules'.
#
# Since domains can be added, it is not an error to
# specify domains which may not exist for all of the
# elements in this composition.
#
# The default empty list indicates that all domains
# from each dependency should be included.
#
include: []
# A list of domains to exclude from each artifact, as
# they were defined in the element's 'split-rules'.
#
# In the case that a file is spoken for by a domain
# in the 'include' list and another in the 'exclude'
# list, then the file will be excluded.
exclude: []
# Whether to include orphan files which are not
# included by any of the 'split-rules' present on
# a given element.
#
include-orphans: True
buildstream-1.6.9/buildstream/plugins/elements/distutils.py 0000664 0000000 0000000 00000002427 14375152700 0024261 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
distutils - Python distutils element
====================================
A :mod:`BuildElement <buildstream.buildelement>` implementation for using
python distutils
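As an illustrative sketch, a distutils element normally only needs a source;
the ``python`` variable may be overridden per element, for example to build
with a different interpreter (the source alias and URL below are placeholders):

.. code:: yaml

   kind: distutils

   variables:
     python: python2

   sources:
   - kind: tar
     url: upstream:mypackage-1.0.tar.gz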
The distutils default configuration:
.. literalinclude:: ../../../buildstream/plugins/elements/distutils.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the python 'distutils' kind.
class DistutilsElement(BuildElement):
pass
# Plugin entry point
def setup():
return DistutilsElement
buildstream-1.6.9/buildstream/plugins/elements/distutils.yaml 0000664 0000000 0000000 00000001457 14375152700 0024575 0 ustar 00root root 0000000 0000000 # Default python distutils configuration
variables:
# When building for python2 distutils, simply
# override this in the element declaration
python: python3
python-build: |
%{python} setup.py build
install-args: |
--prefix "%{prefix}" \
--root "%{install-root}"
python-install: |
%{python} setup.py install %{install-args}
config:
# Commands for configuring the software
#
configure-commands: []
# Commands for building the software
#
build-commands:
- |
%{python-build}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{python-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
- |
%{fix-pyc-timestamps}
buildstream-1.6.9/buildstream/plugins/elements/filter.py 0000664 0000000 0000000 00000011325 14375152700 0023517 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jonathan Maw
"""
filter - Extract a subset of files from another element
=======================================================
This filters another element by producing an output that is a subset of
the filtered element.
To specify the element to filter, declare it as the one and only build
dependency of this element. See :ref:`Dependencies <format_dependencies>`
for what dependencies are and how to specify them.
Dependencies aside from the filtered element may be specified, but
they must be runtime dependencies only. This can be useful to propagate
runtime dependencies forward from this filter element onto its reverse
dependencies.
When workspaces are opened, closed or reset on this element, or this
element is tracked, instead of erroring due to a lack of sources, this
element will transparently pass on the command to its sole build-dependency.
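As an illustrative sketch, a filter element which extracts only the ``devel``
domain from a single build dependency could look like this (the element and
domain names are placeholders):

.. code:: yaml

   kind: filter

   depends:
   - filename: libfoo.bst
     type: build

   config:
     include:
     - devel
     include-orphans: False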
The default configuration and possible options are as such:
.. literalinclude:: ../../../buildstream/plugins/elements/filter.yaml
:language: yaml
"""
from buildstream import Element, ElementError, Scope
class FilterElement(Element):
# pylint: disable=attribute-defined-outside-init
BST_ARTIFACT_VERSION = 1
# The filter element's output is its dependencies, so
# we must rebuild if the dependencies change even when
# not in strict build plans.
BST_STRICT_REBUILD = True
# This element ignores sources, so we should forbid them from being
# added, to reduce the potential for confusion
BST_FORBID_SOURCES = True
def configure(self, node):
self.node_validate(node, [
'include', 'exclude', 'include-orphans'
])
self.include = self.node_get_member(node, list, 'include')
self.exclude = self.node_get_member(node, list, 'exclude')
self.include_orphans = self.node_get_member(node, bool, 'include-orphans')
def preflight(self):
# Exactly one build-depend is permitted
build_deps = list(self.dependencies(Scope.BUILD, recurse=False))
if len(build_deps) != 1:
detail = "Full list of build-depends:\n"
deps_list = " \n".join([x.name for x in build_deps])
detail += deps_list
raise ElementError("{}: {} element must have exactly 1 build-dependency, actually have {}"
.format(self, type(self).__name__, len(build_deps)),
detail=detail, reason="filter-bdepend-wrong-count")
# That build-depend must not also be a runtime-depend
runtime_deps = list(self.dependencies(Scope.RUN, recurse=False))
if build_deps[0] in runtime_deps:
detail = "Full list of runtime depends:\n"
deps_list = " \n".join([x.name for x in runtime_deps])
detail += deps_list
raise ElementError("{}: {} element's build dependency must not also be a runtime dependency"
.format(self, type(self).__name__),
detail=detail, reason="filter-bdepend-also-rdepend")
def get_unique_key(self):
key = {
'include': sorted(self.include),
'exclude': sorted(self.exclude),
'orphans': self.include_orphans,
}
return key
def configure_sandbox(self, sandbox):
pass
def stage(self, sandbox):
pass
def assemble(self, sandbox):
with self.timed_activity("Staging artifact", silent_nested=True):
for dep in self.dependencies(Scope.BUILD, recurse=False):
dep.stage_artifact(sandbox, include=self.include,
exclude=self.exclude, orphans=self.include_orphans)
return ""
def _get_source_element(self):
# Filter elements act as proxies for their sole build-dependency
build_deps = list(self.dependencies(Scope.BUILD, recurse=False))
assert len(build_deps) == 1
output_elm = build_deps[0]._get_source_element()
return output_elm
def setup():
return FilterElement
buildstream-1.6.9/buildstream/plugins/elements/filter.yaml 0000664 0000000 0000000 00000001524 14375152700 0024031 0 ustar 00root root 0000000 0000000
# Filter element configuration
config:
# A list of domains to include from each artifact, as
# they were defined in the element's 'split-rules'.
#
# Since domains can be added, it is not an error to
# specify domains which may not exist for all of the
# elements in this composition.
#
# The default empty list indicates that all domains
# from each dependency should be included.
#
include: []
# A list of domains to exclude from each artifact, as
# they were defined in the element's 'split-rules'.
#
# In the case that a file is spoken for by a domain
# in the 'include' list and another in the 'exclude'
# list, then the file will be excluded.
exclude: []
# Whether to include orphan files which are not
# included by any of the 'split-rules' present on
# a given element.
#
include-orphans: False
buildstream-1.6.9/buildstream/plugins/elements/import.py 0000664 0000000 0000000 00000010302 14375152700 0023536 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
import - Import sources directly
================================
Import elements produce artifacts directly from their sources
without any kind of processing. These are typically used to
import an SDK to build on top of or to overlay your build with
some configuration data.
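As an illustrative sketch, an import element which stages the ``usr``
subdirectory of a downloaded tarball at the root of the artifact might look
like this (the alias and URL are placeholders, and the source would still
need a ref set via ``bst track``):

.. code:: yaml

   kind: import

   sources:
   - kind: tar
     url: upstream:sdk.tar.xz

   config:
     source: /usr
     target: /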
The empty configuration is as such:
.. literalinclude:: ../../../buildstream/plugins/elements/import.yaml
:language: yaml
"""
import os
import shutil
from buildstream import Element, BuildElement, ElementError
# Element implementation for the 'import' kind.
class ImportElement(BuildElement):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
self.source = self.node_subst_member(node, 'source')
self.target = self.node_subst_member(node, 'target')
def preflight(self):
# Assert that we have at least one source to fetch.
sources = list(self.sources())
if not sources:
raise ElementError("{}: An import element must have at least one source.".format(self))
def get_unique_key(self):
return {
'source': self.source,
'target': self.target
}
def configure_sandbox(self, sandbox):
pass
def stage(self, sandbox):
pass
def assemble(self, sandbox):
# Stage sources into the input directory
# Do not mount workspaces as the files are copied from outside the sandbox
self._stage_sources_in_sandbox(sandbox, 'input', mount_workspaces=False)
rootdir = sandbox.get_directory()
inputdir = os.path.join(rootdir, 'input')
outputdir = os.path.join(rootdir, 'output')
# The directory to grab
inputdir = os.path.join(inputdir, self.source.lstrip(os.sep))
inputdir = inputdir.rstrip(os.sep)
# The output target directory
outputdir = os.path.join(outputdir, self.target.lstrip(os.sep))
outputdir = outputdir.rstrip(os.sep)
# Ensure target directory parent
os.makedirs(os.path.dirname(outputdir), exist_ok=True)
if not os.path.exists(inputdir):
raise ElementError("{}: No files were found inside directory '{}'"
.format(self, self.source))
# Move it over
shutil.move(inputdir, outputdir)
# And we're done
return '/output'
def prepare(self, sandbox):
# We inherit a non-default prepare from BuildElement.
Element.prepare(self, sandbox)
def generate_script(self):
build_root = self.get_variable('build-root')
install_root = self.get_variable('install-root')
commands = []
# The directory to grab
inputdir = os.path.join(build_root, self.normal_name, self.source.lstrip(os.sep))
inputdir = inputdir.rstrip(os.sep)
# The output target directory
outputdir = os.path.join(install_root, self.target.lstrip(os.sep))
outputdir = outputdir.rstrip(os.sep)
# Ensure target directory parent exists but target directory doesn't
commands.append("mkdir -p {}".format(os.path.dirname(outputdir)))
commands.append("[ ! -e {} ] || rmdir {}".format(outputdir, outputdir))
# Move it over
commands.append("mv {} {}".format(inputdir, outputdir))
script = ""
for cmd in commands:
script += "(set -ex; {}\n) || exit 1\n".format(cmd)
return script
# Plugin entry point
def setup():
return ImportElement
buildstream-1.6.9/buildstream/plugins/elements/import.yaml 0000664 0000000 0000000 00000000671 14375152700 0024060 0 ustar 00root root 0000000 0000000 # The import element simply stages the given sources
# directly to the root of the sandbox and then collects
# the output to create an output artifact.
#
config:
# By default we collect everything staged, specify a
# directory here to output only a subset of the staged
# input sources.
source: /
# Prefix the output with an optional directory, by default
# the input is found at the root of the produced artifact.
target: /
buildstream-1.6.9/buildstream/plugins/elements/junction.py 0000664 0000000 0000000 00000012566 14375152700 0024073 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jürg Billeter
"""
junction - Integrate subprojects
================================
This element is a link to another BuildStream project. It allows integration
of multiple projects into a single pipeline.
Overview
--------
.. code:: yaml
kind: junction
# Specify the BuildStream project source
sources:
- kind: git
url: upstream:projectname.git
track: master
ref: d0b38561afb8122a3fc6bafc5a733ec502fcaed6
# Specify the junction configuration
config:
# Override project options
options:
machine_arch: "%{machine_arch}"
debug: True
# Optionally look in a subpath of the source repository for the project
path: projects/hello
.. note::
Junction elements may not specify any dependencies as they are simply
links to other projects and are not in the dependency graph on their own.
With a junction element in place, local elements can depend on elements in
the other BuildStream project using the additional ``junction`` attribute in the
dependency dictionary:
.. code:: yaml
depends:
- junction: toolchain.bst
filename: gcc.bst
type: build
While junctions are elements, only a limited set of element operations is
supported. They can be tracked and fetched like other elements.
However, junction elements do not produce any artifacts, which means that
they cannot be built or staged. It also means that another element cannot
depend on a junction element itself.
.. note::
BuildStream does not implicitly track junction elements. This means
that if we were to invoke: `bst build --track-all ELEMENT` on an element
which uses a junction element, the ref of the junction element
will not automatically be updated if a more recent version exists.
Therefore, if you require the most up-to-date version of a subproject,
you must explicitly track the junction element by invoking:
`bst track JUNCTION_ELEMENT`.
Furthermore, elements within the subproject are also not tracked by default.
For this, we must specify the `--track-cross-junctions` option. This option
must be preceded by `--track ELEMENT` or `--track-all`.
Sources
-------
``bst show`` does not implicitly fetch junction sources if they haven't been
cached yet. However, they can be fetched explicitly:
.. code::
bst fetch junction.bst
Other commands such as ``bst build`` implicitly fetch junction sources.
Options
-------
.. code:: yaml
options:
machine_arch: "%{machine_arch}"
debug: True
Junctions can configure options of the linked project. Options are never
implicitly inherited across junctions, however, variables can be used to
explicitly assign the same value to a subproject option.
.. _core_junction_nested:
Nested Junctions
----------------
Junctions can be nested. That is, subprojects are allowed to have junctions on
their own. Nested junctions in different subprojects may point to the same
project; however, in most use cases the same project should be loaded only once.
BuildStream uses the junction element name as key to determine which junctions
to merge. It is recommended that the name of a junction is set to the same as
the name of the linked project.
As the junctions may differ in source version and options, BuildStream cannot
simply use one junction and ignore the others. Due to this, BuildStream requires
the user to resolve possibly conflicting nested junctions by creating a junction
with the same name in the top-level project, which then takes precedence.
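For instance, if two subprojects both contain a junction named ``common.bst``
pointing at the same project, the top-level project can declare its own
``common.bst`` junction, which then takes precedence for both (the element
name, alias and URL here are illustrative, and the junction would still need
a tracked ref):

.. code:: yaml

   kind: junction

   sources:
   - kind: git
     url: upstream:common-project.git
     track: master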
"""
from collections.abc import Mapping
from buildstream import Element
from buildstream._pipeline import PipelineError
# Element implementation for the 'junction' kind.
class JunctionElement(Element):
# pylint: disable=attribute-defined-outside-init
# Junctions are not allowed any dependencies
BST_FORBID_BDEPENDS = True
BST_FORBID_RDEPENDS = True
def configure(self, node):
self.path = self.node_get_member(node, str, 'path', default='')
self.options = self.node_get_member(node, Mapping, 'options', default={})
def preflight(self):
pass
def get_unique_key(self):
# Junctions do not produce artifacts. get_unique_key() implementation
# is still required for `bst fetch`.
return 1
def configure_sandbox(self, sandbox):
raise PipelineError("Cannot build junction elements")
def stage(self, sandbox):
raise PipelineError("Cannot stage junction elements")
def generate_script(self):
raise PipelineError("Cannot build junction elements")
def assemble(self, sandbox):
raise PipelineError("Cannot build junction elements")
# Plugin entry point
def setup():
return JunctionElement
buildstream-1.6.9/buildstream/plugins/elements/make.py 0000664 0000000 0000000 00000002543 14375152700 0023151 0 ustar 00root root 0000000 0000000 #
# Copyright Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Ed Baunton
"""
make - Make build element
=========================
This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
using GNU make based builds.
.. note::
The ``make`` element is available since :ref:`format version 9 <project_format_version>`
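As a sketch, extra arguments can be passed to ``make`` by overriding the
``make-args`` variable on a per-element basis (the values below are purely
illustrative; note that overriding the variable replaces its default):

.. code:: yaml

   kind: make

   variables:
     make-args: >-
       PREFIX="%{prefix}"
       CC=gcc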
Here is the default configuration for the ``make`` element in full:
.. literalinclude:: ../../../buildstream/plugins/elements/make.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'make' kind.
class MakeElement(BuildElement):
pass
# Plugin entry point
def setup():
return MakeElement
buildstream-1.6.9/buildstream/plugins/elements/make.yaml 0000664 0000000 0000000 00000001564 14375152700 0023465 0 ustar 00root root 0000000 0000000 # make default configurations
variables:
make-args: >-
PREFIX="%{prefix}"
make-install-args: >-
%{make-args}
DESTDIR="%{install-root}"
install
make: make %{make-args}
make-install: make -j1 %{make-install-args}
# Set this if the sources cannot handle parallelization.
#
# notparallel: True
config:
# Commands for building the software
#
build-commands:
- |
%{make}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{make-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
# Use max-jobs CPUs for building and enable verbosity
environment:
MAKEFLAGS: -j%{max-jobs}
V: 1
# And don't consider MAKEFLAGS or V as something which may
# affect build output.
environment-nocache:
- MAKEFLAGS
- V
buildstream-1.6.9/buildstream/plugins/elements/makemaker.py 0000664 0000000 0000000 00000002460 14375152700 0024167 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
makemaker - Perl MakeMaker build element
========================================
A :mod:`BuildElement <buildstream.buildelement>` implementation for using
the Perl ExtUtils::MakeMaker build system
The MakeMaker default configuration:
.. literalinclude:: ../../../buildstream/plugins/elements/makemaker.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'makemaker' kind.
class MakeMakerElement(BuildElement):
pass
# Plugin entry point
def setup():
return MakeMakerElement
buildstream-1.6.9/buildstream/plugins/elements/makemaker.yaml 0000664 0000000 0000000 00000002051 14375152700 0024475 0 ustar 00root root 0000000 0000000 # Default configuration for the Perl ExtUtil::MakeMaker
# build system
variables:
# To install perl distributions into the correct location
# in our chroot we need to set PREFIX to <install-root>/
# <prefix> in the configure-commands.
#
# The mapping between PREFIX and the final installation
# directories is complex and depends upon the configuration
# of perl; see
# https://metacpan.org/pod/distribution/perl/INSTALL#Installation-Directories
# and ExtUtils::MakeMaker's documentation for more details.
configure: |
perl Makefile.PL PREFIX=%{install-root}%{prefix}
make: make
make-install: make install
config:
# Commands for configuring the software
#
configure-commands:
- |
%{configure}
# Commands for building the software
#
build-commands:
- |
%{make}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{make-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
buildstream-1.6.9/buildstream/plugins/elements/manual.py 0000664 0000000 0000000 00000002500 14375152700 0023502 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
manual - Manual build element
=============================
The most basic build element does nothing but allow users to
add custom build commands to the array understood by the :mod:`BuildElement <buildstream.buildelement>`
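For example, a manual element might provide all of its commands directly
(the commands here are only illustrative):

.. code:: yaml

   kind: manual

   config:
     build-commands:
     - make PREFIX="%{prefix}"

     install-commands:
     - make -j1 PREFIX="%{prefix}" DESTDIR="%{install-root}" install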
The empty configuration is as such:
.. literalinclude:: ../../../buildstream/plugins/elements/manual.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'manual' kind.
class ManualElement(BuildElement):
pass
# Plugin entry point
def setup():
return ManualElement
buildstream-1.6.9/buildstream/plugins/elements/manual.yaml 0000664 0000000 0000000 00000001454 14375152700 0024023 0 ustar 00root root 0000000 0000000 # No variables added for the manual element by default, set
# this if you plan to use make, and the sources cannot handle
# parallelization.
#
# variables:
#
# notparallel: True
# Manual build element does not provide any default
# build commands
config:
# Commands for configuring the software
#
configure-commands: []
# Commands for building the software
#
build-commands: []
# Commands for installing the software into a
# destination folder
#
install-commands: []
# Commands for stripping installed binaries
#
strip-commands:
- |
%{strip-binaries}
# Use max-jobs CPUs for building and enable verbosity
environment:
MAKEFLAGS: -j%{max-jobs}
V: 1
# And don't consider MAKEFLAGS or V as something which may
# affect build output.
environment-nocache:
- MAKEFLAGS
- V
buildstream-1.6.9/buildstream/plugins/elements/meson.py 0000664 0000000 0000000 00000003430 14375152700 0023351 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 Patrick Griffis
# Copyright (C) 2018 Codethink Ltd.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
"""
meson - Meson build element
===========================
This is a :mod:`BuildElement <buildstream.buildelement>` implementation for
using `Meson <http://mesonbuild.com/>`_ build scripts.
You will often want to pass additional arguments to ``meson``. This should
be done on a per-element basis by setting the ``meson-local`` variable. Here is
an example:
.. code:: yaml
variables:
meson-local: |
-Dmonkeys=yes
If you want to pass extra options to ``meson`` for every element in your
project, set the ``meson-global`` variable in your project.conf file. Here is
an example of that:
.. code:: yaml
elements:
meson:
variables:
meson-global: |
-Dmonkeys=always
Here is the default configuration for the ``meson`` element in full:
.. literalinclude:: ../../../buildstream/plugins/elements/meson.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'meson' kind.
class MesonElement(BuildElement):
pass
# Plugin entry point
def setup():
return MesonElement
buildstream-1.6.9/buildstream/plugins/elements/meson.yaml 0000664 0000000 0000000 00000003101 14375152700 0023656 0 ustar 00root root 0000000 0000000 # Meson default configuration
variables:
build-dir: _builddir
# Project-wide extra arguments to be passed to `meson`
meson-global: ''
# Element-specific extra arguments to be passed to `meson`.
meson-local: ''
# For backwards compatibility only, do not use.
meson-extra: ''
meson-args: |
--prefix=%{prefix} \
--bindir=%{bindir} \
--sbindir=%{sbindir} \
--sysconfdir=%{sysconfdir} \
--datadir=%{datadir} \
--includedir=%{includedir} \
--libdir=%{libdir} \
--libexecdir=%{libexecdir} \
--localstatedir=%{localstatedir} \
--sharedstatedir=%{sharedstatedir} \
--mandir=%{mandir} \
--infodir=%{infodir} %{meson-extra} %{meson-global} %{meson-local}
meson: meson %{build-dir} %{meson-args}
ninja: |
ninja -j ${NINJAJOBS} -C %{build-dir}
ninja-install: |
env DESTDIR="%{install-root}" ninja -C %{build-dir} install
# Set this if the sources cannot handle parallelization.
#
# notparallel: True
config:
# Commands for configuring the software
#
configure-commands:
- |
%{meson}
# Commands for building the software
#
build-commands:
- |
%{ninja}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{ninja-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
# Use max-jobs CPUs for building
environment:
NINJAJOBS: |
%{max-jobs}
# And don't consider NINJAJOBS as something which may
# affect build output.
environment-nocache:
- NINJAJOBS
buildstream-1.6.9/buildstream/plugins/elements/modulebuild.py 0000664 0000000 0000000 00000002501 14375152700 0024533 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
modulebuild - Perl Module::Build build element
==============================================
A :mod:`BuildElement <buildstream.buildelement>` implementation for using
the Perl Module::Build build system
The modulebuild default configuration:
.. literalinclude:: ../../../buildstream/plugins/elements/modulebuild.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'modulebuild' kind.
class ModuleBuildElement(BuildElement):
pass
# Plugin entry point
def setup():
return ModuleBuildElement
buildstream-1.6.9/buildstream/plugins/elements/modulebuild.yaml 0000664 0000000 0000000 00000002067 14375152700 0025054 0 ustar 00root root 0000000 0000000 # Default configuration for the Perl Module::Build
# build system.
variables:
# To install perl distributions into the correct location
# in our chroot we need to set PREFIX to <install-root>/
# <prefix> in the configure-commands.
#
# The mapping between PREFIX and the final installation
# directories is complex and depends upon the configuration
# of perl; see
# https://metacpan.org/pod/distribution/perl/INSTALL#Installation-Directories
# and ExtUtils::MakeMaker's documentation for more details.
configure: |
perl Build.PL --prefix "%{install-root}%{prefix}"
perl-build: ./Build
perl-install: ./Build install
config:
# Commands for configuring the software
#
configure-commands:
- |
%{configure}
# Commands for building the software
#
build-commands:
- |
%{perl-build}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{perl-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
buildstream-1.6.9/buildstream/plugins/elements/pip.py 0000664 0000000 0000000 00000002320 14375152700 0023015 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Mathieu Bridon
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Mathieu Bridon
"""
pip - Pip build element
=======================
A :mod:`BuildElement <buildstream.buildelement>` implementation for installing
Python modules with pip
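As an illustrative sketch, a pip element typically depends on an element
which provides python and pip, and takes the package to install as its
source (the element name, alias and URL are placeholders):

.. code:: yaml

   kind: pip

   depends:
   - filename: base.bst
     type: build

   sources:
   - kind: tar
     url: upstream:mypackage-1.0.tar.gz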
The pip default configuration:
.. literalinclude:: ../../../buildstream/plugins/elements/pip.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'pip' kind.
class PipElement(BuildElement):
pass
# Plugin entry point
def setup():
return PipElement
buildstream-1.6.9/buildstream/plugins/elements/pip.yaml 0000664 0000000 0000000 00000000676 14375152700 0023343 0 ustar 00root root 0000000 0000000 # Pip default configurations
variables:
pip: pip
config:
configure-commands: []
build-commands: []
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{pip} install --no-deps --root=%{install-root} --prefix=%{prefix} .
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
- |
%{fix-pyc-timestamps}
buildstream-1.6.9/buildstream/plugins/elements/qmake.py 0000664 0000000 0000000 00000002360 14375152700 0023327 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
qmake - QMake build element
===========================
A :mod:`BuildElement <buildstream.buildelement>` implementation for using
the qmake build system
The qmake default configuration:
.. literalinclude:: ../../../buildstream/plugins/elements/qmake.yaml
:language: yaml
"""
from buildstream import BuildElement
# Element implementation for the 'qmake' kind.
class QMakeElement(BuildElement):
pass
# Plugin entry point
def setup():
return QMakeElement
buildstream-1.6.9/buildstream/plugins/elements/qmake.yaml 0000664 0000000 0000000 00000001561 14375152700 0023643 0 ustar 00root root 0000000 0000000 # QMake default configuration
variables:
qmake: qmake -makefile
make: make
make-install: make -j1 INSTALL_ROOT="%{install-root}" install
# Set this if the sources cannot handle parallelization.
#
# notparallel: True
config:
# Commands for configuring the software
#
configure-commands:
- |
%{qmake}
# Commands for building the software
#
build-commands:
- |
%{make}
# Commands for installing the software into a
# destination folder
#
install-commands:
- |
%{make-install}
# Commands for stripping debugging information out of
# installed binaries
#
strip-commands:
- |
%{strip-binaries}
# Use max-jobs CPUs for building and enable verbosity
environment:
MAKEFLAGS: -j%{max-jobs}
V: 1
# And don't consider MAKEFLAGS or V as something which may
# affect build output.
environment-nocache:
- MAKEFLAGS
- V
buildstream-1.6.9/buildstream/plugins/elements/script.py 0000664 0000000 0000000 00000004532 14375152700 0023540 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jonathan Maw
"""
script - Run scripts to create output
=====================================
This element allows one to run some commands to mutate the
input and create some output.
.. note::
Script elements may only specify build dependencies. See
:ref:`the format documentation <format_dependencies>` for more
detail on specifying dependencies.
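As an illustrative sketch, a script element commonly stages a tooling
element at the root of the sandbox, stages the element to operate on in
``%{build-root}``, and then runs commands which write their output below
``%{install-root}`` (the element names and commands are placeholders):

.. code:: yaml

   kind: script

   depends:
   - filename: base-tools.bst
     type: build
   - filename: my-system.bst
     type: build

   config:
     layout:
     - element: base-tools.bst
       destination: /
     - element: my-system.bst
       destination: %{build-root}

     commands:
     - mkdir -p %{install-root}
     - cp -a %{build-root}/etc %{install-root}/etc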
The default configuration and possible options are as such:
.. literalinclude:: ../../../buildstream/plugins/elements/script.yaml
:language: yaml
"""
import buildstream
# Element implementation for the 'script' kind.
class ScriptElement(buildstream.ScriptElement):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
for n in self.node_get_member(node, list, 'layout', []):
dst = self.node_subst_member(n, 'destination')
elm = self.node_subst_member(n, 'element', None)
self.layout_add(elm, dst)
self.node_validate(node, [
'commands', 'root-read-only', 'layout', 'create-dev-shm'
])
cmds = self.node_subst_list(node, "commands")
self.add_commands("commands", cmds)
self.set_work_dir()
self.set_install_root()
self.set_root_read_only(self.node_get_member(node, bool,
'root-read-only', False))
self.set_create_dev_shm(self.node_get_member(node, bool,
'create-dev-shm', False))
# Plugin entry point
def setup():
return ScriptElement
buildstream-1.6.9/buildstream/plugins/elements/script.yaml 0000664 0000000 0000000 00000001522 14375152700 0024046 0 ustar 00root root 0000000 0000000 # Common script element variables
variables:
# Defines the directory commands will be run from.
cwd: /
# Script element configuration
config:
# Defines whether to run the sandbox with '/' read-only.
# It is recommended to set root as read-only wherever possible.
root-read-only: False
# Defines whether we should mount a tmpfs filesystem at /dev/shm
#
create-dev-shm: False
# Defines where to stage elements which are direct or indirect dependencies.
# By default, all direct dependencies are staged to '/'.
# This is also commonly used to take one element as an environment
# containing the tools used to operate on the other element.
# layout:
# - element: foo-tools.bst
# destination: /
# - element: foo-system.bst
# destination: %{build-root}
# List of commands to run in the sandbox.
commands: []
buildstream-1.6.9/buildstream/plugins/elements/stack.py 0000664 0000000 0000000 00000004436 14375152700 0023344 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
stack - Symbolic Element for dependency grouping
================================================
A stack element is simply a symbolic element used to represent
a logical group of elements.
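As a sketch, a stack element consists of nothing but a list of dependencies
(the element names here are illustrative):

.. code:: yaml

   kind: stack
   description: Convenience target grouping the whole runtime

   depends:
   - base/alpine.bst
   - components/application.bst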
"""
import os
from buildstream import Element
# Element implementation for the 'stack' kind.
class StackElement(Element):
def configure(self, node):
pass
def preflight(self):
pass
def get_unique_key(self):
# We do not add anything to the build, only our dependencies
# do, so our unique key is just a constant.
return 1
def configure_sandbox(self, sandbox):
pass
def stage(self, sandbox):
pass
def assemble(self, sandbox):
# Just create a dummy empty artifact, its existence is a statement
# that all this stack's dependencies are built.
rootdir = sandbox.get_directory()
# XXX FIXME: This is currently needed because the artifact
# cache wont let us commit an empty artifact.
#
# We need to fix the artifact cache so that it stores
# the actual artifact data in a subdirectory, then we
# will be able to store some additional state in the
# artifact cache, and we can also remove this hack.
outputdir = os.path.join(rootdir, 'output', 'bst')
# Ensure target directory parent
os.makedirs(os.path.dirname(outputdir), exist_ok=True)
# And we're done
return '/output'
# Plugin entry point
def setup():
return StackElement
buildstream-1.6.9/buildstream/plugins/sources/ 0000775 0000000 0000000 00000000000 14375152700 0021525 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/plugins/sources/__init__.py 0000664 0000000 0000000 00000000000 14375152700 0023624 0 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/plugins/sources/_downloadablefilesource.py 0000664 0000000 0000000 00000022304 14375152700 0026753 0 ustar 00root root 0000000 0000000 """A base abstract class for source implementations which download a file"""
import os
import urllib.request
import urllib.error
import contextlib
import shutil
import netrc
from buildstream import Source, SourceError, Consistency
from buildstream import utils
class _NetrcFTPOpener(urllib.request.FTPHandler):
def __init__(self, netrc_config):
self.netrc = netrc_config
def _split(self, netloc):
userpass, hostport = urllib.parse.splituser(netloc)
host, port = urllib.parse.splitport(hostport)
if userpass:
user, passwd = urllib.parse.splitpasswd(userpass)
else:
user = None
passwd = None
return host, port, user, passwd
def _unsplit(self, host, port, user, passwd):
if port:
host = '{}:{}'.format(host, port)
if user:
if passwd:
user = '{}:{}'.format(user, passwd)
host = '{}@{}'.format(user, host)
return host
def ftp_open(self, req):
host, port, user, passwd = self._split(req.host)
if user is None and self.netrc:
entry = self.netrc.authenticators(host)
if entry:
user, _, passwd = entry
req.host = self._unsplit(host, port, user, passwd)
return super().ftp_open(req)
class _NetrcPasswordManager:
def __init__(self, netrc_config):
self.netrc = netrc_config
def add_password(self, realm, uri, user, passwd):
pass
def find_user_password(self, realm, authuri):
if not self.netrc:
return None, None
parts = urllib.parse.urlsplit(authuri)
entry = self.netrc.authenticators(parts.hostname)
if not entry:
return None, None
else:
login, _, password = entry
return login, password
class DownloadableFileSource(Source):
# pylint: disable=attribute-defined-outside-init
COMMON_CONFIG_KEYS = Source.COMMON_CONFIG_KEYS + ['url', 'ref', 'etag']
__urlopener = None
def configure(self, node):
self.original_url = self.node_get_member(node, str, 'url')
self.ref = self.node_get_member(node, str, 'ref', None)
self.url = self.translate_url(self.original_url)
self._warn_deprecated_etag(node)
def preflight(self):
return
def get_unique_key(self):
return [self.original_url, self.ref]
def get_consistency(self):
if self.ref is None:
return Consistency.INCONSISTENT
if os.path.isfile(self._get_mirror_file()):
return Consistency.CACHED
else:
return Consistency.RESOLVED
def load_ref(self, node):
self.ref = self.node_get_member(node, str, 'ref', None)
self._warn_deprecated_etag(node)
def get_ref(self):
return self.ref
def set_ref(self, ref, node):
node['ref'] = self.ref = ref
def track(self):
# There is no 'track' field in the source to determine whether or not
# to update refs, because tracking a ref is always a conscious
# decision by the user.
with self.timed_activity("Tracking {}".format(self.url),
silent_nested=True):
new_ref = self._ensure_mirror()
if self.ref and self.ref != new_ref:
detail = "When tracking, new ref differs from current ref:\n" \
+ " Tracked URL: {}\n".format(self.url) \
+ " Current ref: {}\n".format(self.ref) \
+ " New ref: {}\n".format(new_ref)
self.warn("Potential man-in-the-middle attack!", detail=detail)
return new_ref
def fetch(self):
# Just a defensive check, it is impossible for the
# file to be already cached because Source.fetch() will
# not be called if the source is already Consistency.CACHED.
#
if os.path.isfile(self._get_mirror_file()):
return # pragma: nocover
# Download the file, raise hell if the sha256sums don't match,
# and mirror the file otherwise.
with self.timed_activity("Fetching {}".format(self.url), silent_nested=True):
sha256 = self._ensure_mirror()
if sha256 != self.ref:
raise SourceError("File downloaded from {} has sha256sum '{}', not '{}'!"
.format(self.url, sha256, self.ref))
def _warn_deprecated_etag(self, node):
etag = self.node_get_member(node, str, 'etag', None)
if etag:
provenance = self.node_provenance(node, member_name='etag')
self.warn('{} "etag" is deprecated and ignored.'.format(provenance))
def _get_etag(self, ref):
etagfilename = os.path.join(self._get_mirror_dir(), '{}.etag'.format(ref))
if os.path.exists(etagfilename):
with open(etagfilename, 'r') as etagfile:
return etagfile.read()
return None
def _store_etag(self, ref, etag):
etagfilename = os.path.join(self._get_mirror_dir(), '{}.etag'.format(ref))
with utils.save_file_atomic(etagfilename) as etagfile:
etagfile.write(etag)
def _ensure_mirror(self):
# Downloads from the url and caches it according to its sha256sum.
try:
with self.tempdir() as td:
default_name = os.path.basename(self.url)
request = urllib.request.Request(self.url)
request.add_header('Accept', '*/*')
request.add_header('User-Agent', 'BuildStream/1')
# We do not use the ETag if what we have in cache does not
# match the ref, in order to be able to recover from a
# corrupted download.
if self.ref:
etag = self._get_etag(self.ref)
# Do not re-download the file if the ETag matches.
if etag and self.get_consistency() == Consistency.CACHED:
request.add_header('If-None-Match', etag)
opener = self.__get_urlopener()
with contextlib.closing(opener.open(request)) as response:
info = response.info()
# some servers don't honor the 'If-None-Match' header
if self.ref and etag and info["ETag"] == etag:
return self.ref
etag = info["ETag"]
filename = info.get_filename(default_name)
filename = os.path.basename(filename)
local_file = os.path.join(td, filename)
with open(local_file, 'wb') as dest:
shutil.copyfileobj(response, dest)
# Make sure url-specific mirror dir exists.
if not os.path.isdir(self._get_mirror_dir()):
os.makedirs(self._get_mirror_dir())
# Store by sha256sum
sha256 = utils.sha256sum(local_file)
# Even if the file already exists, move the new file over,
# in case the old file was corrupted somehow.
os.rename(local_file, self._get_mirror_file(sha256))
if etag:
self._store_etag(sha256, etag)
return sha256
except urllib.error.HTTPError as e:
if e.code == 304:
# 304 Not Modified.
# Because we only send the ETag when the cached file matches the ref,
# the currently specified ref is what we would have downloaded.
return self.ref
raise SourceError("{}: Error mirroring {}: {}"
.format(self, self.url, e), temporary=True) from e
except (urllib.error.URLError, urllib.error.ContentTooShortError, OSError) as e:
raise SourceError("{}: Error mirroring {}: {}"
.format(self, self.url, e), temporary=True) from e
def _get_mirror_dir(self):
return os.path.join(self.get_mirror_directory(),
utils.url_directory_name(self.original_url))
def _get_mirror_file(self, sha=None):
return os.path.join(self._get_mirror_dir(), sha or self.ref)
def __get_urlopener(self):
if not DownloadableFileSource.__urlopener:
try:
netrc_config = netrc.netrc()
except OSError:
# If the .netrc file was not found, FileNotFoundError will be
# raised, but OSError will be raised directly by the netrc package
# in the case that $HOME is not set.
#
# This will catch both cases.
#
DownloadableFileSource.__urlopener = urllib.request.build_opener()
except netrc.NetrcParseError as e:
self.warn('{}: While reading .netrc: {}'.format(self, e))
return urllib.request.build_opener()
else:
netrc_pw_mgr = _NetrcPasswordManager(netrc_config)
http_auth = urllib.request.HTTPBasicAuthHandler(netrc_pw_mgr)
ftp_handler = _NetrcFTPOpener(netrc_config)
DownloadableFileSource.__urlopener = urllib.request.build_opener(http_auth, ftp_handler)
return DownloadableFileSource.__urlopener
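# The logic above caches each download under the sha256 digest of its content and
# stores the server's ETag alongside it, so an unchanged file is never downloaded
# twice while a corrupted cache entry can still be recovered from.  The helper
# below is a minimal standalone sketch of that idea; it is not used by BuildStream
# and the name '_example_fetch_to_cache' is illustrative only.
def _example_fetch_to_cache(url, cachedir, known_sha256=None):
    import contextlib
    import hashlib
    import os
    import shutil
    import tempfile
    import urllib.error
    import urllib.request

    os.makedirs(cachedir, exist_ok=True)
    request = urllib.request.Request(url, headers={'Accept': '*/*'})

    # Only send the stored ETag when we already hold content matching the ref,
    # mirroring the "recover from a corrupted download" behaviour above.
    if known_sha256 and os.path.isfile(os.path.join(cachedir, known_sha256)):
        etagfile = os.path.join(cachedir, '{}.etag'.format(known_sha256))
        if os.path.isfile(etagfile):
            with open(etagfile, 'r') as f:
                request.add_header('If-None-Match', f.read())

    try:
        with contextlib.closing(urllib.request.urlopen(request)) as response:
            with tempfile.NamedTemporaryFile(dir=cachedir, delete=False) as dest:
                shutil.copyfileobj(response, dest)
                tmpname = dest.name
            etag = response.info()["ETag"]
    except urllib.error.HTTPError as e:
        if e.code == 304:
            return known_sha256  # Not Modified: the cached copy is still valid
        raise

    # Store the download keyed by its content digest, plus its ETag if any
    with open(tmpname, 'rb') as f:
        sha256 = hashlib.sha256(f.read()).hexdigest()
    os.rename(tmpname, os.path.join(cachedir, sha256))
    if etag:
        with open(os.path.join(cachedir, '{}.etag'.format(sha256)), 'w') as f:
            f.write(etag)
    return sha256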
buildstream-1.6.9/buildstream/plugins/sources/bzr.py 0000664 0000000 0000000 00000017474 14375152700 0022711 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jonathan Maw
"""
bzr - stage files from a bazaar repository
==========================================
**Host dependencies:**
* bzr
**Usage:**
.. code:: yaml
# Specify the bzr source kind
kind: bzr
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the bzr url. Bazaar URLs come in many forms, see
# `bzr help urlspec` for more information. Using an alias defined
# in your project configuration is encouraged.
url: https://launchpad.net/bzr
# Specify the tracking branch. This is mandatory, as bzr cannot identify
# an individual revision outside its branch. bzr URLs that omit the branch
# name implicitly specify the trunk branch, but bst requires this to be
# explicit.
track: trunk
# Specify the ref. This is a revision number. This is usually a decimal,
# but revisions on a branch use bzr's dotted form
# <revision-branched-from>.<branch-number>.<revision-on-branch>,
# e.g. 6622.1.6.
# The ref must be specified to build, and 'bst track' will update the
# revision number to the one on the tip of the branch specified in 'track'.
ref: 6622
"""
import os
import shutil
import fcntl
from contextlib import contextmanager
from buildstream import Source, SourceError, Consistency
from buildstream import utils
class BzrSource(Source):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
self.node_validate(node, ['url', 'track', 'ref'] + Source.COMMON_CONFIG_KEYS)
self.original_url = self.node_get_member(node, str, 'url')
self.tracking = self.node_get_member(node, str, 'track')
self.ref = self.node_get_member(node, str, 'ref', None)
self.url = self.translate_url(self.original_url)
def preflight(self):
# Check if bzr is installed, get the binary at the same time.
self.host_bzr = utils.get_host_tool('bzr')
def get_unique_key(self):
return [self.original_url, self.tracking, self.ref]
def get_consistency(self):
if self.ref is None or self.tracking is None:
return Consistency.INCONSISTENT
# Lock for the _check_ref()
with self._locked():
if self._check_ref():
return Consistency.CACHED
else:
return Consistency.RESOLVED
def load_ref(self, node):
self.ref = self.node_get_member(node, str, 'ref', None)
def get_ref(self):
return self.ref
def set_ref(self, ref, node):
node['ref'] = self.ref = ref
def track(self):
with self.timed_activity("Tracking {}".format(self.url),
silent_nested=True), self._locked():
self._ensure_mirror(skip_ref_check=True)
ret, out = self.check_output([self.host_bzr, "version-info",
"--custom", "--template={revno}",
self._get_branch_dir()],
fail="Failed to read the revision number at '{}'"
.format(self._get_branch_dir()))
if ret != 0:
raise SourceError("{}: Failed to get ref for tracking {}".format(self, self.tracking))
return out
def fetch(self):
with self.timed_activity("Fetching {}".format(self.url),
silent_nested=True), self._locked():
self._ensure_mirror()
def stage(self, directory):
self.call([self.host_bzr, "checkout", "--lightweight",
"--revision=revno:{}".format(self.ref),
self._get_branch_dir(), directory],
fail="Failed to checkout revision {} from branch {} to {}"
.format(self.ref, self._get_branch_dir(), directory))
def init_workspace(self, directory):
url = os.path.join(self.url, self.tracking)
with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True):
# Checkout from the cache
self.call([self.host_bzr, "branch",
"--use-existing-dir",
"--revision=revno:{}".format(self.ref),
self._get_branch_dir(), directory],
fail="Failed to branch revision {} from branch {} to {}"
.format(self.ref, self._get_branch_dir(), directory))
# Switch the parent branch to the source's origin
self.call([self.host_bzr, "switch",
"--directory={}".format(directory), url],
fail="Failed to switch workspace's parent branch to {}".format(url))
# _locked()
#
# This context manager ensures exclusive access to the
# bzr repository.
#
@contextmanager
def _locked(self):
lockdir = os.path.join(self.get_mirror_directory(), 'locks')
lockfile = os.path.join(
lockdir,
utils.url_directory_name(self.original_url) + '.lock'
)
os.makedirs(lockdir, exist_ok=True)
with open(lockfile, 'w') as lock:
fcntl.flock(lock, fcntl.LOCK_EX)
try:
yield
finally:
fcntl.flock(lock, fcntl.LOCK_UN)
def _check_ref(self):
# If the mirror doesn't exist yet, then we don't have the ref
if not os.path.exists(self._get_branch_dir()):
return False
return self.call([self.host_bzr, "revno",
"--revision=revno:{}".format(self.ref),
self._get_branch_dir()]) == 0
def _get_branch_dir(self):
return os.path.join(self._get_mirror_dir(), self.tracking)
def _get_mirror_dir(self):
return os.path.join(self.get_mirror_directory(),
utils.url_directory_name(self.original_url))
def _ensure_mirror(self, skip_ref_check=False):
mirror_dir = self._get_mirror_dir()
bzr_metadata_dir = os.path.join(mirror_dir, ".bzr")
if not os.path.exists(bzr_metadata_dir):
self.call([self.host_bzr, "init-repo", "--no-trees", mirror_dir],
fail="Failed to initialize bzr repository")
branch_dir = os.path.join(mirror_dir, self.tracking)
branch_url = self.url + "/" + self.tracking
if not os.path.exists(branch_dir):
# `bzr branch` the branch if it doesn't exist
# to get the upstream code
self.call([self.host_bzr, "branch", branch_url, branch_dir],
fail="Failed to branch from {} to {}".format(branch_url, branch_dir))
else:
# `bzr pull` the branch if it does exist
# to get any changes to the upstream code
self.call([self.host_bzr, "pull", "--directory={}".format(branch_dir), branch_url],
fail="Failed to pull new changes for {}".format(branch_dir))
if not skip_ref_check and not self._check_ref():
raise SourceError("Failed to ensure ref '{}' was mirrored".format(self.ref),
reason="ref-not-mirrored")
def setup():
return BzrSource
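# The _locked() context manager above serialises access to the shared bzr mirror
# with an advisory fcntl lock held on a per-URL lock file, so that concurrent
# 'bst fetch' and 'bst track' invocations cannot corrupt the mirror.  Below is a
# minimal standalone sketch of the same pattern; it is not used by BuildStream
# and the name '_example_locked_path' is illustrative only.  The parent directory
# of 'lockfile' is assumed to exist.
@contextmanager
def _example_locked_path(lockfile):
    # Open (creating if needed) the lock file and take an exclusive lock;
    # flock() blocks until any other holder releases it.
    with open(lockfile, 'w') as lock:
        fcntl.flock(lock, fcntl.LOCK_EX)
        try:
            yield
        finally:
            fcntl.flock(lock, fcntl.LOCK_UN)
# Usage: with _example_locked_path('/path/to/mirror.lock'): ...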
buildstream-1.6.9/buildstream/plugins/sources/deb.py 0000664 0000000 0000000 00000005332 14375152700 0022634 0 ustar 00root root 0000000 0000000 # Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Phillip Smyth
# Jonathan Maw
# Richard Maw
"""
deb - stage files from .deb packages
====================================
**Host dependencies:**
* arpy (python package)
**Usage:**
.. code:: yaml
# Specify the deb source kind
kind: deb
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the deb url. Using an alias defined in your project
# configuration is encouraged. 'bst track' will update the
# sha256sum in 'ref' to the downloaded file's sha256sum.
url: upstream:foo.deb
# Specify the ref. It's a sha256sum of the file you download.
ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
# Specify the basedir to return only the specified dir and its children
base-dir: ''
"""
import tarfile
from contextlib import contextmanager, ExitStack
import arpy # pylint: disable=import-error
from .tar import TarSource
class DebSource(TarSource):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
super().configure(node)
self.base_dir = self.node_get_member(node, str, 'base-dir', None)
def preflight(self):
return
@contextmanager
def _get_tar(self):
with ExitStack() as context:
deb_file = context.enter_context(open(self._get_mirror_file(), 'rb'))
arpy_archive = arpy.Archive(fileobj=deb_file)
arpy_archive.read_all_headers()
data_tar_arpy = [v for k, v in arpy_archive.archived_files.items() if b"data.tar" in k][0]
# ArchiveFileData is not enough like a file object for tarfile to use.
# Monkey-patching a seekable method makes it close enough for TarFile to open.
data_tar_arpy.seekable = lambda *args: True
tar = tarfile.open(fileobj=data_tar_arpy, mode="r:*")
yield tar
def setup():
return DebSource
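# A .deb package is an ar(1) archive whose payload lives in a member named
# 'data.tar.*'.  _get_tar() above locates that member with arpy and monkey-patches
# a seekable() method onto it so that tarfile accepts it.  The helper below is a
# minimal standalone sketch of the same steps; it is not used by BuildStream and
# the name '_example_open_deb_data' is illustrative only.
@contextmanager
def _example_open_deb_data(deb_path):
    with ExitStack() as stack:
        deb_file = stack.enter_context(open(deb_path, 'rb'))
        archive = arpy.Archive(fileobj=deb_file)
        archive.read_all_headers()
        # Pick the first member whose name contains b"data.tar"
        data_member = next(v for k, v in archive.archived_files.items() if b"data.tar" in k)
        data_member.seekable = lambda *args: True   # appease tarfile
        yield stack.enter_context(tarfile.open(fileobj=data_member, mode="r:*"))
# Usage: with _example_open_deb_data('foo.deb') as tar: tar.extractall('/tmp/out')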
buildstream-1.6.9/buildstream/plugins/sources/git.py 0000664 0000000 0000000 00000065032 14375152700 0022670 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
git - stage files from a git repository
=======================================
**Host dependencies:**
* git
**Usage:**
.. code:: yaml
# Specify the git source kind
kind: git
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the repository url, using an alias defined
# in your project configuration is recommended.
url: upstream:foo.git
# Optionally specify a symbolic tracking branch or tag, this
# will be used to update the 'ref' when refreshing the pipeline.
track: master
# Optionally specify the ref format used for tracking.
# The default is 'sha1' for the raw commit hash.
# If you specify 'git-describe', the commit hash will be prefixed
# with the closest tag.
ref-format: sha1
# Specify the commit ref, this must be specified in order to
# checkout sources and build, but can be automatically updated
# if the 'track' attribute was specified.
ref: d63cbb6fdc0bbdadc4a1b92284826a6d63a7ebcd
# Optionally specify whether submodules should be checked-out.
# If not set, this will default to 'True'
checkout-submodules: True
# If your repository has submodules, explicitly specifying the
# url from which they are to be fetched allows you to easily
# rebuild the same sources from a different location. This is
# especially handy when used with project defined aliases which
# can be redefined at a later time.
# You may also explicitly specify whether to check out this
# submodule. If 'checkout' is set, it will override
# 'checkout-submodules' with the value set below.
submodules:
plugins/bar:
url: upstream:bar.git
checkout: True
plugins/baz:
url: upstream:baz.git
checkout: False
**Configurable Warnings:**
This plugin provides the following :ref:`configurable warnings `:
- ``git:inconsistent-submodule`` - A submodule present in the git repository's .gitmodules was never
added with `git submodule add`.
- ``git:unlisted-submodule`` - A submodule is present in the git repository but was not specified in
the source configuration and was not disabled for checkout.
.. note::
The ``git:unlisted-submodule`` warning is available since :ref:`format version 20 `
- ``git:invalid-submodule`` - A submodule is specified in the source configuration but does not exist
in the repository.
.. note::
The ``git:invalid-submodule`` warning is available since :ref:`format version 20 `
This plugin also utilises the following configurable :class:`core warnings `:
- :attr:`ref-not-in-track ` - The provided ref was not
found in the provided track in the element's git repository.
"""
import os
import errno
import re
from collections.abc import Mapping
from io import StringIO
from configparser import RawConfigParser
from buildstream import Source, SourceError, Consistency, SourceFetcher, CoreWarnings
from buildstream import utils
GIT_MODULES = '.gitmodules'
# Warnings
WARN_INCONSISTENT_SUBMODULE = "inconsistent-submodule"
WARN_UNLISTED_SUBMODULE = "unlisted-submodule"
WARN_INVALID_SUBMODULE = "invalid-submodule"
# Because of handling of submodules, we maintain a GitMirror
# for the primary git source and also for each submodule it
# might have at a given time
#
class GitMirror(SourceFetcher):
def __init__(self, source, path, url, ref, *, primary=False):
super().__init__()
self.source = source
self.path = path
self.url = url
self.ref = ref
self.primary = primary
self.mirror = os.path.join(source.get_mirror_directory(), utils.url_directory_name(url))
self.mark_download_url(url)
# Ensures that the mirror exists
def ensure(self, alias_override=None):
# Unfortunately, git does not know how to clone just a specific ref, so we
# have to download the whole repository even if we only need a couple
# of bytes.
if not os.path.exists(self.mirror):
# Do the initial clone in a tmpdir because we want an atomic move after a
# long-running clone which could fail part way through. For now, do this
# directly in our git directory, eliminating the chance that the
# system-configured tmpdir is on a different partition.
#
with self.source.tempdir() as tmpdir:
url = self.source.translate_url(self.url, alias_override=alias_override,
primary=self.primary)
self.source.call([self.source.host_git, 'clone', '--mirror', '-n', url, tmpdir],
fail="Failed to clone git repository {}".format(url),
fail_temporarily=True)
# Attempt atomic rename into destination, this will fail if
# another process beat us to the punch
try:
os.rename(tmpdir, self.mirror)
except OSError as e:
# When renaming and the destination repo already exists, os.rename()
# will fail with ENOTEMPTY or EEXIST, since an empty directory will
# be silently replaced
if e.errno in (errno.ENOTEMPTY, errno.EEXIST):
self.source.status("{}: Discarding duplicate clone of {}"
.format(self.source, url))
else:
raise SourceError("{}: Failed to move cloned git repository {} from '{}' to '{}': {}"
.format(self.source, url, tmpdir, self.mirror, e)) from e
def _fetch(self, alias_override=None):
url = self.source.translate_url(self.url,
alias_override=alias_override,
primary=self.primary)
if alias_override:
remote_name = utils.url_directory_name(alias_override)
_, remotes = self.source.check_output(
[self.source.host_git, 'remote'],
fail="Failed to retrieve list of remotes in {}".format(self.mirror),
cwd=self.mirror
)
if remote_name not in remotes.strip().split():
self.source.call(
[self.source.host_git, 'remote', 'add', remote_name, url],
fail="Failed to add remote {} with url {}".format(remote_name, url),
cwd=self.mirror
)
else:
remote_name = "origin"
# In git < 1.9.0, `--tags` fetches tags exclusively, so we have to call
# `git fetch` twice: once for the branches and once for the tags
#
if self.source.git_fetch_tags_exclusive:
self.source.call([self.source.host_git, 'fetch', remote_name, '--prune', '--force'],
fail="Failed to fetch from remote git repository: {}".format(url),
fail_temporarily=True,
cwd=self.mirror)
self.source.call([self.source.host_git, 'fetch', remote_name, '--prune', '--force', '--tags'],
fail="Failed to fetch from remote git repository: {}".format(url),
fail_temporarily=True,
cwd=self.mirror)
def fetch(self, alias_override=None):
# Resolve the URL for the message
resolved_url = self.source.translate_url(self.url,
alias_override=alias_override,
primary=self.primary)
with self.source.timed_activity("Fetching from {}"
.format(resolved_url),
silent_nested=True):
self.ensure(alias_override)
if not self.has_ref():
self._fetch(alias_override)
self.assert_ref()
def has_ref(self):
if not self.ref:
return False
# If the mirror doesn't exist, we also don't have the ref
if not os.path.exists(self.mirror):
return False
# Check if the ref is really there
rc = self.source.call([self.source.host_git, 'cat-file', '-t', self.ref], cwd=self.mirror)
return rc == 0
def assert_ref(self):
if not self.has_ref():
raise SourceError("{}: expected ref '{}' was not found in git repository: '{}'"
.format(self.source, self.ref, self.url))
def latest_commit(self, tracking):
_, output = self.source.check_output(
[self.source.host_git, 'rev-parse', tracking],
fail="Unable to find commit for specified branch name '{}'".format(tracking),
cwd=self.mirror)
ref = output.rstrip('\n')
if self.source.ref_format == 'git-describe':
# Prefix the ref with the closest tag, if available,
# to make the ref human readable
exit_code, output = self.source.check_output(
[self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref],
cwd=self.mirror)
if exit_code == 0:
ref = output.rstrip('\n')
return ref
def stage(self, directory):
fullpath = os.path.join(directory, self.path)
# We need to pass '--no-hardlinks' because there's nothing to
# stop the build from overwriting the files in the .git directory
# inside the sandbox.
self.source.call([self.source.host_git, 'clone', '--no-checkout', '--no-hardlinks', self.mirror, fullpath],
fail="Failed to create git mirror {} in directory: {}".format(self.mirror, fullpath),
fail_temporarily=True)
self.source.call([self.source.host_git, 'checkout', '--force', self.ref],
fail="Failed to checkout git ref {}".format(self.ref),
cwd=fullpath)
def init_workspace(self, directory):
fullpath = os.path.join(directory, self.path)
url = self.source.translate_url(self.url)
self.source.call([self.source.host_git, 'clone', '--no-checkout', self.mirror, fullpath],
fail="Failed to clone git mirror {} in directory: {}".format(self.mirror, fullpath),
fail_temporarily=True)
self.source.call([self.source.host_git, 'remote', 'set-url', 'origin', url],
fail='Failed to add remote origin "{}"'.format(url),
cwd=fullpath)
self.source.call([self.source.host_git, 'checkout', '--force', self.ref],
fail="Failed to checkout git ref {}".format(self.ref),
cwd=fullpath)
# List the submodules (path/url tuples) present at the given ref of this repo
def submodule_list(self):
modules = "{}:{}".format(self.ref, GIT_MODULES)
exit_code, output = self.source.check_output(
[self.source.host_git, 'show', modules], cwd=self.mirror)
# If git show reports error code 128 here, we take it to mean there is
# no .gitmodules file to display for the given revision.
if exit_code == 128:
return
elif exit_code != 0:
raise SourceError(
"{plugin}: Failed to show gitmodules at ref {ref}".format(
plugin=self, ref=self.ref))
content = '\n'.join([l.strip() for l in output.splitlines()])
io = StringIO(content)
parser = RawConfigParser()
parser.read_file(io)
for section in parser.sections():
# validate section name against the 'submodule "foo"' pattern
if re.match(r'submodule "(.*)"', section):
path = parser.get(section, 'path')
url = parser.get(section, 'url')
yield (path, url)
# Fetch the ref which this mirror requires its submodule to have,
# at the given ref of this mirror.
def submodule_ref(self, submodule, ref=None):
if not ref:
ref = self.ref
# list objects in the parent repo tree to find the commit
# object that corresponds to the submodule
_, output = self.source.check_output([self.source.host_git, 'ls-tree', ref, submodule],
fail="ls-tree failed for commit {} and submodule: {}".format(
ref, submodule),
cwd=self.mirror)
# read the commit hash from the output
fields = output.split()
if len(fields) >= 2 and fields[1] == 'commit':
submodule_commit = output.split()[2]
# fail if the commit hash is invalid
if len(submodule_commit) != 40:
raise SourceError("{}: Error reading commit information for submodule '{}'"
.format(self.source, submodule))
return submodule_commit
else:
detail = "The submodule '{}' is defined either in the BuildStream source\n".format(submodule) + \
"definition, or in a .gitmodules file. But the submodule was never added to the\n" + \
"underlying git repository with `git submodule add`."
self.source.warn("{}: Ignoring inconsistent submodule '{}'"
.format(self.source, submodule), detail=detail, warning_token=WARN_INCONSISTENT_SUBMODULE)
return None
class GitSource(Source):
# pylint: disable=attribute-defined-outside-init
#
# Before git 1.9.0, the --tags option meant to fetch tags exclusively;
# since git 1.9.0 the --tags option means to additionally fetch tags.
#
# https://github.com/git/git/blob/master/Documentation/RelNotes/1.9.0.txt
#
git_fetch_tags_exclusive = None
def configure(self, node):
ref = self.node_get_member(node, str, 'ref', None)
config_keys = ['url', 'track', 'ref', 'submodules', 'checkout-submodules', 'ref-format']
self.node_validate(node, config_keys + Source.COMMON_CONFIG_KEYS)
self.original_url = self.node_get_member(node, str, 'url')
self.mirror = GitMirror(self, '', self.original_url, ref, primary=True)
self.tracking = self.node_get_member(node, str, 'track', None)
self.ref_format = self.node_get_member(node, str, 'ref-format', 'sha1')
if self.ref_format not in ['sha1', 'git-describe']:
provenance = self.node_provenance(node, member_name='ref-format')
raise SourceError("{}: Unexpected value for ref-format: {}".format(provenance, self.ref_format))
# At this point we now know if the source has a ref and/or a track.
# If it is missing both then we will be unable to track or build.
if self.mirror.ref is None and self.tracking is None:
raise SourceError("{}: Git sources require a ref and/or track".format(self),
reason="missing-track-and-ref")
self.checkout_submodules = self.node_get_member(node, bool, 'checkout-submodules', True)
self.submodules = []
# Parse a dict of submodule overrides, stored in the submodule_overrides
# and submodule_checkout_overrides dictionaries.
self.submodule_overrides = {}
self.submodule_checkout_overrides = {}
modules = self.node_get_member(node, Mapping, 'submodules', {})
for path, _ in self.node_items(modules):
submodule = self.node_get_member(modules, Mapping, path)
url = self.node_get_member(submodule, str, 'url', None)
# Make sure to mark all URLs that are specified in the configuration
if url:
self.mark_download_url(url, primary=False)
self.submodule_overrides[path] = url
if 'checkout' in submodule:
checkout = self.node_get_member(submodule, bool, 'checkout')
self.submodule_checkout_overrides[path] = checkout
self.mark_download_url(self.original_url)
def preflight(self):
# Check if git is installed, get the binary at the same time
self.host_git = utils.get_host_tool('git')
# Resolve what `--tags` means when calling `git fetch`
self.init_fetch_tags_mode()
def get_unique_key(self):
# Here we want to encode the local name of the repository and
# the ref, if the user changes the alias to fetch the same sources
# from another location, it should not affect the cache key.
key = [self.original_url, self.mirror.ref]
# Only modify the cache key with checkout_submodules if it's something
# other than the default behaviour.
if self.checkout_submodules is False:
key.append({"checkout_submodules": self.checkout_submodules})
# We want the cache key to change if the source was configured
# differently; submodule configuration counts too.
if self.submodule_overrides:
key.append(self.submodule_overrides)
if self.submodule_checkout_overrides:
key.append({"submodule_checkout_overrides": self.submodule_checkout_overrides})
return key
def get_consistency(self):
if self.have_all_refs():
return Consistency.CACHED
elif self.mirror.ref is not None:
return Consistency.RESOLVED
return Consistency.INCONSISTENT
def load_ref(self, node):
self.mirror.ref = self.node_get_member(node, str, 'ref', None)
def get_ref(self):
return self.mirror.ref
def set_ref(self, ref, node):
node['ref'] = self.mirror.ref = ref
def track(self):
# If self.tracking is not specified it's not an error, just silently return
if not self.tracking:
return None
# Resolve the URL for the message
resolved_url = self.translate_url(self.mirror.url)
with self.timed_activity("Tracking {} from {}"
.format(self.tracking, resolved_url),
silent_nested=True):
self.mirror.ensure()
self.mirror._fetch()
# Update self.mirror.ref and node.ref from the self.tracking branch
ret = self.mirror.latest_commit(self.tracking)
return ret
def init_workspace(self, directory):
# XXX: may wish to refactor this as some code dupe with stage()
self.refresh_submodules()
with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True):
self.mirror.init_workspace(directory)
for mirror in self.submodules:
mirror.init_workspace(directory)
def stage(self, directory):
# Need to refresh submodule list here again, because
# it's possible that we did not load in the main process
# with submodules present (source needed fetching) and
# we may not know about the submodule yet come time to build.
#
self.refresh_submodules()
# Stage the main repo in the specified directory
#
with self.timed_activity("Staging {}".format(self.mirror.url), silent_nested=True):
self.mirror.stage(directory)
for mirror in self.submodules:
mirror.stage(directory)
def get_source_fetchers(self):
yield self.mirror
self.refresh_submodules()
for submodule in self.submodules:
yield submodule
def validate_cache(self):
discovered_submodules = {}
unlisted_submodules = []
invalid_submodules = []
for path, url in self.mirror.submodule_list():
discovered_submodules[path] = url
if self.ignore_submodule(path):
continue
override_url = self.submodule_overrides.get(path)
if not override_url:
unlisted_submodules.append((path, url))
# Warn about submodules which are explicitly configured but do not exist
for path, url in self.submodule_overrides.items():
if path not in discovered_submodules:
invalid_submodules.append((path, url))
if invalid_submodules:
detail = []
for path, url in invalid_submodules:
detail.append(" Submodule URL '{}' at path '{}'".format(url, path))
self.warn("{}: Invalid submodules specified".format(self),
warning_token=WARN_INVALID_SUBMODULE,
detail="The following submodules are specified in the source "
"description but do not exist according to the repository\n\n" +
"\n".join(detail))
# Warn about submodules which exist but have not been explicitly configured
if unlisted_submodules:
detail = []
for path, url in unlisted_submodules:
detail.append(" Submodule URL '{}' at path '{}'".format(url, path))
self.warn("{}: Unlisted submodules exist".format(self),
warning_token=WARN_UNLISTED_SUBMODULE,
detail="The following submodules exist but are not specified " +
"in the source description\n\n" +
"\n".join(detail))
# Assert that the ref exists in the track tag/branch, if track has been specified.
ref_in_track = False
if self.tracking:
_, branch = self.check_output([self.host_git, 'branch', '--list', self.tracking,
'--contains', self.mirror.ref],
cwd=self.mirror.mirror)
if branch:
ref_in_track = True
else:
_, tag = self.check_output([self.host_git, 'tag', '--list', self.tracking,
'--contains', self.mirror.ref],
cwd=self.mirror.mirror)
if tag:
ref_in_track = True
if not ref_in_track:
detail = "The ref provided for the element does not exist locally " + \
"in the provided track branch / tag '{}'.\n".format(self.tracking) + \
"You may wish to track the element to update the ref from '{}' ".format(self.tracking) + \
"with `bst track`,\n" + \
"or examine the upstream at '{}' for the specific ref.".format(self.mirror.url)
self.warn("{}: expected ref '{}' was not found in given track '{}' for staged repository: '{}'\n"
.format(self, self.mirror.ref, self.tracking, self.mirror.url),
detail=detail, warning_token=CoreWarnings.REF_NOT_IN_TRACK)
###########################################################
# Local Functions #
###########################################################
def have_all_refs(self):
if not self.mirror.has_ref():
return False
self.refresh_submodules()
for mirror in self.submodules:
if not os.path.exists(mirror.mirror):
return False
if not mirror.has_ref():
return False
return True
# Refreshes the GitMirror objects for submodules
#
# Assumes that we have our mirror and we have the ref which we point to
#
def refresh_submodules(self):
self.mirror.ensure()
submodules = []
for path, url in self.mirror.submodule_list():
# Completely ignore submodules which are disabled for checkout
if self.ignore_submodule(path):
continue
# Allow configuration to override the upstream
# location of the submodules.
override_url = self.submodule_overrides.get(path)
if override_url:
url = override_url
ref = self.mirror.submodule_ref(path)
if ref is not None:
mirror = GitMirror(self, path, url, ref)
submodules.append(mirror)
self.submodules = submodules
# Checks whether the plugin configuration has explicitly
# configured this submodule to be ignored
def ignore_submodule(self, path):
try:
checkout = self.submodule_checkout_overrides[path]
except KeyError:
checkout = self.checkout_submodules
return not checkout
# Resolve GitSource.git_fetch_tags_exclusive
def init_fetch_tags_mode(self):
if self.git_fetch_tags_exclusive is None:
_, version_output = self.check_output([self.host_git, '--version'])
version_output = version_output.strip()
# Extract the version from "git version {version}" string
git_version = version_output.rsplit(maxsplit=1)[-1]
# Parse out the minor and major versions
git_version_split = git_version.split(".")
if len(git_version_split) < 3:
raise SourceError("{}: Failed to parse git version: {}".format(self, version_output))
git_version_major = int(git_version_split[0])
git_version_minor = int(git_version_split[1])
# Resolve whether `git fetch --tags` means to fetch tags exclusively
if git_version_major == 1 and git_version_minor < 9:
type(self).git_fetch_tags_exclusive = True
else:
type(self).git_fetch_tags_exclusive = False
# Plugin entry point
def setup():
return GitSource
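# submodule_list() above reads the .gitmodules blob for a given ref with
# 'git show <ref>:.gitmodules' and feeds it to RawConfigParser to recover the
# (path, url) pairs.  The generator below is a minimal standalone sketch of the
# parsing half of that; it is not used by BuildStream and the name
# '_example_parse_gitmodules' is illustrative only.
def _example_parse_gitmodules(text):
    # Strip leading whitespace so RawConfigParser accepts the indented
    # 'path = ...' and 'url = ...' lines found in .gitmodules files.
    content = '\n'.join(line.strip() for line in text.splitlines())
    parser = RawConfigParser()
    parser.read_file(StringIO(content))
    for section in parser.sections():
        # Only consider sections of the form: submodule "foo"
        if re.match(r'submodule "(.*)"', section):
            yield parser.get(section, 'path'), parser.get(section, 'url')
# Usage: dict(_example_parse_gitmodules(gitmodules_text))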
buildstream-1.6.9/buildstream/plugins/sources/local.py 0000664 0000000 0000000 00000012024 14375152700 0023170 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Tiago Gomes
"""
local - stage local files and directories
=========================================
**Usage:**
.. code:: yaml
# Specify the local source kind
kind: local
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the project relative path to a file or directory
path: files/somefile.txt
"""
import os
import stat
from buildstream import Source, Consistency
from buildstream import utils
class LocalSource(Source):
# pylint: disable=attribute-defined-outside-init
def __init__(self, context, project, meta):
super().__init__(context, project, meta)
# Cached unique key to avoid multiple file system traversals if the unique key is requested multiple times.
self.__unique_key = None
def configure(self, node):
self.node_validate(node, ['path'] + Source.COMMON_CONFIG_KEYS)
self.path = self.node_get_project_path(node, 'path')
self.fullpath = os.path.join(self.get_project_directory(), self.path)
def preflight(self):
pass
def get_unique_key(self):
if self.__unique_key is None:
# Get a list of tuples of the project relative paths and fullpaths
if os.path.isdir(self.fullpath):
filelist = utils.list_relative_paths(self.fullpath)
filelist = [(relpath, os.path.join(self.fullpath, relpath)) for relpath in filelist]
else:
filelist = [(self.path, self.fullpath)]
# Return a list of (relative filename, sha256 digest) tuples, a sorted list
# has already been returned by list_relative_paths()
self.__unique_key = [(relpath, unique_key(fullpath)) for relpath, fullpath in filelist]
return self.__unique_key
def get_consistency(self):
return Consistency.CACHED
# We don't have a ref, we're a local file...
def load_ref(self, node):
pass
def get_ref(self):
return None # pragma: nocover
def set_ref(self, ref, node):
pass # pragma: nocover
def fetch(self):
# Nothing to do here for a local source
pass # pragma: nocover
def stage(self, directory):
# Don't use hardlinks to stage sources, they are not write protected
# in the sandbox.
with self.timed_activity("Staging local files at {}".format(self.path)):
if os.path.isdir(self.fullpath):
files = list(utils.list_relative_paths(self.fullpath, list_dirs=True))
utils.copy_files(self.fullpath, directory, files=files)
else:
destfile = os.path.join(directory, os.path.basename(self.path))
files = [os.path.basename(self.path)]
utils.safe_copy(self.fullpath, destfile)
for f in files:
# Non empty directories are not listed by list_relative_paths
dirs = f.split(os.sep)
for i in range(1, len(dirs)):
d = os.path.join(directory, *(dirs[:i]))
assert os.path.isdir(d) and not os.path.islink(d)
os.chmod(d, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
path = os.path.join(directory, f)
if os.path.islink(path):
pass
elif os.path.isdir(path):
os.chmod(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
else:
st = os.stat(path)
if st.st_mode & stat.S_IXUSR:
os.chmod(path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
else:
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
# Create a unique key for a file
def unique_key(filename):
# Return some hard coded things for files which
# have no content to calculate a key for
if os.path.isdir(filename):
return "0"
elif os.path.islink(filename):
# For a symbolic link, use the link target as it's unique identifier
return os.readlink(filename)
return utils.sha256sum(filename)
# Plugin entry point
def setup():
return LocalSource
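# get_unique_key() above captures the full content of the staged path as a list
# of (relative path, digest) pairs, where directories contribute a constant,
# symlinks contribute their target and regular files contribute their sha256
# digest.  Below is a minimal standalone sketch of the same scheme using only the
# standard library; it is not used by BuildStream and the name
# '_example_tree_key' is illustrative only.
def _example_tree_key(rootdir):
    import hashlib

    key = []
    for dirpath, dirnames, filenames in os.walk(rootdir):
        for name in dirnames + filenames:
            fullpath = os.path.join(dirpath, name)
            relpath = os.path.relpath(fullpath, rootdir)
            if os.path.islink(fullpath):
                digest = os.readlink(fullpath)   # identify symlinks by their target
            elif os.path.isdir(fullpath):
                digest = "0"                     # directories carry no content
            else:
                with open(fullpath, 'rb') as f:
                    digest = hashlib.sha256(f.read()).hexdigest()
            key.append((relpath, digest))
    # Sort for a stable, traversal-order-independent key
    return sorted(key)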
buildstream-1.6.9/buildstream/plugins/sources/ostree.py 0000664 0000000 0000000 00000021605 14375152700 0023404 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Andrew Leeming
# Tiago Gomes
"""
ostree - stage files from an OSTree repository
==============================================
**Usage:**
.. code:: yaml
# Specify the ostree source kind
kind: ostree
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the repository url, using an alias defined
# in your project configuration is recommended.
url: upstream:runtime
# Optionally specify a symbolic tracking branch or tag, this
# will be used to update the 'ref' when refreshing the pipeline.
track: runtime/x86_64/stable
# Specify the commit checksum, this must be specified in order
# to checkout sources and build, but can be automatically
# updated if the 'track' attribute was specified.
ref: d63cbb6fdc0bbdadc4a1b92284826a6d63a7ebcd
# For signed ostree repositories, specify a local project relative
# path to the public verifying GPG key for this remote.
gpg-key: keys/runtime.gpg
"""
import os
import shutil
from buildstream import Source, SourceError, Consistency
from buildstream import utils
class OSTreeSource(Source):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
self.node_validate(node, ['url', 'ref', 'track', 'gpg-key'] + Source.COMMON_CONFIG_KEYS)
self.ostree = None
self.original_url = self.node_get_member(node, str, 'url')
self.url = self.translate_url(self.original_url)
self.ref = self.node_get_member(node, str, 'ref', None)
self.tracking = self.node_get_member(node, str, 'track', None)
self.mirror = os.path.join(self.get_mirror_directory(),
utils.url_directory_name(self.original_url))
# At this point we now know if the source has a ref and/or a track.
# If it is missing both then we will be unable to track or build.
if self.ref is None and self.tracking is None:
raise SourceError("{}: OSTree sources require a ref and/or track".format(self),
reason="missing-track-and-ref")
# (optional) Not all repos are signed. But if they are, get the gpg key
self.gpg_key_path = None
if self.node_get_member(node, str, 'gpg-key', None):
self.gpg_key = self.node_get_project_path(node, 'gpg-key',
check_is_file=True)
self.gpg_key_path = os.path.join(self.get_project_directory(), self.gpg_key)
# Our OSTree repo handle
self.repo = None
def preflight(self):
# Check if ostree is installed, get the binary at the same time
self.ostree = utils.get_host_tool("ostree")
def get_unique_key(self):
return [self.original_url, self.ref]
def load_ref(self, node):
self.ref = self.node_get_member(node, str, 'ref', None)
def get_ref(self):
return self.ref
def set_ref(self, ref, node):
node['ref'] = self.ref = ref
def track(self):
# If self.tracking is not specified it's not an error, just silently return
if not self.tracking:
return None
self.ensure()
remote_name = self.ensure_remote(self.url)
with self.timed_activity(
"Fetching tracking ref '{}' from origin: {}".format(
self.tracking, self.url
)
):
self.call(
[
self.ostree,
"pull",
"--repo",
self.mirror,
remote_name,
self.tracking,
],
fail="Failed to fetch tracking ref '{}' from origin {}".format(
self.tracking, self.url
),
)
return self.check_output(
[self.ostree, "rev-parse", "--repo", self.mirror, self.tracking],
fail="Failed to compute checksum of '{}' on '{}'".format(
self.tracking, self.mirror
),
)[1].strip()
def fetch(self):
self.ensure()
remote_name = self.ensure_remote(self.url)
with self.timed_activity(
"Fetching remote ref: {} from origin: {}".format(
self.ref, self.url
)
):
self.call(
[
self.ostree,
"pull",
"--repo",
self.mirror,
remote_name,
self.ref,
],
fail="Failed to fetch ref '{}' from origin: {}".format(
self.ref, remote_name
),
)
def stage(self, directory):
self.ensure()
# Checkout self.ref into the specified directory
with self.tempdir() as tmpdir:
checkoutdir = os.path.join(tmpdir, "checkout")
with self.timed_activity(
"Staging ref: {} from origin: {}".format(self.ref, self.url)
):
self.call(
[
self.ostree,
"checkout",
"--repo",
self.mirror,
"--user-mode",
self.ref,
checkoutdir,
],
fail="Failed to checkout ref '{}' from origin: {}".format(
self.ref, self.url
),
)
# The target directory is guaranteed to exist, here we must move the
# content of our checkout into the existing target directory.
#
# We may not be able to create the target directory as its parent
# may be readonly, and the directory itself is often a mount point.
#
try:
for entry in os.listdir(checkoutdir):
source_path = os.path.join(checkoutdir, entry)
shutil.move(source_path, directory)
except (shutil.Error, OSError) as e:
raise SourceError(
"{}: Failed to move ostree checkout {} from '{}' to '{}'\n\n{}".format(
self, self.url, tmpdir, directory, e
)
) from e
def get_consistency(self):
if self.ref is None:
return Consistency.INCONSISTENT
elif os.path.exists(self.mirror):
if self.call([self.ostree, "show", "--repo", self.mirror, self.ref]) == 0:
return Consistency.CACHED
return Consistency.RESOLVED
#
# Local helpers
#
def ensure(self):
if not os.path.exists(self.mirror):
self.status("Creating local mirror for {}".format(self.url))
self.call(
[
self.ostree,
"init",
"--repo",
self.mirror,
"--mode",
"archive-z2",
],
fail="Unable to create local mirror for repository",
)
self.call(
[
self.ostree,
"config",
"--repo",
self.mirror,
"set",
"core.min-free-space-percent",
"0",
],
fail="Unable to disable minimum disk space checks",
)
def ensure_remote(self, url):
if self.original_url == self.url:
remote_name = "origin"
else:
remote_name = utils.url_directory_name(url)
command = [
self.ostree,
"remote",
"add",
"--if-not-exists",
"--repo",
self.mirror,
remote_name,
url,
]
if self.gpg_key_path:
command.extend(["--gpg-import", self.gpg_key_path])
else:
command.extend(["--no-gpg-verify"])
self.call(command, fail="Failed to configure origin {}".format(url))
return remote_name
# Plugin entry point
def setup():
return OSTreeSource
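# The plugin above drives the host 'ostree' tool: it creates an archive-z2
# mirror repository, configures a remote, pulls the wanted commit and finally
# checks it out in user mode.  Below is a minimal standalone sketch of that
# sequence using subprocess; it is not used by BuildStream, GPG verification and
# error recovery are omitted, and the name '_example_ostree_checkout' is
# illustrative only.
def _example_ostree_checkout(ostree, url, ref, mirror, checkoutdir):
    import subprocess

    # Create the local mirror repository on first use
    if not os.path.exists(mirror):
        subprocess.run([ostree, "init", "--repo", mirror, "--mode", "archive-z2"], check=True)
    # Configure the remote, pull the commit and check it out without root privileges
    subprocess.run([ostree, "remote", "add", "--if-not-exists", "--no-gpg-verify",
                    "--repo", mirror, "origin", url], check=True)
    subprocess.run([ostree, "pull", "--repo", mirror, "origin", ref], check=True)
    subprocess.run([ostree, "checkout", "--repo", mirror, "--user-mode", ref, checkoutdir], check=True)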
buildstream-1.6.9/buildstream/plugins/sources/patch.py 0000664 0000000 0000000 00000006014 14375152700 0023177 0 ustar 00root root 0000000 0000000 #
# Copyright Bloomberg Finance LP
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Chandan Singh
# Tiago Gomes
"""
patch - apply locally stored patches
====================================
**Host dependencies:**
* patch
**Usage:**
.. code:: yaml
# Specify the local source kind
kind: patch
# Specify the project relative path to a patch file
path: files/somefile.diff
# Optionally specify the root directory for the patch
# directory: path/to/stage
# Optionally specify the strip level, defaults to 1
strip-level: 1
"""
import os
from buildstream import Source, SourceError, Consistency
from buildstream import utils
class PatchSource(Source):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
self.path = self.node_get_project_path(node, 'path',
check_is_file=True)
self.strip_level = self.node_get_member(node, int, "strip-level", 1)
self.fullpath = os.path.join(self.get_project_directory(), self.path)
def preflight(self):
# Check if patch is installed, get the binary at the same time
self.host_patch = utils.get_host_tool("patch")
def get_unique_key(self):
return [self.path, utils.sha256sum(self.fullpath), self.strip_level]
def get_consistency(self):
return Consistency.CACHED
def load_ref(self, node):
pass
def get_ref(self):
return None # pragma: nocover
def set_ref(self, ref, node):
pass # pragma: nocover
def fetch(self):
# Nothing to do here for a local source
pass # pragma: nocover
def stage(self, directory):
with self.timed_activity("Applying local patch: {}".format(self.path)):
# Bail out with a comprehensive message if the target directory is empty
if not os.listdir(directory):
raise SourceError("Nothing to patch in directory '{}'".format(directory),
reason="patch-no-files")
strip_level_option = "-p{}".format(self.strip_level)
self.call([self.host_patch, strip_level_option, "-i", self.fullpath, "-d", directory],
fail="Failed to apply patch {}".format(self.path))
# Plugin entry point
def setup():
return PatchSource
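# stage() above shells out to the host 'patch' tool with '-p<strip-level>',
# '-i <patch file>' and '-d <target directory>', refusing to run against an empty
# directory.  Below is a minimal standalone sketch of the same call using
# subprocess; it is not used by BuildStream and the name '_example_apply_patch'
# is illustrative only.
def _example_apply_patch(patch_tool, patch_file, directory, strip_level=1):
    import subprocess

    # Bail out early rather than let 'patch' fail confusingly on an empty tree
    if not os.listdir(directory):
        raise RuntimeError("Nothing to patch in directory '{}'".format(directory))
    subprocess.run([patch_tool, "-p{}".format(strip_level), "-i", patch_file, "-d", directory],
                   check=True)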
buildstream-1.6.9/buildstream/plugins/sources/pip.py 0000664 0000000 0000000 00000021005 14375152700 0022665 0 ustar 00root root 0000000 0000000 #
# Copyright 2018 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Chandan Singh
"""
pip - stage python packages using pip
=====================================
**Host dependencies:**
* ``pip`` python module
This plugin will download source distributions for specified packages using
``pip`` but will not install them. It is expected that the elements using this
source will install the downloaded packages.
Downloaded tarballs will be stored in a directory called ".bst_pip_downloads".
**Usage:**
.. code:: yaml
# Specify the pip source kind
kind: pip
# Optionally specify index url, defaults to PyPi
# This url is used to discover new versions of packages and download them
# Projects intending to mirror their sources to a permanent location should
# use an aliased url, and declare the alias in the project configuration
url: https://mypypi.example.com/simple
# Optionally specify the path to requirements files
# Note that either 'requirements-files' or 'packages' must be defined
requirements-files:
- requirements.txt
# Optionally specify a list of additional packages
# Note that either 'requirements-files' or 'packages' must be defined
packages:
- flake8
# Optionally specify a relative staging directory
directory: path/to/stage
# Specify the ref. It is a list of strings of the format
# "<package-name>==<version>", separated by "\\n".
# Usually this will be contents of a requirements.txt file where all
# package versions have been frozen.
ref: "flake8==3.5.0\\nmccabe==0.6.1\\npkg-resources==0.0.0\\npycodestyle==2.3.1\\npyflakes==1.6.0"
.. note::
The ``pip`` plugin is available since :ref:`format version 16 `
"""
import errno
import hashlib
import os
import re
from buildstream import Consistency, Source, SourceError, utils
_OUTPUT_DIRNAME = '.bst_pip_downloads'
_PYPI_INDEX_URL = 'https://pypi.org/simple/'
# Used only for finding pip command
_PYTHON_VERSIONS = [
'python2.7',
'python3.0',
'python3.1',
'python3.2',
'python3.3',
'python3.4',
'python3.5',
'python3.6',
'python3.7',
'python3.8',
'python3.9',
'python3.10',
'python3.11',
]
# List of allowed extensions taken from
# https://docs.python.org/3/distutils/sourcedist.html.
# Names of source distribution archives must be of the form
# '%{package-name}-%{version}.%{extension}'.
_SDIST_RE = re.compile(
r'^([a-zA-Z0-9]+?)-(.+).(?:tar|tar.bz2|tar.gz|tar.xz|tar.Z|zip)$',
re.IGNORECASE)
class PipSource(Source):
# pylint: disable=attribute-defined-outside-init
# We need access to previous sources at track time to use requirements.txt
# but not at fetch time as self.ref should contain sufficient information
# for this plugin
BST_REQUIRES_PREVIOUS_SOURCES_TRACK = True
def configure(self, node):
self.node_validate(node, ['url', 'packages', 'ref', 'requirements-files'] +
Source.COMMON_CONFIG_KEYS)
self.ref = self.node_get_member(node, str, 'ref', None)
self.original_url = self.node_get_member(node, str, 'url', _PYPI_INDEX_URL)
self.index_url = self.translate_url(self.original_url)
self.packages = self.node_get_member(node, list, 'packages', [])
self.requirements_files = self.node_get_member(node, list, 'requirements-files', [])
if not (self.packages or self.requirements_files):
raise SourceError("{}: Either 'packages' or 'requirements-files' must be specified". format(self))
def preflight(self):
# Try to find a pip version that supports download command
self.host_pip = None
for python in reversed(_PYTHON_VERSIONS):
try:
host_python = utils.get_host_tool(python)
rc = self.call([host_python, '-m', 'pip', 'download', '--help'])
if rc == 0:
self.host_pip = [host_python, '-m', 'pip']
break
except utils.ProgramNotFoundError:
pass
if self.host_pip is None:
raise SourceError("{}: Unable to find a suitable pip command".format(self))
def get_unique_key(self):
return [self.original_url, self.ref]
def get_consistency(self):
if not self.ref:
return Consistency.INCONSISTENT
if os.path.exists(self._mirror) and os.listdir(self._mirror):
return Consistency.CACHED
return Consistency.RESOLVED
def get_ref(self):
return self.ref
def load_ref(self, node):
self.ref = self.node_get_member(node, str, 'ref', None)
def set_ref(self, ref, node):
node['ref'] = self.ref = ref
def track(self, previous_sources_dir):
# XXX pip does not offer any public API other than the CLI tool so it
# is not feasible to correctly parse the requirements file or to check
# which package versions pip is going to install.
# See https://pip.pypa.io/en/stable/user_guide/#using-pip-from-your-program
# for details.
# As a result, we have to wastefully download the packages during track.
with self.tempdir() as tmpdir:
install_args = self.host_pip + ['download',
'--no-binary', ':all:',
'--index-url', self.index_url,
'--dest', tmpdir]
for requirement_file in self.requirements_files:
fpath = os.path.join(previous_sources_dir, requirement_file)
install_args += ['-r', fpath]
install_args += self.packages
self.call(install_args, fail="Failed to install python packages")
reqs = self._parse_sdist_names(tmpdir)
return '\n'.join(["{}=={}".format(pkg, ver) for pkg, ver in reqs])
def fetch(self):
with self.tempdir() as tmpdir:
packages = self.ref.strip().split('\n')
package_dir = os.path.join(tmpdir, 'packages')
os.makedirs(package_dir)
self.call(self.host_pip + ['download',
'--no-binary', ':all:',
'--index-url', self.index_url,
'--dest', package_dir] + packages,
fail="Failed to install python packages: {}".format(packages))
# If the mirror directory already exists, assume that some other
# process has fetched the sources before us and ensure that we do
# not raise an error in that case.
try:
os.makedirs(self._mirror)
os.rename(package_dir, self._mirror)
except FileExistsError:
return
except OSError as e:
if e.errno != errno.ENOTEMPTY:
raise
def stage(self, directory):
with self.timed_activity("Staging Python packages", silent_nested=True):
utils.copy_files(self._mirror, os.path.join(directory, _OUTPUT_DIRNAME))
# Directory where this source should stage its files
#
@property
def _mirror(self):
if not self.ref:
return None
return os.path.join(self.get_mirror_directory(),
utils.url_directory_name(self.original_url),
hashlib.sha256(self.ref.encode()).hexdigest())
# Parse names of downloaded source distributions
#
# Args:
# basedir (str): Directory containing source distribution archives
#
# Returns:
# (list): List of (package_name, version) tuples in sorted order
#
def _parse_sdist_names(self, basedir):
reqs = []
for f in os.listdir(basedir):
pkg_match = _SDIST_RE.match(f)
if pkg_match:
reqs.append(pkg_match.groups())
return sorted(reqs)
def setup():
return PipSource
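# _parse_sdist_names() above recovers (package, version) pairs from the filenames
# of downloaded source distributions with _SDIST_RE, and track() joins them into
# the newline-separated "<name>==<version>" ref string.  Below is a minimal
# standalone sketch of that round trip; it is not used by BuildStream and the
# name '_example_ref_from_downloads' is illustrative only.
def _example_ref_from_downloads(filenames):
    # e.g. ["flake8-3.5.0.tar.gz", "mccabe-0.6.1.tar.gz"] -> "flake8==3.5.0\nmccabe==0.6.1"
    reqs = sorted(m.groups() for m in (_SDIST_RE.match(f) for f in filenames) if m)
    return '\n'.join("{}=={}".format(pkg, ver) for pkg, ver in reqs)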
buildstream-1.6.9/buildstream/plugins/sources/remote.py 0000664 0000000 0000000 00000005441 14375152700 0023376 0 ustar 00root root 0000000 0000000 #
# Copyright Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Ed Baunton
"""
remote - stage files from remote urls
=====================================
**Usage:**
.. code:: yaml
# Specify the remote source kind
kind: remote
# Optionally specify a relative staging directory
# directory: path/to/stage
# Optionally specify a relative staging filename.
# If not specified, the basename of the url will be used.
# filename: customfilename
# Specify the url. Using an alias defined in your project
# configuration is encouraged. 'bst track' will update the
# sha256sum in 'ref' to the downloaded file's sha256sum.
url: upstream:foo
# Specify the ref. It's a sha256sum of the file you download.
ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
.. note::
The ``remote`` plugin is available since :ref:`format version 10 `
"""
import os
import stat
from buildstream import SourceError, utils
from ._downloadablefilesource import DownloadableFileSource
class RemoteSource(DownloadableFileSource):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
super().configure(node)
self.filename = self.node_get_member(node, str, 'filename', os.path.basename(self.url))
if os.sep in self.filename:
raise SourceError('{}: filename parameter cannot contain directories'.format(self),
reason="filename-contains-directory")
self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['filename'])
def get_unique_key(self):
return super().get_unique_key() + [self.filename]
def stage(self, directory):
# Same as in local plugin, don't use hardlinks to stage sources, they
# are not write protected in the sandbox.
dest = os.path.join(directory, self.filename)
with self.timed_activity("Staging remote file to {}".format(dest)):
utils.safe_copy(self._get_mirror_file(), dest)
os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
def setup():
return RemoteSource
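# configure() above rejects 'filename' values containing a directory separator,
# and stage() copies the cached download into place before making it read-only
# for group and other.  Below is a minimal standalone sketch of that staging
# step; it is not used by BuildStream and the name '_example_stage_file' is
# illustrative only.
def _example_stage_file(cached_file, directory, filename):
    import shutil

    if os.sep in filename:
        raise ValueError("filename parameter cannot contain directories")
    dest = os.path.join(directory, filename)
    shutil.copy2(cached_file, dest)
    # rw for the owner, read-only for group and other
    os.chmod(dest, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)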
buildstream-1.6.9/buildstream/plugins/sources/tar.py 0000664 0000000 0000000 00000016066 14375152700 0022676 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jonathan Maw
"""
tar - stage files from tar archives
===================================
**Host dependencies:**
* lzip (for .tar.lz files)
**Usage:**
.. code:: yaml
# Specify the tar source kind
kind: tar
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the tar url. Using an alias defined in your project
# configuration is encouraged. 'bst track' will update the
# sha256sum in 'ref' to the downloaded file's sha256sum.
url: upstream:foo.tar
# Specify the ref. It's a sha256sum of the file you download.
ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
# Specify a glob pattern to indicate the base directory to extract
# from the tarball. The first matching directory will be used.
#
# Note that this is '*' by default since most standard release
# tarballs contain a self named subdirectory at the root which
# contains the files one normally wants to extract to build.
#
# To extract the root of the tarball directly, this can be set
# to an empty string.
base-dir: '*'
"""
import os
import tarfile
from contextlib import contextmanager, ExitStack
from tempfile import TemporaryFile
from buildstream import SourceError
from buildstream import utils
from ._downloadablefilesource import DownloadableFileSource
class TarSource(DownloadableFileSource):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
super().configure(node)
self.base_dir = self.node_get_member(node, str, 'base-dir', '*') or None
self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['base-dir'])
def preflight(self):
self.host_lzip = None
if self.url.endswith('.lz'):
self.host_lzip = utils.get_host_tool('lzip')
def get_unique_key(self):
return super().get_unique_key() + [self.base_dir]
@contextmanager
def _run_lzip(self):
assert self.host_lzip
with TemporaryFile() as lzip_stdout:
with ExitStack() as context:
lzip_file = context.enter_context(open(self._get_mirror_file(), 'r'))
self.call([self.host_lzip, '-d'],
stdin=lzip_file,
stdout=lzip_stdout)
lzip_stdout.seek(0, 0)
yield lzip_stdout
@contextmanager
def _get_tar(self):
if self.url.endswith('.lz'):
with self._run_lzip() as lzip_dec:
with tarfile.open(fileobj=lzip_dec, mode='r:') as tar:
yield tar
else:
with tarfile.open(self._get_mirror_file()) as tar:
yield tar
def stage(self, directory):
try:
with self._get_tar() as tar:
base_dir = None
if self.base_dir:
base_dir = self._find_base_dir(tar, self.base_dir)
if base_dir:
tar.extractall(path=directory, members=self._extract_members(tar, base_dir))
else:
tar.extractall(path=directory)
except (tarfile.TarError, OSError) as e:
raise SourceError("{}: Error staging source: {}".format(self, e)) from e
# Override and translate which filenames to extract
def _extract_members(self, tar, base_dir):
if not base_dir.endswith(os.sep):
base_dir = base_dir + os.sep
l = len(base_dir)
for member in tar.getmembers():
# First, ensure that a member never starts with `./`
if member.path.startswith('./'):
member.path = member.path[2:]
# Now extract only the paths which match the normalized path
if member.path.startswith(base_dir):
# If it's got a link name, give it the same treatment, we
# need the link targets to match up with what we are staging
#
# NOTE: It's possible this is not perfect, we may need to
# consider links which point outside of the chosen
# base directory.
#
if member.type == tarfile.LNKTYPE:
member.linkname = member.linkname[l:]
member.path = member.path[l:]
yield member
# We want to iterate over all paths of a tarball, but getmembers()
# is not enough because some tarballs simply do not contain the leading
# directory paths for the archived files.
def _list_tar_paths(self, tar):
visited = {}
for member in tar.getmembers():
# Remove any possible leading './' to offer more consistent behavior
# across tarballs encoded with or without a leading '.'
member_name = member.name.lstrip('./')
if not member.isdir():
# Loop over the components of a path, for a path of a/b/c/d
# we will first visit 'a', then 'a/b' and then 'a/b/c', excluding
# the final component
components = member_name.split('/')
for i in range(len(components) - 1):
dir_component = '/'.join([components[j] for j in range(i + 1)])
if dir_component not in visited:
visited[dir_component] = True
try:
# Don't yield directory members which actually do
# exist in the archive
_ = tar.getmember(dir_component)
except KeyError:
if dir_component != '.':
yield dir_component
continue
# Avoid considering the '.' directory, if any is included in the archive
# this is to avoid the default 'base-dir: *' value behaving differently
# depending on whether the tarball was encoded with a leading '.' or not
elif member_name == '.':
continue
yield member_name
def _find_base_dir(self, tar, pattern):
paths = self._list_tar_paths(tar)
matches = sorted(list(utils.glob(paths, pattern)))
if not matches:
raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern))
return matches[0]
def setup():
return TarSource
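# --- Illustrative sketch (not part of the original module) ---
# A standalone approximation of the idea behind _list_tar_paths() above: some
# tarballs omit explicit directory entries, so the implied parent directories
# are synthesized from the file paths before globbing for the base directory.
# This works on plain strings rather than a TarFile and is illustration only.
def _example_implied_paths(file_paths):
    visited = set()
    for path in file_paths:
        components = path.lstrip('./').split('/')
        for i in range(1, len(components)):
            parent = '/'.join(components[:i])
            if parent not in visited:
                visited.add(parent)
                yield parent
        yield '/'.join(components)

# list(_example_implied_paths(['foo-1.0/src/main.c']))
#   -> ['foo-1.0', 'foo-1.0/src', 'foo-1.0/src/main.c']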
buildstream-1.6.9/buildstream/plugins/sources/zip.py 0000664 0000000 0000000 00000015044 14375152700 0022705 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Mathieu Bridon
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Mathieu Bridon
"""
zip - stage files from zip archives
===================================
**Usage:**
.. code:: yaml
# Specify the zip source kind
kind: zip
# Optionally specify a relative staging directory
# directory: path/to/stage
# Specify the zip url. Using an alias defined in your project
# configuration is encouraged. 'bst track' will update the
# sha256sum in 'ref' to the downloaded file's sha256sum.
url: upstream:foo.zip
# Specify the ref. It's a sha256sum of the file you download.
ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
# Specify a glob pattern to indicate the base directory to extract
# from the archive. The first matching directory will be used.
#
# Note that this is '*' by default since most standard release
# archives contain a self named subdirectory at the root which
# contains the files one normally wants to extract to build.
#
# To extract the root of the archive directly, this can be set
# to an empty string.
base-dir: '*'
.. attention::
File permissions are not preserved. All extracted directories have
permissions 0755 and all extracted files have permissions 0644.
"""
import os
import zipfile
import stat
from buildstream import SourceError
from buildstream import utils
from ._downloadablefilesource import DownloadableFileSource
class ZipSource(DownloadableFileSource):
# pylint: disable=attribute-defined-outside-init
def configure(self, node):
super().configure(node)
self.base_dir = self.node_get_member(node, str, 'base-dir', '*') or None
self.node_validate(node, DownloadableFileSource.COMMON_CONFIG_KEYS + ['base-dir'])
def get_unique_key(self):
return super().get_unique_key() + [self.base_dir]
def stage(self, directory):
exec_rights = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) & ~(stat.S_IWGRP | stat.S_IWOTH)
noexec_rights = exec_rights & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
try:
with zipfile.ZipFile(self._get_mirror_file()) as archive:
base_dir = None
if self.base_dir:
base_dir = self._find_base_dir(archive, self.base_dir)
if base_dir:
members = self._extract_members(archive, base_dir)
else:
members = archive.namelist()
for member in members:
written = archive.extract(member, path=directory)
# zipfile.extract might create missing directories
rel = os.path.relpath(written, start=directory)
assert not os.path.isabs(rel)
rel = os.path.dirname(rel)
while rel:
os.chmod(os.path.join(directory, rel), exec_rights)
rel = os.path.dirname(rel)
if os.path.islink(written):
pass
elif os.path.isdir(written):
os.chmod(written, exec_rights)
else:
os.chmod(written, noexec_rights)
except (zipfile.BadZipFile, zipfile.LargeZipFile, OSError) as e:
raise SourceError("{}: Error staging source: {}".format(self, e)) from e
# Override and translate which filenames to extract
def _extract_members(self, archive, base_dir):
if not base_dir.endswith(os.sep):
base_dir = base_dir + os.sep
l = len(base_dir)
for member in archive.infolist():
if member.filename == base_dir:
continue
if member.filename.startswith(base_dir):
member.filename = member.filename[l:]
yield member
# We want to iterate over all paths of an archive, but namelist()
# is not enough because some archives simply do not contain the leading
# directory paths for the archived files.
def _list_archive_paths(self, archive):
visited = {}
for member in archive.infolist():
# ZipInfo.is_dir() is only available in python >= 3.6, but all
# it does is check for a trailing '/' in the name
#
if not member.filename.endswith('/'):
# Loop over the components of a path, for a path of a/b/c/d
# we will first visit 'a', then 'a/b' and then 'a/b/c', excluding
# the final component
components = member.filename.split('/')
for i in range(len(components) - 1):
dir_component = '/'.join([components[j] for j in range(i + 1)])
if dir_component not in visited:
visited[dir_component] = True
try:
# Don't yield directory members which actually do
# exist in the archive
_ = archive.getinfo(dir_component)
except KeyError:
if dir_component != '.':
yield dir_component
continue
# Avoid considering the '.' directory, if any is included in the archive
# this is to avoid the default 'base-dir: *' value behaving differently
# depending on whether the archive was encoded with a leading '.' or not
elif member.filename == '.' or member.filename == './':
continue
yield member.filename
def _find_base_dir(self, archive, pattern):
paths = self._list_archive_paths(archive)
matches = sorted(list(utils.glob(paths, pattern)))
if not matches:
raise SourceError("{}: Could not find base directory matching pattern: {}".format(self, pattern))
return matches[0]
def setup():
return ZipSource
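# --- Illustrative sketch (not part of the original module) ---
# The permission masks computed in ZipSource.stage() above resolve to the
# fixed modes documented in the module docstring: 0755 for directories and
# 0644 for regular files. This helper only makes that arithmetic explicit.
def _example_permission_masks():
    import stat
    exec_rights = (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) & ~(stat.S_IWGRP | stat.S_IWOTH)
    noexec_rights = exec_rights & ~(stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    assert exec_rights == 0o755 and noexec_rights == 0o644
    return exec_rights, noexec_rights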
buildstream-1.6.9/buildstream/sandbox/ 0000775 0000000 0000000 00000000000 14375152700 0020017 5 ustar 00root root 0000000 0000000 buildstream-1.6.9/buildstream/sandbox/__init__.py 0000664 0000000 0000000 00000001655 14375152700 0022137 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
from .sandbox import Sandbox, SandboxFlags
from ._sandboxchroot import SandboxChroot
from ._sandboxbwrap import SandboxBwrap
from ._sandboxdummy import SandboxDummy
buildstream-1.6.9/buildstream/sandbox/_config.py 0000664 0000000 0000000 00000004115 14375152700 0021776 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jim MacArthur
# SandboxConfig
#
# A container for sandbox configuration data. We want the internals
# of this to be opaque, hence putting it in its own private file.
class SandboxConfig():
def __init__(self, build_uid, build_gid, build_os=None, build_arch=None):
self.build_uid = build_uid
self.build_gid = build_gid
self.build_os = build_os
self.build_arch = build_arch
# get_unique_key():
#
# This returns the SandboxConfig's contribution
# to an element's cache key.
#
# Returns:
# (dict): A dictionary to add to an element's cache key
#
def get_unique_key(self):
# Currently operating system and machine architecture
# are not configurable and we have no sandbox implementation
# which can conform to such configurations.
#
# However this should be the right place to support
# such configurations in the future.
#
unique_key = {
'os': self.build_os,
'arch': self.build_arch
}
# Avoid breaking cache key calculation with
# the addition of configurable build uid/gid
if self.build_uid != 0:
unique_key['build-uid'] = self.build_uid
if self.build_gid != 0:
unique_key['build-gid'] = self.build_gid
return unique_key
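# --- Illustrative sketch (not part of the original module) ---
# A quick demonstration of the cache-key behaviour documented above: the
# default uid/gid of 0 is left out of the unique key so that pre-existing
# cache keys stay valid, while non-default values are recorded. The os and
# arch values here are hypothetical.
def _example_unique_keys():
    default = SandboxConfig(0, 0, build_os='linux', build_arch='x86_64')
    custom = SandboxConfig(1000, 1000, build_os='linux', build_arch='x86_64')
    # default.get_unique_key() -> {'os': 'linux', 'arch': 'x86_64'}
    # custom.get_unique_key()  -> {'os': 'linux', 'arch': 'x86_64',
    #                              'build-uid': 1000, 'build-gid': 1000}
    return default.get_unique_key(), custom.get_unique_key()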
buildstream-1.6.9/buildstream/sandbox/_mount.py 0000664 0000000 0000000 00000012331 14375152700 0021672 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
import os
from collections import OrderedDict
from contextlib import contextmanager, ExitStack
from .. import utils
from .._fuse import SafeHardlinks
# Mount()
#
# Helper data object representing a single mount point in the mount map
#
class Mount():
def __init__(self, sandbox, mount_point, safe_hardlinks):
scratch_directory = sandbox._get_scratch_directory()
root_directory = sandbox.get_directory()
self.mount_point = mount_point
self.safe_hardlinks = safe_hardlinks
# FIXME: When the criteria for mounting something and its parent
# mount is identical, then there is no need to mount an additional
# fuse layer (i.e. if the root is read-write and there is a directory
# marked for staged artifacts directly within the rootfs, they can
# safely share the same fuse layer).
#
# In these cases it would be saner to redirect the sub-mount to
# a regular mount point within the parent's redirected mount.
#
if self.safe_hardlinks:
# Redirected mount
self.mount_origin = os.path.join(root_directory, mount_point.lstrip(os.sep))
self.mount_base = os.path.join(scratch_directory, utils.url_directory_name(mount_point))
self.mount_source = os.path.join(self.mount_base, 'mount')
self.mount_tempdir = os.path.join(self.mount_base, 'temp')
os.makedirs(self.mount_origin, exist_ok=True)
os.makedirs(self.mount_tempdir, exist_ok=True)
else:
# No redirection needed
self.mount_source = os.path.join(root_directory, mount_point.lstrip(os.sep))
external_mount_sources = sandbox._get_mount_sources()
external_mount_source = external_mount_sources.get(mount_point)
if external_mount_source is None:
os.makedirs(self.mount_source, exist_ok=True)
else:
if os.path.isdir(external_mount_source):
os.makedirs(self.mount_source, exist_ok=True)
else:
# When mounting a regular file, ensure the parent
# directory exists in the sandbox; and that an empty
# file is created at the mount location.
parent_dir = os.path.dirname(self.mount_source.rstrip('/'))
os.makedirs(parent_dir, exist_ok=True)
if not os.path.exists(self.mount_source):
with open(self.mount_source, 'w', encoding='utf-8'):
pass
@contextmanager
def mounted(self, sandbox):
if self.safe_hardlinks:
mount = SafeHardlinks(self.mount_origin, self.mount_tempdir)
with mount.mounted(self.mount_source):
yield
else:
# Nothing to mount here
yield
# MountMap()
#
# Helper object for mapping of the sandbox mountpoints
#
# Args:
# sandbox (Sandbox): The sandbox object
# root_readonly (bool): Whether the sandbox root is readonly
#
class MountMap():
def __init__(self, sandbox, root_readonly):
# We will be doing the mounts in the order in which they were declared.
self.mounts = OrderedDict()
# We want safe hardlinks on rootfs whenever root is not readonly
self.mounts['/'] = Mount(sandbox, '/', not root_readonly)
for mark in sandbox._get_marked_directories():
directory = mark['directory']
artifact = mark['artifact']
# We want safe hardlinks for any non-root directory where
# artifacts will be staged to
self.mounts[directory] = Mount(sandbox, directory, artifact)
# get_mount_source()
#
# Gets the host directory where the mountpoint in the
# sandbox should be bind mounted from
#
# Args:
# mountpoint (str): The absolute mountpoint path inside the sandbox
#
# Returns:
# The host path to be mounted at the mount point
#
def get_mount_source(self, mountpoint):
return self.mounts[mountpoint].mount_source
# mounted()
#
# A context manager which ensures all the mount sources
# were mounted with any fuse layers which may have been needed.
#
# Args:
# sandbox (Sandbox): The sandbox
#
@contextmanager
def mounted(self, sandbox):
with ExitStack() as stack:
for _, mount in self.mounts.items():
stack.enter_context(mount.mounted(sandbox))
yield
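# --- Illustrative sketch (not part of the original module) ---
# Roughly how the sandbox implementations in this package use MountMap:
# build the map, resolve where '/' should be mounted from, then enter the
# mounted() context so that any fuse layers are active while the sandboxed
# command runs. The 'sandbox' argument is assumed to be a Sandbox instance
# provided by the caller.
def _example_mount_map_usage(sandbox, root_readonly):
    mount_map = MountMap(sandbox, root_readonly)
    root_source = mount_map.get_mount_source('/')
    with mount_map.mounted(sandbox):
        # ... bind mount root_source and run the command here ...
        pass
    return root_source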
buildstream-1.6.9/buildstream/sandbox/_mounter.py 0000664 0000000 0000000 00000011104 14375152700 0022216 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
import sys
from contextlib import contextmanager
from .._exceptions import SandboxError
from .. import utils, _signals
# A class to wrap the `mount` and `umount` system commands
class Mounter:
@classmethod
def _mount(cls, dest, src=None, mount_type=None,
stdout=sys.stdout, stderr=sys.stderr, options=None,
flags=None):
argv = [utils.get_host_tool('mount')]
if mount_type:
argv.extend(['-t', mount_type])
if options:
argv.extend(['-o', options])
if flags:
argv.extend(flags)
if src is not None:
argv += [src]
argv += [dest]
status, _ = utils._call(
argv,
terminate=True,
stdout=stdout,
stderr=stderr
)
if status != 0:
raise SandboxError('`{}` failed with exit code {}'
.format(' '.join(argv), status))
return dest
@classmethod
def _umount(cls, path, stdout=sys.stdout, stderr=sys.stderr):
cmd = [utils.get_host_tool('umount'), '-R', path]
status, _ = utils._call(
cmd,
terminate=True,
stdout=stdout,
stderr=stderr
)
if status != 0:
raise SandboxError('`{}` failed with exit code {}'
.format(' '.join(cmd), status))
# mount()
#
# A wrapper for the `mount` command. The device is unmounted when
# the context is left.
#
# Args:
# dest (str) - The directory to mount to
# src (str) - The directory to mount
# stdout (file) - stdout
# stderr (file) - stderr
# mount_type (str|None) - The mount type (can be omitted or None)
# kwargs - Arguments to pass to the mount command, such as `ro=True`
#
# Yields:
# (str) The path to the destination
#
@classmethod
@contextmanager
def mount(cls, dest, src=None, stdout=sys.stdout,
stderr=sys.stderr, mount_type=None, **kwargs):
def kill_proc():
cls._umount(dest, stdout, stderr)
options = ','.join([key for key, val in kwargs.items() if val])
path = cls._mount(dest, src, mount_type, stdout=stdout, stderr=stderr, options=options)
try:
with _signals.terminator(kill_proc):
yield path
finally:
cls._umount(dest, stdout, stderr)
# bind_mount()
#
# Mount a directory to a different location (a hardlink for all
# intents and purposes). The directory is unmounted when the
# context is left.
#
# Args:
# dest (str) - The directory to mount to
# src (str) - The directory to mount
# stdout (file) - stdout
# stderr (file) - stderr
# kwargs - Arguments to pass to the mount command, such as `ro=True`
#
# Yields:
# (str) The path to the destination
#
# While this is equivalent to `mount --rbind`, this option may not
# exist and can be dangerous, requiring careful cleanup. It is
# recommended to use this function over a manual mount invocation.
#
@classmethod
@contextmanager
def bind_mount(cls, dest, src=None, stdout=sys.stdout,
stderr=sys.stderr, **kwargs):
def kill_proc():
cls._umount(dest, stdout, stderr)
kwargs['rbind'] = True
options = ','.join([key for key, val in kwargs.items() if val])
path = cls._mount(dest, src, None, stdout, stderr, options)
try:
with _signals.terminator(kill_proc):
# Make the rbind a slave to avoid unmounting vital devices in
# /proc
cls._mount(dest, flags=['--make-rslave'])
yield path
finally:
cls._umount(dest, stdout, stderr)
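# --- Illustrative sketch (not part of the original module) ---
# Roughly how bind_mount() is used by the chroot sandbox: as a context
# manager which bind mounts 'src' over 'dest' for the duration of the block
# and unmounts it again on exit. This requires root and a working mount(8)
# on the host; the paths here are hypothetical.
def _example_bind_mount(dest='/var/run/buildstream/rootfs/tmp', src='/tmp'):
    with Mounter.bind_mount(dest, src=src) as path:
        # 'path' is the destination which is now bind mounted
        return path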
buildstream-1.6.9/buildstream/sandbox/_sandboxbwrap.py 0000664 0000000 0000000 00000041603 14375152700 0023226 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Andrew Leeming
# Tristan Van Berkom
import os
import sys
import time
import errno
import signal
import subprocess
import shutil
from contextlib import ExitStack
import psutil
from .._exceptions import SandboxError
from .. import utils, _signals
from ._mount import MountMap
from . import Sandbox, SandboxFlags
# SandboxBwrap()
#
# Default bubblewrap based sandbox implementation.
#
class SandboxBwrap(Sandbox):
# Minimal set of devices for the sandbox
DEVICES = [
'/dev/full',
'/dev/null',
'/dev/urandom',
'/dev/random',
'/dev/zero'
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.user_ns_available = kwargs['user_ns_available']
self.die_with_parent_available = kwargs['die_with_parent_available']
self._linux32 = kwargs['linux32']
def run(self, command, flags, *, cwd=None, env=None):
stdout, stderr = self._get_output()
root_directory = self.get_directory()
# Fallback to the sandbox default settings for
# the cwd and env.
#
if cwd is None:
cwd = self._get_work_directory()
if env is None:
env = self._get_environment()
if not self._has_command(command[0], env):
raise SandboxError("Staged artifacts do not provide command "
"'{}'".format(command[0]),
reason='missing-command')
# We want command args as a list of strings
if isinstance(command, str):
command = [command]
# Create the mount map, this will tell us where
# each mount point needs to be mounted from and to
mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY)
root_mount_source = mount_map.get_mount_source('/')
if cwd is None:
cwd = '/'
# start command with linux32 if needed
if self._linux32:
bwrap_command = [utils.get_host_tool('linux32')]
else:
bwrap_command = []
# Grab the full path of the bwrap binary
bwrap_command += [utils.get_host_tool('bwrap')]
for k, v in env.items():
bwrap_command += ['--setenv', k, v]
for k in os.environ.keys() - env.keys():
bwrap_command += ['--unsetenv', k]
# Create a new pid namespace, this also ensures that any subprocesses
# are cleaned up when the bwrap process exits.
bwrap_command += ['--unshare-pid']
# Ensure subprocesses are cleaned up when the bwrap parent dies.
if self.die_with_parent_available:
bwrap_command += ['--die-with-parent']
# Add in the root filesystem stuff first.
#
# The rootfs is mounted as RW initially so that further mounts can be
# placed on top. If a RO root is required, after all other mounts are
# complete, root is remounted as RO
bwrap_command += ["--bind", root_mount_source, "/"]
if not flags & SandboxFlags.NETWORK_ENABLED:
bwrap_command += ['--unshare-net']
bwrap_command += ['--unshare-uts', '--hostname', 'buildstream']
bwrap_command += ['--unshare-ipc']
if cwd is not None:
bwrap_command += ['--chdir', cwd]
# Give it a proc and tmpfs
bwrap_command += [
'--proc', '/proc',
'--tmpfs', '/tmp'
]
# In interactive mode, we want a complete devpts inside
# the container, so there is a /dev/console and such. In
# the regular non-interactive sandbox, we want to hand pick
# a minimal set of devices to expose to the sandbox.
#
if flags & SandboxFlags.INTERACTIVE:
bwrap_command += ['--dev', '/dev']
else:
for device in self.DEVICES:
bwrap_command += ['--dev-bind', device, device]
# Create a tmpfs for /dev/shm, if we're in interactive this
# is handled by `--dev /dev`
#
if flags & SandboxFlags.CREATE_DEV_SHM:
bwrap_command += ['--tmpfs', '/dev/shm']
# Add bind mounts to any marked directories
marked_directories = self._get_marked_directories()
mount_source_overrides = self._get_mount_sources()
for mark in marked_directories:
mount_point = mark['directory']
mount_source = mount_source_overrides.get(mount_point, mount_map.get_mount_source(mount_point))
# Use --dev-bind for all mounts, this is simply a bind mount which is
# not restrictive about devices.
#
# While it's important for users to be able to mount devices
# into the sandbox for `bst shell` testing purposes, it is
# harmless to do in a build environment where the directories
# we mount just never contain device files.
#
bwrap_command += ['--dev-bind', mount_source, mount_point]
if flags & SandboxFlags.ROOT_READ_ONLY:
bwrap_command += ["--remount-ro", "/"]
# Set UID and GID
if self.user_ns_available:
bwrap_command += ['--unshare-user']
if not flags & SandboxFlags.INHERIT_UID:
uid = self._get_config().build_uid
gid = self._get_config().build_gid
bwrap_command += ['--uid', str(uid), '--gid', str(gid)]
# Add the command
bwrap_command += command
# bwrap might create some directories while being suid
# and may give them to root gid, if it does, we'll want
# to clean them up after, so record what we already had
# there just in case so that we can safely cleanup the debris.
#
existing_basedirs = {
directory: os.path.lexists(os.path.join(root_directory, directory))
for directory in ['dev/shm', 'tmp', 'dev', 'proc']
}
# Use the MountMap context manager to ensure that any redirected
# mounts through fuse layers are in context and ready for bwrap
# to mount them from.
#
with ExitStack() as stack:
stack.enter_context(mount_map.mounted(self))
# Ensure the cwd exists
if cwd is not None:
workdir = os.path.join(root_mount_source, cwd.lstrip(os.sep))
os.makedirs(workdir, exist_ok=True)
# If we're interactive, we want to inherit our stdin,
# otherwise redirect to /dev/null, ensuring the process is
# disconnected from the terminal.
if flags & SandboxFlags.INTERACTIVE:
stdin = sys.stdin
else:
stdin = stack.enter_context(open(os.devnull, "r")) # pylint: disable=unspecified-encoding
# Run bubblewrap !
exit_code = self.run_bwrap(bwrap_command, stdin, stdout, stderr,
(flags & SandboxFlags.INTERACTIVE))
# Cleanup things which bwrap might have left behind, while
# everything is still mounted because bwrap can be creating
# the devices on the fuse mount, so we should remove it there.
if not flags & SandboxFlags.INTERACTIVE:
for device in self.DEVICES:
device_path = os.path.join(root_mount_source, device.lstrip('/'))
# This will remove the device in a loop, allowing some
# retries in case the device file leaked by bubblewrap is still busy
self.try_remove_device(device_path)
# Remove /tmp, this is a bwrap owned thing we want to be sure
# never ends up in an artifact
for basedir in ['dev/shm', 'tmp', 'dev', 'proc']:
# Skip removal of directories which already existed before
# launching bwrap
if existing_basedirs[basedir]:
continue
base_directory = os.path.join(root_mount_source, basedir)
if flags & SandboxFlags.INTERACTIVE:
# Be more lenient in interactive mode here.
#
# In interactive mode; it's possible that the project shell
# configuration has mounted some things below the base
# directories, such as /dev/dri, and in this case it's less
# important to consider cleanup, as we won't be collecting
# this build result and creating an artifact.
#
# Note: Ideally; we should instead fix upstream bubblewrap to
# cleanup any debris it creates at startup time, and do
# the same ourselves for any directories we explicitly create.
#
shutil.rmtree(base_directory, ignore_errors=True)
else:
try:
os.rmdir(base_directory)
except FileNotFoundError:
# ignore this, if bwrap cleaned up properly then it's not a problem.
#
# If the directory was not empty on the other hand, then this is clearly
# a bug, bwrap mounted a tempfs here and when it exits, that better be empty.
pass
return exit_code
def run_bwrap(self, argv, stdin, stdout, stderr, interactive):
# Wrapper around subprocess.Popen() with common settings.
#
# This function blocks until the subprocess has terminated.
#
# It then returns the exit code of the terminated process.
# Fetch the process actually launched inside the bwrap sandbox, or the
# intermediate control bwrap processes.
#
# NOTE:
# The main bwrap process itself is setuid root and as such we cannot
# send it any signals. Since we launch bwrap with --unshare-pid, its
# direct child is another bwrap process which retains ownership of the
# pid namespace. This is the right process to kill when terminating.
#
# The grandchild is the binary which we asked bwrap to launch on our
# behalf, whatever this binary is, it is the right process to use
# for suspending and resuming. In the case that this is a shell, the
# shell will be group leader and all build scripts will stop/resume
# with that shell.
#
def get_user_proc(bwrap_pid, grand_child=False):
bwrap_proc = psutil.Process(bwrap_pid)
bwrap_children = bwrap_proc.children()
if bwrap_children:
if grand_child:
bwrap_grand_children = bwrap_children[0].children()
if bwrap_grand_children:
return bwrap_grand_children[0]
else:
return bwrap_children[0]
return None
def terminate_bwrap():
if process:
user_proc = get_user_proc(process.pid)
if user_proc:
user_proc.kill()
def suspend_bwrap():
if process:
user_proc = get_user_proc(process.pid, grand_child=True)
if user_proc:
group_id = os.getpgid(user_proc.pid)
os.killpg(group_id, signal.SIGSTOP)
def resume_bwrap():
if process:
user_proc = get_user_proc(process.pid, grand_child=True)
if user_proc:
group_id = os.getpgid(user_proc.pid)
os.killpg(group_id, signal.SIGCONT)
with ExitStack() as stack:
# We want to launch bwrap in a new session in non-interactive
# mode so that we handle the SIGTERM and SIGTSTP signals separately
# from the nested bwrap process, but in interactive mode this
# causes launched shells to lack job control (we don't really
# know why that is).
#
if interactive:
new_session = False
else:
new_session = True
stack.enter_context(_signals.suspendable(suspend_bwrap, resume_bwrap))
stack.enter_context(_signals.terminator(terminate_bwrap))
process = subprocess.Popen( # pylint: disable=consider-using-with
argv,
# The default is to share file descriptors from the parent process
# to the subprocess, which is rarely good for sandboxing.
close_fds=True,
stdin=stdin,
stdout=stdout,
stderr=stderr,
start_new_session=new_session
)
# Wait for the child process to finish, ensuring that
# a SIGINT has exactly the effect the user probably
# expects (i.e. let the child process handle it).
try:
while True:
try:
_, status = os.waitpid(process.pid, 0)
# If the process exits due to a signal, we
# brutally murder it to avoid zombies
if not os.WIFEXITED(status):
user_proc = get_user_proc(process.pid)
if user_proc:
utils._kill_process_tree(user_proc.pid)
# If we receive a KeyboardInterrupt we continue
# waiting for the process since we are in the same
# process group and it should also have received
# the SIGINT.
except KeyboardInterrupt:
continue
break
# If we can't find the process, it has already died of its
# own accord, and therefore we don't need to check or kill
# anything.
except psutil.NoSuchProcess:
pass
# Return the exit code - see the documentation for
# os.WEXITSTATUS to see why this is required.
if os.WIFEXITED(status):
exit_code = os.WEXITSTATUS(status)
else:
exit_code = -1
if interactive and stdin.isatty():
# Make this process the foreground process again, otherwise the
# next read() on stdin will trigger SIGTTIN and stop the process.
# This is required because the sandboxed process does not have
# permission to do this on its own (running in separate PID namespace).
#
# tcsetpgrp() will trigger SIGTTOU when called from a background
# process, so ignore it temporarily.
handler = signal.signal(signal.SIGTTOU, signal.SIG_IGN)
os.tcsetpgrp(0, os.getpid())
signal.signal(signal.SIGTTOU, handler)
return exit_code
def try_remove_device(self, device_path):
# Put some upper limit on the tries here
max_tries = 1000
tries = 0
while True:
try:
os.unlink(device_path)
except OSError as e:
if e.errno == errno.EBUSY:
# This happens on some machines, seems there is a race sometimes
# after bubblewrap returns and the device files it bind-mounted did
# not finish unmounting.
#
if tries < max_tries:
tries += 1
time.sleep(1 / 100)
continue
# We've reached the upper limit of tries, bail out now
# because something must have gone wrong
#
raise
if e.errno == errno.ENOENT:
# Bubblewrap cleaned it up for us, no problem if we can't remove it
break
# Something unexpected, reraise this error
raise
else:
# Successfully removed the device file
break
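# --- Illustrative sketch (not part of the original module) ---
# A representative, abridged bwrap command line as assembled by run() above
# for a non-interactive build with a read-only root and no network access.
# Real invocations also carry --setenv/--unsetenv pairs, the remaining
# device nodes and any marked directories; the paths and the build command
# here are hypothetical.
def _example_bwrap_argv(root_mount_source='/tmp/rootfs', cwd='/buildstream/build'):
    return [
        'bwrap',
        '--unshare-pid', '--die-with-parent',
        '--bind', root_mount_source, '/',
        '--unshare-net',
        '--unshare-uts', '--hostname', 'buildstream',
        '--unshare-ipc',
        '--chdir', cwd,
        '--proc', '/proc',
        '--tmpfs', '/tmp',
        '--dev-bind', '/dev/null', '/dev/null',
        '--remount-ro', '/',
        'sh', '-c', 'make install',
    ]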
buildstream-1.6.9/buildstream/sandbox/_sandboxchroot.py 0000664 0000000 0000000 00000030472 14375152700 0023413 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Maat
# Tristan Van Berkom
import os
import sys
import stat
import signal
import subprocess
from contextlib import contextmanager, ExitStack
import psutil
from .._exceptions import SandboxError
from .. import utils
from .. import _signals
from ._mounter import Mounter
from ._mount import MountMap
from . import Sandbox, SandboxFlags
class SandboxChroot(Sandbox):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
uid = self._get_config().build_uid
gid = self._get_config().build_gid
if uid != 0 or gid != 0:
raise SandboxError("Chroot sandboxes cannot specify a non-root uid/gid "
"({},{} were supplied via config)".format(uid, gid))
self.mount_map = None
def run(self, command, flags, *, cwd=None, env=None):
# Default settings
if cwd is None:
cwd = self._get_work_directory()
if cwd is None:
cwd = '/'
if env is None:
env = self._get_environment()
if not self._has_command(command[0], env):
raise SandboxError("Staged artifacts do not provide command "
"'{}'".format(command[0]),
reason='missing-command')
# Command must be a list
if isinstance(command, str):
command = [command]
stdout, stderr = self._get_output()
# Create the mount map, this will tell us where
# each mount point needs to be mounted from and to
self.mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY)
root_mount_source = self.mount_map.get_mount_source('/')
# Create a sysroot and run the command inside it
with ExitStack() as stack:
os.makedirs('/var/run/buildstream', exist_ok=True)
# FIXME: While we do not currently do anything to prevent
# network access, we also don't copy /etc/resolv.conf to
# the new rootfs.
#
# This effectively disables network access, since DNS will
# never resolve, so anything a normal process wants to do
# will fail. Malicious processes could gain rights to
# anything anyway.
#
# Nonetheless a better solution could perhaps be found.
rootfs = stack.enter_context(utils._tempdir(dir='/var/run/buildstream'))
stack.enter_context(self.create_devices(self.get_directory(), flags))
stack.enter_context(self.mount_dirs(rootfs, flags, stdout, stderr))
if flags & SandboxFlags.INTERACTIVE:
stdin = sys.stdin
else:
stdin = stack.enter_context(open(os.devnull, 'r')) # pylint: disable=unspecified-encoding
# Ensure the cwd exists
if cwd is not None:
workdir = os.path.join(root_mount_source, cwd.lstrip(os.sep))
os.makedirs(workdir, exist_ok=True)
status = self.chroot(rootfs, command, stdin, stdout,
stderr, cwd, env, flags)
return status
# chroot()
#
# A helper function to chroot into the rootfs.
#
# Args:
# rootfs (str): The path of the sysroot to chroot into
# command (list): The command to execute in the chroot env
# stdin (file): The stdin
# stdout (file): The stdout
# stderr (file): The stderr
# cwd (str): The current working directory
# env (dict): The environment variables to use while executing the command
# flags (:class:`SandboxFlags`): The flags to enable on the sandbox
#
# Returns:
# (int): The exit code of the executed command
#
def chroot(self, rootfs, command, stdin, stdout, stderr, cwd, env, flags):
# pylint: disable=subprocess-popen-preexec-fn
def kill_proc():
if process:
# First attempt to gracefully terminate
proc = psutil.Process(process.pid)
proc.terminate()
try:
proc.wait(20)
except psutil.TimeoutExpired:
utils._kill_process_tree(process.pid)
def suspend_proc():
group_id = os.getpgid(process.pid)
os.killpg(group_id, signal.SIGSTOP)
def resume_proc():
group_id = os.getpgid(process.pid)
os.killpg(group_id, signal.SIGCONT)
try:
with _signals.suspendable(suspend_proc, resume_proc), _signals.terminator(kill_proc):
process = subprocess.Popen( # pylint: disable=consider-using-with
command,
close_fds=True,
cwd=os.path.join(rootfs, cwd.lstrip(os.sep)),
env=env,
stdin=stdin,
stdout=stdout,
stderr=stderr,
# If you try to put gtk dialogs here Tristan (either)
# will personally scald you
preexec_fn=lambda: (os.chroot(rootfs), os.chdir(cwd)),
start_new_session=flags & SandboxFlags.INTERACTIVE
)
# Wait for the child process to finish, ensuring that
# a SIGINT has exactly the effect the user probably
# expects (i.e. let the child process handle it).
try:
while True:
try:
_, status = os.waitpid(process.pid, 0)
# If the process exits due to a signal, we
# brutally murder it to avoid zombies
if not os.WIFEXITED(status):
utils._kill_process_tree(process.pid)
# Unlike in the bwrap case, here only the main
# process seems to receive the SIGINT. We pass
# on the signal to the child and then continue
# to wait.
except KeyboardInterrupt:
process.send_signal(signal.SIGINT)
continue
break
# If we can't find the process, it has already died of
# its own accord, and therefore we don't need to check
# or kill anything.
except psutil.NoSuchProcess:
pass
# Return the exit code - see the documentation for
# os.WEXITSTATUS to see why this is required.
if os.WIFEXITED(status):
code = os.WEXITSTATUS(status)
else:
code = -1
except subprocess.SubprocessError as e:
# Exceptions in preexec_fn are simply reported as
# 'Exception occurred in preexec_fn', turn these into
# a more readable message.
if '{}'.format(e) == 'Exception occurred in preexec_fn.':
raise SandboxError('Could not chroot into {} or chdir into {}. '
'Ensure you are root and that the relevant directory exists.'
.format(rootfs, cwd)) from e
raise SandboxError('Could not run command {}: {}'.format(command, e)) from e
return code
# create_devices()
#
# Create the nodes in /dev/ usually required for builds (null,
# none, etc.)
#
# Args:
# rootfs (str): The path of the sysroot to prepare
# flags (:class:`.SandboxFlags`): The sandbox flags
#
@contextmanager
def create_devices(self, rootfs, flags):
devices = []
# When we are interactive, we'd rather mount /dev due to the
# sheer number of devices
if not flags & SandboxFlags.INTERACTIVE:
for device in Sandbox.DEVICES:
location = os.path.join(rootfs, device.lstrip(os.sep))
os.makedirs(os.path.dirname(location), exist_ok=True)
try:
if os.path.exists(location):
os.remove(location)
devices.append(self.mknod(device, location))
except OSError as e:
if e.errno == 1:
raise SandboxError("Permission denied while creating device node: {}.".format(e) +
"BuildStream reqiures root permissions for these setttings.") from e
raise
yield
for device in devices:
os.remove(device)
# mount_dirs()
#
# Mount paths required for the command.
#
# Args:
# rootfs (str): The path of the sysroot to prepare
# flags (:class:`.SandboxFlags`): The sandbox flags
# stdout (file): The stdout
# stderr (file): The stderr
#
@contextmanager
def mount_dirs(self, rootfs, flags, stdout, stderr):
# FIXME: This should probably keep track of potentially
# already existing files a la _sandboxbwrap.py:239
@contextmanager
def mount_point(point, **kwargs):
mount_source_overrides = self._get_mount_sources()
mount_source = mount_source_overrides.get(point, self.mount_map.get_mount_source(point))
mount_point = os.path.join(rootfs, point.lstrip(os.sep))
with Mounter.bind_mount(mount_point, src=mount_source, stdout=stdout, stderr=stderr, **kwargs):
yield
@contextmanager
def mount_src(src, **kwargs):
mount_point = os.path.join(rootfs, src.lstrip(os.sep))
os.makedirs(mount_point, exist_ok=True)
with Mounter.bind_mount(mount_point, src=src, stdout=stdout, stderr=stderr, **kwargs):
yield
with ExitStack() as stack:
stack.enter_context(self.mount_map.mounted(self))
stack.enter_context(mount_point('/'))
if flags & SandboxFlags.INTERACTIVE:
stack.enter_context(mount_src('/dev'))
stack.enter_context(mount_src('/tmp'))
stack.enter_context(mount_src('/proc'))
for mark in self._get_marked_directories():
stack.enter_context(mount_point(mark['directory']))
# Remount root RO if necessary
if flags & flags & SandboxFlags.ROOT_READ_ONLY:
root_mount = Mounter.mount(rootfs, stdout=stdout, stderr=stderr, remount=True, ro=True, bind=True)
# Since the exit stack has already registered a mount
# for this path, we do not need to register another
# umount call.
root_mount.__enter__()
yield
# mknod()
#
# Create a device node equivalent to the given source node
#
# Args:
# source (str): Path of the device to mimic (e.g. '/dev/null')
# target (str): Location to create the new device in
#
# Returns:
# target (str): The location of the created node
#
def mknod(self, source, target):
try:
dev = os.stat(source)
major = os.major(dev.st_rdev)
minor = os.minor(dev.st_rdev)
target_dev = os.makedev(major, minor)
os.mknod(target, mode=stat.S_IFCHR | dev.st_mode, device=target_dev)
except PermissionError as e:
raise SandboxError('Could not create device {}, ensure that you have root permissions: {}'.format(target, e)) from e
except OSError as e:
raise SandboxError('Could not create device {}: {}'
.format(target, e)) from e
return target
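# --- Illustrative sketch (not part of the original module) ---
# The device-number handling in mknod() above in isolation: the major/minor
# pair read from an existing node round-trips through os.makedev() to give
# the device number for the new character device. Reading /dev/null only
# requires read access; actually calling os.mknod() would require root.
def _example_device_numbers(source='/dev/null'):
    import os
    rdev = os.stat(source).st_rdev
    major, minor = os.major(rdev), os.minor(rdev)
    assert os.makedev(major, minor) == rdev
    return major, minor  # conventionally (1, 3) for /dev/null on Linux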
buildstream-1.6.9/buildstream/sandbox/_sandboxdummy.py 0000664 0000000 0000000 00000000565 14375152700 0023250 0 ustar 00root root 0000000 0000000 from .._exceptions import SandboxError
from . import Sandbox
# SandboxDummy()
#
# Dummy sandbox to use when no functional sandbox implementation is available;
# running a command simply raises a SandboxError with the configured reason.
#
class SandboxDummy(Sandbox):
def __init__(self, reason, *args, **kwargs):
super().__init__(*args, **kwargs)
self._reason = reason
def run(self, command, flags, *, cwd=None, env=None):
raise SandboxError(self._reason)
buildstream-1.6.9/buildstream/sandbox/sandbox.py 0000664 0000000 0000000 00000023452 14375152700 0022035 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Andrew Leeming
# Tristan Van Berkom
"""
Sandbox - The build sandbox
===========================
:class:`.Element` plugins which want to interface with the sandbox
need only understand this interface; while it may be given a different
sandbox implementation, any sandbox implementation it is given will
conform to this interface.
See also: :ref:`sandboxing`.
"""
import os
from .._exceptions import ImplError
class SandboxFlags():
"""Flags indicating how the sandbox should be run.
"""
ROOT_READ_ONLY = 0x01
"""The root filesystem is read only.
This is normally true except when running integration commands
on staged dependencies, where we have to update caches and run
things such as ldconfig.
"""
NETWORK_ENABLED = 0x02
"""Whether to expose host network.
This should not be set when running builds, but can
be allowed for running a shell in a sandbox.
"""
INTERACTIVE = 0x04
"""Whether to run the sandbox interactively
This determines if the sandbox should attempt to connect
the terminal through to the calling process, or detach
the terminal entirely.
"""
INHERIT_UID = 0x08
"""Whether to use the user id and group id from the host environment
This determines if processes in the sandbox should run with the
same user id and group id as BuildStream itself. By default,
processes run with user id and group id 0, protected by a user
namespace where available.
"""
CREATE_DEV_SHM = 0x10
"""Whether to create /dev/shm in the sandbox.
This allows plugins to create /dev/shm in the sandbox. This flag
was added to fix a bug in which /dev/shm was not added in, meaning our
sandbox was not POSIX compliant.
"""
class Sandbox():
"""Sandbox()
Sandbox programming interface for :class:`.Element` plugins.
"""
# Minimal set of devices for the sandbox
DEVICES = [
'/dev/urandom',
'/dev/random',
'/dev/zero',
'/dev/null'
]
def __init__(self, context, project, directory, **kwargs):
self.__context = context
self.__project = project
self.__directories = []
self.__cwd = None
self.__env = None
self.__mount_sources = {}
# Configuration from kwargs common to all subclasses
self.__config = kwargs['config']
self.__stdout = kwargs['stdout']
self.__stderr = kwargs['stderr']
# Setup the directories
self.__directory = directory
self.__root = os.path.join(self.__directory, 'root')
self.__scratch = os.path.join(self.__directory, 'scratch')
for directory_ in [self.__root, self.__scratch]:
os.makedirs(directory_, exist_ok=True)
def get_directory(self):
"""Fetches the sandbox root directory
The root directory is where artifacts for the base
runtime environment should be staged.
Returns:
(str): The sandbox root directory
"""
return self.__root
def set_environment(self, environment):
"""Sets the environment variables for the sandbox
Args:
environment (dict): The environment variables to use in the sandbox
"""
self.__env = environment
def set_work_directory(self, directory):
"""Sets the work directory for commands run in the sandbox
Args:
directory (str): An absolute path within the sandbox
"""
self.__cwd = directory
def mark_directory(self, directory, *, artifact=False):
"""Marks a sandbox directory and ensures it will exist
Args:
directory (str): An absolute path within the sandbox to mark
artifact (bool): Whether the content staged at this location
contains artifacts
.. note::
Any marked directories will be read-write in the sandboxed
environment, only the root directory is allowed to be readonly.
"""
self.__directories.append({
'directory': directory,
'artifact': artifact
})
def run(self, command, flags, *, cwd=None, env=None):
"""Run a command in the sandbox.
Args:
command (list): The command to run in the sandboxed environment, as a list
of strings starting with the binary to run.
flags (:class:`.SandboxFlags`): The flags for running this command.
cwd (str): The sandbox relative working directory in which to run the command.
env (dict): A dictionary of string key, value pairs to set as environment
variables inside the sandbox environment.
Returns:
(int): The program exit code.
Raises:
(:class:`.ProgramNotFoundError`): If a host tool which the given sandbox
implementation requires is not found.
.. note::
The optional *cwd* argument will default to the value set with
:func:`~buildstream.sandbox.Sandbox.set_work_directory`
"""
raise ImplError("Sandbox of type '{}' does not implement run()"
.format(type(self).__name__))
################################################
# Private methods #
################################################
# _get_context()
#
# Fetches the context BuildStream was launched with.
#
# Returns:
# (Context): The context of this BuildStream invocation
def _get_context(self):
return self.__context
# _get_project()
#
# Fetches the Project this sandbox was created to build for.
#
# Returns:
# (Project): The project this sandbox was created for.
def _get_project(self):
return self.__project
# _get_marked_directories()
#
# Fetches the marked directories in the sandbox
#
# Returns:
# (list): A list of directory mark objects.
#
# The returned objects are dictionaries with the following attributes:
# directory: The absolute path within the sandbox
# artifact: Whether the path will contain artifacts or not
#
def _get_marked_directories(self):
return self.__directories
# _get_mount_sources()
#
# Fetches the list of mount sources
#
# Returns:
# (dict): A dictionary where keys are mount points and values are the mount sources
def _get_mount_sources(self):
return self.__mount_sources
# _set_mount_source()
#
# Sets the mount source for a given mountpoint
#
# Args:
# mountpoint (str): The absolute mountpoint path inside the sandbox
# mount_source (str): the host path to be mounted at the mount point
def _set_mount_source(self, mountpoint, mount_source):
self.__mount_sources[mountpoint] = mount_source
# _get_environment()
#
# Fetches the environment variables for running commands
# in the sandbox.
#
# Returns:
# (dict): The environment variables to use when running commands in the sandbox
def _get_environment(self):
return self.__env
# _get_work_directory()
#
# Fetches the working directory for running commands
# in the sandbox.
#
# Returns:
# (str): The sandbox work directory
def _get_work_directory(self):
return self.__cwd
# _get_scratch_directory()
#
# Fetches the sandbox scratch directory, this directory can
# be used by the sandbox implementation to cache things or
# redirect temporary fuse mounts.
#
# The scratch directory is guaranteed to be on the same
# filesystem as the root directory.
#
# Returns:
# (str): The sandbox scratch directory
def _get_scratch_directory(self):
return self.__scratch
# _get_output()
#
# Fetches the stdout & stderr
#
# Returns:
# (file): The stdout, or None to inherit
# (file): The stderr, or None to inherit
def _get_output(self):
return (self.__stdout, self.__stderr)
# _get_config()
#
# Fetches the sandbox configuration object.
#
# Returns:
# (SandboxConfig): An object containing the configuration
# data passed in during construction.
def _get_config(self):
return self.__config
# _has_command()
#
# Tests whether a command exists inside the sandbox
#
# Args:
# command (list): The command to test.
# env (dict): A dictionary of string key, value pairs to set as environment
# variables inside the sandbox environment.
# Returns:
# (bool): Whether a command exists inside the sandbox.
def _has_command(self, command, env=None):
if os.path.isabs(command):
return os.path.exists(os.path.join(
self.get_directory(), command.lstrip(os.sep)))
for path in env.get('PATH').split(':'):
if os.path.exists(os.path.join(
self.get_directory(), path.lstrip(os.sep), command)):
return True
return False
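# --- Illustrative sketch (not part of the original module) ---
# Roughly how an Element plugin drives this interface during assembly: mark
# the directories it needs, set up the environment and working directory,
# then run a command with the appropriate flags. The directory layout and
# the command here are hypothetical.
def _example_element_assemble(sandbox):
    sandbox.mark_directory('/buildstream-install', artifact=False)
    sandbox.set_work_directory('/buildstream-install')
    sandbox.set_environment({'PATH': '/usr/bin:/bin'})
    exitcode = sandbox.run(['sh', '-c', 'echo hello'],
                           SandboxFlags.ROOT_READ_ONLY)
    return exitcode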
buildstream-1.6.9/buildstream/scriptelement.py 0000664 0000000 0000000 00000027361 14375152700 0021622 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Jonathan Maw
"""
ScriptElement - Abstract class for scripting elements
=====================================================
The ScriptElement class is a convenience class one can derive for
implementing elements that stage elements and run command-lines on them.
Any derived classes must write their own configure() implementation, using
the public APIs exposed in this class.
Derived classes must also chain up to the parent method in their preflight()
implementations.
"""
import os
from collections import OrderedDict
from . import Element, ElementError, Scope, SandboxFlags
class ScriptElement(Element):
__install_root = "/"
__cwd = "/"
__root_read_only = False
__commands = None
__layout = []
__create_dev_shm = False
# This element's output is its dependencies, so
# we must rebuild if the dependencies change even when
# not in strict build plans.
#
BST_STRICT_REBUILD = True
# Script artifacts must never have indirect dependencies,
# so runtime dependencies are forbidden.
BST_FORBID_RDEPENDS = True
# This element ignores sources, so we should forbid them from being
# added, to reduce the potential for confusion
BST_FORBID_SOURCES = True
def set_work_dir(self, work_dir=None):
"""Sets the working dir
The working dir (a.k.a. cwd) is the directory which commands will be
called from.
Args:
work_dir (str): The working directory. If called without this argument
set, it'll default to the value of the variable ``cwd``.
"""
if work_dir is None:
self.__cwd = self.get_variable("cwd") or "/"
else:
self.__cwd = work_dir
def set_install_root(self, install_root=None):
"""Sets the install root
The install root is the directory which output will be collected from
once the commands have been run.
Args:
install_root(str): The install root. If called without this argument
set, it'll default to the value of the variable ``install-root``.
"""
if install_root is None:
self.__install_root = self.get_variable("install-root") or "/"
else:
self.__install_root = install_root
def set_root_read_only(self, root_read_only):
"""Sets root read-only
When commands are run, if root_read_only is true, then the root of the
filesystem will be protected. This is strongly recommended whenever
possible.
If this variable is not set, the default permission is read-write.
Args:
root_read_only (bool): Whether to mark the root filesystem as
read-only.
"""
self.__root_read_only = root_read_only
def set_create_dev_shm(self, create_dev_shm=False):
"""Sets whether to use shared memory device in the sandbox
Args:
create_dev_shm (bool): Whether to enable creation of the shared memory device
"""
self.__create_dev_shm = create_dev_shm
def layout_add(self, element, destination):
"""Adds an element-destination pair to the layout.
Layout is a way of defining how dependencies should be added to the
staging area for running commands.
Args:
element (str): The name of the element to stage, or None. This may be any
element found in the dependencies, whether it is a direct
or indirect dependency.
destination (str): The path inside the staging area for where to
stage this element. If it is not "/", then integration
commands will not be run.
If this function is never called, then the default behavior is to just
stage the Scope.BUILD dependencies of the element in question at the
sandbox root. Otherwise, the Scope.RUN dependencies of each specified
element will be staged in their specified destination directories.
.. note::
The order of directories in the layout is significant as they
will be mounted into the sandbox. It is an error to specify a parent
directory which will shadow a directory already present in the layout.
.. note::
In the case that no element is specified, a read-write directory will
be made available at the specified location.
"""
#
# Even if this is an empty list by default, make sure that it's
# instance data instead of appending stuff directly onto class data.
#
if not self.__layout:
self.__layout = []
self.__layout.append({"element": element,
"destination": destination})
def add_commands(self, group_name, command_list):
"""Adds a list of commands under the group-name.
.. note::
Command groups will be run in the order they were added.
.. note::
This does not perform substitutions automatically. They must
be performed beforehand (see
:func:`~buildstream.element.Element.node_subst_list`)
Args:
group_name (str): The name of the group of commands.
command_list (list): The list of commands to be run.
"""
if not self.__commands:
self.__commands = OrderedDict()
self.__commands[group_name] = command_list
def __validate_layout(self):
if self.__layout:
# Cannot proceed if layout is used, but none are for "/"
root_defined = any(entry['destination'] == '/' for entry in self.__layout)
if not root_defined:
raise ElementError("{}: Using layout, but none are staged as '/'"
.format(self))
# Cannot proceed if layout specifies an element that isn't part
# of the dependencies.
for item in self.__layout:
if item['element']:
if not self.search(Scope.BUILD, item['element']):
raise ElementError("{}: '{}' in layout not found in dependencies"
.format(self, item['element']))
def preflight(self):
# The layout, if set, must make sense.
self.__validate_layout()
def get_unique_key(self):
return {
'commands': self.__commands,
'cwd': self.__cwd,
'install-root': self.__install_root,
'layout': self.__layout,
'root-read-only': self.__root_read_only
}
def configure_sandbox(self, sandbox):
# Setup the environment and work directory
sandbox.set_work_directory(self.__cwd)
# Setup environment
sandbox.set_environment(self.get_environment())
# Tell the sandbox to mount the install root
directories = {self.__install_root: False}
# Mark the artifact directories in the layout
for item in self.__layout:
destination = item['destination']
was_artifact = directories.get(destination, False)
directories[destination] = item['element'] or was_artifact
for directory, artifact in directories.items():
# Root does not need to be marked as it is always mounted
# with artifact (unless explicitly marked non-artifact)
if directory != '/':
sandbox.mark_directory(directory, artifact=artifact)
def stage(self, sandbox):
# Stage the elements, and run integration commands where appropriate.
if not self.__layout:
# if no layout set, stage all dependencies into /
for build_dep in self.dependencies(Scope.BUILD, recurse=False):
with self.timed_activity("Staging {} at /"
.format(build_dep.name), silent_nested=True):
build_dep.stage_dependency_artifacts(sandbox, Scope.RUN, path="/")
for build_dep in self.dependencies(Scope.BUILD, recurse=False):
with self.timed_activity("Integrating {}".format(build_dep.name), silent_nested=True):
for dep in build_dep.dependencies(Scope.RUN):
dep.integrate(sandbox)
else:
# If layout, follow its rules.
for item in self.__layout:
# Skip layout members which don't stage an element
if not item['element']:
continue
element = self.search(Scope.BUILD, item['element'])
if item['destination'] == '/':
with self.timed_activity("Staging {} at /".format(element.name),
silent_nested=True):
element.stage_dependency_artifacts(sandbox, Scope.RUN)
else:
with self.timed_activity("Staging {} at {}"
.format(element.name, item['destination']),
silent_nested=True):
real_dstdir = os.path.join(sandbox.get_directory(),
item['destination'].lstrip(os.sep))
os.makedirs(os.path.dirname(real_dstdir), exist_ok=True)
element.stage_dependency_artifacts(sandbox, Scope.RUN, path=item['destination'])
for item in self.__layout:
# Skip layout members which don't stage an element
if not item['element']:
continue
element = self.search(Scope.BUILD, item['element'])
# Integration commands can only be run for elements staged to /
if item['destination'] == '/':
with self.timed_activity("Integrating {}".format(element.name),
silent_nested=True):
for dep in element.dependencies(Scope.RUN):
dep.integrate(sandbox)
os.makedirs(os.path.join(sandbox.get_directory(), self.__install_root.lstrip(os.sep)),
exist_ok=True)
def assemble(self, sandbox):
flags = 0
if self.__root_read_only:
flags = flags | SandboxFlags.ROOT_READ_ONLY
if self.__create_dev_shm:
flags = flags | SandboxFlags.CREATE_DEV_SHM
for groupname, commands in self.__commands.items():
with self.timed_activity("Running '{}'".format(groupname)):
for cmd in commands:
self.status("Running command", detail=cmd)
# Note the -e switch to 'sh' means to exit with an error
# if any untested command fails.
exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'], flags)
if exitcode != 0:
raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode))
# Return where the result can be collected from
return self.__install_root
def setup():
return ScriptElement
buildstream-1.6.9/buildstream/source.py 0000664 0000000 0000000 00000123272 14375152700 0020242 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
"""
Source - Base source class
==========================
.. _core_source_abstract_methods:
Abstract Methods
----------------
For loading and configuration purposes, Sources must implement the
:ref:`Plugin base class abstract methods `.
.. attention::
In order to ensure that all configuration data is processed at
load time, it is important that all URLs have been processed during
:func:`Plugin.configure() `.
Source implementations *must* either call
:func:`Source.translate_url() ` or
:func:`Source.mark_download_url() `
for every URL that has been specified in the configuration during
:func:`Plugin.configure() `
Sources expose the following abstract methods. Unless explicitly mentioned,
these methods are mandatory to implement.
* :func:`Source.get_consistency() `
Report the sources consistency state.
* :func:`Source.load_ref() `
Load the ref from a specific YAML node
* :func:`Source.get_ref() `
Fetch the source ref
* :func:`Source.set_ref() `
Set a new ref explicitly
* :func:`Source.track() `
Automatically derive a new ref from a symbolic tracking branch
* :func:`Source.fetch() `
Fetch the actual payload for the currently set ref
* :func:`Source.stage() `
Stage the sources for a given ref at a specified location
* :func:`Source.init_workspace() `
Stage sources in a local directory for use as a workspace.
**Optional**: If left unimplemented, this will default to calling
:func:`Source.stage() `
* :func:`Source.get_source_fetchers() `
Get the objects that are used for fetching.
**Optional**: This only needs to be implemented for sources that need to
download from multiple URLs while fetching (e.g. a git repo and its
submodules). For details on how to define a SourceFetcher, see
:ref:`SourceFetcher `.
* :func:`Source.validate_cache() `
Perform any validations which require the sources to be cached.
**Optional**: This is completely optional and will do nothing if left unimplemented.
Accessing previous sources
--------------------------
*Since: 1.4*
In the general case, all sources are fetched and tracked independently of one
another. In situations where a source needs to access previous source(s) in
order to perform its own track and/or fetch, the following attributes can be set to
request access to previous sources:
* :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_TRACK`
Indicate that access to previous sources is required during track
* :attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_FETCH`
Indicate that access to previous sources is required during fetch
The intended use of such plugins is to fetch external dependencies of other
sources, typically using some kind of package manager, such that all the
dependencies of the original source(s) are available at build time.
When implementing such a plugin, implementors should adhere to the following
guidelines:
* Implementations must be able to store the obtained artifacts in a
subdirectory.
* Implementations must be able to deterministically generate a unique ref, such
that two refs are different if and only if they produce different outputs.
* Implementations must not introduce host contamination.
.. _core_source_fetcher:
SourceFetcher - Object for fetching individual URLs
===================================================
Abstract Methods
----------------
SourceFetchers expose the following abstract methods. Unless explicitly
mentioned, these methods are mandatory to implement.
* :func:`SourceFetcher.fetch() `
Fetches the URL associated with this SourceFetcher, optionally taking an
alias override.
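Example
-------
The following sketch is purely illustrative and is not a real BuildStream
plugin; the ``download_file()`` and ``copy_files()`` helpers are assumed
to exist. It only shows how the abstract methods described above
typically fit together::

  class ExampleFileSource(Source):

      def configure(self, node):
          self.node_validate(node, ['url', 'ref'] + Source.COMMON_CONFIG_KEYS)
          self.original_url = self.node_get_member(node, str, 'url')
          self.ref = self.node_get_member(node, str, 'ref', None)
          # URLs must be translated (or marked) at configure time
          self.url = self.translate_url(self.original_url)

      def preflight(self):
          pass

      def get_unique_key(self):
          return [self.original_url, self.ref]

      def get_consistency(self):
          if self.ref is None:
              return Consistency.INCONSISTENT
          if os.path.exists(self._mirror_file()):
              return Consistency.CACHED
          return Consistency.RESOLVED

      def load_ref(self, node):
          self.ref = self.node_get_member(node, str, 'ref', None)

      def get_ref(self):
          return self.ref

      def set_ref(self, ref, node):
          node['ref'] = self.ref = ref

      def fetch(self):
          # Assumed helper which downloads self.url into the mirror
          download_file(self.url, self._mirror_file())

      def stage(self, directory):
          # Assumed helper which copies the cached content into place
          copy_files(self._mirror_file(), directory)

      def _mirror_file(self):
          return os.path.join(self.get_mirror_directory(), self.ref)

  # Plugin entry point
  def setup():
      return ExampleFileSource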
Class Reference
---------------
"""
import os
from collections.abc import Mapping
from contextlib import contextmanager
from . import Plugin
from . import _yaml, utils
from .types import CoreWarnings
from ._exceptions import BstError, ImplError, ErrorDomain
from ._projectrefs import ProjectRefStorage
class Consistency():
INCONSISTENT = 0
"""Inconsistent
Inconsistent sources have no explicit reference set. They cannot
produce a cache key, be fetched or staged. They can only be tracked.
"""
RESOLVED = 1
"""Resolved
Resolved sources have a reference and can produce a cache key and
be fetched, however they cannot be staged.
"""
CACHED = 2
"""Cached
Cached sources have a reference which is present in the local
source cache. Only cached sources can be staged.
"""
class SourceError(BstError):
"""This exception should be raised by :class:`.Source` implementations
to report errors to the user.
Args:
message (str): The brief error description to report to the user
detail (str): A possibly multiline, more detailed error message
reason (str): An optional machine readable reason string, used for test cases
temporary (bool): An indicator to whether the error may occur if the operation was run again. (*Since: 1.2*)
"""
def __init__(self, message, *, detail=None, reason=None, temporary=False):
super().__init__(message, detail=detail, domain=ErrorDomain.SOURCE, reason=reason, temporary=temporary)
class SourceFetcher():
"""SourceFetcher()
This interface exists so that a source that downloads from multiple
places (e.g. a git source with submodules) has a consistent interface for
fetching and substituting aliases.
*Since: 1.2*
.. attention::
When implementing a SourceFetcher, remember to call
:func:`Source.mark_download_url() `
for every URL found in the configuration data at
:func:`Plugin.configure() ` time.
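For illustration only, a minimal fetcher might look roughly like this
(the ``download()`` helper is assumed and not part of BuildStream)::

  class ExampleFetcher(SourceFetcher):

      def __init__(self, source, url):
          super().__init__()
          self.source = source
          self.original_url = url
          self.mark_download_url(url)

      def fetch(self, alias_override=None):
          # Resolve the alias, honouring any override for mirroring
          url = self.source.translate_url(self.original_url,
                                          alias_override=alias_override,
                                          primary=False)
          download(url, self.source.get_mirror_directory())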
"""
def __init__(self):
self.__alias = None
#############################################################
# Abstract Methods #
#############################################################
def fetch(self, alias_override=None, **kwargs):
"""Fetch remote sources and mirror them locally, ensuring at least
that the specific reference is cached locally.
Args:
alias_override (str): The alias to use instead of the default one
defined by the :ref:`aliases ` field
in the project's config.
Raises:
:class:`.SourceError`
Implementors should raise :class:`.SourceError` if there is some
network error or if the source reference could not be matched.
"""
raise ImplError("SourceFetcher '{}' does not implement fetch()".format(type(self)))
#############################################################
# Public Methods #
#############################################################
def mark_download_url(self, url):
"""Identifies the URL that this SourceFetcher uses to download
This must be called during the fetcher's initialization
Args:
url (str): The url used to download.
"""
self.__alias = _extract_alias(url)
#############################################################
# Private Methods used in BuildStream #
#############################################################
# Returns the alias used by this fetcher
def _get_alias(self):
return self.__alias
class Source(Plugin):
"""Source()
Base Source class.
All Sources derive from this class, this interface defines how
the core will be interacting with Sources.
"""
__defaults = {} # The defaults from the project
__defaults_set = False # Flag, in case there are no defaults at all
BST_REQUIRES_PREVIOUS_SOURCES_TRACK = False
"""Whether access to previous sources is required during track
When set to True:
* all sources listed before this source in the given element will be
fetched before this source is tracked
* Source.track() will be called with an additional keyword argument
`previous_sources_dir` where previous sources will be staged
* this source can not be the first source for an element
*Since: 1.4*
"""
BST_REQUIRES_PREVIOUS_SOURCES_FETCH = False
"""Whether access to previous sources is required during fetch
When set to True:
* all sources listed before this source in the given element will be
fetched before this source is fetched
* Source.fetch() will be called with an additional keyword argument
`previous_sources_dir` where previous sources will be staged
* this source can not be the first source for an element
*Since: 1.4*
"""
def __init__(self, context, project, meta, *, alias_override=None, unique_id=None):
provenance = _yaml.node_get_provenance(meta.config)
super().__init__("{}-{}".format(meta.element_name, meta.element_index),
context, project, provenance, "source", unique_id=unique_id)
self.__element_name = meta.element_name # The name of the element owning this source
self.__element_index = meta.element_index # The index of the source in the owning element's source list
self.__element_kind = meta.element_kind # The kind of the element owning this source
self.__directory = meta.directory # Staging relative directory
self.__consistency = Consistency.INCONSISTENT # Cached consistency state
# The alias_override is only set on a re-instantiated Source
self.__alias_override = alias_override # Tuple of alias and its override to use instead
self.__expected_alias = None # The primary alias
self.__marked_urls = set() # Set of marked download URLs
# FIXME: Reconstruct a MetaSource from a Source instead of storing it.
self.__meta = meta # MetaSource stored so we can copy this source later.
# Collect the composited element configuration and
# ask the element to configure itself.
self.__init_defaults(meta)
self.__config = self.__extract_config(meta)
self.__first_pass = meta.first_pass
self._configure(self.__config)
COMMON_CONFIG_KEYS = ['kind', 'directory']
"""Common source config keys
Source config keys that must not be accessed in configure(), and
should be checked for using node_validate().
"""
#############################################################
# Abstract Methods #
#############################################################
def get_consistency(self):
"""Report whether the source has a resolved reference
Returns:
(:class:`.Consistency`): The source consistency
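For example, an implementation might decide the state roughly as follows
(``self._is_cached()`` being a hypothetical helper of the plugin)::

  def get_consistency(self):
      if self.ref is None:
          return Consistency.INCONSISTENT
      if self._is_cached(self.ref):
          return Consistency.CACHED
      return Consistency.RESOLVED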
"""
raise ImplError("Source plugin '{}' does not implement get_consistency()".format(self.get_kind()))
def load_ref(self, node):
"""Loads the *ref* for this Source from the specified *node*.
Args:
node (dict): The YAML node to load the ref from
.. note::
The *ref* for the Source is expected to be read at
:func:`Plugin.configure() ` time,
this will only be used for loading refs from alternative locations
than in the `element.bst` file where the given Source object has
been declared.
*Since: 1.2*
"""
raise ImplError("Source plugin '{}' does not implement load_ref()".format(self.get_kind()))
def get_ref(self):
"""Fetch the internal ref, however it is represented
Returns:
(simple object): The internal source reference, or ``None``
.. note::
The reference is the user provided (or track resolved) value
the plugin uses to represent a specific input, like a commit
in a VCS or a tarball's checksum. Usually the reference is a string,
but the plugin may choose to represent it with a tuple or such.
Implementations *must* return a ``None`` value in the case that
the ref was not loaded. E.g. a ``(None, None)`` tuple is not acceptable.
"""
raise ImplError("Source plugin '{}' does not implement get_ref()".format(self.get_kind()))
def set_ref(self, ref, node):
"""Applies the internal ref, however it is represented
Args:
ref (simple object): The internal source reference to set, or ``None``
node (dict): The same dictionary which was previously passed
to :func:`Plugin.configure() `
See :func:`Source.get_ref() `
for a discussion on the *ref* parameter.
.. note::
Implementors must support the special ``None`` value here to
allow clearing any existing ref.
"""
raise ImplError("Source plugin '{}' does not implement set_ref()".format(self.get_kind()))
def track(self, **kwargs):
"""Resolve a new ref from the plugin's track option
Args:
previous_sources_dir (str): directory where previous sources are staged.
Note that this keyword argument is available only when
:attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_TRACK`
is set to True.
Returns:
(simple object): A new internal source reference, or None
If the backend in question supports resolving references from
a symbolic tracking branch or tag, then this should be implemented
to perform this task on behalf of :ref:`bst track `
commands.
This usually requires fetching new content from a remote origin
to see if a new ref has appeared for your branch or tag. If the
backend store allows one to query for a new ref from symbolic
tracking data without downloading, then that is desirable.
See :func:`Source.get_ref() `
for a discussion on the *ref* parameter.
"""
# Allow a non implementation
return None
def fetch(self, **kwargs):
"""Fetch remote sources and mirror them locally, ensuring at least
that the specific reference is cached locally.
Args:
previous_sources_dir (str): directory where previous sources are staged.
Note that this keyword argument is available only when
:attr:`~buildstream.source.Source.BST_REQUIRES_PREVIOUS_SOURCES_FETCH`
is set to True.
Raises:
:class:`.SourceError`
Implementors should raise :class:`.SourceError` if there is some
network error or if the source reference could not be matched.
"""
raise ImplError("Source plugin '{}' does not implement fetch()".format(self.get_kind()))
def stage(self, directory):
"""Stage the sources to a directory
Args:
directory (str): Path to stage the source
Raises:
:class:`.SourceError`
Implementors should assume that *directory* already exists
and stage already cached sources to the passed directory.
Implementors should raise :class:`.SourceError` when encountering
some system error.
"""
raise ImplError("Source plugin '{}' does not implement stage()".format(self.get_kind()))
def init_workspace(self, directory):
"""Initialises a new workspace
Args:
directory (str): Path of the workspace to init
Raises:
:class:`.SourceError`
Default implementation is to call
:func:`Source.stage() `.
Implementors overriding this method should assume that *directory*
already exists.
Implementors should raise :class:`.SourceError` when encountering
some system error.
"""
self.stage(directory)
def get_source_fetchers(self):
"""Get the objects that are used for fetching
If this source doesn't download from multiple URLs,
returning None and falling back on the default behaviour
is recommended.
Returns:
iterable: The Source's SourceFetchers, if any.
.. note::
Implementors can implement this as a generator.
The :func:`SourceFetcher.fetch() `
method will be called on the returned fetchers one by one,
before consuming the next fetcher in the list.
*Since: 1.2*
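For illustration, this is often implemented as a generator over
previously configured URLs (``self.submodule_urls`` and
``ExampleFetcher`` are hypothetical)::

  def get_source_fetchers(self):
      for url in self.submodule_urls:
          yield ExampleFetcher(self, url)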
"""
return []
def validate_cache(self):
"""Implement any validations once we know the sources are cached
This is guaranteed to be called only once for a given session
once the sources are known to be
:attr:`Consistency.CACHED `.
If source tracking is enabled in the session for this source,
then this will only be called if the sources become cached after
tracking completes.
*Since: 1.4*
"""
#############################################################
# Public Methods #
#############################################################
def get_mirror_directory(self):
"""Fetches the directory where this source should store things
Returns:
(str): The directory belonging to this source
"""
# Create the directory if it doesn't exist
context = self._get_context()
directory = os.path.join(context.sourcedir, self.get_kind())
os.makedirs(directory, exist_ok=True)
return directory
def translate_url(self, url, *, alias_override=None, primary=True):
"""Translates the given url which may be specified with an alias
into a fully qualified url.
Args:
url (str): A URL, which may be using an alias
alias_override (str): Optionally, a URI to override the alias with. (*Since: 1.2*)
primary (bool): Whether this is the primary URL for the source. (*Since: 1.2*)
Returns:
str: The fully qualified URL, with aliases resolved
.. note::
This must be called for every URL in the configuration during
:func:`Plugin.configure() ` if
:func:`Source.mark_download_url() `
is not called.
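For example, a plugin would typically resolve its configured URL at
configure time (the ``url`` configuration key shown here is
hypothetical)::

  self.original_url = self.node_get_member(node, str, 'url')
  self.url = self.translate_url(self.original_url)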
"""
# Ensure that the download URL is also marked
self.mark_download_url(url, primary=primary)
# Alias overriding can happen explicitly (by command-line) or
# implicitly (the Source being constructed with an __alias_override).
if alias_override or self.__alias_override:
url_alias, url_body = url.split(utils._ALIAS_SEPARATOR, 1)
if url_alias:
if alias_override:
url = alias_override + url_body
else:
# Implicit alias overrides may only be done for one
# specific alias, so that sources that fetch from multiple
# URLs and use different aliases default to only overriding
# one alias, rather than getting confused.
override_alias = self.__alias_override[0]
override_url = self.__alias_override[1]
if url_alias == override_alias:
url = override_url + url_body
return url
else:
project = self._get_project()
return project.translate_url(url, first_pass=self.__first_pass)
def mark_download_url(self, url, *, primary=True):
"""Identifies the URL that this Source uses to download
Args:
url (str): The URL used to download
primary (bool): Whether this is the primary URL for the source
.. note::
This must be called for every URL in the configuration during
:func:`Plugin.configure() ` if
:func:`Source.translate_url() `
is not called.
*Since: 1.2*
"""
# Only mark the Source level aliases on the main instance, not in
# a reinstantiated instance in mirroring.
if not self.__alias_override:
if primary:
expected_alias = _extract_alias(url)
assert (self.__expected_alias is None or
self.__expected_alias == expected_alias), \
"Primary URL marked twice with different URLs"
self.__expected_alias = expected_alias
# Enforce proper behaviour of plugins by ensuring that all
# aliased URLs have been marked at Plugin.configure() time.
#
if self._get_configuring():
# Record marked urls while configuring
#
self.__marked_urls.add(url)
else:
# If an unknown aliased URL is seen after configuring,
# this is an error.
#
# It is still possible that a URL that was not mentioned
# in the element configuration can be marked; this is
# the case for git submodules which might be automatically
# discovered.
#
assert (url in self.__marked_urls or not _extract_alias(url)), \
"URL was not seen at configure time: {}".format(url)
alias = _extract_alias(url)
# Issue a (fatal-able) warning if the source used a URL without specifying an alias
if not alias:
self.warn(
"{}: Use of unaliased source download URL: {}".format(self, url),
warning_token=CoreWarnings.UNALIASED_URL,
)
# If there is an alias in use, ensure that it exists in the project
if alias:
project = self._get_project()
alias_uri = project.get_alias_uri(alias, first_pass=self.__first_pass)
if alias_uri is None:
raise SourceError(
"{}: Invalid alias '{}' specified in URL: {}".format(self, alias, url),
reason="invalid-source-alias",
)
def get_project_directory(self):
"""Fetch the project base directory
This is useful for sources which need to load resources
stored somewhere inside the project.
Returns:
str: The project base directory
"""
project = self._get_project()
return project.directory
@contextmanager
def tempdir(self):
"""Context manager for working in a temporary directory
Yields:
(str): A path to a temporary directory
This should be used by source plugins directly instead of the tempfile
module. This one will automatically cleanup in case of termination by
catching the signal before os._exit(). It will also use the 'mirror
directory' as expected for a source.
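For example (``extract_archive()`` and ``self.archive_path`` are
hypothetical)::

  with self.tempdir() as tempdir:
      # Work in a temporary directory under the mirror directory,
      # which is cleaned up automatically even on termination.
      extract_archive(self.archive_path, tempdir)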
"""
mirrordir = self.get_mirror_directory()
with utils._tempdir(dir=mirrordir) as tempdir:
yield tempdir
#############################################################
# Private Methods used in BuildStream #
#############################################################
# Wrapper around preflight() method
#
def _preflight(self):
try:
self.preflight()
except BstError as e:
# Prepend provenance to the error
raise SourceError("{}: {}".format(self, e), reason=e.reason) from e
# Update cached consistency for a source
#
# This must be called whenever the state of a source may have changed.
#
def _update_state(self):
if self.__consistency < Consistency.CACHED:
# Source consistency interrogations are silent.
context = self._get_context()
with context.silence():
self.__consistency = self.get_consistency()
# Give the Source an opportunity to validate the cached
# sources as soon as the Source becomes Consistency.CACHED.
if self.__consistency == Consistency.CACHED:
self.validate_cache()
# Return cached consistency
#
def _get_consistency(self):
return self.__consistency
# Wrapper function around plugin provided fetch method
#
# Args:
# previous_sources (list): List of Sources listed prior to this source
#
def _fetch(self, previous_sources):
if self.BST_REQUIRES_PREVIOUS_SOURCES_FETCH:
self.__ensure_previous_sources(previous_sources)
with self.tempdir() as staging_directory:
for src in previous_sources:
src._stage(staging_directory)
self.__do_fetch(previous_sources_dir=self.__ensure_directory(staging_directory))
else:
self.__do_fetch()
# Wrapper for stage() api which gives the source
# plugin a fully constructed path considering the
# 'directory' option
#
def _stage(self, directory):
staging_directory = self.__ensure_directory(directory)
self.stage(staging_directory)
# Wrapper for init_workspace()
def _init_workspace(self, directory):
directory = self.__ensure_directory(directory)
self.init_workspace(directory)
# _get_unique_key():
#
# Wrapper for get_unique_key() api
#
# Args:
# include_source (bool): Whether to include the delegated source key
#
def _get_unique_key(self, include_source):
key = {}
key['directory'] = self.__directory
if include_source:
key['unique'] = self.get_unique_key()
return key
# _project_refs():
#
# Gets the appropriate ProjectRefs object for this source,
# which depends on whether the owning element is a junction
#
# Args:
# project (Project): The project to check
#
def _project_refs(self, project):
element_kind = self.__element_kind
if element_kind == 'junction':
return project.junction_refs
return project.refs
# _load_ref():
#
# Loads the ref for the said source.
#
# Raises:
# (SourceError): If the source does not implement load_ref()
#
# Returns:
# (ref): A redundant ref specified inline for a project.refs using project
#
# This is partly a wrapper around `Source.load_ref()`, it will decide
# where to load the ref from depending on which project the source belongs
# to and whether that project uses a project.refs file.
#
# Note the return value is used to construct a summarized warning in the
# case that the toplevel project uses project.refs and also lists refs
# which will be ignored.
#
def _load_ref(self):
context = self._get_context()
project = self._get_project()
toplevel = context.get_toplevel_project()
redundant_ref = None
element_name = self.__element_name
element_idx = self.__element_index
def do_load_ref(node):
try:
self.load_ref(ref_node)
except ImplError as e:
raise SourceError("{}: Storing refs in project.refs is not supported by '{}' sources"
.format(self, self.get_kind()),
reason="unsupported-load-ref") from e
# If the main project overrides the ref, use the override
if project is not toplevel and toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS:
refs = self._project_refs(toplevel)
ref_node = refs.lookup_ref(project.name, element_name, element_idx)
if ref_node is not None:
do_load_ref(ref_node)
return redundant_ref
# If the project itself uses project.refs, clear the ref which
# was already loaded via Source.configure(), as this would
# violate the rule of refs being either in project.refs or in
# the elements themselves.
#
if project.ref_storage == ProjectRefStorage.PROJECT_REFS:
# First warn if there is a ref already loaded, and reset it
redundant_ref = self.get_ref()
if redundant_ref is not None:
self.set_ref(None, {})
# Try to load the ref
refs = self._project_refs(project)
ref_node = refs.lookup_ref(project.name, element_name, element_idx)
if ref_node is not None:
do_load_ref(ref_node)
return redundant_ref
# _set_ref()
#
# Persists the ref for this source. This will decide where to save the
# ref, or refuse to persist it, depending on active ref-storage project
# settings.
#
# Args:
# new_ref (simple object): The new reference to save
# save (bool): Whether to write the new reference to file or not
#
# Returns:
# (bool): Whether the ref has changed
#
# Raises:
# (SourceError): In the case we encounter errors saving a file to disk
#
def _set_ref(self, new_ref, *, save):
context = self._get_context()
project = self._get_project()
toplevel = context.get_toplevel_project()
toplevel_refs = self._project_refs(toplevel)
provenance = self._get_provenance()
element_name = self.__element_name
element_idx = self.__element_index
#
# Step 1 - Obtain the node
#
if project is toplevel:
if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS:
node = toplevel_refs.lookup_ref(project.name, element_name, element_idx, write=True)
else:
node = provenance.node
else:
if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS:
node = toplevel_refs.lookup_ref(project.name, element_name, element_idx, write=True)
else:
node = {}
#
# Step 2 - Set the ref in memory, and determine changed state
#
current_ref = self.get_ref() # pylint: disable=assignment-from-no-return
# Set the ref regardless of whether it changed, the
# TrackQueue() will want to update a specific node with
# the ref, regardless of whether the original has changed.
self.set_ref(new_ref, node)
if current_ref == new_ref or not save:
# Note: We do not look for and propagate changes at this point
# which might result in desync depending on whether something changes about
# tracking in the future. For now, this is quite safe.
return False
def do_save_refs(refs):
try:
refs.save()
except OSError as e:
raise SourceError("{}: Error saving source reference to 'project.refs': {}"
.format(self, e),
reason="save-ref-error") from e
#
# Step 3 - Apply the change in project data
#
if toplevel.ref_storage == ProjectRefStorage.PROJECT_REFS:
do_save_refs(toplevel_refs)
else:
if provenance.filename.project is toplevel:
# Save the ref in the originating file
#
try:
_yaml.dump(provenance.toplevel, provenance.filename.name)
except OSError as e:
raise SourceError("{}: Error saving source reference to '{}': {}"
.format(self, provenance.filename.name, e),
reason="save-ref-error") from e
elif provenance.filename.project is project:
self.warn("{}: Not persisting new reference in junctioned project".format(self))
elif provenance.filename.project is None:
assert provenance.filename.name == ''
assert provenance.filename.shortname == ''
raise SourceError("{}: Error saving source reference to synthetic node."
.format(self))
else:
raise SourceError("{}: Cannot track source in a fragment from a junction"
.format(provenance.filename.shortname),
reason="tracking-junction-fragment")
return True
# Wrapper for track()
#
# Args:
# previous_sources (list): List of Sources listed prior to this source
#
def _track(self, previous_sources):
if self.BST_REQUIRES_PREVIOUS_SOURCES_TRACK:
self.__ensure_previous_sources(previous_sources)
with self.tempdir() as staging_directory:
for src in previous_sources:
src._stage(staging_directory)
new_ref = self.__do_track(previous_sources_dir=self.__ensure_directory(staging_directory))
else:
new_ref = self.__do_track()
current_ref = self.get_ref()
if new_ref is None:
# No tracking, keep current ref
new_ref = current_ref
if current_ref != new_ref:
self.info("Found new revision: {}".format(new_ref))
# Save ref in local process for subsequent sources
self._set_ref(new_ref, save=False)
return new_ref
# _requires_previous_sources()
#
# If a plugin requires access to previous sources at track or fetch time,
# then it cannot be the first source of an element.
#
# Returns:
# (bool): Whether this source requires access to previous sources
#
def _requires_previous_sources(self):
return self.BST_REQUIRES_PREVIOUS_SOURCES_TRACK or self.BST_REQUIRES_PREVIOUS_SOURCES_FETCH
# Returns the alias if it's defined in the project
def _get_alias(self):
alias = self.__expected_alias
project = self._get_project()
if project.get_alias_uri(alias, first_pass=self.__first_pass):
# The alias must already be defined in the project's aliases
# otherwise http://foo gets treated like it contains an alias
return alias
else:
return None
#############################################################
# Local Private Methods #
#############################################################
# __clone_for_uri()
#
# Clone the source with an alternative URI setup for the alias
# which this source uses.
#
# This is used for iteration over source mirrors.
#
# Args:
# uri (str): The alternative URI for this source's alias
#
# Returns:
# (Source): A new clone of this Source, with the specified URI
# as the value of the alias this Source has marked as
# primary with either mark_download_url() or
# translate_url().
#
def __clone_for_uri(self, uri):
project = self._get_project()
context = self._get_context()
alias = self._get_alias()
source_kind = type(self)
clone = source_kind(context, project, self.__meta,
alias_override=(alias, uri),
unique_id=self._unique_id)
# Do the necessary post instantiation routines here
#
clone._preflight()
clone._load_ref()
clone._update_state()
return clone
# Tries to call fetch for every mirror, stopping once it succeeds
def __do_fetch(self, **kwargs):
project = self._get_project()
source_fetchers = self.get_source_fetchers()
if source_fetchers:
for fetcher in source_fetchers:
alias = fetcher._get_alias()
success = False
for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
try:
fetcher.fetch(uri)
# FIXME: Need to consider temporary vs. permanent failures,
# and how this works with retries.
except BstError as e:
last_error = e
continue
success = True
break
if not success:
raise last_error # pylint: disable=used-before-assignment
else:
alias = self._get_alias()
if self.__first_pass:
mirrors = project.first_pass_config.mirrors
else:
mirrors = project.config.mirrors
if not mirrors or not alias:
self.fetch(**kwargs)
return
for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
new_source = self.__clone_for_uri(uri)
try:
new_source.fetch(**kwargs)
# FIXME: Need to consider temporary vs. permanent failures,
# and how this works with retries.
except BstError as e:
last_error = e
continue
return
raise last_error
# Tries to call track for every mirror, stopping once it succeeds
def __do_track(self, **kwargs):
project = self._get_project()
alias = self._get_alias()
if self.__first_pass:
mirrors = project.first_pass_config.mirrors
else:
mirrors = project.config.mirrors
# If there are no mirrors, or no aliases to replace, there's nothing to do here.
if not mirrors or not alias:
return self.track(**kwargs)
# NOTE: We are assuming here that tracking only requires substituting the
# first alias used
for uri in reversed(project.get_alias_uris(alias, first_pass=self.__first_pass)):
new_source = self.__clone_for_uri(uri)
try:
ref = new_source.track(**kwargs) # pylint: disable=assignment-from-none
# FIXME: Need to consider temporary vs. permanent failures,
# and how this works with retries.
except BstError as e:
last_error = e
continue
return ref
raise last_error # pylint: disable=used-before-assignment
# Ensures a fully constructed path and returns it
def __ensure_directory(self, directory):
if self.__directory is not None:
directory = os.path.join(directory, self.__directory.lstrip(os.sep))
try:
os.makedirs(directory, exist_ok=True)
except OSError as e:
raise SourceError("Failed to create staging directory: {}"
.format(e),
reason="ensure-stage-dir-fail") from e
return directory
def __init_defaults(self, meta):
if not self.__defaults_set:
project = self._get_project()
if meta.first_pass:
sources = project.first_pass_config.source_overrides
else:
sources = project.source_overrides
type(self).__defaults = sources.get(self.get_kind(), {})
type(self).__defaults_set = True
# This will resolve the final configuration to be handed
# off to source.configure()
#
def __extract_config(self, meta):
config = _yaml.node_get(self.__defaults, Mapping, 'config', default_value={})
config = _yaml.node_chain_copy(config)
_yaml.composite(config, meta.config)
_yaml.node_final_assertions(config)
return config
# Ensures that previous sources have been tracked and fetched.
#
def __ensure_previous_sources(self, previous_sources):
for index, src in enumerate(previous_sources):
# BuildStream should track sources in the order they appear so
# previous sources should never be in an inconsistent state
assert src.get_consistency() != Consistency.INCONSISTENT
if src.get_consistency() == Consistency.RESOLVED:
src._fetch(previous_sources[0:index])
def _extract_alias(url):
parts = url.split(utils._ALIAS_SEPARATOR, 1)
if len(parts) > 1 and not parts[0].lower() in utils._URI_SCHEMES:
return parts[0]
else:
return ""
buildstream-1.6.9/buildstream/types.py 0000664 0000000 0000000 00000011527 14375152700 0020105 0 ustar 00root root 0000000 0000000 #
# Copyright (C) 2018 Bloomberg LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Tristan Van Berkom
# Jim MacArthur