Compare revisions
Commits on Source (214)
Showing with 1858 additions and 738 deletions
...@@ -4,11 +4,15 @@ include = ...@@ -4,11 +4,15 @@ include =
*/buildstream/* */buildstream/*
omit = omit =
# Omit profiling helper module # Omit some internals
*/buildstream/_profile.py */buildstream/_profile.py
*/buildstream/__main__.py
*/buildstream/_version.py
# Omit generated code # Omit generated code
*/buildstream/_protos/* */buildstream/_protos/*
*/.eggs/* */.eggs/*
# Omit .tox directory
*/.tox/*
[report] [report]
show_missing = True show_missing = True
......
...@@ -13,10 +13,12 @@ tests/**/*.pyc ...@@ -13,10 +13,12 @@ tests/**/*.pyc
integration-cache/ integration-cache/
tmp tmp
.coverage .coverage
.coverage-reports/
.coverage.* .coverage.*
.cache .cache
.pytest_cache/ .pytest_cache/
*.bst/ *.bst/
.tox/
# Pycache, in case buildstream is run directly from within the source # Pycache, in case buildstream is run directly from within the source
# tree # tree
......
image: buildstream/testsuite-debian:9-master-123-7ce6581b image: buildstream/testsuite-debian:9-5da27168-32c47d1c
cache: cache:
key: "$CI_JOB_NAME-" key: "$CI_JOB_NAME-"
...@@ -6,49 +6,14 @@ cache: ...@@ -6,49 +6,14 @@ cache:
- cache/ - cache/
stages: stages:
- prepare
- test - test
- post - post
variables: variables:
PYTEST_ADDOPTS: "--color=yes" PYTEST_ADDOPTS: "--color=yes"
INTEGRATION_CACHE: "${CI_PROJECT_DIR}/cache/integration-cache" INTEGRATION_CACHE: "${CI_PROJECT_DIR}/cache/integration-cache"
TEST_COMMAND: 'python3 setup.py test --index-url invalid://uri --addopts --integration' TEST_COMMAND: "tox -- --color=yes --integration"
COVERAGE_PREFIX: "${CI_JOB_NAME}."
#####################################################
# Prepare stage #
#####################################################
# Create a source distribution
#
source_dist:
stage: prepare
script:
# Generate the source distribution tarball
#
- python3 setup.py sdist
- tar -ztf dist/*
- tarball=$(cd dist && echo $(ls *))
# Verify that the source distribution tarball can be installed correctly
#
- pip3 install dist/*.tar.gz
- bst --version
# unpack tarball as `dist/buildstream` directory
- |
cat > dist/unpack.sh << EOF
#!/bin/sh
tar -zxf ${tarball}
mv ${tarball%.tar.gz} buildstream
EOF
# Make our helpers executable
- chmod +x dist/unpack.sh
artifacts:
paths:
- dist/
##################################################### #####################################################
...@@ -60,54 +25,53 @@ source_dist: ...@@ -60,54 +25,53 @@ source_dist:
.tests-template: &tests .tests-template: &tests
stage: test stage: test
variables:
COVERAGE_DIR: coverage-linux
before_script: before_script:
# Diagnostics # Diagnostics
- mount - mount
- df -h - df -h
# Unpack
- cd dist && ./unpack.sh
- cd buildstream
script: script:
- useradd -Um buildstream - useradd -Um buildstream
- chown -R buildstream:buildstream . - chown -R buildstream:buildstream .
# Run the tests from the source distribution, We run as a simple # Run the tests as a simple user to test for permission issues
# user to test for permission issues
- su buildstream -c "${TEST_COMMAND}" - su buildstream -c "${TEST_COMMAND}"
after_script: after_script:
# Collect our reports
- mkdir -p ${COVERAGE_DIR}
- cp dist/buildstream/.coverage ${COVERAGE_DIR}/coverage."${CI_JOB_NAME}"
except: except:
- schedules - schedules
artifacts: artifacts:
paths: paths:
- ${COVERAGE_DIR} - .coverage-reports
tests-debian-9: tests-debian-9:
image: buildstream/testsuite-debian:9-master-123-7ce6581b image: buildstream/testsuite-debian:9-5da27168-32c47d1c
<<: *tests <<: *tests
tests-fedora-27: tests-fedora-27:
image: buildstream/testsuite-fedora:27-master-123-7ce6581b image: buildstream/testsuite-fedora:27-5da27168-32c47d1c
<<: *tests <<: *tests
tests-fedora-28: tests-fedora-28:
image: buildstream/testsuite-fedora:28-master-123-7ce6581b image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests <<: *tests
tests-ubuntu-18.04: tests-ubuntu-18.04:
image: buildstream/testsuite-ubuntu:18.04-master-123-7ce6581b image: buildstream/testsuite-ubuntu:18.04-5da27168-32c47d1c
<<: *tests
tests-python-3.7-stretch:
image: buildstream/testsuite-python:3.7-stretch-a60f0c39
<<: *tests <<: *tests
variables:
# Note that we explicitly specify TOXENV in this case because this
# image has both 3.6 and 3.7 versions. python3.6 cannot be removed because
# some of our base dependencies declare it as their runtime dependency.
TOXENV: py37
overnight-fedora-28-aarch64: overnight-fedora-28-aarch64:
image: buildstream/testsuite-fedora:aarch64-28-master-123-7ce6581b image: buildstream/testsuite-fedora:aarch64-28-5da27168-32c47d1c
tags: tags:
- aarch64 - aarch64
<<: *tests <<: *tests
...@@ -116,15 +80,20 @@ overnight-fedora-28-aarch64: ...@@ -116,15 +80,20 @@ overnight-fedora-28-aarch64:
except: [] except: []
only: only:
- schedules - schedules
before_script:
# grpcio needs to be compiled from source on aarch64 so we additionally
# need a C++ compiler here.
# FIXME: Ideally this would be provided by the base image. This will be
# unblocked by https://gitlab.com/BuildStream/buildstream-docker-images/issues/34
- dnf install -y gcc-c++
tests-unix: tests-unix:
# Use fedora here, to a) run a test on fedora and b) ensure that we # Use fedora here, to a) run a test on fedora and b) ensure that we
# can get rid of ostree - this is not possible with debian-8 # can get rid of ostree - this is not possible with debian-8
image: buildstream/testsuite-fedora:27-master-123-7ce6581b image: buildstream/testsuite-fedora:27-5da27168-32c47d1c
<<: *tests <<: *tests
variables: variables:
BST_FORCE_BACKEND: "unix" BST_FORCE_BACKEND: "unix"
COVERAGE_DIR: coverage-unix
script: script:
...@@ -137,10 +106,9 @@ tests-unix: ...@@ -137,10 +106,9 @@ tests-unix:
# Since the unix platform is required to run as root, no user change required # Since the unix platform is required to run as root, no user change required
- ${TEST_COMMAND} - ${TEST_COMMAND}
tests-fedora-missing-deps: tests-fedora-missing-deps:
# Ensure that tests behave nicely while missing bwrap and ostree # Ensure that tests behave nicely while missing bwrap and ostree
image: buildstream/testsuite-fedora:28-master-123-7ce6581b image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests <<: *tests
script: script:
...@@ -155,23 +123,44 @@ tests-fedora-missing-deps: ...@@ -155,23 +123,44 @@ tests-fedora-missing-deps:
- ${TEST_COMMAND} - ${TEST_COMMAND}
tests-fedora-update-deps:
# Check if the tests pass after updating requirements to their latest
# allowed version.
allow_failure: true
image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests
script:
- useradd -Um buildstream
- chown -R buildstream:buildstream .
- make --always-make --directory requirements
- cat requirements/*.txt
- su buildstream -c "${TEST_COMMAND}"
# Lint separately from testing
lint:
stage: test
before_script:
# Diagnostics
- python3 --version
script:
- tox -e lint
except:
- schedules
# Automatically build documentation for every commit, we want to know # Automatically build documentation for every commit, we want to know
# if building documentation fails even if we're not deploying it. # if building documentation fails even if we're not deploying it.
# Note: We still do not enforce a consistent installation of python3-sphinx,
# as it will significantly grow the backing image.
docs: docs:
stage: test stage: test
variables:
BST_FORCE_SESSION_REBUILD: 1
script: script:
- export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources" - env BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources" tox -e docs
# Currently sphinx_rtd_theme does not support Sphinx >1.8, this breaks search functionality - mv doc/build/html public
- pip3 install sphinx==1.7.9
- pip3 install sphinx-click
- pip3 install sphinx_rtd_theme
- cd dist && ./unpack.sh && cd buildstream
- make BST_FORCE_SESSION_REBUILD=1 -C doc
- cd ../..
- mv dist/buildstream/doc/build/html public
except: except:
- schedules - schedules
artifacts: artifacts:
...@@ -182,8 +171,8 @@ docs: ...@@ -182,8 +171,8 @@ docs:
stage: test stage: test
variables: variables:
BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
BST_EXT_REF: 573843768f4d297f85dc3067465b3c7519a8dcc3 # 0.7.0 BST_EXT_REF: 0.9.0-0-g63a19e8068bd777bd9cd59b1a9442f9749ea5a85
FD_SDK_REF: 612f66e218445eee2b1a9d7dd27c9caba571612e # freedesktop-sdk-18.08.19-54-g612f66e2 FD_SDK_REF: freedesktop-sdk-18.08.25-0-g250939d465d6dd7768a215f1fa59c4a3412fc337
before_script: before_script:
- | - |
mkdir -p "${HOME}/.config" mkdir -p "${HOME}/.config"
...@@ -191,7 +180,8 @@ docs: ...@@ -191,7 +180,8 @@ docs:
scheduler: scheduler:
fetchers: 2 fetchers: 2
EOF EOF
- (cd dist && ./unpack.sh && cd buildstream && pip3 install .) - pip3 install -r requirements/requirements.txt -r requirements/plugin-requirements.txt
- pip3 install --no-index .
- pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext - pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
- git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git - git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
- git -C freedesktop-sdk checkout ${FD_SDK_REF} - git -C freedesktop-sdk checkout ${FD_SDK_REF}
...@@ -274,30 +264,28 @@ coverage: ...@@ -274,30 +264,28 @@ coverage:
stage: post stage: post
coverage: '/TOTAL +\d+ +\d+ +(\d+\.\d+)%/' coverage: '/TOTAL +\d+ +\d+ +(\d+\.\d+)%/'
script: script:
- cd dist && ./unpack.sh && cd buildstream - cp -a .coverage-reports/ ./coverage-sources
- pip3 install --no-index . - tox -e coverage
- mkdir report - cp -a .coverage-reports/ ./coverage-report
- cd report
- cp ../../../coverage-unix/coverage.* .
- cp ../../../coverage-linux/coverage.* .
- ls coverage.*
- coverage combine --rcfile=../.coveragerc -a coverage.*
- coverage report --rcfile=../.coveragerc -m
dependencies: dependencies:
- tests-debian-9 - tests-debian-9
- tests-fedora-27 - tests-fedora-27
- tests-fedora-28 - tests-fedora-28
- tests-fedora-missing-deps
- tests-ubuntu-18.04
- tests-unix - tests-unix
- source_dist
except: except:
- schedules - schedules
artifacts:
paths:
- coverage-sources/
- coverage-report/
# Deploy, only for merges which land on master branch. # Deploy, only for merges which land on master branch.
# #
pages: pages:
stage: post stage: post
dependencies: dependencies:
- source_dist
- docs - docs
variables: variables:
ACME_DIR: public/.well-known/acme-challenge ACME_DIR: public/.well-known/acme-challenge
......
...@@ -553,7 +553,7 @@ One problem which arises from this is that we end up having symbols ...@@ -553,7 +553,7 @@ One problem which arises from this is that we end up having symbols
which are *public* according to the :ref:`rules discussed in the previous section which are *public* according to the :ref:`rules discussed in the previous section
<contributing_public_and_private>`, but must be hidden away from the <contributing_public_and_private>`, but must be hidden away from the
*"Public API Surface"*. For example, BuildStream internal classes need *"Public API Surface"*. For example, BuildStream internal classes need
to invoke methods on the ``Element`` and ``Source`` classes, wheras these to invoke methods on the ``Element`` and ``Source`` classes, whereas these
methods need to be hidden from the *"Public API Surface"*. methods need to be hidden from the *"Public API Surface"*.
This is where BuildStream deviates from the PEP-8 standard for public This is where BuildStream deviates from the PEP-8 standard for public
...@@ -631,7 +631,7 @@ An element plugin will derive from Element by importing:: ...@@ -631,7 +631,7 @@ An element plugin will derive from Element by importing::
from buildstream import Element from buildstream import Element
When importing utilities specifically, dont import function names When importing utilities specifically, don't import function names
from there, instead import the module itself:: from there, instead import the module itself::
from . import utils from . import utils
...@@ -737,7 +737,7 @@ Abstract methods ...@@ -737,7 +737,7 @@ Abstract methods
~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~
In BuildStream, an *"Abstract Method"* is a bit of a misnomer and does In BuildStream, an *"Abstract Method"* is a bit of a misnomer and does
not match up to how Python defines abstract methods, we need to seek out not match up to how Python defines abstract methods, we need to seek out
a new nomanclature to refer to these methods. a new nomenclature to refer to these methods.
In Python, an *"Abstract Method"* is a method which **must** be In Python, an *"Abstract Method"* is a method which **must** be
implemented by a subclass, whereas all methods in Python can be implemented by a subclass, whereas all methods in Python can be
...@@ -960,7 +960,7 @@ possible, and avoid any cyclic relationships in modules. ...@@ -960,7 +960,7 @@ possible, and avoid any cyclic relationships in modules.
For instance, the ``Source`` objects are owned by ``Element`` For instance, the ``Source`` objects are owned by ``Element``
objects in the BuildStream data model, and as such the ``Element`` objects in the BuildStream data model, and as such the ``Element``
will delegate some activities to the ``Source`` objects in its will delegate some activities to the ``Source`` objects in its
possesion. The ``Source`` objects should however never call functions possession. The ``Source`` objects should however never call functions
on the ``Element`` object, nor should the ``Source`` object itself on the ``Element`` object, nor should the ``Source`` object itself
have any understanding of what an ``Element`` is. have any understanding of what an ``Element`` is.
...@@ -1222,27 +1222,13 @@ For further information about using the reStructuredText with sphinx, please see ...@@ -1222,27 +1222,13 @@ For further information about using the reStructuredText with sphinx, please see
Building Docs Building Docs
~~~~~~~~~~~~~ ~~~~~~~~~~~~~
The documentation build is not integrated into the ``setup.py`` and is Before you can build the docs, you will need to ensure that you have installed
difficult (or impossible) to do so, so there is a little bit of setup the required :ref:`build dependencies <contributing_build_deps>` as mentioned
you need to take care of first. in the testing section above.
Before you can build the BuildStream documentation yourself, you need
to first install ``sphinx`` along with some additional plugins and dependencies,
using pip or some other mechanism::
# Install sphinx
pip3 install --user sphinx
# Install some sphinx extensions
pip3 install --user sphinx-click
pip3 install --user sphinx_rtd_theme
# Additional optional dependencies required
pip3 install --user arpy
To build the documentation, just run the following:: To build the documentation, just run the following::
make -C doc tox -e docs
This will give you a ``doc/build/html`` directory with the html docs which This will give you a ``doc/build/html`` directory with the html docs which
you can view in your browser locally to test. you can view in your browser locally to test.
...@@ -1260,9 +1246,10 @@ will make the docs build reuse already downloaded sources:: ...@@ -1260,9 +1246,10 @@ will make the docs build reuse already downloaded sources::
export BST_SOURCE_CACHE=~/.cache/buildstream/sources export BST_SOURCE_CACHE=~/.cache/buildstream/sources
To force rebuild session html while building the doc, simply build the docs like this:: To force rebuild session html while building the doc, simply run `tox` with the
``BST_FORCE_SESSION_REBUILD`` environment variable set, like so::
make BST_FORCE_SESSION_REBUILD=1 -C doc env BST_FORCE_SESSION_REBUILD=1 tox -e docs
Man pages Man pages
...@@ -1378,7 +1365,7 @@ Structure of an example ...@@ -1378,7 +1365,7 @@ Structure of an example
''''''''''''''''''''''' '''''''''''''''''''''''
The :ref:`tutorial <tutorial>` and the :ref:`examples <examples>` sections The :ref:`tutorial <tutorial>` and the :ref:`examples <examples>` sections
of the documentation contain a series of sample projects, each chapter in of the documentation contain a series of sample projects, each chapter in
the tutoral, or standalone example uses a sample project. the tutorial, or standalone example uses a sample project.
Here is the structure for adding new examples and tutorial chapters. Here is the structure for adding new examples and tutorial chapters.
...@@ -1468,63 +1455,159 @@ regenerate them locally in order to build the docs. ...@@ -1468,63 +1455,159 @@ regenerate them locally in order to build the docs.
Testing Testing
------- -------
BuildStream uses pytest for regression tests and testing out BuildStream uses `tox <https://tox.readthedocs.org/>`_ as a frontend to run the
the behavior of newly added components. tests which are implemented using `pytest <https://pytest.org/>`_. We use
pytest for regression tests and testing out the behavior of newly added
components.
The elaborate documentation for pytest can be found here: http://doc.pytest.org/en/latest/contents.html The elaborate documentation for pytest can be found here: http://doc.pytest.org/en/latest/contents.html
Don't get lost in the docs if you don't need to, follow existing examples instead. Don't get lost in the docs if you don't need to, follow existing examples instead.
.. _contributing_build_deps:
Installing build dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some of BuildStream's dependencies have non-python build dependencies. When
running tests with ``tox``, you will first need to install these dependencies.
Exact steps to install these will depend on your operating system. Commands
for installing them for some common distributions are listed below.
For Fedora-based systems::
dnf install gcc pkg-config python3-devel cairo-gobject-devel glib2-devel gobject-introspection-devel
For Debian-based systems::
apt install gcc pkg-config python3-dev libcairo2-dev libgirepository1.0-dev
Running tests Running tests
~~~~~~~~~~~~~ ~~~~~~~~~~~~~
To run the tests, just type:: To run the tests, simply navigate to the toplevel directory of your BuildStream
checkout and run::
tox
./setup.py test By default, the test suite will be run against every supported python version
found on your host. If you have multiple python versions installed, you may
want to run tests against only one version and you can do that using the ``-e``
option when running tox::
At the toplevel. tox -e py37
When debugging a test, it can be desirable to see the stdout If you would like to test and lint at the same time, or if you do have multiple
and stderr generated by a test, to do this use the ``--addopts`` python versions installed and would like to test against multiple versions, then
function to feed arguments to pytest as such:: we recommend using `detox <https://github.com/tox-dev/detox>`_, just run it with
the same arguments you would give `tox`::
./setup.py test --addopts -s detox -e lint,py36,py37
Linting is performed separately from testing. In order to run the linting step which
consists of running the ``pycodestyle`` and ``pylint`` tools, run the following::
tox -e lint
.. tip::
The project specific pylint and pycodestyle configurations are stored in the
toplevel buildstream directory in the ``.pylintrc`` file and ``setup.cfg`` files
respectively. These configurations can be interesting to use with IDEs and
other developer tooling.
The output of all failing tests will always be printed in the summary, but
if you want to observe the stdout and stderr generated by a passing test,
you can pass the ``-s`` option to pytest as such::
tox -- -s
.. tip::
The ``-s`` option is `a pytest option <https://docs.pytest.org/en/latest/usage.html>`_.
Any options specified before the ``--`` separator are consumed by ``tox``,
and any options after the ``--`` separator will be passed along to pytest.
You can always abort on the first failure by running:: You can always abort on the first failure by running::
./setup.py test --addopts -x tox -- -x
Similarly, you may also be interested in the ``--last-failed`` and
``--failed-first`` options as per the
`pytest cache <https://docs.pytest.org/en/latest/cache.html>`_ documentation.
If you want to run a specific test or a group of tests, you If you want to run a specific test or a group of tests, you
can specify a prefix to match. E.g. if you want to run all of can specify a prefix to match. E.g. if you want to run all of
the frontend tests you can do:: the frontend tests you can do::
./setup.py test --addopts 'tests/frontend/' tox -- tests/frontend/
Specific tests can be chosen by using the :: delimeter after the test module. Specific tests can be chosen by using the :: delimiter after the test module.
If you wanted to run the test_build_track test within frontend/buildtrack.py you could do:: If you wanted to run the test_build_track test within frontend/buildtrack.py you could do::
./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track' tox -- tests/frontend/buildtrack.py::test_build_track
When running only a few tests, you may find the coverage and timing output
excessive; there are options to trim them. Note that the coverage step will fail.
Here is an example::
tox -- --no-cov --durations=1 tests/frontend/buildtrack.py::test_build_track
We also have a set of slow integration tests that are disabled by We also have a set of slow integration tests that are disabled by
default - you will notice most of them marked with SKIP in the pytest default - you will notice most of them marked with SKIP in the pytest
output. To run them, you can use:: output. To run them, you can use::
./setup.py test --addopts '--integration' tox -- --integration
By default, buildstream also runs pylint on all files. Should you want In case BuildStream's dependencies were updated since you last ran the
to run just pylint (these checks are a lot faster), you can do so tests, you might see some errors like
with:: ``pytest: error: unrecognized arguments: --codestyle``. If this happens, you
will need to force ``tox`` to recreate the test environment(s). To do so, you
can run ``tox`` with ``-r`` or ``--recreate`` option.
./setup.py test --addopts '-m pylint' .. note::
By default, we do not allow use of site packages in our ``tox``
configuration to enable running the tests in an isolated environment.
If you need to enable use of site packages for whatever reason, you can
do so by passing the ``--sitepackages`` option to ``tox``. Also, you will
not need to install any of the build dependencies mentioned above if you
use this approach.
.. note::
While using ``tox`` is practical for developers running tests in
more predictable execution environments, it is still possible to
execute the test suite against a specific installation environment
using pytest directly::
./setup.py test
Specific options can be passed to ``pytest`` using the ``--addopts``
option::
./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track'
Observing coverage
~~~~~~~~~~~~~~~~~~
Once you have run the tests using `tox` (or `detox`), some coverage reports will
have been left behind.
To view the coverage report of the last test run, simply run::
tox -e coverage
Alternatively, any IDE plugin that uses pytest should automatically This will collate any reports from separate python environments that may be
detect the ``.pylintrc`` in the project's root directory. under test before displaying the combined coverage.
Adding tests Adding tests
~~~~~~~~~~~~ ~~~~~~~~~~~~
Tests are found in the tests subdirectory, inside of which Tests are found in the tests subdirectory, inside of which
there is a separarate directory for each *domain* of tests. there is a separate directory for each *domain* of tests.
All tests are collected as:: All tests are collected as::
tests/*/*.py tests/*/*.py
...@@ -1547,23 +1630,50 @@ Tests that run a sandbox should be decorated with:: ...@@ -1547,23 +1630,50 @@ Tests that run a sandbox should be decorated with::
and use the integration cli helper. and use the integration cli helper.
You should first aim to write tests that exercise your changes from the cli. You must test your changes in an end-to-end fashion. Consider the first end to
This is so that the testing is end-to-end, and the changes are guaranteed to be the appropriate user interface, and the other end to be the change you have
work for the end-user. The cli is considered stable, and so tests written in made.
terms of it are unlikely to require updating as the internals of the software
change over time.
It may be impractical to sufficiently examine some changes this way. For The aim for our tests is to make assertions about how you impact and define the
example, the number of cases to test and the running time of each test may be outward user experience. You should be able to exercise all code paths via the
too high. It may also be difficult to contrive circumstances to cover every user interface, just as one can test the strength of rivets by sailing dozens
line of the change. If this is the case, next you can consider also writing of ocean liners. Keep in mind that your ocean liners could be sailing properly
unit tests that work more directly on the changes. *because* of a malfunctioning rivet. End-to-end testing will warn you that
fixing the rivet will sink the ships.
It is important to write unit tests in such a way that they do not break due to The primary user interface is the cli, so that should be the first target 'end'
changes unrelated to what they are meant to test. For example, if the test for testing. Most of the value of BuildStream comes from what you can achieve
relies on a lot of BuildStream internals, a large refactoring will likely with the cli.
require the test to be rewritten. Pure functions that only rely on the Python
Standard Library are excellent candidates for unit testing. We also have what we call a *"Public API Surface"*, as previously mentioned in
:ref:`contributing_documenting_symbols`. You should consider this a secondary
target. This is mainly for advanced users to implement their plugins against.
Note that both of these targets for testing are guaranteed to continue working
in the same way across versions. This means that tests written in terms of them
will be robust to large changes to the code. This important property means that
BuildStream developers can make large refactorings without needing to rewrite
fragile tests.
Another user to consider is the BuildStream developer, therefore internal API
surfaces are also targets for testing. For example the YAML loading code, and
the CasCache. Remember that these surfaces are still just a means to the end of
providing value through the cli and the *"Public API Surface"*.
It may be impractical to sufficiently examine some changes in an end-to-end
fashion. The number of cases to test, and the running time of each test, may be
too high. Such typically low-level things, e.g. parsers, may also be tested
with unit tests; alongside the mandatory end-to-end tests.
It is important to write unit tests that are not fragile, i.e. in such a way
that they do not break due to changes unrelated to what they are meant to test.
For example, if the test relies on a lot of BuildStream internals, a large
refactoring will likely require the test to be rewritten. Pure functions that
only rely on the Python Standard Library are excellent candidates for unit
testing.
Unit tests only make it easier to implement things correctly; end-to-end tests
make it easier to implement the right thing.
Measuring performance Measuring performance
...@@ -1656,10 +1766,8 @@ obtain profiles:: ...@@ -1656,10 +1766,8 @@ obtain profiles::
ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts
The MANIFEST.in and setup.py Managing data files
---------------------------- -------------------
When adding a dependency to BuildStream, it's important to update the setup.py accordingly.
When adding data files which need to be discovered at runtime by BuildStream, update setup.py accordingly. When adding data files which need to be discovered at runtime by BuildStream, update setup.py accordingly.
When adding data files for the purpose of docs or tests, or anything that is not covered by When adding data files for the purpose of docs or tests, or anything that is not covered by
...@@ -1669,3 +1777,23 @@ At any time, running the following command to create a source distribution shoul ...@@ -1669,3 +1777,23 @@ At any time, running the following command to create a source distribution shoul
creating a tarball which contains everything we want it to include:: creating a tarball which contains everything we want it to include::
./setup.py sdist ./setup.py sdist
Updating BuildStream's Python dependencies
------------------------------------------
BuildStream's Python dependencies are listed in multiple
`requirements files <https://pip.readthedocs.io/en/latest/reference/pip_install/#requirements-file-format>`_
present in the ``requirements`` directory.
All ``.txt`` files in this directory are generated from the corresponding
``.in`` file, and each ``.in`` file represents a set of dependencies. For
example, ``requirements.in`` contains all runtime dependencies of BuildStream.
``requirements.txt`` is generated from it, and contains pinned versions of all
runtime dependencies (including transitive dependencies) of BuildStream.
When adding a new dependency to BuildStream, or updating existing dependencies,
it is important to update the appropriate requirements file accordingly. After
changing the ``.in`` file, run the following to update the matching ``.txt``
file::
make -C requirements
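
As a purely illustrative example (the package name and version below are
hypothetical, not taken from the actual requirements files), a loose entry in
a ``.in`` file such as::

  Click

would then show up in the regenerated ``.txt`` file as a pinned version, along
the lines of::

  Click==7.0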
...@@ -24,6 +24,7 @@ recursive-include doc/sessions *.run ...@@ -24,6 +24,7 @@ recursive-include doc/sessions *.run
# Tests # Tests
recursive-include tests * recursive-include tests *
include conftest.py include conftest.py
include tox.ini
include .coveragerc include .coveragerc
include .pylintrc include .pylintrc
...@@ -31,7 +32,12 @@ include .pylintrc ...@@ -31,7 +32,12 @@ include .pylintrc
recursive-include buildstream/_protos *.proto recursive-include buildstream/_protos *.proto
# Requirements files # Requirements files
include dev-requirements.txt include requirements/requirements.in
include requirements/requirements.txt
include requirements/dev-requirements.in
include requirements/dev-requirements.txt
include requirements/plugin-requirements.in
include requirements/plugin-requirements.txt
# Versioneer # Versioneer
include versioneer.py include versioneer.py
...@@ -2,16 +2,28 @@ ...@@ -2,16 +2,28 @@
buildstream 1.3.1 buildstream 1.3.1
================= =================
o Added `bst artifact log` subcommand for viewing build logs.
o BREAKING CHANGE: The bst source-bundle command has been removed. The o BREAKING CHANGE: The bst source-bundle command has been removed. The
functionality it provided has been replaced by the `--include-build-scripts` functionality it provided has been replaced by the `--include-build-scripts`
option of the `bst source-checkout` command. To produce a tarball containing option of the `bst source-checkout` command. To produce a tarball containing
an element's sources and generated build scripts you can do the command an element's sources and generated build scripts you can do the command
`bst source-checkout --include-build-scripts --tar foo.bst some-file.tar` `bst source-checkout --include-build-scripts --tar foo.bst some-file.tar`
o BREAKING CHANGE: `bst track` and `bst fetch` commands are now obsolete.
Their functionality is provided by `bst source track` and
`bst source fetch` respectively.
o Added new `bst source checkout` command to checkout sources of an element.
o BREAKING CHANGE: Default strip-commands have been removed as they are too o BREAKING CHANGE: Default strip-commands have been removed as they are too
specific. Recommendation if you are building in Linux is to use the specific. Recommendation if you are building in Linux is to use the
ones being used in freedesktop-sdk project, for example ones being used in freedesktop-sdk project, for example
o Running commands without elements specified will now attempt to use
the default element defined in the project configuration.
If no default element is defined, all elements in the project will be used.
o All elements must now be suffixed with `.bst` o All elements must now be suffixed with `.bst`
Attempting to use an element that does not have the `.bst` extension, Attempting to use an element that does not have the `.bst` extension,
will result in a warning. will result in a warning.
...@@ -22,6 +34,12 @@ buildstream 1.3.1 ...@@ -22,6 +34,12 @@ buildstream 1.3.1
make changes to their .bst files if they are expecting these environment make changes to their .bst files if they are expecting these environment
variables to be set. variables to be set.
o BREAKING CHANGE: The 'auto-init' functionality has been removed. This would
offer to create a project in the event that bst was run against a directory
without a project, to be friendly to new users. It has been replaced with
an error message and a hint instead, to avoid bothering folks that just
made a mistake.
o Failed builds are included in the cache as well. o Failed builds are included in the cache as well.
`bst checkout` will provide anything in `%{install-root}`. `bst checkout` will provide anything in `%{install-root}`.
A build including cached fails will cause any dependant elements A build including cached fails will cause any dependant elements
...@@ -59,8 +77,8 @@ buildstream 1.3.1 ...@@ -59,8 +77,8 @@ buildstream 1.3.1
instead of just a specially-formatted build-root with a `root` and `scratch` instead of just a specially-formatted build-root with a `root` and `scratch`
subdirectory. subdirectory.
o The buildstream.conf file learned new 'prompt.auto-init', o The buildstream.conf file learned new
'prompt.really-workspace-close-remove-dir', and 'prompt.really-workspace-close-remove-dir' and
'prompt.really-workspace-reset-hard' options. These allow users to suppress 'prompt.really-workspace-reset-hard' options. These allow users to suppress
certain confirmation prompts, e.g. double-checking that the user meant to certain confirmation prompts, e.g. double-checking that the user meant to
run the command as typed. run the command as typed.
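
For illustration only (not part of this changeset), a user wanting to suppress
these prompts could add a fragment like the following to their buildstream.conf;
the values shown are assumptions based on the option parsing elsewhere in this diff:

  prompt:
    really-workspace-close-remove-dir: yes
    really-workspace-reset-hard: yes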
...@@ -75,8 +93,6 @@ buildstream 1.3.1 ...@@ -75,8 +93,6 @@ buildstream 1.3.1
with cached artifacts, only 'complete' elements can be pushed. If the element with cached artifacts, only 'complete' elements can be pushed. If the element
is expected to have a populated build tree then it must be cached before pushing. is expected to have a populated build tree then it must be cached before pushing.
o Added new `bst source-checkout` command to checkout sources of an element.
o `bst workspace open` now supports the creation of multiple elements and o `bst workspace open` now supports the creation of multiple elements and
allows the user to set a default location for their creation. This has meant allows the user to set a default location for their creation. This has meant
that the new CLI is no longer backwards compatible with buildstream 1.2. that the new CLI is no longer backwards compatible with buildstream 1.2.
......
...@@ -16,6 +16,9 @@ About ...@@ -16,6 +16,9 @@ About
.. image:: https://img.shields.io/pypi/v/BuildStream.svg .. image:: https://img.shields.io/pypi/v/BuildStream.svg
:target: https://pypi.org/project/BuildStream :target: https://pypi.org/project/BuildStream
.. image:: https://app.fossa.io/api/projects/git%2Bgitlab.com%2FBuildStream%2Fbuildstream.svg?type=shield
:target: https://app.fossa.io/projects/git%2Bgitlab.com%2FBuildStream%2Fbuildstream?ref=badge_shield
What is BuildStream? What is BuildStream?
==================== ====================
......
...@@ -34,3 +34,8 @@ if "_BST_COMPLETION" not in os.environ: ...@@ -34,3 +34,8 @@ if "_BST_COMPLETION" not in os.environ:
from .element import Element, ElementError from .element import Element, ElementError
from .buildelement import BuildElement from .buildelement import BuildElement
from .scriptelement import ScriptElement from .scriptelement import ScriptElement
# XXX We are exposing a private member here as we expect it to move to a
# separate package soon. See the following discussion for more details:
# https://gitlab.com/BuildStream/buildstream/issues/739#note_124819869
from ._gitsourcebase import _GitSourceBase
...@@ -19,18 +19,16 @@ ...@@ -19,18 +19,16 @@
import multiprocessing import multiprocessing
import os import os
import signal
import string import string
from collections.abc import Mapping from collections.abc import Mapping
from ..types import _KeyStrength from .types import _KeyStrength
from .._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason from ._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason
from .._message import Message, MessageType from ._message import Message, MessageType
from .. import _signals from . import utils
from .. import utils from . import _yaml
from .. import _yaml
from .cascache import CASRemote, CASRemoteSpec from ._cas import CASRemote, CASRemoteSpec
CACHE_SIZE_FILE = "cache_size" CACHE_SIZE_FILE = "cache_size"
...@@ -249,7 +247,7 @@ class ArtifactCache(): ...@@ -249,7 +247,7 @@ class ArtifactCache():
# FIXME: Asking the user what to do may be neater # FIXME: Asking the user what to do may be neater
default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'], default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
'buildstream.conf') 'buildstream.conf')
detail = ("There is not enough space to build the given element.\n" detail = ("There is not enough space to complete the build.\n"
"Please increase the cache-quota in {}." "Please increase the cache-quota in {}."
.format(self.context.config_origin or default_conf)) .format(self.context.config_origin or default_conf))
...@@ -375,20 +373,8 @@ class ArtifactCache(): ...@@ -375,20 +373,8 @@ class ArtifactCache():
remotes = {} remotes = {}
q = multiprocessing.Queue() q = multiprocessing.Queue()
for remote_spec in remote_specs: for remote_spec in remote_specs:
# Use subprocess to avoid creation of gRPC threads in main BuildStream process
# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
p = multiprocessing.Process(target=self.cas.initialize_remote, args=(remote_spec, q))
try: error = CASRemote.check_remote(remote_spec, q)
# Keep SIGINT blocked in the child process
with _signals.blocked([signal.SIGINT], ignore=False):
p.start()
error = q.get()
p.join()
except KeyboardInterrupt:
utils._kill_process_tree(p.pid)
raise
if error and on_failure: if error and on_failure:
on_failure(remote_spec.url, error) on_failure(remote_spec.url, error)
...@@ -747,7 +733,7 @@ class ArtifactCache(): ...@@ -747,7 +733,7 @@ class ArtifactCache():
"servers are configured as push remotes.") "servers are configured as push remotes.")
for remote in push_remotes: for remote in push_remotes:
message_digest = self.cas.push_message(remote, message) message_digest = remote.push_message(message)
return message_digest return message_digest
...@@ -874,9 +860,7 @@ class ArtifactCache(): ...@@ -874,9 +860,7 @@ class ArtifactCache():
"\nValid values are, for example: 800M 10G 1T 50%\n" "\nValid values are, for example: 800M 10G 1T 50%\n"
.format(str(e))) from e .format(str(e))) from e
stat = os.statvfs(artifactdir_volume) available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
available_space = (stat.f_bsize * stat.f_bavail)
cache_size = self.get_cache_size() cache_size = self.get_cache_size()
# Ensure system has enough storage for the cache_quota # Ensure system has enough storage for the cache_quota
...@@ -893,7 +877,7 @@ class ArtifactCache(): ...@@ -893,7 +877,7 @@ class ArtifactCache():
"BuildStream requires a minimum cache quota of 2G.") "BuildStream requires a minimum cache quota of 2G.")
elif cache_quota > cache_size + available_space: # Check maximum elif cache_quota > cache_size + available_space: # Check maximum
if '%' in self.context.config_cache_quota: if '%' in self.context.config_cache_quota:
available = (available_space / (stat.f_blocks * stat.f_bsize)) * 100 available = (available_space / total_size) * 100
available = '{}% of total disk space'.format(round(available, 1)) available = '{}% of total disk space'.format(round(available, 1))
else: else:
available = utils._pretty_size(available_space) available = utils._pretty_size(available_space)
...@@ -919,6 +903,20 @@ class ArtifactCache(): ...@@ -919,6 +903,20 @@ class ArtifactCache():
self._cache_quota = cache_quota - headroom self._cache_quota = cache_quota - headroom
self._cache_lower_threshold = self._cache_quota / 2 self._cache_lower_threshold = self._cache_quota / 2
# _get_volume_space_info_for
#
# Get the available space and total space for the given volume
#
# Args:
# volume: volume for which to get the size
#
# Returns:
# A tuple containing first the available number of bytes on the requested
# volume, then the total number of bytes of the volume.
def _get_volume_space_info_for(self, volume):
stat = os.statvfs(volume)
return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
# _configured_remote_artifact_cache_specs(): # _configured_remote_artifact_cache_specs():
# #
......
...@@ -17,4 +17,5 @@ ...@@ -17,4 +17,5 @@
# Authors: # Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk> # Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
from .artifactcache import ArtifactCache, ArtifactCacheSpec, CACHE_SIZE_FILE from .cascache import CASCache
from .casremote import CASRemote, CASRemoteSpec
from collections import namedtuple
import io
import os
import multiprocessing
import signal
from urllib.parse import urlparse
import uuid
import grpc
from .. import _yaml
from .._protos.google.rpc import code_pb2
from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .._exceptions import CASRemoteError, LoadError, LoadErrorReason
from .. import _signals
from .. import utils
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
class CASRemoteSpec(namedtuple('CASRemoteSpec', 'url push server_cert client_key client_cert instance_name')):
# _new_from_config_node
#
# Creates an CASRemoteSpec() from a YAML loaded node
#
@staticmethod
def _new_from_config_node(spec_node, basedir=None):
_yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert', 'instance_name'])
url = _yaml.node_get(spec_node, str, 'url')
push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
if not url:
provenance = _yaml.node_get_provenance(spec_node, 'url')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: empty artifact cache URL".format(provenance))
instance_name = _yaml.node_get(spec_node, str, 'instance_name', default_value=None)
server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
if server_cert and basedir:
server_cert = os.path.join(basedir, server_cert)
client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
if client_key and basedir:
client_key = os.path.join(basedir, client_key)
client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
if client_cert and basedir:
client_cert = os.path.join(basedir, client_cert)
if client_key and not client_cert:
provenance = _yaml.node_get_provenance(spec_node, 'client-key')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-key' was specified without 'client-cert'".format(provenance))
if client_cert and not client_key:
provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-cert' was specified without 'client-key'".format(provenance))
return CASRemoteSpec(url, push, server_cert, client_key, client_cert, instance_name)
CASRemoteSpec.__new__.__defaults__ = (None, None, None, None)
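
For orientation (this fragment is illustrative and not part of the changeset), the
node parsed by ``_new_from_config_node()`` typically comes from an ``artifacts``
entry in the user or project configuration; with a hypothetical host name it could
look like::

  artifacts:
    url: https://cache.example.com:11002
    push: true
    server-cert: certs/server.crt
    client-key: certs/client.key
    client-cert: certs/client.crt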
class BlobNotFound(CASRemoteError):
def __init__(self, blob, msg):
self.blob = blob
super().__init__(msg)
# Represents a single remote CAS cache.
#
class CASRemote():
def __init__(self, spec):
self.spec = spec
self._initialized = False
self.channel = None
self.bytestream = None
self.cas = None
self.ref_storage = None
self.batch_update_supported = None
self.batch_read_supported = None
self.capabilities = None
self.max_batch_total_size_bytes = None
def init(self):
if not self._initialized:
url = urlparse(self.spec.url)
if url.scheme == 'http':
port = url.port or 80
self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port))
elif url.scheme == 'https':
port = url.port or 443
if self.spec.server_cert:
with open(self.spec.server_cert, 'rb') as f:
server_cert_bytes = f.read()
else:
server_cert_bytes = None
if self.spec.client_key:
with open(self.spec.client_key, 'rb') as f:
client_key_bytes = f.read()
else:
client_key_bytes = None
if self.spec.client_cert:
with open(self.spec.client_cert, 'rb') as f:
client_cert_bytes = f.read()
else:
client_cert_bytes = None
credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes,
private_key=client_key_bytes,
certificate_chain=client_cert_bytes)
self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials)
else:
raise CASRemoteError("Unsupported URL: {}".format(self.spec.url))
self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
try:
request = remote_execution_pb2.GetCapabilitiesRequest()
response = self.capabilities.GetCapabilities(request)
server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
except grpc.RpcError as e:
# Simply use the defaults for servers that don't implement GetCapabilities()
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
# Check whether the server supports BatchReadBlobs()
self.batch_read_supported = False
try:
request = remote_execution_pb2.BatchReadBlobsRequest()
response = self.cas.BatchReadBlobs(request)
self.batch_read_supported = True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
# Check whether the server supports BatchUpdateBlobs()
self.batch_update_supported = False
try:
request = remote_execution_pb2.BatchUpdateBlobsRequest()
response = self.cas.BatchUpdateBlobs(request)
self.batch_update_supported = True
except grpc.RpcError as e:
if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
e.code() != grpc.StatusCode.PERMISSION_DENIED):
raise
self._initialized = True
# check_remote
#
# Used when checking whether remote_specs work in the buildstream main
# thread, runs this in a separate process to avoid creation of gRPC threads
# in the main BuildStream process
# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
@classmethod
def check_remote(cls, remote_spec, q):
def __check_remote():
try:
remote = cls(remote_spec)
remote.init()
request = buildstream_pb2.StatusRequest()
response = remote.ref_storage.Status(request)
if remote_spec.push and not response.allow_updates:
q.put('CAS server does not allow push')
else:
# No error
q.put(None)
except grpc.RpcError as e:
# str(e) is too verbose for errors reported to the user
q.put(e.details())
except Exception as e: # pylint: disable=broad-except
# Whatever happens, we need to return it to the calling process
#
q.put(str(e))
p = multiprocessing.Process(target=__check_remote)
try:
# Keep SIGINT blocked in the child process
with _signals.blocked([signal.SIGINT], ignore=False):
p.start()
error = q.get()
p.join()
except KeyboardInterrupt:
utils._kill_process_tree(p.pid)
raise
return error
# verify_digest_on_remote():
#
# Check whether the object is already on the server in which case
# there is no need to upload it.
#
# Args:
# digest (Digest): The object digest.
#
def verify_digest_on_remote(self, digest):
self.init()
request = remote_execution_pb2.FindMissingBlobsRequest()
request.blob_digests.extend([digest])
response = self.cas.FindMissingBlobs(request)
if digest in response.missing_blob_digests:
return False
return True
# push_message():
#
# Push the given protobuf message to a remote.
#
# Args:
# message (Message): A protobuf message to push.
#
# Raises:
# (CASRemoteError): if there was an error
#
def push_message(self, message):
message_buffer = message.SerializeToString()
message_digest = utils._message_digest(message_buffer)
self.init()
with io.BytesIO(message_buffer) as b:
self._send_blob(message_digest, b)
return message_digest
################################################
# Local Private Methods #
################################################
def _fetch_blob(self, digest, stream):
resource_name = '/'.join(['blobs', digest.hash, str(digest.size_bytes)])
request = bytestream_pb2.ReadRequest()
request.resource_name = resource_name
request.read_offset = 0
for response in self.bytestream.Read(request):
stream.write(response.data)
stream.flush()
assert digest.size_bytes == os.fstat(stream.fileno()).st_size
def _send_blob(self, digest, stream, u_uid=uuid.uuid4()):
resource_name = '/'.join(['uploads', str(u_uid), 'blobs',
digest.hash, str(digest.size_bytes)])
def request_stream(resname, instream):
offset = 0
finished = False
remaining = digest.size_bytes
while not finished:
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
request = bytestream_pb2.WriteRequest()
request.write_offset = offset
# max. _MAX_PAYLOAD_BYTES chunks
request.data = instream.read(chunk_size)
request.resource_name = resname
request.finish_write = remaining <= 0
yield request
offset += chunk_size
finished = request.finish_write
response = self.bytestream.Write(request_stream(resource_name, stream))
assert response.committed_size == digest.size_bytes
# Represents a batch of blobs queued for fetching.
#
class _CASBatchRead():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchReadBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
request_digest = self._request.digests.add()
request_digest.hash = digest.hash
request_digest.size_bytes = digest.size_bytes
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if not self._request.digests:
return
batch_response = self._remote.cas.BatchReadBlobs(self._request)
for response in batch_response.responses:
if response.status.code == code_pb2.NOT_FOUND:
raise BlobNotFound(response.digest.hash, "Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.status.code != code_pb2.OK:
raise CASRemoteError("Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.digest.size_bytes != len(response.data):
raise CASRemoteError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
response.digest.hash, response.digest.size_bytes, len(response.data)))
yield (response.digest, response.data)
# Represents a batch of blobs queued for upload.
#
class _CASBatchUpdate():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest, stream):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
blob_request = self._request.requests.add()
blob_request.digest.hash = digest.hash
blob_request.digest.size_bytes = digest.size_bytes
blob_request.data = stream.read(digest.size_bytes)
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if not self._request.requests:
return
batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
for response in batch_response.responses:
if response.status.code != code_pb2.OK:
raise CASRemoteError("Failed to upload blob {}: {}".format(
response.digest.hash, response.status.code))
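
As an illustration only (not part of this changeset), a caller holding a
``CASRemote`` might drive the batching helper above roughly as follows, assuming
every blob fits within the remote's maximum batch size::

  def fetch_blobs(remote, digests):
      # Download the given blobs, yielding (digest, data) pairs.
      remote.init()
      batch = _CASBatchRead(remote)
      for digest in digests:
          if not batch.add(digest):
              # The current batch is full: flush it and start a new one.
              yield from batch.send()
              batch = _CASBatchRead(remote)
              batch.add(digest)
      yield from batch.send()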
...@@ -27,8 +27,8 @@ import uuid ...@@ -27,8 +27,8 @@ import uuid
import errno import errno
import threading import threading
import click
import grpc import grpc
import click
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
......
...@@ -31,9 +31,10 @@ from ._exceptions import LoadError, LoadErrorReason, BstError ...@@ -31,9 +31,10 @@ from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end from ._profile import Topics, profile_start, profile_end
from ._artifactcache import ArtifactCache from ._artifactcache import ArtifactCache
from ._artifactcache.cascache import CASCache from ._cas import CASCache
from ._workspaces import Workspaces, WorkspaceProjectCache, WORKSPACE_PROJECT_FILE from ._workspaces import Workspaces, WorkspaceProjectCache
from .plugin import _plugin_lookup from .plugin import _plugin_lookup
from .sandbox import SandboxRemote
# Context() # Context()
...@@ -72,6 +73,9 @@ class Context(): ...@@ -72,6 +73,9 @@ class Context():
# The locations from which to push and pull prebuilt artifacts # The locations from which to push and pull prebuilt artifacts
self.artifact_cache_specs = None self.artifact_cache_specs = None
# The global remote execution configuration
self.remote_execution_specs = None
# The directory to store build logs # The directory to store build logs
self.logdir = None self.logdir = None
...@@ -117,10 +121,6 @@ class Context(): ...@@ -117,10 +121,6 @@ class Context():
# Whether or not to attempt to pull build trees globally # Whether or not to attempt to pull build trees globally
self.pull_buildtrees = None self.pull_buildtrees = None
# Boolean, whether to offer to create a project for the user, if we are
# invoked outside of a directory where we can resolve the project.
self.prompt_auto_init = None
# Boolean, whether we double-check with the user that they meant to # Boolean, whether we double-check with the user that they meant to
# remove a workspace directory. # remove a workspace directory.
self.prompt_workspace_close_remove_dir = None self.prompt_workspace_close_remove_dir = None
...@@ -191,7 +191,7 @@ class Context(): ...@@ -191,7 +191,7 @@ class Context():
_yaml.node_validate(defaults, [ _yaml.node_validate(defaults, [
'sourcedir', 'builddir', 'artifactdir', 'logdir', 'sourcedir', 'builddir', 'artifactdir', 'logdir',
'scheduler', 'artifacts', 'logging', 'projects', 'scheduler', 'artifacts', 'logging', 'projects',
'cache', 'prompt', 'workspacedir', 'cache', 'prompt', 'workspacedir', 'remote-execution'
]) ])
for directory in ['sourcedir', 'builddir', 'artifactdir', 'logdir', 'workspacedir']: for directory in ['sourcedir', 'builddir', 'artifactdir', 'logdir', 'workspacedir']:
...@@ -216,6 +216,8 @@ class Context(): ...@@ -216,6 +216,8 @@ class Context():
# Load artifact share configuration # Load artifact share configuration
self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults) self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults)
self.remote_execution_specs = SandboxRemote.specs_from_config_node(defaults)
# Load pull build trees configuration # Load pull build trees configuration
self.pull_buildtrees = _yaml.node_get(cache, bool, 'pull-buildtrees') self.pull_buildtrees = _yaml.node_get(cache, bool, 'pull-buildtrees')
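
The new remote_execution_specs line above pulls an optional remote-execution section out of the already-validated user configuration. As a rough illustration of what a specs_from_config_node()-style helper does (RemoteSpec and the 'url' key below are assumptions for the sketch, not the actual SandboxRemote API, and plain dicts stand in for YAML nodes):

from collections import namedtuple

RemoteSpec = namedtuple('RemoteSpec', ['url'])


def specs_from_config_node(config):
    # Return None when the section is absent, so remote execution
    # stays disabled unless explicitly configured.
    node = config.get('remote-execution')
    if node is None:
        return None
    return RemoteSpec(url=node['url'])


print(specs_from_config_node({'remote-execution': {'url': 'http://cas.example.com:50051'}}))
print(specs_from_config_node({}))   # None: remote execution not configured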
...@@ -258,12 +260,10 @@ class Context(): ...@@ -258,12 +260,10 @@ class Context():
prompt = _yaml.node_get( prompt = _yaml.node_get(
defaults, Mapping, 'prompt') defaults, Mapping, 'prompt')
_yaml.node_validate(prompt, [ _yaml.node_validate(prompt, [
'auto-init', 'really-workspace-close-remove-dir', 'really-workspace-close-remove-dir',
'really-workspace-close-project-inaccessible', 'really-workspace-close-project-inaccessible',
'really-workspace-reset-hard', 'really-workspace-reset-hard',
]) ])
self.prompt_auto_init = _node_get_option_str(
prompt, 'auto-init', ['ask', 'no']) == 'ask'
self.prompt_workspace_close_remove_dir = _node_get_option_str( self.prompt_workspace_close_remove_dir = _node_get_option_str(
prompt, 'really-workspace-close-remove-dir', ['ask', 'yes']) == 'ask' prompt, 'really-workspace-close-remove-dir', ['ask', 'yes']) == 'ask'
self.prompt_workspace_close_project_inaccessible = _node_get_option_str( self.prompt_workspace_close_project_inaccessible = _node_get_option_str(
...@@ -277,7 +277,8 @@ class Context(): ...@@ -277,7 +277,8 @@ class Context():
# Shallow validation of overrides, parts of buildstream which rely # Shallow validation of overrides, parts of buildstream which rely
# on the overrides are expected to validate elsewhere. # on the overrides are expected to validate elsewhere.
for _, overrides in _yaml.node_items(self._project_overrides): for _, overrides in _yaml.node_items(self._project_overrides):
_yaml.node_validate(overrides, ['artifacts', 'options', 'strict', 'default-mirror']) _yaml.node_validate(overrides, ['artifacts', 'options', 'strict', 'default-mirror',
'remote-execution'])
profile_end(Topics.LOAD_CONTEXT, 'load') profile_end(Topics.LOAD_CONTEXT, 'load')
...@@ -316,11 +317,18 @@ class Context(): ...@@ -316,11 +317,18 @@ class Context():
# invoked with as opposed to a junctioned subproject. # invoked with as opposed to a junctioned subproject.
# #
# Returns: # Returns:
# (list): The list of projects # (Project): The Project object
# #
def get_toplevel_project(self): def get_toplevel_project(self):
return self._projects[0] return self._projects[0]
# get_workspaces():
#
# Return a Workspaces object containing a list of workspaces.
#
# Returns:
# (Workspaces): The Workspaces object
#
def get_workspaces(self): def get_workspaces(self):
return self._workspaces return self._workspaces
...@@ -649,20 +657,6 @@ class Context(): ...@@ -649,20 +657,6 @@ class Context():
self._cascache = CASCache(self.artifactdir) self._cascache = CASCache(self.artifactdir)
return self._cascache return self._cascache
# guess_element()
#
# Attempts to interpret which element the user intended to run commands on
#
# Returns:
# (str) The name of the element, or None if no element can be guessed
def guess_element(self):
workspace_project_dir, _ = utils._search_upward_for_files(self._directory, [WORKSPACE_PROJECT_FILE])
if workspace_project_dir:
workspace_project = self._workspace_project_cache.get(workspace_project_dir)
return workspace_project.get_default_element()
else:
return None
# _node_get_option_str() # _node_get_option_str()
# #
......
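
The prompt settings above are parsed with _node_get_option_str(), which fetches a string option and insists that its value is one of a small set of allowed choices. A minimal sketch of that idea, using plain dicts instead of YAML nodes and a generic exception instead of LoadError (the default-value handling here is a simplification, not the real helper's contract):

def node_get_option_str(node, key, allowed):
    # Assume the first entry in 'allowed' acts as the default.
    value = node.get(key, allowed[0])
    if value not in allowed:
        raise ValueError("{}: value must be one of {}".format(key, ", ".join(allowed)))
    return value


prompt = {'really-workspace-close-remove-dir': 'yes'}
ask_first = node_get_option_str(
    prompt, 'really-workspace-close-remove-dir', ['ask', 'yes']) == 'ask'
print(ask_first)   # False: the user opted out of this prompt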
...@@ -262,8 +262,8 @@ class PlatformError(BstError): ...@@ -262,8 +262,8 @@ class PlatformError(BstError):
# Raised when errors are encountered by the sandbox implementation # Raised when errors are encountered by the sandbox implementation
# #
class SandboxError(BstError): class SandboxError(BstError):
def __init__(self, message, reason=None): def __init__(self, message, detail=None, reason=None):
super().__init__(message, domain=ErrorDomain.SANDBOX, reason=reason) super().__init__(message, detail=detail, domain=ErrorDomain.SANDBOX, reason=reason)
# ArtifactError # ArtifactError
...@@ -284,6 +284,21 @@ class CASError(BstError): ...@@ -284,6 +284,21 @@ class CASError(BstError):
super().__init__(message, detail=detail, domain=ErrorDomain.CAS, reason=reason, temporary=True) super().__init__(message, detail=detail, domain=ErrorDomain.CAS, reason=reason, temporary=True)
# CASRemoteError
#
# Raised when errors are encountered in the remote CAS
class CASRemoteError(CASError):
pass
# CASCacheError
#
# Raised when errors are encountered in the local CASCache
#
class CASCacheError(CASError):
pass
# PipelineError # PipelineError
# #
# Raised from pipeline operations # Raised from pipeline operations
......
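
Because both new exceptions derive from CASError, existing "except CASError" handlers keep working, while callers can now treat remote failures (often transient and worth retrying) differently from local cache errors. The stand-in classes below only mirror the hierarchy added in the hunk above:

class CASError(Exception):
    pass


class CASRemoteError(CASError):
    # Raised when errors are encountered in the remote CAS
    pass


class CASCacheError(CASError):
    # Raised when errors are encountered in the local CASCache
    pass


def pull_blob():
    raise CASRemoteError("Failed to download blob deadbeef: 14")


try:
    pull_blob()
except CASRemoteError as e:
    print("remote CAS error (may be worth retrying):", e)
except CASError as e:
    print("other CAS error:", e)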
...@@ -38,7 +38,7 @@ from .._message import Message, MessageType, unconditional_messages ...@@ -38,7 +38,7 @@ from .._message import Message, MessageType, unconditional_messages
from .._stream import Stream from .._stream import Stream
from .._versions import BST_FORMAT_VERSION from .._versions import BST_FORMAT_VERSION
from .. import _yaml from .. import _yaml
from .._scheduler import ElementJob from .._scheduler import ElementJob, JobStatus
# Import frontend assets # Import frontend assets
from . import Profile, LogLine, Status from . import Profile, LogLine, Status
...@@ -219,13 +219,13 @@ class App(): ...@@ -219,13 +219,13 @@ class App():
default_mirror=self._main_options.get('default_mirror')) default_mirror=self._main_options.get('default_mirror'))
except LoadError as e: except LoadError as e:
# Let's automatically start a `bst init` session in this case # Help users that are new to BuildStream by suggesting 'init'.
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF and self.interactive: # We don't want to slow down users that just made a mistake, so
click.echo("A project was not detected in the directory: {}".format(directory), err=True) # don't stop them with an offer to create a project for them.
if self.context.prompt_auto_init: if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
click.echo("", err=True) click.echo("No project found. You can create a new project like so:", err=True)
if click.confirm("Would you like to create a new project here?"): click.echo("", err=True)
self.init_project(None) click.echo(" bst init", err=True)
self._error_exit(e, "Error loading project") self._error_exit(e, "Error loading project")
...@@ -515,13 +515,13 @@ class App(): ...@@ -515,13 +515,13 @@ class App():
self._status.add_job(job) self._status.add_job(job)
self._maybe_render_status() self._maybe_render_status()
def _job_completed(self, job, success): def _job_completed(self, job, status):
self._status.remove_job(job) self._status.remove_job(job)
self._maybe_render_status() self._maybe_render_status()
# Dont attempt to handle a failure if the user has already opted to # Dont attempt to handle a failure if the user has already opted to
# terminate # terminate
if not success and not self.stream.terminated: if status == JobStatus.FAIL and not self.stream.terminated:
if isinstance(job, ElementJob): if isinstance(job, ElementJob):
element = job.element element = job.element
...@@ -599,7 +599,7 @@ class App(): ...@@ -599,7 +599,7 @@ class App():
click.echo("\nDropping into an interactive shell in the failed build sandbox\n", err=True) click.echo("\nDropping into an interactive shell in the failed build sandbox\n", err=True)
try: try:
prompt = self.shell_prompt(element) prompt = self.shell_prompt(element)
self.stream.shell(element, Scope.BUILD, prompt, isolate=True) self.stream.shell(element, Scope.BUILD, prompt, isolate=True, usebuildtree=True)
except BstError as e: except BstError as e:
click.echo("Error while attempting to create interactive shell: {}".format(e), err=True) click.echo("Error while attempting to create interactive shell: {}".format(e), err=True)
elif choice == 'log': elif choice == 'log':
......
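
The job-completion callback now receives a status value rather than a boolean, so skipped jobs can be told apart from genuine failures. A sketch of the idea, assuming a JobStatus enumeration with at least OK, FAIL and SKIPPED members (the members are inferred from the "status == JobStatus.FAIL" check above, not copied from the scheduler code):

import enum


class JobStatus(enum.Enum):
    OK = enum.auto()
    FAIL = enum.auto()
    SKIPPED = enum.auto()


def job_completed(job, status, terminated=False):
    # Only genuine failures trigger the interactive failure handling;
    # skipped jobs fall through without prompting the user.
    if status == JobStatus.FAIL and not terminated:
        print("handling failure of", job)


job_completed("hello.bst", JobStatus.SKIPPED)   # nothing happens
job_completed("hello.bst", JobStatus.FAIL)      # failure handler runs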
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# #
import collections import collections.abc
import copy import copy
import os import os
...@@ -203,7 +203,7 @@ def is_incomplete_option(all_args, cmd_param): ...@@ -203,7 +203,7 @@ def is_incomplete_option(all_args, cmd_param):
if start_of_option(arg_str): if start_of_option(arg_str):
last_option = arg_str last_option = arg_str
return True if last_option and last_option in cmd_param.opts else False return bool(last_option and last_option in cmd_param.opts)
def is_incomplete_argument(current_params, cmd_param): def is_incomplete_argument(current_params, cmd_param):
...@@ -218,7 +218,7 @@ def is_incomplete_argument(current_params, cmd_param): ...@@ -218,7 +218,7 @@ def is_incomplete_argument(current_params, cmd_param):
return True return True
if cmd_param.nargs == -1: if cmd_param.nargs == -1:
return True return True
if isinstance(current_param_values, collections.Iterable) \ if isinstance(current_param_values, collections.abc.Iterable) \
and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs: and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
return True return True
return False return False
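
The switch from collections.Iterable to collections.abc.Iterable is not cosmetic: the ABC aliases in the top-level collections module have been deprecated since Python 3.3 and are removed in Python 3.10, so isinstance checks need to go through collections.abc:

import collections.abc

print(isinstance([1, 2, 3], collections.abc.Iterable))   # True
print(isinstance(42, collections.abc.Iterable))          # False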
...@@ -297,12 +297,15 @@ def get_choices(cli, prog_name, args, incomplete, override): ...@@ -297,12 +297,15 @@ def get_choices(cli, prog_name, args, incomplete, override):
if not found_param and isinstance(ctx.command, MultiCommand): if not found_param and isinstance(ctx.command, MultiCommand):
# completion for any subcommands # completion for any subcommands
choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)]) choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)
if not ctx.command.get_command(ctx, cmd).hidden])
if not start_of_option(incomplete) and ctx.parent is not None \ if not start_of_option(incomplete) and ctx.parent is not None \
and isinstance(ctx.parent.command, MultiCommand) and ctx.parent.command.chain: and isinstance(ctx.parent.command, MultiCommand) and ctx.parent.command.chain:
# completion for chained commands # completion for chained commands
remaining_comands = set(ctx.parent.command.list_commands(ctx.parent)) - set(ctx.parent.protected_args) visible_commands = [cmd for cmd in ctx.parent.command.list_commands(ctx.parent)
if not ctx.parent.command.get_command(ctx.parent, cmd).hidden]
remaining_comands = set(visible_commands) - set(ctx.parent.protected_args)
choices.extend([cmd + " " for cmd in remaining_comands]) choices.extend([cmd + " " for cmd in remaining_comands])
for item in choices: for item in choices:
......
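
The completion changes above drop subcommands flagged as hidden from the suggested choices. A toy stand-in for the click objects (Group and Command below are illustrative, not the real click classes) shows the filtering:

class Command():
    def __init__(self, name, hidden=False):
        self.name = name
        self.hidden = hidden


class Group():
    def __init__(self, commands):
        self._commands = {c.name: c for c in commands}

    def list_commands(self):
        return sorted(self._commands)

    def get_command(self, name):
        return self._commands[name]


cli = Group([Command("build"), Command("fetch"), Command("fuse", hidden=True)])
choices = [cmd + " " for cmd in cli.list_commands()
           if not cli.get_command(cmd).hidden]
print(choices)   # ['build ', 'fetch '] - "fuse" is not offered for completion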
...@@ -23,8 +23,8 @@ from contextlib import ExitStack ...@@ -23,8 +23,8 @@ from contextlib import ExitStack
from mmap import mmap from mmap import mmap
import re import re
import textwrap import textwrap
import click
from ruamel import yaml from ruamel import yaml
import click
from . import Profile from . import Profile
from .. import Element, Consistency from .. import Element, Consistency
......