Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (401)
Showing changes with 2324 additions and 607 deletions
......@@ -4,11 +4,15 @@ include =
*/buildstream/*
omit =
# Omit profiling helper module
# Omit some internals
*/buildstream/_profile.py
*/buildstream/__main__.py
*/buildstream/_version.py
# Omit generated code
*/buildstream/_protos/*
*/.eggs/*
# Omit .tox directory
*/.tox/*
[report]
show_missing = True
......
......@@ -13,10 +13,12 @@ tests/**/*.pyc
integration-cache/
tmp
.coverage
.coverage-reports/
.coverage.*
.cache
.pytest_cache/
*.bst/
.tox/
# Pycache, in case buildstream is ran directly from within the source
# tree
......
image: buildstream/testsuite-debian:9-master-119-552f5fc6
image: buildstream/testsuite-debian:9-5da27168-32c47d1c
cache:
key: "$CI_JOB_NAME-"
......@@ -6,44 +6,14 @@ cache:
- cache/
stages:
- prepare
- test
- post
#####################################################
# Prepare stage #
#####################################################
# Create a source distribution
#
source_dist:
stage: prepare
script:
# Generate the source distribution tarball
#
- python3 setup.py sdist
- tar -ztf dist/*
- tarball=$(cd dist && echo $(ls *))
# Verify that the source distribution tarball can be installed correctly
#
- pip3 install dist/*.tar.gz
- bst --version
# unpack tarball as `dist/buildstream` directory
- |
cat > dist/unpack.sh << EOF
#!/bin/sh
tar -zxf ${tarball}
mv ${tarball%.tar.gz} buildstream
EOF
# Make our helpers executable
- chmod +x dist/unpack.sh
artifacts:
paths:
- dist/
variables:
PYTEST_ADDOPTS: "--color=yes"
INTEGRATION_CACHE: "${CI_PROJECT_DIR}/cache/integration-cache"
TEST_COMMAND: "tox -- --color=yes --integration"
COVERAGE_PREFIX: "${CI_JOB_NAME}."
#####################################################
......@@ -52,107 +22,145 @@ source_dist:
# Run premerge commits
#
.linux-tests-template: &linux-tests
.tests-template: &tests
stage: test
variables:
PYTEST_ADDOPTS: "--color=yes"
script:
before_script:
# Diagnostics
- mount
- df -h
script:
- useradd -Um buildstream
- chown -R buildstream:buildstream .
- export INTEGRATION_CACHE="$(pwd)/cache/integration-cache"
# Unpack and get into dist/buildstream
- cd dist && ./unpack.sh
- chown -R buildstream:buildstream buildstream
- cd buildstream
# Run the tests as a simple user to test for permission issues
- su buildstream -c "${TEST_COMMAND}"
# Run the tests from the source distribution, We run as a simple
# user to test for permission issues
- su buildstream -c 'python3 setup.py test --index-url invalid://uri --addopts --integration'
# Go back to the toplevel and collect our reports
- cd ../..
- mkdir -p coverage-linux/
- cp dist/buildstream/.coverage coverage-linux/coverage."${CI_JOB_NAME}"
after_script:
except:
- schedules
artifacts:
paths:
- coverage-linux/
- .coverage-reports
tests-debian-9:
image: buildstream/testsuite-debian:9-master-119-552f5fc6
<<: *linux-tests
image: buildstream/testsuite-debian:9-5da27168-32c47d1c
<<: *tests
tests-fedora-27:
image: buildstream/testsuite-fedora:27-master-119-552f5fc6
<<: *linux-tests
image: buildstream/testsuite-fedora:27-5da27168-32c47d1c
<<: *tests
tests-fedora-28:
image: buildstream/testsuite-fedora:28-master-119-552f5fc6
<<: *linux-tests
image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests
tests-ubuntu-18.04:
image: buildstream/testsuite-ubuntu:18.04-master-119-552f5fc6
<<: *linux-tests
image: buildstream/testsuite-ubuntu:18.04-5da27168-32c47d1c
<<: *tests
tests-python-3.7-stretch:
image: buildstream/testsuite-python:3.7-stretch-a60f0c39
<<: *tests
variables:
# Note that we explicitly specify TOXENV in this case because this
# image has both 3.6 and 3.7 versions. python3.6 cannot be removed because
# some of our base dependencies declare it as their runtime dependency.
TOXENV: py37
overnight-fedora-28-aarch64:
image: buildstream/testsuite-fedora:aarch64-28-5da27168-32c47d1c
tags:
- aarch64
<<: *tests
# We need to override the exclusion from the template
# in order to run on schedules
except: []
only:
- schedules
before_script:
# grpcio needs to be compiled from source on aarch64 so we additionally
# need a C++ compiler here.
# FIXME: Ideally this would be provided by the base image. This will be
# unblocked by https://gitlab.com/BuildStream/buildstream-docker-images/issues/34
- dnf install -y gcc-c++
tests-unix:
# Use fedora here, to a) run a test on fedora and b) ensure that we
# can get rid of ostree - this is not possible with debian-8
image: buildstream/testsuite-fedora:27-master-119-552f5fc6
stage: test
image: buildstream/testsuite-fedora:27-5da27168-32c47d1c
<<: *tests
variables:
BST_FORCE_BACKEND: "unix"
PYTEST_ADDOPTS: "--color=yes"
script:
- export INTEGRATION_CACHE="$(pwd)/cache/integration-cache"
# We remove the Bubblewrap and OSTree packages here so that we catch any
# codepaths that try to use them. Removing OSTree causes fuse-libs to
# disappear unless we mark it as user-installed.
- dnf mark install fuse-libs
- dnf erase -y bubblewrap ostree
# Since the unix platform is required to run as root, no user change required
- ${TEST_COMMAND}
tests-fedora-missing-deps:
# Ensure that tests behave nicely while missing bwrap and ostree
image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests
script:
# We remove the Bubblewrap and OSTree packages here so that we catch any
# codepaths that try to use them. Removing OSTree causes fuse-libs to
# disappear unless we mark it as user-installed.
- dnf mark install fuse-libs
- dnf erase -y bubblewrap ostree
# Unpack and get into dist/buildstream
- cd dist && ./unpack.sh && cd buildstream
- useradd -Um buildstream
- chown -R buildstream:buildstream .
# Since the unix platform is required to run as root, no user change required
- python3 setup.py test --index-url invalid://uri --addopts --integration
- ${TEST_COMMAND}
tests-fedora-update-deps:
# Check if the tests pass after updating requirements to their latest
# allowed version.
allow_failure: true
image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests
script:
- useradd -Um buildstream
- chown -R buildstream:buildstream .
- make --always-make --directory requirements
- cat requirements/*.txt
- su buildstream -c "${TEST_COMMAND}"
# Lint separately from testing
lint:
stage: test
before_script:
# Diagnostics
- python3 --version
# Go back to the toplevel and collect our reports
- cd ../..
- mkdir -p coverage-unix/
- cp dist/buildstream/.coverage coverage-unix/coverage.unix
script:
- tox -e lint
except:
- schedules
artifacts:
paths:
- coverage-unix/
- logs-unix/
# Automatically build documentation for every commit, we want to know
# if building documentation fails even if we're not deploying it.
# Note: We still do not enforce a consistent installation of python3-sphinx,
# as it will significantly grow the backing image.
docs:
stage: test
variables:
BST_FORCE_SESSION_REBUILD: 1
script:
- export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources"
# Currently sphinx_rtd_theme does not support Sphinx >1.8, this breaks search functionality
- pip3 install sphinx==1.7.9
- pip3 install sphinx-click
- pip3 install sphinx_rtd_theme
- cd dist && ./unpack.sh && cd buildstream
- make BST_FORCE_SESSION_REBUILD=1 -C doc
- cd ../..
- mv dist/buildstream/doc/build/html public
- env BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources" tox -e docs
- mv doc/build/html public
except:
- schedules
artifacts:
......@@ -163,8 +171,8 @@ docs:
stage: test
variables:
BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
BST_EXT_REF: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
FD_SDK_REF: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.11-35-g88d7c22c
BST_EXT_REF: 0.9.0-0-g63a19e8068bd777bd9cd59b1a9442f9749ea5a85
FD_SDK_REF: freedesktop-sdk-18.08.25-0-g250939d465d6dd7768a215f1fa59c4a3412fc337
before_script:
- |
mkdir -p "${HOME}/.config"
......@@ -172,7 +180,8 @@ docs:
scheduler:
fetchers: 2
EOF
- (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
- pip3 install -r requirements/requirements.txt -r requirements/plugin-requirements.txt
- pip3 install --no-index .
- pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
- git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
- git -C freedesktop-sdk checkout ${FD_SDK_REF}
......@@ -255,30 +264,28 @@ coverage:
stage: post
coverage: '/TOTAL +\d+ +\d+ +(\d+\.\d+)%/'
script:
- cd dist && ./unpack.sh && cd buildstream
- pip3 install --no-index .
- mkdir report
- cd report
- cp ../../../coverage-unix/coverage.unix .
- cp ../../../coverage-linux/coverage.* .
- ls coverage.*
- coverage combine --rcfile=../.coveragerc -a coverage.*
- coverage report --rcfile=../.coveragerc -m
- cp -a .coverage-reports/ ./coverage-sources
- tox -e coverage
- cp -a .coverage-reports/ ./coverage-report
dependencies:
- tests-debian-9
- tests-fedora-27
- tests-fedora-28
- tests-fedora-missing-deps
- tests-ubuntu-18.04
- tests-unix
- source_dist
except:
- schedules
artifacts:
paths:
- coverage-sources/
- coverage-report/
# Deploy, only for merges which land on master branch.
#
pages:
stage: post
dependencies:
- source_dist
- docs
variables:
ACME_DIR: public/.well-known/acme-challenge
......
......@@ -97,7 +97,13 @@ a new merge request. You can also `create a merge request for an existing branch
You may open merge requests for the branches you create before you are ready
to have them reviewed and considered for inclusion if you like. Until your merge
request is ready for review, the merge request title must be prefixed with the
``WIP:`` identifier.
``WIP:`` identifier. GitLab `treats this specially
<https://docs.gitlab.com/ee/user/project/merge_requests/work_in_progress_merge_requests.html>`_,
which helps reviewers.
Consider marking a merge request as WIP again if you are taking a while to
address a review point. This signals that the next action is on you, and it
won't appear in a reviewer's search for non-WIP merge requests to review.
Organized commits
......@@ -122,6 +128,12 @@ If a commit in your branch modifies behavior such that a test must also
be changed to match the new behavior, then the tests should be updated
with the same commit, so that every commit passes its own tests.
These principles apply whenever a branch is non-WIP. So for example, don't push
'fixup!' commits when addressing review comments, instead amend the commits
directly before pushing. GitLab has `good support
<https://docs.gitlab.com/ee/user/project/merge_requests/versions.html>`_ for
diffing between pushes, so 'fixup!' commits are not necessary for reviewers.
Commit messages
~~~~~~~~~~~~~~~
......@@ -144,6 +156,16 @@ number must be referenced in the commit message.
Fixes #123
Note that the 'why' of a change is as important as the 'what'.
When reviewing this, folks can suggest better alternatives when they know the
'why'. Perhaps there are other ways to avoid an error when things are not
frobnicated.
When folks modify this code, there may be uncertainty around whether the foos
should always be frobnicated. The comments, the commit message, and issue #123
should shed some light on that.
In the case that you have a commit which necessarily modifies multiple
components, then the summary line should still mention generally what
changed (if possible), followed by a colon and a brief summary.
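For instance, a hypothetical summary line for a commit that touches both the
artifact cache and the CAS server code might read::

    artifactcache/casserver: Clean up least recently used objects

The component names and wording above are purely illustrative.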
......@@ -531,7 +553,7 @@ One problem which arises from this is that we end up having symbols
which are *public* according to the :ref:`rules discussed in the previous section
<contributing_public_and_private>`, but must be hidden away from the
*"Public API Surface"*. For example, BuildStream internal classes need
to invoke methods on the ``Element`` and ``Source`` classes, wheras these
to invoke methods on the ``Element`` and ``Source`` classes, whereas these
methods need to be hidden from the *"Public API Surface"*.
This is where BuildStream deviates from the PEP-8 standard for public
......@@ -609,7 +631,7 @@ An element plugin will derive from Element by importing::
from buildstream import Element
When importing utilities specifically, dont import function names
When importing utilities specifically, don't import function names
from there, instead import the module itself::
from . import utils
......@@ -715,7 +737,7 @@ Abstract methods
~~~~~~~~~~~~~~~~
In BuildStream, an *"Abstract Method"* is a bit of a misnomer and does
not match up to how Python defines abstract methods, we need to seek out
a new nomanclature to refer to these methods.
a new nomenclature to refer to these methods.
In Python, an *"Abstract Method"* is a method which **must** be
implemented by a subclass, whereas all methods in Python can be
......@@ -938,7 +960,7 @@ possible, and avoid any cyclic relationships in modules.
For instance, the ``Source`` objects are owned by ``Element``
objects in the BuildStream data model, and as such the ``Element``
will delegate some activities to the ``Source`` objects in its
possesion. The ``Source`` objects should however never call functions
possession. The ``Source`` objects should however never call functions
on the ``Element`` object, nor should the ``Source`` object itself
have any understanding of what an ``Element`` is.
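To make this ownership rule concrete, here is a minimal, hypothetical Python
sketch; the ``FakeElement`` and ``FakeSource`` names are illustrative stand-ins
and do not correspond to BuildStream's real classes::

    class FakeSource():
        # Owned object: performs its own work and never reaches back up
        # to the element which owns it.
        def __init__(self, url):
            self.url = url

        def fetch(self):
            return "fetched {}".format(self.url)


    class FakeElement():
        # Owner: holds its sources and delegates activities to them.
        def __init__(self, sources):
            self._sources = list(sources)

        def fetch_all(self):
            # Delegation flows downward only: Element -> Source.
            return [source.fetch() for source in self._sources]


    element = FakeElement([FakeSource("https://example.com/repo.git")])
    assert element.fetch_all() == ["fetched https://example.com/repo.git"]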
......@@ -1200,27 +1222,13 @@ For further information about using the reStructuredText with sphinx, please see
Building Docs
~~~~~~~~~~~~~
The documentation build is not integrated into the ``setup.py`` and is
difficult (or impossible) to do so, so there is a little bit of setup
you need to take care of first.
Before you can build the BuildStream documentation yourself, you need
to first install ``sphinx`` along with some additional plugins and dependencies,
using pip or some other mechanism::
# Install sphinx
pip3 install --user sphinx
# Install some sphinx extensions
pip3 install --user sphinx-click
pip3 install --user sphinx_rtd_theme
# Additional optional dependencies required
pip3 install --user arpy
Before you can build the docs, you will need to ensure that you have installed
the required :ref:`build dependencies <contributing_build_deps>` as mentioned
in the testing section above.
To build the documentation, just run the following::
make -C doc
tox -e docs
This will give you a ``doc/build/html`` directory with the html docs which
you can view in your browser locally to test.
......@@ -1238,9 +1246,10 @@ will make the docs build reuse already downloaded sources::
export BST_SOURCE_CACHE=~/.cache/buildstream/sources
To force rebuild session html while building the doc, simply build the docs like this::
To force a rebuild of the session html while building the docs, simply run `tox` with the
``BST_FORCE_SESSION_REBUILD`` environment variable set, like so::
make BST_FORCE_SESSION_REBUILD=1 -C doc
env BST_FORCE_SESSION_REBUILD=1 tox -e docs
Man pages
......@@ -1356,7 +1365,7 @@ Structure of an example
'''''''''''''''''''''''
The :ref:`tutorial <tutorial>` and the :ref:`examples <examples>` sections
of the documentation contain a series of sample projects, each chapter in
the tutoral, or standalone example uses a sample project.
the tutorial, or standalone example uses a sample project.
Here is the structure for adding new examples and tutorial chapters.
......@@ -1446,63 +1455,159 @@ regenerate them locally in order to build the docs.
Testing
-------
BuildStream uses pytest for regression tests and testing out
the behavior of newly added components.
BuildStream uses `tox <https://tox.readthedocs.org/>`_ as a frontend to run the
tests which are implemented using `pytest <https://pytest.org/>`_. We use
pytest for regression tests and testing out the behavior of newly added
components.
The elaborate documentation for pytest can be found here: http://doc.pytest.org/en/latest/contents.html
Don't get lost in the docs if you don't need to; follow existing examples instead.
.. _contributing_build_deps:
Installing build dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some of BuildStream's dependencies have non-python build dependencies. When
running tests with ``tox``, you will first need to install these dependencies.
Exact steps to install these will depend on your operating system. Commands
for installing them for some common distributions are listed below.
For Fedora-based systems::
dnf install gcc pkg-config python3-devel cairo-gobject-devel glib2-devel gobject-introspection-devel
For Debian-based systems::
apt install gcc pkg-config python3-dev libcairo2-dev libgirepository1.0-dev
Running tests
~~~~~~~~~~~~~
To run the tests, just type::
To run the tests, simply navigate to the toplevel directory of your BuildStream
checkout and run::
tox
By default, the test suite will be run against every supported python version
found on your host. If you have multiple python versions installed, you may
want to run tests against only one version and you can do that using the ``-e``
option when running tox::
tox -e py37
If you would like to test and lint at the same time, or if you have multiple
python versions installed and would like to test against multiple versions, then
we recommend using `detox <https://github.com/tox-dev/detox>`_; just run it with
the same arguments you would give `tox`::
detox -e lint,py36,py37
Linting is performed separately from testing. To run the linting step, which
consists of running the ``pycodestyle`` and ``pylint`` tools, run the following::
./setup.py test
tox -e lint
At the toplevel.
.. tip::
When debugging a test, it can be desirable to see the stdout
and stderr generated by a test, to do this use the ``--addopts``
function to feed arguments to pytest as such::
The project-specific pylint and pycodestyle configurations are stored in the
toplevel buildstream directory, in the ``.pylintrc`` and ``setup.cfg`` files
respectively. These configurations can be useful with IDEs and other developer
tooling.
./setup.py test --addopts -s
The output of all failing tests will always be printed in the summary, but
if you want to observe the stdout and stderr generated by a passing test,
you can pass the ``-s`` option to pytest as such::
tox -- -s
.. tip::
The ``-s`` option is `a pytest option <https://docs.pytest.org/latest/usage.html>`_.
Any options specified before the ``--`` separator are consumed by ``tox``,
and any options after the ``--`` separator will be passed along to pytest.
You can always abort on the first failure by running::
./setup.py test --addopts -x
tox -- -x
Similarly, you may also be interested in the ``--last-failed`` and
``--failed-first`` options as per the
`pytest cache <https://docs.pytest.org/en/latest/cache.html>`_ documentation.
If you want to run a specific test or a group of tests, you
can specify a prefix to match. E.g. if you want to run all of
the frontend tests you can do::
./setup.py test --addopts 'tests/frontend/'
tox -- tests/frontend/
Specific tests can be chosen by using the :: delimeter after the test module.
Specific tests can be chosen by using the :: delimiter after the test module.
If you wanted to run the test_build_track test within frontend/buildtrack.py you could do::
./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track'
tox -- tests/frontend/buildtrack.py::test_build_track
When running only a few tests, you may find the coverage and timing output
excessive; there are options to trim them. Note that the coverage step will fail.
Here is an example::
tox -- --no-cov --durations=1 tests/frontend/buildtrack.py::test_build_track
We also have a set of slow integration tests that are disabled by
default - you will notice most of them marked with SKIP in the pytest
output. To run them, you can use::
./setup.py test --addopts '--integration'
tox -- --integration
By default, buildstream also runs pylint on all files. Should you want
to run just pylint (these checks are a lot faster), you can do so
with::
In case BuildStream's dependencies were updated since you last ran the
tests, you might see some errors like
``pytest: error: unrecognized arguments: --codestyle``. If this happens, you
will need to force ``tox`` to recreate the test environment(s). To do so, you
can run ``tox`` with the ``-r`` or ``--recreate`` option.
.. note::
By default, we do not allow use of site packages in our ``tox``
configuration to enable running the tests in an isolated environment.
If you need to enable use of site packages for whatever reason, you can
do so by passing the ``--sitepackages`` option to ``tox``. Also, you will
not need to install any of the build dependencies mentioned above if you
use this approach.
.. note::
While using ``tox`` is practical for developers running tests in
more predictable execution environments, it is still possible to
execute the test suite against a specific installation environment
using pytest directly::
./setup.py test
./setup.py test --addopts '-m pylint'
Specific options can be passed to ``pytest`` using the ``--addopts``
option::
Alternatively, any IDE plugin that uses pytest should automatically
detect the ``.pylintrc`` in the project's root directory.
./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track'
Observing coverage
~~~~~~~~~~~~~~~~~~
Once you have run the tests using `tox` (or `detox`), some coverage reports will
have been left behind.
To view the coverage report of the last test run, simply run::
tox -e coverage
This will collate any reports from separate python environments that may be
under test before displaying the combined coverage.
Adding tests
~~~~~~~~~~~~
Tests are found in the tests subdirectory, inside of which
there is a separarate directory for each *domain* of tests.
there is a separate directory for each *domain* of tests.
All tests are collected as::
tests/*/*.py
......@@ -1525,6 +1630,51 @@ Tests that run a sandbox should be decorated with::
and use the integration cli helper.
You must test your changes in an end-to-end fashion. Consider the first end to
be the appropriate user interface, and the other end to be the change you have
made.
The aim for our tests is to make assertions about how you impact and define the
outward user experience. You should be able to exercise all code paths via the
user interface, just as one can test the strength of rivets by sailing dozens
of ocean liners. Keep in mind that your ocean liners could be sailing properly
*because* of a malfunctioning rivet. End-to-end testing will warn you that
fixing the rivet will sink the ships.
The primary user interface is the cli, so that should be the first target 'end'
for testing. Most of the value of BuildStream comes from what you can achieve
with the cli.
We also have what we call a *"Public API Surface"*, as previously mentioned in
:ref:`contributing_documenting_symbols`. You should consider this a secondary
target. This is mainly for advanced users to implement their plugins against.
Note that both of these targets for testing are guaranteed to continue working
in the same way across versions. This means that tests written in terms of them
will be robust to large changes to the code. This important property means that
BuildStream developers can make large refactorings without needing to rewrite
fragile tests.
Another user to consider is the BuildStream developer, therefore internal API
surfaces are also targets for testing. For example the YAML loading code, and
the CasCache. Remember that these surfaces are still just a means to the end of
providing value through the cli and the *"Public API Surface"*.
It may be impractical to sufficiently examine some changes in an end-to-end
fashion. The number of cases to test, and the running time of each test, may be
too high. Such typically low-level things, e.g. parsers, may also be tested
with unit tests, alongside the mandatory end-to-end tests.
It is important to write unit tests that are not fragile, i.e. in such a way
that they do not break due to changes unrelated to what they are meant to test.
For example, if the test relies on a lot of BuildStream internals, a large
refactoring will likely require the test to be rewritten. Pure functions that
only rely on the Python Standard Library are excellent candidates for unit
testing.
Unit tests only make it easier to implement things correctly; end-to-end tests
make it easier to implement the right thing.
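As a loose illustration of the end-to-end style described above, here is a
sketch of a test that drives everything through the command line. The ``cli``
fixture import path, the ``datafiles`` sample project layout and the element
name used here are assumptions modelled on the existing frontend tests, so
compare with a neighbouring test under ``tests/`` before copying it::

    import os
    import pytest

    # Assumption: the cli fixture lives in the shared test utilities, as it
    # does for the existing frontend tests; adjust the import to match.
    from tests.testutils import cli  # pylint: disable=unused-import

    # Assumption: a small sample project containing a 'target.bst' element is
    # shipped alongside this test module.
    DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'project')


    @pytest.mark.datafiles(DATA_DIR)
    def test_build_then_show(cli, datafiles):
        project = os.path.join(datafiles.dirname, datafiles.basename)

        # First end: the user interface. Build the element exactly as a user would.
        result = cli.run(project=project, args=['build', 'target.bst'])
        result.assert_success()

        # Assert on outward behaviour rather than on internal state.
        result = cli.run(project=project, args=[
            'show', '--deps', 'none', '--format', '%{state}', 'target.bst',
        ])
        result.assert_success()
        assert result.output.strip() == 'cached'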
Measuring performance
---------------------
......@@ -1616,10 +1766,8 @@ obtain profiles::
ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts
The MANIFEST.in and setup.py
----------------------------
When adding a dependency to BuildStream, it's important to update the setup.py accordingly.
Managing data files
-------------------
When adding data files which need to be discovered at runtime by BuildStream, update setup.py accordingly.
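As a generic illustration only (this is not a copy of BuildStream's actual
setup.py), runtime data files are typically declared with setuptools so that
they are installed inside the package and can be located by the running
program::

    from setuptools import setup, find_packages

    # The project name and file patterns below are illustrative only.
    setup(
        name='example-package',
        version='0.1',
        packages=find_packages(),
        # Files listed here are installed inside the package itself, so the
        # running program can discover them at runtime.
        package_data={
            'example_package': ['data/*.yaml', 'data/*.conf'],
        },
    )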
When adding data files for the purpose of docs or tests, or anything that is not covered by
......@@ -1629,3 +1777,23 @@ At any time, running the following command to create a source distribution shoul
creating a tarball which contains everything we want it to include::
./setup.py sdist
Updating BuildStream's Python dependencies
------------------------------------------
BuildStream's Python dependencies are listed in multiple
`requirements files <https://pip.readthedocs.io/en/latest/reference/pip_install/#requirements-file-format>`_
present in the ``requirements`` directory.
All ``.txt`` files in this directory are generated from the corresponding
``.in`` file, and each ``.in`` file represents a set of dependencies. For
example, ``requirements.in`` contains all runtime dependencies of BuildStream.
``requirements.txt`` is generated from it, and contains pinned versions of all
runtime dependencies (including transitive dependencies) of BuildStream.
When adding a new dependency to BuildStream, or updating existing dependencies,
it is important to update the appropriate requirements file accordingly. After
changing the ``.in`` file, run the following to update the matching ``.txt``
file::
make -C requirements
......@@ -19,10 +19,12 @@ recursive-include doc/source *.html
recursive-include doc/source *.odg
recursive-include doc/source *.svg
recursive-include doc/examples *
recursive-include doc/sessions *.run
# Tests
recursive-include tests *
include conftest.py
include tox.ini
include .coveragerc
include .pylintrc
......@@ -30,7 +32,12 @@ include .pylintrc
recursive-include buildstream/_protos *.proto
# Requirements files
include dev-requirements.txt
include requirements/requirements.in
include requirements/requirements.txt
include requirements/dev-requirements.in
include requirements/dev-requirements.txt
include requirements/plugin-requirements.in
include requirements/plugin-requirements.txt
# Versioneer
include versioneer.py
......@@ -2,12 +2,40 @@
buildstream 1.3.1
=================
o Added `bst artifact log` subcommand for viewing build logs.
o BREAKING CHANGE: The bst source-bundle command has been removed. The
functionality it provided has been replaced by the `--include-build-scripts`
option of the `bst source-checkout` command. To produce a tarball containing
an element's sources and generated build scripts you can run
`bst source-checkout --include-build-scripts --tar foo.bst some-file.tar`
o BREAKING CHANGE: The `bst track` and `bst fetch` commands are now obsolete.
Their functionality is provided by `bst source track` and
`bst source fetch` respectively.
o Added new `bst source checkout` command to checkout sources of an element.
o BREAKING CHANGE: Default strip-commands have been removed as they are too
specific. If you are building on Linux, the recommendation is to use the ones
used in the freedesktop-sdk project, for example
o All elements must now be suffixed with `.bst`
Attempting to use an element that does not have the `.bst` extension
will result in a warning.
o BREAKING CHANGE: The 'manual' element lost its default 'MAKEFLAGS' and 'V'
environment variables. There is already a 'make' element with the same
variables. Note that this is a breaking change; it will require users to
make changes to their .bst files if they are expecting these environment
variables to be set.
o BREAKING CHANGE: The 'auto-init' functionality has been removed. This would
offer to create a project in the event that bst was run against a directory
without a project, to be friendly to new users. It has been replaced with
an error message and a hint instead, to avoid bothering folks that just
made a mistake.
o Failed builds are included in the cache as well.
`bst checkout` will provide anything in `%{install-root}`.
A build including cached failures will cause any dependent elements
......@@ -38,13 +66,49 @@ buildstream 1.3.1
a bug fix to workspaces so they can be built in workspaces too.
o Creating a build shell through the interactive mode or `bst shell --build`
will now use the cached build tree. It is now easier to debug local build
failures.
will now use the cached build tree if available locally. It is now easier to
debug local build failures.
o `bst shell --sysroot` now takes any directory that contains a sysroot,
instead of just a specially-formatted build-root with a `root` and `scratch`
subdirectory.
o The buildstream.conf file learned new
'prompt.really-workspace-close-remove-dir' and
'prompt.really-workspace-reset-hard' options. These allow users to suppress
certain confirmation prompts, e.g. double-checking that the user meant to
run the command as typed.
o Because the element `build tree` is now cached in the respective artifact,
artifact size has in some cases increased significantly. In *most* cases the
build trees are not utilised when building targets, so by default bst 'pull' &
'build' will not fetch build trees from remotes. This behaviour can be overridden
with the cli main option '--pull-buildtrees', or the user configuration cache
group option 'pull-buildtrees = True'. The override will also add the build tree
to already cached artifacts. When attempting to populate an artifactcache server
with cached artifacts, only 'complete' elements can be pushed. If the element
is expected to have a populated build tree then it must be cached before pushing.
o `bst workspace open` now supports opening workspaces for multiple elements at
once, and allows the user to set a default location for their creation. This has meant
that the new CLI is no longer backwards compatible with buildstream 1.2.
o Add sandbox API for command batching and use it for build, script, and
compose elements.
o BREAKING CHANGE: The `git` plugin does not create a local `.git`
repository by default. If `git describe` is required to work, the
plugin now provides a tag tracking feature instead. This can be enabled
by setting 'track-tags'.
o Opening a workspace now creates a .bstproject.yaml file that allows buildstream
commands to be run from a workspace that is not inside a project.
o Specifying an element is now optional for some commands when buildstream is run
from inside a workspace - the 'build', 'checkout', 'fetch', 'pull', 'push',
'shell', 'show', 'source-checkout', 'track', 'workspace close' and 'workspace reset'
commands are affected.
=================
buildstream 1.1.5
......
......@@ -16,6 +16,9 @@ About
.. image:: https://img.shields.io/pypi/v/BuildStream.svg
:target: https://pypi.org/project/BuildStream
.. image:: https://app.fossa.io/api/projects/git%2Bgitlab.com%2FBuildStream%2Fbuildstream.svg?type=shield
:target: https://app.fossa.io/projects/git%2Bgitlab.com%2FBuildStream%2Fbuildstream?ref=badge_shield
What is BuildStream?
====================
......
......@@ -27,10 +27,15 @@ if "_BST_COMPLETION" not in os.environ:
del get_versions
from .utils import UtilError, ProgramNotFoundError
from .sandbox import Sandbox, SandboxFlags
from .types import Scope, Consistency
from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
from .types import Scope, Consistency, CoreWarnings
from .plugin import Plugin
from .source import Source, SourceError, SourceFetcher
from .element import Element, ElementError
from .buildelement import BuildElement
from .scriptelement import ScriptElement
# XXX We are exposing a private member here as we expect it to move to a
# separate package soon. See the following discussion for more details:
# https://gitlab.com/BuildStream/buildstream/issues/739#note_124819869
from ._gitsourcebase import _GitSourceBase
......@@ -21,7 +21,6 @@ import multiprocessing
import os
import signal
import string
from collections import namedtuple
from collections.abc import Mapping
from ..types import _KeyStrength
......@@ -31,7 +30,7 @@ from .. import _signals
from .. import utils
from .. import _yaml
from .cascache import CASCache, CASRemote
from .cascache import CASRemote, CASRemoteSpec
CACHE_SIZE_FILE = "cache_size"
......@@ -45,48 +44,8 @@ CACHE_SIZE_FILE = "cache_size"
# push (bool): Whether we should attempt to push artifacts to this cache,
# in addition to pulling from it.
#
class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert client_key client_cert')):
# _new_from_config_node
#
# Creates an ArtifactCacheSpec() from a YAML loaded node
#
@staticmethod
def _new_from_config_node(spec_node, basedir=None):
_yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert'])
url = _yaml.node_get(spec_node, str, 'url')
push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
if not url:
provenance = _yaml.node_get_provenance(spec_node, 'url')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: empty artifact cache URL".format(provenance))
server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
if server_cert and basedir:
server_cert = os.path.join(basedir, server_cert)
client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
if client_key and basedir:
client_key = os.path.join(basedir, client_key)
client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
if client_cert and basedir:
client_cert = os.path.join(basedir, client_cert)
if client_key and not client_cert:
provenance = _yaml.node_get_provenance(spec_node, 'client-key')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-key' was specified without 'client-cert'".format(provenance))
if client_cert and not client_key:
provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-cert' was specified without 'client-key'".format(provenance))
return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
class ArtifactCacheSpec(CASRemoteSpec):
pass
# An ArtifactCache manages artifacts.
......@@ -99,7 +58,7 @@ class ArtifactCache():
self.context = context
self.extractdir = os.path.join(context.artifactdir, 'extract')
self.cas = CASCache(context.artifactdir)
self.cas = context.get_cascache()
self.global_remote_specs = []
self.project_remote_specs = {}
......@@ -476,6 +435,22 @@ class ArtifactCache():
return self.cas.contains(ref)
# contains_subdir_artifact():
#
# Check whether an artifact element contains a digest for a subdir
# which is populated in the cache, i.e. non-dangling.
#
# Args:
# element (Element): The Element to check
# key (str): The cache key to use
# subdir (str): The subdir to check
#
# Returns: True if the subdir exists & is populated in the cache, False otherwise
#
def contains_subdir_artifact(self, element, key, subdir):
ref = self.get_artifact_fullname(element, key)
return self.cas.contains_subdir_artifact(ref, subdir)
# list_artifacts():
#
# List artifacts in this cache in LRU order.
......@@ -533,6 +508,7 @@ class ArtifactCache():
# Args:
# element (Element): The Element to extract
# key (str): The cache key to use
# subdir (str): Optional specific subdir to extract
#
# Raises:
# ArtifactError: In cases there was an OSError, or if the artifact
......@@ -540,12 +516,12 @@ class ArtifactCache():
#
# Returns: path to extracted artifact
#
def extract(self, element, key):
def extract(self, element, key, subdir=None):
ref = self.get_artifact_fullname(element, key)
path = os.path.join(self.extractdir, element._get_project().name, element.normal_name)
return self.cas.extract(ref, path)
return self.cas.extract(ref, path, subdir=subdir)
# commit():
#
......@@ -666,11 +642,13 @@ class ArtifactCache():
# element (Element): The Element whose artifact is to be fetched
# key (str): The cache key to use
# progress (callable): The progress callback, if any
# subdir (str): The optional specific subdir to pull
# excluded_subdirs (list): The optional list of subdirs to not pull
#
# Returns:
# (bool): True if pull was successful, False if artifact was not available
#
def pull(self, element, key, *, progress=None):
def pull(self, element, key, *, progress=None, subdir=None, excluded_subdirs=None):
ref = self.get_artifact_fullname(element, key)
project = element._get_project()
......@@ -680,8 +658,13 @@ class ArtifactCache():
display_key = element._get_brief_display_key()
element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url))
if self.cas.pull(ref, remote, progress=progress):
if self.cas.pull(ref, remote, progress=progress, subdir=subdir, excluded_subdirs=excluded_subdirs):
element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
if subdir:
# Attempt to extract the subdir into the artifact extract dir if the latter
# already exists without containing the subdir. If the respective artifact
# extract dir does not exist, a complete extraction will be performed.
self.extract(element, key, subdir)
# no need to pull from additional remotes
return True
else:
......@@ -768,34 +751,6 @@ class ArtifactCache():
return message_digest
# verify_digest_pushed():
#
# Check whether the object is already on the server in which case
# there is no need to upload it.
#
# Args:
# project (Project): The current project
# digest (Digest): The object digest.
#
def verify_digest_pushed(self, project, digest):
if self._has_push_remotes:
push_remotes = [r for r in self._remotes[project] if r.spec.push]
else:
push_remotes = []
if not push_remotes:
raise ArtifactError("verify_digest_pushed was called, but no remote artifact " +
"servers are configured as push remotes.")
pushed = False
for remote in push_remotes:
if self.cas.verify_digest_on_remote(remote, digest):
pushed = True
return pushed
# link_key():
#
# Add a key for an existing artifact.
......@@ -919,9 +874,7 @@ class ArtifactCache():
"\nValid values are, for example: 800M 10G 1T 50%\n"
.format(str(e))) from e
stat = os.statvfs(artifactdir_volume)
available_space = (stat.f_bsize * stat.f_bavail)
available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
cache_size = self.get_cache_size()
# Ensure system has enough storage for the cache_quota
......@@ -937,15 +890,22 @@ class ArtifactCache():
"Invalid cache quota ({}): ".format(utils._pretty_size(cache_quota)) +
"BuildStream requires a minimum cache quota of 2G.")
elif cache_quota > cache_size + available_space: # Check maximum
if '%' in self.context.config_cache_quota:
available = (available_space / total_size) * 100
available = '{}% of total disk space'.format(round(available, 1))
else:
available = utils._pretty_size(available_space)
raise LoadError(LoadErrorReason.INVALID_DATA,
("Your system does not have enough available " +
"space to support the cache quota specified.\n" +
"You currently have:\n" +
"- {used} of cache in use at {local_cache_path}\n" +
"- {available} of available system storage").format(
used=utils._pretty_size(cache_size),
local_cache_path=self.context.artifactdir,
available=utils._pretty_size(available_space)))
"\nYou have specified a quota of {quota} total disk space.\n" +
"- The filesystem containing {local_cache_path} only " +
"has: {available_size} available.")
.format(
quota=self.context.config_cache_quota,
local_cache_path=self.context.artifactdir,
available_size=available))
# Place a slight headroom (2e9 (2GB) on the cache_quota) into
# cache_quota to try and avoid exceptions.
......@@ -957,6 +917,20 @@ class ArtifactCache():
self._cache_quota = cache_quota - headroom
self._cache_lower_threshold = self._cache_quota / 2
# _get_volume_space_info_for
#
# Get the available space and total space for the given volume
#
# Args:
# volume: volume for which to get the size
#
# Returns:
# A tuple containing first the available number of bytes on the requested
# volume, then the total number of bytes of the volume.
def _get_volume_space_info_for(self, volume):
stat = os.statvfs(volume)
return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
# _configured_remote_artifact_cache_specs():
#
......
This diff is collapsed.
......@@ -24,13 +24,16 @@ import signal
import sys
import tempfile
import uuid
import errno
import threading
import click
import grpc
import click
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .._protos.google.rpc import code_pb2
from .._exceptions import CASError
......@@ -55,18 +58,22 @@ class ArtifactTooLargeException(Exception):
# repo (str): Path to CAS repository
# enable_push (bool): Whether to allow blob uploads and artifact updates
#
def create_server(repo, *, enable_push):
def create_server(repo, *, enable_push,
max_head_size=int(10e9),
min_head_size=int(2e9)):
cas = CASCache(os.path.abspath(repo))
# Use max_workers default from Python 3.5+
max_workers = (os.cpu_count() or 1) * 5
server = grpc.server(futures.ThreadPoolExecutor(max_workers))
cache_cleaner = _CacheCleaner(cas, max_head_size, min_head_size)
bytestream_pb2_grpc.add_ByteStreamServicer_to_server(
_ByteStreamServicer(cas, enable_push=enable_push), server)
_ByteStreamServicer(cas, cache_cleaner, enable_push=enable_push), server)
remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
_ContentAddressableStorageServicer(cas, enable_push=enable_push), server)
_ContentAddressableStorageServicer(cas, cache_cleaner, enable_push=enable_push), server)
remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
_CapabilitiesServicer(), server)
......@@ -84,9 +91,19 @@ def create_server(repo, *, enable_push):
@click.option('--client-certs', help="Public client certificates for TLS (PEM-encoded)")
@click.option('--enable-push', default=False, is_flag=True,
help="Allow clients to upload blobs and update artifact cache")
@click.option('--head-room-min', type=click.INT,
help="Disk head room minimum in bytes",
default=2e9)
@click.option('--head-room-max', type=click.INT,
help="Disk head room maximum in bytes",
default=10e9)
@click.argument('repo')
def server_main(repo, port, server_key, server_cert, client_certs, enable_push):
server = create_server(repo, enable_push=enable_push)
def server_main(repo, port, server_key, server_cert, client_certs, enable_push,
head_room_min, head_room_max):
server = create_server(repo,
max_head_size=head_room_max,
min_head_size=head_room_min,
enable_push=enable_push)
use_tls = bool(server_key)
......@@ -128,10 +145,11 @@ def server_main(repo, port, server_key, server_cert, client_certs, enable_push):
class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
def __init__(self, cas, *, enable_push):
def __init__(self, cas, cache_cleaner, *, enable_push):
super().__init__()
self.cas = cas
self.enable_push = enable_push
self.cache_cleaner = cache_cleaner
def Read(self, request, context):
resource_name = request.resource_name
......@@ -189,17 +207,34 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
context.set_code(grpc.StatusCode.NOT_FOUND)
return response
try:
_clean_up_cache(self.cas, client_digest.size_bytes)
except ArtifactTooLargeException as e:
context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
context.set_details(str(e))
return response
while True:
if client_digest.size_bytes == 0:
break
try:
self.cache_cleaner.clean_up(client_digest.size_bytes)
except ArtifactTooLargeException as e:
context.set_code(grpc.StatusCode.RESOURCE_EXHAUSTED)
context.set_details(str(e))
return response
try:
os.posix_fallocate(out.fileno(), 0, client_digest.size_bytes)
break
except OSError as e:
# Multiple uploads can happen at the same time
if e.errno != errno.ENOSPC:
raise
elif request.resource_name:
# If it is set on subsequent calls, it **must** match the value of the first request.
if request.resource_name != resource_name:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
if (offset + len(request.data)) > client_digest.size_bytes:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
out.write(request.data)
offset += len(request.data)
if request.finish_write:
......@@ -207,7 +242,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
out.flush()
digest = self.cas.add_object(path=out.name)
digest = self.cas.add_object(path=out.name, link_directly=True)
if digest.hash != client_digest.hash:
context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
return response
......@@ -220,18 +255,26 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
def __init__(self, cas, *, enable_push):
def __init__(self, cas, cache_cleaner, *, enable_push):
super().__init__()
self.cas = cas
self.enable_push = enable_push
self.cache_cleaner = cache_cleaner
def FindMissingBlobs(self, request, context):
response = remote_execution_pb2.FindMissingBlobsResponse()
for digest in request.blob_digests:
if not _has_object(self.cas, digest):
d = response.missing_blob_digests.add()
d.hash = digest.hash
d.size_bytes = digest.size_bytes
objpath = self.cas.objpath(digest)
try:
os.utime(objpath)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
d = response.missing_blob_digests.add()
d.hash = digest.hash
d.size_bytes = digest.size_bytes
return response
def BatchReadBlobs(self, request, context):
......@@ -250,12 +293,12 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres
try:
with open(self.cas.objpath(digest), 'rb') as f:
if os.fstat(f.fileno()).st_size != digest.size_bytes:
blob_response.status.code = grpc.StatusCode.NOT_FOUND
blob_response.status.code = code_pb2.NOT_FOUND
continue
blob_response.data = f.read(digest.size_bytes)
except FileNotFoundError:
blob_response.status.code = grpc.StatusCode.NOT_FOUND
blob_response.status.code = code_pb2.NOT_FOUND
return response
......@@ -285,7 +328,7 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres
continue
try:
_clean_up_cache(self.cas, digest.size_bytes)
self.cache_cleaner.clean_up(digest.size_bytes)
with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
out.write(blob_request.data)
......@@ -328,6 +371,12 @@ class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
try:
tree = self.cas.resolve_ref(request.key, update_mtime=True)
try:
self.cas.update_tree_mtime(tree)
except FileNotFoundError:
self.cas.remove(request.key, defer_prune=True)
context.set_code(grpc.StatusCode.NOT_FOUND)
return response
response.digest.hash = tree.hash
response.digest.size_bytes = tree.size_bytes
......@@ -400,60 +449,79 @@ def _digest_from_upload_resource_name(resource_name):
return None
def _has_object(cas, digest):
objpath = cas.objpath(digest)
return os.path.exists(objpath)
class _CacheCleaner:
__cleanup_cache_lock = threading.Lock()
# _clean_up_cache()
#
# Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
# is enough space for the incoming artifact
#
# Args:
# cas: CASCache object
# object_size: The size of the object being received in bytes
#
# Returns:
# int: The total bytes removed on the filesystem
#
def _clean_up_cache(cas, object_size):
# Determine the available disk space, in bytes, of the file system
# which mounts the repo
stats = os.statvfs(cas.casdir)
buffer_ = int(2e9) # Add a 2 GB buffer
free_disk_space = (stats.f_bfree * stats.f_bsize) - buffer_
total_disk_space = (stats.f_blocks * stats.f_bsize) - buffer_
if object_size > total_disk_space:
raise ArtifactTooLargeException("Artifact of size: {} is too large for "
"the filesystem which mounts the remote "
"cache".format(object_size))
if object_size <= free_disk_space:
# No need to clean up
return 0
# obtain a list of LRP artifacts
LRP_artifacts = cas.list_refs()
removed_size = 0 # in bytes
while object_size - removed_size > free_disk_space:
try:
to_remove = LRP_artifacts.pop(0) # The first element in the list is the LRP artifact
except IndexError:
# This exception is caught if there are no more artifacts in the list
# LRP_artifacts. This means the the artifact is too large for the filesystem
# so we abort the process
raise ArtifactTooLargeException("Artifact of size {} is too large for "
"the filesystem which mounts the remote "
"cache".format(object_size))
def __init__(self, cas, max_head_size, min_head_size=int(2e9)):
self.__cas = cas
self.__max_head_size = max_head_size
self.__min_head_size = min_head_size
removed_size += cas.remove(to_remove, defer_prune=False)
def __has_space(self, object_size):
stats = os.statvfs(self.__cas.casdir)
free_disk_space = (stats.f_bavail * stats.f_bsize) - self.__min_head_size
total_disk_space = (stats.f_blocks * stats.f_bsize) - self.__min_head_size
if removed_size > 0:
logging.info("Successfully removed {} bytes from the cache".format(removed_size))
else:
logging.info("No artifacts were removed from the cache.")
if object_size > total_disk_space:
raise ArtifactTooLargeException("Artifact of size: {} is too large for "
"the filesystem which mounts the remote "
"cache".format(object_size))
return removed_size
return object_size <= free_disk_space
# clean_up()
#
# Keep removing Least Recently Pushed (LRP) artifacts in a cache until there
# is enough space for the incoming artifact
#
# Args:
# object_size: The size of the object being received in bytes
#
# Returns:
# int: The total bytes removed on the filesystem
#
def clean_up(self, object_size):
if self.__has_space(object_size):
return 0
with _CacheCleaner.__cleanup_cache_lock:
if self.__has_space(object_size):
# Another thread has done the cleanup for us
return 0
stats = os.statvfs(self.__cas.casdir)
target_disk_space = (stats.f_bavail * stats.f_bsize) - self.__max_head_size
# obtain a list of LRP artifacts
LRP_objects = self.__cas.list_objects()
removed_size = 0 # in bytes
last_mtime = 0
while object_size - removed_size > target_disk_space:
try:
last_mtime, to_remove = LRP_objects.pop(0) # The first element in the list is the LRP artifact
except IndexError:
# This exception is caught if there are no more artifacts in the list
# LRP_objects. This means the artifact is too large for the filesystem
# so we abort the process
raise ArtifactTooLargeException("Artifact of size {} is too large for "
"the filesystem which mounts the remote "
"cache".format(object_size))
try:
size = os.stat(to_remove).st_size
os.unlink(to_remove)
removed_size += size
except FileNotFoundError:
pass
self.__cas.clean_up_refs_until(last_mtime)
if removed_size > 0:
logging.info("Successfully removed {} bytes from the cache".format(removed_size))
else:
logging.info("No artifacts were removed from the cache.")
return removed_size
......@@ -31,8 +31,10 @@ from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end
from ._artifactcache import ArtifactCache
from ._workspaces import Workspaces
from ._artifactcache.cascache import CASCache
from ._workspaces import Workspaces, WorkspaceProjectCache, WORKSPACE_PROJECT_FILE
from .plugin import _plugin_lookup
from .sandbox import SandboxRemote
# Context()
......@@ -46,9 +48,12 @@ from .plugin import _plugin_lookup
# verbosity levels and basically anything pertaining to the context
# in which BuildStream was invoked.
#
# Args:
# directory (str): The directory that buildstream was invoked in
#
class Context():
def __init__(self):
def __init__(self, directory=None):
# Filename indicating which configuration file was used, or None for the defaults
self.config_origin = None
......@@ -59,29 +64,35 @@ class Context():
# The directory where build sandboxes will be created
self.builddir = None
# Default root location for workspaces
self.workspacedir = None
# The local binary artifact cache directory
self.artifactdir = None
# The locations from which to push and pull prebuilt artifacts
self.artifact_cache_specs = []
self.artifact_cache_specs = None
# The global remote execution configuration
self.remote_execution_specs = None
# The directory to store build logs
self.logdir = None
# The abbreviated cache key length to display in the UI
self.log_key_length = 0
self.log_key_length = None
# Whether debug mode is enabled
self.log_debug = False
self.log_debug = None
# Whether verbose mode is enabled
self.log_verbose = False
self.log_verbose = None
# Maximum number of lines to print from build logs
self.log_error_lines = 0
self.log_error_lines = None
# Maximum number of lines to print in the master log for a detailed message
self.log_message_lines = 0
self.log_message_lines = None
# Format string for printing the pipeline at startup time
self.log_element_format = None
......@@ -90,19 +101,37 @@ class Context():
self.log_message_format = None
# Maximum number of fetch or refresh tasks
self.sched_fetchers = 4
self.sched_fetchers = None
# Maximum number of build tasks
self.sched_builders = 4
self.sched_builders = None
# Maximum number of push tasks
self.sched_pushers = 4
self.sched_pushers = None
# Maximum number of retries for network tasks
self.sched_network_retries = 2
self.sched_network_retries = None
# What to do when a build fails in non interactive mode
self.sched_error_action = 'continue'
self.sched_error_action = None
# Size of the artifact cache in bytes
self.config_cache_quota = None
# Whether or not to attempt to pull build trees globally
self.pull_buildtrees = None
# Boolean, whether we double-check with the user that they meant to
# remove a workspace directory.
self.prompt_workspace_close_remove_dir = None
# Boolean, whether we double-check with the user that they meant to
# close the workspace when they're using it to access the project.
self.prompt_workspace_close_project_inaccessible = None
# Boolean, whether we double-check with the user that they meant to do
# a hard reset of a workspace, potentially losing changes.
self.prompt_workspace_reset_hard = None
# Whether elements must be rebuilt when their dependencies have changed
self._strict_build_plan = None
......@@ -118,9 +147,11 @@ class Context():
self._projects = []
self._project_overrides = {}
self._workspaces = None
self._workspace_project_cache = WorkspaceProjectCache()
self._log_handle = None
self._log_filename = None
self.config_cache_quota = 'infinity'
self._cascache = None
self._directory = directory
# load()
#
......@@ -160,10 +191,10 @@ class Context():
_yaml.node_validate(defaults, [
'sourcedir', 'builddir', 'artifactdir', 'logdir',
'scheduler', 'artifacts', 'logging', 'projects',
'cache'
'cache', 'prompt', 'workspacedir', 'remote-execution'
])
for directory in ['sourcedir', 'builddir', 'artifactdir', 'logdir']:
for directory in ['sourcedir', 'builddir', 'artifactdir', 'logdir', 'workspacedir']:
# Allow the ~ tilde expansion and any environment variables in
# path specification in the config files.
#
......@@ -178,13 +209,18 @@ class Context():
# our artifactdir - the artifactdir may not have been created
# yet.
cache = _yaml.node_get(defaults, Mapping, 'cache')
_yaml.node_validate(cache, ['quota'])
_yaml.node_validate(cache, ['quota', 'pull-buildtrees'])
self.config_cache_quota = _yaml.node_get(cache, str, 'quota', default_value='infinity')
self.config_cache_quota = _yaml.node_get(cache, str, 'quota')
# Load artifact share configuration
self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults)
self.remote_execution_specs = SandboxRemote.specs_from_config_node(defaults)
# Load pull build trees configuration
self.pull_buildtrees = _yaml.node_get(cache, bool, 'pull-buildtrees')
# Load logging config
logging = _yaml.node_get(defaults, Mapping, 'logging')
_yaml.node_validate(logging, [
......@@ -206,29 +242,46 @@ class Context():
'on-error', 'fetchers', 'builders',
'pushers', 'network-retries'
])
self.sched_error_action = _yaml.node_get(scheduler, str, 'on-error')
self.sched_error_action = _node_get_option_str(
scheduler, 'on-error', ['continue', 'quit', 'terminate'])
self.sched_fetchers = _yaml.node_get(scheduler, int, 'fetchers')
self.sched_builders = _yaml.node_get(scheduler, int, 'builders')
self.sched_pushers = _yaml.node_get(scheduler, int, 'pushers')
self.sched_network_retries = _yaml.node_get(scheduler, int, 'network-retries')
# Load prompt preferences
#
# We convert string options to booleans here, so we can be both user
# and coder-friendly. The string options are worded to match the
# responses the user would give at the cli, for least surprise. The
# booleans are converted here because it's easiest to eyeball that the
# strings are right.
#
prompt = _yaml.node_get(
defaults, Mapping, 'prompt')
_yaml.node_validate(prompt, [
'really-workspace-close-remove-dir',
'really-workspace-close-project-inaccessible',
'really-workspace-reset-hard',
])
self.prompt_workspace_close_remove_dir = _node_get_option_str(
prompt, 'really-workspace-close-remove-dir', ['ask', 'yes']) == 'ask'
self.prompt_workspace_close_project_inaccessible = _node_get_option_str(
prompt, 'really-workspace-close-project-inaccessible', ['ask', 'yes']) == 'ask'
self.prompt_workspace_reset_hard = _node_get_option_str(
prompt, 'really-workspace-reset-hard', ['ask', 'yes']) == 'ask'
# Load per-projects overrides
self._project_overrides = _yaml.node_get(defaults, Mapping, 'projects', default_value={})
# Shallow validation of overrides, parts of buildstream which rely
# on the overrides are expected to validate elsewhere.
for _, overrides in _yaml.node_items(self._project_overrides):
_yaml.node_validate(overrides, ['artifacts', 'options', 'strict', 'default-mirror'])
_yaml.node_validate(overrides, ['artifacts', 'options', 'strict', 'default-mirror',
'remote-execution'])
profile_end(Topics.LOAD_CONTEXT, 'load')
valid_actions = ['continue', 'quit']
if self.sched_error_action not in valid_actions:
provenance = _yaml.node_get_provenance(scheduler, 'on-error')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: on-error should be one of: {}".format(
provenance, ", ".join(valid_actions)))
@property
def artifactcache(self):
if not self._artifactcache:
......@@ -245,7 +298,7 @@ class Context():
#
def add_project(self, project):
if not self._projects:
self._workspaces = Workspaces(project)
self._workspaces = Workspaces(project, self._workspace_project_cache)
self._projects.append(project)
# get_projects():
......@@ -272,6 +325,16 @@ class Context():
def get_workspaces(self):
return self._workspaces
# get_workspace_project_cache():
#
# Return the WorkspaceProjectCache object used for this BuildStream invocation
#
# Returns:
# (WorkspaceProjectCache): The WorkspaceProjectCache object
#
def get_workspace_project_cache(self):
return self._workspace_project_cache
# get_overrides():
#
# Fetch the override dictionary for the active project. This returns
......@@ -581,3 +644,49 @@ class Context():
os.environ['XDG_CONFIG_HOME'] = os.path.expanduser('~/.config')
if not os.environ.get('XDG_DATA_HOME'):
os.environ['XDG_DATA_HOME'] = os.path.expanduser('~/.local/share')
def get_cascache(self):
if self._cascache is None:
self._cascache = CASCache(self.artifactdir)
return self._cascache
# guess_element()
#
# Attempts to interpret which element the user intended to run commands on
#
# Returns:
# (str) The name of the element, or None if no element can be guessed
def guess_element(self):
workspace_project_dir, _ = utils._search_upward_for_files(self._directory, [WORKSPACE_PROJECT_FILE])
if workspace_project_dir:
workspace_project = self._workspace_project_cache.get(workspace_project_dir)
return workspace_project.get_default_element()
else:
return None
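For illustration, the upward search that guess_element() relies on has roughly this shape. This is a standalone sketch, not the real utils._search_upward_for_files(), which takes a list of filenames and returns a pair.

import os

# Walk up from the starting directory until the file is found or the root is reached.
def search_upward_for_file(start, filename):
    directory = os.path.abspath(start)
    while True:
        if os.path.exists(os.path.join(directory, filename)):
            return directory
        parent = os.path.dirname(directory)
        if parent == directory:      # reached the filesystem root, nothing found
            return None
        directory = parent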
# _node_get_option_str()
#
# Like _yaml.node_get(), but also checks that the value is one of the allowed
# option strings: it fetches a value from a dictionary node and makes sure the
# value is one of the pre-defined options.
#
# Args:
# node (dict): The dictionary node
# key (str): The key to get a value for in node
# allowed_options (iterable): Only accept these values
#
# Returns:
# The value, if found in 'node'.
#
# Raises:
# LoadError, when the value is not of the expected type, or is not found.
#
def _node_get_option_str(node, key, allowed_options):
result = _yaml.node_get(node, str, key)
if result not in allowed_options:
provenance = _yaml.node_get_provenance(node, key)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: {} should be one of: {}".format(
provenance, key, ", ".join(allowed_options)))
return result
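The same validation pattern, reduced to a plain-dict sketch for illustration; the real helper goes through the _yaml node API and reports the provenance of the offending key.

def get_option_str(node, key, allowed_options):
    # Fetch a string value and reject anything outside the allowed set.
    result = node[key]
    if result not in allowed_options:
        raise ValueError("{} should be one of: {}".format(key, ", ".join(allowed_options)))
    return result

get_option_str({'on-error': 'quit'}, 'on-error', ['continue', 'quit', 'terminate'])   # -> 'quit'
# A value such as 'retry' would raise ValueError instead of being silently accepted.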
......@@ -38,7 +38,7 @@ from .._message import Message, MessageType, unconditional_messages
from .._stream import Stream
from .._versions import BST_FORMAT_VERSION
from .. import _yaml
from .._scheduler import ElementJob
from .._scheduler import ElementJob, JobStatus
# Import frontend assets
from . import Profile, LogLine, Status
......@@ -164,7 +164,7 @@ class App():
# Load the Context
#
try:
self.context = Context()
self.context = Context(directory)
self.context.load(config)
except BstError as e:
self._error_exit(e, "Error loading user configuration")
......@@ -182,7 +182,8 @@ class App():
'fetchers': 'sched_fetchers',
'builders': 'sched_builders',
'pushers': 'sched_pushers',
'network_retries': 'sched_network_retries'
'network_retries': 'sched_network_retries',
'pull_buildtrees': 'pull_buildtrees'
}
for cli_option, context_attr in override_map.items():
option_value = self._main_options.get(cli_option)
......@@ -218,12 +219,13 @@ class App():
default_mirror=self._main_options.get('default_mirror'))
except LoadError as e:
# Let's automatically start a `bst init` session in this case
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF and self.interactive:
click.echo("A project was not detected in the directory: {}".format(directory), err=True)
# Help users that are new to BuildStream by suggesting 'init'.
# We don't want to slow down users that just made a mistake, so
# don't stop them with an offer to create a project for them.
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
click.echo("No project found. You can create a new project like so:", err=True)
click.echo("", err=True)
if click.confirm("Would you like to create a new project here ?"):
self.init_project(None)
click.echo(" bst init", err=True)
self._error_exit(e, "Error loading project")
......@@ -513,13 +515,13 @@ class App():
self._status.add_job(job)
self._maybe_render_status()
def _job_completed(self, job, success):
def _job_completed(self, job, status):
self._status.remove_job(job)
self._maybe_render_status()
# Dont attempt to handle a failure if the user has already opted to
# terminate
if not success and not self.stream.terminated:
if status == JobStatus.FAIL and not self.stream.terminated:
if isinstance(job, ElementJob):
element = job.element
......@@ -597,7 +599,7 @@ class App():
click.echo("\nDropping into an interactive shell in the failed build sandbox\n", err=True)
try:
prompt = self.shell_prompt(element)
self.stream.shell(element, Scope.BUILD, prompt, isolate=True)
self.stream.shell(element, Scope.BUILD, prompt, isolate=True, usebuildtree=True)
except BstError as e:
click.echo("Error while attempting to create interactive shell: {}".format(e), err=True)
elif choice == 'log':
......
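A minimal sketch of how a frontend callback consumes the new job status; JobStatus.FAIL appears in the diff above, while any other member names would be assumptions.

# JobStatus is imported from buildstream._scheduler, as in the diff above.
def on_job_completed(job, status):
    # The callback now receives a JobStatus value instead of a bare boolean,
    # so outcomes other than plain success or failure can be told apart later.
    if status == JobStatus.FAIL:
        print("job failed: {}".format(job))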
This diff is collapsed.
......@@ -31,7 +31,7 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import collections.abc
import copy
import os
......@@ -203,7 +203,7 @@ def is_incomplete_option(all_args, cmd_param):
if start_of_option(arg_str):
last_option = arg_str
return True if last_option and last_option in cmd_param.opts else False
return bool(last_option and last_option in cmd_param.opts)
def is_incomplete_argument(current_params, cmd_param):
......@@ -218,7 +218,7 @@ def is_incomplete_argument(current_params, cmd_param):
return True
if cmd_param.nargs == -1:
return True
if isinstance(current_param_values, collections.Iterable) \
if isinstance(current_param_values, collections.abc.Iterable) \
and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:
return True
return False
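The switch to collections.abc matters because importing the ABCs directly from collections was deprecated in Python 3.3 and the aliases were removed in Python 3.10; a quick check of the replacement:

import collections.abc

isinstance([1, 2, 3], collections.abc.Iterable)   # True
isinstance(42, collections.abc.Iterable)          # False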
......@@ -297,12 +297,15 @@ def get_choices(cli, prog_name, args, incomplete, override):
if not found_param and isinstance(ctx.command, MultiCommand):
# completion for any subcommands
choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)])
choices.extend([cmd + " " for cmd in ctx.command.list_commands(ctx)
if not ctx.command.get_command(ctx, cmd).hidden])
if not start_of_option(incomplete) and ctx.parent is not None \
and isinstance(ctx.parent.command, MultiCommand) and ctx.parent.command.chain:
# completion for chained commands
remaining_comands = set(ctx.parent.command.list_commands(ctx.parent)) - set(ctx.parent.protected_args)
visible_commands = [cmd for cmd in ctx.parent.command.list_commands(ctx.parent)
if not ctx.parent.command.get_command(ctx.parent, cmd).hidden]
remaining_comands = set(visible_commands) - set(ctx.parent.protected_args)
choices.extend([cmd + " " for cmd in remaining_comands])
for item in choices:
......
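For context, the hidden attribute checked above is standard click (7.0 and later); a command declared as in the sketch below is what the completion code now filters out.

import click

@click.group()
def cli():
    pass

@cli.command(hidden=True)       # excluded from completion by the filtering above
def internal_command():
    """Not offered to users in shell completion."""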
......@@ -23,8 +23,8 @@ from contextlib import ExitStack
from mmap import mmap
import re
import textwrap
import click
from ruamel import yaml
import click
from . import Profile
from .. import Element, Consistency
......
This diff is collapsed.
......@@ -36,6 +36,8 @@ from .types import Symbol, Dependency
from .loadelement import LoadElement
from . import MetaElement
from . import MetaSource
from ..types import CoreWarnings
from .._message import Message, MessageType
# Loader():
......@@ -106,6 +108,8 @@ class Loader():
"path to the base project directory: {}"
.format(filename, self._basedir))
self._warn_invalid_elements(targets)
# First pass, recursively load files and populate our table of LoadElements
#
deps = []
......@@ -284,6 +288,9 @@ class Loader():
"{}: Cannot depend on junction"
.format(dep.provenance))
deps_names = [dep.name for dep in element.deps]
self._warn_invalid_elements(deps_names)
return element
# _check_circular_deps():
......@@ -563,17 +570,23 @@ class Loader():
"Subproject has no ref for junction: {}".format(filename),
detail=detail)
# Stage sources
os.makedirs(self._context.builddir, exist_ok=True)
basedir = tempfile.mkdtemp(prefix="{}-".format(element.normal_name), dir=self._context.builddir)
element._stage_sources_at(basedir, mount_workspaces=False)
if len(sources) == 1 and sources[0]._get_local_path():
# Optimization for junctions with a single local source
basedir = sources[0]._get_local_path()
tempdir = None
else:
# Stage sources
os.makedirs(self._context.builddir, exist_ok=True)
basedir = tempfile.mkdtemp(prefix="{}-".format(element.normal_name), dir=self._context.builddir)
element._stage_sources_at(basedir, mount_workspaces=False)
tempdir = basedir
# Load the project
project_dir = os.path.join(basedir, element.path)
try:
from .._project import Project
project = Project(project_dir, self._context, junction=element,
parent_loader=self, tempdir=basedir)
parent_loader=self, tempdir=tempdir)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
raise LoadError(reason=LoadErrorReason.INVALID_JUNCTION,
......@@ -633,3 +646,88 @@ class Loader():
loader = self._get_loader(junction_path[-2], rewritable=rewritable, ticker=ticker,
fetch_subprojects=fetch_subprojects)
return junction_path[-2], junction_path[-1], loader
# Print a warning message, checking warning_token against the project configuration
#
# Args:
# brief (str): The brief message
#   warning_token (str): An optional configurable warning associated with this warning;
#                        if the project configuration marks the warning as fatal, a
#                        LoadError will be raised.
#                        (*Since 1.4*)
#
# Raises:
# (:class:`.LoadError`): When warning_token is considered fatal by the project configuration
#
def _warn(self, brief, *, warning_token=None):
if warning_token:
if self.project._warning_is_fatal(warning_token):
raise LoadError(warning_token, brief)
message = Message(None, MessageType.WARN, brief)
self._context.message(message)
# Print warning messages if any of the specified elements have invalid names.
#
# Valid filenames should end with ".bst" extension.
#
# Args:
# elements (list): List of element names
#
# Raises:
# (:class:`.LoadError`): When warning_token is considered fatal by the project configuration
#
def _warn_invalid_elements(self, elements):
# invalid_elements
#
# A dict that maps warning types to the matching elements.
invalid_elements = {
CoreWarnings.BAD_ELEMENT_SUFFIX: [],
CoreWarnings.BAD_CHARACTERS_IN_NAME: [],
}
for filename in elements:
if not filename.endswith(".bst"):
invalid_elements[CoreWarnings.BAD_ELEMENT_SUFFIX].append(filename)
if not self._valid_chars_name(filename):
invalid_elements[CoreWarnings.BAD_CHARACTERS_IN_NAME].append(filename)
if invalid_elements[CoreWarnings.BAD_ELEMENT_SUFFIX]:
self._warn("Target elements '{}' do not have expected file extension `.bst` "
"Improperly named elements will not be discoverable by commands"
.format(invalid_elements[CoreWarnings.BAD_ELEMENT_SUFFIX]),
warning_token=CoreWarnings.BAD_ELEMENT_SUFFIX)
if invalid_elements[CoreWarnings.BAD_CHARACTERS_IN_NAME]:
self._warn("Target elements '{}' have invalid characerts in their name."
.format(invalid_elements[CoreWarnings.BAD_CHARACTERS_IN_NAME]),
warning_token=CoreWarnings.BAD_CHARACTERS_IN_NAME)
# Check if the given filename contains valid characters.
#
# Args:
# name (str): Name of the file
#
# Returns:
# (bool): True if all characters are valid, False otherwise.
#
def _valid_chars_name(self, name):
for char in name:
char_val = ord(char)
# 0-31 are control chars, 127 is DEL, and >127 means non-ASCII
if char_val <= 31 or char_val >= 127:
return False
# Disallow characters that are invalid on Windows. The list can be
# found at https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file
#
# Note that although : (colon) is not allowed, we do not raise
# warnings because of that, since we use it as a separator for
# junctioned elements.
#
# We also do not raise warnings on slashes since they are used as
# path separators.
if char in r'<>"|?*':
return False
return True
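Hypothetical element names, to illustrate what the checks above accept and reject (loader stands in for a Loader instance):

loader._valid_chars_name("gnu/gcc.bst")       # True: slashes are allowed as path separators
loader._valid_chars_name("core:base.bst")     # True: ':' is tolerated as the junction separator
loader._valid_chars_name('bad"name.bst')      # False: '"' is invalid on Windows
loader._valid_chars_name("tab\tname.bst")     # False: control characters are rejected
# "element.yaml" passes the character check but triggers BAD_ELEMENT_SUFFIX,
# since element names are expected to end in ".bst".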
......@@ -17,7 +17,9 @@
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
import os
from .. import _yaml
from .._exceptions import LoadError, LoadErrorReason, PlatformError
from .._platform import Platform
from .optionenum import OptionEnum
......@@ -41,8 +43,34 @@ class OptionArch(OptionEnum):
super(OptionArch, self).load(node, allow_default_definition=False)
def load_default_value(self, node):
_, _, _, _, machine_arch = os.uname()
return machine_arch
arch = Platform.get_host_arch()
default_value = None
for index, value in enumerate(self.values):
try:
canonical_value = Platform.canonicalize_arch(value)
if default_value is None and canonical_value == arch:
default_value = value
# Do not terminate the loop early to ensure we validate
# all values in the list.
except PlatformError as e:
provenance = _yaml.node_get_provenance(node, key='values', indices=[index])
prefix = ""
if provenance:
prefix = "{}: ".format(provenance)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}Invalid value for {} option '{}': {}"
.format(prefix, self.OPTION_TYPE, self.name, e))
if default_value is None:
# Host architecture is not supported by the project.
# Do not raise an error here as the user may override it.
# If the user does not override it, an error will be raised
# by resolve()/validate().
default_value = arch
return default_value
def resolve(self):
......
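A rough sketch of what the canonicalization loop is doing; the alias table below is illustrative and not the real Platform.canonicalize_arch() mapping.

ALIASES = {'amd64': 'x86-64', 'x86_64': 'x86-64', 'arm64': 'aarch64'}

def canonicalize_arch(arch):
    # Map common spellings of an architecture onto one canonical name.
    return ALIASES.get(arch.lower(), arch.lower())

# With project values ['x86_64', 'aarch64'] and a host reporting 'amd64', the
# loop above would pick 'x86_64' as the default, because both spellings
# canonicalize to the same architecture.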
#
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
import os
from .optionenum import OptionEnum
# OptionOS
#
class OptionOS(OptionEnum):
OPTION_TYPE = 'os'
def load(self, node):
super(OptionOS, self).load(node, allow_default_definition=False)
def load_default_value(self, node):
return os.uname()[0]
def resolve(self):
# Validate that the default OS reported by uname() is explicitly
# supported by the project, if not overridden by user configuration or the command line.
self.validate(self.value)
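The default comes straight from the kernel name reported by uname(); a quick check of what load_default_value() returns on a typical host. Overriding it, e.g. with bst's -o/--option flag, is assumed to be the escape hatch when the host OS is not in the project's list.

import os

# os.uname() is POSIX-only; field 0 (sysname) is the value used as the default,
# typically 'Linux' on Linux hosts or 'Darwin' on macOS.
print(os.uname()[0])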