Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (441)
Showing 2655 additions and 919 deletions
......@@ -26,6 +26,7 @@ __pycache__/
buildstream/__version__.py
# Autogenerated doc
doc/source/badges/
doc/source/sessions/
doc/source/elements/
doc/source/sources/
......@@ -33,3 +34,4 @@ doc/source/modules.rst
doc/source/buildstream.rst
doc/source/buildstream.*.rst
doc/build/
versioneer.pyc
image: buildstream/testsuite-debian:9-master-114-4cab18e3
image: buildstream/testsuite-debian:9-master-119-552f5fc6
cache:
key: "$CI_JOB_NAME-"
......@@ -78,31 +78,33 @@ source_dist:
# Go back to the toplevel and collect our reports
- cd ../..
- mkdir -p coverage-linux/
- cp dist/buildstream/.coverage.* coverage-linux/coverage."${CI_JOB_NAME}"
- cp dist/buildstream/.coverage coverage-linux/coverage."${CI_JOB_NAME}"
except:
- schedules
artifacts:
paths:
- coverage-linux/
tests-debian-9:
image: buildstream/testsuite-debian:9-master-114-4cab18e3
image: buildstream/testsuite-debian:9-master-119-552f5fc6
<<: *linux-tests
tests-fedora-27:
image: buildstream/testsuite-fedora:27-master-114-4cab18e3
image: buildstream/testsuite-fedora:27-master-119-552f5fc6
<<: *linux-tests
tests-fedora-28:
image: buildstream/testsuite-fedora:28-master-114-4cab18e3
image: buildstream/testsuite-fedora:28-master-119-552f5fc6
<<: *linux-tests
tests-ubuntu-18.04:
image: buildstream/testsuite-ubuntu:18.04-master-114-4cab18e3
image: buildstream/testsuite-ubuntu:18.04-master-119-552f5fc6
<<: *linux-tests
tests-unix:
# Use fedora here, to a) run a test on fedora and b) ensure that we
# can get rid of ostree - this is not possible with debian-8
image: buildstream/testsuite-fedora:27-master-114-4cab18e3
image: buildstream/testsuite-fedora:27-master-119-552f5fc6
stage: test
variables:
BST_FORCE_BACKEND: "unix"
......@@ -126,7 +128,9 @@ tests-unix:
# Go back to the toplevel and collect our reports
- cd ../..
- mkdir -p coverage-unix/
- cp dist/buildstream/.coverage.* coverage-unix/coverage.unix
- cp dist/buildstream/.coverage coverage-unix/coverage.unix
except:
- schedules
artifacts:
paths:
- coverage-unix/
......@@ -141,17 +145,49 @@ docs:
stage: test
script:
- export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources"
- pip3 install sphinx
# Currently sphinx_rtd_theme does not support Sphinx >1.8, which breaks search functionality
- pip3 install sphinx==1.7.9
- pip3 install sphinx-click
- pip3 install sphinx_rtd_theme
- cd dist && ./unpack.sh && cd buildstream
- make BST_FORCE_SESSION_REBUILD=1 -C doc
- cd ../..
- mv dist/buildstream/doc/build/html public
except:
- schedules
artifacts:
paths:
- public/
.overnight-tests: &overnight-tests-template
stage: test
variables:
BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
BST_EXT_REF: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
FD_SDK_REF: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.11-35-g88d7c22c
before_script:
- (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
- pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
- git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
- git -C freedesktop-sdk checkout ${FD_SDK_REF}
only:
- schedules
overnight-tests:
<<: *overnight-tests-template
script:
- make -C freedesktop-sdk
tags:
- overnight-tests
overnight-tests-no-cache:
<<: *overnight-tests-template
script:
- sed -i '/artifacts:/,+1 d' freedesktop-sdk/bootstrap/project.conf
- sed -i '/artifacts:/,+1 d' freedesktop-sdk/project.conf
- make -C freedesktop-sdk
tags:
- overnight-tests
# Check code quality with gitlab's built-in feature.
#
......@@ -170,6 +206,8 @@ code_quality:
--volume "$PWD":/code
--volume /var/run/docker.sock:/var/run/docker.sock
"registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
except:
- schedules
artifacts:
paths: [gl-code-quality-report.json]
......@@ -199,6 +237,8 @@ analysis:
radon raw -s -j buildstream > analysis/raw.json
radon raw -s buildstream
except:
- schedules
artifacts:
paths:
- analysis/
......@@ -224,6 +264,8 @@ coverage:
- tests-fedora-28
- tests-unix
- source_dist
except:
- schedules
# Deploy, only for merges which land on master branch.
#
......@@ -232,8 +274,14 @@ pages:
dependencies:
- source_dist
- docs
variables:
ACME_DIR: public/.well-known/acme-challenge
script:
- find public/
- mkdir -p ${ACME_DIR}
# Required to finish the creation of the Let's Encrypt certificate,
# which allows using https://docs.buildstream.build/ for accessing
# the documentation.
- echo ${ACME_CHALLENGE} > ${ACME_DIR}/$(echo ${ACME_CHALLENGE} | cut -c1-43)
artifacts:
paths:
- public/
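As a side note on the ACME step above, the file name is simply the first 43 characters of the challenge string (the token part), and the full challenge is written as the file content. A rough Python equivalent of the shell pipeline, purely for illustration and assuming ACME_CHALLENGE is set in the environment:

import os

challenge = os.environ["ACME_CHALLENGE"]
acme_dir = "public/.well-known/acme-challenge"
os.makedirs(acme_dir, exist_ok=True)

# Mirrors `cut -c1-43`: the first 43 characters of the challenge string
# become the file name, the whole challenge string becomes the content.
with open(os.path.join(acme_dir, challenge[:43]), "w") as f:
    f.write(challenge + "\n")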
......@@ -248,3 +296,5 @@ pages:
# See https://gitlab.com/gitlab-org/gitlab-ce/issues/35141
#
- master
except:
- schedules
# Basic toplevel package includes
include BuildStream.doap
include COPYING
include HACKING.rst
include CONTRIBUTING.rst
include MAINTAINERS
include NEWS
include README.rst
# Documentation package includes
include doc/Makefile
include doc/badges.py
include doc/bst2html.py
include doc/source/conf.py
include doc/source/index.rst
include doc/source/plugin.rsttemplate
recursive-include doc/source *.rst
recursive-include doc/source *.py
recursive-include doc/source *.in
recursive-include doc/source *.html
recursive-include doc/source *.odg
recursive-include doc/source *.svg
recursive-include doc/examples *
# Tests
recursive-include tests *.py
recursive-include tests *.yaml
recursive-include tests *.bst
recursive-include tests *.conf
recursive-include tests *.sh
recursive-include tests *.expected
recursive-include tests *
include conftest.py
include .coveragerc
include .pylintrc
# Protocol Buffers
recursive-include buildstream/_protos *.proto
# Requirements files
include dev-requirements.txt
# Versioneer
include versioneer.py
......@@ -24,6 +24,12 @@ buildstream 1.3.1
o Add new `pip` source plugin for downloading python packages using pip,
based on requirements files from previous sources.
o Generate Docker images from built artifacts using the
`contrib/bst-docker-import` script.
o Added documentation on how to create out-of-source builds. This includes the
new `conf-root` variable to make the process easier, and a bug fix to
workspaces so that out-of-source builds also work in workspaces.
=================
buildstream 1.1.5
......
About
-----
.. image:: https://docs.buildstream.build/_static/release.svg
:target: https://gitlab.com/BuildStream/buildstream/commits/bst-1.2
.. image:: https://docs.buildstream.build/_static/snapshot.svg
:target: https://gitlab.com/BuildStream/buildstream/commits/master
.. image:: https://gitlab.com/BuildStream/buildstream/badges/master/pipeline.svg
:target: https://gitlab.com/BuildStream/buildstream/commits/master
.. image:: https://gitlab.com/BuildStream/buildstream/badges/master/coverage.svg?job=coverage
:target: https://gitlab.com/BuildStream/buildstream/commits/master
.. image:: https://img.shields.io/pypi/v/BuildStream.svg
:target: https://pypi.org/project/BuildStream
What is BuildStream?
====================
BuildStream is a Free Software tool for building/integrating software stacks.
`BuildStream <https://buildstream.build>`_ is a Free Software tool for
building/integrating software stacks.
It takes inspiration, lessons and use-cases from various projects including
OBS, Reproducible Builds, Yocto, Baserock, Buildroot, Aboriginal, GNOME Continuous,
JHBuild, Flatpak Builder and Android repo.
......@@ -52,7 +63,7 @@ BuildStream offers the following advantages:
How do I use BuildStream?
=========================
Please refer to the `documentation <https://buildstream.gitlab.io/buildstream/>`_
Please refer to the `documentation <https://docs.buildstream.build>`_
for information about installing BuildStream, and about the BuildStream YAML format
and plugin options.
......@@ -75,9 +86,9 @@ BuildStream operates on a set of YAML files (.bst files), as follows:
How can I get started?
======================
To get started, first `install BuildStream by following the installation guide
<https://buildstream.gitlab.io/buildstream/main_install.html>`_
<https://buildstream.build/install.html>`_
and then follow our tutorial in the
`user guide <https://buildstream.gitlab.io/buildstream/main_using.html>`_.
`user guide <https://docs.buildstream.build/main_using.html>`_.
We also recommend exploring some existing BuildStream projects:
......
......@@ -28,9 +28,10 @@ if "_BST_COMPLETION" not in os.environ:
from .utils import UtilError, ProgramNotFoundError
from .sandbox import Sandbox, SandboxFlags
from .types import Scope, Consistency
from .plugin import Plugin
from .source import Source, SourceError, Consistency, SourceFetcher
from .source import Source, SourceError, SourceFetcher
from .element import Element, ElementError
from .element_enums import Scope
from .buildelement import BuildElement
from .scriptelement import ScriptElement
from .downloadablefilesource import DownloadableFileSource
......@@ -19,9 +19,10 @@
import os
import string
from collections import Mapping, namedtuple
from collections import namedtuple
from collections.abc import Mapping
from ..element_enums import _KeyStrength
from ..types import _KeyStrength
from .._exceptions import ArtifactError, ImplError, LoadError, LoadErrorReason
from .._message import Message, MessageType
from .. import utils
......@@ -51,7 +52,7 @@ class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert cl
url = _yaml.node_get(spec_node, str, 'url')
push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
if not url:
provenance = _yaml.node_get_provenance(spec_node)
provenance = _yaml.node_get_provenance(spec_node, 'url')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: empty artifact cache URL".format(provenance))
......@@ -67,6 +68,16 @@ class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert cl
if client_cert and basedir:
client_cert = os.path.join(basedir, client_cert)
if client_key and not client_cert:
provenance = _yaml.node_get_provenance(spec_node, 'client-key')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-key' was specified without 'client-cert'".format(provenance))
if client_cert and not client_key:
provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-cert' was specified without 'client-key'".format(provenance))
return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
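For illustration, the cross-check added above can be sketched as a standalone helper; the dict-based spec node and the helper name here are hypothetical, only the 'client-key'/'client-cert' key names mirror the configuration keys used above:

def validate_client_auth(spec_node):
    # Both halves of the TLS client credentials must be given together.
    client_key = spec_node.get('client-key')
    client_cert = spec_node.get('client-cert')

    if client_key and not client_cert:
        raise ValueError("'client-key' was specified without 'client-cert'")
    if client_cert and not client_key:
        raise ValueError("'client-cert' was specified without 'client-key'")

# Example: raises ValueError, only one half of the credentials is present.
# validate_client_auth({'url': 'https://cache.example.com', 'client-key': 'client.key'})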
......@@ -81,19 +92,17 @@ ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
class ArtifactCache():
def __init__(self, context):
self.context = context
self.required_artifacts = set()
self.extractdir = os.path.join(context.artifactdir, 'extract')
self.tmpdir = os.path.join(context.artifactdir, 'tmp')
self.estimated_size = None
self.global_remote_specs = []
self.project_remote_specs = {}
self._local = False
self.cache_size = None
self.cache_quota = None
self.cache_lower_threshold = None
self._required_elements = set() # The elements required for this session
self._cache_size = None # The current cache size, sometimes it's an estimate
self._cache_quota = None # The cache quota
self._cache_lower_threshold = None # The target cache size for a cleanup
self._remotes_setup = False # Check to prevent double-setup of remotes
os.makedirs(self.extractdir, exist_ok=True)
os.makedirs(self.tmpdir, exist_ok=True)
......@@ -146,6 +155,10 @@ class ArtifactCache():
#
def setup_remotes(self, *, use_config=False, remote_url=None):
# Ensure we do not double-initialise since this can be expensive
assert not self._remotes_setup
self._remotes_setup = True
# Initialize remote artifact caches. We allow the commandline to override
# the user config in some cases (for example `bst push --remote=...`).
has_remote_caches = False
......@@ -192,35 +205,42 @@ class ArtifactCache():
(str(provenance)))
return cache_specs
# append_required_artifacts():
# mark_required_elements():
#
# Mark elements whose artifacts are required for the current run.
#
# Append to the list of elements whose artifacts are required for
# the current run. Artifacts whose elements are in this list will
# be locked by the artifact cache and not touched for the duration
# of the current pipeline.
# Artifacts whose elements are in this list will be locked by the artifact
# cache and not touched for the duration of the current pipeline.
#
# Args:
# elements (iterable): A set of elements to mark as required
#
def append_required_artifacts(self, elements):
# We lock both strong and weak keys - deleting one but not the
# other won't save space in most cases anyway, but would be a
# user inconvenience.
def mark_required_elements(self, elements):
# We risk calling this function with a generator, so we
# better consume it first.
#
elements = list(elements)
# Mark the elements as required. We cannot assume that the cache
# keys are known yet, so we only check them later when deleting.
#
self._required_elements.update(elements)
# For the cache keys which were resolved so far, we bump
# their mtime.
#
# This is just in case we have concurrent instances of
# BuildStream running with the same artifact cache; this
# reduces the likelihood of one instance deleting artifacts
# which are required by the other.
for element in elements:
strong_key = element._get_cache_key(strength=_KeyStrength.STRONG)
weak_key = element._get_cache_key(strength=_KeyStrength.WEAK)
for key in (strong_key, weak_key):
if key and key not in self.required_artifacts:
self.required_artifacts.add(key)
# We also update the usage times of any artifacts
# we will be using, which helps prevent a
# buildstream process that runs in parallel with
# this one from removing artifacts which are in use.
if key:
try:
self.update_atime(key)
self.update_mtime(element, key)
except ArtifactError:
pass
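A minimal sketch of how a caller might drive mark_required_elements() - the pipeline object and its predicate below are placeholders rather than real scheduler code; the point is only that passing a generator is safe because the method consumes it into a list first:

def plan_session(artifacts, pipeline):
    # A generator expression is fine here: mark_required_elements()
    # turns it into a list before iterating over it more than once.
    required = (element for element in pipeline if element.needs_build())
    artifacts.mark_required_elements(required)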
......@@ -228,10 +248,28 @@ class ArtifactCache():
#
# Clean the artifact cache as much as possible.
#
# Returns:
# (int): The size of the cache after having cleaned up
#
def clean(self):
artifacts = self.list_artifacts()
artifacts = self.list_artifacts() # pylint: disable=assignment-from-no-return
while self.calculate_cache_size() >= self.cache_quota - self.cache_lower_threshold:
# Build a set of the cache keys which are required
# based on the required elements at cleanup time
#
# We lock both strong and weak keys - deleting one but not the
# other won't save space, but would be a user inconvenience.
required_artifacts = set()
for element in self._required_elements:
required_artifacts.update([
element._get_cache_key(strength=_KeyStrength.STRONG),
element._get_cache_key(strength=_KeyStrength.WEAK)
])
# Do a real computation of the cache size once, just in case
self.compute_cache_size()
while self.get_cache_size() >= self._cache_lower_threshold:
try:
to_remove = artifacts.pop(0)
except IndexError:
......@@ -245,7 +283,7 @@ class ArtifactCache():
"Please increase the cache-quota in {}."
.format(self.context.config_origin or default_conf))
if self.calculate_cache_size() > self.cache_quota:
if self.has_quota_exceeded():
raise ArtifactError("Cache too full. Aborting.",
detail=detail,
reason="cache-too-full")
......@@ -253,60 +291,116 @@ class ArtifactCache():
break
key = to_remove.rpartition('/')[2]
if key not in self.required_artifacts:
size = self.remove(to_remove)
if size:
self.cache_size -= size
if key not in required_artifacts:
# Remove the actual artifact, if it's not required.
size = self.remove(to_remove) # pylint: disable=assignment-from-no-return
# Subtract the removed artifact's size from the cache size
self.set_cache_size(self._cache_size - size)
# This should be O(1) if implemented correctly
return self.calculate_cache_size()
return self.get_cache_size()
# compute_cache_size()
#
# Computes the real artifact cache size by calling
# the abstract calculate_cache_size() method.
#
# Returns:
# (int): The size of the artifact cache.
#
def compute_cache_size(self):
self._cache_size = self.calculate_cache_size() # pylint: disable=assignment-from-no-return
return self._cache_size
# get_approximate_cache_size()
# add_artifact_size()
#
# A cheap method that aims to serve as an upper limit on the
# artifact cache size.
# Adds the reported size of a newly cached artifact to the
# overall estimated size.
#
# Args:
# artifact_size (int): The size to add.
#
# The cache size reported by this function will normally be larger
# than the real cache size, since it is calculated using the
# pre-commit artifact size, but for very small artifacts in
# certain caches additional overhead could cause this to be
# smaller than, but close to, the actual size.
def add_artifact_size(self, artifact_size):
cache_size = self.get_cache_size()
cache_size += artifact_size
self.set_cache_size(cache_size)
# get_cache_size()
#
# Nonetheless, in practice this should be safe to use as an upper
# limit on the cache size.
# Fetches the cached size of the cache; this is sometimes
# an estimate, and it is periodically adjusted to the real size
# when a cache size calculation job runs.
#
# If the cache has built-in constant-time size reporting, please
# feel free to override this method with a more accurate
# implementation.
# When it is an estimate, the value is either correct, or
# it is greater than the actual cache size.
#
# Returns:
# (int) An approximation of the artifact cache size.
#
def get_approximate_cache_size(self):
# If we don't currently have an estimate, figure out the real
# cache size.
if self.estimated_size is None:
def get_cache_size(self):
# If we don't currently have an estimate, figure out the real cache size.
if self._cache_size is None:
stored_size = self._read_cache_size()
if stored_size is not None:
self.estimated_size = stored_size
self._cache_size = stored_size
else:
self.estimated_size = self.calculate_cache_size()
self.compute_cache_size()
return self.estimated_size
return self._cache_size
# set_cache_size()
#
# Forcefully set the overall cache size.
#
# This is used to update the size in the main process after
# it has been calculated in a cleanup or a cache size calculation job.
#
# Args:
# cache_size (int): The size to set.
#
def set_cache_size(self, cache_size):
assert cache_size is not None
self._cache_size = cache_size
self._write_cache_size(self._cache_size)
# has_quota_exceeded()
#
# Checks if the current artifact cache size exceeds the quota.
#
# Returns:
# (bool): True if the quota is exceeded
#
def has_quota_exceeded(self):
return self.get_cache_size() > self._cache_quota
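Taken together, these methods suggest a simple accounting flow: bump the estimate with add_artifact_size() after each commit, and fall back to clean() once has_quota_exceeded() reports the quota is exceeded. A minimal sketch, assuming a cache object exposing the methods above and a hypothetical measure_artifact() helper:

def commit_artifact(cache, element):
    artifact_size = measure_artifact(element)    # hypothetical helper
    cache.add_artifact_size(artifact_size)       # bump the running estimate

    if cache.has_quota_exceeded():
        # clean() recomputes the real size, removes unrequired artifacts
        # and returns the cache size after cleanup.
        new_size = cache.clean()
        print("cache cleaned down to {} bytes".format(new_size))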
################################################
# Abstract methods for subclasses to implement #
################################################
# update_atime()
# preflight():
#
# Preflight check.
#
def preflight(self):
pass
# update_mtime()
#
# Update the atime of an artifact.
# Update the mtime of an artifact.
#
# Args:
# element (Element): The Element to update
# key (str): The key of the artifact.
#
def update_atime(self, key):
raise ImplError("Cache '{kind}' does not implement contains()"
def update_mtime(self, element, key):
raise ImplError("Cache '{kind}' does not implement update_mtime()"
.format(kind=type(self).__name__))
# initialize_remotes():
......@@ -484,11 +578,8 @@ class ArtifactCache():
#
# Return the real artifact cache size.
#
# Implementations should also use this to update estimated_size.
#
# Returns:
#
# (int) The size of the artifact cache.
# (int): The size of the artifact cache.
#
def calculate_cache_size(self):
raise ImplError("Cache '{kind}' does not implement calculate_cache_size()"
......@@ -535,43 +626,17 @@ class ArtifactCache():
with self.context.timed_activity("Initializing remote caches", silent_nested=True):
self.initialize_remotes(on_failure=remote_failed)
# _add_artifact_size()
#
# Since we cannot keep track of the cache size between threads,
# this method will be called by the main process every time a
# process that added something to the cache finishes.
#
# This will then add the reported size to
# ArtifactCache.estimated_size.
#
def _add_artifact_size(self, artifact_size):
if not self.estimated_size:
self.estimated_size = self.calculate_cache_size()
self.estimated_size += artifact_size
self._write_cache_size(self.estimated_size)
# _set_cache_size()
#
# Similarly to the above method, when we calculate the actual size
# in a child thread, we can't update it. We instead pass the value
# back to the main thread and update it there.
#
def _set_cache_size(self, cache_size):
self.estimated_size = cache_size
# set_cache_size is called in cleanup, where it may set the cache to None
if self.estimated_size is not None:
self._write_cache_size(self.estimated_size)
# _write_cache_size()
#
# Writes the given size of the artifact to the cache's size file
#
# Args:
# size (int): The size of the artifact cache to record
#
def _write_cache_size(self, size):
assert isinstance(size, int)
size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE)
with open(size_file_path, "w") as f:
with utils.save_file_atomic(size_file_path, "w") as f:
f.write(str(size))
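The switch to utils.save_file_atomic() follows the usual write-to-temporary-then-rename pattern, so readers of the size file never observe a partially written value. A generic sketch of that idea (not BuildStream's actual implementation):

import os
import tempfile

def write_atomic(path, data):
    # Create the temporary file in the same directory so the final
    # rename stays on one filesystem and is atomic on POSIX.
    dirname = os.path.dirname(path) or '.'
    fd, tmp_path = tempfile.mkstemp(dir=dirname)
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(data)
        os.rename(tmp_path, path)
    except BaseException:
        os.unlink(tmp_path)
        raise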
# _read_cache_size()
......@@ -579,6 +644,9 @@ class ArtifactCache():
# Reads and returns the size of the artifact cache that's stored in the
# cache's size file
#
# Returns:
# (int): The size of the artifact cache, as recorded in the file
#
def _read_cache_size(self):
size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE)
......@@ -628,13 +696,13 @@ class ArtifactCache():
stat = os.statvfs(artifactdir_volume)
available_space = (stat.f_bsize * stat.f_bavail)
cache_size = self.get_approximate_cache_size()
cache_size = self.get_cache_size()
# Ensure system has enough storage for the cache_quota
#
# If cache_quota is none, set it to the maximum it could possibly be.
#
# Also check that cache_quota is atleast as large as our headroom.
# Also check that cache_quota is at least as large as our headroom.
#
if cache_quota is None: # Infinity, set to max system storage
cache_quota = cache_size + available_space
......@@ -660,8 +728,8 @@ class ArtifactCache():
# if we end up writing more than 2G, but hey, this stuff is
# already really fuzzy.
#
self.cache_quota = cache_quota - headroom
self.cache_lower_threshold = self.cache_quota / 2
self._cache_quota = cache_quota - headroom
self._cache_lower_threshold = self._cache_quota / 2
# _configured_remote_artifact_cache_specs():
......
......@@ -35,7 +35,10 @@ from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .._exceptions import ArtifactError
from .._context import Context
from .cascache import CASCache
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
# Trying to push an artifact that is too large
......@@ -55,7 +58,7 @@ def create_server(repo, *, enable_push):
context = Context()
context.artifactdir = os.path.abspath(repo)
artifactcache = CASCache(context)
artifactcache = context.artifactcache
# Use max_workers default from Python 3.5+
max_workers = (os.cpu_count() or 1) * 5
......@@ -65,7 +68,10 @@ def create_server(repo, *, enable_push):
_ByteStreamServicer(artifactcache, enable_push=enable_push), server)
remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
_ContentAddressableStorageServicer(artifactcache), server)
_ContentAddressableStorageServicer(artifactcache, enable_push=enable_push), server)
remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
_CapabilitiesServicer(), server)
buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
_ReferenceStorageServicer(artifactcache, enable_push=enable_push), server)
......@@ -151,7 +157,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
remaining = client_digest.size_bytes - request.read_offset
while remaining > 0:
chunk_size = min(remaining, 64 * 1024)
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
response = bytestream_pb2.ReadResponse()
......@@ -216,9 +222,10 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
def __init__(self, cas):
def __init__(self, cas, *, enable_push):
super().__init__()
self.cas = cas
self.enable_push = enable_push
def FindMissingBlobs(self, request, context):
response = remote_execution_pb2.FindMissingBlobsResponse()
......@@ -229,6 +236,88 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres
d.size_bytes = digest.size_bytes
return response
def BatchReadBlobs(self, request, context):
response = remote_execution_pb2.BatchReadBlobsResponse()
batch_size = 0
for digest in request.digests:
batch_size += digest.size_bytes
if batch_size > _MAX_PAYLOAD_BYTES:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return response
blob_response = response.responses.add()
blob_response.digest.hash = digest.hash
blob_response.digest.size_bytes = digest.size_bytes
try:
with open(self.cas.objpath(digest), 'rb') as f:
if os.fstat(f.fileno()).st_size != digest.size_bytes:
blob_response.status.code = grpc.StatusCode.NOT_FOUND
continue
blob_response.data = f.read(digest.size_bytes)
except FileNotFoundError:
blob_response.status.code = grpc.StatusCode.NOT_FOUND
return response
def BatchUpdateBlobs(self, request, context):
response = remote_execution_pb2.BatchUpdateBlobsResponse()
if not self.enable_push:
context.set_code(grpc.StatusCode.PERMISSION_DENIED)
return response
batch_size = 0
for blob_request in request.requests:
digest = blob_request.digest
batch_size += digest.size_bytes
if batch_size > _MAX_PAYLOAD_BYTES:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return response
blob_response = response.responses.add()
blob_response.digest.hash = digest.hash
blob_response.digest.size_bytes = digest.size_bytes
if len(blob_request.data) != digest.size_bytes:
blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
continue
try:
_clean_up_cache(self.cas, digest.size_bytes)
with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
out.write(blob_request.data)
out.flush()
server_digest = self.cas.add_object(path=out.name)
if server_digest.hash != digest.hash:
blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
except ArtifactTooLargeException:
blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED
return response
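On the client side, the same limit means blobs have to be grouped so that each BatchUpdateBlobs request stays under _MAX_PAYLOAD_BYTES; a rough, protocol-agnostic sketch of that grouping (not the actual CAS client code) follows. A blob bigger than the limit would still end up alone in an oversized batch here and would have to be uploaded via the ByteStream service instead.

_MAX_PAYLOAD_BYTES = 1024 * 1024

def batch_blobs(blobs):
    # blobs: iterable of (digest, data) pairs; yields lists whose total
    # data size stays below the payload limit enforced by the server above.
    batch, batch_size = [], 0
    for digest, data in blobs:
        if batch and batch_size + len(data) > _MAX_PAYLOAD_BYTES:
            yield batch
            batch, batch_size = [], 0
        batch.append((digest, data))
        batch_size += len(data)
    if batch:
        yield batch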
class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
def GetCapabilities(self, request, context):
response = remote_execution_pb2.ServerCapabilities()
cache_capabilities = response.cache_capabilities
cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
cache_capabilities.action_cache_update_capabilities.update_enabled = False
cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
response.deprecated_api_version.major = 2
response.low_api_version.major = 2
response.high_api_version.major = 2
return response
class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
def __init__(self, cas, *, enable_push):
......
......@@ -19,7 +19,8 @@
import os
import datetime
from collections import deque, Mapping
from collections import deque
from collections.abc import Mapping
from contextlib import contextmanager
from . import utils
from . import _cachekey
......@@ -30,6 +31,7 @@ from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end
from ._artifactcache import ArtifactCache
from ._artifactcache.cascache import CASCache
from ._workspaces import Workspaces
from .plugin import _plugin_lookup
......@@ -113,13 +115,13 @@ class Context():
self._cache_key = None
self._message_handler = None
self._message_depth = deque()
self._artifactcache = None
self._projects = []
self._project_overrides = {}
self._workspaces = None
self._log_handle = None
self._log_filename = None
self.config_cache_quota = 'infinity'
self.artifactdir_volume = None
# load()
#
......@@ -228,6 +230,13 @@ class Context():
"{}: on-error should be one of: {}".format(
provenance, ", ".join(valid_actions)))
@property
def artifactcache(self):
if not self._artifactcache:
self._artifactcache = CASCache(self)
return self._artifactcache
# add_project():
#
# Add a project to the context.
......@@ -355,7 +364,6 @@ class Context():
assert self._message_handler
self._message_handler(message, context=self)
return
# silence()
#
......
......@@ -217,6 +217,12 @@ class LoadErrorReason(Enum):
# A recursive include has been encountered.
RECURSIVE_INCLUDE = 21
# A recursive variable has been encountered
RECURSIVE_VARIABLE = 22
# An attempt to set the value of a protected variable
PROTECTED_VARIABLE_REDEFINED = 23
# LoadError
#
......@@ -306,3 +312,12 @@ class StreamError(BstError):
class AppError(BstError):
def __init__(self, message, detail=None, reason=None):
super().__init__(message, detail=detail, domain=ErrorDomain.APP, reason=reason)
# SkipJob
#
# Raised from a child process within a job when the job should be
# considered skipped by the parent process.
#
class SkipJob(Exception):
pass
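A self-contained sketch of the intended pattern - the job logic and the cache below are placeholders, only the exception mirrors the SkipJob class above:

class SkipJob(Exception):
    pass

def child_job(element, cache):
    if element in cache:                       # placeholder "already cached" check
        raise SkipJob("artifact already cached")
    cache.add(element)                         # placeholder for the real work

def run_job(element, cache):
    try:
        child_job(element, cache)
        return "done"
    except SkipJob as reason:
        return "skipped ({})".format(reason)

cache = set()
print(run_job("hello.bst", cache))   # done
print(run_job("hello.bst", cache))   # skipped (artifact already cached)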
......@@ -20,13 +20,11 @@
from contextlib import contextmanager
import os
import sys
import resource
import traceback
import datetime
from textwrap import TextWrapper
import click
from click import UsageError
from blessings import Terminal
# Import buildstream public symbols
from .. import Scope
......@@ -92,7 +90,7 @@ class App():
#
# Early initialization
#
is_a_tty = Terminal().is_a_tty
is_a_tty = sys.stdout.isatty() and sys.stderr.isatty()
# Enable interactive mode if we're attached to a tty
if main_options['no_interactive']:
......@@ -116,14 +114,6 @@ class App():
else:
self.colors = False
# Increase the soft limit for open file descriptors to the maximum.
# SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
# Avoid hitting the limit too quickly.
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
if limits[0] != limits[1]:
# Set soft limit to hard limit
resource.setrlimit(resource.RLIMIT_NOFILE, (limits[1], limits[1]))
# create()
#
# Should be used instead of the regular constructor.
......@@ -199,10 +189,15 @@ class App():
if option_value is not None:
setattr(self.context, context_attr, option_value)
try:
Platform.create_instance(self.context)
Platform.get_platform()
except BstError as e:
self._error_exit(e, "Error instantiating platform")
try:
self.context.artifactcache.preflight()
except BstError as e:
self._error_exit(e, "Error instantiating artifact cache")
# Create the logger right before setting the message handler
self.logger = LogLine(self.context,
self._content_profile,
......
......@@ -104,7 +104,7 @@ def complete_target(args, incomplete):
# The project is not required to have an element-path
element_directory = project.get('element-path')
# If a project was loaded, use it's element-path to
# If a project was loaded, use its element-path to
# adjust our completion's base directory
if element_directory:
base_directory = os.path.join(base_directory, element_directory)
......
#
# Copyright (c) 2014 by Armin Ronacher.
# Copyright (C) 2016 Codethink Limited
#
# This program is free software; you can redistribute it and/or
......@@ -14,8 +15,22 @@
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# This module was forked from the python click library.
# This module was forked from the python click library. The original
# copyright notice from the Click library and the following disclaimer
# are included as per their LICENSE requirements.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import collections
import copy
import os
......
......@@ -16,8 +16,10 @@
#
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
import os
import sys
import curses
import click
from blessings import Terminal
# Import a widget internal for formatting time codes
from .widget import TimeCode
......@@ -43,6 +45,13 @@ from .._scheduler import ElementJob
#
class Status():
# Table of the terminal capabilities we require and use
_TERM_CAPABILITIES = {
'move_up': 'cuu1',
'move_x': 'hpa',
'clear_eol': 'el'
}
def __init__(self, context,
content_profile, format_profile,
success_profile, error_profile,
......@@ -56,7 +65,6 @@ class Status():
self._stream = stream
self._jobs = []
self._last_lines = 0 # Number of status lines we last printed to console
self._term = Terminal()
self._spacing = 1
self._colors = colors
self._header = _StatusHeader(context,
......@@ -69,6 +77,7 @@ class Status():
self._alloc_columns = None
self._line_length = 0
self._need_alloc = True
self._term_caps = self._init_terminal()
# add_job()
#
......@@ -121,7 +130,7 @@ class Status():
#
def clear(self):
if not self._term.does_styling:
if not self._term_caps:
return
for _ in range(self._last_lines):
......@@ -138,7 +147,7 @@ class Status():
# not necessary to call clear().
def render(self):
if not self._term.does_styling:
if not self._term_caps:
return
elapsed = self._stream.elapsed_time
......@@ -185,6 +194,55 @@ class Status():
###################################################
# Private Methods #
###################################################
# _init_terminal()
#
# Initialize the terminal and return the resolved terminal
# capabilities dictionary.
#
# Returns:
# (dict|None): The resolved terminal capabilities dictionary,
# or None if the terminal does not support all
# of the required capabilities.
#
def _init_terminal(self):
# We need both output streams to be connected to a terminal
if not (sys.stdout.isatty() and sys.stderr.isatty()):
return None
# Initialize the terminal; curses might decide it doesn't
# support this terminal
try:
curses.setupterm(os.environ.get('TERM', 'dumb'))
except curses.error:
return None
term_caps = {}
# Resolve the terminal string capabilities for each of the
# capability names we require.
#
for capname, capval in self._TERM_CAPABILITIES.items():
code = curses.tigetstr(capval)
# If any of the required capabilities resolve to empty strings or None,
# then we don't have the capabilities we need for a status bar on
# this terminal.
if not code:
return None
# Decode sequences as latin1, as they are always 8-bit bytes,
# so when b'\xff' is returned, this must be decoded to u'\xff'.
#
# This technique is employed by the python blessings library
# as well, and should provide better compatibility with most
# terminals.
#
term_caps[capname] = code.decode('latin1')
return term_caps
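The same curses primitives can be exercised standalone; a small sketch looking up and emitting the cuu1 (move up) and el (clear to end of line) capabilities used above, with the same latin1 decoding:

import curses
import os
import sys

try:
    curses.setupterm(os.environ.get('TERM', 'dumb'))
except curses.error:
    sys.exit(0)

move_up = curses.tigetstr('cuu1')      # cursor up one line
clear_eol = curses.tigetstr('el')      # clear to end of line

if move_up and clear_eol:
    # tparm() expands any parameters in the sequence; capabilities are
    # 8-bit byte strings, so decode them as latin1 before printing.
    sys.stdout.write(curses.tparm(move_up).decode('latin1'))
    sys.stdout.write(curses.tparm(clear_eol).decode('latin1'))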
def _check_term_width(self):
term_width, _ = click.get_terminal_size()
if self._term_width != term_width:
......@@ -192,12 +250,24 @@ class Status():
self._need_alloc = True
def _move_up(self):
assert self._term_caps is not None
# Explicitly move to beginning of line, fixes things up
# when there was a ^C or ^Z printed to the terminal.
click.echo(self._term.move_x(0) + self._term.move_up, nl=False, err=True)
move_x = curses.tparm(self._term_caps['move_x'].encode('latin1'), 0)
move_x = move_x.decode('latin1')
move_up = curses.tparm(self._term_caps['move_up'].encode('latin1'))
move_up = move_up.decode('latin1')
click.echo(move_x + move_up, nl=False, err=True)
def _clear_line(self):
click.echo(self._term.clear_eol, nl=False, err=True)
assert self._term_caps is not None
clear_eol = curses.tparm(self._term_caps['clear_eol'].encode('latin1'))
clear_eol = clear_eol.decode('latin1')
click.echo(clear_eol, nl=False, err=True)
def _allocate(self):
if not self._need_alloc:
......
......@@ -42,9 +42,12 @@ from .mount import Mount
#
class SafeHardlinks(Mount):
def __init__(self, directory, tempdir):
def __init__(self, directory, tempdir, fuse_mount_options=None):
self.directory = directory
self.tempdir = tempdir
if fuse_mount_options is None:
fuse_mount_options = {}
super().__init__(fuse_mount_options=fuse_mount_options)
def create_operations(self):
return SafeHardlinkOps(self.directory, self.tempdir)
......@@ -121,7 +124,7 @@ class SafeHardlinkOps(Operations):
st = os.lstat(full_path)
return dict((key, getattr(st, key)) for key in (
'st_atime', 'st_ctime', 'st_gid', 'st_mode',
'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
'st_mtime', 'st_nlink', 'st_size', 'st_uid', 'st_rdev'))
def readdir(self, path, fh):
full_path = self._full_path(path)
......
......@@ -60,7 +60,7 @@ class FuseMountError(Exception):
#
# With the daemon approach, we know that the fuse is mounted right
# away when fuse_main() returns, then the daemon will go and handle
# requests on it's own, but then we have no way to shut down the
# requests on its own, but then we have no way to shut down the
# daemon.
#
# With the blocking approach, we still have it as a child process
......@@ -87,6 +87,9 @@ class Mount():
# User Facing API #
################################################
def __init__(self, fuse_mount_options=None):
self._fuse_mount_options = {} if fuse_mount_options is None else fuse_mount_options
# mount():
#
# User facing API for mounting a fuse subclass implementation
......@@ -102,7 +105,7 @@ class Mount():
self.__process = Process(target=self.__run_fuse)
# Ensure the child fork() does not inherit our signal handlers, if the
# child wants to handle a signal then it will first set it's own
# child wants to handle a signal then it will first set its own
# handler, and then unblock it.
with _signals.blocked([signal.SIGTERM, signal.SIGTSTP, signal.SIGINT], ignore=False):
self.__process.start()
......@@ -179,12 +182,13 @@ class Mount():
# Ask the subclass to give us an Operations object
#
self.__operations = self.create_operations()
self.__operations = self.create_operations() # pylint: disable=assignment-from-no-return
# Run fuse in foreground in this child process, internally libfuse
# will handle SIGTERM and gracefully exit it's own little main loop.
# will handle SIGTERM and gracefully exit its own little main loop.
#
FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, nonempty=True)
FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, nonempty=True,
**self._fuse_mount_options)
# Explicit 0 exit code; if the operations crashed for some reason, the exit
# code will not be 0, and we want to know about it.
......
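For context, the new fuse_mount_options dictionary is simply forwarded as keyword arguments to the fusepy FUSE() constructor, which passes them on to libfuse as mount options. A hedged usage sketch - the 'ro' option name is a standard libfuse mount option used purely as an example, not a documented BuildStream setting:

# SafeHardlinks comes from the hardlinks module shown earlier in this diff.
safe_fs = SafeHardlinks('/srv/extract-dir', '/srv/temp-dir',
                        fuse_mount_options={'ro': True})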
import os
from collections import Mapping
from collections.abc import Mapping
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason
......