
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (49)
Showing 228 additions and 83 deletions
......@@ -74,6 +74,10 @@ buildstream 1.3.1
o Add sandbox API for command batching and use it for build, script, and
compose elements.
o BREAKING CHANGE: The `git` plugin does not create a local `.git`
repository by default. If `git describe` is required to work, the
plugin now has a tag tracking feature instead. This can be enabled
by setting 'track-tags'.
=================
buildstream 1.1.5
......
......@@ -28,7 +28,7 @@ if "_BST_COMPLETION" not in os.environ:
from .utils import UtilError, ProgramNotFoundError
from .sandbox import Sandbox, SandboxFlags, SandboxCommandError
from .types import Scope, Consistency
from .types import Scope, Consistency, CoreWarnings
from .plugin import Plugin
from .source import Source, SourceError, SourceFetcher
from .element import Element, ElementError
......
......@@ -874,9 +874,7 @@ class ArtifactCache():
"\nValid values are, for example: 800M 10G 1T 50%\n"
.format(str(e))) from e
stat = os.statvfs(artifactdir_volume)
available_space = (stat.f_bsize * stat.f_bavail)
available_space, total_size = self._get_volume_space_info_for(artifactdir_volume)
cache_size = self.get_cache_size()
# Ensure system has enough storage for the cache_quota
......@@ -893,7 +891,7 @@ class ArtifactCache():
"BuildStream requires a minimum cache quota of 2G.")
elif cache_quota > cache_size + available_space: # Check maximum
if '%' in self.context.config_cache_quota:
available = (available_space / (stat.f_blocks * stat.f_bsize)) * 100
available = (available_space / total_size) * 100
available = '{}% of total disk space'.format(round(available, 1))
else:
available = utils._pretty_size(available_space)
......@@ -919,6 +917,20 @@ class ArtifactCache():
self._cache_quota = cache_quota - headroom
self._cache_lower_threshold = self._cache_quota / 2
# _get_volume_space_info_for
#
# Get the available space and total space for the given volume
#
# Args:
# volume: volume for which to get the size
#
# Returns:
# A tuple containing first the available number of bytes on the requested
# volume, then the total number of bytes of the volume.
def _get_volume_space_info_for(self, volume):
stat = os.statvfs(volume)
return stat.f_bsize * stat.f_bavail, stat.f_bsize * stat.f_blocks
# _configured_remote_artifact_cache_specs():
#
......
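To make the statvfs arithmetic in the new `_get_volume_space_info_for()` helper concrete, here is a standalone sketch of the same computation (the path is just an example):

    import os

    stat = os.statvfs("/var")
    available = stat.f_bsize * stat.f_bavail  # bytes available to unprivileged processes
    total = stat.f_bsize * stat.f_blocks      # total bytes on the volume
    print("{:.1f}% of total disk space available".format(available / total * 100))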
......@@ -427,10 +427,7 @@ class CASCache():
def push_message(self, remote, message):
message_buffer = message.SerializeToString()
message_sha = hashlib.sha256(message_buffer)
message_digest = remote_execution_pb2.Digest()
message_digest.hash = message_sha.hexdigest()
message_digest.size_bytes = len(message_buffer)
message_digest = utils._message_digest(message_buffer)
remote.init()
......
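The removed inline code above shows what a helper like `utils._message_digest()` presumably computes; a minimal sketch under that assumption (the proto import path is a guess based on the usual BuildStream layout, not confirmed by this diff):

    import hashlib

    from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2

    def _message_digest(message_buffer):
        # CAS identifies a blob by its (sha256-hex, size-in-bytes) pair
        sha = hashlib.sha256(message_buffer)
        digest = remote_execution_pb2.Digest()
        digest.hash = sha.hexdigest()
        digest.size_bytes = len(message_buffer)
        return digest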
......@@ -36,7 +36,7 @@ from .types import Symbol, Dependency
from .loadelement import LoadElement
from . import MetaElement
from . import MetaSource
from ..plugin import CoreWarnings
from ..types import CoreWarnings
from .._message import Message, MessageType
......
......@@ -17,7 +17,7 @@
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
import os
from .._platform import Platform
from .optionenum import OptionEnum
......@@ -41,8 +41,7 @@ class OptionArch(OptionEnum):
super(OptionArch, self).load(node, allow_default_definition=False)
def load_default_value(self, node):
_, _, _, _, machine_arch = os.uname()
return machine_arch
return Platform.get_host_arch()
def resolve(self):
......
#
# Copyright (C) 2017 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Raoul Hidalgo Charman <raoul.hidalgocharman@codethink.co.uk>
import os
from .optionenum import OptionEnum
# OptionOS
#
class OptionOS(OptionEnum):
OPTION_TYPE = 'os'
def load(self, node):
super(OptionOS, self).load(node, allow_default_definition=False)
def load_default_value(self, node):
return os.uname()[0]
def resolve(self):
# Validate that the default OS reported by uname() is explicitly
# supported by the project, if not overridden by user config or cli.
self.validate(self.value)
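To make the resolve() behaviour concrete, a toy restatement of the same pattern with stand-in names rather than the real OptionEnum API: take the host value from uname, then check it against the values the project declares:

    import os

    SUPPORTED_VALUES = ["Linux", "Darwin", "FreeBSD"]  # hypothetical project-declared values

    host_os = os.uname()[0]               # what load_default_value() returns
    if host_os not in SUPPORTED_VALUES:   # roughly what validate() enforces
        raise ValueError("Unsupported host OS: {}".format(host_os))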
......@@ -28,6 +28,7 @@ from .optionenum import OptionEnum
from .optionflags import OptionFlags
from .optioneltmask import OptionEltMask
from .optionarch import OptionArch
from .optionos import OptionOS
_OPTION_TYPES = {
......@@ -36,6 +37,7 @@ _OPTION_TYPES = {
OptionFlags.OPTION_TYPE: OptionFlags,
OptionEltMask.OPTION_TYPE: OptionEltMask,
OptionArch.OPTION_TYPE: OptionArch,
OptionOS.OPTION_TYPE: OptionOS,
}
......
......@@ -25,6 +25,7 @@ from .. import utils
from ..sandbox import SandboxDummy
from . import Platform
from .._exceptions import PlatformError
class Linux(Platform):
......@@ -58,6 +59,9 @@ class Linux(Platform):
else:
self._user_ns_available = False
# Set linux32 option
self._linux32 = False
def create_sandbox(self, *args, **kwargs):
if not self._local_sandbox_available:
return self._create_dummy_sandbox(*args, **kwargs)
......@@ -71,11 +75,33 @@ class Linux(Platform):
if self._user_ns_available:
# User namespace support allows arbitrary build UID/GID settings.
return True
else:
pass
elif (config.build_uid != self._uid or config.build_gid != self._gid):
# Without user namespace support, the UID/GID in the sandbox
# will match the host UID/GID.
return config.build_uid == self._uid and config.build_gid == self._gid
return False
# We can't do builds for another host or architecture, except 32-bit
# builds on the matching 64-bit host (x86-32 on x86-64, aarch32 on
# aarch64)
host_os = self.get_host_os()
host_arch = self.get_host_arch()
if config.build_os != host_os:
raise PlatformError("Configured and host OS don't match.")
elif config.build_arch != host_arch:
# We can use linux32 for building 32bit on 64bit machines
if (host_os == "Linux" and
((config.build_arch == "x86-32" and host_arch == "x86-64") or
(config.build_arch == "aarch32" and host_arch == "aarch64"))):
# check linux32 is available
try:
utils.get_host_tool('linux32')
self._linux32 = True
except utils.ProgramNotFoundError:
pass
else:
raise PlatformError("Configured architecture and host architecture don't match.")
return True
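Restated as a standalone predicate, the cross-build exception above amounts to the following (a sketch; the real method also probes for the linux32 tool with utils.get_host_tool() and records self._linux32):

    def can_build_with_linux32(host_os, host_arch, build_arch):
        # linux32 only covers 32-bit personalities of the same CPU family
        return (host_os == "Linux" and
                ((build_arch == "x86-32" and host_arch == "x86-64") or
                 (build_arch == "aarch32" and host_arch == "aarch64")))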
################################################
# Private Methods #
......@@ -100,6 +126,7 @@ class Linux(Platform):
kwargs['user_ns_available'] = self._user_ns_available
kwargs['die_with_parent_available'] = self._die_with_parent_available
kwargs['json_status_available'] = self._json_status_available
kwargs['linux32'] = self._linux32
return SandboxBwrap(*args, **kwargs)
def _check_user_ns_available(self):
......
......@@ -73,6 +73,44 @@ class Platform():
else:
return min(cpu_count, cap)
@staticmethod
def get_host_os():
return os.uname()[0]
# get_host_arch():
#
# This returns the architecture of the host machine. The possible values
# map from uname -m in order to be an OS-independent list.
#
# Returns:
# (string): String representing the architecture
@staticmethod
def get_host_arch():
# get the hardware identifier from uname
uname_machine = os.uname()[4]
uname_to_arch = {
"aarch64": "aarch64",
"aarch64_be": "aarch64-be",
"amd64": "x86-64",
"arm": "aarch32",
"armv8l": "aarch64",
"armv8b": "aarch64-be",
"i386": "x86-32",
"i486": "x86-32",
"i586": "x86-32",
"i686": "x86-32",
"ppc64": "power-isa-be",
"ppc64le": "power-isa-le",
"sparc": "sparc-v9",
"sparc64": "sparc-v9",
"x86_64": "x86-64"
}
try:
return uname_to_arch[uname_machine]
except KeyError:
raise PlatformError("uname gave unsupported machine architecture: {}"
.format(uname_machine))
##################################################################
# Sandbox functions #
##################################################################
......
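For example, on a host where uname -m reports x86_64 (a hedged usage sketch; the import path mirrors the `from .._platform import Platform` seen earlier in this diff):

    from buildstream._platform import Platform

    # uname machine "x86_64" and "amd64" both normalize to "x86-64"
    print(Platform.get_host_os())    # e.g. "Linux"
    print(Platform.get_host_arch())  # e.g. "x86-64"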
......@@ -44,4 +44,13 @@ class Unix(Platform):
def check_sandbox_config(self, config):
# With the chroot sandbox, the UID/GID in the sandbox
# will match the host UID/GID (typically 0/0).
return config.build_uid == self._uid and config.build_gid == self._gid
if config.build_uid != self._uid or config.build_gid != self._gid:
return False
# Check host os and architecture match
if config.build_os != self.get_host_os():
raise PlatformError("Configured and host OS don't match.")
elif config.build_arch != self.get_host_arch():
raise PlatformError("Configured and host architecture don't match.")
return True
......@@ -33,7 +33,7 @@ from ._artifactcache import ArtifactCache
from .sandbox import SandboxRemote
from ._elementfactory import ElementFactory
from ._sourcefactory import SourceFactory
from .plugin import CoreWarnings
from .types import CoreWarnings
from ._projectrefs import ProjectRefs, ProjectRefStorage
from ._versions import BST_FORMAT_VERSION
from ._loader import Loader
......
......@@ -106,10 +106,16 @@ class BuildQueue(Queue):
def done(self, job, element, result, success):
if success:
# Inform element in main process that assembly is done
element._assemble_done()
# Inform element in main process that assembly is done
element._assemble_done()
# This has to be done after _assemble_done, such that the
# element may register its cache key as required
# This has to be done after _assemble_done, such that the
# element may register its cache key as required
#
# FIXME: Element._assemble() does not report both the failure state and the
# size of the newly cached failed artifact, so we can only adjust the
# artifact cache size for a successful build even though we know a
# failed build also grows the artifact cache size.
#
if success:
self._check_cache_size(job, element, result)
......@@ -292,7 +292,6 @@ class Queue():
# See the Job object for an explanation of the call signature
#
def _job_done(self, job, element, success, result):
element._update_state()
# Update values that need to be synchronized in the main task
# before calling any queue implementation
......
......@@ -1199,7 +1199,7 @@ class Stream():
element_source_dir = self._get_element_dirname(directory, element)
if list(element.sources()):
os.makedirs(element_source_dir)
element._stage_sources_at(element_source_dir)
element._stage_sources_at(element_source_dir, mount_workspaces=False)
# Write a master build script to the sandbox
def _write_build_script(self, directory, elements):
......
......@@ -23,7 +23,7 @@
# This version is bumped whenever enhancements are made
# to the `project.conf` format or the core element format.
#
BST_FORMAT_VERSION = 18
BST_FORMAT_VERSION = 20
# The base BuildStream artifact version
......
......@@ -352,6 +352,7 @@ _sentinel = object()
# key (str): The key to get a value for in node
# indices (list of ints): Optionally descend into lists of lists
# default_value: Optionally return this value if the key is not found
# allow_none: (bool): Allow None to be a valid value
#
# Returns:
# The value if found in node, otherwise default_value is returned
......@@ -362,7 +363,7 @@ _sentinel = object()
# Note:
# Returned strings are stripped of leading and trailing whitespace
#
def node_get(node, expected_type, key, indices=None, default_value=_sentinel):
def node_get(node, expected_type, key, indices=None, *, default_value=_sentinel, allow_none=False):
value = node.get(key, default_value)
provenance = node_get_provenance(node)
if value is _sentinel:
......@@ -377,8 +378,8 @@ def node_get(node, expected_type, key, indices=None, default_value=_sentinel):
value = value[index]
path += '[{:d}]'.format(index)
# We want to allow None as a valid value for any type
if value is None:
# Optionally allow None as a valid value for any type
if value is None and (allow_none or default_value is None):
return None
if not isinstance(value, expected_type):
......
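A usage sketch of the changed semantics (in real code `node` is a provenance-decorated dict loaded by _yaml, so the plain dict here is only illustrative):

    # Suppose the loaded YAML contained:
    #   frequency: 60
    #   nickname:        # key present, value explicitly None
    node = {'frequency': 60, 'nickname': None}

    _yaml.node_get(node, int, 'frequency')                    # -> 60
    _yaml.node_get(node, str, 'nickname', allow_none=True)    # -> None
    _yaml.node_get(node, str, 'missing', default_value=None)  # -> None (a None default still permits None)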
......@@ -68,7 +68,7 @@ class YamlCache():
# (bool): Whether the file is cached.
def is_cached(self, project, filepath):
cache_path = self._get_filepath(project, filepath)
project_name = project.name if project else ""
project_name = self.get_project_name(project)
try:
project_cache = self._project_caches[project_name]
if cache_path in project_cache.elements:
......@@ -167,7 +167,7 @@ class YamlCache():
# value (decorated dict): The data to put into the cache.
def put_from_key(self, project, filepath, key, value):
cache_path = self._get_filepath(project, filepath)
project_name = project.name if project else ""
project_name = self.get_project_name(project)
try:
project_cache = self._project_caches[project_name]
except KeyError:
......@@ -237,7 +237,7 @@ class YamlCache():
# (decorated dict): The parsed yaml from the cache, or None if the file isn't in the cache.
def _get(self, project, filepath, key):
cache_path = self._get_filepath(project, filepath)
project_name = project.name if project else ""
project_name = self.get_project_name(project)
try:
project_cache = self._project_caches[project_name]
try:
......@@ -253,6 +253,30 @@ class YamlCache():
pass
return None
# get_project_name():
#
# Gets a name appropriate for the project. Projects must use their
# junction's name if present; otherwise, elements with the same contents
# under the same path in identically-named projects would be considered
# the same yaml object despite existing in different Projects.
#
# Args:
# project (Project): The project this file is in, or None.
#
# Returns:
# (str): The project's junction's name if present, otherwise the
# project's name, or an empty string if there is no project
@staticmethod
def get_project_name(project):
if project:
if project.junction:
project_name = project.junction.name
else:
project_name = project.name
else:
project_name = ""
return project_name
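Behaviourally, the precedence can be shown with stand-in objects (a sketch; real callers pass Project instances):

    class _Junction:
        name = "subproject-junction.bst"

    class _Project:
        name = "base-project"
        junction = _Junction()

    YamlCache.get_project_name(_Project())  # -> "subproject-junction.bst"
    YamlCache.get_project_name(None)        # -> ""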
CachedProject = namedtuple('CachedProject', ['elements'])
......@@ -287,7 +311,7 @@ class BstPickler(pickle.Pickler):
if isinstance(obj, _yaml.ProvenanceFile):
if obj.project:
# ProvenanceFile's project object cannot be stored as it is.
project_tag = obj.project.name
project_tag = YamlCache.get_project_name(obj.project)
# ProvenanceFile's filename must be stored relative to the
# project, as the project dir may move.
name = os.path.relpath(obj.name, obj.project.directory)
......@@ -319,14 +343,14 @@ class BstUnpickler(pickle.Unpickler):
if project_tag is not None:
for p in self._context.get_projects():
if project_tag == p.name:
if YamlCache.get_project_name(p) == project_tag:
project = p
break
name = os.path.join(project.directory, tagged_name)
if not project:
projects = [p.name for p in self._context.get_projects()]
projects = [YamlCache.get_project_name(p) for p in self._context.get_projects()]
raise pickle.UnpicklingError("No project with name {} found in {}"
.format(project_tag, projects))
else:
......
......@@ -96,10 +96,9 @@ from . import _cachekey
from . import _signals
from . import _site
from ._platform import Platform
from .plugin import CoreWarnings
from .sandbox._config import SandboxConfig
from .sandbox._sandboxremote import SandboxRemote
from .types import _KeyStrength
from .types import _KeyStrength, CoreWarnings
from .storage.directory import Directory
from .storage._filebaseddirectory import FileBasedDirectory
......@@ -2410,6 +2409,11 @@ class Element(Plugin):
project.ensure_fully_loaded()
sandbox_config = _yaml.node_chain_copy(project._sandbox)
# Get the platform to ask for host architecture
platform = Platform.get_platform()
host_arch = platform.get_host_arch()
host_os = platform.get_host_os()
# The default config is already composited with the project overrides
sandbox_defaults = _yaml.node_get(self.__defaults, Mapping, 'sandbox', default_value={})
sandbox_defaults = _yaml.node_chain_copy(sandbox_defaults)
......@@ -2419,10 +2423,13 @@ class Element(Plugin):
_yaml.node_final_assertions(sandbox_config)
# Sandbox config, unlike others, has fixed members so we should validate them
_yaml.node_validate(sandbox_config, ['build-uid', 'build-gid'])
_yaml.node_validate(sandbox_config, ['build-uid', 'build-gid', 'build-os', 'build-arch'])
return SandboxConfig(self.node_get_member(sandbox_config, int, 'build-uid'),
self.node_get_member(sandbox_config, int, 'build-gid'))
return SandboxConfig(
self.node_get_member(sandbox_config, int, 'build-uid'),
self.node_get_member(sandbox_config, int, 'build-gid'),
self.node_get_member(sandbox_config, str, 'build-os', default=host_os),
self.node_get_member(sandbox_config, str, 'build-arch', default=host_arch))
# This makes a special exception for the split rules, which
# elements may extend but whose defaults are defined in the project.
......
......@@ -119,6 +119,7 @@ from . import _yaml
from . import utils
from ._exceptions import PluginError, ImplError
from ._message import Message, MessageType
from .types import CoreWarnings
class Plugin():
......@@ -322,7 +323,7 @@ class Plugin():
provenance = _yaml.node_get_provenance(node, key=member_name)
return str(provenance)
def node_get_member(self, node, expected_type, member_name, default=_yaml._sentinel):
def node_get_member(self, node, expected_type, member_name, default=_yaml._sentinel, *, allow_none=False):
"""Fetch the value of a node member, raising an error if the value is
missing or incorrectly typed.
......@@ -331,6 +332,7 @@ class Plugin():
expected_type (type): The expected type of the node member
member_name (str): The name of the member to fetch
default (expected_type): A value to return when *member_name* is not specified in *node*
allow_none (bool): Allow explicitly set None values in the YAML (*Since: 1.4*)
Returns:
The value of *member_name* in *node*, otherwise *default*
......@@ -351,7 +353,7 @@ class Plugin():
# Fetch an optional integer
level = self.node_get_member(node, int, 'level', -1)
"""
return _yaml.node_get(node, expected_type, member_name, default_value=default)
return _yaml.node_get(node, expected_type, member_name, default_value=default, allow_none=allow_none)
def node_get_project_path(self, node, key, *,
check_is_file=False, check_is_dir=False):
......@@ -766,38 +768,6 @@ class Plugin():
return self.name
class CoreWarnings():
"""CoreWarnings()
Some common warnings which are raised by core functionalities within BuildStream are found in this class.
"""
OVERLAPS = "overlaps"
"""
This warning will be produced when buildstream detects an overlap on an element
which is not whitelisted. See :ref:`Overlap Whitelist <public_overlap_whitelist>`
"""
REF_NOT_IN_TRACK = "ref-not-in-track"
"""
This warning will be produced when a source is configured with a reference
which is found to be invalid based on the configured track
"""
BAD_ELEMENT_SUFFIX = "bad-element-suffix"
"""
This warning will be produced when an element whose name does not end in .bst
is referenced either on the command line or by another element
"""
__CORE_WARNINGS = [
value
for name, value in CoreWarnings.__dict__.items()
if not name.startswith("__")
]
# Hold on to a lookup table by counter of all instantiated plugins.
# We use this to send the id back from child processes so we can lookup
# corresponding element/source in the master process.
......@@ -828,6 +798,24 @@ def _plugin_lookup(unique_id):
return __PLUGINS_TABLE[unique_id]
# No need for unregister, WeakValueDictionary() will remove entries
# in itself when the referenced plugins are garbage collected.
def _plugin_register(plugin):
global __PLUGINS_UNIQUE_ID # pylint: disable=global-statement
__PLUGINS_UNIQUE_ID += 1
__PLUGINS_TABLE[__PLUGINS_UNIQUE_ID] = plugin
return __PLUGINS_UNIQUE_ID
# A local table for _prefix_warning()
#
__CORE_WARNINGS = [
value
for name, value in CoreWarnings.__dict__.items()
if not name.startswith("__")
]
# _prefix_warning():
#
# Prefix a warning with the plugin kind. CoreWarnings are not prefixed.
......@@ -843,12 +831,3 @@ def _prefix_warning(plugin, warning):
if any((warning is core_warning for core_warning in __CORE_WARNINGS)):
return warning
return "{}:{}".format(plugin.get_kind(), warning)
# No need for unregister, WeakValueDictionary() will remove entries
# in itself when the referenced plugins are garbage collected.
def _plugin_register(plugin):
global __PLUGINS_UNIQUE_ID # pylint: disable=global-statement
__PLUGINS_UNIQUE_ID += 1
__PLUGINS_TABLE[__PLUGINS_UNIQUE_ID] = plugin
return __PLUGINS_UNIQUE_ID