Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (76)
Showing changes with 435 additions and 210 deletions
......@@ -143,7 +143,6 @@ docs:
- pip3 install sphinx-click
- pip3 install sphinx_rtd_theme
- cd dist && ./unpack.sh && cd buildstream
- pip3 install .
- make BST_FORCE_SESSION_REBUILD=1 -C doc
- cd ../..
- mv dist/buildstream/doc/build/html public
......
......@@ -6,9 +6,9 @@
[//]: # (Short summary of the action to be executed)
* [ ] Action 1
* [ ] Action 2
* [ ] Action 3
* [ ] Action 1
* [ ] Action 2
* [ ] Action 3
## Acceptance Criteria
......
......@@ -184,7 +184,7 @@ ignore-on-opaque-inference=yes
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,contextlib.closing,gi.repository.GLib.GError
ignored-classes=optparse.Values,thread._local,_thread._local,contextlib.closing,gi.repository.GLib.GError,pathlib.PurePath
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
......
......@@ -261,9 +261,6 @@ using pip or some other mechanism::
# Additional optional dependencies required
pip3 install --user arpy
Furthermore, the documentation build requires that BuildStream itself
be installed, as it will be used in the process of generating its docs.
To build the documentation, just run the following::
make -C doc
......@@ -470,6 +467,11 @@ the frontend tests you can do::
./setup.py test --addopts '-k tests/frontend/'
Specific tests can be chosen by using the :: delimiter after the test module.
If you wanted to run the test_build_track test within frontend/buildtrack.py, you could do::
./setup.py test --addopts '-k tests/frontend/buildtrack.py::test_build_track'
We also have a set of slow integration tests that are disabled by
default - you will notice most of them marked with SKIP in the pytest
output. To run them, you can use::
......
......@@ -30,6 +30,7 @@ if "_BST_COMPLETION" not in os.environ:
from .sandbox import Sandbox, SandboxFlags
from .plugin import Plugin
from .source import Source, SourceError, Consistency, SourceFetcher
from .element import Element, ElementError, Scope
from .element import Element, ElementError
from .element_enums import Scope
from .buildelement import BuildElement
from .scriptelement import ScriptElement
##################################################################
# Private Entry Point #
##################################################################
#
# This allows running the CLI when BuildStream is uninstalled,
# as long as the BuildStream repo is in PYTHONPATH; one can run it
# with:
#
# python3 -m buildstream [program args]
#
# This is used when we need to run BuildStream before installing,
# like when we build documentation.
#
if __name__ == '__main__':
# pylint: disable=no-value-for-parameter
from ._frontend.cli import cli
cli()
......@@ -21,7 +21,7 @@ import os
import string
from collections import Mapping, namedtuple
from ..element import _KeyStrength
from ..element_enums import _KeyStrength
from .._exceptions import ArtifactError, ImplError, LoadError, LoadErrorReason
from .._message import Message, MessageType
from .. import utils
......
......@@ -213,6 +213,29 @@ class CASCache(ArtifactCache):
remotes_for_project = self._remotes[element._get_project()]
return any(remote.spec.push for remote in remotes_for_project)
def pull_key(self, key, size_bytes, project):
""" Pull a single key rather than an artifact.
Does not update local refs. """
for remote in self._remotes[project]:
try:
remote.init()
tree = remote_execution_pb2.Digest()
tree.hash = key
tree.size_bytes = size_bytes
self._fetch_directory(remote, tree)
# no need to pull from additional remotes
return True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
raise
return False
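# A hedged usage sketch, not part of this changeset (caller and variable
# names are illustrative): pull a single CAS tree by digest without
# creating a local artifact ref for it.
#
#   if artifacts.pull_key(tree_digest.hash, tree_digest.size_bytes, project):
#       pass  # the tree's objects are now present in the local CAS
#   else:
#       pass  # no configured remote had this key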
def pull(self, element, key, *, progress=None):
ref = self.get_artifact_fullname(element, key)
......@@ -254,10 +277,93 @@ class CASCache(ArtifactCache):
self.set_ref(newref, tree)
def _push_refs_to_remote(self, refs, remote, may_have_dependencies):
skipped_remote = True
try:
for ref in refs:
tree = self.resolve_ref(ref)
# Check whether ref is already on the server in which case
# there is no need to push the artifact
try:
request = buildstream_pb2.GetReferenceRequest()
request.key = ref
response = remote.ref_storage.GetReference(request)
if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
# ref is already on the server with the same tree
continue
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
# Intentionally re-raise RpcError for outer except block.
raise
missing_blobs = {}
required_blobs = self._required_blobs(tree)
# Limit size of FindMissingBlobs request
for required_blobs_group in _grouper(required_blobs, 512):
request = remote_execution_pb2.FindMissingBlobsRequest()
for required_digest in required_blobs_group:
d = request.blob_digests.add()
d.hash = required_digest.hash
d.size_bytes = required_digest.size_bytes
response = remote.cas.FindMissingBlobs(request)
for digest in response.missing_blob_digests:
d = remote_execution_pb2.Digest()
d.hash = digest.hash
d.size_bytes = digest.size_bytes
missing_blobs[d.hash] = d
# Upload any blobs missing on the server
skipped_remote = False
for digest in missing_blobs.values():
def request_stream():
resource_name = os.path.join(digest.hash, str(digest.size_bytes))
with open(self.objpath(digest), 'rb') as f:
assert os.fstat(f.fileno()).st_size == digest.size_bytes
offset = 0
finished = False
remaining = digest.size_bytes
while not finished:
chunk_size = min(remaining, 64 * 1024)
remaining -= chunk_size
request = bytestream_pb2.WriteRequest()
request.write_offset = offset
# max. 64 kB chunks
request.data = f.read(chunk_size)
request.resource_name = resource_name
request.finish_write = remaining <= 0
yield request
offset += chunk_size
finished = request.finish_write
response = remote.bytestream.Write(request_stream())
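# Worked example of the chunking in request_stream() above (blob size is
# illustrative): a 150,000 byte blob yields three WriteRequests of
# 65,536, 65,536 and 18,928 bytes at write_offsets 0, 65,536 and 131,072,
# with finish_write set only on the final request.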
request = buildstream_pb2.UpdateReferenceRequest()
request.keys.append(ref)
request.digest.hash = tree.hash
request.digest.size_bytes = tree.size_bytes
remote.ref_storage.UpdateReference(request)
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
raise ArtifactError("Failed to push artifact {}: {}".format(refs, e), temporary=True) from e
return not skipped_remote
def push(self, element, keys):
keys = list(keys)
refs = [self.get_artifact_fullname(element, key) for key in keys]
project = element._get_project()
return self.push_refs(refs, project, element=element)
def push_refs(self, refs, project, may_have_dependencies=True, element=None):
push_remotes = [r for r in self._remotes[project] if r.spec.push]
......@@ -265,94 +371,52 @@ class CASCache(ArtifactCache):
for remote in push_remotes:
remote.init()
skipped_remote = True
element.info("Pushing {} -> {}".format(element._get_brief_display_key(), remote.spec.url))
try:
for ref in refs:
tree = self.resolve_ref(ref)
# Check whether ref is already on the server in which case
# there is no need to push the artifact
try:
request = buildstream_pb2.GetReferenceRequest()
request.key = ref
response = remote.ref_storage.GetReference(request)
if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
# ref is already on the server with the same tree
continue
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
# Intentionally re-raise RpcError for outer except block.
raise
missing_blobs = {}
required_blobs = self._required_blobs(tree)
# Limit size of FindMissingBlobs request
for required_blobs_group in _grouper(required_blobs, 512):
request = remote_execution_pb2.FindMissingBlobsRequest()
for required_digest in required_blobs_group:
d = request.blob_digests.add()
d.hash = required_digest.hash
d.size_bytes = required_digest.size_bytes
response = remote.cas.FindMissingBlobs(request)
for digest in response.missing_blob_digests:
d = remote_execution_pb2.Digest()
d.hash = digest.hash
d.size_bytes = digest.size_bytes
missing_blobs[d.hash] = d
# Upload any blobs missing on the server
skipped_remote = False
for digest in missing_blobs.values():
def request_stream():
resource_name = os.path.join(digest.hash, str(digest.size_bytes))
with open(self.objpath(digest), 'rb') as f:
assert os.fstat(f.fileno()).st_size == digest.size_bytes
offset = 0
finished = False
remaining = digest.size_bytes
while not finished:
chunk_size = min(remaining, 64 * 1024)
remaining -= chunk_size
request = bytestream_pb2.WriteRequest()
request.write_offset = offset
# max. 64 kB chunks
request.data = f.read(chunk_size)
request.resource_name = resource_name
request.finish_write = remaining <= 0
yield request
offset += chunk_size
finished = request.finish_write
response = remote.bytestream.Write(request_stream())
request = buildstream_pb2.UpdateReferenceRequest()
request.keys.append(ref)
request.digest.hash = tree.hash
request.digest.size_bytes = tree.size_bytes
remote.ref_storage.UpdateReference(request)
pushed = True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
raise ArtifactError("Failed to push artifact {}: {}".format(refs, e), temporary=True) from e
if skipped_remote:
if self._push_refs_to_remote(refs, remote, may_have_dependencies):
pushed = True
elif element:
self.context.message(Message(
None,
MessageType.SKIPPED,
"Remote ({}) already has {} cached".format(
remote.spec.url, element._get_brief_display_key())
))
return pushed
def verify_key_pushed(self, key, project):
ref = key
push_remotes = [r for r in self._remotes[project] if r.spec.push]
pushed = False
for remote in push_remotes:
remote.init()
if self._verify_ref_on_remote(ref, remote):
pushed = True
return pushed
def _verify_ref_on_remote(self, ref, remote):
tree = self.resolve_ref(ref)
# Check whether ref is already on the server in which case
# there is no need to push the artifact
try:
request = buildstream_pb2.GetReferenceRequest()
request.key = ref
response = remote.ref_storage.GetReference(request)
if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
# ref is already on the server with the same tree
return True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.NOT_FOUND:
raise
return False
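# A hedged pairing sketch (names are illustrative, not from this
# changeset): after pushing a single ref with push_refs(...,
# may_have_dependencies=False), verify_key_pushed() can confirm that at
# least one push remote now holds it.
#
#   cache.push_refs([ref], project, may_have_dependencies=False)
#   if cache.verify_key_pushed(ref, project):
#       pass  # ref is available on a push remote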
################################################
# API Private Methods #
################################################
......@@ -726,26 +790,27 @@ class CASCache(ArtifactCache):
#
q.put(str(e))
def _required_blobs(self, tree):
def _required_blobs(self, tree, may_have_dependencies=True):
# parse directory, and recursively add blobs
d = remote_execution_pb2.Digest()
d.hash = tree.hash
d.size_bytes = tree.size_bytes
yield d
directory = remote_execution_pb2.Directory()
if may_have_dependencies:
directory = remote_execution_pb2.Directory()
with open(self.objpath(tree), 'rb') as f:
directory.ParseFromString(f.read())
with open(self.objpath(tree), 'rb') as f:
directory.ParseFromString(f.read())
for filenode in directory.files:
d = remote_execution_pb2.Digest()
d.hash = filenode.digest.hash
d.size_bytes = filenode.digest.size_bytes
yield d
for filenode in directory.files:
d = remote_execution_pb2.Digest()
d.hash = filenode.digest.hash
d.size_bytes = filenode.digest.size_bytes
yield d
for dirnode in directory.directories:
yield from self._required_blobs(dirnode.digest)
for dirnode in directory.directories:
yield from self._required_blobs(dirnode.digest)
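# A reading of the new flag above, hedged rather than authoritative: with
# may_have_dependencies=False only the top-level digest is yielded, which
# suits pushing a single key whose payload is not a Directory to parse.
#
#   blobs = list(self._required_blobs(tree, may_have_dependencies=False))
#   # -> just the digest for 'tree' itself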
def _fetch_blob(self, remote, digest, out):
resource_name = os.path.join(digest.hash, str(digest.size_bytes))
......
......@@ -31,9 +31,13 @@ from .element import Element
#
class ElementFactory(PluginContext):
def __init__(self, plugin_base, plugin_origins=None):
def __init__(self, plugin_base, *,
format_versions={},
plugin_origins=None):
super().__init__(plugin_base, Element, [_site.element_plugins], plugin_origins)
super().__init__(plugin_base, Element, [_site.element_plugins],
plugin_origins=plugin_origins,
format_versions=format_versions)
# create():
#
......@@ -54,4 +58,7 @@ class ElementFactory(PluginContext):
#
def create(self, context, project, artifacts, meta):
element_type, default_config = self.lookup(meta.kind)
return element_type(context, project, artifacts, meta, default_config)
element = element_type(context, project, artifacts, meta, default_config)
version = self._format_versions.get(meta.kind, 0)
self._assert_plugin_format(element, version)
return element
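A hedged construction sketch for the reworked factory API above; the format version numbers are illustrative placeholders, and plugin_base, origins, context, project, artifacts and meta are assumed to come from the caller:

    factory = ElementFactory(plugin_base,
                             format_versions={'autotools': 1, 'script': 0},
                             plugin_origins=origins)

    # create() now also asserts that the chosen plugin satisfies the
    # format version requested for its kind before returning the element.
    element = factory.create(context, project, artifacts, meta)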
#
# Copyright (C) 2016 Codethink Limited
# Copyright (C) 2018 Codethink Limited
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
......@@ -16,6 +16,7 @@
#
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
# Tiago Gomes <tiago.gomes@codethink.co.uk>
from enum import Enum
......@@ -206,6 +207,16 @@ class LoadErrorReason(Enum):
# Try to load a directory not a yaml file
LOADING_DIRECTORY = 18
# A project path leads outside of the project directory
PROJ_PATH_INVALID = 19
# A project path points to a file of the wrong kind (e.g. a
# socket)
PROJ_PATH_INVALID_KIND = 20
# A recursive include has been encountered.
RECURSIVE_INCLUDE = 21
# LoadError
#
......
......@@ -33,6 +33,7 @@ from .. import Scope
# Import various buildstream internals
from .._context import Context
from .._platform import Platform
from .._project import Project
from .._exceptions import BstError, StreamError, LoadError, LoadErrorReason, AppError
from .._message import Message, MessageType, unconditional_messages
......@@ -198,6 +199,20 @@ class App():
if option_value is not None:
setattr(self.context, context_attr, option_value)
Platform.create_instance(self.context)
# Create the logger right before setting the message handler
self.logger = LogLine(self.context,
self._content_profile,
self._format_profile,
self._success_profile,
self._error_profile,
self._detail_profile,
indent=INDENT)
# Propagate pipeline feedback to the user
self.context.set_message_handler(self._message_handler)
#
# Load the Project
#
......@@ -218,18 +233,6 @@ class App():
except BstError as e:
self._error_exit(e, "Error loading project")
# Create the logger right before setting the message handler
self.logger = LogLine(self.context,
self._content_profile,
self._format_profile,
self._success_profile,
self._error_profile,
self._detail_profile,
indent=INDENT)
# Propagate pipeline feedback to the user
self.context.set_message_handler(self._message_handler)
# Now that we have a logger and message handler,
# we can override the global exception hook.
sys.excepthook = self._global_exception_handler
......
......@@ -483,8 +483,11 @@ class LogLine(Widget):
text += '\n'
# Plugins
text += self._format_plugins(project._element_factory.loaded_dependencies,
project._source_factory.loaded_dependencies)
text += self._format_plugins(project.first_pass_config.element_factory.loaded_dependencies,
project.first_pass_config.source_factory.loaded_dependencies)
if project.config.element_factory and project.config.source_factory:
text += self._format_plugins(project.config.element_factory.loaded_dependencies,
project.config.source_factory.loaded_dependencies)
# Pipeline state
text += self.content_profile.fmt("Pipeline\n", bold=True)
......
import os
from collections import Mapping
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason
# Includes()
#
# This takes care of processing include directives "(@)".
#
# Args:
# loader (Loader): The Loader object
class Includes:
def __init__(self, loader):
self._loader = loader
self._loaded = {}
# process()
#
# Process recursively include directives in a YAML node.
#
# Args:
# node (dict): A YAML node
# included (set): Fail with a recursion error if any file in this set is included again
# current_loader (Loader): Use alternative loader (for junction files)
# only_local (bool): Whether to ignore junction files
def process(self, node, *,
included=set(),
current_loader=None,
only_local=False):
if current_loader is None:
current_loader = self._loader
if isinstance(node.get('(@)'), str):
includes = [_yaml.node_get(node, str, '(@)')]
else:
includes = _yaml.node_get(node, list, '(@)', default_value=None)
if '(@)' in node:
del node['(@)']
if includes:
for include in reversed(includes):
if only_local and ':' in include:
continue
include_node, file_path, sub_loader = self._include_file(include,
current_loader)
if file_path in included:
provenance = _yaml.node_get_provenance(node)
raise LoadError(LoadErrorReason.RECURSIVE_INCLUDE,
"{}: trying to recursively include {}". format(provenance,
file_path))
# Because the included node will be modified, we need
# to copy it so that we do not modify the toplevel
# node of the provenance.
include_node = _yaml.node_chain_copy(include_node)
try:
included.add(file_path)
self.process(include_node, included=included,
current_loader=sub_loader,
only_local=only_local)
finally:
included.remove(file_path)
_yaml.composite(include_node, node)
to_delete = [key for key, _ in _yaml.node_items(node) if key not in include_node]
for key, value in include_node.items():
node[key] = value
for key in to_delete:
del node[key]
for _, value in _yaml.node_items(node):
self._process_value(value,
included=included,
current_loader=current_loader,
only_local=only_local)
# _include_file()
#
# Load an include YAML file with a loader.
#
# Args:
# include (str): file path relative to loader's project directory.
# Can be prefixed with a junction name.
# loader (Loader): Loader for the current project.
def _include_file(self, include, loader):
shortname = include
if ':' in include:
junction, include = include.split(':', 1)
junction_loader = loader._get_loader(junction, fetch_subprojects=True)
current_loader = junction_loader
else:
current_loader = loader
project = current_loader.project
directory = project.directory
file_path = os.path.join(directory, include)
key = (current_loader, file_path)
if file_path not in self._loaded:
self._loaded[key] = _yaml.load(os.path.join(directory, include),
shortname=shortname,
project=project)
return self._loaded[key], file_path, current_loader
# _process_value()
#
# Select processing for value that could be a list or a dictionary.
#
# Args:
# value: Value to process. Can be a list or a dictionary.
# included (set): Fail with a recursion error if any file in this set is included again
# current_loader (Loader): Use alternative loader (for junction files)
# only_local (bool): Whether to ignore junction files
def _process_value(self, value, *,
included=set(),
current_loader=None,
only_local=False):
if isinstance(value, Mapping):
self.process(value,
included=included,
current_loader=current_loader,
only_local=only_local)
elif isinstance(value, list):
for v in value:
self._process_value(v,
included=included,
current_loader=current_loader,
only_local=only_local)
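A hedged usage sketch of the new Includes helper; the file names and the exact shape of the '(@)' directive are illustrative, not taken from this changeset:

    # project.conf might carry, e.g.:
    #   (@): [includes/arch.yml, subproject.bst:includes/options.yml]
    node = _yaml.load('project.conf', shortname='project.conf', project=project)
    Includes(loader).process(node)
    # keys from the listed files are now composited into node and the
    # '(@)' key itself has been deleted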
......@@ -25,11 +25,11 @@ import shutil
from .._exceptions import LoadError, LoadErrorReason
from .. import Consistency
from .._project import Project
from .. import _yaml
from ..element import Element
from .._profile import Topics, profile_start, profile_end
from .._platform import Platform
from .._includes import Includes
from .types import Symbol, Dependency
from .loadelement import LoadElement
......@@ -46,30 +46,19 @@ from . import MetaSource
# Args:
# context (Context): The Context object
# project (Project): The toplevel Project object
# filenames (list of str): Target, element-path relative bst filenames in the project
# parent (Loader): A parent Loader object, in the case this is a junctioned Loader
# tempdir (str): A directory to cleanup with the Loader, given to the loader by a parent
# loader in the case that this loader is a subproject loader.
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
class Loader():
def __init__(self, context, project, filenames, *, parent=None, tempdir=None, fetch_subprojects=False):
def __init__(self, context, project, *, parent=None, tempdir=None):
# Ensure we have an absolute path for the base directory
basedir = project.element_path
if not os.path.isabs(basedir):
basedir = os.path.abspath(basedir)
for filename in filenames:
if os.path.isabs(filename):
# XXX Should this just be an assertion ?
# Expect that the caller gives us the right thing at least ?
raise LoadError(LoadErrorReason.INVALID_DATA,
"Target '{}' was not specified as a relative "
"path to the base project directory: {}"
.format(filename, basedir))
#
# Public members
#
......@@ -78,11 +67,10 @@ class Loader():
#
# Private members
#
self._fetch_subprojects = fetch_subprojects
self._context = context
self._options = project.options # Project options (OptionPool)
self._basedir = basedir # Base project directory
self._targets = filenames # Target bst elements
self._first_pass_options = project.first_pass_config.options # Project options (OptionPool)
self._tempdir = tempdir # A directory to cleanup
self._parent = parent # The parent loader
......@@ -90,6 +78,8 @@ class Loader():
self._elements = {} # Dict of elements
self._loaders = {} # Dict of junction loaders
self._includes = Includes(self)
# load():
#
# Loads the project based on the parameters given to the constructor
......@@ -98,20 +88,32 @@ class Loader():
# rewritable (bool): Whether the loaded files should be rewritable
# this is a bit more expensive due to deep copies
# ticker (callable): An optional function for tracking load progress
# targets (list of str): Target, element-path relative bst filenames in the project
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Raises: LoadError
#
# Returns: The toplevel LoadElement
def load(self, rewritable=False, ticker=None):
def load(self, targets, rewritable=False, ticker=None, fetch_subprojects=False):
for filename in targets:
if os.path.isabs(filename):
# XXX Should this just be an assertion ?
# Expect that the caller gives us the right thing at least ?
raise LoadError(LoadErrorReason.INVALID_DATA,
"Target '{}' was not specified as a relative "
"path to the base project directory: {}"
.format(filename, self._basedir))
# First pass, recursively load files and populate our table of LoadElements
#
deps = []
for target in self._targets:
for target in targets:
profile_start(Topics.LOAD_PROJECT, target)
junction, name, loader = self._parse_name(target, rewritable, ticker)
loader._load_file(name, rewritable, ticker)
junction, name, loader = self._parse_name(target, rewritable, ticker,
fetch_subprojects=fetch_subprojects)
loader._load_file(name, rewritable, ticker, fetch_subprojects)
deps.append(Dependency(name, junction=junction))
profile_end(Topics.LOAD_PROJECT, target)
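A hedged sketch of the resulting calling convention (element names are illustrative): targets now go to load() rather than to the Loader constructor, and subproject fetching becomes a per-call decision; elsewhere in this diff the Loader is obtained from the Project as project.loader.

    meta_elements = project.loader.load(['core/base.bst', 'core/app.bst'],
                                        rewritable=False,
                                        fetch_subprojects=True)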
......@@ -126,7 +128,7 @@ class Loader():
dummy = DummyTarget(name='', full_name='', deps=deps)
self._elements[''] = dummy
profile_key = "_".join(t for t in self._targets)
profile_key = "_".join(t for t in targets)
profile_start(Topics.CIRCULAR_CHECK, profile_key)
self._check_circular_deps('')
profile_end(Topics.CIRCULAR_CHECK, profile_key)
......@@ -135,9 +137,10 @@ class Loader():
#
# Sort direct dependencies of elements by their dependency ordering
#
for target in self._targets:
for target in targets:
profile_start(Topics.SORT_DEPENDENCIES, target)
junction, name, loader = self._parse_name(target, rewritable, ticker)
junction, name, loader = self._parse_name(target, rewritable, ticker,
fetch_subprojects=fetch_subprojects)
loader._sort_dependencies(name)
profile_end(Topics.SORT_DEPENDENCIES, target)
# Finally, wrap what we have into LoadElements and return the target
......@@ -198,11 +201,12 @@ class Loader():
# filename (str): The element-path relative bst file
# rewritable (bool): Whether we should load in round trippable mode
# ticker (callable): A callback to report loaded filenames to the frontend
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Returns:
# (LoadElement): A loaded LoadElement
#
def _load_file(self, filename, rewritable, ticker):
def _load_file(self, filename, rewritable, ticker, fetch_subprojects):
# Silently ignore already loaded files
if filename in self._elements:
......@@ -215,7 +219,7 @@ class Loader():
# Load the data and process any conditional statements therein
fullpath = os.path.join(self._basedir, filename)
try:
node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable)
node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable, project=self.project)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_FILE:
# If we can't find the file, try to suggest plausible
......@@ -241,7 +245,15 @@ class Loader():
message, detail=detail) from e
else:
raise
self._options.process_node(node)
kind = _yaml.node_get(node, str, Symbol.KIND)
if kind == "junction":
self._first_pass_options.process_node(node)
else:
self.project.ensure_fully_loaded()
self._includes.process(node)
self._options.process_node(node)
element = LoadElement(node, filename, self)
......@@ -250,12 +262,13 @@ class Loader():
# Load all dependency files for the new LoadElement
for dep in element.deps:
if dep.junction:
self._load_file(dep.junction, rewritable, ticker)
loader = self._get_loader(dep.junction, rewritable=rewritable, ticker=ticker)
self._load_file(dep.junction, rewritable, ticker, fetch_subprojects)
loader = self._get_loader(dep.junction, rewritable=rewritable, ticker=ticker,
fetch_subprojects=fetch_subprojects)
else:
loader = self
dep_element = loader._load_file(dep.name, rewritable, ticker)
dep_element = loader._load_file(dep.name, rewritable, ticker, fetch_subprojects)
if _yaml.node_get(dep_element.node, str, Symbol.KIND) == 'junction':
raise LoadError(LoadErrorReason.INVALID_DATA,
......@@ -432,7 +445,8 @@ class Loader():
_yaml.node_get(node, Mapping, Symbol.ENVIRONMENT, default_value={}),
_yaml.node_get(node, list, Symbol.ENV_NOCACHE, default_value=[]),
_yaml.node_get(node, Mapping, Symbol.PUBLIC, default_value={}),
_yaml.node_get(node, Mapping, Symbol.SANDBOX, default_value={}))
_yaml.node_get(node, Mapping, Symbol.SANDBOX, default_value={}),
element_kind == 'junction')
# Cache it now, make sure it's already there before recursing
self._meta_elements[element_name] = meta_element
......@@ -454,11 +468,12 @@ class Loader():
#
# Args:
# filename (str): Junction name
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Raises: LoadError
#
# Returns: A Loader or None if specified junction does not exist
def _get_loader(self, filename, *, rewritable=False, ticker=None, level=0):
def _get_loader(self, filename, *, rewritable=False, ticker=None, level=0, fetch_subprojects=False):
# return previously determined result
if filename in self._loaders:
loader = self._loaders[filename]
......@@ -475,13 +490,14 @@ class Loader():
if self._parent:
# junctions in the parent take precedence over junctions defined
# in subprojects
loader = self._parent._get_loader(filename, rewritable=rewritable, ticker=ticker, level=level + 1)
loader = self._parent._get_loader(filename, rewritable=rewritable, ticker=ticker,
level=level + 1, fetch_subprojects=fetch_subprojects)
if loader:
self._loaders[filename] = loader
return loader
try:
self._load_file(filename, rewritable, ticker)
self._load_file(filename, rewritable, ticker, fetch_subprojects)
except LoadError as e:
if e.reason != LoadErrorReason.MISSING_FILE:
# other load error
......@@ -510,7 +526,7 @@ class Loader():
# Handle the case where a subproject needs to be fetched
#
if source.get_consistency() == Consistency.RESOLVED:
if self._fetch_subprojects:
if fetch_subprojects:
if ticker:
ticker(filename, 'Fetching subproject from {} source'.format(source.get_kind()))
source._fetch()
......@@ -536,7 +552,9 @@ class Loader():
# Load the project
project_dir = os.path.join(basedir, element.path)
try:
project = Project(project_dir, self._context, junction=element)
from .._project import Project
project = Project(project_dir, self._context, junction=element,
parent_loader=self, tempdir=basedir)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
raise LoadError(reason=LoadErrorReason.INVALID_JUNCTION,
......@@ -546,11 +564,7 @@ class Loader():
else:
raise
loader = Loader(self._context, project, [],
parent=self,
tempdir=basedir,
fetch_subprojects=self._fetch_subprojects)
loader = project.loader
self._loaders[filename] = loader
return loader
......@@ -581,13 +595,14 @@ class Loader():
# rewritable (bool): Whether the loaded files should be rewritable
# this is a bit more expensive due to deep copies
# ticker (callable): An optional function for tracking load progress
# fetch_subprojects (bool): Whether to fetch subprojects while loading
#
# Returns:
# (tuple): - (str): name of the junction element
# - (str): name of the element
# - (Loader): loader for sub-project
#
def _parse_name(self, name, rewritable, ticker):
def _parse_name(self, name, rewritable, ticker, fetch_subprojects=False):
# We allow splitting only once since deep junction names are forbidden.
# Users who want to refer to elements in sub-sub-projects are required
# to create junctions on the top level project.
......@@ -595,6 +610,7 @@ class Loader():
if len(junction_path) == 1:
return None, junction_path[-1], self
else:
self._load_file(junction_path[-2], rewritable, ticker)
loader = self._get_loader(junction_path[-2], rewritable=rewritable, ticker=ticker)
self._load_file(junction_path[-2], rewritable, ticker, fetch_subprojects)
loader = self._get_loader(junction_path[-2], rewritable=rewritable, ticker=ticker,
fetch_subprojects=fetch_subprojects)
return junction_path[-2], junction_path[-1], loader
......@@ -36,9 +36,11 @@ class MetaElement():
# env_nocache: List of environment vars which should not be considered in cache keys
# public: Public domain data dictionary
# sandbox: Configuration specific to the sandbox environment
# first_pass: The element is to be loaded with the first-pass configuration (i.e. it is a junction)
#
def __init__(self, project, name, kind, provenance, sources, config,
variables, environment, env_nocache, public, sandbox):
variables, environment, env_nocache, public, sandbox,
first_pass):
self.project = project
self.name = name
self.kind = kind
......@@ -52,3 +54,4 @@ class MetaElement():
self.sandbox = sandbox
self.build_dependencies = []
self.dependencies = []
self.first_pass = first_pass
......@@ -30,6 +30,7 @@ class MetaSource():
# element_kind: The kind of the owning element
# kind: The kind of the source
# config: The configuration data for the source
# first_pass: This source will be used with the project's first-pass configuration (used for junctions).
#
def __init__(self, element_name, element_index, element_kind, kind, config, directory):
self.element_name = element_name
......@@ -38,3 +39,4 @@ class MetaSource():
self.kind = kind
self.config = config
self.directory = directory
self.first_pass = False
......@@ -107,16 +107,19 @@ class OptionPool():
#
# Args:
# cli_options (list): A list of (str, str) tuples
# ignore_unknown (bool): Whether to silently ignore unknown options.
#
def load_cli_values(self, cli_options):
def load_cli_values(self, cli_options, *, ignore_unknown=False):
for option_name, option_value in cli_options:
try:
option = self._options[option_name]
except KeyError as e:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Unknown option '{}' specified on the command line"
.format(option_name)) from e
option.set_value(option_value)
if not ignore_unknown:
raise LoadError(LoadErrorReason.INVALID_DATA,
"Unknown option '{}' specified on the command line"
.format(option_name)) from e
else:
option.set_value(option_value)
# resolve()
#
......
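A hedged sketch of the new flag (option names are illustrative); presumably this lets a first-pass option pool skip command-line options that only exist in the fully loaded project:

    pool.load_cli_values([('arch', 'x86_64'), ('debug', 'True')],
                         ignore_unknown=True)   # unknown names are skipped, not fatal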
......@@ -25,9 +25,7 @@ from operator import itemgetter
from ._exceptions import PipelineError
from ._message import Message, MessageType
from ._loader import Loader
from ._profile import Topics, profile_start, profile_end
from .element import Element
from . import Scope, Consistency
from ._project import ProjectRefStorage
......@@ -80,7 +78,6 @@ class Pipeline():
# Private members
#
self._artifacts = artifacts
self._loader = None
# load()
#
......@@ -109,30 +106,9 @@ class Pipeline():
profile_start(Topics.LOAD_PIPELINE, "_".join(t.replace(os.sep, '-') for t in targets))
self._loader = Loader(self._context, self._project, targets,
fetch_subprojects=fetch_subprojects)
with self._context.timed_activity("Loading pipeline", silent_nested=True):
meta_elements = self._loader.load(rewritable, None)
# Resolve the real elements now that we've loaded the project
with self._context.timed_activity("Resolving pipeline"):
elements = [
Element._new_from_meta(meta, self._artifacts)
for meta in meta_elements
]
# Now warn about any redundant source references which may have
# been discovered in the resolve() phase.
redundant_refs = Element._get_redundant_source_refs()
if redundant_refs:
detail = "The following inline specified source references will be ignored:\n\n"
lines = [
"{}:{}".format(source._get_provenance(), ref)
for source, ref in redundant_refs
]
detail += "\n".join(lines)
self._message(MessageType.WARN, "Ignoring redundant source references", detail=detail)
elements = self._project.load_elements(targets, self._artifacts,
rewritable=rewritable,
fetch_subprojects=fetch_subprojects)
# Now create element groups to match the input target groups
elt_iter = iter(elements)
......@@ -388,17 +364,6 @@ class Pipeline():
detail += " " + element._get_full_name() + "\n"
raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline")
# cleanup()
#
# Cleans up resources used by the Pipeline.
#
def cleanup(self):
if self._loader:
self._loader.cleanup()
# Reset the element loader state
Element._reset_load_state()
#############################################################
# Private Methods #
#############################################################
......
......@@ -24,15 +24,16 @@ from .. import utils
from .._artifactcache.cascache import CASCache
from .._message import Message, MessageType
from ..sandbox import SandboxBwrap
from ..sandbox import SandboxRemote
from . import Platform
class Linux(Platform):
def __init__(self, context, project):
def __init__(self, context):
super().__init__(context, project)
super().__init__(context)
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
self._user_ns_available = self._check_user_ns_available(context)
......@@ -46,7 +47,7 @@ class Linux(Platform):
# Inform the bubblewrap sandbox as to whether it can use user namespaces or not
kwargs['user_ns_available'] = self._user_ns_available
kwargs['die_with_parent_available'] = self._die_with_parent_available
return SandboxBwrap(*args, **kwargs)
return SandboxRemote(*args, **kwargs)
################################################
# Private Methods #
......
......@@ -35,9 +35,8 @@ class Platform():
# Args:
# context (context): The project context
#
def __init__(self, context, project):
def __init__(self, context):
self.context = context
self.project = project
@classmethod
def create_instance(cls, *args, **kwargs):
......