
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (57)
Showing with 440 additions and 254 deletions
@@ -23,6 +23,11 @@ a reasonable timeframe for identifying these.
 
 Patch submissions
 -----------------
+If you want to submit a patch, do ask for developer permissions on our
+IRC channel first (GitLab's button also works, but you may need to
+shout about it - we often overlook this) - for CI reasons, it's much
+easier if patches are in branches of the main repository.
+
 Branches must be submitted as merge requests in gitlab. If the branch
 fixes an issue or is related to any issues, these issues must be mentioned
 in the merge request or preferably the commit messages themselves.
......
 =================
-buildstream 1.3.1
+buildstream 1.1.5
 =================
 
 o Add a `--tar` option to `bst checkout` which allows a tarball to be
@@ -9,6 +9,15 @@ buildstream 1.3.1
   and the preferred mirror to fetch from can be defined in the command
   line or user config.
 
+o Added new `remote` source plugin for downloading file blobs
+
+o Failed builds are included in the cache as well.
+  `bst checkout` will provide anything in `%{install-root}`.
+  A build including cached failures will cause any dependent elements
+  to not be scheduled and fail during artifact assembly,
+  and display the retry prompt during an interactive session.
+
 =================
 buildstream 1.1.4
 =================
......
@@ -32,6 +32,7 @@ from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
 from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
 
+from .._message import MessageType, Message
 from .. import _signals, utils
 from .._exceptions import ArtifactError
@@ -264,7 +265,7 @@ class CASCache(ArtifactCache):
         for remote in push_remotes:
             remote.init()
+            skipped_remote = True
 
             element.info("Pushing {} -> {}".format(element._get_brief_display_key(), remote.spec.url))
 
             try:
@@ -280,8 +281,6 @@ class CASCache(ArtifactCache):
                 if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
                     # ref is already on the server with the same tree
-                    element.info("Skipping {}, remote ({}) already has artifact cached".format(
-                        element._get_brief_display_key(), remote.spec.url))
                     continue
 
             except grpc.RpcError as e:
@@ -309,6 +308,7 @@ class CASCache(ArtifactCache):
                     missing_blobs[d.hash] = d
 
                 # Upload any blobs missing on the server
+                skipped_remote = False
                 for digest in missing_blobs.values():
                     def request_stream():
                         resource_name = os.path.join(digest.hash, str(digest.size_bytes))
@@ -344,6 +344,13 @@ class CASCache(ArtifactCache):
                 if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
                     raise ArtifactError("Failed to push artifact {}: {}".format(refs, e), temporary=True) from e
 
+            if skipped_remote:
+                self.context.message(Message(
+                    None,
+                    MessageType.SKIPPED,
+                    "Remote ({}) already has {} cached".format(
+                        remote.spec.url, element._get_brief_display_key())
+                ))
 
         return pushed
 
 ################################################
......
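The hunk above replaces a per-ref INFO message with a single SKIPPED message per remote: `skipped_remote` starts out True and is flipped to False only once a blob actually needs uploading. A minimal sketch of the pattern, where find_missing() and upload() are hypothetical stand-ins for the real CAS/bytestream RPCs:

    # Sketch only: remote.find_missing() and remote.upload() are
    # hypothetical helpers, not the real cascache.py API.
    def push_refs(element, remotes):
        pushed = False
        for remote in remotes:
            skipped_remote = True       # assume the remote is already up to date
            missing = remote.find_missing(element)
            if missing:
                skipped_remote = False  # something genuinely needs uploading
                for blob in missing:
                    remote.upload(blob)
                pushed = True
            if skipped_remote:
                # One SKIPPED message per remote, instead of the old
                # per-ref INFO message.
                print("Remote ({}) already has {} cached".format(remote, element))
        return pushed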
@@ -197,29 +197,55 @@ class Context():
                 "\nValid values are, for example: 800M 10G 1T 50%\n"
                 .format(str(e))) from e
 
-        # If we are asked not to set a quota, we set it to the maximum
-        # disk space available minus a headroom of 2GB, such that we
-        # at least try to avoid raising Exceptions.
-        #
-        # Of course, we might still end up running out during a build
-        # if we end up writing more than 2G, but hey, this stuff is
-        # already really fuzzy.
+        # Headroom intended to give BuildStream a bit of leeway.
+        # This acts as the minimum size of cache_quota and also
+        # is taken from the user requested cache_quota.
         #
-        if cache_quota is None:
-            stat = os.statvfs(artifactdir_volume)
+        if 'BST_TEST_SUITE' in os.environ:
+            headroom = 0
+        else:
+            headroom = 2e9
+
+        stat = os.statvfs(artifactdir_volume)
+        available_space = (stat.f_bsize * stat.f_bavail)
 
-            # Again, the artifact directory may not yet have been
-            # created
-            if not os.path.exists(self.artifactdir):
-                cache_size = 0
-            else:
-                cache_size = utils._get_dir_size(self.artifactdir)
-
-            cache_quota = cache_size + stat.f_bsize * stat.f_bavail
+        # Again, the artifact directory may not have been created yet
+        #
+        if not os.path.exists(self.artifactdir):
+            cache_size = 0
+        else:
+            cache_size = utils._get_dir_size(self.artifactdir)
 
-        if 'BST_TEST_SUITE' in os.environ:
-            headroom = 0
-        else:
-            headroom = 2e9
+        # Ensure system has enough storage for the cache_quota
+        #
+        # If cache_quota is none, set it to the maximum it could possibly be.
+        #
+        # Also check that cache_quota is at least as large as our headroom.
+        #
+        if cache_quota is None:  # Infinity, set to max system storage
+            cache_quota = cache_size + available_space
+        if cache_quota < headroom:  # Check minimum
+            raise LoadError(LoadErrorReason.INVALID_DATA,
+                            "Invalid cache quota ({}): ".format(utils._pretty_size(cache_quota)) +
+                            "BuildStream requires a minimum cache quota of 2G.")
+        elif cache_quota > cache_size + available_space:  # Check maximum
+            raise LoadError(LoadErrorReason.INVALID_DATA,
+                            ("Your system does not have enough available " +
+                             "space to support the cache quota specified.\n" +
+                             "You currently have:\n" +
+                             "- {used} of cache in use at {local_cache_path}\n" +
+                             "- {available} of available system storage").format(
+                                 used=utils._pretty_size(cache_size),
+                                 local_cache_path=self.artifactdir,
+                                 available=utils._pretty_size(available_space)))
+
+        # Place a slight headroom (2e9 (2GB) on the cache_quota) into
+        # cache_quota to try and avoid exceptions.
+        #
+        # Of course, we might still end up running out during a build
+        # if we end up writing more than 2G, but hey, this stuff is
+        # already really fuzzy.
+        #
         self.cache_quota = cache_quota - headroom
         self.cache_lower_threshold = self.cache_quota / 2
......
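The quota logic above boils down to three rules: a missing quota means "all the storage we can see", a quota below the 2G headroom is rejected, and a quota larger than cache contents plus free space is rejected. A standalone sketch of that arithmetic (function name and values are illustrative, not BuildStream API):

    def resolve_cache_quota(requested, cache_size, available_space, headroom=2e9):
        # None means "no limit": everything already cached plus free space.
        if requested is None:
            requested = cache_size + available_space
        if requested < headroom:
            raise ValueError("cache quota must be at least 2G")
        elif requested > cache_size + available_space:
            raise ValueError("quota exceeds cache contents plus free space")
        # Reserve the headroom so a build writing ~2G of extra data
        # does not immediately blow past the quota.
        return requested - headroom

    # e.g. with 1G cached and 50G free:
    # resolve_cache_quota(None, 1e9, 50e9) == 49e9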
@@ -88,6 +88,7 @@ class ErrorDomain(Enum):
     ELEMENT = 11
     APP = 12
     STREAM = 13
+    VIRTUAL_FS = 14
 
 
 # BstError is an internal base exception class for BuildSream
......
@@ -68,9 +68,10 @@ def complete_path(path_type, incomplete, base_directory='.'):
         # If there was nothing on the left of the last separator,
         # we are completing files in the filesystem root
         base_path = os.path.join(base_directory, base_path)
-    elif os.path.isdir(incomplete):
-        base_path = incomplete
+    else:
+        incomplete_base_path = os.path.join(base_directory, incomplete)
+        if os.path.isdir(incomplete_base_path):
+            base_path = incomplete_base_path
 
     try:
         if base_path:
......
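The fix matters because shell completion runs from an arbitrary working directory: the old code tested os.path.isdir(incomplete) relative to wherever bst was invoked, while completion candidates are listed relative to base_directory. A tiny illustration (paths are hypothetical):

    import os

    base_directory = '/srv/project'   # hypothetical project root
    incomplete = 'elements'           # what the user has typed so far

    # Old check: resolved against the current working directory,
    # which may not be the project root at all.
    old_result = os.path.isdir(incomplete)

    # New check: anchored at base_directory, where completion
    # candidates are actually being listed from.
    new_result = os.path.isdir(os.path.join(base_directory, incomplete))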
@@ -160,6 +160,7 @@ class TypeName(Widget):
         MessageType.START: "blue",
         MessageType.SUCCESS: "green",
         MessageType.FAIL: "red",
+        MessageType.SKIPPED: "yellow",
         MessageType.ERROR: "red",
         MessageType.BUG: "red",
     }
@@ -368,7 +369,9 @@ class LogLine(Widget):
         if consistency == Consistency.INCONSISTENT:
             line = p.fmt_subst(line, 'state', "no reference", fg='red')
         else:
-            if element._cached():
+            if element._cached_failure():
+                line = p.fmt_subst(line, 'state', "failed", fg='red')
+            elif element._cached_success():
                 line = p.fmt_subst(line, 'state', "cached", fg='magenta')
             elif consistency == Consistency.RESOLVED:
                 line = p.fmt_subst(line, 'state', "fetch needed", fg='red')
@@ -522,11 +525,14 @@
         text += "\n\n"
 
         if self._failure_messages:
-            text += self.content_profile.fmt("Failure Summary\n", bold=True)
             values = OrderedDict()
 
             for element, messages in sorted(self._failure_messages.items(), key=lambda x: x[0].name):
-                values[element.name] = ''.join(self._render(v) for v in messages)
-                text += self._format_values(values, style_value=False)
+                for queue in stream.queues:
+                    if any(el.name == element.name for el in queue.failed_elements):
+                        values[element.name] = ''.join(self._render(v) for v in messages)
+            if values:
+                text += self.content_profile.fmt("Failure Summary\n", bold=True)
+                text += self._format_values(values, style_value=False)
 
         text += self.content_profile.fmt("Pipeline Summary\n", bold=True)
......
@@ -36,6 +36,7 @@ class MessageType():
     START = "start"      # Status start message
     SUCCESS = "success"  # Successful status complete message
     FAIL = "failure"     # Failing status complete message
+    SKIPPED = "skipped"
 
 
 # Messages which should be reported regardless of whether
......
@@ -489,7 +489,7 @@ class _Planner():
             self.plan_element(dep, depth)
 
         # Dont try to plan builds of elements that are cached already
-        if not element._cached():
+        if not element._cached_success():
             for dep in element.dependencies(Scope.BUILD, recurse=False):
                 self.plan_element(dep, depth + 1)
@@ -501,4 +501,4 @@ class _Planner():
             self.plan_element(root, 0)
 
         depth_sorted = sorted(self.depth_map.items(), key=itemgetter(1), reverse=True)
-        return [item[0] for item in depth_sorted if plan_cached or not item[0]._cached()]
+        return [item[0] for item in depth_sorted if plan_cached or not item[0]._cached_success()]
@@ -18,8 +18,12 @@
 #        Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
 #        Jürg Billeter <juerg.billeter@codethink.co.uk>
 
+from datetime import timedelta
+
 from . import Queue, QueueStatus
+from ..jobs import ElementJob
 from ..resources import ResourceType
+from ..._message import MessageType
 
 
 # A queue which assembles elements
@@ -30,6 +34,38 @@ class BuildQueue(Queue):
     complete_name = "Built"
     resources = [ResourceType.PROCESS]
 
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._tried = set()
+
+    def enqueue(self, elts):
+        to_queue = []
+
+        for element in elts:
+            if not element._cached_failure() or element in self._tried:
+                to_queue.append(element)
+                continue
+
+            # Bypass queue processing entirely the first time it's tried.
+            self._tried.add(element)
+            _, description, detail = element._get_build_result()
+            logfile = element._get_build_log()
+            self._message(element, MessageType.FAIL, description,
+                          detail=detail, action_name=self.action_name,
+                          elapsed=timedelta(seconds=0),
+                          logfile=logfile)
+            job = ElementJob(self._scheduler, self.action_name,
+                             logfile, element=element, queue=self,
+                             resources=self.resources,
+                             action_cb=self.process,
+                             complete_cb=self._job_done,
+                             max_retries=self._max_retries)
+            self._done_queue.append(job)
+            self.failed_elements.append(element)
+            self._scheduler._job_complete_callback(job, False)
+
+        return super().enqueue(to_queue)
+
     def process(self, element):
         element._assemble()
         return element._get_unique_id()
@@ -43,7 +79,7 @@ class BuildQueue(Queue):
             # Keep it in the queue.
             return QueueStatus.WAIT
 
-        if element._cached():
+        if element._cached_success():
            return QueueStatus.SKIP

        if not element._buildable():
......
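The new enqueue() gives cached failures replay semantics: the first time a failed element is enqueued, its stored FAIL message and log are emitted and a pre-completed job is pushed straight onto _done_queue, so nothing is rebuilt; because the element is then recorded in _tried, a second enqueue (for example after the user picks "retry" at the interactive prompt) goes through the normal build path. A condensed sketch of just that control flow, with the job construction elided:

    # Condensed from the enqueue() above; message/job plumbing elided.
    def enqueue(self, elements):
        to_queue = []
        for element in elements:
            if not element._cached_failure() or element in self._tried:
                to_queue.append(element)   # build, or retry, normally
                continue
            self._tried.add(element)       # replay the cached failure only once
            self.failed_elements.append(element)
            # ...emit the cached FAIL message and a pre-completed job...
        return super().enqueue(to_queue)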
@@ -296,6 +296,7 @@ class Queue():
     # See the Job object for an explanation of the call signature
     #
     def _job_done(self, job, element, success, result):
+        element._update_state()
 
         # Update values that need to be synchronized in the main task
         # before calling any queue implementation
@@ -335,8 +336,9 @@ class Queue():
         # No exception occured, handle the success/failure state in the normal way
         #
-        if success:
-            self._done_queue.append(job)
+        self._done_queue.append(job)
 
+        if success:
             if processed:
                 self.processed_elements.append(element)
             else:
......
@@ -407,15 +407,16 @@ class Stream():
                              integrate=integrate) as sandbox:
 
                 # Copy or move the sandbox to the target directory
-                sandbox_root = sandbox.get_directory()
+                sandbox_vroot = sandbox.get_virtual_directory()
+
                 if not tar:
                     with target.timed_activity("Checking out files in '{}'"
                                                .format(location)):
                         try:
                             if hardlinks:
-                                self._checkout_hardlinks(sandbox_root, location)
+                                self._checkout_hardlinks(sandbox_vroot, location)
                             else:
-                                utils.copy_files(sandbox_root, location)
+                                sandbox_vroot.export_files(location)
                         except OSError as e:
                             raise StreamError("Failed to checkout files: '{}'"
                                               .format(e)) from e
@@ -424,14 +425,12 @@ class Stream():
                         with target.timed_activity("Creating tarball"):
                             with os.fdopen(sys.stdout.fileno(), 'wb') as fo:
                                 with tarfile.open(fileobj=fo, mode="w|") as tf:
-                                    Stream._add_directory_to_tarfile(
-                                        tf, sandbox_root, '.')
+                                    sandbox_vroot.export_to_tar(tf, '.')
                     else:
                         with target.timed_activity("Creating tarball '{}'"
                                                    .format(location)):
                             with tarfile.open(location, "w:") as tf:
-                                Stream._add_directory_to_tarfile(
-                                    tf, sandbox_root, '.')
+                                sandbox_vroot.export_to_tar(tf, '.')
 
         except BstError as e:
             raise StreamError("Error while staging dependencies into a sandbox"
@@ -1050,46 +1049,13 @@ class Stream():
     # Helper function for checkout()
     #
-    def _checkout_hardlinks(self, sandbox_root, directory):
+    def _checkout_hardlinks(self, sandbox_vroot, directory):
         try:
-            removed = utils.safe_remove(directory)
+            utils.safe_remove(directory)
         except OSError as e:
             raise StreamError("Failed to remove checkout directory: {}".format(e)) from e
 
-        if removed:
-            # Try a simple rename of the sandbox root; if that
-            # doesnt cut it, then do the regular link files code path
-            try:
-                os.rename(sandbox_root, directory)
-            except OSError:
-                os.makedirs(directory, exist_ok=True)
-                utils.link_files(sandbox_root, directory)
-        else:
-            utils.link_files(sandbox_root, directory)
-
-    # Add a directory entry deterministically to a tar file
-    #
-    # This function takes extra steps to ensure the output is deterministic.
-    # First, it sorts the results of os.listdir() to ensure the ordering of
-    # the files in the archive is the same. Second, it sets a fixed
-    # timestamp for each entry. See also https://bugs.python.org/issue24465.
-    @staticmethod
-    def _add_directory_to_tarfile(tf, dir_name, dir_arcname, mtime=0):
-        for filename in sorted(os.listdir(dir_name)):
-            name = os.path.join(dir_name, filename)
-            arcname = os.path.join(dir_arcname, filename)
-
-            tarinfo = tf.gettarinfo(name, arcname)
-            tarinfo.mtime = mtime
-
-            if tarinfo.isreg():
-                with open(name, "rb") as f:
-                    tf.addfile(tarinfo, f)
-            elif tarinfo.isdir():
-                tf.addfile(tarinfo)
-                Stream._add_directory_to_tarfile(tf, name, arcname, mtime)
-            else:
-                tf.addfile(tarinfo)
+        sandbox_vroot.export_files(directory, can_link=True, can_destroy=True)
 
     # Write the element build script to the given directory
     def _write_element_script(self, directory, element):
......
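The removed _add_directory_to_tarfile() helper is still a useful reference for reproducible archives: tarfile preserves os.listdir() ordering and on-disk mtimes unless told otherwise (see https://bugs.python.org/issue24465). A self-contained sketch of the same technique, independent of BuildStream:

    import os
    import tarfile

    def add_tree_deterministically(tf, dir_name, arcname, mtime=0):
        # Sorted entries give a stable member order; a fixed mtime keeps
        # repeated runs byte-identical.
        for filename in sorted(os.listdir(dir_name)):
            name = os.path.join(dir_name, filename)
            member = os.path.join(arcname, filename)
            tarinfo = tf.gettarinfo(name, member)
            tarinfo.mtime = mtime
            if tarinfo.isreg():
                with open(name, "rb") as f:
                    tf.addfile(tarinfo, f)
            elif tarinfo.isdir():
                tf.addfile(tarinfo)
                add_tree_deterministically(tf, name, member, mtime)
            else:
                tf.addfile(tarinfo)    # symlinks, devices, etc.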
 #
 #  Copyright (C) 2016 Codethink Limited
+#  Copyright (C) 2018 Bloomberg Finance LP
 #
 #  This program is free software; you can redistribute it and/or
 #  modify it under the terms of the GNU Lesser General Public
@@ -204,6 +205,7 @@ class BuildElement(Element):
     def prepare(self, sandbox):
         commands = self.__commands['configure-commands']
         if commands:
-            for cmd in commands:
-                self.__run_command(sandbox, cmd, 'configure-commands')
+            with self.timed_activity("Running configure-commands"):
+                for cmd in commands:
+                    self.__run_command(sandbox, cmd, 'configure-commands')
@@ -240,4 +242,5 @@ class BuildElement(Element):
         exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'],
                                SandboxFlags.ROOT_READ_ONLY)
         if exitcode != 0:
-            raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode))
+            raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode),
+                               collect=self.get_variable('install-root'))
This diff is collapsed.
@@ -34,7 +34,6 @@ The default configuration and possible options are as such:
 """
 
 import os
-from buildstream import utils
 from buildstream import Element, Scope
@@ -56,6 +55,9 @@ class ComposeElement(Element):
     # added, to reduce the potential for confusion
     BST_FORBID_SOURCES = True
 
+    # This plugin has been modified to avoid the use of Sandbox.get_directory
+    BST_VIRTUAL_DIRECTORY = True
+
     def configure(self, node):
         self.node_validate(node, [
             'integrate', 'include', 'exclude', 'include-orphans'
@@ -104,7 +106,8 @@
                                          orphans=self.include_orphans)
             manifest.update(files)
 
-        basedir = sandbox.get_directory()
+        # Make a snapshot of all the files.
+        vbasedir = sandbox.get_virtual_directory()
         modified_files = set()
         removed_files = set()
         added_files = set()
@@ -116,38 +119,24 @@
         if require_split:
             # Make a snapshot of all the files before integration-commands are run.
-            snapshot = {
-                f: getmtime(os.path.join(basedir, f))
-                for f in utils.list_relative_paths(basedir)
-            }
+            snapshot = set(vbasedir.list_relative_paths())
+            vbasedir.mark_unmodified()
 
         for dep in self.dependencies(Scope.BUILD):
             dep.integrate(sandbox)
 
         if require_split:
             # Calculate added, modified and removed files
-            basedir_contents = set(utils.list_relative_paths(basedir))
+            post_integration_snapshot = vbasedir.list_relative_paths()
+            modified_files = set(vbasedir.list_modified_paths())
+            basedir_contents = set(post_integration_snapshot)
             for path in manifest:
-                if path in basedir_contents:
-                    if path in snapshot:
-                        preintegration_mtime = snapshot[path]
-                        if preintegration_mtime != getmtime(os.path.join(basedir, path)):
-                            modified_files.add(path)
-                    else:
-                        # If the path appears in the manifest but not the initial snapshot,
-                        # it may be a file staged inside a directory symlink. In this case
-                        # the path we got from the manifest won't show up in the snapshot
-                        # because utils.list_relative_paths() doesn't recurse into symlink
-                        # directories.
-                        pass
-                elif path in snapshot:
+                if path in snapshot and path not in basedir_contents:
                     removed_files.add(path)
 
             for path in basedir_contents:
                 if path not in snapshot:
                     added_files.add(path)
+
             self.info("Integration modified {}, added {} and removed {} files"
                       .format(len(modified_files), len(added_files), len(removed_files)))
@@ -166,8 +155,7 @@
         # instead of into a subdir. The element assemble() method should
         # support this in some way.
         #
-        installdir = os.path.join(basedir, 'buildstream', 'install')
-        os.makedirs(installdir, exist_ok=True)
+        installdir = vbasedir.descend(['buildstream', 'install'], create=True)
 
         # We already saved the manifest for created files in the integration phase,
         # now collect the rest of the manifest.
@@ -191,19 +179,12 @@
         with self.timed_activity("Creating composition", detail=detail, silent_nested=True):
             self.info("Composing {} files".format(len(manifest)))
-            utils.link_files(basedir, installdir, files=manifest)
+            installdir.import_files(vbasedir, files=manifest, can_link=True)
 
         # And we're done
         return os.path.join(os.sep, 'buildstream', 'install')
 
-
-# Like os.path.getmtime(), but doesnt explode on symlinks
-#
-def getmtime(path):
-    stat = os.lstat(path)
-    return stat.st_mtime
-
 
 # Plugin entry point
 def setup():
     return ComposeElement
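Two snapshot strategies meet in this hunk. The old code diffed mtimes captured before and after integration (via the symlink-safe getmtime() helper); the new code delegates modification tracking to the virtual directory's mark_unmodified()/list_modified_paths(). A minimal sketch of the old mtime approach, for comparison (helper name is illustrative):

    import os

    def snapshot_mtimes(basedir, paths):
        # lstat() rather than stat(), so dangling symlinks don't explode.
        return {p: os.lstat(os.path.join(basedir, p)).st_mtime for p in paths}

    # before = snapshot_mtimes(basedir, utils.list_relative_paths(basedir))
    # ... run integration commands ...
    # after = snapshot_mtimes(basedir, utils.list_relative_paths(basedir))
    #
    # added    = after.keys() - before.keys()
    # removed  = before.keys() - after.keys()
    # modified = {p for p in before.keys() & after.keys()
    #             if before[p] != after[p]}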
@@ -31,7 +31,6 @@ The empty configuration is as such:
 """
 
 import os
-import shutil
 
 from buildstream import Element, BuildElement, ElementError
@@ -39,6 +38,9 @@ from buildstream import Element, BuildElement, ElementError
 class ImportElement(BuildElement):
     # pylint: disable=attribute-defined-outside-init
 
+    # This plugin has been modified to avoid the use of Sandbox.get_directory
+    BST_VIRTUAL_DIRECTORY = True
+
     def configure(self, node):
         self.source = self.node_subst_member(node, 'source')
         self.target = self.node_subst_member(node, 'target')
@@ -68,27 +70,22 @@
         # Do not mount workspaces as the files are copied from outside the sandbox
         self._stage_sources_in_sandbox(sandbox, 'input', mount_workspaces=False)
 
-        rootdir = sandbox.get_directory()
-        inputdir = os.path.join(rootdir, 'input')
-        outputdir = os.path.join(rootdir, 'output')
+        rootdir = sandbox.get_virtual_directory()
+        inputdir = rootdir.descend(['input'])
+        outputdir = rootdir.descend(['output'], create=True)
 
         # The directory to grab
-        inputdir = os.path.join(inputdir, self.source.lstrip(os.sep))
-        inputdir = inputdir.rstrip(os.sep)
+        inputdir = inputdir.descend(self.source.strip(os.sep).split(os.sep))
 
         # The output target directory
-        outputdir = os.path.join(outputdir, self.target.lstrip(os.sep))
-        outputdir = outputdir.rstrip(os.sep)
-
-        # Ensure target directory parent
-        os.makedirs(os.path.dirname(outputdir), exist_ok=True)
+        outputdir = outputdir.descend(self.target.strip(os.sep).split(os.sep), create=True)
 
-        if not os.path.exists(inputdir):
+        if inputdir.is_empty():
             raise ElementError("{}: No files were found inside directory '{}'"
                                .format(self, self.source))
 
         # Move it over
-        shutil.move(inputdir, outputdir)
+        outputdir.import_files(inputdir)
 
         # And we're done
         return '/output'
......
@@ -24,13 +24,15 @@ Stack elements are simply a symbolic element used for representing
 a logical group of elements.
 """
 
-import os
 from buildstream import Element
 
 
 # Element implementation for the 'stack' kind.
 class StackElement(Element):
 
+    # This plugin has been modified to avoid the use of Sandbox.get_directory
+    BST_VIRTUAL_DIRECTORY = True
+
     def configure(self, node):
         pass
@@ -52,7 +54,7 @@ class StackElement(Element):
         # Just create a dummy empty artifact, its existence is a statement
         # that all this stack's dependencies are built.
-        rootdir = sandbox.get_directory()
+        vrootdir = sandbox.get_virtual_directory()
 
         # XXX FIXME: This is currently needed because the artifact
         # cache wont let us commit an empty artifact.
@@ -61,10 +63,7 @@
         # the actual artifact data in a subdirectory, then we
         # will be able to store some additional state in the
         # artifact cache, and we can also remove this hack.
-        outputdir = os.path.join(rootdir, 'output', 'bst')
-
-        # Ensure target directory parent
-        os.makedirs(os.path.dirname(outputdir), exist_ok=True)
+        vrootdir.descend(['output', 'bst'], create=True)
 
         # And we're done
         return '/output'
......
@@ -71,6 +71,7 @@ git - stage files from a git repository
 """
 
 import os
+import errno
 import re
 import shutil
 from collections import Mapping
@@ -119,11 +120,21 @@ class GitMirror(SourceFetcher):
                              fail="Failed to clone git repository {}".format(url),
                              fail_temporarily=True)
 
+            # Attempt atomic rename into destination, this will fail if
+            # another process beat us to the punch
             try:
-                shutil.move(tmpdir, self.mirror)
-            except (shutil.Error, OSError) as e:
-                raise SourceError("{}: Failed to move cloned git repository {} from '{}' to '{}'"
-                                  .format(self.source, url, tmpdir, self.mirror)) from e
+                os.rename(tmpdir, self.mirror)
+            except OSError as e:
+
+                # When renaming and the destination repo already exists, os.rename()
+                # will fail with ENOTEMPTY, since an empty directory will be silently
+                # replaced
+                if e.errno == errno.ENOTEMPTY:
+                    self.source.status("{}: Discarding duplicate clone of {}"
+                                       .format(self.source, url))
+                else:
+                    raise SourceError("{}: Failed to move cloned git repository {} from '{}' to '{}': {}"
+                                      .format(self.source, url, tmpdir, self.mirror, e)) from e
 
     def _fetch(self, alias_override=None):
         url = self.source.translate_url(self.url, alias_override=alias_override)
......
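The switch from shutil.move() to os.rename() is what makes the clone race-safe: rename is atomic on POSIX, so when two fetchers clone the same mirror concurrently, exactly one rename wins and the loser gets ENOTEMPTY (non-empty destination directory) rather than a corrupted merge of the two trees. A self-contained sketch, with a hypothetical clone_into() standing in for the real git invocation:

    import errno
    import os
    import shutil
    import tempfile

    def mirror_clone(url, mirror_dir):
        # Clone into a sibling tempdir so the rename stays on one filesystem.
        tmpdir = tempfile.mkdtemp(dir=os.path.dirname(mirror_dir))
        clone_into(url, tmpdir)   # hypothetical: e.g. runs `git clone --mirror`
        try:
            os.rename(tmpdir, mirror_dir)
        except OSError as e:
            if e.errno == errno.ENOTEMPTY:
                # Another process populated mirror_dir first; discard ours.
                shutil.rmtree(tmpdir)
            else:
                raise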
@@ -32,7 +32,8 @@ from .._fuse import SafeHardlinks
 class Mount():
     def __init__(self, sandbox, mount_point, safe_hardlinks):
         scratch_directory = sandbox._get_scratch_directory()
-        root_directory = sandbox.get_directory()
+        # Getting external_directory here is acceptable as we're part of the sandbox code.
+        root_directory = sandbox.get_virtual_directory().external_directory
 
         self.mount_point = mount_point
         self.safe_hardlinks = safe_hardlinks
......
@@ -56,7 +56,9 @@ class SandboxBwrap(Sandbox):
     def run(self, command, flags, *, cwd=None, env=None):
         stdout, stderr = self._get_output()
-        root_directory = self.get_directory()
+
+        # Allowable access to underlying storage as we're part of the sandbox
+        root_directory = self.get_virtual_directory().external_directory
 
         # Fallback to the sandbox default settings for
         # the cwd and env.
......