Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (30)
Showing with 273 additions and 115 deletions
......@@ -11,10 +11,9 @@ before being considered for inclusion, we strongly recommend proposing
in advance of commencing work.
If you are experiencing an issue with BuildStream or would like to submit a small patch/feature, then
you can open issue `here <https://gitlab.com/BuildStream/buildstream/issues/new?issue%5Bassignee_id%5D=&issue%5Bmilestone_id%5D=>`
you can `open an issue <https://gitlab.com/BuildStream/buildstream/issues/new?issue%5Bassignee_id%5D=&issue%5Bmilestone_id%5D=>`_
For policies on how to submit and issue and how to use our project labels, we recommend that you read the policies guide
`here <https://gitlab.com/BuildStream/nosoftware/alignment/blob/master/BuildStream_policies.md>`
For policies on how to submit an issue and how to use our project labels, we recommend that you read the `policies guide <https://gitlab.com/BuildStream/nosoftware/alignment/blob/master/BuildStream_policies.md>`_
New features must be well documented and tested either in our main
test suite if possible, or otherwise in the integration tests.
......
......@@ -228,8 +228,8 @@ class CASCache(ArtifactCache):
for remote in self._remotes[project]:
try:
remote.init()
element.info("Pulling {} <- {}".format(element._get_brief_display_key(), remote.spec.url))
display_key = element._get_brief_display_key()
element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url))
request = buildstream_pb2.GetReferenceRequest()
request.key = ref
......@@ -243,6 +243,7 @@ class CASCache(ArtifactCache):
self.set_ref(ref, tree)
element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
# no need to pull from additional remotes
return True
......@@ -251,11 +252,8 @@ class CASCache(ArtifactCache):
raise ArtifactError("Failed to pull artifact {}: {}".format(
element._get_brief_display_key(), e)) from e
else:
self.context.message(Message(
None,
MessageType.SKIPPED,
"Remote ({}) does not have {} cached".format(
remote.spec.url, element._get_brief_display_key())
element.info("Remote ({}) does not have {} cached".format(
remote.spec.url, element._get_brief_display_key()
))
return False
......@@ -336,35 +334,43 @@ class CASCache(ArtifactCache):
for remote in push_remotes:
remote.init()
element.info("Pushing {} -> {}".format(element._get_brief_display_key(), remote.spec.url))
display_key = element._get_brief_display_key()
element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url))
if self._push_refs_to_remote(refs, remote):
element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))
pushed = True
else:
self.context.message(Message(
None,
MessageType.SKIPPED,
"Remote ({}) already has {} cached".format(
remote.spec.url, element._get_brief_display_key())
element.info("Remote ({}) already has {} cached".format(
remote.spec.url, element._get_brief_display_key()
))
return pushed
def push_directory(self, project, directory):
""" Push the given virtual directory to all remotes.
Args:
project (Project): The current project
directory (Directory): A virtual directory object to push.
Raises: ArtifactError if no push remotes are configured.
"""
push_remotes = [r for r in self._remotes[project] if r.spec.push]
if not push_remotes:
raise ArtifactError("CASCache: push_directory was called, but no remote artifact " +
"servers are configured as push remotes.")
if directory.ref is None:
return None
return
for remote in push_remotes:
remote.init()
self._send_directory(remote, directory.ref)
return directory.ref
def push_message(self, project, message):
push_remotes = [r for r in self._remotes[project] if r.spec.push]
......
......@@ -312,3 +312,12 @@ class StreamError(BstError):
class AppError(BstError):
def __init__(self, message, detail=None, reason=None):
super().__init__(message, detail=detail, domain=ErrorDomain.APP, reason=reason)
# SkipJob
#
# Raised from a child process within a job when the job should be
# considered skipped by the parent process.
#
class SkipJob(Exception):
pass
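
The intended flow, in brief (a sketch only; the actual raise and catch sites appear in the queue and job changes further down):

    # In a queue's process() implementation, running in the child process:
    if not element._pull():
        raise SkipJob("pull")

    # In the job's child action, wrapping the call to process():
    try:
        result = self.child_process()
    except SkipJob:
        self._child_shutdown(RC_SKIPPED)  # the parent maps this return code to "skipped"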
......@@ -26,7 +26,6 @@ import datetime
from textwrap import TextWrapper
import click
from click import UsageError
from blessings import Terminal
# Import buildstream public symbols
from .. import Scope
......@@ -92,7 +91,7 @@ class App():
#
# Early initialization
#
is_a_tty = Terminal().is_a_tty
is_a_tty = sys.stdout.isatty() and sys.stderr.isatty()
# Enable interactive mode if we're attached to a tty
if main_options['no_interactive']:
......@@ -116,14 +115,6 @@ class App():
else:
self.colors = False
# Increase the soft limit for open file descriptors to the maximum.
# SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
# Avoid hitting the limit too quickly.
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
if limits[0] != limits[1]:
# Set soft limit to hard limit
resource.setrlimit(resource.RLIMIT_NOFILE, (limits[1], limits[1]))
# create()
#
# Should be used instead of the regular constructor.
......
......@@ -16,8 +16,10 @@
#
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
import os
import sys
import click
from blessings import Terminal
import curses
# Import a widget internal for formatting time codes
from .widget import TimeCode
......@@ -43,6 +45,13 @@ from .._scheduler import ElementJob
#
class Status():
# Table of the terminal capabilities we require and use
_TERM_CAPABILITIES = {
'move_up': 'cuu1',
'move_x': 'hpa',
'clear_eol': 'el'
}
def __init__(self, context,
content_profile, format_profile,
success_profile, error_profile,
......@@ -56,7 +65,6 @@ class Status():
self._stream = stream
self._jobs = []
self._last_lines = 0 # Number of status lines we last printed to console
self._term = Terminal()
self._spacing = 1
self._colors = colors
self._header = _StatusHeader(context,
......@@ -69,6 +77,7 @@ class Status():
self._alloc_columns = None
self._line_length = 0
self._need_alloc = True
self._term_caps = self._init_terminal()
# add_job()
#
......@@ -121,7 +130,7 @@ class Status():
#
def clear(self):
if not self._term.does_styling:
if not self._term_caps:
return
for _ in range(self._last_lines):
......@@ -138,7 +147,7 @@ class Status():
# not necessary to call clear().
def render(self):
if not self._term.does_styling:
if not self._term_caps:
return
elapsed = self._stream.elapsed_time
......@@ -185,6 +194,55 @@ class Status():
###################################################
# Private Methods #
###################################################
# _init_terminal()
#
# Initialize the terminal and return the resolved terminal
# capabilities dictionary.
#
# Returns:
# (dict|None): The resolved terminal capabilities dictionary,
# or None if the terminal does not support all
# of the required capabilities.
#
def _init_terminal(self):
# We need both output streams to be connected to a terminal
if not (sys.stdout.isatty() and sys.stderr.isatty()):
return None
# Initialize the terminal; curses might decide it doesn't
# support this terminal
try:
curses.setupterm(os.environ.get('TERM', 'dumb'))
except curses.error:
return None
term_caps = {}
# Resolve the terminal escape sequences for each of the
# capability names we require.
#
for capname, capval in self._TERM_CAPABILITIES.items():
code = curses.tigetstr(capval)
# If any of the required capabilities resolve to empty strings or None,
# then we don't have the capabilities we need for a status bar on
# this terminal.
if not code:
return None
# Decode sequences as latin1, as they are always 8-bit bytes,
# so when b'\xff' is returned, this must be decoded to u'\xff'.
#
# This technique is employed by the python blessings library
# as well, and should provide better compatibility with most
# terminals.
#
term_caps[capname] = code.decode('latin1')
return term_caps
def _check_term_width(self):
term_width, _ = click.get_terminal_size()
if self._term_width != term_width:
......@@ -192,12 +250,24 @@ class Status():
self._need_alloc = True
def _move_up(self):
assert self._term_caps is not None
# Explicitly move to beginning of line, fixes things up
# when there was a ^C or ^Z printed to the terminal.
click.echo(self._term.move_x(0) + self._term.move_up, nl=False, err=True)
move_x = curses.tparm(self._term_caps['move_x'].encode('latin1'), 0)
move_x = move_x.decode('latin1')
move_up = curses.tparm(self._term_caps['move_up'].encode('latin1'))
move_up = move_up.decode('latin1')
click.echo(move_x + move_up, nl=False, err=True)
def _clear_line(self):
click.echo(self._term.clear_eol, nl=False, err=True)
assert self._term_caps is not None
clear_eol = curses.tparm(self._term_caps['clear_eol'].encode('latin1'))
clear_eol = clear_eol.decode('latin1')
click.echo(clear_eol, nl=False, err=True)
def _allocate(self):
if not self._need_alloc:
......
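
For reference, the whole curses technique introduced above condenses to the following standalone pattern (a sketch, not the BuildStream code itself):

    import curses
    import os
    import sys

    REQUIRED = {'move_up': 'cuu1', 'move_x': 'hpa', 'clear_eol': 'el'}

    def resolve_term_caps():
        # A status bar only makes sense when both streams are terminals
        if not (sys.stdout.isatty() and sys.stderr.isatty()):
            return None
        try:
            curses.setupterm(os.environ.get('TERM', 'dumb'))
        except curses.error:
            return None
        caps = {}
        for name, capname in REQUIRED.items():
            code = curses.tigetstr(capname)
            if not code:
                return None  # a required capability is missing on this terminal
            # Escape sequences are raw 8-bit bytes; latin1 maps each
            # byte to the code point of the same value
            caps[name] = code.decode('latin1')
        return caps

    caps = resolve_term_caps()
    if caps:
        # 'hpa' takes a column parameter, so it goes through tparm()
        move_x0 = curses.tparm(caps['move_x'].encode('latin1'), 0).decode('latin1')
        sys.stderr.write(move_x0 + caps['move_up'] + caps['clear_eol'])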
#
# Copyright (C) 2017 Codethink Limited
# Copyright (C) 2018 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
import os
import resource
from .._exceptions import PlatformError
from ..sandbox import SandboxDummy
from . import Platform
class Darwin(Platform):
# This value comes from OPEN_MAX in syslimits.h
OPEN_MAX = 10240
def __init__(self, context):
super().__init__(context)
@property
def artifactcache(self):
return self._artifact_cache
def create_sandbox(self, *args, **kwargs):
return SandboxDummy(*args, **kwargs)
def get_cpu_count(self, cap=None):
# cap may be None; comparing None against an int raises TypeError
cpu_count = os.cpu_count()
return cpu_count if cap is None else min(cap, cpu_count)
def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
super().set_resource_limits(soft_limit)
......@@ -17,13 +17,14 @@
# Authors:
# Tristan Maat <tristan.maat@codethink.co.uk>
import os
import subprocess
from .. import _site
from .. import utils
from .._artifactcache.cascache import CASCache
from .._message import Message, MessageType
from ..sandbox import SandboxBwrap
from ..sandbox import SandboxDummy
from . import Platform
......@@ -32,27 +33,44 @@ class Linux(Platform):
def __init__(self, context):
super().__init__(context)
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
self._user_ns_available = self._check_user_ns_available(context)
self._artifact_cache = CASCache(context, enable_push=self._user_ns_available)
if self._local_sandbox_available():
self._user_ns_available = self._check_user_ns_available(context)
else:
self._user_ns_available = False
# _user_ns_available needs to be set before chaining up to the super class
# This is because it will call create_artifact_cache()
super().__init__(context)
@property
def artifactcache(self):
return self._artifact_cache
def create_sandbox(self, *args, **kwargs):
# Inform the bubblewrap sandbox as to whether it can use user namespaces or not
kwargs['user_ns_available'] = self._user_ns_available
kwargs['die_with_parent_available'] = self._die_with_parent_available
return SandboxBwrap(*args, **kwargs)
if not self._local_sandbox_available():
return SandboxDummy(*args, **kwargs)
else:
from ..sandbox._sandboxbwrap import SandboxBwrap
# Inform the bubblewrap sandbox as to whether it can use user namespaces or not
kwargs['user_ns_available'] = self._user_ns_available
kwargs['die_with_parent_available'] = self._die_with_parent_available
return SandboxBwrap(*args, **kwargs)
def create_artifact_cache(self, context, *, enable_push):
return super().create_artifact_cache(context=context, enable_push=self._user_ns_available)
################################################
# Private Methods #
################################################
def _check_user_ns_available(self, context):
def _local_sandbox_available(self):
try:
return os.path.exists(utils.get_host_tool('bwrap')) and os.path.exists('/dev/fuse')
except utils.ProgramNotFoundError:
return False
def _check_user_ns_available(self, context):
# Here, lets check if bwrap is able to create user namespaces,
# issue a warning if it's not available, and save the state
# locally so that we can inform the sandbox to not try it
......
......@@ -19,8 +19,10 @@
import os
import sys
import resource
from .._exceptions import PlatformError, ImplError
from .._artifactcache.cascache import CASCache
class Platform():
......@@ -37,22 +39,28 @@ class Platform():
#
def __init__(self, context):
self.context = context
self.set_resource_limits()
self._artifact_cache = self.create_artifact_cache(context, enable_push=True)
@classmethod
def create_instance(cls, *args, **kwargs):
if sys.platform.startswith('linux'):
backend = 'linux'
else:
backend = 'unix'
# Meant for testing purposes and therefore hidden in the
# deepest corners of the source code. Try not to abuse this,
# please?
if os.getenv('BST_FORCE_BACKEND'):
backend = os.getenv('BST_FORCE_BACKEND')
elif sys.platform.startswith('linux'):
backend = 'linux'
elif sys.platform.startswith('darwin'):
backend = 'darwin'
else:
backend = 'unix'
if backend == 'linux':
from .linux import Linux as PlatformImpl
elif backend == 'darwin':
from .darwin import Darwin as PlatformImpl
elif backend == 'unix':
from .unix import Unix as PlatformImpl
else:
......@@ -66,6 +74,9 @@ class Platform():
raise PlatformError("Platform needs to be initialized first")
return cls._instance
def get_cpu_count(self, cap=None):
# cap defaults to None, so guard it before taking the minimum
cpu_count = len(os.sched_getaffinity(0))
return cpu_count if cap is None else min(cpu_count, cap)
##################################################################
# Platform properties #
##################################################################
......@@ -92,3 +103,18 @@ class Platform():
def create_sandbox(self, *args, **kwargs):
raise ImplError("Platform {platform} does not implement create_sandbox()"
.format(platform=type(self).__name__))
def set_resource_limits(self, soft_limit=None, hard_limit=None):
# Need to set resources for _frontend/app.py as this is dependent on the platform
# SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
# Avoid hitting the limit too quickly.
limits = resource.getrlimit(resource.RLIMIT_NOFILE)
if limits[0] != limits[1]:
if soft_limit is None:
soft_limit = limits[1]
if hard_limit is None:
hard_limit = limits[1]
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
def create_artifact_cache(self, context, *, enable_push=True):
return CASCache(context=context, enable_push=enable_push)
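
With the default arguments, the net effect is simply to raise the process's soft file descriptor limit to its hard limit; the standalone equivalent is (a sketch):

    import resource

    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft != hard:
        # SafeHardlinks FUSE holds file descriptors for all processes in
        # the sandbox, so raise the soft limit as far as the kernel allows
        resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard))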
......@@ -21,7 +21,6 @@ import os
from .._artifactcache.cascache import CASCache
from .._exceptions import PlatformError
from ..sandbox import SandboxChroot
from . import Platform
......@@ -31,7 +30,6 @@ class Unix(Platform):
def __init__(self, context):
super().__init__(context)
self._artifact_cache = CASCache(context)
# Not necessarily 100% reliable, but we want to fail early.
if os.geteuid() != 0:
......@@ -42,4 +40,5 @@ class Unix(Platform):
return self._artifact_cache
def create_sandbox(self, *args, **kwargs):
from ..sandbox._sandboxchroot import SandboxChroot
return SandboxChroot(*args, **kwargs)
......@@ -38,6 +38,7 @@ from ._loader import Loader
from .element import Element
from ._message import Message, MessageType
from ._includes import Includes
from ._platform import Platform
# Project Configuration file
......@@ -617,7 +618,8 @@ class Project():
# Based on some testing (mainly on AWS), the maximum effective
# max-jobs value seems to be around 8-10 if we have enough cores;
# users should set values based on workload and build infrastructure
output.base_variables['max-jobs'] = str(min(len(os.sched_getaffinity(0)), 8))
platform = Platform.get_platform()
output.base_variables['max-jobs'] = str(platform.get_cpu_count(8))
# Export options into variables, if that was requested
output.options.export_variables(output.base_variables)
......
......@@ -31,7 +31,7 @@ import multiprocessing
import psutil
# BuildStream toplevel imports
from ..._exceptions import ImplError, BstError, set_last_task_error
from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob
from ..._message import Message, MessageType, unconditional_messages
from ... import _signals, utils
......@@ -40,6 +40,7 @@ from ... import _signals, utils
RC_OK = 0
RC_FAIL = 1
RC_PERM_FAIL = 2
RC_SKIPPED = 3
# Used to distinguish between status messages and return values
......@@ -117,7 +118,7 @@ class Job():
self._max_retries = max_retries # Maximum number of automatic retries
self._result = None # Return value of child action in the parent
self._tries = 0 # Try count, for retryable jobs
self._skipped_flag = False # Indicate whether the job was skipped.
# If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
#
self._retry_flag = True
......@@ -277,6 +278,14 @@ class Job():
def set_task_id(self, task_id):
self._task_id = task_id
# skipped
#
# Returns:
# bool: True if the job was skipped while processing.
@property
def skipped(self):
return self._skipped_flag
#######################################################
# Abstract Methods #
#######################################################
......@@ -398,6 +407,13 @@ class Job():
try:
# Try the task action
result = self.child_process()
except SkipJob as e:
elapsed = datetime.datetime.now() - starttime
self.message(MessageType.SKIPPED, str(e),
elapsed=elapsed, logfile=filename)
# Alert parent of skip by return code
self._child_shutdown(RC_SKIPPED)
except BstError as e:
elapsed = datetime.datetime.now() - starttime
self._retry_flag = e.temporary
......@@ -545,14 +561,18 @@ class Job():
# We don't want to retry if we got OK or a permanent fail.
# This is set in _child_action but must also be set for the parent.
#
self._retry_flag = returncode not in (RC_OK, RC_PERM_FAIL)
self._retry_flag = returncode == RC_FAIL
# Set the flag to alert Queue that this job skipped.
self._skipped_flag = returncode == RC_SKIPPED
if self._retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated:
self.spawn()
return
self.parent_complete(returncode == RC_OK, self._result)
self._scheduler.job_completed(self, returncode == RC_OK)
success = returncode in (RC_OK, RC_SKIPPED)
self.parent_complete(success, self._result)
self._scheduler.job_completed(self, success)
# Force the deletion of the queue and process objects to try and clean up FDs
self._queue = self._process = None
......
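
Putting the job changes together, the child-to-parent skip protocol reduces to roughly this shape (a simplified sketch, not the actual Job class):

    class SkipJob(Exception):  # as added in _exceptions.py above
        pass

    RC_OK, RC_FAIL, RC_PERM_FAIL, RC_SKIPPED = 0, 1, 2, 3

    def child_action(process):
        # Runs in the child process; process() is e.g. PullQueue.process()
        try:
            process()
        except SkipJob:
            return RC_SKIPPED
        except Exception:
            return RC_FAIL
        return RC_OK

    def parent_child_completed(returncode):
        retry = returncode == RC_FAIL                # only plain failures retry
        skipped = returncode == RC_SKIPPED           # the Queue tallies these separately
        success = returncode in (RC_OK, RC_SKIPPED)  # a skip still counts as success
        return retry, skipped, success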
......@@ -47,6 +47,7 @@ class BuildQueue(Queue):
to_queue.append(element)
continue
# XXX: Fix this, See https://mail.gnome.org/archives/buildstream-list/2018-September/msg00029.html
# Bypass queue processing entirely the first time it's tried.
self._tried.add(element)
_, description, detail = element._get_build_result()
......@@ -113,5 +114,3 @@ class BuildQueue(Queue):
# This has to be done after _assemble_done, such that the
# element may register its cache key as required
self._check_cache_size(job, element, result)
return True
......@@ -72,11 +72,9 @@ class FetchQueue(Queue):
def done(self, _, element, result, success):
if not success:
return False
return
element._update_state()
# Successful fetch, we must be CACHED now
assert element._get_consistency() == Consistency.CACHED
return True
......@@ -21,6 +21,7 @@
# Local imports
from . import Queue, QueueStatus
from ..resources import ResourceType
from ..._exceptions import SkipJob
# A queue which pulls element artifacts
......@@ -33,7 +34,8 @@ class PullQueue(Queue):
def process(self, element):
# returns whether an artifact was downloaded or not
return element._pull()
if not element._pull():
raise SkipJob(self.action_name)
def status(self, element):
# state of dependencies may have changed, recalculate element state
......@@ -63,7 +65,3 @@ class PullQueue(Queue):
# do not get an artifact size from pull jobs, we have to
# actually check the cache size.
self._scheduler.check_cache_size()
# Element._pull() returns True if it downloaded an artifact,
# here we want to appear skipped if we did not download.
return result
......@@ -21,6 +21,7 @@
# Local imports
from . import Queue, QueueStatus
from ..resources import ResourceType
from ..._exceptions import SkipJob
# A queue which pushes element artifacts
......@@ -33,20 +34,11 @@ class PushQueue(Queue):
def process(self, element):
# returns whether an artifact was uploaded or not
return element._push()
if not element._push():
raise SkipJob(self.action_name)
def status(self, element):
if element._skip_push():
return QueueStatus.SKIP
return QueueStatus.READY
def done(self, _, element, result, success):
if not success:
return False
# Element._push() returns True if it uploaded an artifact,
# here we want to appear skipped if the remote already had
# the artifact.
return result
......@@ -136,10 +136,6 @@ class Queue():
# success (bool): True if the process() implementation did not
# raise any exception
#
# Returns:
# (bool): True if the element should appear to be processed,
# Otherwise False will count the element as "skipped"
#
def done(self, job, element, result, success):
pass
......@@ -306,8 +302,7 @@ class Queue():
# and determine if it should be considered as processed
# or skipped.
try:
processed = self.done(job, element, result, success)
self.done(job, element, result, success)
except BstError as e:
# Report error and mark as failed
......@@ -337,7 +332,7 @@ class Queue():
self._done_queue.append(job)
if success:
if processed:
if not job.skipped:
self.processed_elements.append(element)
else:
self.skipped_elements.append(element)
......
......@@ -51,18 +51,11 @@ class TrackQueue(Queue):
def done(self, _, element, result, success):
if not success:
return False
changed = False
return
# Set the new refs in the main process one by one as they complete
for unique_id, new_ref in result:
source = _plugin_lookup(unique_id)
# We appear processed if at least one source has changed
if source._save_ref(new_ref):
changed = True
source._save_ref(new_ref)
element._tracking_done()
# We'll appear as a skipped element if tracking resulted in no change
return changed
......@@ -703,6 +703,7 @@ class Stream():
# Create a temporary directory to build the source tree in
builddir = self._context.builddir
os.makedirs(builddir, exist_ok=True)
prefix = "{}-".format(target.normal_name)
with TemporaryDirectory(prefix=prefix, dir=builddir) as tempdir:
......@@ -1085,6 +1086,7 @@ class Stream():
for element in elements:
source_dir = os.path.join(directory, "source")
element_source_dir = os.path.join(source_dir, element.normal_name)
os.makedirs(element_source_dir)
element._stage_sources_at(element_source_dir)
......
......@@ -1760,8 +1760,6 @@ class Element(Plugin):
return False
# Notify successful download
display_key = self._get_brief_display_key()
self.info("Downloaded artifact {}".format(display_key))
return True
# _skip_push():
......@@ -1800,16 +1798,13 @@ class Element(Plugin):
self.warn("Not pushing tainted artifact.")
return False
display_key = self._get_brief_display_key()
with self.timed_activity("Pushing artifact {}".format(display_key)):
# Push all keys used for local commit
pushed = self.__artifacts.push(self, self.__get_cache_keys_for_commit())
if not pushed:
return False
# Push all keys used for local commit
pushed = self.__artifacts.push(self, self.__get_cache_keys_for_commit())
if not pushed:
return False
# Notify successful upload
self.info("Pushed artifact {}".format(display_key))
return True
# Notify successful upload
return True
# _shell():
#
......@@ -2142,14 +2137,11 @@ class Element(Plugin):
project = self._get_project()
platform = Platform.get_platform()
if self.__remote_execution_url and self.BST_VIRTUAL_DIRECTORY:
if not self.__artifacts.has_push_remotes(element=self):
# Give an early warning if remote execution will not work
raise ElementError("Artifact {} is configured to use remote execution but has no push remotes. "
.format(self.name) +
"The remote artifact server(s) may not be correctly configured or contactable.")
if (directory is not None and
self.__remote_execution_url and
self.BST_VIRTUAL_DIRECTORY):
self.info("Using a remote sandbox for artifact {}".format(self.name))
self.info("Using a remote sandbox for artifact {} with directory '{}'".format(self.name, directory))
sandbox = SandboxRemote(context, project,
directory,
......
......@@ -18,6 +18,5 @@
# Tristan Maat <tristan.maat@codethink.co.uk>
from .sandbox import Sandbox, SandboxFlags
from ._sandboxchroot import SandboxChroot
from ._sandboxbwrap import SandboxBwrap
from ._sandboxremote import SandboxRemote
from ._sandboxdummy import SandboxDummy