Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (44)
Showing 665 additions and 125 deletions
......@@ -79,6 +79,8 @@ source_dist:
- cd ../..
- mkdir -p coverage-linux/
- cp dist/buildstream/.coverage.* coverage-linux/coverage."${CI_JOB_NAME}"
except:
- schedules
artifacts:
paths:
- coverage-linux/
......@@ -127,6 +129,8 @@ tests-unix:
- cd ../..
- mkdir -p coverage-unix/
- cp dist/buildstream/.coverage.* coverage-unix/coverage.unix
except:
- schedules
artifacts:
paths:
- coverage-unix/
......@@ -148,10 +152,41 @@ docs:
- make BST_FORCE_SESSION_REBUILD=1 -C doc
- cd ../..
- mv dist/buildstream/doc/build/html public
except:
- schedules
artifacts:
paths:
- public/
.overnight-tests: &overnight-tests-template
stage: test
variables:
bst_ext_url: git+https://gitlab.com/BuildStream/bst-external.git
bst_ext_ref: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
fd_sdk_ref: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.12
before_script:
- (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
- pip3 install --user -e ${bst_ext_url}@${bst_ext_ref}#egg=bst_ext
- git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
- git -C freedesktop-sdk checkout ${fd_sdk_ref}
only:
- schedules
overnight-tests:
<<: *overnight-tests-template
script:
- make -C freedesktop-sdk
tags:
- overnight-tests
overnight-tests-no-cache:
<<: *overnight-tests-template
script:
- sed -i '/artifacts:/,+1 d' freedesktop-sdk/bootstrap/project.conf
- sed -i '/artifacts:/,+1 d' freedesktop-sdk/project.conf
- make -C freedesktop-sdk
tags:
- overnight-tests
# Check code quality with gitlab's built-in feature.
#
......@@ -170,6 +205,8 @@ code_quality:
--volume "$PWD":/code
--volume /var/run/docker.sock:/var/run/docker.sock
"registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
except:
- schedules
artifacts:
paths: [gl-code-quality-report.json]
......@@ -199,6 +236,8 @@ analysis:
radon raw -s -j buildstream > analysis/raw.json
radon raw -s buildstream
except:
- schedules
artifacts:
paths:
- analysis/
......@@ -224,6 +263,8 @@ coverage:
- tests-fedora-28
- tests-unix
- source_dist
except:
- schedules
# Deploy, only for merges which land on master branch.
#
......@@ -232,8 +273,14 @@ pages:
dependencies:
- source_dist
- docs
variables:
ACME_DIR: public/.well-known/acme-challenge
script:
- find public/
- mkdir -p ${ACME_DIR}
# Required to finish the creation of the Let's Encrypt certificate,
# which allows using https://docs.buildstream.build/ for accessing
# the documentation.
- echo ${ACME_CHALLENGE} > ${ACME_DIR}/$(echo ${ACME_CHALLENGE} | cut -c1-43)
artifacts:
paths:
- public/
......@@ -248,3 +295,5 @@ pages:
# See https://gitlab.com/gitlab-org/gitlab-ce/issues/35141
#
- master
except:
- schedules
......@@ -383,6 +383,13 @@ class ArtifactCache():
# Abstract methods for subclasses to implement #
################################################
# preflight():
#
# Preflight check.
#
def preflight(self):
pass
# update_atime()
#
# Update the atime of an artifact.
......
......@@ -44,12 +44,16 @@ from .._exceptions import ArtifactError
from . import ArtifactCache
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
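# Illustrative sketch, not part of the diff: with the 1 MiB payload cap a
# 3 MiB blob is streamed as three chunks, mirroring the _send_blob() chunking
# loop further down in this file. The helper and names below are a toy
# example; only the 1024 * 1024 cap comes from this change.
import io

def _iter_chunks(instream, total_size, max_payload=1024 * 1024):
    # Read at most max_payload bytes per chunk until the blob is exhausted.
    remaining = total_size
    while remaining > 0:
        chunk_size = min(remaining, max_payload)
        remaining -= chunk_size
        yield instream.read(chunk_size)

data = b"x" * (3 * 1024 * 1024)                        # a 3 MiB payload
chunks = list(_iter_chunks(io.BytesIO(data), len(data)))
assert [len(c) for c in chunks] == [1024 * 1024] * 3   # three 1 MiB chunks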
# A CASCache manages artifacts in a CAS repository as specified in the
# Remote Execution API.
#
# Args:
# context (Context): The BuildStream context
# enable_push (bool): Whether pushing is allowed by the platform
#
# Pushing is explicitly disabled by the platform in some cases,
# like when we are falling back to functioning without using
......@@ -57,7 +61,7 @@ from . import ArtifactCache
#
class CASCache(ArtifactCache):
def __init__(self, context, *, enable_push=True):
def __init__(self, context):
super().__init__(context)
self.casdir = os.path.join(context.artifactdir, 'cas')
......@@ -66,8 +70,6 @@ class CASCache(ArtifactCache):
self._calculate_cache_quota()
self._enable_push = enable_push
# Per-project list of _CASRemote instances.
self._remotes = {}
......@@ -78,6 +80,12 @@ class CASCache(ArtifactCache):
# Implementation of abstract methods #
################################################
def preflight(self):
if (not os.path.isdir(os.path.join(self.casdir, 'refs', 'heads')) or
not os.path.isdir(os.path.join(self.casdir, 'objects'))):
raise ArtifactError("CAS repository check failed for '{}'"
.format(self.casdir))
def contains(self, element, key):
refpath = self._refpath(self.get_artifact_fullname(element, key))
......@@ -209,7 +217,7 @@ class CASCache(ArtifactCache):
return bool(remotes_for_project)
def has_push_remotes(self, *, element=None):
if not self._has_push_remotes or not self._enable_push:
if not self._has_push_remotes:
# No project has push remotes
return False
elif element is None:
......@@ -854,6 +862,80 @@ class CASCache(ArtifactCache):
assert digest.size_bytes == os.fstat(stream.fileno()).st_size
# _ensure_blob():
#
# Fetch and add blob if it's not already local.
#
# Args:
# remote (Remote): The remote to use.
# digest (Digest): Digest object for the blob to fetch.
#
# Returns:
# (str): The path of the object
#
def _ensure_blob(self, remote, digest):
objpath = self.objpath(digest)
if os.path.exists(objpath):
# already in local repository
return objpath
with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
self._fetch_blob(remote, digest, f)
added_digest = self.add_object(path=f.name)
assert added_digest.hash == digest.hash
return objpath
def _batch_download_complete(self, batch):
for digest, data in batch.send():
with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
f.write(data)
f.flush()
added_digest = self.add_object(path=f.name)
assert added_digest.hash == digest.hash
# Helper function for _fetch_directory().
def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
self._batch_download_complete(batch)
# All previously scheduled directories are now locally available,
# move them to the processing queue.
fetch_queue.extend(fetch_next_queue)
fetch_next_queue.clear()
return _CASBatchRead(remote)
# Helper function for _fetch_directory().
def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
in_local_cache = os.path.exists(self.objpath(digest))
if in_local_cache:
# Skip download, already in local cache.
pass
elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
not remote.batch_read_supported):
# Too large for batch request, download in independent request.
self._ensure_blob(remote, digest)
in_local_cache = True
else:
if not batch.add(digest):
# Not enough space left in batch request.
# Complete pending batch first.
batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
batch.add(digest)
if recursive:
if in_local_cache:
# Add directory to processing queue.
fetch_queue.append(digest)
else:
# Directory will be available after completing pending batch.
# Add directory to deferred processing queue.
fetch_next_queue.append(digest)
return batch
# _fetch_directory():
#
# Fetches remote directory and adds it to content addressable store.
......@@ -867,39 +949,32 @@ class CASCache(ArtifactCache):
# dir_digest (Digest): Digest object for the directory to fetch.
#
def _fetch_directory(self, remote, dir_digest):
objpath = self.objpath(dir_digest)
if os.path.exists(objpath):
# already in local cache
return
with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
self._fetch_blob(remote, dir_digest, out)
directory = remote_execution_pb2.Directory()
fetch_queue = [dir_digest]
fetch_next_queue = []
batch = _CASBatchRead(remote)
with open(out.name, 'rb') as f:
directory.ParseFromString(f.read())
while len(fetch_queue) + len(fetch_next_queue) > 0:
if len(fetch_queue) == 0:
batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
for filenode in directory.files:
fileobjpath = self.objpath(filenode.digest)
if os.path.exists(fileobjpath):
# already in local cache
continue
dir_digest = fetch_queue.pop(0)
with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
self._fetch_blob(remote, filenode.digest, f)
objpath = self._ensure_blob(remote, dir_digest)
digest = self.add_object(path=f.name)
assert digest.hash == filenode.digest.hash
directory = remote_execution_pb2.Directory()
with open(objpath, 'rb') as f:
directory.ParseFromString(f.read())
for dirnode in directory.directories:
self._fetch_directory(remote, dirnode.digest)
batch = self._fetch_directory_node(remote, dirnode.digest, batch,
fetch_queue, fetch_next_queue, recursive=True)
for filenode in directory.files:
batch = self._fetch_directory_node(remote, filenode.digest, batch,
fetch_queue, fetch_next_queue)
# Place directory blob only in final location when we've
# downloaded all referenced blobs to avoid dangling
# references in the repository.
digest = self.add_object(path=out.name)
assert digest.hash == dir_digest.hash
# Fetch final batch
self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
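# Illustrative sketch, not part of the diff: the control flow of the new
# breadth-first _fetch_directory(), reduced to a self-contained toy. A dict
# stands in for the remote (mapping a digest to the digests it references),
# a set for the local repository, and a plain list for _CASBatchRead; all
# helper names below are hypothetical.
def _toy_fetch_directory(remote, root, local):
    fetch_queue = [root]       # directories ready to be processed
    fetch_next_queue = []      # directories deferred until the batch is sent
    batch = []                 # stand-in for _CASBatchRead

    def flush(batch):
        # Stand-in for _fetch_directory_batch(): download the batched blobs,
        # then promote the deferred directories to the processing queue.
        local.update(batch)
        fetch_queue.extend(fetch_next_queue)
        fetch_next_queue.clear()
        return []

    while fetch_queue or fetch_next_queue:
        if not fetch_queue:
            batch = flush(batch)
        digest = fetch_queue.pop(0)
        local.add(digest)      # stand-in for _ensure_blob()
        for child in remote.get(digest, []):
            if child in local:
                fetch_queue.append(child)       # already local, process directly
            else:
                batch.append(child)             # fetch via batch request
                fetch_next_queue.append(child)  # process after the batch lands
    flush(batch)               # mirrors the "Fetch final batch" step above

# Toy usage: 'a' references 'b' and 'c'; 'b' references 'd'.
local = set()
_toy_fetch_directory({'a': ['b', 'c'], 'b': ['d']}, 'a', local)
assert local == {'a', 'b', 'c', 'd'}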
def _fetch_tree(self, remote, digest):
# download but do not store the Tree object
......@@ -914,16 +989,7 @@ class CASCache(ArtifactCache):
tree.children.extend([tree.root])
for directory in tree.children:
for filenode in directory.files:
fileobjpath = self.objpath(filenode.digest)
if os.path.exists(fileobjpath):
# already in local cache
continue
with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
self._fetch_blob(remote, filenode.digest, f)
added_digest = self.add_object(path=f.name)
assert added_digest.hash == filenode.digest.hash
self._ensure_blob(remote, filenode.digest)
# place directory blob only in final location when we've downloaded
# all referenced blobs to avoid dangling references in the repository
......@@ -942,12 +1008,12 @@ class CASCache(ArtifactCache):
finished = False
remaining = digest.size_bytes
while not finished:
chunk_size = min(remaining, 64 * 1024)
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
request = bytestream_pb2.WriteRequest()
request.write_offset = offset
# max. 64 kB chunks
# max. _MAX_PAYLOAD_BYTES chunks
request.data = instream.read(chunk_size)
request.resource_name = resname
request.finish_write = remaining <= 0
......@@ -1035,11 +1101,78 @@ class _CASRemote():
self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
try:
request = remote_execution_pb2.GetCapabilitiesRequest()
response = self.capabilities.GetCapabilities(request)
server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
except grpc.RpcError as e:
# Simply use the defaults for servers that don't implement GetCapabilities()
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
# Check whether the server supports BatchReadBlobs()
self.batch_read_supported = False
try:
request = remote_execution_pb2.BatchReadBlobsRequest()
response = self.cas.BatchReadBlobs(request)
self.batch_read_supported = True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
self._initialized = True
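# Illustrative sketch, not part of the diff: the probe pattern used twice
# above, generalized. An optional RPC is attempted once and UNIMPLEMENTED is
# treated as "feature not available"; any other error is re-raised. The
# helper name is hypothetical.
import grpc

def _probe_optional_rpc(call, request):
    try:
        call(request)
        return True
    except grpc.RpcError as e:
        if e.code() != grpc.StatusCode.UNIMPLEMENTED:
            raise
        return False

# e.g. batch_read_supported = _probe_optional_rpc(
#          self.cas.BatchReadBlobs, remote_execution_pb2.BatchReadBlobsRequest())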
# Represents a batch of blobs queued for fetching.
#
class _CASBatchRead():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchReadBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
request_digest = self._request.digests.add()
request_digest.hash = digest.hash
request_digest.size_bytes = digest.size_bytes
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if len(self._request.digests) == 0:
return
batch_response = self._remote.cas.BatchReadBlobs(self._request)
for response in batch_response.responses:
if response.status.code != grpc.StatusCode.OK.value[0]:
raise ArtifactError("Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.digest.size_bytes != len(response.data):
raise ArtifactError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
response.digest.hash, response.digest.size_bytes, len(response.data)))
yield (response.digest, response.data)
def _grouper(iterable, n):
while True:
try:
......
......@@ -35,11 +35,10 @@ from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .._exceptions import ArtifactError
from .._context import Context
from .cascache import CASCache
# The default limit for gRPC messages is 4 MiB
_MAX_BATCH_TOTAL_SIZE_BYTES = 4 * 1024 * 1024
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
# Trying to push an artifact that is too large
......@@ -59,7 +58,7 @@ def create_server(repo, *, enable_push):
context = Context()
context.artifactdir = os.path.abspath(repo)
artifactcache = CASCache(context)
artifactcache = context.artifactcache
# Use max_workers default from Python 3.5+
max_workers = (os.cpu_count() or 1) * 5
......@@ -158,7 +157,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
remaining = client_digest.size_bytes - request.read_offset
while remaining > 0:
chunk_size = min(remaining, 64 * 1024)
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
response = bytestream_pb2.ReadResponse()
......@@ -242,7 +241,7 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres
for digest in request.digests:
batch_size += digest.size_bytes
if batch_size > _MAX_BATCH_TOTAL_SIZE_BYTES:
if batch_size > _MAX_PAYLOAD_BYTES:
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
return response
......@@ -269,7 +268,7 @@ class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
cache_capabilities = response.cache_capabilities
cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
cache_capabilities.action_cache_update_capabilities.update_enabled = False
cache_capabilities.max_batch_total_size_bytes = _MAX_BATCH_TOTAL_SIZE_BYTES
cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
response.deprecated_api_version.major = 2
......
......@@ -30,6 +30,7 @@ from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end
from ._artifactcache import ArtifactCache
from ._artifactcache.cascache import CASCache
from ._workspaces import Workspaces
from .plugin import _plugin_lookup
......@@ -113,6 +114,7 @@ class Context():
self._cache_key = None
self._message_handler = None
self._message_depth = deque()
self._artifactcache = None
self._projects = []
self._project_overrides = {}
self._workspaces = None
......@@ -227,6 +229,13 @@ class Context():
"{}: on-error should be one of: {}".format(
provenance, ", ".join(valid_actions)))
@property
def artifactcache(self):
if not self._artifactcache:
self._artifactcache = CASCache(self)
return self._artifactcache
# add_project():
#
# Add a project to the context.
......
......@@ -198,10 +198,15 @@ class App():
if option_value is not None:
setattr(self.context, context_attr, option_value)
try:
Platform.create_instance(self.context)
Platform.get_platform()
except BstError as e:
self._error_exit(e, "Error instantiating platform")
try:
self.context.artifactcache.preflight()
except BstError as e:
self._error_exit(e, "Error instantiating artifact cache")
# Create the logger right before setting the message handler
self.logger = LogLine(self.context,
self._content_profile,
......
......@@ -28,7 +28,6 @@ from .. import Consistency
from .. import _yaml
from ..element import Element
from .._profile import Topics, profile_start, profile_end
from .._platform import Platform
from .._includes import Includes
from .types import Symbol, Dependency
......@@ -518,8 +517,7 @@ class Loader():
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: Expected junction but element kind is {}".format(filename, meta_element.kind))
platform = Platform.get_platform()
element = Element._new_from_meta(meta_element, platform.artifactcache)
element = Element._new_from_meta(meta_element, self._context.artifactcache)
element._preflight()
sources = list(element.sources())
......
......@@ -17,11 +17,11 @@
# Authors:
# Tristan Maat <tristan.maat@codethink.co.uk>
import os
import subprocess
from .. import _site
from .. import utils
from .._artifactcache.cascache import CASCache
from .._message import Message, MessageType
from ..sandbox import SandboxBwrap
......@@ -30,17 +30,15 @@ from . import Platform
class Linux(Platform):
def __init__(self, context):
def __init__(self):
super().__init__(context)
super().__init__()
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
self._user_ns_available = self._check_user_ns_available(context)
self._artifact_cache = CASCache(context, enable_push=self._user_ns_available)
self._uid = os.geteuid()
self._gid = os.getegid()
@property
def artifactcache(self):
return self._artifact_cache
self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
self._user_ns_available = self._check_user_ns_available()
def create_sandbox(self, *args, **kwargs):
# Inform the bubblewrap sandbox as to whether it can use user namespaces or not
......@@ -48,10 +46,19 @@ class Linux(Platform):
kwargs['die_with_parent_available'] = self._die_with_parent_available
return SandboxBwrap(*args, **kwargs)
def check_sandbox_config(self, config):
if self._user_ns_available:
# User namespace support allows arbitrary build UID/GID settings.
return True
else:
# Without user namespace support, the UID/GID in the sandbox
# will match the host UID/GID.
return config.build_uid == self._uid and config.build_gid == self._gid
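# Illustrative sketch, not part of the diff: the rule implemented above, in
# isolation. Without user namespaces the sandbox UID/GID must match the
# host's; element.py (later in this diff) uses a False result to taint the
# artifact and skip pushing it. The helper name is hypothetical.
import os

def _sandbox_config_ok(user_ns_available, build_uid, build_gid):
    if user_ns_available:
        return True                # arbitrary UID/GID can be mapped
    return build_uid == os.geteuid() and build_gid == os.getegid()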
################################################
# Private Methods #
################################################
def _check_user_ns_available(self, context):
def _check_user_ns_available(self):
# Here, lets check if bwrap is able to create user namespaces,
# issue a warning if it's not available, and save the state
......@@ -75,9 +82,4 @@ class Linux(Platform):
return True
else:
context.message(
Message(None, MessageType.WARN,
"Unable to create user namespaces with bubblewrap, resorting to fallback",
detail="Some builds may not function due to lack of uid / gid 0, " +
"artifacts created will not be trusted for push purposes."))
return False
......@@ -29,17 +29,13 @@ class Platform():
# Platform()
#
# A class to manage platform-specific details. Currently holds the
# sandbox factory, the artifact cache and staging operations, as
# well as platform helpers.
# sandbox factory as well as platform helpers.
#
# Args:
# context (context): The project context
#
def __init__(self, context):
self.context = context
def __init__(self):
pass
@classmethod
def create_instance(cls, *args, **kwargs):
def _create_instance(cls):
if sys.platform.startswith('linux'):
backend = 'linux'
else:
......@@ -58,22 +54,14 @@ class Platform():
else:
raise PlatformError("No such platform: '{}'".format(backend))
cls._instance = PlatformImpl(*args, **kwargs)
cls._instance = PlatformImpl()
@classmethod
def get_platform(cls):
if not cls._instance:
raise PlatformError("Platform needs to be initialized first")
cls._create_instance()
return cls._instance
##################################################################
# Platform properties #
##################################################################
@property
def artifactcache(self):
raise ImplError("Platform {platform} does not implement an artifactcache"
.format(platform=type(self).__name__))
##################################################################
# Sandbox functions #
##################################################################
......@@ -92,3 +80,7 @@ class Platform():
def create_sandbox(self, *args, **kwargs):
raise ImplError("Platform {platform} does not implement create_sandbox()"
.format(platform=type(self).__name__))
def check_sandbox_config(self, config):
raise ImplError("Platform {platform} does not implement check_sandbox_config()"
.format(platform=type(self).__name__))
......@@ -19,7 +19,6 @@
import os
from .._artifactcache.cascache import CASCache
from .._exceptions import PlatformError
from ..sandbox import SandboxChroot
......@@ -28,18 +27,21 @@ from . import Platform
class Unix(Platform):
def __init__(self, context):
def __init__(self):
super().__init__(context)
self._artifact_cache = CASCache(context)
super().__init__()
self._uid = os.geteuid()
self._gid = os.getegid()
# Not necessarily 100% reliable, but we want to fail early.
if os.geteuid() != 0:
if self._uid != 0:
raise PlatformError("Root privileges are required to run without bubblewrap.")
@property
def artifactcache(self):
return self._artifact_cache
def create_sandbox(self, *args, **kwargs):
return SandboxChroot(*args, **kwargs)
def check_sandbox_config(self, config):
# With the chroot sandbox, the UID/GID in the sandbox
# will match the host UID/GID (typically 0/0).
return config.build_uid == self._uid and config.build_gid == self._gid
// Copyright 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.rpc;
option go_package = "google.golang.org/genproto/googleapis/rpc/code;code";
option java_multiple_files = true;
option java_outer_classname = "CodeProto";
option java_package = "com.google.rpc";
option objc_class_prefix = "RPC";
// The canonical error codes for Google APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
// the most specific error code that applies. For example, prefer
// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
enum Code {
// Not an error; returned on success
//
// HTTP Mapping: 200 OK
OK = 0;
// The operation was cancelled, typically by the caller.
//
// HTTP Mapping: 499 Client Closed Request
CANCELLED = 1;
// Unknown error. For example, this error may be returned when
// a `Status` value received from another address space belongs to
// an error space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
//
// HTTP Mapping: 500 Internal Server Error
UNKNOWN = 2;
// The client specified an invalid argument. Note that this differs
// from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
//
// HTTP Mapping: 400 Bad Request
INVALID_ARGUMENT = 3;
// The deadline expired before the operation could complete. For operations
// that change the state of the system, this error may be returned
// even if the operation has completed successfully. For example, a
// successful response from a server could have been delayed long
// enough for the deadline to expire.
//
// HTTP Mapping: 504 Gateway Timeout
DEADLINE_EXCEEDED = 4;
// Some requested entity (e.g., file or directory) was not found.
//
// Note to server developers: if a request is denied for an entire class
// of users, such as gradual feature rollout or undocumented whitelist,
// `NOT_FOUND` may be used. If a request is denied for some users within
// a class of users, such as user-based access control, `PERMISSION_DENIED`
// must be used.
//
// HTTP Mapping: 404 Not Found
NOT_FOUND = 5;
// The entity that a client attempted to create (e.g., file or directory)
// already exists.
//
// HTTP Mapping: 409 Conflict
ALREADY_EXISTS = 6;
// The caller does not have permission to execute the specified
// operation. `PERMISSION_DENIED` must not be used for rejections
// caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
// instead for those errors). `PERMISSION_DENIED` must not be
// used if the caller can not be identified (use `UNAUTHENTICATED`
// instead for those errors). This error code does not imply the
// request is valid or the requested entity exists or satisfies
// other pre-conditions.
//
// HTTP Mapping: 403 Forbidden
PERMISSION_DENIED = 7;
// The request does not have valid authentication credentials for the
// operation.
//
// HTTP Mapping: 401 Unauthorized
UNAUTHENTICATED = 16;
// Some resource has been exhausted, perhaps a per-user quota, or
// perhaps the entire file system is out of space.
//
// HTTP Mapping: 429 Too Many Requests
RESOURCE_EXHAUSTED = 8;
// The operation was rejected because the system is not in a state
// required for the operation's execution. For example, the directory
// to be deleted is non-empty, an rmdir operation is applied to
// a non-directory, etc.
//
// Service implementors can use the following guidelines to decide
// between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
// (a) Use `UNAVAILABLE` if the client can retry just the failing call.
// (b) Use `ABORTED` if the client should retry at a higher level
// (e.g., when a client-specified test-and-set fails, indicating the
// client should restart a read-modify-write sequence).
// (c) Use `FAILED_PRECONDITION` if the client should not retry until
// the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, `FAILED_PRECONDITION`
// should be returned since the client should not retry unless
// the files are deleted from the directory.
//
// HTTP Mapping: 400 Bad Request
FAILED_PRECONDITION = 9;
// The operation was aborted, typically due to a concurrency issue such as
// a sequencer check failure or transaction abort.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.
//
// HTTP Mapping: 409 Conflict
ABORTED = 10;
// The operation was attempted past the valid range. E.g., seeking or
// reading past end-of-file.
//
// Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
// be fixed if the system state changes. For example, a 32-bit file
// system will generate `INVALID_ARGUMENT` if asked to read at an
// offset that is not in the range [0,2^32-1], but it will generate
// `OUT_OF_RANGE` if asked to read from an offset past the current
// file size.
//
// There is a fair bit of overlap between `FAILED_PRECONDITION` and
// `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an `OUT_OF_RANGE` error to detect when
// they are done.
//
// HTTP Mapping: 400 Bad Request
OUT_OF_RANGE = 11;
// The operation is not implemented or is not supported/enabled in this
// service.
//
// HTTP Mapping: 501 Not Implemented
UNIMPLEMENTED = 12;
// Internal errors. This means that some invariants expected by the
// underlying system have been broken. This error code is reserved
// for serious errors.
//
// HTTP Mapping: 500 Internal Server Error
INTERNAL = 13;
// The service is currently unavailable. This is most likely a
// transient condition, which can be corrected by retrying with
// a backoff.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.
//
// HTTP Mapping: 503 Service Unavailable
UNAVAILABLE = 14;
// Unrecoverable data loss or corruption.
//
// HTTP Mapping: 500 Internal Server Error
DATA_LOSS = 15;
}
\ No newline at end of file
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/rpc/code.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/rpc/code.proto',
package='google.rpc',
syntax='proto3',
serialized_options=_b('\n\016com.google.rpcB\tCodeProtoP\001Z3google.golang.org/genproto/googleapis/rpc/code;code\242\002\003RPC'),
serialized_pb=_b('\n\x15google/rpc/code.proto\x12\ngoogle.rpc*\xb7\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f\x42X\n\x0e\x63om.google.rpcB\tCodeProtoP\x01Z3google.golang.org/genproto/googleapis/rpc/code;code\xa2\x02\x03RPCb\x06proto3')
)
_CODE = _descriptor.EnumDescriptor(
name='Code',
full_name='google.rpc.Code',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='CANCELLED', index=1, number=1,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=2, number=2,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INVALID_ARGUMENT', index=3, number=3,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DEADLINE_EXCEEDED', index=4, number=4,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NOT_FOUND', index=5, number=5,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALREADY_EXISTS', index=6, number=6,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PERMISSION_DENIED', index=7, number=7,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNAUTHENTICATED', index=8, number=16,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESOURCE_EXHAUSTED', index=9, number=8,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FAILED_PRECONDITION', index=10, number=9,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ABORTED', index=11, number=10,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OUT_OF_RANGE', index=12, number=11,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNIMPLEMENTED', index=13, number=12,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTERNAL', index=14, number=13,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNAVAILABLE', index=15, number=14,
serialized_options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DATA_LOSS', index=16, number=15,
serialized_options=None,
type=None),
],
containing_type=None,
serialized_options=None,
serialized_start=38,
serialized_end=349,
)
_sym_db.RegisterEnumDescriptor(_CODE)
Code = enum_type_wrapper.EnumTypeWrapper(_CODE)
OK = 0
CANCELLED = 1
UNKNOWN = 2
INVALID_ARGUMENT = 3
DEADLINE_EXCEEDED = 4
NOT_FOUND = 5
ALREADY_EXISTS = 6
PERMISSION_DENIED = 7
UNAUTHENTICATED = 16
RESOURCE_EXHAUSTED = 8
FAILED_PRECONDITION = 9
ABORTED = 10
OUT_OF_RANGE = 11
UNIMPLEMENTED = 12
INTERNAL = 13
UNAVAILABLE = 14
DATA_LOSS = 15
DESCRIPTOR.enum_types_by_name['Code'] = _CODE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
......@@ -17,7 +17,6 @@
# Tristan Daniël Maat <tristan.maat@codethink.co.uk>
#
from .job import Job
from ..._platform import Platform
class CacheSizeJob(Job):
......@@ -25,8 +24,8 @@ class CacheSizeJob(Job):
super().__init__(*args, **kwargs)
self._complete_cb = complete_cb
platform = Platform.get_platform()
self._artifacts = platform.artifactcache
context = self._scheduler.context
self._artifacts = context.artifactcache
def child_process(self):
return self._artifacts.compute_cache_size()
......
......@@ -17,15 +17,14 @@
# Tristan Daniël Maat <tristan.maat@codethink.co.uk>
#
from .job import Job
from ..._platform import Platform
class CleanupJob(Job):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
platform = Platform.get_platform()
self._artifacts = platform.artifactcache
context = self._scheduler.context
self._artifacts = context.artifactcache
def child_process(self):
return self._artifacts.clean()
......
......@@ -24,7 +24,6 @@ from . import Queue, QueueStatus
from ..jobs import ElementJob
from ..resources import ResourceType
from ..._message import MessageType
from ..._platform import Platform
# A queue which assembles elements
......@@ -94,8 +93,8 @@ class BuildQueue(Queue):
# as returned from Element._assemble() to the estimated
# artifact cache size
#
platform = Platform.get_platform()
artifacts = platform.artifactcache
context = self._scheduler.context
artifacts = context.artifactcache
artifacts.add_artifact_size(artifact_size)
......
......@@ -29,7 +29,6 @@ from contextlib import contextmanager
# Local imports
from .resources import Resources, ResourceType
from .jobs import CacheSizeJob, CleanupJob
from .._platform import Platform
# A decent return code for Scheduler.run()
......@@ -348,8 +347,8 @@ class Scheduler():
# which will report the calculated cache size.
#
def _run_cleanup(self, cache_size):
platform = Platform.get_platform()
artifacts = platform.artifactcache
context = self.context
artifacts = context.artifactcache
if not artifacts.has_quota_exceeded():
return
......
......@@ -32,7 +32,6 @@ from ._exceptions import StreamError, ImplError, BstError, set_last_task_error
from ._message import Message, MessageType
from ._scheduler import Scheduler, SchedStatus, TrackQueue, FetchQueue, BuildQueue, PullQueue, PushQueue
from ._pipeline import Pipeline, PipelineSelection
from ._platform import Platform
from . import utils, _yaml, _site
from . import Scope, Consistency
......@@ -71,8 +70,7 @@ class Stream():
#
# Private members
#
self._platform = Platform.get_platform()
self._artifacts = self._platform.artifactcache
self._artifacts = context.artifactcache
self._context = context
self._project = project
self._pipeline = Pipeline(context, project, self._artifacts)
......
......@@ -246,15 +246,23 @@ class Element(Plugin):
self.__config = self.__extract_config(meta)
self._configure(self.__config)
# Extract Sandbox config
self.__sandbox_config = self.__extract_sandbox_config(meta)
# Extract remote execution URL
if not self.__is_junction:
self.__remote_execution_url = project.remote_execution_url
else:
self.__remote_execution_url = None
# Extract Sandbox config
self.__sandbox_config = self.__extract_sandbox_config(meta)
self.__sandbox_config_supported = True
if not self.__use_remote_execution():
platform = Platform.get_platform()
if not platform.check_sandbox_config(self.__sandbox_config):
# Local sandbox does not fully support specified sandbox config.
# This will taint the artifact, disable pushing.
self.__sandbox_config_supported = False
def __lt__(self, other):
return self.name < other.name
......@@ -1521,6 +1529,11 @@ class Element(Plugin):
context = self._get_context()
with self._output_file() as output_file:
if not self.__sandbox_config_supported:
self.warn("Sandbox configuration is not supported by the platform.",
detail="Falling back to UID {} GID {}. Artifact will not be pushed."
.format(self.__sandbox_config.build_uid, self.__sandbox_config.build_gid))
# Explicitly clean it up, keep the build dir around if exceptions are raised
os.makedirs(context.builddir, exist_ok=True)
rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
......@@ -1532,8 +1545,6 @@ class Element(Plugin):
with _signals.terminator(cleanup_rootdir), \
self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox: # nopep8
sandbox_vroot = sandbox.get_virtual_directory()
# By default, the dynamic public data is the same as the static public data.
# The plugin's assemble() method may modify this, though.
self.__dynamic_public = _yaml.node_copy(self.__public)
......@@ -1581,7 +1592,6 @@ class Element(Plugin):
finally:
if collect is not None:
try:
# Sandbox will probably have replaced its virtual directory, so get it again
sandbox_vroot = sandbox.get_virtual_directory()
collectvdir = sandbox_vroot.descend(collect.lstrip(os.sep).split(os.sep))
except VirtualDirectoryError:
......@@ -1606,6 +1616,7 @@ class Element(Plugin):
collectvdir.export_files(filesdir, can_link=True)
try:
sandbox_vroot = sandbox.get_virtual_directory()
sandbox_build_dir = sandbox_vroot.descend(
self.get_variable('build-root').lstrip(os.sep).split(os.sep))
# Hard link files from build-root dir to buildtreedir directory
......@@ -2084,7 +2095,7 @@ class Element(Plugin):
#
# Raises an error if the artifact is not cached.
#
def __assert_cached(self, keystrength=_KeyStrength.STRONG):
def __assert_cached(self, keystrength=None):
assert self.__is_cached(keystrength=keystrength), "{}: Missing artifact {}".format(
self, self._get_brief_display_key())
......@@ -2112,10 +2123,19 @@ class Element(Plugin):
workspaced_dependencies = self.__get_artifact_metadata_workspaced_dependencies()
# Other conditions should be or-ed
self.__tainted = workspaced or workspaced_dependencies
self.__tainted = (workspaced or workspaced_dependencies or
not self.__sandbox_config_supported)
return self.__tainted
# __use_remote_execution():
#
# Returns True if remote execution is configured and the element plugin
# supports it.
#
def __use_remote_execution(self):
return self.__remote_execution_url and self.BST_VIRTUAL_DIRECTORY
# __sandbox():
#
# A context manager to prepare a Sandbox object at the specified directory,
......@@ -2137,9 +2157,7 @@ class Element(Plugin):
project = self._get_project()
platform = Platform.get_platform()
if (directory is not None and
self.__remote_execution_url and
self.BST_VIRTUAL_DIRECTORY):
if directory is not None and self.__use_remote_execution():
self.info("Using a remote sandbox for artifact {} with directory '{}'".format(self.name, directory))
......
......@@ -205,7 +205,17 @@ class GitMirror(SourceFetcher):
[self.source.host_git, 'rev-parse', tracking],
fail="Unable to find commit for specified branch name '{}'".format(tracking),
cwd=self.mirror)
return output.rstrip('\n')
ref = output.rstrip('\n')
# Prefix the ref with the closest annotated tag, if available,
# to make the ref human readable
exit_code, output = self.source.check_output(
[self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref],
cwd=self.mirror)
if exit_code == 0:
ref = output.rstrip('\n')
return ref
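# Illustrative sketch, not part of the diff: 'git describe --tags --long
# --abbrev=40' produces refs of the form '<tag>-<commits-since-tag>-g<sha>',
# where the trailing object name is the full 40-character commit sha, so the
# underlying commit can still be recovered from the tail of the ref. The
# helper name and sample values below are hypothetical.
def _commit_sha_from_ref(ref):
    # Works both for described refs and for plain 40-character shas.
    return ref[-40:]

assert _commit_sha_from_ref("1.2.0-12-g" + "a" * 40) == "a" * 40
assert _commit_sha_from_ref("b" * 40) == "b" * 40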
def stage(self, directory, track=None):
fullpath = os.path.join(directory, self.path)
......