
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (20)
Showing 234 additions and 105 deletions
@@ -53,26 +53,20 @@ tests-fedora-28:
   image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:28-master-46405991
   <<: *tests

-tests-ubuntu-18.04:
-  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-ubuntu:18.04-master-46405991
+tests-fedora-29:
+  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:29-master-47052095
   <<: *tests

-tests-python-3.7-stretch:
-  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-python:3.7-stretch-master-46405991
+tests-ubuntu-18.04:
+  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-ubuntu:18.04-master-46405991
   <<: *tests
-  variables:
-    # Note that we explicitly specify TOXENV in this case because this
-    # image has both 3.6 and 3.7 versions. python3.6 cannot be removed because
-    # some of our base dependencies declare it as their runtime dependency.
-    TOXENV: py37

 tests-centos-7.6:
   <<: *tests
   image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-centos:7.6.1810-master-46405991

-overnight-fedora-28-aarch64:
-  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:aarch64-28-master-46405991
+overnight-fedora-29-aarch64:
+  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:aarch64-29-master-47052095
   tags:
   - aarch64
   <<: *tests
@@ -91,7 +85,7 @@ overnight-fedora-28-aarch64:
 tests-unix:
   # Use fedora here, to a) run a test on fedora and b) ensure that we
   # can get rid of ostree - this is not possible with debian-8
-  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:28-master-46405991
+  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:29-master-47052095
   <<: *tests
   variables:
     BST_FORCE_BACKEND: "unix"
@@ -109,7 +103,7 @@ tests-unix:
 tests-fedora-missing-deps:
   # Ensure that tests behave nicely while missing bwrap and ostree
-  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:28-master-46405991
+  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:29-master-47052095
   <<: *tests
   script:
@@ -128,7 +122,7 @@ tests-fedora-update-deps:
   # Check if the tests pass after updating requirements to their latest
   # allowed version.
   allow_failure: true
-  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:28-master-46405991
+  image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora:29-master-47052095
   <<: *tests
   script:
@@ -166,6 +160,7 @@ tests-wsl:
   script:
   - "${TEST_COMMAND}"
+  when: manual

 # Automatically build documentation for every commit, we want to know
 # if building documentation fails even if we're not deploying it.
@@ -288,6 +283,7 @@ coverage:
   dependencies:
   - tests-debian-9
   - tests-fedora-28
+  - tests-fedora-29
   - tests-fedora-missing-deps
   - tests-ubuntu-18.04
   - tests-unix
......
@@ -35,6 +35,8 @@ from .._exceptions import CASCacheError
 from .casremote import BlobNotFound, _CASBatchRead, _CASBatchUpdate

+_BUFFER_SIZE = 65536
+
 # A CASCache manages a CAS repository as specified in the Remote Execution API.
 #
@@ -43,9 +45,10 @@ from .casremote import BlobNotFound, _CASBatchRead, _CASBatchUpdate
 #
 class CASCache():

-    def __init__(self, path):
+    def __init__(self, path, *, disable_exec=False):
         self.casdir = os.path.join(path, 'cas')
         self.tmpdir = os.path.join(path, 'tmp')
+        self._disable_exec = disable_exec
         os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
         os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
         os.makedirs(self.tmpdir, exist_ok=True)
@@ -340,8 +343,12 @@ class CASCache():
     # Returns:
     #     (str): The path of the object
     #
-    def objpath(self, digest):
-        return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
+    def objpath(self, digest, *, is_exec=False):
+        if is_exec and not self._disable_exec:
+            filename = '{}.exec'.format(digest.hash[2:])
+        else:
+            filename = digest.hash[2:]
+        return os.path.join(self.casdir, 'objects', digest.hash[:2], filename)

     # add_object():
     #
@@ -358,7 +365,7 @@ class CASCache():
     #
     # Either `path` or `buffer` must be passed, but not both.
     #
-    def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False):
+    def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False, is_exec=False):
         # Exactly one of the two parameters has to be specified
         assert (path is None) != (buffer is None)
@@ -371,16 +378,13 @@ class CASCache():
             with contextlib.ExitStack() as stack:
                 if path is not None and link_directly:
                     tmp = stack.enter_context(open(path, 'rb'))
-                    for chunk in iter(lambda: tmp.read(4096), b""):
+                    for chunk in iter(lambda: tmp.read(_BUFFER_SIZE), b""):
                         h.update(chunk)
                 else:
-                    tmp = stack.enter_context(utils._tempnamedfile(dir=self.tmpdir))
-                    # Set mode bits to 0644
-                    os.chmod(tmp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+                    tmp = stack.enter_context(self._temporary_object(is_exec=is_exec))

                     if path:
                         with open(path, 'rb') as f:
-                            for chunk in iter(lambda: f.read(4096), b""):
+                            for chunk in iter(lambda: f.read(_BUFFER_SIZE), b""):
                                 h.update(chunk)
                                 tmp.write(chunk)
                     else:
@@ -393,7 +397,7 @@ class CASCache():
                 digest.size_bytes = os.fstat(tmp.fileno()).st_size

                 # Place file at final location
-                objpath = self.objpath(digest)
+                objpath = self.objpath(digest, is_exec=is_exec)
                 os.makedirs(os.path.dirname(objpath), exist_ok=True)
                 os.link(tmp.name, objpath)
@@ -602,11 +606,7 @@ class CASCache():
         for filenode in directory.files:
             # regular file, create hardlink
             fullpath = os.path.join(dest, filenode.name)
-            os.link(self.objpath(filenode.digest), fullpath)
-
-            if filenode.is_executable:
-                os.chmod(fullpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
-                         stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+            os.link(self.objpath(filenode.digest, is_exec=filenode.is_executable), fullpath)

         for dirnode in directory.directories:
             # Don't try to checkout a dangling ref
@@ -698,8 +698,8 @@ class CASCache():
         elif stat.S_ISREG(mode):
             filenode = directory.files.add()
             filenode.name = name
-            self.add_object(path=full_path, digest=filenode.digest)
             filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
+            self.add_object(path=full_path, digest=filenode.digest, is_exec=filenode.is_executable)
         elif stat.S_ISLNK(mode):
             symlinknode = directory.symlinks.add()
             symlinknode.name = name
@@ -798,7 +798,7 @@ class CASCache():
         for filenode in directory.files:
             if update_mtime:
-                os.utime(self.objpath(filenode.digest))
+                os.utime(self.objpath(filenode.digest, is_exec=filenode.is_executable))
             reachable.add(filenode.digest.hash)

         for dirnode in directory.directories:
@@ -809,7 +809,7 @@ class CASCache():
             d = remote_execution_pb2.Digest()
             d.hash = directory_digest.hash
             d.size_bytes = directory_digest.size_bytes
-            yield d
+            yield False, d

             directory = remote_execution_pb2.Directory()
@@ -820,11 +820,26 @@ class CASCache():
             d = remote_execution_pb2.Digest()
             d.hash = filenode.digest.hash
             d.size_bytes = filenode.digest.size_bytes
-            yield d
+            yield filenode.is_executable, d

         for dirnode in directory.directories:
             yield from self._required_blobs(dirnode.digest)

+    # _temporary_object():
+    #
+    # Returns:
+    #     (file): A file object to a named temporary file.
+    #
+    # Create a named temporary file with 0o0644 access rights.
+    @contextlib.contextmanager
+    def _temporary_object(self, *, is_exec=False):
+        with utils._tempnamedfile(dir=self.tmpdir) as f:
+            access = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+            if is_exec and not self._disable_exec:
+                access |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+            os.chmod(f.name, access)
+            yield f
+
     # _ensure_blob():
     #
     # Fetch and add blob if it's not already local.
@@ -836,32 +851,34 @@ class CASCache():
     # Returns:
     #     (str): The path of the object
     #
-    def _ensure_blob(self, remote, digest):
-        objpath = self.objpath(digest)
+    def _ensure_blob(self, remote, digest, is_exec=False):
+        objpath = self.objpath(digest, is_exec=is_exec)
         if os.path.exists(objpath):
             # already in local repository
             return objpath

-        with utils._tempnamedfile(dir=self.tmpdir) as f:
+        with self._temporary_object(is_exec=is_exec) as f:
             remote._fetch_blob(digest, f)

-            added_digest = self.add_object(path=f.name, link_directly=True)
+            added_digest = self.add_object(path=f.name, link_directly=True, is_exec=is_exec)
             assert added_digest.hash == digest.hash

         return objpath

-    def _batch_download_complete(self, batch):
+    def _batch_download_complete(self, batch, executable):
         for digest, data in batch.send():
-            with utils._tempnamedfile(dir=self.tmpdir) as f:
-                f.write(data)
-                f.flush()
+            for is_exec in executable[digest.hash]:
+                with self._temporary_object(is_exec=is_exec) as f:
+                    f.write(data)
+                    f.flush()

-                added_digest = self.add_object(path=f.name, link_directly=True)
-                assert added_digest.hash == digest.hash
+                    added_digest = self.add_object(path=f.name, link_directly=True, is_exec=is_exec)
+                    assert added_digest.hash == digest.hash
+            del executable[digest.hash]

     # Helper function for _fetch_directory().
-    def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
-        self._batch_download_complete(batch)
+    def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue, executable):
+        self._batch_download_complete(batch, executable)

         # All previously scheduled directories are now locally available,
         # move them to the processing queue.
@@ -870,8 +887,9 @@ class CASCache():
         return _CASBatchRead(remote)

     # Helper function for _fetch_directory().
-    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
-        in_local_cache = os.path.exists(self.objpath(digest))
+    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, executable,
+                              *, recursive=False, is_exec=False):
+        in_local_cache = os.path.exists(self.objpath(digest, is_exec=is_exec))

         if in_local_cache:
             # Skip download, already in local cache.
@@ -879,14 +897,17 @@ class CASCache():
         elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
               not remote.batch_read_supported):
             # Too large for batch request, download in independent request.
-            self._ensure_blob(remote, digest)
+            self._ensure_blob(remote, digest, is_exec=is_exec)
             in_local_cache = True
         else:
             if not batch.add(digest):
                 # Not enough space left in batch request.
                 # Complete pending batch first.
-                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue, executable)
                 batch.add(digest)
+            if digest.hash not in executable:
+                executable[digest.hash] = set()
+            executable[digest.hash].add(is_exec)

         if recursive:
             if in_local_cache:
@@ -915,13 +936,14 @@ class CASCache():
     def _fetch_directory(self, remote, dir_digest, *, excluded_subdirs=None):
         fetch_queue = [dir_digest]
         fetch_next_queue = []
+        executable = {}
         batch = _CASBatchRead(remote)
         if not excluded_subdirs:
             excluded_subdirs = []

         while len(fetch_queue) + len(fetch_next_queue) > 0:
             if not fetch_queue:
-                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue, executable)

             dir_digest = fetch_queue.pop(0)
@@ -934,14 +956,18 @@ class CASCache():
             for dirnode in directory.directories:
                 if dirnode.name not in excluded_subdirs:
                     batch = self._fetch_directory_node(remote, dirnode.digest, batch,
-                                                       fetch_queue, fetch_next_queue, recursive=True)
+                                                       fetch_queue, fetch_next_queue,
+                                                       executable,
+                                                       recursive=True)

             for filenode in directory.files:
                 batch = self._fetch_directory_node(remote, filenode.digest, batch,
-                                                   fetch_queue, fetch_next_queue)
+                                                   fetch_queue, fetch_next_queue,
+                                                   executable,
+                                                   is_exec=filenode.is_executable)

         # Fetch final batch
-        self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+        self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue, executable)

     def _fetch_subdir(self, remote, tree, subdir):
         subdirdigest = self._get_subdir(tree, subdir)
@@ -960,7 +986,7 @@ class CASCache():
             tree.children.extend([tree.root])
             for directory in tree.children:
                 for filenode in directory.files:
-                    self._ensure_blob(remote, filenode.digest)
+                    self._ensure_blob(remote, filenode.digest, is_exec=filenode.is_executable)

                 # place directory blob only in final location when we've downloaded
                 # all referenced blobs to avoid dangling references in the repository
@@ -973,22 +999,28 @@ class CASCache():
     def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
         required_blobs = self._required_blobs(digest)
+        executable = {}

         missing_blobs = dict()
         # Limit size of FindMissingBlobs request
         for required_blobs_group in _grouper(required_blobs, 512):
             request = remote_execution_pb2.FindMissingBlobsRequest(instance_name=remote.spec.instance_name)

-            for required_digest in required_blobs_group:
+            for is_exec, required_digest in required_blobs_group:
                 d = request.blob_digests.add()
                 d.hash = required_digest.hash
                 d.size_bytes = required_digest.size_bytes
+                if required_digest.hash not in executable:
+                    executable[required_digest.hash] = set()
+                executable[required_digest.hash].add(is_exec)

             response = remote.cas.FindMissingBlobs(request)
             for missing_digest in response.missing_blob_digests:
                 d = remote_execution_pb2.Digest()
                 d.hash = missing_digest.hash
                 d.size_bytes = missing_digest.size_bytes
-                missing_blobs[d.hash] = d
+                for is_exec in executable[missing_digest.hash]:
+                    missing_blobs[d.hash] = (is_exec, d)

         # Upload any blobs missing on the server
         self._send_blobs(remote, missing_blobs.values(), u_uid)
@@ -996,8 +1028,8 @@ class CASCache():
     def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
         batch = _CASBatchUpdate(remote)

-        for digest in digests:
-            with open(self.objpath(digest), 'rb') as f:
+        for is_exec, digest in digests:
+            with open(self.objpath(digest, is_exec=is_exec), 'rb') as f:
                 assert os.fstat(f.fileno()).st_size == digest.size_bytes

                 if (digest.size_bytes >= remote.max_batch_total_size_bytes or
......
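As an aside on the CASCache changes above: the executable bit becomes part of the stored object's identity on disk, so checkouts can hardlink objects without a follow-up chmod. A minimal, self-contained sketch of that naming scheme follows; the helper name and paths are illustrative only, not BuildStream API.

    import os

    def object_path(casdir, digest_hash, *, is_exec=False, disable_exec=False):
        # Content-addressed objects live under objects/<first two hex chars>/<rest>.
        # The executable variant of the same blob is stored as a separate
        # '<rest>.exec' object, mirroring the objpath() change above.
        filename = digest_hash[2:]
        if is_exec and not disable_exec:
            filename += '.exec'
        return os.path.join(casdir, 'objects', digest_hash[:2], filename)

    # The same content hash yields two distinct stored objects:
    digest_hash = 'ab' + 62 * '0'
    print(object_path('/tmp/cas', digest_hash))                # .../objects/ab/00...0
    print(object_path('/tmp/cas', digest_hash, is_exec=True))  # .../objects/ab/00...0.exec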
@@ -61,7 +61,7 @@ class ArtifactTooLargeException(Exception):
 def create_server(repo, *, enable_push,
                   max_head_size=int(10e9),
                   min_head_size=int(2e9)):
-    cas = CASCache(os.path.abspath(repo))
+    cas = CASCache(os.path.abspath(repo), disable_exec=True)

     # Use max_workers default from Python 3.5+
     max_workers = (os.cpu_count() or 1) * 5
......
@@ -361,14 +361,17 @@ class Context():
     #    (bool): Whether or not to use strict build plan
     #
     def get_strict(self):
+        if self._strict_build_plan is None:
+            # Either we're not overridden or we've never worked it out before
+            # so work out if we should be strict, and then cache the result
+            toplevel = self.get_toplevel_project()
+            overrides = self.get_overrides(toplevel.name)
+            self._strict_build_plan = _yaml.node_get(overrides, bool, 'strict', default_value=True)

         # If it was set by the CLI, it overrides any config
-        if self._strict_build_plan is not None:
-            return self._strict_build_plan
-
-        toplevel = self.get_toplevel_project()
-        overrides = self.get_overrides(toplevel.name)
-        return _yaml.node_get(overrides, bool, 'strict', default_value=True)
+        # Ditto if we've already computed this, then we return the computed
+        # value which we cache here too.
+        return self._strict_build_plan

     # get_cache_key():
     #
......
@@ -440,6 +440,9 @@ def show(app, elements, deps, except_, order, format_):
         %{public}           Public domain data
         %{workspaced}       If the element is workspaced
         %{workspace-dirs}   A list of workspace directories
+        %{deps}             A list of all dependencies
+        %{build-deps}       A list of build dependencies
+        %{runtime-deps}     A list of runtime dependencies

     The value of the %{symbol} without the leading '%' character is understood
     as a pythonic formatting string, so python formatting features apply,
......
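The three new symbols plug into the existing `bst show --format` option documented above. A possible invocation (the element name here is a placeholder) would be:

    bst show --format '%{name}: %{build-deps}' hello.bst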
@@ -27,7 +27,7 @@ from ruamel import yaml
 import click

 from . import Profile
-from .. import Element, Consistency
+from .. import Element, Consistency, Scope
 from .. import _yaml
 from .. import __version__ as bst_version
 from .._exceptions import ImplError
@@ -435,6 +435,27 @@ class LogLine(Widget):
                 line = p.fmt_subst(
                     line, 'workspace-dirs', '')

+            # Dependencies
+            if "%{deps" in format_:
+                deps = [e.name for e in element.dependencies(Scope.ALL, recurse=False)]
+                line = p.fmt_subst(
+                    line, 'deps',
+                    yaml.safe_dump(deps, default_style=None).rstrip('\n'))
+
+            # Build Dependencies
+            if "%{build-deps" in format_:
+                build_deps = [e.name for e in element.dependencies(Scope.BUILD, recurse=False)]
+                line = p.fmt_subst(
+                    line, 'build-deps',
+                    yaml.safe_dump(build_deps, default_style=False).rstrip('\n'))
+
+            # Runtime Dependencies
+            if "%{runtime-deps" in format_:
+                runtime_deps = [e.name for e in element.dependencies(Scope.RUN, recurse=False)]
+                line = p.fmt_subst(
+                    line, 'runtime-deps',
+                    yaml.safe_dump(runtime_deps, default_style=False).rstrip('\n'))
+
             report += line + '\n'

         return report.rstrip('\n')
......
@@ -365,8 +365,8 @@ _sentinel = object()
 #
 def node_get(node, expected_type, key, indices=None, *, default_value=_sentinel, allow_none=False):
     value = node.get(key, default_value)
-    provenance = node_get_provenance(node)
     if value is _sentinel:
+        provenance = node_get_provenance(node)
         raise LoadError(LoadErrorReason.INVALID_DATA,
                         "{}: Dictionary did not contain expected key '{}'".format(provenance, key))
@@ -914,6 +914,10 @@ RoundTripRepresenter.add_representer(SanitizedDict,
                                      SafeRepresenter.represent_dict)

+# Types we can short-circuit in node_sanitize for speed.
+__SANITIZE_SHORT_CIRCUIT_TYPES = (int, float, str, bool, tuple)
+
 # node_sanitize()
 #
 # Returnes an alphabetically ordered recursive copy
@@ -922,9 +926,21 @@ RoundTripRepresenter.add_representer(SanitizedDict,
 # Only dicts are ordered, list elements are left in order.
 #
 def node_sanitize(node):
+    # Short-circuit None which occurs ca. twice per element
+    if node is None:
+        return node
+
+    node_type = type(node)
+    # Next short-circuit integers, floats, strings, booleans, and tuples
+    if node_type in __SANITIZE_SHORT_CIRCUIT_TYPES:
+        return node
+    # Now short-circuit lists.  Note this is only for the raw list
+    # type, CommentedSeq and others get caught later.
+    elif node_type is list:
+        return [node_sanitize(elt) for elt in node]

-    if isinstance(node, collections.abc.Mapping):
+    # Finally ChainMap and dict, and other Mappings need special handling
+    if node_type in (dict, ChainMap) or isinstance(node, collections.Mapping):
         result = SanitizedDict()

         key_list = [key for key, _ in node_items(node)]
@@ -932,10 +948,12 @@ def node_sanitize(node):
             result[key] = node_sanitize(node[key])
         return result

+    # Catch the case of CommentedSeq and friends.  This is more rare and so
+    # we keep complexity down by still using isinstance here.
     elif isinstance(node, list):
         return [node_sanitize(elt) for elt in node]

+    # Everything else (such as commented scalars) just gets returned as-is.
     return node
@@ -1064,15 +1082,52 @@ class ChainMap(collections.ChainMap):
         return default

+# Node copying
+#
+# Unfortunately we copy nodes a *lot* and `isinstance()` is super-slow when
+# things from collections.abc get involved.  The result is the following
+# intricate but substantially faster group of tuples and the use of `in`.
+#
+# If any of the {node,list}_{chain_,}_copy routines raise a ValueError
+# then it's likely additional types need adding to these tuples.
+
+# When chaining a copy, these types are skipped since the ChainMap will
+# retrieve them from the source node when needed.  Other copiers might copy
+# them, so we call them __QUICK_TYPES.
+__QUICK_TYPES = (str, bool,
+                 yaml.scalarstring.PreservedScalarString,
+                 yaml.scalarstring.SingleQuotedScalarString,
+                 yaml.scalarstring.DoubleQuotedScalarString)
+
+# These types have to be iterated like a dictionary
+__DICT_TYPES = (dict, ChainMap, yaml.comments.CommentedMap)
+
+# These types have to be iterated like a list
+__LIST_TYPES = (list, yaml.comments.CommentedSeq)
+
+# These are the provenance types, which have to be cloned rather than any other
+# copying tactic.
+__PROVENANCE_TYPES = (Provenance, DictProvenance, MemberProvenance, ElementProvenance)
+
+# These are the directives used to compose lists, we need this because it's
+# slightly faster during the node_final_assertions checks
+__NODE_ASSERT_COMPOSITION_DIRECTIVES = ('(>)', '(<)', '(=)')
+
 def node_chain_copy(source):
     copy = ChainMap({}, source)
     for key, value in source.items():
-        if isinstance(value, collections.abc.Mapping):
+        value_type = type(value)
+        if value_type in __DICT_TYPES:
             copy[key] = node_chain_copy(value)
-        elif isinstance(value, list):
+        elif value_type in __LIST_TYPES:
             copy[key] = list_chain_copy(value)
-        elif isinstance(value, Provenance):
+        elif value_type in __PROVENANCE_TYPES:
             copy[key] = value.clone()
+        elif value_type in __QUICK_TYPES:
+            pass  # No need to copy these, the chainmap deals with it
+        else:
+            raise ValueError("Unable to be quick about node_chain_copy of {}".format(value_type))

     return copy
@@ -1080,14 +1135,17 @@ def node_chain_copy(source):
 def list_chain_copy(source):
     copy = []
     for item in source:
-        if isinstance(item, collections.abc.Mapping):
+        item_type = type(item)
+        if item_type in __DICT_TYPES:
             copy.append(node_chain_copy(item))
-        elif isinstance(item, list):
+        elif item_type in __LIST_TYPES:
             copy.append(list_chain_copy(item))
-        elif isinstance(item, Provenance):
+        elif item_type in __PROVENANCE_TYPES:
             copy.append(item.clone())
-        else:
+        elif item_type in __QUICK_TYPES:
             copy.append(item)
+        else:  # Fallback
+            raise ValueError("Unable to be quick about list_chain_copy of {}".format(item_type))

     return copy
@@ -1095,14 +1153,17 @@ def list_chain_copy(source):
 def node_copy(source):
     copy = {}
     for key, value in source.items():
-        if isinstance(value, collections.abc.Mapping):
+        value_type = type(value)
+        if value_type in __DICT_TYPES:
             copy[key] = node_copy(value)
-        elif isinstance(value, list):
+        elif value_type in __LIST_TYPES:
             copy[key] = list_copy(value)
-        elif isinstance(value, Provenance):
+        elif value_type in __PROVENANCE_TYPES:
             copy[key] = value.clone()
-        else:
+        elif value_type in __QUICK_TYPES:
             copy[key] = value
+        else:
+            raise ValueError("Unable to be quick about node_copy of {}".format(value_type))

     ensure_provenance(copy)
@@ -1112,14 +1173,17 @@ def node_copy(source):
 def list_copy(source):
     copy = []
     for item in source:
-        if isinstance(item, collections.abc.Mapping):
+        item_type = type(item)
+        if item_type in __DICT_TYPES:
             copy.append(node_copy(item))
-        elif isinstance(item, list):
+        elif item_type in __LIST_TYPES:
             copy.append(list_copy(item))
-        elif isinstance(item, Provenance):
+        elif item_type in __PROVENANCE_TYPES:
             copy.append(item.clone())
-        else:
+        elif item_type in __QUICK_TYPES:
             copy.append(item)
+        else:
+            raise ValueError("Unable to be quick about list_copy of {}".format(item_type))

     return copy
@@ -1142,22 +1206,26 @@ def node_final_assertions(node):
         # indicates that the user intended to override a list which
         # never existed in the underlying data
         #
-        if key in ['(>)', '(<)', '(=)']:
+        if key in __NODE_ASSERT_COMPOSITION_DIRECTIVES:
             provenance = node_get_provenance(node, key)
             raise LoadError(LoadErrorReason.TRAILING_LIST_DIRECTIVE,
                             "{}: Attempt to override non-existing list".format(provenance))

-        if isinstance(value, collections.abc.Mapping):
+        value_type = type(value)
+
+        if value_type in __DICT_TYPES:
             node_final_assertions(value)
-        elif isinstance(value, list):
+        elif value_type in __LIST_TYPES:
             list_final_assertions(value)


 def list_final_assertions(values):
     for value in values:
-        if isinstance(value, collections.abc.Mapping):
+        value_type = type(value)
+
+        if value_type in __DICT_TYPES:
             node_final_assertions(value)
-        elif isinstance(value, list):
+        elif value_type in __LIST_TYPES:
             list_final_assertions(value)
......
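All of the copy and assertion helpers above share one pattern: dispatch on the exact type of each value via tuple membership rather than isinstance() against collections.abc, and raise ValueError on anything unrecognised so a newly introduced type gets noticed instead of being silently mishandled. A stripped-down, self-contained illustration of that pattern (simplified type tuples, not BuildStream code):

    _DICT_TYPES = (dict,)
    _LIST_TYPES = (list,)
    _QUICK_TYPES = (str, bool, int, float)

    def quick_copy(value):
        # Exact-type lookup is deliberately used instead of isinstance()
        value_type = type(value)
        if value_type in _DICT_TYPES:
            return {key: quick_copy(item) for key, item in value.items()}
        elif value_type in _LIST_TYPES:
            return [quick_copy(item) for item in value]
        elif value_type in _QUICK_TYPES:
            return value  # immutable scalars are safe to share
        raise ValueError("Unable to be quick about copying {}".format(value_type))

    print(quick_copy({"a": [1, "x", True]}))   # {'a': [1, 'x', True]}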
@@ -235,7 +235,7 @@ def sha256sum(filename):
     try:
         h = hashlib.sha256()
         with open(filename, "rb") as f:
-            for chunk in iter(lambda: f.read(4096), b""):
+            for chunk in iter(lambda: f.read(65536), b""):
                 h.update(chunk)
     except OSError as e:
......
.TH "BST ARTIFACT CHECKOUT" "1" "24-Jan-2019" "" "bst artifact checkout Manual" .TH "BST ARTIFACT CHECKOUT" "1" "12-Feb-2019" "" "bst artifact checkout Manual"
.SH NAME .SH NAME
bst\-artifact\-checkout \- Checkout contents of an artifact bst\-artifact\-checkout \- Checkout contents of an artifact
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST ARTIFACT LOG" "1" "24-Jan-2019" "" "bst artifact log Manual" .TH "BST ARTIFACT LOG" "1" "12-Feb-2019" "" "bst artifact log Manual"
.SH NAME .SH NAME
bst\-artifact\-log \- Show logs of an artifact bst\-artifact\-log \- Show logs of an artifact
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST ARTIFACT PULL" "1" "24-Jan-2019" "" "bst artifact pull Manual" .TH "BST ARTIFACT PULL" "1" "12-Feb-2019" "" "bst artifact pull Manual"
.SH NAME .SH NAME
bst\-artifact\-pull \- Pull a built artifact bst\-artifact\-pull \- Pull a built artifact
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST ARTIFACT PUSH" "1" "24-Jan-2019" "" "bst artifact push Manual" .TH "BST ARTIFACT PUSH" "1" "12-Feb-2019" "" "bst artifact push Manual"
.SH NAME .SH NAME
bst\-artifact\-push \- Push a built artifact bst\-artifact\-push \- Push a built artifact
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST-ARTIFACT-SERVER" "1" "24-Jan-2019" "" "bst-artifact-server Manual" .TH "BST-ARTIFACT-SERVER" "1" "12-Feb-2019" "" "bst-artifact-server Manual"
.SH NAME .SH NAME
bst-artifact-server \- CAS Artifact Server bst-artifact-server \- CAS Artifact Server
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST ARTIFACT" "1" "24-Jan-2019" "" "bst artifact Manual" .TH "BST ARTIFACT" "1" "12-Feb-2019" "" "bst artifact Manual"
.SH NAME .SH NAME
bst\-artifact \- Manipulate cached artifacts bst\-artifact \- Manipulate cached artifacts
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST BUILD" "1" "24-Jan-2019" "" "bst build Manual" .TH "BST BUILD" "1" "12-Feb-2019" "" "bst build Manual"
.SH NAME .SH NAME
bst\-build \- Build elements in a pipeline bst\-build \- Build elements in a pipeline
.SH SYNOPSIS .SH SYNOPSIS
...@@ -32,3 +32,6 @@ Allow tracking to cross junction boundaries ...@@ -32,3 +32,6 @@ Allow tracking to cross junction boundaries
.TP .TP
\fB\-\-track\-save\fP \fB\-\-track\-save\fP
Deprecated: This is ignored Deprecated: This is ignored
.TP
\fB\-r,\fP \-\-remote TEXT
The URL of the remote cache (defaults to the first configured cache)
.TH "BST HELP" "1" "24-Jan-2019" "" "bst help Manual" .TH "BST HELP" "1" "12-Feb-2019" "" "bst help Manual"
.SH NAME .SH NAME
bst\-help \- Print usage information bst\-help \- Print usage information
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST INIT" "1" "24-Jan-2019" "" "bst init Manual" .TH "BST INIT" "1" "12-Feb-2019" "" "bst init Manual"
.SH NAME .SH NAME
bst\-init \- Initialize a new BuildStream project bst\-init \- Initialize a new BuildStream project
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST SHELL" "1" "24-Jan-2019" "" "bst shell Manual" .TH "BST SHELL" "1" "12-Feb-2019" "" "bst shell Manual"
.SH NAME .SH NAME
bst\-shell \- Shell into an element's sandbox environment bst\-shell \- Shell into an element's sandbox environment
.SH SYNOPSIS .SH SYNOPSIS
......
.TH "BST SHOW" "1" "24-Jan-2019" "" "bst show Manual" .TH "BST SHOW" "1" "12-Feb-2019" "" "bst show Manual"
.SH NAME .SH NAME
bst\-show \- Show elements in the pipeline bst\-show \- Show elements in the pipeline
.SH SYNOPSIS .SH SYNOPSIS
...@@ -43,6 +43,9 @@ the following symbols can be used in the format string: ...@@ -43,6 +43,9 @@ the following symbols can be used in the format string:
%{public} Public domain data %{public} Public domain data
%{workspaced} If the element is workspaced %{workspaced} If the element is workspaced
%{workspace-dirs} A list of workspace directories %{workspace-dirs} A list of workspace directories
%{deps} A list of all dependencies
%{build-deps} A list of build dependencies
%{runtime-deps} A list of runtime dependencies
.PP .PP
The value of the %{symbol} without the leading '%' character is understood The value of the %{symbol} without the leading '%' character is understood
as a pythonic formatting string, so python formatting features apply, as a pythonic formatting string, so python formatting features apply,
......
.TH "BST SOURCE CHECKOUT" "1" "24-Jan-2019" "" "bst source checkout Manual" .TH "BST SOURCE CHECKOUT" "1" "12-Feb-2019" "" "bst source checkout Manual"
.SH NAME .SH NAME
bst\-source\-checkout \- Checkout sources for an element bst\-source\-checkout \- Checkout sources for an element
.SH SYNOPSIS .SH SYNOPSIS
......