
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (7)
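Taken together, the commits below make the local CAS mode-aware: executable blobs are stored as separate objects with an '.exec' filename suffix and their permission bits fixed before linking, an is_exec flag is threaded through object lookup, staging, fetching and pushing, the suffix is disabled for server-side caches, and the Loader's temporary junction checkouts are replaced by staging directories keyed by element cache key. New tests assert that access rights survive build, checkout, push and pull.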
@@ -45,9 +45,10 @@ _BUFFER_SIZE = 65536
#
class CASCache():
-def __init__(self, path):
+def __init__(self, path, *, disable_exec=False):
self.casdir = os.path.join(path, 'cas')
self.tmpdir = os.path.join(path, 'tmp')
+self._disable_exec = disable_exec
os.makedirs(os.path.join(self.casdir, 'refs', 'heads'), exist_ok=True)
os.makedirs(os.path.join(self.casdir, 'objects'), exist_ok=True)
os.makedirs(self.tmpdir, exist_ok=True)
@@ -342,8 +343,12 @@ class CASCache():
# Returns:
# (str): The path of the object
#
-def objpath(self, digest):
-    return os.path.join(self.casdir, 'objects', digest.hash[:2], digest.hash[2:])
+def objpath(self, digest, *, is_exec=False):
+    if is_exec and not self._disable_exec:
+        filename = '{}.exec'.format(digest.hash[2:])
+    else:
+        filename = digest.hash[2:]
+    return os.path.join(self.casdir, 'objects', digest.hash[:2], filename)
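A standalone sketch of the naming scheme above (editor's illustration, not BuildStream code): the same content hash maps to two distinct object paths, so executable and non-executable variants of identical content are stored, and later hardlinked, independently.

    import hashlib
    import os

    def objpath(casdir, content, *, is_exec=False, disable_exec=False):
        # CAS digests are content hashes; sha256 stands in here
        digest = hashlib.sha256(content).hexdigest()
        filename = digest[2:] + ('.exec' if is_exec and not disable_exec else '')
        return os.path.join(casdir, 'objects', digest[:2], filename)

    print(objpath('/cas', b'#!/bin/sh\n'))                # .../objects/xx/yy...
    print(objpath('/cas', b'#!/bin/sh\n', is_exec=True))  # .../objects/xx/yy....exec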
# add_object():
#
@@ -360,7 +365,7 @@ class CASCache():
#
# Either `path` or `buffer` must be passed, but not both.
#
-def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False):
+def add_object(self, *, digest=None, path=None, buffer=None, link_directly=False, is_exec=False):
# Exactly one of the two parameters has to be specified
assert (path is None) != (buffer is None)
@@ -376,10 +381,7 @@ class CASCache():
for chunk in iter(lambda: tmp.read(_BUFFER_SIZE), b""):
h.update(chunk)
else:
-tmp = stack.enter_context(utils._tempnamedfile(dir=self.tmpdir))
-# Set mode bits to 0644
-os.chmod(tmp.name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
+tmp = stack.enter_context(self._temporary_object(is_exec=is_exec))
if path:
with open(path, 'rb') as f:
for chunk in iter(lambda: f.read(_BUFFER_SIZE), b""):
@@ -395,7 +397,7 @@ class CASCache():
digest.size_bytes = os.fstat(tmp.fileno()).st_size
# Place file at final location
-objpath = self.objpath(digest)
+objpath = self.objpath(digest, is_exec=is_exec)
os.makedirs(os.path.dirname(objpath), exist_ok=True)
os.link(tmp.name, objpath)
@@ -604,11 +606,7 @@ class CASCache():
for filenode in directory.files:
# regular file, create hardlink
fullpath = os.path.join(dest, filenode.name)
-os.link(self.objpath(filenode.digest), fullpath)
-if filenode.is_executable:
-    os.chmod(fullpath, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
-             stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)
+os.link(self.objpath(filenode.digest, is_exec=filenode.is_executable), fullpath)
for dirnode in directory.directories:
# Don't try to checkout a dangling ref
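The hunk above replaces a chmod after os.link with a link against the pre-moded '.exec' object. The likely motivation (editor's demonstration, POSIX-only, not BuildStream code): permission bits live on the inode, so chmod on one hardlink mutates every link, including the cached object itself.

    import os
    import stat
    import tempfile

    d = tempfile.mkdtemp()
    obj = os.path.join(d, 'object')     # stands in for a CAS object
    dest = os.path.join(d, 'checkout')  # stands in for a checked-out file
    open(obj, 'w').close()
    os.chmod(obj, 0o644)
    os.link(obj, dest)
    os.chmod(dest, 0o755)  # intended for the checkout only...
    # ...but the mode is shared by every hardlink to the inode:
    assert stat.S_IMODE(os.stat(obj).st_mode) == 0o755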
@@ -700,8 +698,8 @@ class CASCache():
elif stat.S_ISREG(mode):
filenode = directory.files.add()
filenode.name = name
-self.add_object(path=full_path, digest=filenode.digest)
 filenode.is_executable = (mode & stat.S_IXUSR) == stat.S_IXUSR
+self.add_object(path=full_path, digest=filenode.digest, is_exec=filenode.is_executable)
elif stat.S_ISLNK(mode):
symlinknode = directory.symlinks.add()
symlinknode.name = name
@@ -800,7 +798,7 @@ class CASCache():
for filenode in directory.files:
if update_mtime:
-os.utime(self.objpath(filenode.digest))
+os.utime(self.objpath(filenode.digest, is_exec=filenode.is_executable))
reachable.add(filenode.digest.hash)
for dirnode in directory.directories:
@@ -811,7 +809,7 @@ class CASCache():
d = remote_execution_pb2.Digest()
d.hash = directory_digest.hash
d.size_bytes = directory_digest.size_bytes
-yield d
+yield False, d
directory = remote_execution_pb2.Directory()
@@ -822,11 +820,26 @@ class CASCache():
d = remote_execution_pb2.Digest()
d.hash = filenode.digest.hash
d.size_bytes = filenode.digest.size_bytes
-yield d
+yield filenode.is_executable, d
for dirnode in directory.directories:
yield from self._required_blobs(dirnode.digest)
+# _temporary_object():
+#
+# Create a named temporary file with mode 0o644, or 0o755 when `is_exec`
+# is set and executable objects are enabled.
+#
+# Returns:
+#     (file): A file object to a named temporary file.
+#
+@contextlib.contextmanager
+def _temporary_object(self, *, is_exec=False):
+    with utils._tempnamedfile(dir=self.tmpdir) as f:
+        access = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
+        if is_exec and not self._disable_exec:
+            access |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
+        os.chmod(f.name, access)
+        yield f
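In combination with add_object() above, _temporary_object() fixes the permission bits before the file is hardlinked into the object store. A self-contained sketch of that pattern (editor's illustration; store() is a made-up helper and assumes objpath does not already exist):

    import os
    import stat
    import tempfile

    def store(tmpdir, objpath, data, *, is_exec=False):
        mode = stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH   # 0o644
        if is_exec:
            mode |= stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH             # 0o755
        with tempfile.NamedTemporaryFile(dir=tmpdir) as f:
            os.chmod(f.name, mode)  # set the mode before the object becomes visible
            f.write(data)
            f.flush()
            os.makedirs(os.path.dirname(objpath), exist_ok=True)
            os.link(f.name, objpath)  # every future hardlink shares this mode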
# _ensure_blob():
#
# Fetch and add blob if it's not already local.
@@ -838,27 +851,27 @@ class CASCache():
# Returns:
# (str): The path of the object
#
-def _ensure_blob(self, remote, digest):
-    objpath = self.objpath(digest)
+def _ensure_blob(self, remote, digest, is_exec=False):
+    objpath = self.objpath(digest, is_exec=is_exec)
if os.path.exists(objpath):
# already in local repository
return objpath
-with utils._tempnamedfile(dir=self.tmpdir) as f:
+with self._temporary_object(is_exec=is_exec) as f:
remote._fetch_blob(digest, f)
-added_digest = self.add_object(path=f.name, link_directly=True)
+added_digest = self.add_object(path=f.name, link_directly=True, is_exec=is_exec)
assert added_digest.hash == digest.hash
return objpath
def _batch_download_complete(self, batch):
-for digest, data in batch.send():
-    with utils._tempnamedfile(dir=self.tmpdir) as f:
+for digest, data, is_exec in batch.send():
+    with self._temporary_object(is_exec=is_exec) as f:
f.write(data)
f.flush()
-added_digest = self.add_object(path=f.name, link_directly=True)
+added_digest = self.add_object(path=f.name, link_directly=True, is_exec=is_exec)
assert added_digest.hash == digest.hash
# Helper function for _fetch_directory().
@@ -872,8 +885,9 @@ class CASCache():
return _CASBatchRead(remote)
# Helper function for _fetch_directory().
-def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
-    in_local_cache = os.path.exists(self.objpath(digest))
+def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue,
+                          *, recursive=False, is_exec=False):
+    in_local_cache = os.path.exists(self.objpath(digest, is_exec=is_exec))
if in_local_cache:
# Skip download, already in local cache.
@@ -881,14 +895,14 @@ class CASCache():
elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
not remote.batch_read_supported):
# Too large for batch request, download in independent request.
-self._ensure_blob(remote, digest)
+self._ensure_blob(remote, digest, is_exec=is_exec)
in_local_cache = True
else:
-if not batch.add(digest):
+if not batch.add(digest, is_exec=is_exec):
# Not enough space left in batch request.
# Complete pending batch first.
batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
-batch.add(digest)
+batch.add(digest, is_exec=is_exec)
if recursive:
if in_local_cache:
@@ -936,11 +950,13 @@ class CASCache():
for dirnode in directory.directories:
if dirnode.name not in excluded_subdirs:
batch = self._fetch_directory_node(remote, dirnode.digest, batch,
-                                   fetch_queue, fetch_next_queue, recursive=True)
+                                   fetch_queue, fetch_next_queue,
+                                   recursive=True)
for filenode in directory.files:
batch = self._fetch_directory_node(remote, filenode.digest, batch,
-                                   fetch_queue, fetch_next_queue)
+                                   fetch_queue, fetch_next_queue,
+                                   is_exec=filenode.is_executable)
# Fetch final batch
self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
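_fetch_directory_node() above follows a flush-and-retry batching pattern: add() refuses an item that would overflow the batch, the pending batch is sent, and the item is retried on a fresh batch (oversized blobs bypass batching entirely, as in the earlier hunk). A generic sketch with illustrative names, not BuildStream's:

    class Batch:
        def __init__(self, budget):
            self.budget, self.used, self.items = budget, 0, []

        def add(self, size):
            if self.used + size > self.budget:
                return False  # would overflow; caller must flush first
            self.used += size
            self.items.append(size)
            return True

    def enqueue(batch, size, send):
        if not batch.add(size):
            send(batch.items)           # complete the pending batch first
            batch = Batch(batch.budget)
            batch.add(size)             # fits now, assuming size <= budget
        return batch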
@@ -962,7 +978,7 @@ class CASCache():
tree.children.extend([tree.root])
for directory in tree.children:
for filenode in directory.files:
-self._ensure_blob(remote, filenode.digest)
+self._ensure_blob(remote, filenode.digest, is_exec=filenode.is_executable)
# place directory blob only in final location when we've downloaded
# all referenced blobs to avoid dangling references in the repository
@@ -975,22 +991,28 @@ class CASCache():
def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
required_blobs = self._required_blobs(digest)
+executable = {}
missing_blobs = dict()
# Limit size of FindMissingBlobs request
for required_blobs_group in _grouper(required_blobs, 512):
request = remote_execution_pb2.FindMissingBlobsRequest(instance_name=remote.spec.instance_name)
-for required_digest in required_blobs_group:
+for is_exec, required_digest in required_blobs_group:
d = request.blob_digests.add()
d.hash = required_digest.hash
d.size_bytes = required_digest.size_bytes
+if required_digest.hash not in executable:
+    executable[required_digest.hash] = set()
+executable[required_digest.hash].add(is_exec)
response = remote.cas.FindMissingBlobs(request)
for missing_digest in response.missing_blob_digests:
d = remote_execution_pb2.Digest()
d.hash = missing_digest.hash
d.size_bytes = missing_digest.size_bytes
-missing_blobs[d.hash] = d
+for is_exec in executable[missing_digest.hash]:
+    missing_blobs[d.hash] = (is_exec, d)
# Upload any blobs missing on the server
self._send_blobs(remote, missing_blobs.values(), u_uid)
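_grouper() is referenced above but is not part of this diff; a typical implementation (editor's assumption) chunks an iterable into fixed-size groups, so each FindMissingBlobs request carries at most 512 digests:

    import itertools

    def grouper(iterable, n):
        it = iter(iterable)
        while True:
            chunk = list(itertools.islice(it, n))
            if not chunk:
                return
            yield chunk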
@@ -998,8 +1020,8 @@ class CASCache():
def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
batch = _CASBatchUpdate(remote)
-for digest in digests:
-    with open(self.objpath(digest), 'rb') as f:
+for is_exec, digest in digests:
+    with open(self.objpath(digest, is_exec=is_exec), 'rb') as f:
assert os.fstat(f.fileno()).st_size == digest.size_bytes
if (digest.size_bytes >= remote.max_batch_total_size_bytes or
@@ -306,8 +306,9 @@ class _CASBatchRead():
self._request = remote_execution_pb2.BatchReadBlobsRequest()
self._size = 0
self._sent = False
+self._is_exec = {}
-def add(self, digest):
+def add(self, digest, *, is_exec=False):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
@@ -319,6 +320,9 @@ class _CASBatchRead():
request_digest.hash = digest.hash
request_digest.size_bytes = digest.size_bytes
self._size = new_batch_size
+if digest.hash not in self._is_exec:
+    self._is_exec[digest.hash] = set()
+self._is_exec[digest.hash].add(is_exec)
return True
def send(self):
@@ -341,7 +345,8 @@ class _CASBatchRead():
raise CASRemoteError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
response.digest.hash, response.digest.size_bytes, len(response.data)))
-yield (response.digest, response.data)
+for is_exec in self._is_exec[response.digest.hash]:
+    yield (response.digest, response.data, is_exec)
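Editor's note: _is_exec maps each digest hash to the set of flags seen, so content that is required both as an executable and as a plain file is yielded once per flag; _batch_download_complete() then commits one local object per variant (hash and hash.exec) even though the downloaded bytes are identical.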
# Represents a batch of blobs queued for upload.
@@ -61,7 +61,7 @@ class ArtifactTooLargeException(Exception):
def create_server(repo, *, enable_push,
max_head_size=int(10e9),
min_head_size=int(2e9)):
-cas = CASCache(os.path.abspath(repo))
+cas = CASCache(os.path.abspath(repo), disable_exec=True)
# Use max_workers default from Python 3.5+
max_workers = (os.cpu_count() or 1) * 5
@@ -20,8 +20,6 @@
import os
from functools import cmp_to_key
from collections.abc import Mapping
-import tempfile
-import shutil
from .._exceptions import LoadError, LoadErrorReason
from .. import Consistency
@@ -49,12 +47,10 @@ from .._message import Message, MessageType
# context (Context): The Context object
# project (Project): The toplevel Project object
# parent (Loader): A parent Loader object, in the case this is a junctioned Loader
-# tempdir (str): A directory to cleanup with the Loader, given to the loader by a parent
-#                loader in the case that this loader is a subproject loader.
#
class Loader():
-def __init__(self, context, project, *, parent=None, tempdir=None):
+def __init__(self, context, project, *, parent=None):
# Ensure we have an absolute path for the base directory
basedir = project.element_path
@@ -73,7 +69,6 @@ class Loader():
self._options = project.options # Project options (OptionPool)
self._basedir = basedir # Base project directory
self._first_pass_options = project.first_pass_config.options # Project options (OptionPool)
-self._tempdir = tempdir    # A directory to cleanup
self._parent = parent # The parent loader
self._meta_elements = {} # Dict of resolved meta elements by name
@@ -159,30 +154,6 @@ class Loader():
return ret
-# cleanup():
-#
-# Remove temporary checkout directories of subprojects
-#
-def cleanup(self):
-    if self._parent and not self._tempdir:
-        # already done
-        return
-
-    # recurse
-    for loader in self._loaders.values():
-        # value may be None with nested junctions without overrides
-        if loader is not None:
-            loader.cleanup()
-
-    if not self._parent:
-        # basedir of top-level loader is never a temporary directory
-        return
-
-    # safe guard to not accidentally delete directories outside builddir
-    if self._tempdir.startswith(self._context.builddir + os.sep):
-        if os.path.exists(self._tempdir):
-            shutil.rmtree(self._tempdir)
###########################################
# Private Methods #
###########################################
@@ -540,23 +511,28 @@ class Loader():
"Subproject has no ref for junction: {}".format(filename),
detail=detail)
-if len(sources) == 1 and sources[0]._get_local_path():
+workspace = element._get_workspace()
+if workspace:
+    # If a workspace is open, load it from there instead
+    basedir = workspace.get_absolute_path()
+elif len(sources) == 1 and sources[0]._get_local_path():
# Optimization for junctions with a single local source
basedir = sources[0]._get_local_path()
-tempdir = None
else:
# Stage sources
-os.makedirs(self._context.builddir, exist_ok=True)
-basedir = tempfile.mkdtemp(prefix="{}-".format(element.normal_name), dir=self._context.builddir)
-element._stage_sources_at(basedir, mount_workspaces=False)
-tempdir = basedir
+element._update_state()
+basedir = os.path.join(self.project.directory, ".bst", "staged-junctions",
+                       filename, element._get_cache_key())
+if not os.path.exists(basedir):
+    os.makedirs(basedir, exist_ok=True)
+    element._stage_sources_at(basedir, mount_workspaces=False)
# Load the project
project_dir = os.path.join(basedir, element.path)
try:
from .._project import Project
project = Project(project_dir, self._context, junction=element,
-                  parent_loader=self, tempdir=tempdir)
+                  parent_loader=self)
except LoadError as e:
if e.reason == LoadErrorReason.MISSING_PROJECT_CONF:
raise LoadError(reason=LoadErrorReason.INVALID_JUNCTION,
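The Loader change above swaps per-run temporary checkouts (which cleanup() had to reap) for staging directories derived from the element's cache key, so a junction is staged at most once per key and reused across invocations. A standalone sketch of that idea (editor's illustration; the helper names are made up):

    import os

    def staged_dir(project_dir, junction_name, cache_key):
        # e.g. <project>/.bst/staged-junctions/base.bst/<cache key>
        return os.path.join(project_dir, ".bst", "staged-junctions",
                            junction_name, cache_key)

    def ensure_staged(project_dir, junction_name, cache_key, stage):
        basedir = staged_dir(project_dir, junction_name, cache_key)
        if not os.path.exists(basedir):
            os.makedirs(basedir, exist_ok=True)
            stage(basedir)  # expensive staging runs at most once per cache key
        return basedir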
@@ -91,7 +91,7 @@ class ProjectConfig:
class Project():
def __init__(self, directory, context, *, junction=None, cli_options=None,
-              default_mirror=None, parent_loader=None, tempdir=None):
+              default_mirror=None, parent_loader=None):
# The project name
self.name = None
@@ -147,7 +147,7 @@ class Project():
self._project_includes = None
profile_start(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-'))
-self._load(parent_loader=parent_loader, tempdir=tempdir)
+self._load(parent_loader=parent_loader)
profile_end(Topics.LOAD_PROJECT, self.directory.replace(os.sep, '-'))
self._partially_loaded = True
@@ -389,8 +389,6 @@ class Project():
# Cleans up resources used loading elements
#
def cleanup(self):
-self.loader.cleanup()
# Reset the element loader state
Element._reset_load_state()
@@ -439,7 +437,7 @@ class Project():
#
# Raises: LoadError if there was a problem with the project.conf
#
-def _load(self, parent_loader=None, tempdir=None):
+def _load(self, parent_loader=None):
# Load builtin default
projectfile = os.path.join(self.directory, _PROJECT_CONF_FILE)
@@ -505,8 +503,7 @@ class Project():
self._fatal_warnings = _yaml.node_get(pre_config_node, list, 'fatal-warnings', default_value=[])
self.loader = Loader(self._context, self,
-                     parent=parent_loader,
-                     tempdir=tempdir)
+                     parent=parent_loader)
self._project_includes = Includes(self.loader, copy_tree=False)
@@ -2,6 +2,8 @@ import os
import tarfile
import hashlib
import pytest
+import shutil
+import stat
import subprocess
from tests.testutils.site import IS_WINDOWS
from tests.testutils import create_repo, ALL_REPO_KINDS, generate_junction
@@ -709,3 +711,34 @@ def test_build_checkout_cross_junction(datafiles, cli, tmpdir):
filename = os.path.join(checkout, 'etc', 'animal.conf')
assert os.path.exists(filename)
+@pytest.mark.datafiles(DATA_DIR)
+def test_access_rights(datafiles, cli):
+    project = str(datafiles)
+    checkout = os.path.join(cli.directory, 'checkout')
+
+    shutil.copyfile(os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello'),
+                    os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello-2'))
+    os.chmod(os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello'),
+             0o0755)
+    os.chmod(os.path.join(project, 'files', 'bin-files', 'usr', 'bin', 'hello-2'),
+             0o0644)
+
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    result.assert_success()
+
+    checkout_args = ['artifact', 'checkout', 'target.bst',
+                     '--directory', checkout]
+
+    # Now check it out
+    result = cli.run(project=project, args=checkout_args)
+    result.assert_success()
+
+    st = os.lstat(os.path.join(checkout, 'usr', 'bin', 'hello'))
+    assert stat.S_ISREG(st.st_mode)
+    assert stat.S_IMODE(st.st_mode) == 0o0755
+
+    st = os.lstat(os.path.join(checkout, 'usr', 'bin', 'hello-2'))
+    assert stat.S_ISREG(st.st_mode)
+    assert stat.S_IMODE(st.st_mode) == 0o0644
import os
+import shutil
+import stat
import pytest
from buildstream.plugintestutils import cli
from tests.testutils import create_artifact_share, generate_junction
@@ -462,3 +463,74 @@ def test_build_remote_option(caplog, cli, tmpdir, datafiles):
assert shareproject.repo not in result.stderr
assert shareuser.repo not in result.stderr
assert sharecli.repo in result.stderr
+@pytest.mark.datafiles(DATA_DIR)
+def test_pull_access_rights(caplog, cli, tmpdir, datafiles):
+    project = str(datafiles)
+    checkout = os.path.join(str(tmpdir), 'checkout')
+
+    # Work-around datafiles not preserving mode
+    os.chmod(os.path.join(project, 'files/bin-files/usr/bin/hello'), 0o0755)
+
+    # We need a big file that does not go into a batch to test a different
+    # code path
+    os.makedirs(os.path.join(project, 'files/dev-files/usr/share'), exist_ok=True)
+    with open(os.path.join(project, 'files/dev-files/usr/share/big-file'), 'w') as f:
+        buf = ' ' * 4096
+        for _ in range(1024):
+            f.write(buf)
+
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        cli.configure({
+            'artifacts': {'url': share.repo, 'push': True}
+        })
+        result = cli.run(project=project, args=['build', 'compose-all.bst'])
+        result.assert_success()
+
+        result = cli.run(project=project,
+                         args=['artifact', 'checkout',
+                               '--hardlinks', '--no-integrate',
+                               'compose-all.bst',
+                               '--directory', checkout])
+        result.assert_success()
+
+        st = os.lstat(os.path.join(checkout, 'usr/include/pony.h'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
+
+        st = os.lstat(os.path.join(checkout, 'usr/bin/hello'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0755
+
+        st = os.lstat(os.path.join(checkout, 'usr/share/big-file'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
+
+        shutil.rmtree(checkout)
+        artifacts = os.path.join(cli.directory, 'artifacts')
+        shutil.rmtree(artifacts)
+
+        result = cli.run(project=project, args=['artifact', 'pull', 'compose-all.bst'])
+        result.assert_success()
+
+        result = cli.run(project=project,
+                         args=['artifact', 'checkout',
+                               '--hardlinks', '--no-integrate',
+                               'compose-all.bst',
+                               '--directory', checkout])
+        result.assert_success()
+
+        st = os.lstat(os.path.join(checkout, 'usr/include/pony.h'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
+
+        st = os.lstat(os.path.join(checkout, 'usr/bin/hello'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0755
+
+        st = os.lstat(os.path.join(checkout, 'usr/share/big-file'))
+        assert stat.S_ISREG(st.st_mode)
+        assert stat.S_IMODE(st.st_mode) == 0o0644
@@ -49,7 +49,7 @@ class ArtifactShare():
os.makedirs(self.repodir)
-self.cas = CASCache(self.repodir)
+self.cas = CASCache(self.repodir, disable_exec=True)
self.total_space = total_space
self.free_space = free_space