Compare revisions
Commits on Source (154)
Showing 951 additions and 639 deletions
@@ -4,11 +4,15 @@ include =
*/buildstream/*
omit =
# Omit profiling helper module
# Omit some internals
*/buildstream/_profile.py
*/buildstream/__main__.py
*/buildstream/_version.py
# Omit generated code
*/buildstream/_protos/*
*/.eggs/*
# Omit .tox directory
*/.tox/*
[report]
show_missing = True
@@ -13,11 +13,12 @@ tests/**/*.pyc
integration-cache/
tmp
.coverage
.coverage-reports/
.coverage.*
.cache
.pytest_cache/
*.bst/
.tox
.tox/
# Pycache, in case buildstream is run directly from within the source
# tree
@@ -13,6 +13,7 @@ variables:
PYTEST_ADDOPTS: "--color=yes"
INTEGRATION_CACHE: "${CI_PROJECT_DIR}/cache/integration-cache"
TEST_COMMAND: "tox -- --color=yes --integration"
COVERAGE_PREFIX: "${CI_JOB_NAME}."
#####################################################
@@ -24,15 +25,13 @@ variables:
.tests-template: &tests
stage: test
variables:
COVERAGE_DIR: coverage-linux
before_script:
# Diagnostics
- mount
- df -h
script:
- mkdir -p "${INTEGRATION_CACHE}"
- useradd -Um buildstream
- chown -R buildstream:buildstream .
@@ -40,14 +39,11 @@ variables:
- su buildstream -c "${TEST_COMMAND}"
after_script:
# Collect our reports
- mkdir -p ${COVERAGE_DIR}
- cp .coverage ${COVERAGE_DIR}/coverage."${CI_JOB_NAME}"
except:
- schedules
artifacts:
paths:
- ${COVERAGE_DIR}
- .coverage-reports
tests-debian-9:
image: buildstream/testsuite-debian:9-5da27168-32c47d1c
@@ -65,8 +61,22 @@ tests-ubuntu-18.04:
image: buildstream/testsuite-ubuntu:18.04-5da27168-32c47d1c
<<: *tests
tests-python-3.7-stretch:
image: buildstream/testsuite-python:3.7-stretch-a60f0c39
<<: *tests
variables:
# Note that we explicitly specify TOXENV in this case because this
# image has both 3.6 and 3.7 versions. python3.6 cannot be removed because
# some of our base dependencies declare it as their runtime dependency.
TOXENV: py37
tests-centos-7.6:
<<: *tests
image: buildstream/testsuite-centos:7.6-5da27168-32c47d1c
overnight-fedora-28-aarch64:
image: buildstream/testsuite-fedora:aarch64-28-06bab030-32a101f6
image: buildstream/testsuite-fedora:aarch64-28-5da27168-32c47d1c
tags:
- aarch64
<<: *tests
@@ -75,6 +85,12 @@ overnight-fedora-28-aarch64:
except: []
only:
- schedules
before_script:
# grpcio needs to be compiled from source on aarch64 so we additionally
# need a C++ compiler here.
# FIXME: Ideally this would be provided by the base image. This will be
# unblocked by https://gitlab.com/BuildStream/buildstream-docker-images/issues/34
- dnf install -y gcc-c++
tests-unix:
# Use fedora here, to a) run a test on fedora and b) ensure that we
@@ -83,7 +99,6 @@ tests-unix:
<<: *tests
variables:
BST_FORCE_BACKEND: "unix"
COVERAGE_DIR: coverage-unix
script:
@@ -96,7 +111,6 @@ tests-unix:
# Since the unix platform is required to run as root, no user change required
- ${TEST_COMMAND}
tests-fedora-missing-deps:
# Ensure that tests behave nicely while missing bwrap and ostree
image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
@@ -114,6 +128,22 @@ tests-fedora-missing-deps:
- ${TEST_COMMAND}
tests-fedora-update-deps:
# Check if the tests pass after updating requirements to their latest
# allowed version.
allow_failure: true
image: buildstream/testsuite-fedora:28-5da27168-32c47d1c
<<: *tests
script:
- useradd -Um buildstream
- chown -R buildstream:buildstream .
- make --always-make --directory requirements
- cat requirements/*.txt
- su buildstream -c "${TEST_COMMAND}"
# Lint separately from testing
lint:
stage: test
@@ -146,8 +176,8 @@ docs:
stage: test
variables:
BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
BST_EXT_REF: 573843768f4d297f85dc3067465b3c7519a8dcc3 # 0.7.0
FD_SDK_REF: 612f66e218445eee2b1a9d7dd27c9caba571612e # freedesktop-sdk-18.08.19-54-g612f66e2
BST_EXT_REF: 0.9.0-0-g63a19e8068bd777bd9cd59b1a9442f9749ea5a85
FD_SDK_REF: freedesktop-sdk-18.08.25-0-g250939d465d6dd7768a215f1fa59c4a3412fc337
before_script:
- |
mkdir -p "${HOME}/.config"
@@ -160,6 +190,9 @@ docs:
- pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
- git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
- git -C freedesktop-sdk checkout ${FD_SDK_REF}
artifacts:
paths:
- "${HOME}/.cache/buildstream/logs"
only:
- schedules
@@ -239,22 +272,22 @@ coverage:
stage: post
coverage: '/TOTAL +\d+ +\d+ +(\d+\.\d+)%/'
script:
- pip3 install -r requirements/requirements.txt -r requirements/dev-requirements.txt
- pip3 install --no-index .
- mkdir report
- cd report
- cp ../coverage-unix/coverage.* .
- cp ../coverage-linux/coverage.* .
- ls coverage.*
- coverage combine --rcfile=../.coveragerc -a coverage.*
- coverage report --rcfile=../.coveragerc -m
- cp -a .coverage-reports/ ./coverage-sources
- tox -e coverage
- cp -a .coverage-reports/ ./coverage-report
dependencies:
- tests-debian-9
- tests-fedora-27
- tests-fedora-28
- tests-fedora-missing-deps
- tests-ubuntu-18.04
- tests-unix
except:
- schedules
artifacts:
paths:
- coverage-sources/
- coverage-report/
# Deploy, only for merges which land on master branch.
#
@@ -553,7 +553,7 @@ One problem which arises from this is that we end up having symbols
which are *public* according to the :ref:`rules discussed in the previous section
<contributing_public_and_private>`, but must be hidden away from the
*"Public API Surface"*. For example, BuildStream internal classes need
to invoke methods on the ``Element`` and ``Source`` classes, wheras these
to invoke methods on the ``Element`` and ``Source`` classes, whereas these
methods need to be hidden from the *"Public API Surface"*.
This is where BuildStream deviates from the PEP-8 standard for public
@@ -631,7 +631,7 @@ An element plugin will derive from Element by importing::
from buildstream import Element
When importing utilities specifically, dont import function names
When importing utilities specifically, don't import function names
from there, instead import the module itself::
from . import utils
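For example (a sketch; ``sha256sum()`` stands in for whichever helper is needed)::

    from . import utils

    # Call helpers through the module namespace, so the origin of the
    # function stays obvious at the call site.
    checksum = utils.sha256sum('files/hello.txt')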
@@ -737,7 +737,7 @@ Abstract methods
~~~~~~~~~~~~~~~~
In BuildStream, an *"Abstract Method"* is a bit of a misnomer and does
not match up to how Python defines abstract methods, we need to seek out
a new nomanclature to refer to these methods.
a new nomenclature to refer to these methods.
In Python, an *"Abstract Method"* is a method which **must** be
implemented by a subclass, whereas all methods in Python can be
@@ -960,7 +960,7 @@ possible, and avoid any cyclic relationships in modules.
For instance, the ``Source`` objects are owned by ``Element``
objects in the BuildStream data model, and as such the ``Element``
will delegate some activities to the ``Source`` objects in its
possesion. The ``Source`` objects should however never call functions
possession. The ``Source`` objects should however never call functions
on the ``Element`` object, nor should the ``Source`` object itself
have any understanding of what an ``Element`` is.
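To illustrate the direction of this relationship (an editor's sketch, not the
real class definitions)::

    class Element:
        def __init__(self, sources):
            self._sources = sources

        def _fetch(self):
            # The Element delegates work to the Source objects it owns...
            for source in self._sources:
                source.fetch()

    # ...while a Source never calls back into, or knows about, an Element.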
@@ -1223,7 +1223,7 @@ For further information about using the reStructuredText with sphinx, please see
Building Docs
~~~~~~~~~~~~~
Before you can build the docs, you will need to ensure that you have installed
the required :ref:`buid dependencies <contributing_build_deps>` as mentioned
the required :ref:`build dependencies <contributing_build_deps>` as mentioned
in the testing section above.
To build the documentation, just run the following::
@@ -1365,7 +1365,7 @@ Structure of an example
'''''''''''''''''''''''
The :ref:`tutorial <tutorial>` and the :ref:`examples <examples>` sections
of the documentation contain a series of sample projects, each chapter in
the tutoral, or standalone example uses a sample project.
the tutorial, or standalone example uses a sample project.
Here is the structure for adding new examples and tutorial chapters.
@@ -1471,8 +1471,8 @@ Installing build dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Some of BuildStream's dependencies have non-python build dependencies. When
running tests with ``tox``, you will first need to install these dependencies.
Exact steps to install these will depend on your oprtation systemm. Commands
for installing them for some common distributions are lised below.
Exact steps to install these will depend on your operating system. Commands
for installing them for some common distributions are listed below.
For Fedora-based systems::
@@ -1498,6 +1498,13 @@ option when running tox::
tox -e py37
If you would like to test and lint at the same time, or if you do have multiple
python versions installed and would like to test against multiple versions, then
we recommend using `detox <https://github.com/tox-dev/detox>`_, just run it with
the same arguments you would give `tox`::
detox -e lint,py36,py37
Linting is performed separately from testing. In order to run the linting step which
consists of running the ``pycodestyle`` and ``pylint`` tools, run the following::
@@ -1527,17 +1534,27 @@ You can always abort on the first failure by running::
tox -- -x
Similarly, you may also be interested in the ``--last-failed`` and
``--failed-first`` options as per the
`pytest cache <https://docs.pytest.org/en/latest/cache.html>`_ documentation.
If you want to run a specific test or a group of tests, you
can specify a prefix to match. E.g. if you want to run all of
the frontend tests you can do::
tox -- tests/frontend/
Specific tests can be chosen by using the :: delimeter after the test module.
Specific tests can be chosen by using the :: delimiter after the test module.
If you wanted to run the test_build_track test within frontend/buildtrack.py you could do::
tox -- tests/frontend/buildtrack.py::test_build_track
When running only a few tests, you may find the coverage and timing output
excessive; there are options to trim them. Note that the coverage step will fail.
Here is an example::
tox -- --no-cov --durations=1 tests/frontend/buildtrack.py::test_build_track
We also have a set of slow integration tests that are disabled by
default - you will notice most of them marked with SKIP in the pytest
output. To run them, you can use::
@@ -1553,7 +1570,7 @@ can run ``tox`` with ``-r`` or ``--recreate`` option.
.. note::
By default, we do not allow use of site packages in our ``tox``
confguration to enable running the tests in an isolated environment.
configuration to enable running the tests in an isolated environment.
If you need to enable use of site packages for whatever reason, you can
do so by passing the ``--sitepackages`` option to ``tox``. Also, you will
not need to install any of the build dependencies mentioned above if you
@@ -1574,10 +1591,23 @@ can run ``tox`` with ``-r`` or ``--recreate`` option.
./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track'
Observing coverage
~~~~~~~~~~~~~~~~~~
Once you have run the tests using `tox` (or `detox`), some coverage reports will
have been left behind.
To view the coverage report of the last test run, simply run::
tox -e coverage
This will collate any reports from separate python environments that may be
under test before displaying the combined coverage.
Adding tests
~~~~~~~~~~~~
Tests are found in the tests subdirectory, inside of which
there is a separarate directory for each *domain* of tests.
there is a separate directory for each *domain* of tests.
All tests are collected as::
tests/*/*.py
@@ -1752,7 +1782,7 @@ creating a tarball which contains everything we want it to include::
Updating BuildStream's Python dependencies
------------------------------------------
BuildStream's Python dependencies are listed in multiple
`requirements files <https://pip.readthedocs.io/en/latest/reference/pip_install/#requirements-file-format>`
`requirements files <https://pip.readthedocs.io/en/latest/reference/pip_install/#requirements-file-format>`_
present in the ``requirements`` directory.
All ``.txt`` files in this directory are generated from the corresponding
@@ -2,6 +2,16 @@
buildstream 1.3.1
=================
o BREAKING CHANGE: The top level commands `checkout`, `push` and `pull` have
been moved to the `bst artifact` subcommand group; the old top-level forms
are now obsolete. For example, you must now use `bst artifact pull hello.bst`.
The behaviour of `checkout` has changed. The previously mandatory LOCATION
argument should now be specified with the `--directory` option. In addition
to this, `--tar` is no longer a flag; it is an option mutually exclusive with
`--directory`. For example, `bst artifact checkout foo.bst --tar foo.tar.gz`.
o Added `bst artifact log` subcommand for viewing build logs.
o BREAKING CHANGE: The bst source-bundle command has been removed. The
@@ -19,18 +19,16 @@
import multiprocessing
import os
import signal
import string
from collections.abc import Mapping
from ..types import _KeyStrength
from .._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason
from .._message import Message, MessageType
from .. import _signals
from .. import utils
from .. import _yaml
from .types import _KeyStrength
from ._exceptions import ArtifactError, CASError, LoadError, LoadErrorReason
from ._message import Message, MessageType
from . import utils
from . import _yaml
from .cascache import CASRemote, CASRemoteSpec
from ._cas import CASRemote, CASRemoteSpec
CACHE_SIZE_FILE = "cache_size"
@@ -128,7 +126,7 @@ class ArtifactCache():
self._remotes_setup = True
# Initialize remote artifact caches. We allow the commandline to override
# the user config in some cases (for example `bst push --remote=...`).
# the user config in some cases (for example `bst artifact push --remote=...`).
has_remote_caches = False
if remote_url:
self._set_remotes([ArtifactCacheSpec(remote_url, push=True)])
@@ -249,7 +247,7 @@ class ArtifactCache():
# FIXME: Asking the user what to do may be neater
default_conf = os.path.join(os.environ['XDG_CONFIG_HOME'],
'buildstream.conf')
detail = ("There is not enough space to build the given element.\n"
detail = ("There is not enough space to complete the build.\n"
"Please increase the cache-quota in {}."
.format(self.context.config_origin or default_conf))
@@ -375,20 +373,8 @@ class ArtifactCache():
remotes = {}
q = multiprocessing.Queue()
for remote_spec in remote_specs:
# Use subprocess to avoid creation of gRPC threads in main BuildStream process
# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
p = multiprocessing.Process(target=self.cas.initialize_remote, args=(remote_spec, q))
try:
# Keep SIGINT blocked in the child process
with _signals.blocked([signal.SIGINT], ignore=False):
p.start()
error = q.get()
p.join()
except KeyboardInterrupt:
utils._kill_process_tree(p.pid)
raise
error = CASRemote.check_remote(remote_spec, q)
if error and on_failure:
on_failure(remote_spec.url, error)
@@ -747,7 +733,7 @@ class ArtifactCache():
"servers are configured as push remotes.")
for remote in push_remotes:
message_digest = self.cas.push_message(remote, message)
message_digest = remote.push_message(message)
return message_digest
@@ -896,16 +882,16 @@ class ArtifactCache():
else:
available = utils._pretty_size(available_space)
raise LoadError(LoadErrorReason.INVALID_DATA,
("Your system does not have enough available " +
"space to support the cache quota specified.\n" +
"\nYou have specified a quota of {quota} total disk space.\n" +
"- The filesystem containing {local_cache_path} only " +
"has: {available_size} available.")
.format(
quota=self.context.config_cache_quota,
local_cache_path=self.context.artifactdir,
available_size=available))
raise ArtifactError("Your system does not have enough available " +
"space to support the cache quota specified.",
detail=("You have specified a quota of {quota} total disk space.\n" +
"The filesystem containing {local_cache_path} only " +
"has {available_size} available.")
.format(
quota=self.context.config_cache_quota,
local_cache_path=self.context.artifactdir,
available_size=available),
reason='insufficient-storage-for-quota')
# Place a slight headroom (2e9 (2GB) on the cache_quota) into
# cache_quota to try and avoid exceptions.
@@ -17,4 +17,5 @@
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
from .artifactcache import ArtifactCache, ArtifactCacheSpec, CACHE_SIZE_FILE
from .cascache import CASCache
from .casremote import CASRemote, CASRemoteSpec
from collections import namedtuple
import io
import os
import multiprocessing
import signal
from urllib.parse import urlparse
import uuid
import grpc
from .. import _yaml
from .._protos.google.rpc import code_pb2
from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
from .._exceptions import CASRemoteError, LoadError, LoadErrorReason
from .. import _signals
from .. import utils
# The default limit for gRPC messages is 4 MiB.
# Limit payload to 1 MiB to leave sufficient headroom for metadata.
_MAX_PAYLOAD_BYTES = 1024 * 1024
class CASRemoteSpec(namedtuple('CASRemoteSpec', 'url push server_cert client_key client_cert instance_name')):
# _new_from_config_node
#
# Creates a CASRemoteSpec() from a YAML loaded node
#
@staticmethod
def _new_from_config_node(spec_node, basedir=None):
_yaml.node_validate(spec_node, ['url', 'push', 'server-cert', 'client-key', 'client-cert', 'instance_name'])
url = _yaml.node_get(spec_node, str, 'url')
push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
if not url:
provenance = _yaml.node_get_provenance(spec_node, 'url')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: empty artifact cache URL".format(provenance))
instance_name = _yaml.node_get(spec_node, str, 'instance_name', default_value=None)
server_cert = _yaml.node_get(spec_node, str, 'server-cert', default_value=None)
if server_cert and basedir:
server_cert = os.path.join(basedir, server_cert)
client_key = _yaml.node_get(spec_node, str, 'client-key', default_value=None)
if client_key and basedir:
client_key = os.path.join(basedir, client_key)
client_cert = _yaml.node_get(spec_node, str, 'client-cert', default_value=None)
if client_cert and basedir:
client_cert = os.path.join(basedir, client_cert)
if client_key and not client_cert:
provenance = _yaml.node_get_provenance(spec_node, 'client-key')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-key' was specified without 'client-cert'".format(provenance))
if client_cert and not client_key:
provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}: 'client-cert' was specified without 'client-key'".format(provenance))
return CASRemoteSpec(url, push, server_cert, client_key, client_cert, instance_name)
CASRemoteSpec.__new__.__defaults__ = (None, None, None, None)
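As an illustration, the spec produced from a fully populated configuration
node is equivalent to the following direct construction (an editor's sketch;
all values are made up)::

    spec = CASRemoteSpec('https://cache.example.com:11002', push=True,
                         server_cert='/path/to/server.crt',
                         client_key='/path/to/client.key',
                         client_cert='/path/to/client.crt',
                         instance_name=None)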
class BlobNotFound(CASRemoteError):
def __init__(self, blob, msg):
self.blob = blob
super().__init__(msg)
# Represents a single remote CAS cache.
#
class CASRemote():
def __init__(self, spec):
self.spec = spec
self._initialized = False
self.channel = None
self.bytestream = None
self.cas = None
self.ref_storage = None
self.batch_update_supported = None
self.batch_read_supported = None
self.capabilities = None
self.max_batch_total_size_bytes = None
def init(self):
if not self._initialized:
url = urlparse(self.spec.url)
if url.scheme == 'http':
port = url.port or 80
self.channel = grpc.insecure_channel('{}:{}'.format(url.hostname, port))
elif url.scheme == 'https':
port = url.port or 443
if self.spec.server_cert:
with open(self.spec.server_cert, 'rb') as f:
server_cert_bytes = f.read()
else:
server_cert_bytes = None
if self.spec.client_key:
with open(self.spec.client_key, 'rb') as f:
client_key_bytes = f.read()
else:
client_key_bytes = None
if self.spec.client_cert:
with open(self.spec.client_cert, 'rb') as f:
client_cert_bytes = f.read()
else:
client_cert_bytes = None
credentials = grpc.ssl_channel_credentials(root_certificates=server_cert_bytes,
private_key=client_key_bytes,
certificate_chain=client_cert_bytes)
self.channel = grpc.secure_channel('{}:{}'.format(url.hostname, port), credentials)
else:
raise CASRemoteError("Unsupported URL: {}".format(self.spec.url))
self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
try:
request = remote_execution_pb2.GetCapabilitiesRequest()
response = self.capabilities.GetCapabilities(request)
server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
except grpc.RpcError as e:
# Simply use the defaults for servers that don't implement GetCapabilities()
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
# Check whether the server supports BatchReadBlobs()
self.batch_read_supported = False
try:
request = remote_execution_pb2.BatchReadBlobsRequest()
response = self.cas.BatchReadBlobs(request)
self.batch_read_supported = True
except grpc.RpcError as e:
if e.code() != grpc.StatusCode.UNIMPLEMENTED:
raise
# Check whether the server supports BatchUpdateBlobs()
self.batch_update_supported = False
try:
request = remote_execution_pb2.BatchUpdateBlobsRequest()
response = self.cas.BatchUpdateBlobs(request)
self.batch_update_supported = True
except grpc.RpcError as e:
if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
e.code() != grpc.StatusCode.PERMISSION_DENIED):
raise
self._initialized = True
# check_remote
#
# Used when checking whether remote_specs work in the buildstream main
# thread; runs the check in a separate process to avoid creation of gRPC threads
# in the main BuildStream process
# See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
@classmethod
def check_remote(cls, remote_spec, q):
def __check_remote():
try:
remote = cls(remote_spec)
remote.init()
request = buildstream_pb2.StatusRequest()
response = remote.ref_storage.Status(request)
if remote_spec.push and not response.allow_updates:
q.put('CAS server does not allow push')
else:
# No error
q.put(None)
except grpc.RpcError as e:
# str(e) is too verbose for errors reported to the user
q.put(e.details())
except Exception as e: # pylint: disable=broad-except
# Whatever happens, we need to return it to the calling process
#
q.put(str(e))
p = multiprocessing.Process(target=__check_remote)
try:
# Keep SIGINT blocked in the child process
with _signals.blocked([signal.SIGINT], ignore=False):
p.start()
error = q.get()
p.join()
except KeyboardInterrupt:
utils._kill_process_tree(p.pid)
raise
return error
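With this helper in place, the call site in ``ArtifactCache`` (see the
earlier hunk) reduces to::

    q = multiprocessing.Queue()
    for remote_spec in remote_specs:
        error = CASRemote.check_remote(remote_spec, q)
        if error and on_failure:
            on_failure(remote_spec.url, error)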
# verify_digest_on_remote():
#
# Check whether the object is already on the server in which case
# there is no need to upload it.
#
# Args:
# digest (Digest): The object digest.
#
def verify_digest_on_remote(self, digest):
self.init()
request = remote_execution_pb2.FindMissingBlobsRequest()
request.blob_digests.extend([digest])
response = self.cas.FindMissingBlobs(request)
if digest in response.missing_blob_digests:
return False
return True
# push_message():
#
# Push the given protobuf message to a remote.
#
# Args:
# message (Message): A protobuf message to push.
#
# Raises:
# (CASRemoteError): if there was an error
#
def push_message(self, message):
message_buffer = message.SerializeToString()
message_digest = utils._message_digest(message_buffer)
self.init()
with io.BytesIO(message_buffer) as b:
self._send_blob(message_digest, b)
return message_digest
################################################
# Local Private Methods #
################################################
def _fetch_blob(self, digest, stream):
resource_name = '/'.join(['blobs', digest.hash, str(digest.size_bytes)])
request = bytestream_pb2.ReadRequest()
request.resource_name = resource_name
request.read_offset = 0
for response in self.bytestream.Read(request):
stream.write(response.data)
stream.flush()
assert digest.size_bytes == os.fstat(stream.fileno()).st_size
def _send_blob(self, digest, stream, u_uid=uuid.uuid4()):
resource_name = '/'.join(['uploads', str(u_uid), 'blobs',
digest.hash, str(digest.size_bytes)])
def request_stream(resname, instream):
offset = 0
finished = False
remaining = digest.size_bytes
while not finished:
chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
remaining -= chunk_size
request = bytestream_pb2.WriteRequest()
request.write_offset = offset
# max. _MAX_PAYLOAD_BYTES chunks
request.data = instream.read(chunk_size)
request.resource_name = resname
request.finish_write = remaining <= 0
yield request
offset += chunk_size
finished = request.finish_write
response = self.bytestream.Write(request_stream(resource_name, stream))
assert response.committed_size == digest.size_bytes
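For example, with the 1 MiB payload limit above, a 2.5 MiB blob is streamed
as three ``WriteRequest`` messages at write offsets 0, 1 MiB and 2 MiB, and
only the final (0.5 MiB) request has ``finish_write`` set.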
# Represents a batch of blobs queued for fetching.
#
class _CASBatchRead():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchReadBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
request_digest = self._request.digests.add()
request_digest.hash = digest.hash
request_digest.size_bytes = digest.size_bytes
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if not self._request.digests:
return
batch_response = self._remote.cas.BatchReadBlobs(self._request)
for response in batch_response.responses:
if response.status.code == code_pb2.NOT_FOUND:
raise BlobNotFound(response.digest.hash, "Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.status.code != code_pb2.OK:
raise CASRemoteError("Failed to download blob {}: {}".format(
response.digest.hash, response.status.code))
if response.digest.size_bytes != len(response.data):
raise CASRemoteError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
response.digest.hash, response.digest.size_bytes, len(response.data)))
yield (response.digest, response.data)
# Represents a batch of blobs queued for upload.
#
class _CASBatchUpdate():
def __init__(self, remote):
self._remote = remote
self._max_total_size_bytes = remote.max_batch_total_size_bytes
self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
self._size = 0
self._sent = False
def add(self, digest, stream):
assert not self._sent
new_batch_size = self._size + digest.size_bytes
if new_batch_size > self._max_total_size_bytes:
# Not enough space left in current batch
return False
blob_request = self._request.requests.add()
blob_request.digest.hash = digest.hash
blob_request.digest.size_bytes = digest.size_bytes
blob_request.data = stream.read(digest.size_bytes)
self._size = new_batch_size
return True
def send(self):
assert not self._sent
self._sent = True
if not self._request.requests:
return
batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
for response in batch_response.responses:
if response.status.code != code_pb2.OK:
raise CASRemoteError("Failed to upload blob {}: {}".format(
response.digest.hash, response.status.code))
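A driver loop for these batch helpers might look like the following sketch
(``blobs`` and ``store()`` are hypothetical; a blob larger than the batch
size limit would need the single-blob ByteStream path instead)::

    batch = _CASBatchUpdate(remote)
    for digest, stream in blobs:
        if not batch.add(digest, stream):
            # Current batch is full: flush it and start a new one
            batch.send()
            batch = _CASBatchUpdate(remote)
            batch.add(digest, stream)
    batch.send()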
@@ -31,7 +31,7 @@ from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType
from ._profile import Topics, profile_start, profile_end
from ._artifactcache import ArtifactCache
from ._artifactcache.cascache import CASCache
from ._cas import CASCache
from ._workspaces import Workspaces, WorkspaceProjectCache, WORKSPACE_PROJECT_FILE
from .plugin import _plugin_lookup
from .sandbox import SandboxRemote
@@ -317,11 +317,18 @@ class Context():
# invoked with as opposed to a junctioned subproject.
#
# Returns:
# (list): The list of projects
# (Project): The Project object
#
def get_toplevel_project(self):
return self._projects[0]
# get_workspaces():
#
# Return a Workspaces object containing a list of workspaces.
#
# Returns:
# (Workspaces): The Workspaces object
#
def get_workspaces(self):
return self._workspaces
@@ -262,8 +262,8 @@ class PlatformError(BstError):
# Raised when errors are encountered by the sandbox implementation
#
class SandboxError(BstError):
def __init__(self, message, reason=None):
super().__init__(message, domain=ErrorDomain.SANDBOX, reason=reason)
def __init__(self, message, detail=None, reason=None):
super().__init__(message, detail=detail, domain=ErrorDomain.SANDBOX, reason=reason)
# ArtifactError
@@ -284,6 +284,21 @@ class CASError(BstError):
super().__init__(message, detail=detail, domain=ErrorDomain.CAS, reason=reason, temporary=True)
# CASRemoteError
#
# Raised when errors are encountered in the remote CAS
class CASRemoteError(CASError):
pass
# CASCacheError
#
# Raised when errors are encountered in the local CASCache
#
class CASCacheError(CASError):
pass
# PipelineError
#
# Raised from pipeline operations
@@ -2,6 +2,7 @@ import os
import sys
from contextlib import ExitStack
from fnmatch import fnmatch
from functools import partial
from tempfile import TemporaryDirectory
import click
@@ -111,14 +112,25 @@ def complete_target(args, incomplete):
return complete_list
def complete_artifact(args, incomplete):
def complete_artifact(orig_args, args, incomplete):
from .._context import Context
ctx = Context()
config = None
for i, arg in enumerate(args):
if arg in ('-c', '--config'):
config = args[i + 1]
if orig_args:
for i, arg in enumerate(orig_args):
if arg in ('-c', '--config'):
try:
config = orig_args[i + 1]
except IndexError:
pass
if args:
for i, arg in enumerate(args):
if arg in ('-c', '--config'):
try:
config = args[i + 1]
except IndexError:
pass
ctx.load(config)
# element targets are valid artifact names
@@ -128,8 +140,9 @@ def complete_artifact(args, incomplete):
return complete_list
def override_completions(cmd, cmd_param, args, incomplete):
def override_completions(orig_args, cmd, cmd_param, args, incomplete):
"""
:param orig_args: original, non-completion args
:param cmd_param: command definition
:param args: full list of args typed before the incomplete arg
:param incomplete: the incomplete text to autocomplete
@@ -150,7 +163,7 @@ def override_completions(cmd, cmd_param, args, incomplete):
cmd_param.opts == ['--track-except']):
return complete_target(args, incomplete)
if cmd_param.name == 'artifacts':
return complete_artifact(args, incomplete)
return complete_artifact(orig_args, args, incomplete)
raise CompleteUnhandled()
@@ -161,7 +174,7 @@ def override_main(self, args=None, prog_name=None, complete_var=None,
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
if main_bashcomplete(self, prog_name, override_completions):
if main_bashcomplete(self, prog_name, partial(override_completions, args)):
# If we're running tests we can't just go calling exit()
# from the main process.
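A minimal illustration of what the ``partial`` binding does here (an editor's
example with made-up values)::

    from functools import partial

    def completions(orig_args, cmd, cmd_param, args, incomplete):
        return orig_args

    bound = partial(completions, ['bst', 'artifact', 'log'])
    # The completion machinery calls bound(cmd, cmd_param, args, incomplete);
    # the original argv arrives as the pre-bound first parameter.
    assert bound(None, None, [], '') == ['bst', 'artifact', 'log']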
@@ -355,78 +368,6 @@ def build(app, elements, all_, track_, track_save, track_all, track_except, trac
build_all=all_)
##################################################################
# Pull Command #
##################################################################
@cli.command(short_help="Pull a built artifact")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependency artifacts to pull (default: none)')
@click.option('--remote', '-r',
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def pull(app, elements, deps, remote):
"""Pull a built artifact from the configured remote artifact cache.
By default the artifact will be pulled from one of the configured caches
if possible, following the usual priority order. If the `--remote` flag
is given, only the specified cache will be queried.
Specify `--deps` to control which artifacts to pull:
\b
none: No dependencies, just the element itself
all: All dependencies
"""
with app.initialized(session_name="Pull"):
if not elements:
guessed_target = app.context.guess_element()
if guessed_target:
elements = (guessed_target,)
app.stream.pull(elements, selection=deps, remote=remote)
##################################################################
# Push Command #
##################################################################
@cli.command(short_help="Push a built artifact")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependencies to push (default: none)')
@click.option('--remote', '-r', default=None,
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def push(app, elements, deps, remote):
"""Push a built artifact to a remote artifact cache.
The default destination is the highest priority configured cache. You can
override this by passing a different cache URL with the `--remote` flag.
If bst has been configured to include build trees on artifact pulls,
an attempt will be made to pull any required build trees so that partial
artifacts are not skipped when being pushed.
Specify `--deps` to control which artifacts to push:
\b
none: No dependencies, just the element itself
all: All dependencies
"""
with app.initialized(session_name="Push"):
if not elements:
guessed_target = app.context.guess_element()
if guessed_target:
elements = (guessed_target,)
app.stream.push(elements, selection=deps, remote=remote)
##################################################################
# Show Command #
##################################################################
@@ -541,6 +482,12 @@ def shell(app, element, sysroot, mount, isolate, build_, cli_buildtree, command)
element, assuming it has already been built and all required
artifacts are in the local cache.
Use '--' to separate a command from the options to bst,
otherwise bst may respond to them instead. e.g.
\b
bst shell example.bst -- df -h
Use the --build option to create a temporary sysroot for
building the element instead.
@@ -606,67 +553,6 @@ def shell(app, element, sysroot, mount, isolate, build_, cli_buildtree, command)
sys.exit(exitcode)
##################################################################
# Checkout Command #
##################################################################
@cli.command(short_help="Checkout a built artifact")
@click.option('--force', '-f', default=False, is_flag=True,
help="Allow files to be overwritten")
@click.option('--deps', '-d', default='run',
type=click.Choice(['run', 'build', 'none']),
help='The dependencies to checkout (default: run)')
@click.option('--integrate/--no-integrate', default=True, is_flag=True,
help="Whether to run integration commands")
@click.option('--hardlinks', default=False, is_flag=True,
help="Checkout hardlinks instead of copies (handle with care)")
@click.option('--tar', default=False, is_flag=True,
help="Create a tarball from the artifact contents instead "
"of a file tree. If LOCATION is '-', the tarball "
"will be dumped to the standard output.")
@click.argument('element', required=False,
type=click.Path(readable=False))
@click.argument('location', type=click.Path(), required=False)
@click.pass_obj
def checkout(app, element, location, force, deps, integrate, hardlinks, tar):
"""Checkout a built artifact to the specified location
"""
from ..element import Scope
if not element and not location:
click.echo("ERROR: LOCATION is not specified", err=True)
sys.exit(-1)
if element and not location:
# Nasty hack to get around click's optional args
location = element
element = None
if hardlinks and tar:
click.echo("ERROR: options --hardlinks and --tar conflict", err=True)
sys.exit(-1)
if deps == "run":
scope = Scope.RUN
elif deps == "build":
scope = Scope.BUILD
elif deps == "none":
scope = Scope.NONE
with app.initialized():
if not element:
element = app.context.guess_element()
if not element:
raise AppError('Missing argument "ELEMENT".')
app.stream.checkout(element,
location=location,
force=force,
scope=scope,
integrate=integrate,
hardlinks=hardlinks,
tar=tar)
##################################################################
# Source Command #
##################################################################
@@ -1010,6 +896,147 @@ def artifact():
"""Manipulate cached artifacts"""
#####################################################################
# Artifact Checkout Command #
#####################################################################
@artifact.command(name='checkout', short_help="Checkout contents of an artifact")
@click.option('--force', '-f', default=False, is_flag=True,
help="Allow files to be overwritten")
@click.option('--deps', '-d', default=None,
type=click.Choice(['run', 'build', 'none']),
help='The dependencies to checkout (default: run)')
@click.option('--integrate/--no-integrate', default=None, is_flag=True,
help="Whether to run integration commands")
@click.option('--hardlinks', default=False, is_flag=True,
help="Checkout hardlinks instead of copying if possible")
@click.option('--tar', default=None, metavar='LOCATION',
type=click.Path(),
help="Create a tarball from the artifact contents instead "
"of a file tree. If LOCATION is '-', the tarball "
"will be dumped to the standard output.")
@click.option('--directory', default=None,
type=click.Path(file_okay=False),
help="The directory to checkout the artifact to")
@click.argument('element', required=False,
type=click.Path(readable=False))
@click.pass_obj
def artifact_checkout(app, force, deps, integrate, hardlinks, tar, directory, element):
"""Checkout contents of an artifact"""
from ..element import Scope
if hardlinks and tar is not None:
click.echo("ERROR: options --hardlinks and --tar conflict", err=True)
sys.exit(-1)
if tar is None and directory is None:
click.echo("ERROR: One of --directory or --tar must be provided", err=True)
sys.exit(-1)
if tar is not None and directory is not None:
click.echo("ERROR: options --directory and --tar conflict", err=True)
sys.exit(-1)
if tar is not None:
location = tar
tar = True
else:
location = os.getcwd() if directory is None else directory
tar = False
if deps == "build":
scope = Scope.BUILD
elif deps == "none":
scope = Scope.NONE
else:
scope = Scope.RUN
with app.initialized():
if not element:
element = app.context.guess_element()
if not element:
raise AppError('Missing argument "ELEMENT".')
app.stream.checkout(element,
location=location,
force=force,
scope=scope,
integrate=True if integrate is None else integrate,
hardlinks=hardlinks,
tar=tar)
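For example: ``bst artifact checkout foo.bst --directory ./checkout`` checks
out to a directory, while ``bst artifact checkout foo.bst --tar foo.tar.gz``
writes a tarball (element name as in the NEWS entry above); exactly one of
``--directory`` and ``--tar`` must be given.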
################################################################
# Artifact Pull Command #
################################################################
@artifact.command(name="pull", short_help="Pull a built artifact")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependency artifacts to pull (default: none)')
@click.option('--remote', '-r',
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def artifact_pull(app, elements, deps, remote):
"""Pull a built artifact from the configured remote artifact cache.
By default the artifact will be pulled from one of the configured caches
if possible, following the usual priority order. If the `--remote` flag
is given, only the specified cache will be queried.
Specify `--deps` to control which artifacts to pull:
\b
none: No dependencies, just the element itself
all: All dependencies
"""
with app.initialized(session_name="Pull"):
if not elements:
guessed_target = app.context.guess_element()
if guessed_target:
elements = (guessed_target,)
app.stream.pull(elements, selection=deps, remote=remote)
##################################################################
# Artifact Push Command #
##################################################################
@artifact.command(name="push", short_help="Push a built artifact")
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependencies to push (default: none)')
@click.option('--remote', '-r', default=None,
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def artifact_push(app, elements, deps, remote):
"""Push a built artifact to a remote artifact cache.
The default destination is the highest priority configured cache. You can
override this by passing a different cache URL with the `--remote` flag.
If bst has been configured to include build trees on artifact pulls,
an attempt will be made to pull any required build trees so that partial
artifacts are not skipped when being pushed.
Specify `--deps` to control which artifacts to push:
\b
none: No dependencies, just the element itself
all: All dependencies
"""
with app.initialized(session_name="Push"):
if not elements:
guessed_target = app.context.guess_element()
if guessed_target:
elements = (guessed_target,)
app.stream.push(elements, selection=deps, remote=remote)
################################################################
# Artifact Log Command #
################################################################
@@ -1079,7 +1106,7 @@ def artifact_log(app, artifacts):
##################################################################
# Fetch Command #
##################################################################
@cli.command(short_help="Fetch sources in a pipeline", hidden=True)
@cli.command(short_help="COMMAND OBSOLETE - Fetch sources in a pipeline", hidden=True)
@click.option('--except', 'except_', multiple=True,
type=click.Path(readable=False),
help="Except certain dependencies from fetching")
@@ -1101,7 +1128,7 @@ def fetch(app, elements, deps, track_, except_, track_cross_junctions):
##################################################################
# Track Command #
##################################################################
@cli.command(short_help="Track new source references", hidden=True)
@cli.command(short_help="COMMAND OBSOLETE - Track new source references", hidden=True)
@click.option('--except', 'except_', multiple=True,
type=click.Path(readable=False),
help="Except certain dependencies from tracking")
@@ -1116,3 +1143,64 @@ def fetch(app, elements, deps, track_, except_, track_cross_junctions):
def track(app, elements, deps, except_, cross_junctions):
click.echo("This command is now obsolete. Use `bst source track` instead.", err=True)
sys.exit(1)
##################################################################
# Checkout Command #
##################################################################
@cli.command(short_help="COMMAND OBSOLETE - Checkout a built artifact", hidden=True)
@click.option('--force', '-f', default=False, is_flag=True,
help="Allow files to be overwritten")
@click.option('--deps', '-d', default='run',
type=click.Choice(['run', 'build', 'none']),
help='The dependencies to checkout (default: run)')
@click.option('--integrate/--no-integrate', default=True, is_flag=True,
help="Whether to run integration commands")
@click.option('--hardlinks', default=False, is_flag=True,
help="Checkout hardlinks instead of copies (handle with care)")
@click.option('--tar', default=False, is_flag=True,
help="Create a tarball from the artifact contents instead "
"of a file tree. If LOCATION is '-', the tarball "
"will be dumped to the standard output.")
@click.argument('element', required=False,
type=click.Path(readable=False))
@click.argument('location', type=click.Path(), required=False)
@click.pass_obj
def checkout(app, element, location, force, deps, integrate, hardlinks, tar):
click.echo("This command is now obsolete. Use `bst artifact checkout` instead " +
"and use the --directory option to specify LOCATION", err=True)
sys.exit(1)
################################################################
# Pull Command #
################################################################
@cli.command(short_help="COMMAND OBSOLETE - Pull a built artifact", hidden=True)
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependency artifacts to pull (default: none)')
@click.option('--remote', '-r',
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def pull(app, elements, deps, remote):
click.echo("This command is now obsolete. Use `bst artifact pull` instead.", err=True)
sys.exit(1)
##################################################################
# Push Command #
##################################################################
@cli.command(short_help="COMMAND OBSOLETE - Push a built artifact", hidden=True)
@click.option('--deps', '-d', default='none',
type=click.Choice(['none', 'all']),
help='The dependencies to push (default: none)')
@click.option('--remote', '-r', default=None,
help="The URL of the remote cache (defaults to the first configured cache)")
@click.argument('elements', nargs=-1,
type=click.Path(readable=False))
@click.pass_obj
def push(app, elements, deps, remote):
click.echo("This command is now obsolete. Use `bst artifact push` instead.", err=True)
sys.exit(1)
@@ -647,8 +647,9 @@ class LogLine(Widget):
abbrev = False
if message.message_type not in ERROR_MESSAGES \
and not frontend_message and n_lines > self._message_lines:
abbrev = True
lines = lines[0:self._message_lines]
if self._message_lines > 0:
abbrev = True
else:
lines[n_lines - 1] = lines[n_lines - 1].rstrip('\n')
@@ -674,7 +675,7 @@ class LogLine(Widget):
if self.context is not None and not self.context.log_verbose:
text += self._indent + self._err_profile.fmt("Log file: ")
text += self._indent + self._logfile_widget.render(message) + '\n'
else:
elif self._log_lines > 0:
text += self._indent + self._err_profile.fmt("Printing the last {} lines from log file:"
.format(self._log_lines)) + '\n'
text += self._indent + self._logfile_widget.render(message, abbrev=False) + '\n'
@@ -112,7 +112,8 @@ class GitMirror(SourceFetcher):
else:
remote_name = "origin"
self.source.call([self.source.host_git, 'fetch', remote_name, '--prune', '--force', '--tags'],
self.source.call([self.source.host_git, 'fetch', remote_name, '--prune',
'+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*'],
fail="Failed to fetch from remote git repository: {}".format(url),
fail_temporarily=True,
cwd=self.mirror)
@@ -296,18 +297,24 @@ class GitMirror(SourceFetcher):
shallow = set()
for _, commit_ref, _ in self.tags:
_, out = self.source.check_output([self.source.host_git, 'rev-list',
'--boundary', '{}..{}'.format(commit_ref, self.ref)],
fail="Failed to get git history {}..{} in directory: {}"
.format(commit_ref, self.ref, fullpath),
fail_temporarily=True,
cwd=self.mirror)
for line in out.splitlines():
rev = line.lstrip('-')
if line[0] == '-':
shallow.add(rev)
else:
included.add(rev)
if commit_ref == self.ref:
# rev-list does not work in case of same rev
shallow.add(self.ref)
else:
_, out = self.source.check_output([self.source.host_git, 'rev-list',
'--ancestry-path', '--boundary',
'{}..{}'.format(commit_ref, self.ref)],
fail="Failed to get git history {}..{} in directory: {}"
.format(commit_ref, self.ref, fullpath),
fail_temporarily=True,
cwd=self.mirror)
self.source.warn("refs {}..{}: {}".format(commit_ref, self.ref, out.splitlines()))
for line in out.splitlines():
rev = line.lstrip('-')
if line[0] == '-':
shallow.add(rev)
else:
included.add(rev)
shallow -= included
included |= shallow
@@ -17,6 +17,8 @@
# Authors:
# Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
from .. import _yaml
from .._exceptions import LoadError, LoadErrorReason, PlatformError
from .._platform import Platform
from .optionenum import OptionEnum
@@ -41,7 +43,34 @@ class OptionArch(OptionEnum):
super(OptionArch, self).load(node, allow_default_definition=False)
def load_default_value(self, node):
return Platform.get_host_arch()
arch = Platform.get_host_arch()
default_value = None
for index, value in enumerate(self.values):
try:
canonical_value = Platform.canonicalize_arch(value)
if default_value is None and canonical_value == arch:
default_value = value
# Do not terminate the loop early to ensure we validate
# all values in the list.
except PlatformError as e:
provenance = _yaml.node_get_provenance(node, key='values', indices=[index])
prefix = ""
if provenance:
prefix = "{}: ".format(provenance)
raise LoadError(LoadErrorReason.INVALID_DATA,
"{}Invalid value for {} option '{}': {}"
.format(prefix, self.OPTION_TYPE, self.name, e))
if default_value is None:
# Host architecture is not supported by the project.
# Do not raise an error here as the user may override it.
# If the user does not override it, an error will be raised
# by resolve()/validate().
default_value = arch
return default_value
def resolve(self):
@@ -22,6 +22,7 @@
import os
import itertools
from operator import itemgetter
from collections import OrderedDict
from ._exceptions import PipelineError
from ._message import Message, MessageType
@@ -479,7 +480,7 @@ class Pipeline():
#
class _Planner():
def __init__(self):
self.depth_map = {}
self.depth_map = OrderedDict()
self.visiting_elements = set()
# Here we want to traverse the same element more than once when
@@ -77,20 +77,17 @@ class Platform():
def get_host_os():
return os.uname()[0]
# get_host_arch():
# canonicalize_arch():
#
# This returns the architecture of the host machine. The possible values
# map from uname -m in order to be a OS independent list.
# This returns the canonical, OS-independent architecture name
# or raises a PlatformError if the architecture is unknown.
#
# Returns:
# (string): String representing the architecture
@staticmethod
def get_host_arch():
# get the hardware identifier from uname
uname_machine = os.uname()[4]
uname_to_arch = {
def canonicalize_arch(arch):
aliases = {
"aarch32": "aarch32",
"aarch64": "aarch64",
"aarch64_be": "aarch64-be",
"aarch64-be": "aarch64-be",
"amd64": "x86-64",
"arm": "aarch32",
"armv8l": "aarch64",
@@ -99,17 +96,34 @@
"i486": "x86-32",
"i586": "x86-32",
"i686": "x86-32",
"power-isa-be": "power-isa-be",
"power-isa-le": "power-isa-le",
"ppc64": "power-isa-be",
"ppc64le": "power-isa-le",
"sparc": "sparc-v9",
"sparc64": "sparc-v9",
"x86_64": "x86-64"
"sparc-v9": "sparc-v9",
"x86-32": "x86-32",
"x86-64": "x86-64"
}
try:
return uname_to_arch[uname_machine]
return aliases[arch.replace('_', '-')]
except KeyError:
raise PlatformError("uname gave unsupported machine architecture: {}"
.format(uname_machine))
raise PlatformError("Unknown architecture: {}".format(arch))
# get_host_arch():
#
# This returns the architecture of the host machine. The possible values
# map from uname -m in order to be a OS independent list.
#
# Returns:
# (string): String representing the architecture
@staticmethod
def get_host_arch():
# get the hardware identifier from uname
uname_machine = os.uname()[4]
return Platform.canonicalize_arch(uname_machine)
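For example, given the alias table above::

    Platform.canonicalize_arch('x86_64')      # -> 'x86-64'
    Platform.canonicalize_arch('aarch64_be')  # -> 'aarch64-be'
    Platform.canonicalize_arch('ppc64le')     # -> 'power-isa-le'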
##################################################################
# Sandbox functions #
@@ -62,15 +62,24 @@ class Profile():
def end(self):
self.profiler.disable()
dt = datetime.datetime.fromtimestamp(self.start)
timestamp = dt.strftime('%Y%m%dT%H%M%S')
filename = self.key.replace('/', '-')
filename = filename.replace('.', '-')
filename = os.path.join(os.getcwd(), 'profile-' + filename + '.log')
filename = os.path.join(os.getcwd(), 'profile-' + timestamp + '-' + filename)
with open(filename, "a", encoding="utf-8") as f:
time_ = dt.strftime('%Y-%m-%d %H:%M:%S') # Human friendly format
self.__write_log(filename + '.log', time_)
self.__write_binary(filename + '.cprofile')
dt = datetime.datetime.fromtimestamp(self.start)
time_ = dt.strftime('%Y-%m-%d %H:%M:%S')
########################################
# Private Methods #
########################################
def __write_log(self, filename, time_):
with open(filename, "a", encoding="utf-8") as f:
heading = '================================================================\n'
heading += 'Profile for key: {}\n'.format(self.key)
heading += 'Started at: {}\n'.format(time_)
@@ -81,6 +90,9 @@ class Profile():
ps = pstats.Stats(self.profiler, stream=f).sort_stats('cumulative')
ps.print_stats()
def __write_binary(self, filename):
self.profiler.dump_stats(filename)
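Since ``__write_binary()`` writes a raw ``cProfile`` dump, the file can be
inspected later with the standard library (the filename here is
illustrative)::

    import pstats
    stats = pstats.Stats('profile-20181001T120000-mykey.cprofile')
    stats.sort_stats('cumulative').print_stats(10)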
# profile_start()
#
@@ -34,8 +34,8 @@ class CacheSizeJob(Job):
if status == JobStatus.OK:
self._artifacts.set_cache_size(result)
if self._complete_cb:
self._complete_cb(result)
if self._complete_cb:
self._complete_cb(status, result)
def child_process_data(self):
return {}