diff --git a/.gitignore b/.gitignore
index 90a50449fd7111162682f9dbb38022e12b0e6908..fae0f4eea80dccca9605f225bf965cd69880b59b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,7 +25,9 @@ __pycache__/
 # Generated version file
 buildstream/__version__.py
 
-#Autogenerated doc
+# Autogenerated doc
+doc/source/badges/
+doc/source/sessions/
 doc/source/elements/
 doc/source/sources/
 doc/source/modules.rst
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f68571d9cc6122220026e094e11e79992f37ab08..07fa859d8ac67937eaecda037f7d9c237ae4cbb1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -26,6 +26,11 @@ source_dist:
   - tar -ztf dist/*
   - tarball=$(cd dist && echo $(ls *))
 
+  # Verify that the source distribution tarball can be installed correctly
+  #
+  - pip3 install dist/*.tar.gz
+  - bst --version
+
   # unpack tarball as `dist/buildstream` directory
   - |
     cat > dist/unpack.sh << EOF
@@ -74,30 +79,32 @@ source_dist:
   - cd ../..
   - mkdir -p coverage-linux/
   - cp dist/buildstream/.coverage.* coverage-linux/coverage."${CI_JOB_NAME}"
+  except:
+  - schedules
   artifacts:
     paths:
     - coverage-linux/
 
 tests-debian-9:
-  image: buildstream/testsuite-debian:9-master-114-4cab18e3
+  image: buildstream/testsuite-debian:9-master-117-aa3a33b3
   <<: *linux-tests
 
 tests-fedora-27:
-  image: buildstream/testsuite-fedora:27-master-114-4cab18e3
+  image: buildstream/testsuite-fedora:27-master-117-aa3a33b3
   <<: *linux-tests
 
 tests-fedora-28:
-  image: buildstream/testsuite-fedora:28-master-114-4cab18e3
+  image: buildstream/testsuite-fedora:28-master-117-aa3a33b3
   <<: *linux-tests
 
 tests-ubuntu-18.04:
-  image: buildstream/testsuite-ubuntu:18.04-master-114-4cab18e3
+  image: buildstream/testsuite-ubuntu:18.04-master-117-aa3a33b3
   <<: *linux-tests
 
 tests-unix:
   # Use fedora here, to a) run a test on fedora and b) ensure that we
   # can get rid of ostree - this is not possible with debian-8
-  image: buildstream/testsuite-fedora:27-master-114-4cab18e3
+  image: buildstream/testsuite-fedora:27-master-117-aa3a33b3
   stage: test
   variables:
     BST_FORCE_BACKEND: "unix"
@@ -122,6 +129,8 @@ tests-unix:
     - cd ../..
     - mkdir -p coverage-unix/
     - cp dist/buildstream/.coverage.* coverage-unix/coverage.unix
+  except:
+  - schedules
   artifacts:
     paths:
     - coverage-unix/
@@ -136,29 +145,55 @@ docs:
   stage: test
   script:
   - export BST_SOURCE_CACHE="$(pwd)/cache/integration-cache/sources"
-  - pip3 install sphinx
+  # Currently sphinx_rtd_theme does not support Sphinx >1.8, which breaks the search functionality
+  - pip3 install sphinx==1.7.9
   - pip3 install sphinx-click
   - pip3 install sphinx_rtd_theme
   - cd dist && ./unpack.sh && cd buildstream
   - make BST_FORCE_SESSION_REBUILD=1 -C doc
   - cd ../..
   - mv dist/buildstream/doc/build/html public
+  except:
+  - schedules
   artifacts:
     paths:
     - public/
 
+.overnight-tests: &overnight-tests-template
+  stage: test
+  variables:
+    BST_EXT_URL: git+https://gitlab.com/BuildStream/bst-external.git
+    BST_EXT_REF: 1d6ab71151b93c8cbc0a91a36ffe9270f3b835f1 # 0.5.1
+    FD_SDK_REF: 88d7c22c2281b987faa02edd57df80d430eecf1f # 18.08.11-35-g88d7c22c
+  before_script:
+  - (cd dist && ./unpack.sh && cd buildstream && pip3 install .)
+  - pip3 install --user -e ${BST_EXT_URL}@${BST_EXT_REF}#egg=bst_ext
+  - git clone https://gitlab.com/freedesktop-sdk/freedesktop-sdk.git
+  - git -C freedesktop-sdk checkout ${FD_SDK_REF}
+  only:
+  - schedules
 
-#####################################################
-#                    Post stage                     #
-#####################################################
+overnight-tests:
+  <<: *overnight-tests-template
+  script:
+  - make -C freedesktop-sdk
+  tags:
+  - overnight-tests
 
-# Check code quality with codeclimate
-# This needs some refactoring; we probably just want to provide the codeclimate.json directly
-# as an output of radon, with some conversion
+overnight-tests-no-cache:
+  <<: *overnight-tests-template
+  script:
+  - sed -i '/artifacts:/,+1 d' freedesktop-sdk/bootstrap/project.conf
+  - sed -i '/artifacts:/,+1 d' freedesktop-sdk/project.conf
+  - make -C freedesktop-sdk
+  tags:
+  - overnight-tests
+
+# Check code quality with GitLab's built-in feature.
 #
-codequality:
+code_quality:
   image: docker:stable
-  stage: post
+  stage: test
   variables:
     DOCKER_DRIVER: overlay2
   allow_failure: true
@@ -171,8 +206,14 @@ codequality:
         --volume "$PWD":/code
         --volume /var/run/docker.sock:/var/run/docker.sock
         "registry.gitlab.com/gitlab-org/security-products/codequality:$SP_VERSION" /code
+  except:
+  - schedules
   artifacts:
-    paths: [codeclimate.json]
+    paths: [gl-code-quality-report.json]
+
+#####################################################
+#                    Post stage                     #
+#####################################################
 
 analysis:
   stage: post
@@ -196,6 +237,8 @@ analysis:
     radon raw -s -j buildstream > analysis/raw.json
     radon raw -s buildstream
 
+  except:
+  - schedules
   artifacts:
     paths:
     - analysis/
@@ -221,6 +264,8 @@ coverage:
   - tests-fedora-28
   - tests-unix
   - source_dist
+  except:
+  - schedules
 
 # Deploy, only for merges which land on master branch.
 #
@@ -229,8 +274,14 @@ pages:
   dependencies:
   - source_dist
   - docs
+  variables:
+    ACME_DIR: public/.well-known/acme-challenge
   script:
-  - find public/
+  - mkdir -p ${ACME_DIR}
+    # Required to finish the creation of the Let's Encrypt certificate,
+    # which allows using https://docs.buildstream.build/ for accessing
+    # the documentation.
+  - echo ${ACME_CHALLENGE} > ${ACME_DIR}/$(echo ${ACME_CHALLENGE} | cut -c1-43)
   artifacts:
     paths:
     - public/
@@ -245,3 +296,5 @@ pages:
   # See https://gitlab.com/gitlab-org/gitlab-ce/issues/35141
   #
   - master
+  except:
+  - schedules
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0f872e9e18b631d21e8bbc97ca37b0ef000e13a1
--- /dev/null
+++ b/CONTRIBUTING.rst
@@ -0,0 +1,1631 @@
+Contributing
+============
+Some tips and guidelines for developers hacking on BuildStream.
+
+
+.. _contributing_filing_issues:
+
+Filing issues
+-------------
+If you are experiencing an issue with BuildStream, or would like to submit a patch
+to fix an issue, then you should first search the list of `open issues <https://gitlab.com/BuildStream/buildstream/issues>`_
+to see if the issue is already filed, and `open an issue <https://gitlab.com/BuildStream/buildstream/issues/new>`_
+if no issue already exists.
+
+For policies on how to submit an issue and how to use our project labels,
+we recommend that you read the `policies guide
+<https://gitlab.com/BuildStream/nosoftware/alignment/blob/master/BuildStream_policies.md>`_.
+
+
+.. _contributing_fixing_bugs:
+
+Fixing bugs
+-----------
+Before fixing a bug, it is preferred that an :ref:`issue be filed <contributing_filing_issues>`
+first in order to better document the defect; however, this need not be followed to the
+letter for minor fixes.
+
+Patches which fix bugs should always come with a regression test.
+
+
+.. _contributing_adding_features:
+
+Adding new features
+-------------------
+Feature additions should be proposed on the `mailing list
+<https://mail.gnome.org/mailman/listinfo/buildstream-list>`_
+before being considered for inclusion. To save time and avoid any frustration,
+we strongly recommend proposing your new feature in advance of commencing work.
+
+Once consensus has been reached on the mailing list, then the proposing
+party should :ref:`file an issue <contributing_filing_issues>` to track the
+work. Please use the *bst_task* template for issues which represent
+feature additions.
+
+New features must be well documented and tested in our test suite.
+
+It is expected that the individual submitting the work take ownership
+of their feature within BuildStream for a reasonable timeframe of at least
+one release cycle after their work has landed on the master branch. This is
+to say that the submitter is expected to address and fix any side effects,
+bugs or regressions which may have fallen through the cracks in the review
+process, giving us a reasonable timeframe for identifying these.
+
+
+.. _contributing_submitting_patches:
+
+Submitting patches
+------------------
+
+
+Ask for developer access
+~~~~~~~~~~~~~~~~~~~~~~~~
+If you want to submit a patch, do ask for developer permissions, either
+by asking us directly on our public IRC channel (irc://irc.gnome.org/#buildstream)
+or by visiting our `project page on GitLab <https://gitlab.com/BuildStream/buildstream>`_
+and using the GitLab UI to ask for permission.
+
+This will make your contribution experience smoother, as you will not
+need to set up any complicated CI settings, and rebasing your branch
+against the upstream master branch will be more painless.
+
+
+Branch names
+~~~~~~~~~~~~
+Branch names for merge requests should be prefixed with the submitter's
+name or nickname, followed by a forward slash, and then a descriptive
+name. e.g.::
+
+  username/fix-that-bug
+
+This allows us to more easily identify which branch does what and
+belongs to whom, especially so that we can effectively clean up stale
+branches in the upstream repository over time.
+
+
+Merge requests
+~~~~~~~~~~~~~~
+Once you have created a local branch, you can push it to the upstream
+BuildStream repository using the command line::
+
+  git push origin username/fix-that-bug:username/fix-that-bug
+
+GitLab will respond to this with a message and a link to allow you to create
+a new merge request. You can also `create a merge request for an existing branch
+<https://gitlab.com/BuildStream/buildstream/merge_requests/new>`_.
+
+You may open merge requests for the branches you create before you are ready
+to have them reviewed and considered for inclusion if you like. Until your merge
+request is ready for review, the merge request title must be prefixed with the
+``WIP:`` identifier.
+
+
+Organized commits
+~~~~~~~~~~~~~~~~~
+Submitted branches must not contain a history of the work done in the
+feature branch. For example, if you had to change your approach, or
+have a later commit which fixes something in a previous commit on your
+branch, we do not want to include the history of how you came up with
+your patch in the upstream master branch.
+
+Please use git's interactive rebase feature in order to compose a clean
+patch series suitable for submission upstream.
+
+Every commit in the series should pass the test suite; this is very important
+for tracking down regressions and performing git bisections in the future.
+
+We prefer that documentation changes be submitted in separate commits from
+the code changes which they document, and newly added test cases are also
+preferred in separate commits.
+
+If a commit in your branch modifies behavior such that a test must also
+be changed to match the new behavior, then the tests should be updated
+with the same commit, so that every commit passes its own tests.
+
+
+Commit messages
+~~~~~~~~~~~~~~~
+Commit messages must be formatted with a brief summary line, followed by
+an empty line and then a free form detailed description of the change.
+
+The summary line must start with what changed, followed by a colon and
+a very brief description of the change.
+
+If the commit fixes an issue, or is related to an issue, then the issue
+number must be referenced in the commit message.
+
+**Example**::
+
+  element.py: Added the frobnicator so that foos are properly frobbed.
+
+  The new frobnicator frobnicates foos all the way throughout
+  the element. Elements that are not properly frobnicated raise
+  an error to inform the user of invalid frobnication rules.
+
+  Fixes #123
+
+In the case that you have a commit which necessarily modifies multiple
+components, then the summary line should still mention generally what
+changed (if possible), followed by a colon and a brief summary.
+
+In this case the free form detailed description of the change should
+contain a bullet list describing what was changed in each component
+separately.
+
+**Example**::
+
+  artifact cache: Fixed automatic expiry in the local cache
+
+    o _artifactcache/artifactcache.py: Updated the API contract
+      of ArtifactCache.remove() so that something detailed is
+      explained here.
+
+    o _artifactcache/cascache.py: Adhere to the new API contract
+      dictated by the abstract ArtifactCache class.
+
+    o tests/artifactcache/expiry.py: Modified test expectations to
+      match the new behavior.
+
+  This is a part of #123
+
+
+Coding guidelines
+-----------------
+This section discusses coding style and other guidelines for hacking
+on BuildStream. This is important to read through for writing any non-trivial
+patches and especially outlines what people should watch out for when
+reviewing patches.
+
+Much of the rationale behind what is laid out in this section considers
+good traceability of lines of code with *git blame*, overall sensible
+modular structure, consistency in how we write code, and long term maintenance
+in mind.
+
+
+Approximate PEP-8 Style
+~~~~~~~~~~~~~~~~~~~~~~~
+Python coding style for BuildStream is approximately `pep8 <https://www.python.org/dev/peps/pep-0008/>`_.
+
+We have a couple of minor exceptions to this standard; we don't want to compromise
+code readability by being overly restrictive on line length for instance.
+
+The pep8 linter will run automatically when :ref:`running the test suite <contributing_testing>`.
+
+
+Line lengths
+''''''''''''
+Regarding laxness on the line length in our linter settings, it should be clarified
+that the line length limit is a hard limit which causes the linter to bail out
+and reject commits which exceed it - not an invitation to write exceedingly
+long lines of code, comments, or API documenting docstrings.
+
+Code, comments and docstrings should strive to stay within approximately 80
+or 90 character lines, where exceptions can be made when code would be less readable
+when exceeding 80 or 90 characters (often this happens in conditional statements
+when raising an exception, for example). Or, when comments contain a long link that
+causes the given line to exceed 80 or 90 characters, we don't want this to cause
+the linter to refuse the commit.
+
+
+.. _contributing_documenting_symbols:
+
+Documenting symbols
+~~~~~~~~~~~~~~~~~~~
+In BuildStream, we maintain what we call a *"Public API Surface"* that
+is guaranteed to be stable and unchanging across stable releases. The
+symbols which fall into this special class are documented using Python's
+standard *docstrings*, while all other internals of BuildStream are documented
+with comments above the related symbol.
+
+When documenting the public API surface which is rendered in the reference
+manual, we always mention the major version in which the API was introduced,
+as shown in the examples below. If a public API exists without the *Since*
+annotation, this is taken to mean that it was available since the first stable
+release 1.0.
+
+Here are some examples to get the hang of the format of API documenting
+comments and docstrings.
+
+**Public API Surface method**::
+
+  def frobnicate(self, source, *, frobilicious=False):
+      """Frobnicates this element with the specified source
+
+      Args:
+         source (Source): The Source to frobnicate with
+         frobilicious (bool): Optionally specify that frobnication should be
+                              performed fribiliciously
+
+      Returns:
+         (Element): The frobnicated version of this Element.
+
+      *Since: 1.2*
+      """
+      ...
+
+**Internal method**::
+
+  # frobnicate():
+  #
+  # Frobnicates this element with the specified source
+  #
+  # Args:
+  #    source (Source): The Source to frobnicate with
+  #    frobilicious (bool): Optionally specify that frobnication should be
+  #                         performed fribiliciously
+  #
+  # Returns:
+  #    (Element): The frobnicated version of this Element.
+  #
+  def frobnicate(self, source, *, frobilicious=False):
+      ...
+
+**Public API Surface instance variable**::
+
+  def __init__(self, context, element):
+
+    self.name = self._compute_name(context, element)
+    """The name of this foo
+
+    *Since: 1.2*
+    """
+
+.. note::
+
+   Python does not support docstrings on instance variables, but sphinx does
+   pick them up and includes them in the generated documentation.
+
+**Internal instance variable**::
+
+  def __init__(self, context, element):
+
+    self.name = self._compute_name(context, element) # The name of this foo
+
+**Internal instance variable (long)**::
+
+  def __init__(self, context, element):
+
+    # This instance variable required a longer explanation, so
+    # it is on a line above the instance variable declaration.
+    self.name = self._compute_name(context, element)
+
+
+**Public API Surface class**::
+
+  class Foo(Bar):
+      """The main Foo object in the data model
+
+      Explanation about Foo. Note that we always document
+      the constructor arguments here, and not beside the __init__
+      method.
+
+      Args:
+         context (Context): The invocation Context
+         count (int): The number to count
+
+      *Since: 1.2*
+      """
+      ...
+
+**Internal class**::
+
+  # Foo()
+  #
+  # The main Foo object in the data model
+  #
+  # Args:
+  #    context (Context): The invocation Context
+  #    count (int): The number to count
+  #
+  class Foo(Bar):
+      ...
+
+
+.. _contributing_class_order:
+
+Class structure and ordering
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When creating or modifying an object class in BuildStream, it is
+important to keep in mind the order in which symbols should appear
+and keep this consistent.
+
+Here is an example to illustrate the expected ordering of symbols
+on a Python class in BuildStream::
+
+  class Foo(Bar):
+
+      # Public class-wide variables come first, if any.
+
+      # Private class-wide variables, if any
+
+      # Now we have the dunder/magic methods, always starting
+      # with the __init__() method.
+
+      def __init__(self, name):
+
+          super().__init__()
+
+          # NOTE: In the instance initializer we declare any instance variables,
+          #       always declare the public instance variables (if any) before
+          #       the private ones.
+          #
+          #       It is preferred to avoid any public instance variables, and
+          #       always expose an accessor method for it instead.
+
+          #
+          # Public instance variables
+          #
+          self.name = name  # The name of this foo
+
+          #
+          # Private instance variables
+          #
+          self._count = 0   # The count of this foo
+
+      ################################################
+      #               Abstract Methods               #
+      ################################################
+
+      # NOTE: Abstract methods in BuildStream are allowed to have
+      #       default implementations.
+      #
+      #       Subclasses must NEVER override any method which was
+      #       not advertised as an abstract method by the parent class.
+
+      # frob()
+      #
+      # Implementors should implement this to frob this foo
+      # count times if possible.
+      #
+      # Args:
+      #    count (int): The number of times to frob this foo
+      #
+      # Returns:
+      #    (int): The number of times this foo was frobbed.
+      #
+      # Raises:
+      #    (FooError): Implementors are expected to raise this error
+      #
+      def frob(self, count):
+
+          #
+          # An abstract method in BuildStream is allowed to have
+          # a default implementation.
+          #
+          self._count = self._do_frobbing(count)
+
+          return self._count
+
+      ################################################
+      #     Implementation of abstract methods       #
+      ################################################
+
+      # NOTE: Implementations of abstract methods defined by
+      #       the parent class should NEVER document the API
+      #       here redundantly.
+
+      def frobbish(self):
+         #
+         # Implementation of the "frobbish" abstract method
+         # defined by the parent Bar class.
+         #
+         return True
+
+      ################################################
+      #                 Public Methods               #
+      ################################################
+
+      # NOTE: Public methods here are the ones which are expected
+      #       to be called from outside of this class.
+      #
+      #       These, along with any abstract methods, usually
+      #       constitute the API surface of this class.
+
+      # frobnicate()
+      #
+      # Perform the frobnication process on this Foo
+      #
+      # Raises:
+      #    (FrobError): In the case that a frobnication error was
+      #                 encountered
+      #
+      def frobnicate(self):
+          frobnicator.frobnicate(self)
+
+      # set_count()
+      #
+      # Sets the count of this foo
+      #
+      # Args:
+      #    count (int): The new count to set
+      #
+      def set_count(self, count):
+
+          self._count = count
+
+      # get_count()
+      #
+      # Accessor for the count value of this foo.
+      #
+      # Returns:
+      #    (int): The count of this foo
+      #
+      def get_count(self):
+
+          return self._count
+
+      ################################################
+      #                 Private Methods              #
+      ################################################
+
+      # NOTE: Private methods are the ones which are internal
+      #       implementation details of this class.
+      #
+      #       Even though these are private implementation
+      #       details, they still MUST have API documenting
+      #       comments on them.
+
+      # _do_frobbing()
+      #
+      # Does the actual frobbing
+      #
+      # Args:
+      #    count (int): The number of times to frob this foo
+      #
+      # Returns:
+      #    (int): The number of times this foo was frobbed.
+      #
+      def _do_frobbing(self, count):
+          return count
+
+
+.. _contributing_public_and_private:
+
+Public and private symbols
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+BuildStream mostly follows PEP-8 for defining *public* and *private* symbols
+for any given class, with some deviations. Please read the `section on inheritance
+<https://www.python.org/dev/peps/pep-0008/#designing-for-inheritance>`_ for
+reference on how the PEP-8 defines public and non-public.
+
+* A *public* symbol is any symbol which you expect to be used by clients
+  of your class or module within BuildStream.
+
+  Public symbols are written without any leading underscores.
+
+* A *private* symbol is any symbol which is entirely internal to your class
+  or module within BuildStream. These symbols cannot ever be accessed by
+  external clients or modules.
+
+  A private symbol must be denoted by a leading underscore.
+
+* When a class can have subclasses, then private symbols should be denoted
+  by two leading underscores. For example, the ``Sandbox`` or ``Platform``
+  classes which have various implementations, or the ``Element`` and ``Source``
+  classes which plugins derive from.
+
+  The double leading underscore naming convention invokes Python's name
+  mangling algorithm which helps prevent namespace collisions in the case
+  that subclasses might have a private symbol with the same name.
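+
+For illustration, here is a minimal sketch of how these conventions look in
+practice (the class and symbol names here are invented for the example)::
+
+  # A class internal to one module, without subclasses
+  class Foo():
+
+      def __init__(self):
+          self._count = 0   # A private instance variable
+
+      # Public method: expected to be used by other BuildStream modules
+      def get_count(self):
+          return self._count
+
+      # Private method: denoted by a single leading underscore
+      def _reset(self):
+          self._count = 0
+
+  # A base class with subclasses (such as Element or Source) denotes
+  # its private symbols with two leading underscores instead, invoking
+  # Python's name mangling to avoid collisions in subclasses
+  class Base():
+
+      def __update_state(self):
+          ...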
+
+In BuildStream, we have what we call a *"Public API Surface"*, as previously
+mentioned in :ref:`contributing_documenting_symbols`. In the :ref:`next section
+<contributing_public_api_surface>` we will discuss the *"Public API Surface"* and
+outline the exceptions to the rules discussed here.
+
+
+.. _contributing_public_api_surface:
+
+Public API surface
+~~~~~~~~~~~~~~~~~~
+BuildStream exposes what we call a *"Public API Surface"* which is stable
+and unchanging. This is for the sake of stability of the interfaces which
+plugins use, so it can also be referred to as the *"Plugin facing API"*.
+
+Any symbols which are a part of the *"Public API Surface*" are never allowed
+to change once they have landed in a stable release version of BuildStream. As
+such, we aim to keep the *"Public API Surface"* as small as possible at all
+times, and never expose any internal details to plugins inadvertently.
+
+One problem which arises from this is that we end up having symbols
+which are *public* according to the :ref:`rules discussed in the previous section
+<contributing_public_and_private>`, but must be hidden away from the
+*"Public API Surface"*. For example, BuildStream internal classes need
+to invoke methods on the ``Element`` and ``Source`` classes, whereas these
+methods need to be hidden from the *"Public API Surface"*.
+
+This is where BuildStream deviates from the PEP-8 standard for public
+and private symbol naming.
+
+In order to disambiguate between:
+
+* Symbols which are publicly accessible details of the ``Element`` class, which
+  can be accessed by BuildStream internals but must remain hidden from the
+  *"Public API Surface"*.
+
+* Symbols which are private to the ``Element`` class, and cannot be accessed
+  from outside of the ``Element`` class at all.
+
+We denote the former category of symbols with only a single underscore, and the latter
+category of symbols with a double underscore. We often refer to this distinction
+as *"API Private"* (the former category) and *"Local Private"* (the latter category).
+
+Classes which are a part of the *"Public API Surface"* and require this disambiguation
+were not discussed in :ref:`the class ordering section <contributing_class_order>`; for
+these classes, the *"API Private"* symbols always come **before** the *"Local Private"*
+symbols in the class declaration.
+
+Modules which are not a part of the *"Public API Surface"* have their Python files
+prefixed with a single underscore, and are not imported in BuildStream's master
+``__init__.py`` which is used by plugins.
+
+.. note::
+
+   The ``utils.py`` module is public and exposes a handful of utility functions;
+   however, many of the functions it provides are *"API Private"*.
+
+   In this case, the *"API Private"* functions are prefixed with a single underscore.
+
+Any objects which are a part of the *"Public API Surface"* should be exposed via the
+toplevel ``__init__.py`` of the ``buildstream`` package.
+
+
+File naming convention
+~~~~~~~~~~~~~~~~~~~~~~
+With the exception of a few helper objects and data structures, we structure
+the code in BuildStream such that every filename is named after the object it
+implements. E.g. The ``Project`` object is implemented in ``_project.py``, the
+``Context`` object in ``_context.py``, the base ``Element`` class in ``element.py``,
+etc.
+
+As mentioned in the previous section, objects which are not a part of the
+:ref:`public, plugin facing API surface <contributing_public_api_surface>` have their
+filenames prefixed with a leading underscore (like ``_context.py`` and ``_project.py``
+in the examples above).
+
+When an object name has multiple words in it, e.g. ``ArtifactCache``, then the
+resulting file is named all in lower case without any underscore to separate
+words. In the case of ``ArtifactCache``, the filename implementing this object
+is found at ``_artifactcache/artifactcache.py``.
+
+
+Imports
+~~~~~~~
+Module imports inside BuildStream are done with relative ``.`` notation:
+
+**Good**::
+
+  from ._context import Context
+
+**Bad**::
+
+  from buildstream._context import Context
+
+The exception to the above rule is when authoring plugins;
+plugins do not reside in the same namespace, so they must
+address buildstream in their imports.
+
+An element plugin will derive from Element by importing::
+
+  from buildstream import Element
+
+When importing utilities specifically, don't import function names
+from there, instead import the module itself::
+
+  from . import utils
+
+This makes it clear when reading the code that such functions
+are not defined in the same file, but come from ``utils.py``, for example.
+
+
+.. _contributing_instance_variables:
+
+Instance variables
+~~~~~~~~~~~~~~~~~~
+It is preferred that all instance state variables be declared as :ref:`private symbols
+<contributing_public_and_private>`; however, in some cases, especially when the state
+is immutable for the object's lifetime (like an ``Element`` name, for example), it
+is acceptable to save some typing by using a publicly accessible instance variable.
+
+It is never acceptable to modify the value of an instance variable from outside
+of the declaring class, even if the variable is *public*. In other words, the class
+which exposes an instance variable is the only one in control of the value of this
+variable.
+
+* If an instance variable is public and must be modified; then it must be
+  modified using a :ref:`mutator <contributing_accessor_mutator>`.
+
+* Ideally for better encapsulation, all object state is declared as
+  :ref:`private instance variables <contributing_public_and_private>` and can
+  only be accessed by external classes via public :ref:`accessors and mutators
+  <contributing_accessor_mutator>`.
+
+.. note::
+
+   In some cases, we may use small data structures declared as objects for the sake
+   of better readability, where the object class itself has no real supporting code.
+
+   In these exceptions, it can be acceptable to modify the instance variables
+   of these objects directly, unless they are otherwise documented to be immutable.
+
+
+.. _contributing_accessor_mutator:
+
+Accessors and mutators
+~~~~~~~~~~~~~~~~~~~~~~
+An accessor and a mutator are methods defined on the object class to access (get)
+or mutate (set) a value owned by the declaring class, respectively.
+
+An accessor might derive the returned value from one or more of its components,
+and a mutator might have side effects, or delegate the mutation to a component.
+
+Accessors and mutators are always :ref:`public <contributing_public_and_private>`
+(even if they might have a single leading underscore and are considered
+:ref:`API Private <contributing_public_api_surface>`), as their purpose is to
+enforce encapsulation with regards to any accesses to the state which is owned
+by the declaring class.
+
+Accessors and mutators are functions prefixed with ``get_`` and ``set_``
+respectively, e.g.::
+
+  class Foo():
+
+      def __init__(self):
+
+          # Declare some internal state
+          self._count = 0
+
+      # get_count()
+      #
+      # Gets the count of this Foo.
+      #
+      # Returns:
+      #    (int): The current count of this Foo
+      #
+      def get_count(self):
+          return self._count
+
+      # set_count()
+      #
+      # Sets the count of this Foo.
+      #
+      # Args:
+      #    count (int): The new count for this Foo
+      #
+      def set_count(self, count):
+          self._count = count
+
+.. attention::
+
+   We are aware that Python offers a facility for accessors and
+   mutators using the ``@property`` decorator instead. Do not use
+   the ``@property`` decorator.
+
+   The decision to use explicitly defined functions instead of the
+   ``@property`` decorator is rather arbitrary; there is not much
+   technical merit to preferring one technique over the other.
+   However as :ref:`discussed below <contributing_always_consistent>`,
+   it is of the utmost importance that we do not mix both techniques
+   in the same codebase.
+
+
+.. _contributing_abstract_methods:
+
+Abstract methods
+~~~~~~~~~~~~~~~~
+In BuildStream, an *"Abstract Method"* is a bit of a misnomer and does
+not match up to how Python defines abstract methods; we need to seek out
+a new nomenclature to refer to these methods.
+
+In Python, an *"Abstract Method"* is a method which **must** be
+implemented by a subclass, whereas all methods in Python can be
+overridden.
+
+In BuildStream, we use the term *"Abstract Method"*, to refer to
+a method which **can** be overridden by a subclass, whereas it
+is **illegal** to override any other method.
+
+* Abstract methods are allowed to have default implementations.
+
+* Subclasses are not allowed to redefine the calling signature
+  of an abstract method, or redefine the API contract in any way.
+
+* Subclasses are not allowed to override any other methods.
+
+The key here is that in BuildStream, we consider it unacceptable
+that a subclass overrides a method of its parent class unless
+said parent class has explicitly given permission to subclasses
+to do so, and outlined the API contract for this purpose. No surprises
+are allowed.
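+
+As a hypothetical sketch, reusing the ``Foo`` class from the
+:ref:`ordering example above <contributing_class_order>`, a subclass may
+only override what was explicitly advertised::
+
+  class Baz(Foo):
+
+      # Foo advertised frob() as an abstract method, so overriding
+      # it here is allowed, keeping the same calling signature.
+      def frob(self, count):
+          return count * 2
+
+      # Overriding Foo.set_count() here would NOT be allowed, since
+      # it was never advertised as an abstract method by Foo.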
+
+
+Error handling
+~~~~~~~~~~~~~~
+In BuildStream, all non-recoverable errors are expressed via
+subclasses of the ``BstError`` exception.
+
+This exception is handled deep in the core in a few places, and
+it is rarely necessary to handle a ``BstError``.
+
+
+Raising exceptions
+''''''''''''''''''
+When writing code in the BuildStream core, ensure that all system
+calls and third party library calls are wrapped in a ``try:`` block,
+and raise a descriptive ``BstError`` of the appropriate class explaining
+what exactly failed.
+
+Ensure that the original system call error is formatted into your new
+exception, and that you use the Python ``from`` semantic to retain the
+original call trace, for example::
+
+  try:
+      os.utime(self._refpath(ref))
+  except FileNotFoundError as e:
+      raise ArtifactError("Attempt to access unavailable artifact: {}".format(e)) from e
+
+
+Enhancing exceptions
+''''''''''''''''''''
+Sometimes the ``BstError`` originates from a lower level component,
+and the code segment which raised the exception did not have enough context
+to create a complete, informative summary of the error for the user.
+
+In these cases it is necessary to handle the error and raise a new
+one, e.g.::
+
+  try:
+      extracted_artifact = self._artifacts.extract(self, cache_key)
+  except ArtifactError as e:
+      raise ElementError("Failed to extract {} while checking out {}: {}"
+                         .format(cache_key, self.name, e)) from e
+
+
+Programming errors
+''''''''''''''''''
+Sometimes you are writing code and have detected an unexpected condition,
+or a broken invariant which the code cannot be prepared to handle
+gracefully.
+
+In these cases, do **not** raise any of the ``BstError`` class exceptions.
+
+Instead, use the ``assert`` statement, e.g.::
+
+  assert utils._is_main_process(), \
+      "Attempted to save workspace configuration from child process"
+
+This will result in a ``BUG`` message with the stack trace included being
+logged and reported in the frontend.
+
+
+BstError parameters
+'''''''''''''''''''
+When raising ``BstError`` class exceptions, there are some common properties
+which can be useful to know about:
+
+* **message:** The brief human readable error, will be formatted on one line in the frontend.
+
+* **detail:** An optional detailed human readable message to accompany the **message** summary
+  of the error. This is often used to recommend the user some course of action, or to provide
+  additional context about the error.
+
+* **temporary:** Some errors are allowed to be *temporary*; this attribute is only
+  observed from child processes which fail in a temporary way. This distinction
+  is used to determine whether the task should be *retried* or not. An error is usually
+  only a *temporary* error if the cause of the error was a network timeout.
+
+* **reason:** A machine readable identifier for the error. This is used for the purpose
+  of regression testing, such that we check that BuildStream has errored out for the
+  expected reason in a given failure mode.
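+
+For example, raising an error which makes use of these parameters might look
+something like this (``SourceError`` is used here purely as a sketch; check
+``_exceptions.py`` for the exact signature of any given ``BstError`` subclass)::
+
+  raise SourceError("{}: Failed to fetch from {}".format(self, url),
+                    detail="The server did not respond within the configured timeout",
+                    reason="fetch-timeout",
+                    temporary=True)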
+
+
+Documenting Exceptions
+''''''''''''''''''''''
+We have already seen :ref:`some examples <contributing_class_order>` of how
+exceptions are documented in API documenting comments, but this is worth some
+additional disambiguation.
+
+* Only document the exceptions which are raised directly by the function in question.
+  It is otherwise nearly impossible to keep track of what exceptions *might* be raised
+  indirectly by calling the given function.
+
+* For a regular public or private method, your audience is a caller of the function;
+  document the exception in terms of what exception might be raised as a result of
+  calling this method.
+
+* For an :ref:`abstract method <contributing_abstract_methods>`, your audience is the
+  implementor of the method in a subclass; document the exception in terms of what
+  exception is prescribed for the implementing class to raise.
+
+
+.. _contributing_always_consistent:
+
+Always be consistent
+~~~~~~~~~~~~~~~~~~~~
+There are various ways to define functions and classes in Python,
+which has evolved with various features over time.
+
+In BuildStream, we may not have leveraged all of the nice features
+we could have; that is okay, and where it does not break API, we
+can consider changing it.
+
+Even if you know there is a *better* way to do a given thing in
+Python when compared to the way we do it in BuildStream, *do not do it*.
+
+Consistency of how we do things in the codebase is more important
+than the actual way in which things are done, always.
+
+Instead, if you like a certain Python feature and think the BuildStream
+codebase should use it, then propose your change on the `mailing list
+<https://mail.gnome.org/mailman/listinfo/buildstream-list>`_. Chances
+are that we will reach agreement to use your preferred approach, and
+in that case, it will be important to apply the change unilaterally
+across the entire codebase, such that we continue to have a consistent
+codebase.
+
+
+Avoid tail calling
+~~~~~~~~~~~~~~~~~~
+With the exception of tail calling with simple functions from
+the standard Python library, such as splitting and joining lines
+of text and encoding/decoding text; always avoid tail calling.
+
+**Good**::
+
+  # Variables that we will need declared up top
+  context = self._get_context()
+  workspaces = context.get_workspaces()
+
+  ...
+
+  # Saving the workspace configuration
+  workspaces.save_config()
+
+**Bad**::
+
+  # Saving the workspace configuration
+  self._get_context().get_workspaces().save_config()
+
+**Acceptable**::
+
+  # Decode the raw text loaded from a log file for display,
+  # join them into a single utf-8 string and strip away any
+  # trailing whitespace.
+  return '\n'.join([line.decode('utf-8') for line in lines]).rstrip()
+
+When you need to obtain a delegate object via an accessor function,
+either do it at the beginning of the function, or at the beginning
+of a code block within the function that will use that object.
+
+There are several reasons for this convention:
+
+* When observing a stack trace, it is always faster and easier to
+  determine what went wrong when all statements are on separate lines.
+
+* We always want individual lines to trace back to their origin as
+  much as possible for the purpose of tracing the history of code
+  with *git blame*.
+
+  One day, you might need the ``Context`` or ``Workspaces`` object
+  in the same function for another reason, at which point it will
+  be unacceptable to leave the existing line as written, because it
+  will introduce a redundant accessor to the same object, so the
+  line written as::
+
+    self._get_context().get_workspaces().save_config()
+
+  Will have to change at that point, meaning we lose the valuable
+  information of which commit originally introduced this call
+  when running *git blame*.
+
+* For similar reasons, we prefer delegate objects be accessed near
+  the beginning of a function or code block so that there is less
+  chance that this statement will have to move in the future, if
+  the same function or code block needs the delegate object for any
+  other reason.
+
+  Aside from this, code is generally more legible and uniform when
+  variables are declared at the beginning of function blocks.
+
+
+Vertical stacking of modules
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For the sake of overall comprehensibility of the BuildStream
+architecture, it is important that we retain vertical stacking
+order of the dependencies and knowledge of modules as much as
+possible, and avoid any cyclic relationships in modules.
+
+For instance, the ``Source`` objects are owned by ``Element``
+objects in the BuildStream data model, and as such the ``Element``
+will delegate some activities to the ``Source`` objects in its
+possession. The ``Source`` objects should however never call functions
+on the ``Element`` object, nor should the ``Source`` object itself
+have any understanding of what an ``Element`` is.
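+
+A rough sketch of the direction in which delegation is allowed to flow
+(the method names here are only illustrative)::
+
+  # element.py - Good: the Element drives the interaction and
+  # delegates work to the Source objects which it owns.
+  for source in self.__sources:
+      source._fetch()
+
+  # source.py - Bad: a Source reaching back up into its owning
+  # Element, creating a cyclic relationship between the modules.
+  self.__element._schedule_assemble()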
+
+If you are implementing a low level utility layer, for example
+as a part of the ``YAML`` loading code layers, it can be tempting
+to derive context from the higher levels of the codebase which use
+these low level utilities, instead of defining properly standalone
+APIs for these utilities to work: Never do this.
+
+Unfortunately, unlike other languages where include files play
+a big part in ensuring that it is difficult to make a mess, Python
+allows you to just call methods on arbitrary objects passed through
+a function call without having to import the module which defines
+those methods - this leads to cyclic dependencies between modules
+quickly if the developer does not take special care to ensure this
+does not happen.
+
+
+Minimize arguments in methods
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When creating an object, or adding a new API method to an existing
+object, always strive to keep as much context as possible on the
+object itself rather than expecting callers of the methods to provide
+everything the method needs every time.
+
+If the value or object that is needed in a function call is a constant
+for the lifetime of the object which exposes the given method, then
+that value or object should be passed in the constructor instead of
+via a method call.
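+
+For example, with a hypothetical ``Downloader`` object:
+
+**Good**::
+
+  class Downloader():
+
+      def __init__(self, context):
+          # The context is constant for the lifetime of this object,
+          # so it is provided once at construction time.
+          self._context = context
+
+      def download(self, url):
+          ...
+
+**Bad**::
+
+  class Downloader():
+
+      def download(self, context, url):
+          # Every caller must provide the same context on every call.
+          ...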
+
+
+Minimize API surfaces
+~~~~~~~~~~~~~~~~~~~~~
+When creating an object, or adding new functionality in any way,
+try to keep the number of :ref:`public, outward facing <contributing_public_and_private>`
+symbols to a minimum; this is important for both
+:ref:`internal and public, plugin facing API surfaces <contributing_public_api_surface>`.
+
+When anyone visits a file, there are two levels of comprehension:
+
+* What do I need to know in order to *use* this object.
+
+* What do I need to know in order to *modify* this object.
+
+For the former, we want the reader to understand with as little effort
+as possible, what the public API contract is for a given object and consequently,
+how it is expected to be used. This is also why we
+:ref:`order the symbols of a class <contributing_class_order>` in such a way
+as to keep all outward facing public API surfaces at the top of the file, so that the
+reader never needs to dig deep into the bottom of the file to find something they
+might need to use.
+
+For the latter, when it comes to having to modify the file or add functionality,
+you want to retain as much freedom as possible to modify internals, while
+being sure that nothing external will be affected by internal modifications.
+Less client facing API means that you have less surrounding code to modify
+when your API changes. Further, ensuring that there is minimal outward facing
+API for any module minimizes the complexity for the developer working on
+that module, by limiting the considerations needed regarding external side
+effects of their modifications to the module.
+
+When modifying a file, one should not have to understand or think too
+much about external side effects, when the API surface of the file is
+well documented and minimal.
+
+When adding new API to a given object for a new purpose, consider whether
+the new API is in any way redundant with other API (should this value now
+go into the constructor, since we use it more than once? could this
+value be passed along with another function, and the other function renamed,
+to better suit the new purposes of this module/object?) and repurpose
+the outward facing API of an object as a whole every time.
+
+
+Avoid transient state on instances
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+At times, it can be tempting to store transient state that is
+the result of one operation on an instance, only to be retrieved
+later via an accessor function elsewhere.
+
+As a basic rule of thumb, if the value is transient and just the
+result of one operation, which needs to be observed directly after
+by another code segment, then never store it on the instance.
+
+BuildStream is complicated in the sense that it is multi-processed
+and it is not always obvious how to pass the transient state around
+as a return value or a function parameter. Do not fall prey to this
+obstacle and pollute object instances with transient state.
+
+Instead, always refactor the surrounding code so that the value
+is propagated to the desired end point via a well defined API, either
+by adding new code paths or changing the design such that the
+architecture continues to make sense.
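+
+For example, with a hypothetical cache key calculation:
+
+**Good**::
+
+  def _resolve(self):
+      # The result is returned directly to the caller which needs it.
+      return self.__calculate_cache_key()
+
+**Bad**::
+
+  def _resolve(self):
+      # The result is stashed on the instance, only to be read back
+      # later through an accessor from some unrelated code path.
+      self.__cache_key = self.__calculate_cache_key()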
+
+
+Refactor the codebase as needed
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Especially when implementing features, always move the BuildStream
+codebase forward as a whole.
+
+Taking a short cut is alright when prototyping, but circumventing
+existing architecture and design to get a feature implemented without
+re-designing the surrounding architecture to accommodate the new
+feature is never acceptable upstream.
+
+For example, let's say that you have to implement a feature and you've
+successfully prototyped it, but it launches a ``Job`` directly from a
+``Queue`` implementation to get the feature to work, while the ``Scheduler``
+is normally responsible for dispatching ``Jobs`` for the elements on
+a ``Queue``. This means that you've proven that your feature can work,
+and now it is time to start working on a patch for upstream.
+
+Consider what the scenario is and why you are circumventing the design,
+and then redesign the ``Scheduler`` and ``Queue`` objects to accommodate
+the new feature and condition under which you need to dispatch a ``Job``,
+or how you can give the ``Queue`` implementation the additional context it
+needs.
+
+
+Adding core plugins
+-------------------
+This is a checklist of things which need to be done when adding a new
+core plugin to BuildStream proper.
+
+
+Update documentation index
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+The documentation generating scripts will automatically pick up your
+newly added plugin and generate HTML, but will not automatically add a link
+to your plugin's documentation.
+
+Whenever adding a new plugin, you must add an entry for it in ``doc/source/core_plugins.rst``.
+
+
+Bump format version
+~~~~~~~~~~~~~~~~~~~
+In order for projects to assert that they have a new enough version
+of BuildStream to use the new plugin, the ``BST_FORMAT_VERSION`` must
+be incremented in the ``_versions.py`` file.
+
+Remember to include in your plugin's main documentation, the format
+version in which the plugin was introduced, using the standard annotation
+which we use throughout the documentation, e.g.::
+
+  .. note::
+
+     The ``foo`` plugin is available since :ref:`format version 16 <project_format_version>`
+
+
+Add tests
+~~~~~~~~~
+Needless to say, all new feature additions need to be tested. For ``Element``
+plugins, these usually need to be added to the integration tests. For ``Source``
+plugins, the tests are added in two ways:
+
+* For most normal ``Source`` plugins, it is important to add a new ``Repo``
+  implementation for your plugin in the ``tests/testutils/repo/`` directory
+  and update ``ALL_REPO_KINDS`` in ``tests/testutils/repo/__init__.py``. This
+  will include your new ``Source`` implementation in a series of already existing
+  tests, ensuring it works well under normal operating conditions (a rough
+  sketch of such a ``Repo`` implementation follows this list).
+
+* For other source plugins, or in order to test edge cases, such as failure modes,
+  which are not tested under the normal test battery, add new tests in ``tests/sources``.
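+
+A rough sketch of what such a ``Repo`` implementation looks like follows; the
+shape of this is taken from the existing implementations in ``tests/testutils/repo/``,
+so check those files for the exact base class attributes and expected return values::
+
+  from .repo import Repo
+
+  class Foo(Repo):
+
+      def create(self, directory):
+          # Populate a repository from the content staged in `directory`
+          # and return a ref identifying that content.
+          ...
+
+      def source_config(self, ref=None):
+          # Return the source configuration which a test project would
+          # use to declare a `foo` source pointing at this repository.
+          config = {
+              'kind': 'foo',
+              'url': 'file://' + self.repo,
+          }
+          if ref is not None:
+              config['ref'] = ref
+          return config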
+
+
+Extend the cachekey test
+~~~~~~~~~~~~~~~~~~~~~~~~
+For any newly added plugins, it is important to add some new simple elements
+in ``tests/cachekey/project/elements`` or ``tests/cachekey/project/sources``,
+and ensure that the newly added elements are depended on by ``tests/cachekey/project/target.bst``.
+
+One new element should be added to the cache key test for every configuration
+value which your plugin understands which can possibly affect the result of
+your plugin's ``Plugin.get_unique_key()`` implementation.
+
+This test ensures that cache keys do not unexpectedly change or become incompatible
+due to code changes. As such, the cache key test should have full coverage of every
+YAML configuration which can possibly affect cache key outcome at all times.
+
+See the ``tests/cachekey/update.py`` file for instructions on running the updater;
+you need to run the updater to generate the ``.expected`` files and add the new
+``.expected`` files in the same commit which extends the cache key test.
+
+
+Protocol buffers
+----------------
+BuildStream uses protobuf and gRPC for serialization and communication with
+artifact cache servers.  This requires ``.proto`` files and Python code
+generated from the ``.proto`` files using protoc.  All these files live in the
+``buildstream/_protos`` directory.  The generated files are included in the
+git repository to avoid depending on grpcio-tools for user installations.
+
+
+Regenerating code
+~~~~~~~~~~~~~~~~~
+When ``.proto`` files are modified, the corresponding Python code needs to
+be regenerated.  As a prerequisite for code generation you need to install
+``grpcio-tools`` using pip or some other mechanism::
+
+  pip3 install --user grpcio-tools
+
+To actually regenerate the code::
+
+  ./setup.py build_grpc
+
+
+Documenting
+-----------
+BuildStream starts out as a documented project from day one and uses
+`sphinx <https://www.sphinx-doc.org>`_ to document itself.
+
+This section discusses formatting policies for editing files in the
+``doc/source`` directory, and describes the details of how the docs are
+generated so that you can easily generate and view the docs yourself before
+submitting patches to the documentation.
+
+For details on how API documenting comments and docstrings are formatted,
+refer to the :ref:`documenting section of the coding guidelines
+<contributing_documenting_symbols>`.
+
+
+Documentation formatting policy
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The BuildStream documentation style is as follows:
+
+* Titles and headings require two leading empty lines above them.
+  Only the first word in a title should be capitalized.
+
+  * If there is an ``.. _internal_link:`` anchor, there should be two empty lines
+    above the anchor, followed by one leading empty line.
+
+* Within a section, paragraphs should be separated by one empty line.
+
+* Notes are defined using: ``.. note::`` blocks, followed by an empty line
+  and then indented (3 spaces) text.
+
+  * Other kinds of notes can be used throughout the documentation and will
+    be decorated in different ways; these work in the same way as ``.. note::`` does.
+
+    Feel free to also use ``.. attention::`` or ``.. important::`` to call special
+    attention to a paragraph, ``.. tip::`` to give the reader a special tip on how
+    to use an advanced feature or ``.. warning::`` to warn the user about a potential
+    misuse of the API and explain its consequences.
+
+* Code blocks are defined using: ``.. code:: LANGUAGE`` blocks, followed by an empty
+  line and then indented (3 spaces) text. Note that the default language is ``python``.
+
+* Cross references should be of the form ``:role:`target```.
+
+  * Explicit anchors can be declared as ``.. _anchor_name:`` on a line by itself.
+
+  * To cross reference arbitrary locations with, for example, the anchor ``anchor_name``,
+    always provide some explicit text in the link instead of deriving the text from
+    the target, e.g.: ``:ref:`Link text <anchor_name>```.
+    Note that the "_" prefix is not used when referring to the target.
+
+For further information about using the reStructuredText with sphinx, please see the
+`Sphinx Documentation <http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.
+
+
+Building Docs
+~~~~~~~~~~~~~
+The documentation build is not integrated into the ``setup.py``, and
+integrating it would be difficult (or impossible), so there is a little
+bit of setup you need to take care of first.
+
+Before you can build the BuildStream documentation yourself, you need
+to first install ``sphinx`` along with some additional plugins and dependencies,
+using pip or some other mechanism::
+
+  # Install sphinx
+  pip3 install --user sphinx
+
+  # Install some sphinx extensions
+  pip3 install --user sphinx-click
+  pip3 install --user sphinx_rtd_theme
+
+  # Additional optional dependencies required
+  pip3 install --user arpy
+
+To build the documentation, just run the following::
+
+  make -C doc
+
+This will give you a ``doc/build/html`` directory with the html docs which
+you can view in your browser locally to test.
+
+
+Regenerating session html
+'''''''''''''''''''''''''
+The documentation build will build the session files if they are missing,
+or if explicitly asked to rebuild. We revision the generated session html files
+in order to reduce the burden on documentation contributors.
+
+To explicitly rebuild the session snapshot html files, it is recommended that you
+first set the ``BST_SOURCE_CACHE`` environment variable to your source cache; this
+will make the docs build reuse already downloaded sources::
+
+  export BST_SOURCE_CACHE=~/.cache/buildstream/sources
+
+To force a rebuild of the session html while building the docs, simply build the docs like this::
+
+  make BST_FORCE_SESSION_REBUILD=1 -C doc
+
+
+Man pages
+~~~~~~~~~
+Unfortunately it is quite difficult to integrate the man pages build
+into the ``setup.py``; as such, whenever the frontend command line
+interface changes, the static man pages should be regenerated and
+committed along with that change.
+
+To do this, first ensure you have ``click_man`` installed, possibly
+with::
+
+  pip3 install --user click_man
+
+Then, in the toplevel directory of buildstream, run the following::
+
+  python3 setup.py --command-packages=click_man.commands man_pages
+
+And commit the result, ensuring that you have added anything in
+the ``man/`` subdirectory, which will be automatically included
+in the buildstream distribution.
+
+
+User guide
+~~~~~~~~~~
+The :ref:`user guide <using>` consists of free form documentation
+in manually written ``.rst`` files and is split up into a few sections,
+of main interest are the :ref:`tutorial <tutorial>` and the
+:ref:`examples <examples>`.
+
+The distinction of the two categories of user guides is important to
+understand too.
+
+* **Tutorial**
+
+  The tutorial is structured as a series of exercises which start with
+  the most basic concepts and build upon the previous chapters in order
+  to arrive at a basic understanding of how to create BuildStream projects.
+
+  This series of examples should be easy enough to complete in a matter
+  of a few hours for a new user, and should provide just enough insight to
+  get the user started in creating their own projects.
+
+  Going through the tutorial step by step should also result in the user
+  becoming proficient enough with the reference manual to get by on their own.
+
+* **Examples**
+
+  These exist to demonstrate how to accomplish more advanced tasks which
+  are not always obvious and discoverable.
+
+  Alternatively, these also demonstrate elegant and recommended ways of
+  accomplishing some tasks which could be done in various ways.
+
+
+Guidelines
+''''''''''
+Here are some general guidelines for adding new free form documentation
+to the user guide.
+
+* **Focus on a single subject**
+
+  It is important to stay focused on a single subject and avoid getting
+  into tangential material when creating a new entry, so that the articles
+  remain concise and the user is not distracted by unrelated subject material.
+
+  A single tutorial chapter or example should not introduce any more
+  subject material than the material being added for the given example.
+
+* **Reuse existing sample project elements**
+
+  To help avoid distracting from the topic at hand, it is always preferable to
+  reuse the sample project material from other examples, deviating only slightly
+  to demonstrate the new material, rather than to create completely new projects.
+
+  This helps us remain focused on a single topic at a time, and reduces the amount
+  of unrelated material the reader needs to learn in order to digest the new
+  example.
+
+* **Don't be redundant**
+
+  When something has already been explained in the tutorial or in another example,
+  it is best to simply refer to the other user guide entry in a new example.
+
+  Where possible, always prefer to link to the tutorial if an explanation
+  exists there, rather than linking to another example.
+
+* **Link into the reference manual at every opportunity**
+
+  The format and plugin API are 100% documented at all times. Whenever discussing
+  anything about the format or plugin API, always do so while providing a link
+  into the more terse reference material.
+
+  We don't want users to have to search for the material themselves, and we also
+  want the user to become proficient at navigating the reference material over
+  time.
+
+* **Use concise terminology**
+
+  As developers, we tend to come up with code names for features we develop, and
+  then end up documenting a new feature in an example.
+
+  Never use a code name or shorthand to refer to a feature in the user guide;
+  instead, always use fully qualified sentences outlining very explicitly what we
+  are doing in the example, or what the example is for in the case of a title.
+
+  We need to bear in mind that the audience of our user guide is probably a
+  proficient developer or integrator, but has no idea what we might have decided
+  to name a given activity.
+
+
+Structure of an example
+'''''''''''''''''''''''
+The :ref:`tutorial <tutorial>` and the :ref:`examples <examples>` sections
+of the documentation contain a series of sample projects; each chapter in
+the tutorial, and each standalone example, uses a sample project.
+
+Here is the structure for adding new examples and tutorial chapters.
+
+* The example has a ``${name}``.
+
+* The example has a project users can copy and use.
+
+  * This project is added in the directory ``doc/examples/${name}``.
+
+* The example has a documentation component.
+
+  * This is added at ``doc/source/examples/${name}.rst``
+  * An entry for ``examples/${name}`` is added to the toctree in ``doc/source/using_examples.rst``
+  * This documentation discusses the project elements declared in the project and may
+    provide some BuildStream command examples.
+  * This documentation links out to the reference manual at every opportunity.
+
+  .. note::
+
+     In the case of a tutorial chapter, the ``.rst`` file is added in at
+     ``doc/source/tutorial/${name}.rst`` and an entry for ``tutorial/${name}``
+     is added to ``doc/source/using_tutorial.rst``.
+
+* The example has a CI test component.
+
+  * This is an integration test added at ``tests/examples/${name}``.
+  * This test runs BuildStream in the ways described in the example
+    and asserts that we get the results which we advertise to users in
+    the example.
+
+
+Adding BuildStream command output
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+As a part of building the docs, BuildStream will run itself and extract
+some html for the colorized output which is produced.
+
+If you want to run BuildStream to produce some nice html for your
+documentation, then you can do so by adding new ``.run`` files to the
+``doc/sessions/`` directory.
+
+Any files added as ``doc/sessions/${example}.run`` will result in generated
+files at ``doc/source/sessions/${example}.html``, and these files can be
+included in the reStructuredText documentation at any time with::
+
+  .. raw:: html
+     :file: sessions/${example}.html
+
+The ``.run`` file format is just another YAML dictionary which consists of a
+``commands`` list, instructing the program what to do command by command.
+
+Each *command* is a dictionary, the members of which are listed here:
+
+* ``directory``: The project directory, specified relative to the ``.run`` file.
+
+* ``output``: The output html file to generate, specified relative to the
+  ``.run`` file (optional).
+
+* ``fake-output``: Don't really run the command; just pretend to, and pretend
+  this value was the output. An empty string will enable this too.
+
+* ``command``: The command to run, without the leading ``bst``.
+
+* ``shell``: Specifying ``True`` indicates that ``command`` should be run as
+  a shell command from the project directory, instead of a bst command (optional).
+
+When adding a new ``.run`` file, one should normally also commit the newly
+generated ``.html`` file(s) into the ``doc/source/sessions-stored/``
+directory at the same time; this ensures that other developers do not need to
+regenerate them locally in order to build the docs.
+
+**Example**:
+
+.. code:: yaml
+
+   commands:
+
+   # Make it fetch first
+   - directory: ../examples/foo
+     command: fetch hello.bst
+
+   # Capture a build output
+   - directory: ../examples/foo
+     output: ../source/sessions/foo-build.html
+     command: build hello.bst
+
+
+.. _contributing_testing:
+
+Testing
+-------
+BuildStream uses pytest for regression tests and for testing
+the behavior of newly added components.
+
+The full documentation for pytest can be found here: http://doc.pytest.org/en/latest/contents.html
+
+Don't get lost in the docs if you don't need to; follow existing examples instead.
+
+
+Running tests
+~~~~~~~~~~~~~
+To run the tests, just type the following at the toplevel of the repository::
+
+  ./setup.py test
+
+When debugging a test, it can be desirable to see the stdout
+and stderr generated by a test; to do this, use the ``--addopts``
+option to feed arguments to pytest, as such::
+
+  ./setup.py test --addopts -s
+
+You can always abort on the first failure by running::
+
+  ./setup.py test --addopts -x
+
+If you want to run a specific test or a group of tests, you
+can specify a path or prefix to match. For example, to run all of
+the frontend tests you can do::
+
+  ./setup.py test --addopts 'tests/frontend/'
+
+Specific tests can be chosen by using the ``::`` delimiter after the test module.
+For example, to run the ``test_build_track`` test within ``frontend/buildtrack.py`` you could do::
+
+  ./setup.py test --addopts 'tests/frontend/buildtrack.py::test_build_track'
+
+We also have a set of slow integration tests that are disabled by
+default - you will notice most of them marked with SKIP in the pytest
+output. To run them, you can use::
+
+  ./setup.py test --addopts '--integration'
+
+By default, buildstream also runs pylint on all files. Should you want
+to run just pylint (these checks are a lot faster), you can do so
+with::
+
+  ./setup.py test --addopts '-m pylint'
+
+Alternatively, any IDE plugin that uses pytest should automatically
+detect the ``.pylintrc`` in the project's root directory.
+
+
+Adding tests
+~~~~~~~~~~~~
+Tests are found in the ``tests`` subdirectory, inside of which
+there is a separate directory for each *domain* of tests.
+All tests are collected as::
+
+  tests/*/*.py
+
+If the new test is not appropriate for the existing test domains,
+then simply create a new directory for it under the tests subdirectory.
+
+Various tests may include data files to test on; there are examples
+of this in the existing tests. When adding data for a test, create
+a subdirectory beside your test in which to store data.
+
+When creating a test that needs data, use the datafiles extension
+to decorate your test case (again, examples exist in the existing
+tests for this); documentation on the datafiles extension can
+be found here: https://pypi.python.org/pypi/pytest-datafiles.
+
+Tests that run a sandbox should be decorated with::
+
+  @pytest.mark.integration
+
+and use the integration cli helper.
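+
+For illustration, here is a minimal sketch of a test which uses the datafiles
+extension, assuming a hypothetical ``project`` data directory beside the test
+module which contains a ``hello.bst`` element, and the ``cli`` helper fixture
+used by the existing tests:
+
+.. code:: python
+
+   import os
+   import pytest
+
+   # The ``cli`` fixture used throughout the existing tests
+   from tests.testutils import cli  # pylint: disable=unused-import
+
+   # Data directory beside this test module (hypothetical name)
+   DATA_DIR = os.path.join(
+       os.path.dirname(os.path.realpath(__file__)),
+       'project'
+   )
+
+
+   @pytest.mark.datafiles(DATA_DIR)
+   def test_show_hello(cli, datafiles):
+       # The datafiles mark copies DATA_DIR into a temporary directory
+       # and hands it to the test via the ``datafiles`` fixture.
+       project = str(datafiles)
+
+       # Run ``bst show hello.bst`` against the copied project and
+       # assert that the command succeeded.
+       result = cli.run(project=project, args=['show', 'hello.bst'])
+       result.assert_success()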
+
+
+Measuring performance
+---------------------
+
+
+Benchmarking framework
+~~~~~~~~~~~~~~~~~~~~~~~
+BuildStream has a utility to measure performance which is available from a
+separate repository at https://gitlab.com/BuildStream/benchmarks. This tool
+allows you to run a fixed set of workloads with multiple versions of
+BuildStream. From this you can see whether one version performs better or
+worse than another, which is useful when looking for regressions and when
+testing potential optimizations.
+
+For full documentation on how to use the benchmarking tool, see the README in
+the 'benchmarks' repository.
+
+
+Profiling tools
+~~~~~~~~~~~~~~~
+When looking for ways to speed up the code, you should make use of a profiling
+tool.
+
+Python provides `cProfile <https://docs.python.org/3/library/profile.html>`_
+which gives you a list of all functions called during execution and how much
+time was spent in each function. Here is an example of running ``bst --help``
+under cProfile::
+
+    python3 -m cProfile -o bst.cprofile -- $(which bst) --help
+
+You can then analyze the results interactively using the ``pstats`` module::
+
+    python3 -m pstats ./bst.cprofile
+
+For more detailed documentation of cProfile and 'pstats', see:
+https://docs.python.org/3/library/profile.html.
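+
+If you prefer a non-interactive report, the ``pstats`` module can also be used
+programmatically; for example, this small sketch prints the ten most expensive
+entries by cumulative time from the profile file produced above:
+
+.. code:: python
+
+   import pstats
+
+   # Load the profile written by cProfile above and print the ten
+   # most expensive entries, sorted by cumulative time.
+   stats = pstats.Stats('bst.cprofile')
+   stats.sort_stats('cumulative').print_stats(10)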
+
+For a richer visualisation of the callstack you can try `Pyflame
+<https://github.com/uber/pyflame>`_. Once you have followed the instructions in
+Pyflame's README to install the tool, you can profile ``bst`` commands as in the
+following example::
+
+    pyflame --output bst.flame --trace bst --help
+
+You may see an ``Unexpected ptrace(2) exception:`` error. Note that in this
+case the ``bst`` operation will continue running in the background; you will
+need to wait for it to complete or kill it. Once this is done, rerun the above
+command, which appears to fix the issue.
+
+Once you have output from pyflame, you can use the ``flamegraph.pl`` script
+from the `Flamegraph project <https://github.com/brendangregg/FlameGraph>`_
+to generate an ``.svg`` image::
+
+    ./flamegraph.pl bst.flame > bst-flamegraph.svg
+
+The generated SVG file can then be viewed in your preferred web browser.
+
+
+Profiling specific parts of BuildStream with BST_PROFILE
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+BuildStream can also turn on cProfile for specific parts of execution
+using the ``BST_PROFILE`` environment variable.
+
+``BST_PROFILE`` can be set to a section name, or ``all`` for all
+sections. There is a list of topics in ``buildstream/_profile.py``. For
+example, running::
+
+    BST_PROFILE=load-pipeline bst build bootstrap-system-x86.bst
+
+will produce a profile in the current directory for the time taken to
+call most of ``initialized``, for each element. These profile files
+are in the same cProfile format as those mentioned in the previous
+section, and can be analysed with ``pstats`` or ``pyflame``.
+
+
+Profiling the artifact cache receiver
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Since the artifact cache receiver is not normally run directly, it's
+necessary to alter the ``ForceCommand`` part of ``sshd_config`` to enable
+profiling. See the main documentation in ``doc/source/artifacts.rst``
+for general information on setting up the artifact cache. It's also
+useful to change directory to a logging directory before starting
+``bst-artifact-receive`` with profiling on.
+
+This is an example of a ``ForceCommand`` section of ``sshd_config`` used to
+obtain profiles::
+
+    Match user artifacts
+      ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts
+
+
+The MANIFEST.in and setup.py
+----------------------------
+When adding a dependency to BuildStream, it's important to update ``setup.py`` accordingly.
+
+When adding data files which need to be discovered at runtime by BuildStream, update ``setup.py`` accordingly.
+
+When adding data files for the purpose of docs or tests, or anything that is not covered by
+``setup.py``, update ``MANIFEST.in`` accordingly.
+
+At any time, running the following command to create a source distribution should result in
+a tarball which contains everything we want it to include::
+
+  ./setup.py sdist
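+
+If you are unsure whether a newly added file actually ends up in the tarball,
+a small script along these lines can list the contents of a generated source
+distribution for inspection (a sketch, assuming the tarball was created in
+``dist/`` as above):
+
+.. code:: python
+
+   import glob
+   import tarfile
+
+   # Pick a source distribution tarball from dist/
+   sdist = sorted(glob.glob('dist/*.tar.gz'))[-1]
+
+   # List everything that was included in the tarball
+   with tarfile.open(sdist, 'r:gz') as tar:
+       for name in sorted(tar.getnames()):
+           print(name)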
diff --git a/HACKING.rst b/HACKING.rst
deleted file mode 100644
index 7e2914e7761367ef4115ec8dd84c1a0d136eff22..0000000000000000000000000000000000000000
--- a/HACKING.rst
+++ /dev/null
@@ -1,618 +0,0 @@
-Contributing
-============
-Some tips and guidelines for developers hacking on BuildStream
-
-
-Feature additions
------------------
-Major feature additions should be proposed on the
-`mailing list <https://mail.gnome.org/mailman/listinfo/buildstream-list>`_
-before being considered for inclusion, we strongly recommend proposing
-in advance of commencing work.
-
-New features must be well documented and tested either in our main
-test suite if possible, or otherwise in the integration tests.
-
-It is expected that the individual submitting the work take ownership
-of their feature within BuildStream for a reasonable timeframe of at least
-one release cycle after their work has landed on the master branch. This is
-to say that the submitter is expected to address and fix any side effects and
-bugs which may have fell through the cracks in the review process, giving us
-a reasonable timeframe for identifying these.
-
-
-Patch submissions
------------------
-If you want to submit a patch, do ask for developer permissions on our
-IRC channel first (GitLab's button also works, but you may need to
-shout about it - we often overlook this) - for CI reasons, it's much
-easier if patches are in branches of the main repository.
-
-Branches must be submitted as merge requests in gitlab. If the branch
-fixes an issue or is related to any issues, these issues must be mentioned
-in the merge request or preferably the commit messages themselves.
-
-Branch names for merge requests should be prefixed with the submitter's
-name or nickname, e.g. ``username/implement-flying-ponies``.
-
-You may open merge requests for the branches you create before you
-are ready to have them reviewed upstream, as long as your merge request
-is not yet ready for review then it must be prefixed with the ``WIP:``
-identifier.
-
-Submitted branches must not contain a history of the work done in the
-feature branch. Please use git's interactive rebase feature in order to
-compose a clean patch series suitable for submission.
-
-We prefer that documentation changes be submitted in separate commits from
-the code changes which they document, and new test cases are also preferred
-in separate commits.
-
-If a commit in your branch modifies behavior such that a test must also
-be changed to match the new behavior, then the tests should be updated
-with the same commit. Ideally every commit in the history of master passes
-its test cases, this makes bisections more easy to perform, but is not
-always practical with more complex branches.
-
-
-Commit messages
-~~~~~~~~~~~~~~~
-Commit messages must be formatted with a brief summary line, optionally
-followed by an empty line and then a free form detailed description of
-the change.
-
-The summary line must start with what changed, followed by a colon and
-a very brief description of the change.
-
-**Example**::
-
-  element.py: Added the frobnicator so that foos are properly frobbed.
-
-  The new frobnicator frobnicates foos all the way throughout
-  the element. Elements that are not properly frobnicated raise
-  an error to inform the user of invalid frobnication rules.
-
-
-Coding style
-------------
-Coding style details for BuildStream
-
-
-Style guide
-~~~~~~~~~~~
-Python coding style for BuildStream is pep8, which is documented here: https://www.python.org/dev/peps/pep-0008/
-
-We have a couple of minor exceptions to this standard, we dont want to compromise
-code readability by being overly restrictive on line length for instance.
-
-The pep8 linter will run automatically when running the test suite.
-
-
-Imports
-~~~~~~~
-Module imports inside BuildStream are done with relative ``.`` notation
-
-Good::
-
-  from .context import Context
-
-Bad::
-
-  from buildstream.context import Context
-
-The exception to the above rule is when authoring plugins,
-plugins do not reside in the same namespace so they must
-address buildstream in the imports.
-
-An element plugin will derive from Element by importing::
-
-  from buildstream import Element
-
-When importing utilities specifically, dont import function names
-from there, instead import the module itself::
-
-  from . import utils
-
-This makes things clear when reading code that said functions
-are not defined in the same file but come from utils.py for example.
-
-
-Policy for private symbols
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-Private symbols are expressed via a leading ``_`` single underscore, or
-in some special circumstances with a leading ``__`` double underscore.
-
-Before understanding the naming policy, it is first important to understand
-that in BuildStream, there are two levels of privateness which need to be
-considered.
-
-These are treated subtly differently and thus need to be understood:
-
-* API Private
-
-  A symbol is considered to be *API private* if it is not exposed in the *public API*.
-
-  Even if a symbol does not have any leading underscore, it may still be *API private*
-  if the containing *class* or *module* is named with a leading underscore.
-
-* Local private
-
-  A symbol is considered to be *local private* if it is not intended for access
-  outside of the defining *scope*.
-
-  If a symbol has a leading underscore, it might not be *local private* if it is
-  declared on a publicly visible class, but needs to be accessed internally by
-  other modules in the BuildStream core.
-
-
-Ordering
-''''''''
-For better readability and consistency, we try to keep private symbols below
-public symbols. In the case of public modules where we may have a mix of
-*API private* and *local private* symbols, *API private* symbols should come
-before *local private* symbols.
-
-
-Symbol naming
-'''''''''''''
-Any private symbol must start with a single leading underscore for two reasons:
-
-* So that it does not bleed into documentation and *public API*.
-
-* So that it is clear to developers which symbols are not used outside of the declaring *scope*
-
-Remember that with python, the modules (python files) are also symbols
-within their containing *package*, as such; modules which are entirely
-private to BuildStream are named as such, e.g. ``_thismodule.py``.
-
-
-Cases for double underscores
-''''''''''''''''''''''''''''
-The double underscore in python has a special function. When declaring
-a symbol in class scope which has a leading underscore, it can only be
-accessed within the class scope using the same name. Outside of class
-scope, it can only be accessed with a *cheat*.
-
-We use the double underscore in cases where the type of privateness can be
-ambiguous.
-
-* For private modules and classes
-
-  We never need to disambiguate with a double underscore
-
-* For private symbols declared in a public *scope*
-
-  In the case that we declare a private method on a public object, it
-  becomes ambiguous whether:
-
-  * The symbol is *local private*, and only used within the given scope
-
-  * The symbol is *API private*, and will be used internally by BuildStream
-    from other parts of the codebase.
-
-  In this case, we use a single underscore for *API private* methods which
-  are not *local private*, and we use a double underscore for *local private*
-  methods declared in public scope.
-
-
-Documenting private symbols
-'''''''''''''''''''''''''''
-Any symbol which is *API Private* (regardless of whether it is also
-*local private*), should have some documentation for developers to
-better understand the codebase.
-
-Contrary to many other python projects, we do not use docstrings to
-document private symbols, but prefer to keep *API Private* symbols
-documented in code comments placed *above* the symbol (or *beside* the
-symbol in some cases, such as variable declarations in a class where
-a shorter comment is more desirable), rather than docstrings placed *below*
-the symbols being documented.
-
-Other than this detail, follow the same guidelines for documenting
-symbols as described below.
-
-
-Documenting BuildStream
------------------------
-BuildStream starts out as a documented project from day one and uses
-sphinx to document itself.
-
-
-Documentation formatting policy
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The BuildStream documentation style is as follows:
-
-* Titles and headings require two leading empty lines above them. Only the first word should be capitalized.
-
-  * If there is an ``.. _internal_link`` anchor, there should be two empty lines above the anchor, followed by one leading empty line.
-
-* Within a section, paragraphs should be separated by one empty line.
-
-* Notes are defined using: ``.. note::`` blocks, followed by an empty line and then indented (3 spaces) text.
-
-* Code blocks are defined using: ``.. code:: LANGUAGE`` blocks, followed by an empty line and then indented (3 spaces) text. Note that the default language is `python`.
-
-* Cross references should be of the form ``:role:`target```.
-
-  * To cross reference arbitrary locations with, for example, the anchor ``_anchor_name``, you must give the link an explicit title: ``:ref:`Link text <anchor_name>```. Note that the "_" prefix is not required.
-
-Useful links:
-
-For further information, please see the `Sphinx Documentation <http://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.
-
-
-Building Docs
-~~~~~~~~~~~~~
-The documentation build is not integrated into the ``setup.py`` and is
-difficult (or impossible) to do so, so there is a little bit of setup
-you need to take care of first.
-
-Before you can build the BuildStream documentation yourself, you need
-to first install ``sphinx`` along with some additional plugins and dependencies,
-using pip or some other mechanism::
-
-  # Install sphinx
-  pip3 install --user sphinx
-
-  # Install some sphinx extensions
-  pip3 install --user sphinx-click
-  pip3 install --user sphinx_rtd_theme
-
-  # Additional optional dependencies required
-  pip3 install --user arpy
-
-To build the documentation, just run the following::
-
-  make -C doc
-
-This will give you a ``doc/build/html`` directory with the html docs which
-you can view in your browser locally to test.
-
-
-Regenerating session html
-'''''''''''''''''''''''''
-The documentation build will build the session files if they are missing,
-or if explicitly asked to rebuild. We revision the generated session html files
-in order to reduce the burden on documentation contributors.
-
-To explicitly rebuild the session snapshot html files, it is recommended that you
-first set the ``BST_SOURCE_CACHE`` environment variable to your source cache, this
-will make the docs build reuse already downloaded sources::
-
-  export BST_SOURCE_CACHE=~/.cache/buildstream/sources
-
-To force rebuild session html while building the doc, simply build the docs like this::
-
-  make BST_FORCE_SESSION_REBUILD=1 -C doc
-
-
-Man pages
-~~~~~~~~~
-Unfortunately it is quite difficult to integrate the man pages build
-into the ``setup.py``, as such, whenever the frontend command line
-interface changes, the static man pages should be regenerated and
-committed with that.
-
-To do this, first ensure you have ``click_man`` installed, possibly
-with::
-
-  pip3 install --user click_man
-
-Then, in the toplevel directory of buildstream, run the following::
-
-  python3 setup.py --command-packages=click_man.commands man_pages
-
-And commit the result, ensuring that you have added anything in
-the ``man/`` subdirectory, which will be automatically included
-in the buildstream distribution.
-
-
-Documenting conventions
-~~~~~~~~~~~~~~~~~~~~~~~
-We use the sphinx.ext.napoleon extension for the purpose of having
-a bit nicer docstrings than the default sphinx docstrings.
-
-A docstring for a method, class or function should have the following
-format::
-
-  """Brief description of entity
-
-  Args:
-     argument1 (type): Description of arg
-     argument2 (type): Description of arg
-
-  Returns:
-     (type): Description of returned thing of the specified type
-
-  Raises:
-     (SomeError): When some error occurs
-     (SomeOtherError): When some other error occurs
-
-  A detailed description can go here if one is needed, only
-  after the above part documents the calling conventions.
-  """
-
-
-Documentation Examples
-~~~~~~~~~~~~~~~~~~~~~~
-The examples section of the documentation contains a series of standalone
-examples, here are the criteria for an example addition.
-
-* The example has a ``${name}``
-
-* The example has a project users can copy and use
-
-  * This project is added in the directory ``doc/examples/${name}``
-
-* The example has a documentation component
-
-  * This is added at ``doc/source/examples/${name}.rst``
-  * A reference to ``examples/${name}`` is added to the toctree in ``doc/source/examples.rst``
-  * This documentation discusses the project elements declared in the project and may
-    provide some BuildStream command examples
-  * This documentation links out to the reference manual at every opportunity
-
-* The example has a CI test component
-
-  * This is an integration test added at ``tests/examples/${name}``
-  * This test runs BuildStream in the ways described in the example
-    and assert that we get the results which we advertize to users in
-    the said examples.
-
-
-Adding BuildStream command output
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-As a part of building the docs, BuildStream will run itself and extract
-some html for the colorized output which is produced.
-
-If you want to run BuildStream to produce some nice html for your
-documentation, then you can do so by adding new ``.run`` files to the
-``doc/sessions/`` directory.
-
-Any files added as ``doc/sessions/${example}.run`` will result in generated
-file at ``doc/source/sessions/${example}.html``, and these files can be
-included in the reStructuredText documentation at any time with::
-
-  .. raw:: html
-     :file: sessions/${example}.html
-
-The ``.run`` file format is just another YAML dictionary which consists of a
-``commands`` list, instructing the program what to do command by command.
-
-Each *command* is a dictionary, the members of which are listed here:
-
-* ``directory``: The input file relative project directory
-
-* ``output``: The input file relative output html file to generate (optional)
-
-* ``fake-output``: Don't really run the command, just pretend to and pretend
-  this was the output, an empty string will enable this too.
-
-* ``command``: The command to run, without the leading ``bst``
-
-When adding a new ``.run`` file, one should normally also commit the new
-resulting generated ``.html`` file(s) into the ``doc/source/sessions-stored/``
-directory at the same time, this ensures that other developers do not need to
-regenerate them locally in order to build the docs.
-
-**Example**:
-
-.. code:: yaml
-
-   commands:
-
-   # Make it fetch first
-   - directory: ../examples/foo
-     command: fetch hello.bst
-
-   # Capture a build output
-   - directory: ../examples/foo
-     output: ../source/sessions/foo-build.html
-     command: build hello.bst
-
-
-Protocol Buffers
-----------------
-BuildStream uses protobuf and gRPC for serialization and communication with
-artifact cache servers.  This requires ``.proto`` files and Python code
-generated from the ``.proto`` files using protoc.  All these files live in the
-``buildstream/_protos`` directory.  The generated files are included in the
-git repository to avoid depending on grpcio-tools for user installations.
-
-
-Regenerating code
-~~~~~~~~~~~~~~~~~
-When ``.proto`` files are modified, the corresponding Python code needs to
-be regenerated.  As a prerequisite for code generation you need to install
-``grpcio-tools`` using pip or some other mechanism::
-
-  pip3 install --user grpcio-tools
-
-To actually regenerate the code::
-
-  ./setup.py build_grpc
-
-
-Testing BuildStream
--------------------
-BuildStream uses pytest for regression tests and testing out
-the behavior of newly added components.
-
-The elaborate documentation for pytest can be found here: http://doc.pytest.org/en/latest/contents.html
-
-Don't get lost in the docs if you don't need to, follow existing examples instead.
-
-
-Running tests
-~~~~~~~~~~~~~
-To run the tests, just type::
-
-  ./setup.py test
-
-At the toplevel.
-
-When debugging a test, it can be desirable to see the stdout
-and stderr generated by a test, to do this use the ``--addopts``
-function to feed arguments to pytest as such::
-
-  ./setup.py test --addopts -s
-
-You can always abort on the first failure by running::
-
-  ./setup.py test --addopts -x
-
-If you want to run a specific test or a group of tests, you
-can specify a prefix to match. E.g. if you want to run all of
-the frontend tests you can do::
-
-  ./setup.py test --addopts '-k tests/frontend/'
-
-Specific tests can be chosen by using the :: delimeter after the test module.
-If you wanted to run the test_build_track test within frontend/buildtrack.py you could do::
-
-  ./setup.py test --adopts '-k tests/frontend/buildtrack.py::test_build_track'
-
-We also have a set of slow integration tests that are disabled by
-default - you will notice most of them marked with SKIP in the pytest
-output. To run them, you can use::
-
-  ./setup.py test --addopts '--integration'
-
-By default, buildstream also runs pylint on all files. Should you want
-to run just pylint (these checks are a lot faster), you can do so
-with::
-
-  ./setup.py test --addopts '-m pylint'
-
-Alternatively, any IDE plugin that uses pytest should automatically
-detect the ``.pylintrc`` in the project's root directory.
-
-Adding tests
-~~~~~~~~~~~~
-Tests are found in the tests subdirectory, inside of which
-there is a separarate directory for each *domain* of tests.
-All tests are collected as::
-
-  tests/*/*.py
-
-If the new test is not appropriate for the existing test domains,
-then simply create a new directory for it under the tests subdirectory.
-
-Various tests may include data files to test on, there are examples
-of this in the existing tests. When adding data for a test, create
-a subdirectory beside your test in which to store data.
-
-When creating a test that needs data, use the datafiles extension
-to decorate your test case (again, examples exist in the existing
-tests for this), documentation on the datafiles extension can
-be found here: https://pypi.python.org/pypi/pytest-datafiles
-
-Tests that run a sandbox should be decorated with::
-
-  @pytest.mark.integration
-
-and use the integration cli helper.
-
-Measuring BuildStream performance
----------------------------------
-
-
-Benchmarking framework
-~~~~~~~~~~~~~~~~~~~~~~~
-BuildStream has a utility to measure performance which is available from a
-separate repository at https://gitlab.com/BuildStream/benchmarks. This tool
-allows you to run a fixed set of workloads with multiple versions of
-BuildStream. From this you can see whether one version performs better or
-worse than another which is useful when looking for regressions and when
-testing potential optimizations.
-
-For full documentation on how to use the benchmarking tool see the README in
-the 'benchmarks' repository.
-
-
-Profiling tools
-~~~~~~~~~~~~~~~
-When looking for ways to speed up the code you should make use of a profiling
-tool.
-
-Python provides `cProfile <https://docs.python.org/3/library/profile.html>`_
-which gives you a list of all functions called during execution and how much
-time was spent in each function. Here is an example of running ``bst --help``
-under cProfile:
-
-    python3 -m cProfile -o bst.cprofile -- $(which bst) --help
-
-You can then analyze the results interactively using the 'pstats' module:
-
-    python3 -m pstats ./bst.cprofile
-
-For more detailed documentation of cProfile and 'pstats', see:
-https://docs.python.org/3/library/profile.html.
-
-For a richer visualisation of the callstack you can try `Pyflame
-<https://github.com/uber/pyflame>`_. Once you have followed the instructions in
-Pyflame's README to install the tool, you can profile `bst` commands as in the
-following example:
-
-    pyflame --output bst.flame --trace bst --help
-
-You may see an `Unexpected ptrace(2) exception:` error. Note that the `bst`
-operation will continue running in the background in this case, you will need
-to wait for it to complete or kill it. Once this is done, rerun the above
-command which appears to fix the issue.
-
-Once you have output from pyflame, you can use the ``flamegraph.pl`` script
-from the `Flamegraph project <https://github.com/brendangregg/FlameGraph>`_
-to generate an .svg image:
-
-    ./flamegraph.pl bst.flame > bst-flamegraph.svg
-
-The generated SVG file can then be viewed in your preferred web browser.
-
-
-Profiling specific parts of BuildStream with BST_PROFILE
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-BuildStream can also turn on cProfile for specific parts of execution
-using BST_PROFILE.
-
-BST_PROFILE can be set to a section name, or 'all' for all
-sections. There is a list of topics in `buildstream/_profile.py`. For
-example, running::
-
-    BST_PROFILE=load-pipeline bst build bootstrap-system-x86.bst
-
-will produce a profile in the current directory for the time take to
-call most of `initialized`, for each element. These profile files
-are in the same cProfile format as those mentioned in the previous
-section, and can be analysed with `pstats` or `pyflame`.
-
-
-Profiling the artifact cache receiver
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Since the artifact cache receiver is not normally run directly, it's
-necessary to alter the ForceCommand part of sshd_config to enable
-profiling. See the main documentation in `doc/source/artifacts.rst`
-for general information on setting up the artifact cache. It's also
-useful to change directory to a logging directory before starting
-`bst-artifact-receive` with profiling on.
-
-This is an example of a ForceCommand section of sshd_config used to
-obtain profiles::
-
-    Match user artifacts
-      ForceCommand BST_PROFILE=artifact-receive cd /tmp && bst-artifact-receive --pull-url https://example.com/ /home/artifacts/artifacts
-
-
-The MANIFEST.in and setup.py
-----------------------------
-When adding a dependency to BuildStream, it's important to update the setup.py accordingly.
-
-When adding data files which need to be discovered at runtime by BuildStream, update setup.py accordingly.
-
-When adding data files for the purpose of docs or tests, or anything that is not covered by
-setup.py, update the MANIFEST.in accordingly.
-
-At any time, running the following command to create a source distribution should result in
-creating a tarball which contains everything we want it to include::
-
-  ./setup.py sdist
diff --git a/MANIFEST.in b/MANIFEST.in
index 80c815e557ba934300b47409eb4fcc21f129e1b8..9c16ce0511f7c7b922539e3d7616a69bd1f1d372 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,7 @@
 # Basic toplevel package includes
 include BuildStream.doap
 include COPYING
-include HACKING.rst
+include CONTRIBUTING.rst
 include MAINTAINERS
 include NEWS
 include README.rst
@@ -23,4 +23,4 @@ recursive-include tests *.expected
 recursive-include buildstream/_protos *.proto
 
 # Requirements files
-dev-requirements.txt
+include dev-requirements.txt
diff --git a/NEWS b/NEWS
index bc35ff79a60e9dc99aca0e392c2567f3f65e7f0a..7e88f4af4b2ec5ba87c7fe1034d90cb3df83f50a 100644
--- a/NEWS
+++ b/NEWS
@@ -24,6 +24,12 @@ buildstream 1.3.1
   o Add new `pip` source plugin for downloading python packages using pip,
     based on requirements files from previous sources.
 
+  o Generate Docker images from built artifacts using
+    `contrib/bst-docker-import` script.
+
+  o Added documentation on how to create out of source builds. This includes the
+    new `conf-root` variable to make the process easier, and there has been
+    a bug fix to workspaces so that out of source builds can be done in workspaces too.
 
 =================
 buildstream 1.1.5
diff --git a/README.rst b/README.rst
index c553da0686b47e46efcc14b63fb1b5dc538da617..b514d2d7bc79daf1a8b550503a737343c8dce4d9 100644
--- a/README.rst
+++ b/README.rst
@@ -1,15 +1,26 @@
 About
 -----
+
+.. image:: https://docs.buildstream.build/_static/release.svg
+   :target: https://gitlab.com/BuildStream/buildstream/commits/bst-1.2
+
+.. image:: https://docs.buildstream.build/_static/snapshot.svg
+   :target: https://gitlab.com/BuildStream/buildstream/commits/master
+
 .. image:: https://gitlab.com/BuildStream/buildstream/badges/master/pipeline.svg
    :target: https://gitlab.com/BuildStream/buildstream/commits/master
 
 .. image:: https://gitlab.com/BuildStream/buildstream/badges/master/coverage.svg?job=coverage
    :target: https://gitlab.com/BuildStream/buildstream/commits/master
 
+.. image:: https://img.shields.io/pypi/v/BuildStream.svg
+   :target: https://pypi.org/project/BuildStream
+
 
 What is BuildStream?
 ====================
-BuildStream is a Free Software tool for building/integrating software stacks.
+`BuildStream <https://buildstream.build>`_ is a Free Software tool for 
+building/integrating software stacks.
 It takes inspiration, lessons and use-cases from various projects including
 OBS, Reproducible Builds, Yocto, Baserock, Buildroot, Aboriginal, GNOME Continuous,
 JHBuild, Flatpak Builder and Android repo.
@@ -52,7 +63,7 @@ BuildStream offers the following advantages:
 
 How do I use BuildStream?
 =========================
-Please refer to the `documentation <https://buildstream.gitlab.io/buildstream/>`_
+Please refer to the `documentation <https://docs.buildstream.build>`_
 for  information about installing BuildStream, and about the BuildStream YAML format
 and plugin options.
 
@@ -74,10 +85,10 @@ BuildStream operates on a set of YAML files (.bst files), as follows:
 
 How can I get started?
 ======================
-To start using BuildStream, first,
-`install <https://buildstream.gitlab.io/buildstream/main_install.html>`_
-BuildStream onto your machine and then follow our
-`tutorial <https://buildstream.gitlab.io/buildstream/using_tutorial.html>`_.
+To get started, first `install BuildStream by following the installation guide
+<https://buildstream.build/install.html>`_
+and then follow our tutorial in the
+`user guide <https://docs.buildstream.build/main_using.html>`_.
 
 We also recommend exploring some existing BuildStream projects:
 
@@ -86,4 +97,3 @@ We also recommend exploring some existing BuildStream projects:
 * https://gitlab.com/baserock/definitions
 
 If you have any questions please ask on our `#buildstream <irc://irc.gnome.org/buildstream>`_ channel in `irc.gnome.org <irc://irc.gnome.org>`_
-
diff --git a/buildstream/__init__.py b/buildstream/__init__.py
index 0f6efb0da490abd6c30577af20b47dcf7be7b742..af2122ef79b57cf0008f3704b4b71c1ca9e78d2a 100644
--- a/buildstream/__init__.py
+++ b/buildstream/__init__.py
@@ -28,9 +28,9 @@ if "_BST_COMPLETION" not in os.environ:
 
     from .utils import UtilError, ProgramNotFoundError
     from .sandbox import Sandbox, SandboxFlags
+    from .types import Scope, Consistency
     from .plugin import Plugin
-    from .source import Source, SourceError, Consistency, SourceFetcher
+    from .source import Source, SourceError, SourceFetcher
     from .element import Element, ElementError
-    from .element_enums import Scope
     from .buildelement import BuildElement
     from .scriptelement import ScriptElement
diff --git a/buildstream/_artifactcache/artifactcache.py b/buildstream/_artifactcache/artifactcache.py
index f28fe394f1a99e52f28ea78e81ad84421892f5df..5e698babbedffdaae853742dbc0332b95a9aee0b 100644
--- a/buildstream/_artifactcache/artifactcache.py
+++ b/buildstream/_artifactcache/artifactcache.py
@@ -19,9 +19,10 @@
 
 import os
 import string
-from collections import Mapping, namedtuple
+from collections import namedtuple
+from collections.abc import Mapping
 
-from ..element_enums import _KeyStrength
+from ..types import _KeyStrength
 from .._exceptions import ArtifactError, ImplError, LoadError, LoadErrorReason
 from .._message import Message, MessageType
 from .. import utils
@@ -51,7 +52,7 @@ class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert cl
         url = _yaml.node_get(spec_node, str, 'url')
         push = _yaml.node_get(spec_node, bool, 'push', default_value=False)
         if not url:
-            provenance = _yaml.node_get_provenance(spec_node)
+            provenance = _yaml.node_get_provenance(spec_node, 'url')
             raise LoadError(LoadErrorReason.INVALID_DATA,
                             "{}: empty artifact cache URL".format(provenance))
 
@@ -67,6 +68,16 @@ class ArtifactCacheSpec(namedtuple('ArtifactCacheSpec', 'url push server_cert cl
         if client_cert and basedir:
             client_cert = os.path.join(basedir, client_cert)
 
+        if client_key and not client_cert:
+            provenance = _yaml.node_get_provenance(spec_node, 'client-key')
+            raise LoadError(LoadErrorReason.INVALID_DATA,
+                            "{}: 'client-key' was specified without 'client-cert'".format(provenance))
+
+        if client_cert and not client_key:
+            provenance = _yaml.node_get_provenance(spec_node, 'client-cert')
+            raise LoadError(LoadErrorReason.INVALID_DATA,
+                            "{}: 'client-cert' was specified without 'client-key'".format(provenance))
+
         return ArtifactCacheSpec(url, push, server_cert, client_key, client_cert)
 
 
@@ -81,19 +92,17 @@ ArtifactCacheSpec.__new__.__defaults__ = (None, None, None)
 class ArtifactCache():
     def __init__(self, context):
         self.context = context
-        self.required_artifacts = set()
         self.extractdir = os.path.join(context.artifactdir, 'extract')
         self.tmpdir = os.path.join(context.artifactdir, 'tmp')
 
-        self.estimated_size = None
-
         self.global_remote_specs = []
         self.project_remote_specs = {}
 
-        self._local = False
-        self.cache_size = None
-        self.cache_quota = None
-        self.cache_lower_threshold = None
+        self._required_elements = set()       # The elements required for this session
+        self._cache_size = None               # The current cache size, sometimes it's an estimate
+        self._cache_quota = None              # The cache quota
+        self._cache_lower_threshold = None    # The target cache size for a cleanup
+        self._remotes_setup = False           # Check to prevent double-setup of remotes
 
         os.makedirs(self.extractdir, exist_ok=True)
         os.makedirs(self.tmpdir, exist_ok=True)
@@ -146,6 +155,10 @@ class ArtifactCache():
     #
     def setup_remotes(self, *, use_config=False, remote_url=None):
 
+        # Ensure we do not double-initialise since this can be expensive
+        assert(not self._remotes_setup)
+        self._remotes_setup = True
+
         # Initialize remote artifact caches. We allow the commandline to override
         # the user config in some cases (for example `bst push --remote=...`).
         has_remote_caches = False
@@ -192,35 +205,42 @@ class ArtifactCache():
                                   (str(provenance)))
         return cache_specs
 
-    # append_required_artifacts():
+    # mark_required_elements():
+    #
+    # Mark elements whose artifacts are required for the current run.
     #
-    # Append to the list of elements whose artifacts are required for
-    # the current run. Artifacts whose elements are in this list will
-    # be locked by the artifact cache and not touched for the duration
-    # of the current pipeline.
+    # Artifacts whose elements are in this list will be locked by the artifact
+    # cache and not touched for the duration of the current pipeline.
     #
     # Args:
     #     elements (iterable): A set of elements to mark as required
     #
-    def append_required_artifacts(self, elements):
-        # We lock both strong and weak keys - deleting one but not the
-        # other won't save space in most cases anyway, but would be a
-        # user inconvenience.
+    def mark_required_elements(self, elements):
 
+        # We risk calling this function with a generator, so we
+        # better consume it first.
+        #
+        elements = list(elements)
+
+        # Mark the elements as required. We cannot assume that we know the
+        # cache keys yet, so we only check that later when deleting.
+        #
+        self._required_elements.update(elements)
+
+        # For the cache keys which were resolved so far, we bump
+        # the mtime of them.
+        #
+        # This is just in case we have concurrent instances of
+        # BuildStream running with the same artifact cache, it will
+        # reduce the likelihood of one instance deleting artifacts
+        # which are required by the other.
         for element in elements:
             strong_key = element._get_cache_key(strength=_KeyStrength.STRONG)
             weak_key = element._get_cache_key(strength=_KeyStrength.WEAK)
-
             for key in (strong_key, weak_key):
-                if key and key not in self.required_artifacts:
-                    self.required_artifacts.add(key)
-
-                    # We also update the usage times of any artifacts
-                    # we will be using, which helps preventing a
-                    # buildstream process that runs in parallel with
-                    # this one from removing artifacts in-use.
+                if key:
                     try:
-                        self.update_atime(key)
+                        self.update_mtime(element, key)
                     except ArtifactError:
                         pass
 
@@ -228,10 +248,28 @@ class ArtifactCache():
     #
     # Clean the artifact cache as much as possible.
     #
+    # Returns:
+    #    (int): The size of the cache after having cleaned up
+    #
     def clean(self):
         artifacts = self.list_artifacts()
 
-        while self.calculate_cache_size() >= self.cache_quota - self.cache_lower_threshold:
+        # Build a set of the cache keys which are required
+        # based on the required elements at cleanup time
+        #
+        # We lock both strong and weak keys - deleting one but not the
+        # other won't save space, but would be a user inconvenience.
+        required_artifacts = set()
+        for element in self._required_elements:
+            required_artifacts.update([
+                element._get_cache_key(strength=_KeyStrength.STRONG),
+                element._get_cache_key(strength=_KeyStrength.WEAK)
+            ])
+
+        # Do a real computation of the cache size once, just in case
+        self.compute_cache_size()
+
+        while self.get_cache_size() >= self._cache_lower_threshold:
             try:
                 to_remove = artifacts.pop(0)
             except IndexError:
@@ -245,7 +283,7 @@ class ArtifactCache():
                           "Please increase the cache-quota in {}."
                           .format(self.context.config_origin or default_conf))
 
-                if self.calculate_cache_size() > self.cache_quota:
+                if self.has_quota_exceeded():
                     raise ArtifactError("Cache too full. Aborting.",
                                         detail=detail,
                                         reason="cache-too-full")
@@ -253,60 +291,116 @@ class ArtifactCache():
                     break
 
             key = to_remove.rpartition('/')[2]
-            if key not in self.required_artifacts:
+            if key not in required_artifacts:
+
+                # Remove the actual artifact, if it's not required.
                 size = self.remove(to_remove)
-                if size:
-                    self.cache_size -= size
+
+                # Remove the size from the removed size
+                self.set_cache_size(self._cache_size - size)
 
         # This should be O(1) if implemented correctly
-        return self.calculate_cache_size()
+        return self.get_cache_size()
+
+    # compute_cache_size()
+    #
+    # Computes the real artifact cache size by calling
+    # the abstract calculate_cache_size() method.
+    #
+    # Returns:
+    #    (int): The size of the artifact cache.
+    #
+    def compute_cache_size(self):
+        self._cache_size = self.calculate_cache_size()
+
+        return self._cache_size
 
-    # get_approximate_cache_size()
+    # add_artifact_size()
     #
-    # A cheap method that aims to serve as an upper limit on the
-    # artifact cache size.
+    # Adds the reported size of a newly cached artifact to the
+    # overall estimated size.
+    #
+    # Args:
+    #     artifact_size (int): The size to add.
     #
-    # The cache size reported by this function will normally be larger
-    # than the real cache size, since it is calculated using the
-    # pre-commit artifact size, but for very small artifacts in
-    # certain caches additional overhead could cause this to be
-    # smaller than, but close to, the actual size.
+    def add_artifact_size(self, artifact_size):
+        cache_size = self.get_cache_size()
+        cache_size += artifact_size
+
+        self.set_cache_size(cache_size)
+
+    # get_cache_size()
     #
-    # Nonetheless, in practice this should be safe to use as an upper
-    # limit on the cache size.
+    # Fetches the cached size of the cache, this is sometimes
+    # an estimate and periodically adjusted to the real size
+    # when a cache size calculation job runs.
     #
-    # If the cache has built-in constant-time size reporting, please
-    # feel free to override this method with a more accurate
-    # implementation.
+    # When it is an estimate, the value is either correct, or
+    # it is greater than the actual cache size.
     #
     # Returns:
     #     (int) An approximation of the artifact cache size.
     #
-    def get_approximate_cache_size(self):
-        # If we don't currently have an estimate, figure out the real
-        # cache size.
-        if self.estimated_size is None:
+    def get_cache_size(self):
+
+        # If we don't currently have an estimate, figure out the real cache size.
+        if self._cache_size is None:
             stored_size = self._read_cache_size()
             if stored_size is not None:
-                self.estimated_size = stored_size
+                self._cache_size = stored_size
             else:
-                self.estimated_size = self.calculate_cache_size()
+                self.compute_cache_size()
 
-        return self.estimated_size
+        return self._cache_size
+
+    # set_cache_size()
+    #
+    # Forcefully set the overall cache size.
+    #
+    # This is used to update the size in the main process after
+    # having calculated in a cleanup or a cache size calculation job.
+    #
+    # Args:
+    #     cache_size (int): The size to set.
+    #
+    def set_cache_size(self, cache_size):
+
+        assert cache_size is not None
+
+        self._cache_size = cache_size
+        self._write_cache_size(self._cache_size)
+
+    # has_quota_exceeded()
+    #
+    # Checks if the current artifact cache size exceeds the quota.
+    #
+    # Returns:
+    #    (bool): True if the quota is exceeded
+    #
+    def has_quota_exceeded(self):
+        return self.get_cache_size() > self._cache_quota
 
     ################################################
     # Abstract methods for subclasses to implement #
     ################################################
 
-    # update_atime()
+    # preflight():
+    #
+    # Preflight check.
+    #
+    def preflight(self):
+        pass
+
+    # update_mtime()
     #
-    # Update the atime of an artifact.
+    # Update the mtime of an artifact.
     #
     # Args:
+    #     element (Element): The Element to update
     #     key (str): The key of the artifact.
     #
-    def update_atime(self, key):
-        raise ImplError("Cache '{kind}' does not implement contains()"
+    def update_mtime(self, element, key):
+        raise ImplError("Cache '{kind}' does not implement update_mtime()"
                         .format(kind=type(self).__name__))
 
     # initialize_remotes():
@@ -484,11 +578,8 @@ class ArtifactCache():
     #
     # Return the real artifact cache size.
     #
-    # Implementations should also use this to update estimated_size.
-    #
     # Returns:
-    #
-    # (int) The size of the artifact cache.
+    #    (int): The size of the artifact cache.
     #
     def calculate_cache_size(self):
         raise ImplError("Cache '{kind}' does not implement calculate_cache_size()"
@@ -535,43 +626,17 @@ class ArtifactCache():
         with self.context.timed_activity("Initializing remote caches", silent_nested=True):
             self.initialize_remotes(on_failure=remote_failed)
 
-    # _add_artifact_size()
-    #
-    # Since we cannot keep track of the cache size between threads,
-    # this method will be called by the main process every time a
-    # process that added something to the cache finishes.
-    #
-    # This will then add the reported size to
-    # ArtifactCache.estimated_size.
-    #
-    def _add_artifact_size(self, artifact_size):
-        if not self.estimated_size:
-            self.estimated_size = self.calculate_cache_size()
-
-        self.estimated_size += artifact_size
-        self._write_cache_size(self.estimated_size)
-
-    # _set_cache_size()
-    #
-    # Similarly to the above method, when we calculate the actual size
-    # in a child thread, we can't update it. We instead pass the value
-    # back to the main thread and update it there.
-    #
-    def _set_cache_size(self, cache_size):
-        self.estimated_size = cache_size
-
-        # set_cache_size is called in cleanup, where it may set the cache to None
-        if self.estimated_size is not None:
-            self._write_cache_size(self.estimated_size)
-
     # _write_cache_size()
     #
     # Writes the given size of the artifact to the cache's size file
     #
+    # Args:
+    #    size (int): The size of the artifact cache to record
+    #
     def _write_cache_size(self, size):
         assert isinstance(size, int)
         size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE)
-        with open(size_file_path, "w") as f:
+        with utils.save_file_atomic(size_file_path, "w") as f:
             f.write(str(size))
 
     # _read_cache_size()
@@ -579,6 +644,9 @@ class ArtifactCache():
     # Reads and returns the size of the artifact cache that's stored in the
     # cache's size file
     #
+    # Returns:
+    #    (int): The size of the artifact cache, as recorded in the file
+    #
     def _read_cache_size(self):
         size_file_path = os.path.join(self.context.artifactdir, CACHE_SIZE_FILE)
 
@@ -628,13 +696,13 @@ class ArtifactCache():
         stat = os.statvfs(artifactdir_volume)
         available_space = (stat.f_bsize * stat.f_bavail)
 
-        cache_size = self.get_approximate_cache_size()
+        cache_size = self.get_cache_size()
 
         # Ensure system has enough storage for the cache_quota
         #
         # If cache_quota is none, set it to the maximum it could possibly be.
         #
-        # Also check that cache_quota is atleast as large as our headroom.
+        # Also check that cache_quota is at least as large as our headroom.
         #
         if cache_quota is None:  # Infinity, set to max system storage
             cache_quota = cache_size + available_space
@@ -660,8 +728,8 @@ class ArtifactCache():
         # if we end up writing more than 2G, but hey, this stuff is
         # already really fuzzy.
         #
-        self.cache_quota = cache_quota - headroom
-        self.cache_lower_threshold = self.cache_quota / 2
+        self._cache_quota = cache_quota - headroom
+        self._cache_lower_threshold = self._cache_quota / 2
 
 
 # _configured_remote_artifact_cache_specs():
diff --git a/buildstream/_artifactcache/cascache.py b/buildstream/_artifactcache/cascache.py
index 6db10107edf7bee20b4c482f14ca4a1ff9a11b4e..2454366ac0dafd3335e4f04566931453210dc470 100644
--- a/buildstream/_artifactcache/cascache.py
+++ b/buildstream/_artifactcache/cascache.py
@@ -19,6 +19,7 @@
 
 import hashlib
 import itertools
+import io
 import multiprocessing
 import os
 import signal
@@ -30,6 +31,8 @@ from urllib.parse import urlparse
 
 import grpc
 
+from .. import _yaml
+
 from .._protos.google.bytestream import bytestream_pb2, bytestream_pb2_grpc
 from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
 from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
@@ -41,12 +44,16 @@ from .._exceptions import ArtifactError
 from . import ArtifactCache
 
 
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
+
+
 # A CASCache manages artifacts in a CAS repository as specified in the
 # Remote Execution API.
 #
 # Args:
 #     context (Context): The BuildStream context
-#     enable_push (bool): Whether pushing is allowed by the platform
 #
 # Pushing is explicitly disabled by the platform in some cases,
 # like when we are falling back to functioning without using
@@ -54,7 +61,7 @@ from . import ArtifactCache
 #
 class CASCache(ArtifactCache):
 
-    def __init__(self, context, *, enable_push=True):
+    def __init__(self, context):
         super().__init__(context)
 
         self.casdir = os.path.join(context.artifactdir, 'cas')
@@ -63,8 +70,6 @@ class CASCache(ArtifactCache):
 
         self._calculate_cache_quota()
 
-        self._enable_push = enable_push
-
         # Per-project list of _CASRemote instances.
         self._remotes = {}
 
@@ -74,6 +79,13 @@ class CASCache(ArtifactCache):
     ################################################
     #     Implementation of abstract methods       #
     ################################################
+
+    def preflight(self):
+        if (not os.path.isdir(os.path.join(self.casdir, 'refs', 'heads')) or
+            not os.path.isdir(os.path.join(self.casdir, 'objects'))):
+            raise ArtifactError("CAS repository check failed for '{}'"
+                                .format(self.casdir))
+
     def contains(self, element, key):
         refpath = self._refpath(self.get_artifact_fullname(element, key))
 
@@ -113,13 +125,11 @@ class CASCache(ArtifactCache):
     def commit(self, element, content, keys):
         refs = [self.get_artifact_fullname(element, key) for key in keys]
 
-        tree = self._create_tree(content)
+        tree = self._commit_directory(content)
 
         for ref in refs:
             self.set_ref(ref, tree)
 
-        self.cache_size = None
-
     def diff(self, element, key_a, key_b, *, subdir=None):
         ref_a = self.get_artifact_fullname(element, key_a)
         ref_b = self.get_artifact_fullname(element, key_b)
@@ -151,6 +161,7 @@ class CASCache(ArtifactCache):
         q = multiprocessing.Queue()
         for remote_spec in remote_specs:
             # Use subprocess to avoid creation of gRPC threads in main BuildStream process
+            # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
             p = multiprocessing.Process(target=self._initialize_remote, args=(remote_spec, q))
 
             try:
@@ -206,7 +217,7 @@ class CASCache(ArtifactCache):
             return bool(remotes_for_project)
 
     def has_push_remotes(self, *, element=None):
-        if not self._has_push_remotes or not self._enable_push:
+        if not self._has_push_remotes:
             # No project has push remotes
             return False
         elif element is None:
@@ -225,8 +236,8 @@ class CASCache(ArtifactCache):
         for remote in self._remotes[project]:
             try:
                 remote.init()
-
-                element.info("Pulling {} <- {}".format(element._get_brief_display_key(), remote.spec.url))
+                display_key = element._get_brief_display_key()
+                element.status("Pulling artifact {} <- {}".format(display_key, remote.spec.url))
 
                 request = buildstream_pb2.GetReferenceRequest()
                 request.key = ref
@@ -240,6 +251,7 @@ class CASCache(ArtifactCache):
 
                 self.set_ref(ref, tree)
 
+                element.info("Pulled artifact {} <- {}".format(display_key, remote.spec.url))
                 # no need to pull from additional remotes
                 return True
 
@@ -247,9 +259,32 @@ class CASCache(ArtifactCache):
                 if e.code() != grpc.StatusCode.NOT_FOUND:
                     raise ArtifactError("Failed to pull artifact {}: {}".format(
                         element._get_brief_display_key(), e)) from e
+                else:
+                    element.info("Remote ({}) does not have {} cached".format(
+                        remote.spec.url, element._get_brief_display_key()
+                    ))
 
         return False
 
+    def pull_tree(self, project, digest):
+        """ Pull a single Tree rather than an artifact.
+        Does not update local refs. """
+
+        for remote in self._remotes[project]:
+            try:
+                remote.init()
+
+                digest = self._fetch_tree(remote, digest)
+
+                # no need to pull from additional remotes
+                return digest
+
+            except grpc.RpcError as e:
+                if e.code() != grpc.StatusCode.NOT_FOUND:
+                    raise
+
+        return None
+
     def link_key(self, element, oldkey, newkey):
         oldref = self.get_artifact_fullname(element, oldkey)
         newref = self.get_artifact_fullname(element, newkey)
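
Editor's note: pull_tree() above follows the same first-hit-wins pattern as pull(): try each configured remote in order, treat NOT_FOUND as "try the next one", and propagate every other error. Stripped of the gRPC specifics, the control flow is roughly the following sketch, where NotFound stands in for grpc.StatusCode.NOT_FOUND:

    class NotFound(Exception):
        """Stand-in for a NOT_FOUND RPC error in this sketch."""

    def fetch_from_first_remote(remotes, fetch):
        # 'fetch' is any callable that either returns a result or raises NotFound
        for remote in remotes:
            try:
                return fetch(remote)
            except NotFound:
                continue  # this remote does not have it, fall through to the next
        return None  # nothing found anywhere
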
@@ -258,8 +293,46 @@ class CASCache(ArtifactCache):
 
         self.set_ref(newref, tree)
 
+    def _push_refs_to_remote(self, refs, remote):
+        skipped_remote = True
+        try:
+            for ref in refs:
+                tree = self.resolve_ref(ref)
+
+                # Check whether the ref is already on the server, in which case
+                # there is no need to push the artifact
+                try:
+                    request = buildstream_pb2.GetReferenceRequest()
+                    request.key = ref
+                    response = remote.ref_storage.GetReference(request)
+
+                    if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
+                        # ref is already on the server with the same tree
+                        continue
+
+                except grpc.RpcError as e:
+                    if e.code() != grpc.StatusCode.NOT_FOUND:
+                        # Intentionally re-raise RpcError for outer except block.
+                        raise
+
+                self._send_directory(remote, tree)
+
+                request = buildstream_pb2.UpdateReferenceRequest()
+                request.keys.append(ref)
+                request.digest.hash = tree.hash
+                request.digest.size_bytes = tree.size_bytes
+                remote.ref_storage.UpdateReference(request)
+
+                skipped_remote = False
+        except grpc.RpcError as e:
+            if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
+                raise ArtifactError("Failed to push artifact {}: {}".format(refs, e), temporary=True) from e
+
+        return not skipped_remote
+
     def push(self, element, keys):
-        refs = [self.get_artifact_fullname(element, key) for key in keys]
+
+        refs = [self.get_artifact_fullname(element, key) for key in list(keys)]
 
         project = element._get_project()
 
@@ -269,95 +342,88 @@ class CASCache(ArtifactCache):
 
         for remote in push_remotes:
             remote.init()
-            skipped_remote = True
-            element.info("Pushing {} -> {}".format(element._get_brief_display_key(), remote.spec.url))
-
-            try:
-                for ref in refs:
-                    tree = self.resolve_ref(ref)
-
-                    # Check whether ref is already on the server in which case
-                    # there is no need to push the artifact
-                    try:
-                        request = buildstream_pb2.GetReferenceRequest()
-                        request.key = ref
-                        response = remote.ref_storage.GetReference(request)
-
-                        if response.digest.hash == tree.hash and response.digest.size_bytes == tree.size_bytes:
-                            # ref is already on the server with the same tree
-                            continue
-
-                    except grpc.RpcError as e:
-                        if e.code() != grpc.StatusCode.NOT_FOUND:
-                            # Intentionally re-raise RpcError for outer except block.
-                            raise
-
-                    missing_blobs = {}
-                    required_blobs = self._required_blobs(tree)
-
-                    # Limit size of FindMissingBlobs request
-                    for required_blobs_group in _grouper(required_blobs, 512):
-                        request = remote_execution_pb2.FindMissingBlobsRequest()
-
-                        for required_digest in required_blobs_group:
-                            d = request.blob_digests.add()
-                            d.hash = required_digest.hash
-                            d.size_bytes = required_digest.size_bytes
-
-                        response = remote.cas.FindMissingBlobs(request)
-                        for digest in response.missing_blob_digests:
-                            d = remote_execution_pb2.Digest()
-                            d.hash = digest.hash
-                            d.size_bytes = digest.size_bytes
-                            missing_blobs[d.hash] = d
-
-                    # Upload any blobs missing on the server
-                    skipped_remote = False
-                    for digest in missing_blobs.values():
-                        uuid_ = uuid.uuid4()
-                        resource_name = '/'.join(['uploads', str(uuid_), 'blobs',
-                                                  digest.hash, str(digest.size_bytes)])
-
-                        def request_stream(resname):
-                            with open(self.objpath(digest), 'rb') as f:
-                                assert os.fstat(f.fileno()).st_size == digest.size_bytes
-                                offset = 0
-                                finished = False
-                                remaining = digest.size_bytes
-                                while not finished:
-                                    chunk_size = min(remaining, 64 * 1024)
-                                    remaining -= chunk_size
-
-                                    request = bytestream_pb2.WriteRequest()
-                                    request.write_offset = offset
-                                    # max. 64 kB chunks
-                                    request.data = f.read(chunk_size)
-                                    request.resource_name = resname
-                                    request.finish_write = remaining <= 0
-                                    yield request
-                                    offset += chunk_size
-                                    finished = request.finish_write
-                        response = remote.bytestream.Write(request_stream(resource_name))
-
-                    request = buildstream_pb2.UpdateReferenceRequest()
-                    request.keys.append(ref)
-                    request.digest.hash = tree.hash
-                    request.digest.size_bytes = tree.size_bytes
-                    remote.ref_storage.UpdateReference(request)
-
-                    pushed = True
+            display_key = element._get_brief_display_key()
+            element.status("Pushing artifact {} -> {}".format(display_key, remote.spec.url))
 
-            except grpc.RpcError as e:
-                if e.code() != grpc.StatusCode.RESOURCE_EXHAUSTED:
-                    raise ArtifactError("Failed to push artifact {}: {}".format(refs, e), temporary=True) from e
-
-            if skipped_remote:
-                self.context.message(Message(
-                    None,
-                    MessageType.SKIPPED,
-                    "Remote ({}) already has {} cached".format(
-                        remote.spec.url, element._get_brief_display_key())
+            if self._push_refs_to_remote(refs, remote):
+                element.info("Pushed artifact {} -> {}".format(display_key, remote.spec.url))
+                pushed = True
+            else:
+                element.info("Remote ({}) already has {} cached".format(
+                    remote.spec.url, element._get_brief_display_key()
                 ))
+
+        return pushed
+
+    def push_directory(self, project, directory):
+        """ Push the given virtual directory to all remotes.
+
+        Args:
+            project (Project): The current project
+            directory (Directory): A virtual directory object to push.
+
+        Raises: ArtifactError if no push remotes are configured.
+        """
+
+        if self._has_push_remotes:
+            push_remotes = [r for r in self._remotes[project] if r.spec.push]
+        else:
+            push_remotes = []
+
+        if not push_remotes:
+            raise ArtifactError("CASCache: push_directory was called, but no remote artifact " +
+                                "servers are configured as push remotes.")
+
+        if directory.ref is None:
+            return
+
+        for remote in push_remotes:
+            remote.init()
+
+            self._send_directory(remote, directory.ref)
+
+    def push_message(self, project, message):
+
+        push_remotes = [r for r in self._remotes[project] if r.spec.push]
+
+        message_buffer = message.SerializeToString()
+        message_sha = hashlib.sha256(message_buffer)
+        message_digest = remote_execution_pb2.Digest()
+        message_digest.hash = message_sha.hexdigest()
+        message_digest.size_bytes = len(message_buffer)
+
+        for remote in push_remotes:
+            remote.init()
+
+            with io.BytesIO(message_buffer) as b:
+                self._send_blob(remote, message_digest, b)
+
+        return message_digest
+
+    def _verify_digest_on_remote(self, remote, digest):
+        # Check whether the blob is already on the server, in which case
+        # there is no need to push it again
+        request = remote_execution_pb2.FindMissingBlobsRequest()
+        request.blob_digests.extend([digest])
+
+        response = remote.cas.FindMissingBlobs(request)
+        if digest in response.missing_blob_digests:
+            return False
+
+        return True
+
+    def verify_digest_pushed(self, project, digest):
+
+        push_remotes = [r for r in self._remotes[project] if r.spec.push]
+
+        pushed = False
+
+        for remote in push_remotes:
+            remote.init()
+
+            if self._verify_digest_on_remote(remote, digest):
+                pushed = True
+
         return pushed
 
     ################################################
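
Editor's note: push_message() above derives the CAS digest of an arbitrary protobuf message by hashing its serialized form. The same computation works for any byte string; a sketch using literal bytes in place of message.SerializeToString(), with Digest again a stand-in for the protobuf type:

    import hashlib
    from collections import namedtuple

    Digest = namedtuple('Digest', ['hash', 'size_bytes'])

    def digest_for_buffer(buffer):
        # CAS addresses content by the SHA-256 of the exact bytes plus their length
        return Digest(hash=hashlib.sha256(buffer).hexdigest(),
                      size_bytes=len(buffer))

    # Any serialized message (here just literal bytes) gets a stable address:
    print(digest_for_buffer(b'example payload'))
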
@@ -443,7 +509,7 @@ class CASCache(ArtifactCache):
     def set_ref(self, ref, tree):
         refpath = self._refpath(ref)
         os.makedirs(os.path.dirname(refpath), exist_ok=True)
-        with utils.save_file_atomic(refpath, 'wb') as f:
+        with utils.save_file_atomic(refpath, 'wb', tempdir=self.tmpdir) as f:
             f.write(tree.SerializeToString())
 
     # resolve_ref():
@@ -472,18 +538,15 @@ class CASCache(ArtifactCache):
         except FileNotFoundError as e:
             raise ArtifactError("Attempt to access unavailable artifact: {}".format(e)) from e
 
-    def update_atime(self, ref):
+    def update_mtime(self, element, key):
         try:
+            ref = self.get_artifact_fullname(element, key)
             os.utime(self._refpath(ref))
         except FileNotFoundError as e:
             raise ArtifactError("Attempt to access unavailable artifact: {}".format(e)) from e
 
     def calculate_cache_size(self):
-        if self.cache_size is None:
-            self.cache_size = utils._get_dir_size(self.casdir)
-            self.estimated_size = self.cache_size
-
-        return self.cache_size
+        return utils._get_dir_size(self.casdir)
 
     # list_artifacts():
     #
@@ -526,6 +589,25 @@ class CASCache(ArtifactCache):
     #
     def remove(self, ref, *, defer_prune=False):
 
+        # Remove extract if not used by other ref
+        tree = self.resolve_ref(ref)
+        ref_name, ref_hash = os.path.split(ref)
+        extract = os.path.join(self.extractdir, ref_name, tree.hash)
+        keys_file = os.path.join(extract, 'meta', 'keys.yaml')
+        if os.path.exists(keys_file):
+            keys_meta = _yaml.load(keys_file)
+            keys = [keys_meta['strong'], keys_meta['weak']]
+            remove_extract = True
+            for other_hash in keys:
+                if other_hash == ref_hash:
+                    continue
+                remove_extract = False
+                break
+
+            if remove_extract:
+                utils._force_rmtree(extract)
+
+        # Remove cache ref
         refpath = self._refpath(ref)
         if not os.path.exists(refpath):
             raise ArtifactError("Could not find artifact for ref '{}'".format(ref))
@@ -571,6 +653,7 @@ class CASCache(ArtifactCache):
     ################################################
     #             Local Private Methods            #
     ################################################
+
     def _checkout(self, dest, tree):
         os.makedirs(dest, exist_ok=True)
 
@@ -600,7 +683,21 @@ class CASCache(ArtifactCache):
     def _refpath(self, ref):
         return os.path.join(self.casdir, 'refs', 'heads', ref)
 
-    def _create_tree(self, path, *, digest=None):
+    # _commit_directory():
+    #
+    # Adds local directory to content addressable store.
+    #
+    # Adds files, symbolic links and recursively other directories in
+    # a local directory to the content addressable store.
+    #
+    # Args:
+    #     path (str): Path to the directory to add.
+    #     dir_digest (Digest): An optional Digest object to use.
+    #
+    # Returns:
+    #     (Digest): Digest object for the directory added.
+    #
+    def _commit_directory(self, path, *, dir_digest=None):
         directory = remote_execution_pb2.Directory()
 
         for name in sorted(os.listdir(path)):
@@ -609,7 +706,7 @@ class CASCache(ArtifactCache):
             if stat.S_ISDIR(mode):
                 dirnode = directory.directories.add()
                 dirnode.name = name
-                self._create_tree(full_path, digest=dirnode.digest)
+                self._commit_directory(full_path, dir_digest=dirnode.digest)
             elif stat.S_ISREG(mode):
                 filenode = directory.files.add()
                 filenode.name = name
@@ -619,10 +716,14 @@ class CASCache(ArtifactCache):
                 symlinknode = directory.symlinks.add()
                 symlinknode.name = name
                 symlinknode.target = os.readlink(full_path)
+            elif stat.S_ISSOCK(mode):
+                # The process serving the socket can't be cached anyway
+                pass
             else:
                 raise ArtifactError("Unsupported file type for {}".format(full_path))
 
-        return self.add_object(digest=digest, buffer=directory.SerializeToString())
+        return self.add_object(digest=dir_digest,
+                               buffer=directory.SerializeToString())
 
     def _get_subdir(self, tree, subdir):
         head, name = os.path.split(subdir)
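
Editor's note: _commit_directory() above builds a protobuf Directory message bottom-up and stores it in CAS, so a directory's digest transitively pins every file, symlink and subdirectory beneath it. The sketch below shows the same Merkle-style idea without the Remote Execution protos, using a simple canonical text encoding; it is purely illustrative and not the wire format used here:

    import hashlib
    import os
    import stat

    def directory_digest(path):
        # Hash a canonical listing in which each child is represented by its own
        # digest, so any change anywhere below 'path' changes the digest of 'path'.
        entries = []
        for name in sorted(os.listdir(path)):
            full_path = os.path.join(path, name)
            mode = os.lstat(full_path).st_mode
            if stat.S_ISDIR(mode):
                entries.append('dir {} {}'.format(name, directory_digest(full_path)))
            elif stat.S_ISREG(mode):
                with open(full_path, 'rb') as f:
                    entries.append('file {} {}'.format(name, hashlib.sha256(f.read()).hexdigest()))
            elif stat.S_ISLNK(mode):
                entries.append('link {} {}'.format(name, os.readlink(full_path)))
            # Sockets and other special files are skipped, as in the hunk above
        return hashlib.sha256('\n'.join(entries).encode('utf-8')).hexdigest()
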
@@ -733,16 +834,16 @@ class CASCache(ArtifactCache):
             #
             q.put(str(e))
 
-    def _required_blobs(self, tree):
+    def _required_blobs(self, directory_digest):
         # parse directory, and recursively add blobs
         d = remote_execution_pb2.Digest()
-        d.hash = tree.hash
-        d.size_bytes = tree.size_bytes
+        d.hash = directory_digest.hash
+        d.size_bytes = directory_digest.size_bytes
         yield d
 
         directory = remote_execution_pb2.Directory()
 
-        with open(self.objpath(tree), 'rb') as f:
+        with open(self.objpath(directory_digest), 'rb') as f:
             directory.ParseFromString(f.read())
 
         for filenode in directory.files:
@@ -754,50 +855,226 @@ class CASCache(ArtifactCache):
         for dirnode in directory.directories:
             yield from self._required_blobs(dirnode.digest)
 
-    def _fetch_blob(self, remote, digest, out):
+    def _fetch_blob(self, remote, digest, stream):
         resource_name = '/'.join(['blobs', digest.hash, str(digest.size_bytes)])
         request = bytestream_pb2.ReadRequest()
         request.resource_name = resource_name
         request.read_offset = 0
         for response in remote.bytestream.Read(request):
-            out.write(response.data)
+            stream.write(response.data)
+        stream.flush()
 
-        out.flush()
-        assert digest.size_bytes == os.fstat(out.fileno()).st_size
+        assert digest.size_bytes == os.fstat(stream.fileno()).st_size
 
-    def _fetch_directory(self, remote, tree):
-        objpath = self.objpath(tree)
+    # _ensure_blob():
+    #
+    # Fetch and add blob if it's not already local.
+    #
+    # Args:
+    #     remote (Remote): The remote to use.
+    #     digest (Digest): Digest object for the blob to fetch.
+    #
+    # Returns:
+    #     (str): The path of the object
+    #
+    def _ensure_blob(self, remote, digest):
+        objpath = self.objpath(digest)
         if os.path.exists(objpath):
-            # already in local cache
-            return
+            # already in local repository
+            return objpath
 
-        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
-            self._fetch_blob(remote, tree, out)
+        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
+            self._fetch_blob(remote, digest, f)
 
-            directory = remote_execution_pb2.Directory()
+            added_digest = self.add_object(path=f.name)
+            assert added_digest.hash == digest.hash
 
-            with open(out.name, 'rb') as f:
+        return objpath
+
+    def _batch_download_complete(self, batch):
+        for digest, data in batch.send():
+            with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
+                f.write(data)
+                f.flush()
+
+                added_digest = self.add_object(path=f.name)
+                assert added_digest.hash == digest.hash
+
+    # Helper function for _fetch_directory().
+    def _fetch_directory_batch(self, remote, batch, fetch_queue, fetch_next_queue):
+        self._batch_download_complete(batch)
+
+        # All previously scheduled directories are now locally available,
+        # move them to the processing queue.
+        fetch_queue.extend(fetch_next_queue)
+        fetch_next_queue.clear()
+        return _CASBatchRead(remote)
+
+    # Helper function for _fetch_directory().
+    def _fetch_directory_node(self, remote, digest, batch, fetch_queue, fetch_next_queue, *, recursive=False):
+        in_local_cache = os.path.exists(self.objpath(digest))
+
+        if in_local_cache:
+            # Skip download, already in local cache.
+            pass
+        elif (digest.size_bytes >= remote.max_batch_total_size_bytes or
+                not remote.batch_read_supported):
+            # Too large for batch request, download in independent request.
+            self._ensure_blob(remote, digest)
+            in_local_cache = True
+        else:
+            if not batch.add(digest):
+                # Not enough space left in batch request.
+                # Complete pending batch first.
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+                batch.add(digest)
+
+        if recursive:
+            if in_local_cache:
+                # Add directory to processing queue.
+                fetch_queue.append(digest)
+            else:
+                # Directory will be available after completing pending batch.
+                # Add directory to deferred processing queue.
+                fetch_next_queue.append(digest)
+
+        return batch
+
+    # _fetch_directory():
+    #
+    # Fetches remote directory and adds it to content addressable store.
+    #
+    # Fetches files, symbolic links and recursively other directories in
+    # the remote directory and adds them to the content addressable
+    # store.
+    #
+    # Args:
+    #     remote (Remote): The remote to use.
+    #     dir_digest (Digest): Digest object for the directory to fetch.
+    #
+    def _fetch_directory(self, remote, dir_digest):
+        fetch_queue = [dir_digest]
+        fetch_next_queue = []
+        batch = _CASBatchRead(remote)
+
+        while len(fetch_queue) + len(fetch_next_queue) > 0:
+            if len(fetch_queue) == 0:
+                batch = self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
+
+            dir_digest = fetch_queue.pop(0)
+
+            objpath = self._ensure_blob(remote, dir_digest)
+
+            directory = remote_execution_pb2.Directory()
+            with open(objpath, 'rb') as f:
                 directory.ParseFromString(f.read())
 
+            for dirnode in directory.directories:
+                batch = self._fetch_directory_node(remote, dirnode.digest, batch,
+                                                   fetch_queue, fetch_next_queue, recursive=True)
+
             for filenode in directory.files:
-                fileobjpath = self.objpath(tree)
-                if os.path.exists(fileobjpath):
-                    # already in local cache
-                    continue
+                batch = self._fetch_directory_node(remote, filenode.digest, batch,
+                                                   fetch_queue, fetch_next_queue)
 
-                with tempfile.NamedTemporaryFile(dir=self.tmpdir) as f:
-                    self._fetch_blob(remote, filenode.digest, f)
+        # Fetch final batch
+        self._fetch_directory_batch(remote, batch, fetch_queue, fetch_next_queue)
 
-                    digest = self.add_object(path=f.name)
-                    assert digest.hash == filenode.digest.hash
+    def _fetch_tree(self, remote, digest):
+        # download but do not store the Tree object
+        with tempfile.NamedTemporaryFile(dir=self.tmpdir) as out:
+            self._fetch_blob(remote, digest, out)
 
-            for dirnode in directory.directories:
-                self._fetch_directory(remote, dirnode.digest)
+            tree = remote_execution_pb2.Tree()
+
+            with open(out.name, 'rb') as f:
+                tree.ParseFromString(f.read())
+
+            tree.children.extend([tree.root])
+            for directory in tree.children:
+                for filenode in directory.files:
+                    self._ensure_blob(remote, filenode.digest)
+
+                # Place the directory blob in its final location only once we have
+                # downloaded all referenced blobs, to avoid dangling references in the repository
+                dirbuffer = directory.SerializeToString()
+                dirdigest = self.add_object(buffer=dirbuffer)
+                assert dirdigest.size_bytes == len(dirbuffer)
+
+        return dirdigest
+
+    def _send_blob(self, remote, digest, stream, u_uid=uuid.uuid4()):
+        resource_name = '/'.join(['uploads', str(u_uid), 'blobs',
+                                  digest.hash, str(digest.size_bytes)])
+
+        def request_stream(resname, instream):
+            offset = 0
+            finished = False
+            remaining = digest.size_bytes
+            while not finished:
+                chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
+                remaining -= chunk_size
+
+                request = bytestream_pb2.WriteRequest()
+                request.write_offset = offset
+                # max. _MAX_PAYLOAD_BYTES chunks
+                request.data = instream.read(chunk_size)
+                request.resource_name = resname
+                request.finish_write = remaining <= 0
+
+                yield request
+
+                offset += chunk_size
+                finished = request.finish_write
+
+        response = remote.bytestream.Write(request_stream(resource_name, stream))
+
+        assert response.committed_size == digest.size_bytes
+
+    def _send_directory(self, remote, digest, u_uid=uuid.uuid4()):
+        required_blobs = self._required_blobs(digest)
+
+        missing_blobs = dict()
+        # Limit size of FindMissingBlobs request
+        for required_blobs_group in _grouper(required_blobs, 512):
+            request = remote_execution_pb2.FindMissingBlobsRequest()
+
+            for required_digest in required_blobs_group:
+                d = request.blob_digests.add()
+                d.hash = required_digest.hash
+                d.size_bytes = required_digest.size_bytes
+
+            response = remote.cas.FindMissingBlobs(request)
+            for missing_digest in response.missing_blob_digests:
+                d = remote_execution_pb2.Digest()
+                d.hash = missing_digest.hash
+                d.size_bytes = missing_digest.size_bytes
+                missing_blobs[d.hash] = d
+
+        # Upload any blobs missing on the server
+        self._send_blobs(remote, missing_blobs.values(), u_uid)
+
+    def _send_blobs(self, remote, digests, u_uid=uuid.uuid4()):
+        batch = _CASBatchUpdate(remote)
+
+        for digest in digests:
+            with open(self.objpath(digest), 'rb') as f:
+                assert os.fstat(f.fileno()).st_size == digest.size_bytes
+
+                if (digest.size_bytes >= remote.max_batch_total_size_bytes or
+                        not remote.batch_update_supported):
+                    # Too large for batch request, upload in independent request.
+                    self._send_blob(remote, digest, f, u_uid=u_uid)
+                else:
+                    if not batch.add(digest, f):
+                        # Not enough space left in batch request.
+                        # Complete pending batch first.
+                        batch.send()
+                        batch = _CASBatchUpdate(remote)
+                        batch.add(digest, f)
 
-            # place directory blob only in final location when we've downloaded
-            # all referenced blobs to avoid dangling references in the repository
-            digest = self.add_object(path=out.name)
-            assert digest.hash == tree.hash
+        # Send final batch
+        batch.send()
 
 
 # Represents a single remote CAS cache.
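
Editor's note: _send_blobs() above (and _fetch_directory_node() on the download side) makes the same decision for every blob: send it in its own ByteStream request if it is too large for a batch, otherwise pack it into the current batch and flush the batch once it would overflow. A stripped-down sketch of that accounting, assuming the 1 MiB value from _MAX_PAYLOAD_BYTES as the batch ceiling:

    MAX_BATCH_TOTAL_SIZE = 1024 * 1024  # mirrors _MAX_PAYLOAD_BYTES above

    def plan_transfers(blob_sizes):
        # Returns (individual, batches): blobs too big for any batch go out alone,
        # everything else is greedily packed into batches under the size ceiling.
        individual = []
        batches = []
        current, current_size = [], 0

        for size in blob_sizes:
            if size >= MAX_BATCH_TOTAL_SIZE:
                individual.append(size)
            elif current_size + size > MAX_BATCH_TOTAL_SIZE:
                batches.append(current)            # flush the full batch first
                current, current_size = [size], size
            else:
                current.append(size)
                current_size += size

        if current:
            batches.append(current)                # send the final, partial batch
        return individual, batches
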
@@ -847,11 +1124,129 @@ class _CASRemote():
 
             self.bytestream = bytestream_pb2_grpc.ByteStreamStub(self.channel)
             self.cas = remote_execution_pb2_grpc.ContentAddressableStorageStub(self.channel)
+            self.capabilities = remote_execution_pb2_grpc.CapabilitiesStub(self.channel)
             self.ref_storage = buildstream_pb2_grpc.ReferenceStorageStub(self.channel)
 
+            self.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
+            try:
+                request = remote_execution_pb2.GetCapabilitiesRequest()
+                response = self.capabilities.GetCapabilities(request)
+                server_max_batch_total_size_bytes = response.cache_capabilities.max_batch_total_size_bytes
+                if 0 < server_max_batch_total_size_bytes < self.max_batch_total_size_bytes:
+                    self.max_batch_total_size_bytes = server_max_batch_total_size_bytes
+            except grpc.RpcError as e:
+                # Simply use the defaults for servers that don't implement GetCapabilities()
+                if e.code() != grpc.StatusCode.UNIMPLEMENTED:
+                    raise
+
+            # Check whether the server supports BatchReadBlobs()
+            self.batch_read_supported = False
+            try:
+                request = remote_execution_pb2.BatchReadBlobsRequest()
+                response = self.cas.BatchReadBlobs(request)
+                self.batch_read_supported = True
+            except grpc.RpcError as e:
+                if e.code() != grpc.StatusCode.UNIMPLEMENTED:
+                    raise
+
+            # Check whether the server supports BatchUpdateBlobs()
+            self.batch_update_supported = False
+            try:
+                request = remote_execution_pb2.BatchUpdateBlobsRequest()
+                response = self.cas.BatchUpdateBlobs(request)
+                self.batch_update_supported = True
+            except grpc.RpcError as e:
+                if (e.code() != grpc.StatusCode.UNIMPLEMENTED and
+                        e.code() != grpc.StatusCode.PERMISSION_DENIED):
+                    raise
+
             self._initialized = True
 
 
+# Represents a batch of blobs queued for fetching.
+#
+class _CASBatchRead():
+    def __init__(self, remote):
+        self._remote = remote
+        self._max_total_size_bytes = remote.max_batch_total_size_bytes
+        self._request = remote_execution_pb2.BatchReadBlobsRequest()
+        self._size = 0
+        self._sent = False
+
+    def add(self, digest):
+        assert not self._sent
+
+        new_batch_size = self._size + digest.size_bytes
+        if new_batch_size > self._max_total_size_bytes:
+            # Not enough space left in current batch
+            return False
+
+        request_digest = self._request.digests.add()
+        request_digest.hash = digest.hash
+        request_digest.size_bytes = digest.size_bytes
+        self._size = new_batch_size
+        return True
+
+    def send(self):
+        assert not self._sent
+        self._sent = True
+
+        if len(self._request.digests) == 0:
+            return
+
+        batch_response = self._remote.cas.BatchReadBlobs(self._request)
+
+        for response in batch_response.responses:
+            if response.status.code != grpc.StatusCode.OK.value[0]:
+                raise ArtifactError("Failed to download blob {}: {}".format(
+                    response.digest.hash, response.status.code))
+            if response.digest.size_bytes != len(response.data):
+                raise ArtifactError("Failed to download blob {}: expected {} bytes, received {} bytes".format(
+                    response.digest.hash, response.digest.size_bytes, len(response.data)))
+
+            yield (response.digest, response.data)
+
+
+# Represents a batch of blobs queued for upload.
+#
+class _CASBatchUpdate():
+    def __init__(self, remote):
+        self._remote = remote
+        self._max_total_size_bytes = remote.max_batch_total_size_bytes
+        self._request = remote_execution_pb2.BatchUpdateBlobsRequest()
+        self._size = 0
+        self._sent = False
+
+    def add(self, digest, stream):
+        assert not self._sent
+
+        new_batch_size = self._size + digest.size_bytes
+        if new_batch_size > self._max_total_size_bytes:
+            # Not enough space left in current batch
+            return False
+
+        blob_request = self._request.requests.add()
+        blob_request.digest.hash = digest.hash
+        blob_request.digest.size_bytes = digest.size_bytes
+        blob_request.data = stream.read(digest.size_bytes)
+        self._size = new_batch_size
+        return True
+
+    def send(self):
+        assert not self._sent
+        self._sent = True
+
+        if len(self._request.requests) == 0:
+            return
+
+        batch_response = self._remote.cas.BatchUpdateBlobs(self._request)
+
+        for response in batch_response.responses:
+            if response.status.code != grpc.StatusCode.OK.value[0]:
+                raise ArtifactError("Failed to upload blob {}: {}".format(
+                    response.digest.hash, response.status.code))
+
+
 def _grouper(iterable, n):
     while True:
         try:
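
Editor's note: _CASRemote.init() above starts from the local 1 MiB ceiling and only lowers it when the server advertises a smaller, non-zero max_batch_total_size_bytes; servers that answer GetCapabilities() with UNIMPLEMENTED simply keep the defaults, and batch support is probed the same way. The clamping rule on its own looks like this:

    _MAX_PAYLOAD_BYTES = 1024 * 1024  # local default, as defined near the top of the file

    def effective_batch_limit(server_max_batch_total_size_bytes):
        # 0 means "the server did not state a limit"; anything larger than our own
        # ceiling is ignored because we never build bigger requests anyway.
        limit = _MAX_PAYLOAD_BYTES
        if 0 < server_max_batch_total_size_bytes < limit:
            limit = server_max_batch_total_size_bytes
        return limit

    # e.g. effective_batch_limit(0) keeps 1 MiB, effective_batch_limit(64 * 1024) drops to 64 KiB
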
diff --git a/buildstream/_artifactcache/casserver.py b/buildstream/_artifactcache/casserver.py
index 0af65729b2197f370e00ede2e5bbf3883018dc11..31b05ce0fb8a4117fbe6d32d082c7d7c14b579bd 100644
--- a/buildstream/_artifactcache/casserver.py
+++ b/buildstream/_artifactcache/casserver.py
@@ -35,7 +35,10 @@ from .._protos.buildstream.v2 import buildstream_pb2, buildstream_pb2_grpc
 from .._exceptions import ArtifactError
 from .._context import Context
 
-from .cascache import CASCache
+
+# The default limit for gRPC messages is 4 MiB.
+# Limit payload to 1 MiB to leave sufficient headroom for metadata.
+_MAX_PAYLOAD_BYTES = 1024 * 1024
 
 
 # Trying to push an artifact that is too large
@@ -55,7 +58,7 @@ def create_server(repo, *, enable_push):
     context = Context()
     context.artifactdir = os.path.abspath(repo)
 
-    artifactcache = CASCache(context)
+    artifactcache = context.artifactcache
 
     # Use max_workers default from Python 3.5+
     max_workers = (os.cpu_count() or 1) * 5
@@ -65,7 +68,10 @@ def create_server(repo, *, enable_push):
         _ByteStreamServicer(artifactcache, enable_push=enable_push), server)
 
     remote_execution_pb2_grpc.add_ContentAddressableStorageServicer_to_server(
-        _ContentAddressableStorageServicer(artifactcache), server)
+        _ContentAddressableStorageServicer(artifactcache, enable_push=enable_push), server)
+
+    remote_execution_pb2_grpc.add_CapabilitiesServicer_to_server(
+        _CapabilitiesServicer(), server)
 
     buildstream_pb2_grpc.add_ReferenceStorageServicer_to_server(
         _ReferenceStorageServicer(artifactcache, enable_push=enable_push), server)
@@ -151,7 +157,7 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
 
                 remaining = client_digest.size_bytes - request.read_offset
                 while remaining > 0:
-                    chunk_size = min(remaining, 64 * 1024)
+                    chunk_size = min(remaining, _MAX_PAYLOAD_BYTES)
                     remaining -= chunk_size
 
                     response = bytestream_pb2.ReadResponse()
@@ -216,9 +222,10 @@ class _ByteStreamServicer(bytestream_pb2_grpc.ByteStreamServicer):
 
 
 class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddressableStorageServicer):
-    def __init__(self, cas):
+    def __init__(self, cas, *, enable_push):
         super().__init__()
         self.cas = cas
+        self.enable_push = enable_push
 
     def FindMissingBlobs(self, request, context):
         response = remote_execution_pb2.FindMissingBlobsResponse()
@@ -229,6 +236,88 @@ class _ContentAddressableStorageServicer(remote_execution_pb2_grpc.ContentAddres
                 d.size_bytes = digest.size_bytes
         return response
 
+    def BatchReadBlobs(self, request, context):
+        response = remote_execution_pb2.BatchReadBlobsResponse()
+        batch_size = 0
+
+        for digest in request.digests:
+            batch_size += digest.size_bytes
+            if batch_size > _MAX_PAYLOAD_BYTES:
+                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+                return response
+
+            blob_response = response.responses.add()
+            blob_response.digest.hash = digest.hash
+            blob_response.digest.size_bytes = digest.size_bytes
+            try:
+                with open(self.cas.objpath(digest), 'rb') as f:
+                    if os.fstat(f.fileno()).st_size != digest.size_bytes:
+                        blob_response.status.code = grpc.StatusCode.NOT_FOUND
+                        continue
+
+                    blob_response.data = f.read(digest.size_bytes)
+            except FileNotFoundError:
+                blob_response.status.code = grpc.StatusCode.NOT_FOUND
+
+        return response
+
+    def BatchUpdateBlobs(self, request, context):
+        response = remote_execution_pb2.BatchUpdateBlobsResponse()
+
+        if not self.enable_push:
+            context.set_code(grpc.StatusCode.PERMISSION_DENIED)
+            return response
+
+        batch_size = 0
+
+        for blob_request in request.requests:
+            digest = blob_request.digest
+
+            batch_size += digest.size_bytes
+            if batch_size > _MAX_PAYLOAD_BYTES:
+                context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
+                return response
+
+            blob_response = response.responses.add()
+            blob_response.digest.hash = digest.hash
+            blob_response.digest.size_bytes = digest.size_bytes
+
+            if len(blob_request.data) != digest.size_bytes:
+                blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
+                continue
+
+            try:
+                _clean_up_cache(self.cas, digest.size_bytes)
+
+                with tempfile.NamedTemporaryFile(dir=self.cas.tmpdir) as out:
+                    out.write(blob_request.data)
+                    out.flush()
+                    server_digest = self.cas.add_object(path=out.name)
+                    if server_digest.hash != digest.hash:
+                        blob_response.status.code = grpc.StatusCode.FAILED_PRECONDITION
+
+            except ArtifactTooLargeException:
+                blob_response.status.code = grpc.StatusCode.RESOURCE_EXHAUSTED
+
+        return response
+
+
+class _CapabilitiesServicer(remote_execution_pb2_grpc.CapabilitiesServicer):
+    def GetCapabilities(self, request, context):
+        response = remote_execution_pb2.ServerCapabilities()
+
+        cache_capabilities = response.cache_capabilities
+        cache_capabilities.digest_function.append(remote_execution_pb2.SHA256)
+        cache_capabilities.action_cache_update_capabilities.update_enabled = False
+        cache_capabilities.max_batch_total_size_bytes = _MAX_PAYLOAD_BYTES
+        cache_capabilities.symlink_absolute_path_strategy = remote_execution_pb2.CacheCapabilities.ALLOWED
+
+        response.deprecated_api_version.major = 2
+        response.low_api_version.major = 2
+        response.high_api_version.major = 2
+
+        return response
+
 
 class _ReferenceStorageServicer(buildstream_pb2_grpc.ReferenceStorageServicer):
     def __init__(self, cas, *, enable_push):
diff --git a/buildstream/_context.py b/buildstream/_context.py
index a94d374cf1a6c652192f709b87e2254e839687c1..d2086af933d469b407da85116077fc7d8bc0c281 100644
--- a/buildstream/_context.py
+++ b/buildstream/_context.py
@@ -19,7 +19,8 @@
 
 import os
 import datetime
-from collections import deque, Mapping
+from collections import deque
+from collections.abc import Mapping
 from contextlib import contextmanager
 from . import utils
 from . import _cachekey
@@ -30,6 +31,7 @@ from ._exceptions import LoadError, LoadErrorReason, BstError
 from ._message import Message, MessageType
 from ._profile import Topics, profile_start, profile_end
 from ._artifactcache import ArtifactCache
+from ._artifactcache.cascache import CASCache
 from ._workspaces import Workspaces
 from .plugin import _plugin_lookup
 
@@ -113,13 +115,13 @@ class Context():
         self._cache_key = None
         self._message_handler = None
         self._message_depth = deque()
+        self._artifactcache = None
         self._projects = []
         self._project_overrides = {}
         self._workspaces = None
         self._log_handle = None
         self._log_filename = None
         self.config_cache_quota = 'infinity'
-        self.artifactdir_volume = None
 
     # load()
     #
@@ -228,6 +230,13 @@ class Context():
                             "{}: on-error should be one of: {}".format(
                                 provenance, ", ".join(valid_actions)))
 
+    @property
+    def artifactcache(self):
+        if not self._artifactcache:
+            self._artifactcache = CASCache(self)
+
+        return self._artifactcache
+
     # add_project():
     #
     # Add a project to the context.
diff --git a/buildstream/_exceptions.py b/buildstream/_exceptions.py
index 3fb5e5775e772b9062f639af2e65cab72cd54574..19606776ea1745ec168c1b12e0451748b8ee7bc8 100644
--- a/buildstream/_exceptions.py
+++ b/buildstream/_exceptions.py
@@ -217,6 +217,12 @@ class LoadErrorReason(Enum):
     # A recursive include has been encountered.
     RECURSIVE_INCLUDE = 21
 
+    # A recursive variable has been encountered
+    RECURSIVE_VARIABLE = 22
+
+    # An attempt to set the value of a protected variable
+    PROTECTED_VARIABLE_REDEFINED = 23
+
 
 # LoadError
 #
@@ -306,3 +312,12 @@ class StreamError(BstError):
 class AppError(BstError):
     def __init__(self, message, detail=None, reason=None):
         super().__init__(message, detail=detail, domain=ErrorDomain.APP, reason=reason)
+
+
+# SkipJob
+#
+# Raised from a child process within a job when the job should be
+# considered skipped by the parent process.
+#
+class SkipJob(Exception):
+    pass
diff --git a/buildstream/_frontend/app.py b/buildstream/_frontend/app.py
index 1e357f1238290bb8022429dcf1c38ba96b9b46b4..f3dcd623b6bee387068c6b5ea460f5835a98a3f5 100644
--- a/buildstream/_frontend/app.py
+++ b/buildstream/_frontend/app.py
@@ -26,7 +26,6 @@ import datetime
 from textwrap import TextWrapper
 import click
 from click import UsageError
-from blessings import Terminal
 
 # Import buildstream public symbols
 from .. import Scope
@@ -92,7 +91,7 @@ class App():
         #
         # Early initialization
         #
-        is_a_tty = Terminal().is_a_tty
+        is_a_tty = sys.stdout.isatty() and sys.stderr.isatty()
 
         # Enable interactive mode if we're attached to a tty
         if main_options['no_interactive']:
@@ -116,14 +115,6 @@ class App():
         else:
             self.colors = False
 
-        # Increase the soft limit for open file descriptors to the maximum.
-        # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox.
-        # Avoid hitting the limit too quickly.
-        limits = resource.getrlimit(resource.RLIMIT_NOFILE)
-        if limits[0] != limits[1]:
-            # Set soft limit to hard limit
-            resource.setrlimit(resource.RLIMIT_NOFILE, (limits[1], limits[1]))
-
     # create()
     #
     # Should be used instead of the regular constructor.
@@ -199,10 +190,15 @@ class App():
             if option_value is not None:
                 setattr(self.context, context_attr, option_value)
         try:
-            Platform.create_instance(self.context)
+            Platform.get_platform()
         except BstError as e:
             self._error_exit(e, "Error instantiating platform")
 
+        try:
+            self.context.artifactcache.preflight()
+        except BstError as e:
+            self._error_exit(e, "Error instantiating artifact cache")
+
         # Create the logger right before setting the message handler
         self.logger = LogLine(self.context,
                               self._content_profile,
diff --git a/buildstream/_frontend/cli.py b/buildstream/_frontend/cli.py
index 20624e2acbed145dfd06a4125d77b0ae620e038e..85632959fe34e3cc3a3b52995e327676e6827ed0 100644
--- a/buildstream/_frontend/cli.py
+++ b/buildstream/_frontend/cli.py
@@ -104,7 +104,7 @@ def complete_target(args, incomplete):
     # The project is not required to have an element-path
     element_directory = project.get('element-path')
 
-    # If a project was loaded, use it's element-path to
+    # If a project was loaded, use its element-path to
     # adjust our completion's base directory
     if element_directory:
         base_directory = os.path.join(base_directory, element_directory)
diff --git a/buildstream/_frontend/complete.py b/buildstream/_frontend/complete.py
index 97d6d45ec3dc3af31029f9d3ef87729472a94026..5606f13e5f469a0cc12ed492938b781d6717748a 100644
--- a/buildstream/_frontend/complete.py
+++ b/buildstream/_frontend/complete.py
@@ -1,4 +1,5 @@
 #
+#  Copyright (c) 2014 by Armin Ronacher.
 #  Copyright (C) 2016 Codethink Limited
 #
 #  This program is free software; you can redistribute it and/or
@@ -14,8 +15,22 @@
 #  You should have received a copy of the GNU Lesser General Public
 #  License along with this library. If not, see <http://www.gnu.org/licenses/>.
 #
-#  This module was forked from the python click library.
-
+#  This module was forked from the python click library. The original
+#  copyright notice from the Click library is included above, and the
+#  following disclaimer is reproduced below, as per their LICENSE requirements.
+#
+#  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+#  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+#  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+#  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+#  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+#  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+#  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+#  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+#  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+#  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+#  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
 import collections
 import copy
 import os
diff --git a/buildstream/_frontend/linuxapp.py b/buildstream/_frontend/linuxapp.py
index 667ce5c2b3ed956b1e09a1e18319e6adbb411abc..0444dc7b47233b7d7e68a6d7847f5282728d8054 100644
--- a/buildstream/_frontend/linuxapp.py
+++ b/buildstream/_frontend/linuxapp.py
@@ -28,9 +28,9 @@ from .app import App
 #
 def _osc_777_supported():
 
-    term = os.environ['TERM']
+    term = os.environ.get('TERM')
 
-    if term.startswith('xterm') or term.startswith('vte'):
+    if term and (term.startswith('xterm') or term.startswith('vte')):
 
         # Since vte version 4600, upstream silently ignores
         # the OSC 777 without printing garbage to the terminal.
@@ -39,10 +39,10 @@ def _osc_777_supported():
         # will trigger a desktop notification and bring attention
         # to the terminal.
         #
-        vte_version = os.environ['VTE_VERSION']
+        vte_version = os.environ.get('VTE_VERSION')
         try:
             vte_version_int = int(vte_version)
-        except ValueError:
+        except (ValueError, TypeError):
             return False
 
         if vte_version_int >= 4600:
diff --git a/buildstream/_frontend/status.py b/buildstream/_frontend/status.py
index 51b28d9cf68172e4695800865990a5803156e011..fd1a5acf1bce9dc28e2b0948e61ef8ebc770fe07 100644
--- a/buildstream/_frontend/status.py
+++ b/buildstream/_frontend/status.py
@@ -16,8 +16,10 @@
 #
 #  Authors:
 #        Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+import os
+import sys
 import click
-from blessings import Terminal
+import curses
 
 # Import a widget internal for formatting time codes
 from .widget import TimeCode
@@ -43,6 +45,13 @@ from .._scheduler import ElementJob
 #
 class Status():
 
+    # Table of the terminal capabilities we require and use
+    _TERM_CAPABILITIES = {
+        'move_up': 'cuu1',
+        'move_x': 'hpa',
+        'clear_eol': 'el'
+    }
+
     def __init__(self, context,
                  content_profile, format_profile,
                  success_profile, error_profile,
@@ -56,7 +65,6 @@ class Status():
         self._stream = stream
         self._jobs = []
         self._last_lines = 0  # Number of status lines we last printed to console
-        self._term = Terminal()
         self._spacing = 1
         self._colors = colors
         self._header = _StatusHeader(context,
@@ -69,6 +77,7 @@ class Status():
         self._alloc_columns = None
         self._line_length = 0
         self._need_alloc = True
+        self._term_caps = self._init_terminal()
 
     # add_job()
     #
@@ -121,7 +130,7 @@ class Status():
     #
     def clear(self):
 
-        if not self._term.does_styling:
+        if not self._term_caps:
             return
 
         for _ in range(self._last_lines):
@@ -138,7 +147,7 @@ class Status():
     # not necessary to call clear().
     def render(self):
 
-        if not self._term.does_styling:
+        if not self._term_caps:
             return
 
         elapsed = self._stream.elapsed_time
@@ -185,6 +194,55 @@ class Status():
     ###################################################
     #                 Private Methods                 #
     ###################################################
+
+    # _init_terminal()
+    #
+    # Initialize the terminal and return the resolved terminal
+    # capabilities dictionary.
+    #
+    # Returns:
+    #    (dict|None): The resolved terminal capabilities dictionary,
+    #                 or None if the terminal does not support all
+    #                 of the required capabilities.
+    #
+    def _init_terminal(self):
+
+        # We need both output streams to be connected to a terminal
+        if not (sys.stdout.isatty() and sys.stderr.isatty()):
+            return None
+
+        # Initialize the terminal; curses might decide it doesn't
+        # support this terminal
+        try:
+            curses.setupterm(os.environ.get('TERM', 'dumb'))
+        except curses.error:
+            return None
+
+        term_caps = {}
+
+        # Resolve the terminal control sequences for each of the
+        # capability names we require.
+        #
+        for capname, capval in self._TERM_CAPABILITIES.items():
+            code = curses.tigetstr(capval)
+
+            # If any of the required capabilities resolve to an empty string or None,
+            # then we don't have the capabilities we need for a status bar on
+            # this terminal.
+            if not code:
+                return None
+
+            # Decode sequences as latin1, as they are always 8-bit bytes,
+            # so when b'\xff' is returned, this must be decoded to u'\xff'.
+            #
+            # This technique is employed by the python blessings library
+            # as well, and should provide better compatibility with most
+            # terminals.
+            #
+            term_caps[capname] = code.decode('latin1')
+
+        return term_caps
+
     def _check_term_width(self):
         term_width, _ = click.get_terminal_size()
         if self._term_width != term_width:
@@ -192,12 +250,24 @@ class Status():
             self._need_alloc = True
 
     def _move_up(self):
+        assert self._term_caps is not None
+
         # Explicitly move to beginning of line, fixes things up
         # when there was a ^C or ^Z printed to the terminal.
-        click.echo(self._term.move_x(0) + self._term.move_up, nl=False, err=True)
+        move_x = curses.tparm(self._term_caps['move_x'].encode('latin1'), 0)
+        move_x = move_x.decode('latin1')
+
+        move_up = curses.tparm(self._term_caps['move_up'].encode('latin1'))
+        move_up = move_up.decode('latin1')
+
+        click.echo(move_x + move_up, nl=False, err=True)
 
     def _clear_line(self):
-        click.echo(self._term.clear_eol, nl=False, err=True)
+        assert self._term_caps is not None
+
+        clear_eol = curses.tparm(self._term_caps['clear_eol'].encode('latin1'))
+        clear_eol = clear_eol.decode('latin1')
+        click.echo(clear_eol, nl=False, err=True)
 
     def _allocate(self):
         if not self._need_alloc:
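
Editor's note: the Status widget now drives the cursor with raw terminfo capabilities instead of the blessings library: setupterm() binds curses to $TERM, tigetstr() resolves each capability name to an escape sequence, and tparm() fills in parameters such as the column for 'hpa'. A self-contained sketch of that flow (printing a throwaway line and then clearing it), assuming it runs on a terminal that provides the same three capabilities:

    import curses
    import os
    import sys

    def demo_clear_previous_line():
        curses.setupterm(os.environ.get('TERM', 'dumb'))

        move_up = curses.tigetstr('cuu1')   # cursor up one line
        move_x = curses.tigetstr('hpa')     # move to absolute column (parameterized)
        clear_eol = curses.tigetstr('el')   # clear to end of line
        if not (move_up and move_x and clear_eol):
            return  # terminal too dumb for a status display, same bail-out as above

        sys.stdout.write('scratch status line\n')
        # tparm() substitutes the column parameter; decode as latin1 like the hunk above
        sys.stdout.write(curses.tparm(move_x, 0).decode('latin1'))
        sys.stdout.write(move_up.decode('latin1'))
        sys.stdout.write(clear_eol.decode('latin1'))
        sys.stdout.flush()

    if __name__ == '__main__':
        demo_clear_previous_line()
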
diff --git a/buildstream/_fuse/hardlinks.py b/buildstream/_fuse/hardlinks.py
index 1386f14cf1053669e44ddb47f8bd2f3daa9f4efd..0797cb4bc23e5f7f681e9fd9ba6b4f0bebc5102e 100644
--- a/buildstream/_fuse/hardlinks.py
+++ b/buildstream/_fuse/hardlinks.py
@@ -42,9 +42,10 @@ from .mount import Mount
 #
 class SafeHardlinks(Mount):
 
-    def __init__(self, directory, tempdir):
+    def __init__(self, directory, tempdir, fuse_mount_options={}):
         self.directory = directory
         self.tempdir = tempdir
+        super().__init__(fuse_mount_options=fuse_mount_options)
 
     def create_operations(self):
         return SafeHardlinkOps(self.directory, self.tempdir)
@@ -121,7 +122,7 @@ class SafeHardlinkOps(Operations):
         st = os.lstat(full_path)
         return dict((key, getattr(st, key)) for key in (
             'st_atime', 'st_ctime', 'st_gid', 'st_mode',
-            'st_mtime', 'st_nlink', 'st_size', 'st_uid'))
+            'st_mtime', 'st_nlink', 'st_size', 'st_uid', 'st_rdev'))
 
     def readdir(self, path, fh):
         full_path = self._full_path(path)
diff --git a/buildstream/_fuse/mount.py b/buildstream/_fuse/mount.py
index 0ab1ce715cd0106bb84c4353d383481d095bf76c..83c98a97aca34316f56df3437e5d10a09d12ac17 100644
--- a/buildstream/_fuse/mount.py
+++ b/buildstream/_fuse/mount.py
@@ -60,7 +60,7 @@ class FuseMountError(Exception):
 #
 #   With the daemon approach, we know that the fuse is mounted right
 #   away when fuse_main() returns, then the daemon will go and handle
-#   requests on it's own, but then we have no way to shut down the
+#   requests on its own, but then we have no way to shut down the
 #   daemon.
 #
 #   With the blocking approach, we still have it as a child process
@@ -87,6 +87,9 @@ class Mount():
     #               User Facing API                #
     ################################################
 
+    def __init__(self, fuse_mount_options={}):
+        self._fuse_mount_options = fuse_mount_options
+
     # mount():
     #
     # User facing API for mounting a fuse subclass implementation
@@ -102,7 +105,7 @@ class Mount():
         self.__process = Process(target=self.__run_fuse)
 
         # Ensure the child fork() does not inherit our signal handlers, if the
-        # child wants to handle a signal then it will first set it's own
+        # child wants to handle a signal then it will first set its own
         # handler, and then unblock it.
         with _signals.blocked([signal.SIGTERM, signal.SIGTSTP, signal.SIGINT], ignore=False):
             self.__process.start()
@@ -182,9 +185,10 @@ class Mount():
         self.__operations = self.create_operations()
 
         # Run fuse in foreground in this child process, internally libfuse
-        # will handle SIGTERM and gracefully exit it's own little main loop.
+        # will handle SIGTERM and gracefully exit its own little main loop.
         #
-        FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, nonempty=True)
+        FUSE(self.__operations, self.__mountpoint, nothreads=True, foreground=True, nonempty=True,
+             **self._fuse_mount_options)
 
         # Explicit 0 exit code, if the operations crashed for some reason, the exit
         # code will not be 0, and we want to know about it.
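
The new `fuse_mount_options` argument is simply splatted into fusepy's `FUSE()` constructor, which forwards unknown keyword arguments to libfuse as `-o` mount options. A self-contained sketch of the pattern follows, assuming the public fusepy API (`from fuse import FUSE, Operations`); the class name and the `ro` option are illustrative only.

# Sketch only: collect mount options in the constructor (normalizing None to
# a fresh dict avoids a shared mutable default) and splat them into FUSE(),
# which passes the extra keyword arguments to libfuse as "-o" mount options.
from fuse import FUSE, Operations  # fusepy

class ExampleMount:

    def __init__(self, fuse_mount_options=None):
        self._fuse_mount_options = fuse_mount_options or {}

    def run(self, operations, mountpoint):
        FUSE(operations, mountpoint, nothreads=True, foreground=True,
             **self._fuse_mount_options)

# ExampleMount(fuse_mount_options={'ro': True}).run(Operations(), '/tmp/mnt')
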
diff --git a/buildstream/_includes.py b/buildstream/_includes.py
index e30003630c2c9c907861e1ba9e06e2c2b7258209..df14c9f2df0edb1e9549c70e4cc1ee7177819b6f 100644
--- a/buildstream/_includes.py
+++ b/buildstream/_includes.py
@@ -1,5 +1,5 @@
 import os
-from collections import Mapping
+from collections.abc import Mapping
 from . import _yaml
 from ._exceptions import LoadError, LoadErrorReason
 
diff --git a/buildstream/_loader/loadelement.py b/buildstream/_loader/loadelement.py
index 4104dfd5951e7dce5dda0e57a69340110b462fc5..72e89879e02ae336d2f15160a92d3f65a03a5ed9 100644
--- a/buildstream/_loader/loadelement.py
+++ b/buildstream/_loader/loadelement.py
@@ -18,7 +18,7 @@
 #        Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
 
 # System imports
-from collections import Mapping
+from collections.abc import Mapping
 
 # BuildStream toplevel imports
 from .._exceptions import LoadError, LoadErrorReason
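
The `collections.abc` import fixes in this and the neighbouring files are needed because the ABC aliases in the `collections` module are deprecated and were removed in Python 3.10; only the import location changes, the `isinstance()` checks behave exactly as before. For example:

# Importing Mapping from `collections` is deprecated (since Python 3.3) and
# stops working on Python 3.10+; `collections.abc` works everywhere and is
# what these modules now use.
from collections.abc import Mapping

def is_mapping(value):
    return isinstance(value, Mapping)

print(is_mapping({'kind': 'autotools'}))  # True
print(is_mapping(['a.bst', 'b.bst']))     # False
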
diff --git a/buildstream/_loader/loader.py b/buildstream/_loader/loader.py
index 8553bc6ddc8bdb9f4d5f7987717569d3716ec87f..8a81a71c1333fbbccd144f6b327cfb67cc84e2c8 100644
--- a/buildstream/_loader/loader.py
+++ b/buildstream/_loader/loader.py
@@ -19,7 +19,8 @@
 
 import os
 from functools import cmp_to_key
-from collections import Mapping, namedtuple
+from collections import namedtuple
+from collections.abc import Mapping
 import tempfile
 import shutil
 
@@ -28,8 +29,8 @@ from .. import Consistency
 from .. import _yaml
 from ..element import Element
 from .._profile import Topics, profile_start, profile_end
-from .._platform import Platform
 from .._includes import Includes
+from .._yamlcache import YamlCache
 
 from .types import Symbol, Dependency
 from .loadelement import LoadElement
@@ -109,13 +110,19 @@ class Loader():
         #
         deps = []
 
-        for target in targets:
-            profile_start(Topics.LOAD_PROJECT, target)
-            junction, name, loader = self._parse_name(target, rewritable, ticker,
-                                                      fetch_subprojects=fetch_subprojects)
-            loader._load_file(name, rewritable, ticker, fetch_subprojects)
-            deps.append(Dependency(name, junction=junction))
-            profile_end(Topics.LOAD_PROJECT, target)
+        # XXX This will need to be changed to the context's top-level project if this method
+        # is ever used for subprojects
+        top_dir = self.project.directory
+
+        cache_file = YamlCache.get_cache_file(top_dir)
+        with YamlCache.open(self._context, cache_file) as yaml_cache:
+            for target in targets:
+                profile_start(Topics.LOAD_PROJECT, target)
+                junction, name, loader = self._parse_name(target, rewritable, ticker,
+                                                          fetch_subprojects=fetch_subprojects)
+                loader._load_file(name, rewritable, ticker, fetch_subprojects, yaml_cache)
+                deps.append(Dependency(name, junction=junction))
+                profile_end(Topics.LOAD_PROJECT, target)
 
         #
         # Now that we've resolve the dependencies, scan them for circular dependencies
@@ -202,11 +209,12 @@ class Loader():
     #    rewritable (bool): Whether we should load in round trippable mode
     #    ticker (callable): A callback to report loaded filenames to the frontend
     #    fetch_subprojects (bool): Whether to fetch subprojects while loading
+    #    yaml_cache (YamlCache): A yaml cache
     #
     # Returns:
     #    (LoadElement): A loaded LoadElement
     #
-    def _load_file(self, filename, rewritable, ticker, fetch_subprojects):
+    def _load_file(self, filename, rewritable, ticker, fetch_subprojects, yaml_cache=None):
 
         # Silently ignore already loaded files
         if filename in self._elements:
@@ -219,7 +227,8 @@ class Loader():
         # Load the data and process any conditional statements therein
         fullpath = os.path.join(self._basedir, filename)
         try:
-            node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable, project=self.project)
+            node = _yaml.load(fullpath, shortname=filename, copy_tree=rewritable,
+                              project=self.project, yaml_cache=yaml_cache)
         except LoadError as e:
             if e.reason == LoadErrorReason.MISSING_FILE:
                 # If we can't find the file, try to suggest plausible
@@ -262,13 +271,13 @@ class Loader():
         # Load all dependency files for the new LoadElement
         for dep in element.deps:
             if dep.junction:
-                self._load_file(dep.junction, rewritable, ticker, fetch_subprojects)
+                self._load_file(dep.junction, rewritable, ticker, fetch_subprojects, yaml_cache)
                 loader = self._get_loader(dep.junction, rewritable=rewritable, ticker=ticker,
                                           fetch_subprojects=fetch_subprojects)
             else:
                 loader = self
 
-            dep_element = loader._load_file(dep.name, rewritable, ticker, fetch_subprojects)
+            dep_element = loader._load_file(dep.name, rewritable, ticker, fetch_subprojects, yaml_cache)
 
             if _yaml.node_get(dep_element.node, str, Symbol.KIND) == 'junction':
                 raise LoadError(LoadErrorReason.INVALID_DATA,
@@ -288,12 +297,14 @@ class Loader():
     # Raises:
     #    (LoadError): In case there was a circular dependency error
     #
-    def _check_circular_deps(self, element_name, check_elements=None, validated=None):
+    def _check_circular_deps(self, element_name, check_elements=None, validated=None, sequence=None):
 
         if check_elements is None:
             check_elements = {}
         if validated is None:
             validated = {}
+        if sequence is None:
+            sequence = []
 
         element = self._elements[element_name]
 
@@ -306,16 +317,24 @@ class Loader():
             return
 
         if check_elements.get(element_name) is not None:
+            # Create `chain`, the loop of element dependencies from this
+            # element back to itself, by trimming everything before this
+            # element from the sequence under consideration.
+            chain = sequence[sequence.index(element_name):]
+            chain.append(element_name)
             raise LoadError(LoadErrorReason.CIRCULAR_DEPENDENCY,
-                            "Circular dependency detected for element: {}"
-                            .format(element.name))
+                            ("Circular dependency detected at element: {}\n" +
+                             "Dependency chain: {}")
+                            .format(element.name, " -> ".join(chain)))
 
         # Push / Check each dependency / Pop
         check_elements[element_name] = True
+        sequence.append(element_name)
         for dep in element.deps:
             loader = self._get_loader_for_dep(dep)
-            loader._check_circular_deps(dep.name, check_elements, validated)
+            loader._check_circular_deps(dep.name, check_elements, validated, sequence)
         del check_elements[element_name]
+        sequence.pop()
 
         # Eliminate duplicate paths
         validated[element_name] = True
@@ -518,8 +537,7 @@ class Loader():
             raise LoadError(LoadErrorReason.INVALID_DATA,
                             "{}: Expected junction but element kind is {}".format(filename, meta_element.kind))
 
-        platform = Platform.get_platform()
-        element = Element._new_from_meta(meta_element, platform.artifactcache)
+        element = Element._new_from_meta(meta_element, self._context.artifactcache)
         element._preflight()
 
         sources = list(element.sources())
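
The `sequence` list threaded through `_check_circular_deps()` above exists purely to improve the error message: it records the current depth-first path so the offending cycle can be printed. Below is a standalone sketch of the technique, using a made-up two element project.

# Standalone sketch: `sequence` tracks the current DFS path, and when an
# element already on the path is revisited, slicing the path from that
# element yields the dependency chain for the error message.
def check_circular(deps, name, on_path=None, validated=None, sequence=None):
    on_path = {} if on_path is None else on_path
    validated = {} if validated is None else validated
    sequence = [] if sequence is None else sequence

    if name in validated:
        return
    if name in on_path:
        chain = sequence[sequence.index(name):] + [name]
        raise ValueError("Circular dependency detected at element: {}\n"
                         "Dependency chain: {}".format(name, " -> ".join(chain)))

    on_path[name] = True
    sequence.append(name)
    for dep in deps.get(name, []):
        check_circular(deps, dep, on_path, validated, sequence)
    del on_path[name]
    sequence.pop()
    validated[name] = True

# check_circular({'a.bst': ['b.bst'], 'b.bst': ['a.bst']}, 'a.bst')
# raises: Circular dependency detected at element: a.bst
#         Dependency chain: a.bst -> b.bst -> a.bst
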
diff --git a/buildstream/_options/optionpool.py b/buildstream/_options/optionpool.py
index b53e87a3d00759d691f180407b162393cb77aaf3..1274586f78d64aea54f19a67a6403a512632a1b5 100644
--- a/buildstream/_options/optionpool.py
+++ b/buildstream/_options/optionpool.py
@@ -18,7 +18,7 @@
 #        Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
 #
 
-from collections import Mapping
+from collections.abc import Mapping
 import jinja2
 
 from .. import _yaml
diff --git a/buildstream/_pipeline.py b/buildstream/_pipeline.py
index 4e5f4d0e948dd503c723dda0a8ae35c8eb7ace33..1f75b2e9edf00602aff2ad3242a6ed14b752f5d5 100644
--- a/buildstream/_pipeline.py
+++ b/buildstream/_pipeline.py
@@ -355,10 +355,14 @@ class Pipeline():
     #
     def assert_consistent(self, elements):
         inconsistent = []
+        inconsistent_workspaced = []
         with self._context.timed_activity("Checking sources"):
             for element in elements:
                 if element._get_consistency() == Consistency.INCONSISTENT:
-                    inconsistent.append(element)
+                    if element._get_workspace():
+                        inconsistent_workspaced.append(element)
+                    else:
+                        inconsistent.append(element)
 
         if inconsistent:
             detail = "Exact versions are missing for the following elements:\n\n"
@@ -372,6 +376,13 @@ class Pipeline():
 
             raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline")
 
+        if inconsistent_workspaced:
+            detail = "Some workspaces do not exist but are not closed\n" + \
+                     "Try closing them with `bst workspace close`\n\n"
+            for element in inconsistent_workspaced:
+                detail += "  " + element._get_full_name() + "\n"
+            raise PipelineError("Inconsistent pipeline", detail=detail, reason="inconsistent-pipeline-workspaced")
+
     #############################################################
     #                     Private Methods                       #
     #############################################################
diff --git a/buildstream/_platform/darwin.py b/buildstream/_platform/darwin.py
new file mode 100644
index 0000000000000000000000000000000000000000..04a83110e46fb6623e4f63949043ea2190847e15
--- /dev/null
+++ b/buildstream/_platform/darwin.py
@@ -0,0 +1,54 @@
+#
+#  Copyright (C) 2017 Codethink Limited
+#  Copyright (C) 2018 Bloomberg Finance LP
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import resource
+
+from .._exceptions import PlatformError
+from ..sandbox import SandboxDummy
+
+from . import Platform
+
+
+class Darwin(Platform):
+
+    # This value comes from OPEN_MAX in syslimits.h
+    OPEN_MAX = 10240
+
+    def __init__(self):
+
+        super().__init__()
+
+    def create_sandbox(self, *args, **kwargs):
+        kwargs['dummy_reason'] = \
+            "OSXFUSE is not supported and there are no supported sandbox" + \
+            "technologies for OSX at this time"
+        return SandboxDummy(*args, **kwargs)
+
+    def check_sandbox_config(self, config):
+        # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
+        return True
+
+    def get_cpu_count(self, cap=None):
+        cpu_count = os.cpu_count()
+        if cap is None:
+            return cpu_count
+        else:
+            return min(cpu_count, cap)
+
+    def set_resource_limits(self, soft_limit=OPEN_MAX, hard_limit=None):
+        super().set_resource_limits(soft_limit)
diff --git a/buildstream/_platform/linux.py b/buildstream/_platform/linux.py
index a5fd0d68723f894238942567f7d3f026a20bc99a..09db19f2da402b3d28d6c78087b47e22769e8275 100644
--- a/buildstream/_platform/linux.py
+++ b/buildstream/_platform/linux.py
@@ -17,42 +17,83 @@
 #  Authors:
 #        Tristan Maat <tristan.maat@codethink.co.uk>
 
+import os
 import subprocess
 
 from .. import _site
 from .. import utils
-from .._artifactcache.cascache import CASCache
 from .._message import Message, MessageType
-from ..sandbox import SandboxBwrap
+from ..sandbox import SandboxDummy
 
 from . import Platform
 
 
 class Linux(Platform):
 
-    def __init__(self, context):
+    def __init__(self):
 
-        super().__init__(context)
+        super().__init__()
+
+        self._uid = os.geteuid()
+        self._gid = os.getegid()
+
+        self._have_fuse = os.path.exists("/dev/fuse")
+        self._bwrap_exists = _site.check_bwrap_version(0, 0, 0)
+        self._have_good_bwrap = _site.check_bwrap_version(0, 1, 2)
+
+        self._local_sandbox_available = self._have_fuse and self._have_good_bwrap
 
         self._die_with_parent_available = _site.check_bwrap_version(0, 1, 8)
-        self._user_ns_available = self._check_user_ns_available(context)
-        self._artifact_cache = CASCache(context, enable_push=self._user_ns_available)
 
-    @property
-    def artifactcache(self):
-        return self._artifact_cache
+        if self._local_sandbox_available:
+            self._user_ns_available = self._check_user_ns_available()
+        else:
+            self._user_ns_available = False
 
     def create_sandbox(self, *args, **kwargs):
-        # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
-        kwargs['user_ns_available'] = self._user_ns_available
-        kwargs['die_with_parent_available'] = self._die_with_parent_available
-        return SandboxBwrap(*args, **kwargs)
+        if not self._local_sandbox_available:
+            return self._create_dummy_sandbox(*args, **kwargs)
+        else:
+            return self._create_bwrap_sandbox(*args, **kwargs)
+
+    def check_sandbox_config(self, config):
+        if not self._local_sandbox_available:
+            # Accept all sandbox configs as it's irrelevant with the dummy sandbox (no Sandbox.run).
+            return True
+
+        if self._user_ns_available:
+            # User namespace support allows arbitrary build UID/GID settings.
+            return True
+        else:
+            # Without user namespace support, the UID/GID in the sandbox
+            # will match the host UID/GID.
+            return config.build_uid == self._uid and config.build_gid == self._gid
 
     ################################################
     #              Private Methods                 #
     ################################################
-    def _check_user_ns_available(self, context):
 
+    def _create_dummy_sandbox(self, *args, **kwargs):
+        reasons = []
+        if not self._have_fuse:
+            reasons.append("FUSE is unavailable")
+        if not self._have_good_bwrap:
+            if self._bwrap_exists:
+                reasons.append("`bwrap` is too old (bst needs at least 0.1.2)")
+            else:
+                reasons.append("`bwrap` executable not found")
+
+        kwargs['dummy_reason'] = " and ".join(reasons)
+        return SandboxDummy(*args, **kwargs)
+
+    def _create_bwrap_sandbox(self, *args, **kwargs):
+        from ..sandbox._sandboxbwrap import SandboxBwrap
+        # Inform the bubblewrap sandbox as to whether it can use user namespaces or not
+        kwargs['user_ns_available'] = self._user_ns_available
+        kwargs['die_with_parent_available'] = self._die_with_parent_available
+        return SandboxBwrap(*args, **kwargs)
+
+    def _check_user_ns_available(self):
         # Here, lets check if bwrap is able to create user namespaces,
         # issue a warning if it's not available, and save the state
         # locally so that we can inform the sandbox to not try it
@@ -75,9 +116,4 @@ class Linux(Platform):
             return True
 
         else:
-            context.message(
-                Message(None, MessageType.WARN,
-                        "Unable to create user namespaces with bubblewrap, resorting to fallback",
-                        detail="Some builds may not function due to lack of uid / gid 0, " +
-                        "artifacts created will not be trusted for push purposes."))
             return False
diff --git a/buildstream/_platform/platform.py b/buildstream/_platform/platform.py
index 8a074eb62457aa7c8f8bc375a31e676227230b56..d3e4b949a717c3b794728f3149493eaa1390f22c 100644
--- a/buildstream/_platform/platform.py
+++ b/buildstream/_platform/platform.py
@@ -19,6 +19,7 @@
 
 import os
 import sys
+import resource
 
 from .._exceptions import PlatformError, ImplError
 
@@ -29,50 +30,48 @@ class Platform():
     # Platform()
     #
     # A class to manage platform-specific details. Currently holds the
-    # sandbox factory, the artifact cache and staging operations, as
-    # well as platform helpers.
+    # sandbox factory as well as platform helpers.
     #
-    # Args:
-    #     context (context): The project context
-    #
-    def __init__(self, context):
-        self.context = context
+    def __init__(self):
+        self.set_resource_limits()
 
     @classmethod
-    def create_instance(cls, *args, **kwargs):
-        if sys.platform.startswith('linux'):
-            backend = 'linux'
-        else:
-            backend = 'unix'
-
+    def _create_instance(cls):
         # Meant for testing purposes and therefore hidden in the
         # deepest corners of the source code. Try not to abuse this,
         # please?
         if os.getenv('BST_FORCE_BACKEND'):
             backend = os.getenv('BST_FORCE_BACKEND')
+        elif sys.platform.startswith('linux'):
+            backend = 'linux'
+        elif sys.platform.startswith('darwin'):
+            backend = 'darwin'
+        else:
+            backend = 'unix'
 
         if backend == 'linux':
             from .linux import Linux as PlatformImpl
+        elif backend == 'darwin':
+            from .darwin import Darwin as PlatformImpl
         elif backend == 'unix':
             from .unix import Unix as PlatformImpl
         else:
             raise PlatformError("No such platform: '{}'".format(backend))
 
-        cls._instance = PlatformImpl(*args, **kwargs)
+        cls._instance = PlatformImpl()
 
     @classmethod
     def get_platform(cls):
         if not cls._instance:
-            raise PlatformError("Platform needs to be initialized first")
+            cls._create_instance()
         return cls._instance
 
-    ##################################################################
-    #                       Platform properties                      #
-    ##################################################################
-    @property
-    def artifactcache(self):
-        raise ImplError("Platform {platform} does not implement an artifactcache"
-                        .format(platform=type(self).__name__))
+    def get_cpu_count(self, cap=None):
+        cpu_count = len(os.sched_getaffinity(0))
+        if cap is None:
+            return cpu_count
+        else:
+            return min(cpu_count, cap)
 
     ##################################################################
     #                        Sandbox functions                       #
@@ -92,3 +91,19 @@ class Platform():
     def create_sandbox(self, *args, **kwargs):
         raise ImplError("Platform {platform} does not implement create_sandbox()"
                         .format(platform=type(self).__name__))
+
+    def check_sandbox_config(self, config):
+        raise ImplError("Platform {platform} does not implement check_sandbox_config()"
+                        .format(platform=type(self).__name__))
+
+    def set_resource_limits(self, soft_limit=None, hard_limit=None):
+        # Needs to be set on behalf of _frontend/app.py, as the limits are platform dependent.
+        # SafeHardlinks FUSE needs to hold file descriptors for all processes in the sandbox,
+        # so avoid hitting the limit too quickly.
+        limits = resource.getrlimit(resource.RLIMIT_NOFILE)
+        if limits[0] != limits[1]:
+            if soft_limit is None:
+                soft_limit = limits[1]
+            if hard_limit is None:
+                hard_limit = limits[1]
+            resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
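
The new `Platform.set_resource_limits()` raises the soft `RLIMIT_NOFILE` limit because the SafeHardlinks FUSE layer holds file descriptors on behalf of all processes in the sandbox, and the common default soft limit of 1024 is exhausted quickly. A runnable sketch of the default behaviour follows; the Darwin platform passes `OPEN_MAX` as the soft limit instead, since macOS typically rejects values above it.

# Runnable sketch of the base-class behaviour: lift the soft limit for open
# file descriptors up to the hard limit (or to an explicit cap, as Darwin
# does with OPEN_MAX).
import resource

def set_resource_limits(soft_limit=None, hard_limit=None):
    soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft != hard:
        resource.setrlimit(resource.RLIMIT_NOFILE,
                           (soft_limit or hard, hard_limit or hard))

set_resource_limits()
print(resource.getrlimit(resource.RLIMIT_NOFILE))
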
diff --git a/buildstream/_platform/unix.py b/buildstream/_platform/unix.py
index 0306a4ac5fa4032a320c7ee0c4dc00cf7e483a5b..d2acefe6579d4b4672b8ae12baff63a29fa40f0e 100644
--- a/buildstream/_platform/unix.py
+++ b/buildstream/_platform/unix.py
@@ -19,27 +19,29 @@
 
 import os
 
-from .._artifactcache.cascache import CASCache
 from .._exceptions import PlatformError
-from ..sandbox import SandboxChroot
 
 from . import Platform
 
 
 class Unix(Platform):
 
-    def __init__(self, context):
+    def __init__(self):
 
-        super().__init__(context)
-        self._artifact_cache = CASCache(context)
+        super().__init__()
+
+        self._uid = os.geteuid()
+        self._gid = os.getegid()
 
         # Not necessarily 100% reliable, but we want to fail early.
-        if os.geteuid() != 0:
+        if self._uid != 0:
             raise PlatformError("Root privileges are required to run without bubblewrap.")
 
-    @property
-    def artifactcache(self):
-        return self._artifact_cache
-
     def create_sandbox(self, *args, **kwargs):
+        from ..sandbox._sandboxchroot import SandboxChroot
         return SandboxChroot(*args, **kwargs)
+
+    def check_sandbox_config(self, config):
+        # With the chroot sandbox, the UID/GID in the sandbox
+        # will match the host UID/GID (typically 0/0).
+        return config.build_uid == self._uid and config.build_gid == self._gid
diff --git a/buildstream/_project.py b/buildstream/_project.py
index c489e9025215dee079201d7c3782ac1a4f6cfb80..83aa1f47e3e5a34aa74283bb6ad0d7c845e2952e 100644
--- a/buildstream/_project.py
+++ b/buildstream/_project.py
@@ -19,7 +19,8 @@
 #        Tiago Gomes <tiago.gomes@codethink.co.uk>
 
 import os
-from collections import Mapping, OrderedDict
+from collections import OrderedDict
+from collections.abc import Mapping
 from pluginbase import PluginBase
 from . import utils
 from . import _cachekey
@@ -38,6 +39,7 @@ from ._loader import Loader
 from .element import Element
 from ._message import Message, MessageType
 from ._includes import Includes
+from ._platform import Platform
 
 
 # Project Configuration file
@@ -128,6 +130,7 @@ class Project():
         self._shell_host_files = []   # A list of HostMount objects
 
         self.artifact_cache_specs = None
+        self.remote_execution_url = None
         self._sandbox = None
         self._splits = None
 
@@ -384,7 +387,10 @@ class Project():
             self._project_conf = _yaml.load(projectfile)
         except LoadError as e:
             # Raise a more specific error here
-            raise LoadError(LoadErrorReason.MISSING_PROJECT_CONF, str(e))
+            if e.reason == LoadErrorReason.MISSING_FILE:
+                raise LoadError(LoadErrorReason.MISSING_PROJECT_CONF, str(e)) from e
+            else:
+                raise
 
         pre_config_node = _yaml.node_copy(self._default_config_node)
         _yaml.composite(pre_config_node, self._project_conf)
@@ -398,6 +404,17 @@ class Project():
                 "Project requested format version {}, but BuildStream {}.{} only supports up until format version {}"
                 .format(format_version, major, minor, BST_FORMAT_VERSION))
 
+        # FIXME:
+        #
+        #   Performing this check manually in the absence
+        #   of proper support from _yaml.node_get(), this should
+        #   be removed in favor of a proper accessor function
+        #   from the _yaml module when #591 is fixed.
+        #
+        if self._project_conf.get('name') is None:
+            raise LoadError(LoadErrorReason.INVALID_DATA,
+                            "{}: project.conf does not contain expected key '{}'".format(projectfile, 'name'))
+
         # The project name, element path and option declarations
         # are constant and cannot be overridden by option conditional statements
         self.name = _yaml.node_get(pre_config_node, str, 'name')
@@ -460,7 +477,7 @@ class Project():
             'aliases', 'name',
             'artifacts', 'options',
             'fail-on-overlap', 'shell', 'fatal-warnings',
-            'ref-storage', 'sandbox', 'mirrors'
+            'ref-storage', 'sandbox', 'mirrors', 'remote-execution'
         ])
 
         #
@@ -471,6 +488,11 @@ class Project():
         # Load artifacts pull/push configuration for this project
         self.artifact_cache_specs = ArtifactCache.specs_from_config_node(config, self.directory)
 
+        # Load remote-execution configuration for this project
+        remote_execution = _yaml.node_get(config, Mapping, 'remote-execution')
+        _yaml.node_validate(remote_execution, ['url'])
+        self.remote_execution_url = _yaml.node_get(remote_execution, str, 'url')
+
         # Load sandbox environment variables
         self.base_environment = _yaml.node_get(config, Mapping, 'environment')
         self.base_env_nocache = _yaml.node_get(config, list, 'environment-nocache')
@@ -581,7 +603,10 @@ class Project():
         # any conditionals specified for project option declarations,
         # or conditionally specifying the project name; will be ignored.
         #
+        # Don't forget to also resolve options in the element and source overrides.
         output.options.process_node(config)
+        output.options.process_node(output.element_overrides)
+        output.options.process_node(output.source_overrides)
 
         # Load base variables
         output.base_variables = _yaml.node_get(config, Mapping, 'variables')
@@ -594,7 +619,8 @@ class Project():
         # Based on some testing (mainly on AWS), maximum effective
         # max-jobs value seems to be around 8-10 if we have enough cores
         # users should set values based on workload and build infrastructure
-        output.base_variables['max-jobs'] = str(min(len(os.sched_getaffinity(0)), 8))
+        platform = Platform.get_platform()
+        output.base_variables['max-jobs'] = str(platform.get_cpu_count(8))
 
         # Export options into variables, if that was requested
         output.options.export_variables(output.base_variables)
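
The `max-jobs` change above is needed because `os.sched_getaffinity()` does not exist on macOS; routing the lookup through `Platform.get_cpu_count()` lets the Darwin backend fall back to `os.cpu_count()`. The function below is a simplified stand-in (not the actual Platform method) showing the fallback and the cap of 8.

# Simplified stand-in for Platform.get_cpu_count(): prefer the scheduler
# affinity (Linux), fall back to the raw CPU count (macOS has no
# sched_getaffinity), and cap the result for the default 'max-jobs'.
import os

def get_cpu_count(cap=None):
    try:
        cpu_count = len(os.sched_getaffinity(0))
    except AttributeError:
        cpu_count = os.cpu_count()
    return cpu_count if cap is None else min(cpu_count, cap)

print(get_cpu_count(8))  # value used as the default 'max-jobs'
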
diff --git a/buildstream/_protos/google/rpc/code.proto b/buildstream/_protos/google/rpc/code.proto
new file mode 100644
index 0000000000000000000000000000000000000000..74e2c5c9ada78c5d91ce28439eec50ed24417391
--- /dev/null
+++ b/buildstream/_protos/google/rpc/code.proto
@@ -0,0 +1,186 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+option go_package = "google.golang.org/genproto/googleapis/rpc/code;code";
+option java_multiple_files = true;
+option java_outer_classname = "CodeProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The canonical error codes for Google APIs.
+//
+//
+// Sometimes multiple error codes may apply.  Services should return
+// the most specific error code that applies.  For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+enum Code {
+  // Not an error; returned on success
+  //
+  // HTTP Mapping: 200 OK
+  OK = 0;
+
+  // The operation was cancelled, typically by the caller.
+  //
+  // HTTP Mapping: 499 Client Closed Request
+  CANCELLED = 1;
+
+  // Unknown error.  For example, this error may be returned when
+  // a `Status` value received from another address space belongs to
+  // an error space that is not known in this address space.  Also
+  // errors raised by APIs that do not return enough error information
+  // may be converted to this error.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  UNKNOWN = 2;
+
+  // The client specified an invalid argument.  Note that this differs
+  // from `FAILED_PRECONDITION`.  `INVALID_ARGUMENT` indicates arguments
+  // that are problematic regardless of the state of the system
+  // (e.g., a malformed file name).
+  //
+  // HTTP Mapping: 400 Bad Request
+  INVALID_ARGUMENT = 3;
+
+  // The deadline expired before the operation could complete. For operations
+  // that change the state of the system, this error may be returned
+  // even if the operation has completed successfully.  For example, a
+  // successful response from a server could have been delayed long
+  // enough for the deadline to expire.
+  //
+  // HTTP Mapping: 504 Gateway Timeout
+  DEADLINE_EXCEEDED = 4;
+
+  // Some requested entity (e.g., file or directory) was not found.
+  //
+  // Note to server developers: if a request is denied for an entire class
+  // of users, such as gradual feature rollout or undocumented whitelist,
+  // `NOT_FOUND` may be used. If a request is denied for some users within
+  // a class of users, such as user-based access control, `PERMISSION_DENIED`
+  // must be used.
+  //
+  // HTTP Mapping: 404 Not Found
+  NOT_FOUND = 5;
+
+  // The entity that a client attempted to create (e.g., file or directory)
+  // already exists.
+  //
+  // HTTP Mapping: 409 Conflict
+  ALREADY_EXISTS = 6;
+
+  // The caller does not have permission to execute the specified
+  // operation. `PERMISSION_DENIED` must not be used for rejections
+  // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+  // instead for those errors). `PERMISSION_DENIED` must not be
+  // used if the caller can not be identified (use `UNAUTHENTICATED`
+  // instead for those errors). This error code does not imply the
+  // request is valid or the requested entity exists or satisfies
+  // other pre-conditions.
+  //
+  // HTTP Mapping: 403 Forbidden
+  PERMISSION_DENIED = 7;
+
+  // The request does not have valid authentication credentials for the
+  // operation.
+  //
+  // HTTP Mapping: 401 Unauthorized
+  UNAUTHENTICATED = 16;
+
+  // Some resource has been exhausted, perhaps a per-user quota, or
+  // perhaps the entire file system is out of space.
+  //
+  // HTTP Mapping: 429 Too Many Requests
+  RESOURCE_EXHAUSTED = 8;
+
+  // The operation was rejected because the system is not in a state
+  // required for the operation's execution.  For example, the directory
+  // to be deleted is non-empty, an rmdir operation is applied to
+  // a non-directory, etc.
+  //
+  // Service implementors can use the following guidelines to decide
+  // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+  //  (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+  //  (b) Use `ABORTED` if the client should retry at a higher level
+  //      (e.g., when a client-specified test-and-set fails, indicating the
+  //      client should restart a read-modify-write sequence).
+  //  (c) Use `FAILED_PRECONDITION` if the client should not retry until
+  //      the system state has been explicitly fixed.  E.g., if an "rmdir"
+  //      fails because the directory is non-empty, `FAILED_PRECONDITION`
+  //      should be returned since the client should not retry unless
+  //      the files are deleted from the directory.
+  //
+  // HTTP Mapping: 400 Bad Request
+  FAILED_PRECONDITION = 9;
+
+  // The operation was aborted, typically due to a concurrency issue such as
+  // a sequencer check failure or transaction abort.
+  //
+  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+  // `ABORTED`, and `UNAVAILABLE`.
+  //
+  // HTTP Mapping: 409 Conflict
+  ABORTED = 10;
+
+  // The operation was attempted past the valid range.  E.g., seeking or
+  // reading past end-of-file.
+  //
+  // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+  // be fixed if the system state changes. For example, a 32-bit file
+  // system will generate `INVALID_ARGUMENT` if asked to read at an
+  // offset that is not in the range [0,2^32-1], but it will generate
+  // `OUT_OF_RANGE` if asked to read from an offset past the current
+  // file size.
+  //
+  // There is a fair bit of overlap between `FAILED_PRECONDITION` and
+  // `OUT_OF_RANGE`.  We recommend using `OUT_OF_RANGE` (the more specific
+  // error) when it applies so that callers who are iterating through
+  // a space can easily look for an `OUT_OF_RANGE` error to detect when
+  // they are done.
+  //
+  // HTTP Mapping: 400 Bad Request
+  OUT_OF_RANGE = 11;
+
+  // The operation is not implemented or is not supported/enabled in this
+  // service.
+  //
+  // HTTP Mapping: 501 Not Implemented
+  UNIMPLEMENTED = 12;
+
+  // Internal errors.  This means that some invariants expected by the
+  // underlying system have been broken.  This error code is reserved
+  // for serious errors.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  INTERNAL = 13;
+
+  // The service is currently unavailable.  This is most likely a
+  // transient condition, which can be corrected by retrying with
+  // a backoff.
+  //
+  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+  // `ABORTED`, and `UNAVAILABLE`.
+  //
+  // HTTP Mapping: 503 Service Unavailable
+  UNAVAILABLE = 14;
+
+  // Unrecoverable data loss or corruption.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  DATA_LOSS = 15;
+}
\ No newline at end of file
diff --git a/buildstream/_protos/google/rpc/code_pb2.py b/buildstream/_protos/google/rpc/code_pb2.py
new file mode 100644
index 0000000000000000000000000000000000000000..e06dea194e1900c2fa91a09401395cf418b6b0ed
--- /dev/null
+++ b/buildstream/_protos/google/rpc/code_pb2.py
@@ -0,0 +1,133 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: google/rpc/code.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf.internal import enum_type_wrapper
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='google/rpc/code.proto',
+  package='google.rpc',
+  syntax='proto3',
+  serialized_options=_b('\n\016com.google.rpcB\tCodeProtoP\001Z3google.golang.org/genproto/googleapis/rpc/code;code\242\002\003RPC'),
+  serialized_pb=_b('\n\x15google/rpc/code.proto\x12\ngoogle.rpc*\xb7\x02\n\x04\x43ode\x12\x06\n\x02OK\x10\x00\x12\r\n\tCANCELLED\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x14\n\x10INVALID_ARGUMENT\x10\x03\x12\x15\n\x11\x44\x45\x41\x44LINE_EXCEEDED\x10\x04\x12\r\n\tNOT_FOUND\x10\x05\x12\x12\n\x0e\x41LREADY_EXISTS\x10\x06\x12\x15\n\x11PERMISSION_DENIED\x10\x07\x12\x13\n\x0fUNAUTHENTICATED\x10\x10\x12\x16\n\x12RESOURCE_EXHAUSTED\x10\x08\x12\x17\n\x13\x46\x41ILED_PRECONDITION\x10\t\x12\x0b\n\x07\x41\x42ORTED\x10\n\x12\x10\n\x0cOUT_OF_RANGE\x10\x0b\x12\x11\n\rUNIMPLEMENTED\x10\x0c\x12\x0c\n\x08INTERNAL\x10\r\x12\x0f\n\x0bUNAVAILABLE\x10\x0e\x12\r\n\tDATA_LOSS\x10\x0f\x42X\n\x0e\x63om.google.rpcB\tCodeProtoP\x01Z3google.golang.org/genproto/googleapis/rpc/code;code\xa2\x02\x03RPCb\x06proto3')
+)
+
+_CODE = _descriptor.EnumDescriptor(
+  name='Code',
+  full_name='google.rpc.Code',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='OK', index=0, number=0,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='CANCELLED', index=1, number=1,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNKNOWN', index=2, number=2,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='INVALID_ARGUMENT', index=3, number=3,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DEADLINE_EXCEEDED', index=4, number=4,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='NOT_FOUND', index=5, number=5,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ALREADY_EXISTS', index=6, number=6,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='PERMISSION_DENIED', index=7, number=7,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNAUTHENTICATED', index=8, number=16,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='RESOURCE_EXHAUSTED', index=9, number=8,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='FAILED_PRECONDITION', index=10, number=9,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='ABORTED', index=11, number=10,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='OUT_OF_RANGE', index=12, number=11,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNIMPLEMENTED', index=13, number=12,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='INTERNAL', index=14, number=13,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='UNAVAILABLE', index=15, number=14,
+      serialized_options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='DATA_LOSS', index=16, number=15,
+      serialized_options=None,
+      type=None),
+  ],
+  containing_type=None,
+  serialized_options=None,
+  serialized_start=38,
+  serialized_end=349,
+)
+_sym_db.RegisterEnumDescriptor(_CODE)
+
+Code = enum_type_wrapper.EnumTypeWrapper(_CODE)
+OK = 0
+CANCELLED = 1
+UNKNOWN = 2
+INVALID_ARGUMENT = 3
+DEADLINE_EXCEEDED = 4
+NOT_FOUND = 5
+ALREADY_EXISTS = 6
+PERMISSION_DENIED = 7
+UNAUTHENTICATED = 16
+RESOURCE_EXHAUSTED = 8
+FAILED_PRECONDITION = 9
+ABORTED = 10
+OUT_OF_RANGE = 11
+UNIMPLEMENTED = 12
+INTERNAL = 13
+UNAVAILABLE = 14
+DATA_LOSS = 15
+
+
+DESCRIPTOR.enum_types_by_name['Code'] = _CODE
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
+
+
+DESCRIPTOR._options = None
+# @@protoc_insertion_point(module_scope)
diff --git a/buildstream/_protos/google/rpc/code_pb2_grpc.py b/buildstream/_protos/google/rpc/code_pb2_grpc.py
new file mode 100644
index 0000000000000000000000000000000000000000..a89435267cb22829c48fadd60cf5623a31dc7aa3
--- /dev/null
+++ b/buildstream/_protos/google/rpc/code_pb2_grpc.py
@@ -0,0 +1,3 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
diff --git a/buildstream/_scheduler/jobs/__init__.py b/buildstream/_scheduler/jobs/__init__.py
index 185d8258ab97e90c45872b77706930232591f18e..4b0b11daceacdcae3bd791fb845919ef9eaeb2c6 100644
--- a/buildstream/_scheduler/jobs/__init__.py
+++ b/buildstream/_scheduler/jobs/__init__.py
@@ -1,3 +1,22 @@
+#
+#  Copyright (C) 2018 Codethink Limited
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Tristan Maat <tristan.maat@codethink.co.uk>
+
 from .elementjob import ElementJob
 from .cachesizejob import CacheSizeJob
 from .cleanupjob import CleanupJob
diff --git a/buildstream/_scheduler/jobs/cachesizejob.py b/buildstream/_scheduler/jobs/cachesizejob.py
index f73a09c742512be5a96f7dcd51a2295516bae2fe..d46fd4c16e691a90836f08349a6ebff698f2c91e 100644
--- a/buildstream/_scheduler/jobs/cachesizejob.py
+++ b/buildstream/_scheduler/jobs/cachesizejob.py
@@ -17,22 +17,25 @@
 #        Tristan Daniël Maat <tristan.maat@codethink.co.uk>
 #
 from .job import Job
-from ..._platform import Platform
 
 
 class CacheSizeJob(Job):
     def __init__(self, *args, complete_cb, **kwargs):
         super().__init__(*args, **kwargs)
         self._complete_cb = complete_cb
-        self._cache = Platform._instance.artifactcache
+
+        context = self._scheduler.context
+        self._artifacts = context.artifactcache
 
     def child_process(self):
-        return self._cache.calculate_cache_size()
+        return self._artifacts.compute_cache_size()
 
     def parent_complete(self, success, result):
-        self._cache._set_cache_size(result)
-        if self._complete_cb:
-            self._complete_cb(result)
+        if success:
+            self._artifacts.set_cache_size(result)
+
+            if self._complete_cb:
+                self._complete_cb(result)
 
     def child_process_data(self):
         return {}
diff --git a/buildstream/_scheduler/jobs/cleanupjob.py b/buildstream/_scheduler/jobs/cleanupjob.py
index bb78e8751f0ed935fb5a3c3003efa69d587c3f1c..8bdbba0edf97ee5badeed55c26a468f59567669b 100644
--- a/buildstream/_scheduler/jobs/cleanupjob.py
+++ b/buildstream/_scheduler/jobs/cleanupjob.py
@@ -17,22 +17,18 @@
 #        Tristan Daniël Maat <tristan.maat@codethink.co.uk>
 #
 from .job import Job
-from ..._platform import Platform
 
 
 class CleanupJob(Job):
-    def __init__(self, *args, complete_cb, **kwargs):
+    def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
-        self._complete_cb = complete_cb
-        self._cache = Platform._instance.artifactcache
+
+        context = self._scheduler.context
+        self._artifacts = context.artifactcache
 
     def child_process(self):
-        return self._cache.clean()
+        return self._artifacts.clean()
 
     def parent_complete(self, success, result):
-        self._cache._set_cache_size(result)
-        if self._complete_cb:
-            self._complete_cb()
-
-    def child_process_data(self):
-        return {}
+        if success:
+            self._artifacts.set_cache_size(result)
diff --git a/buildstream/_scheduler/jobs/elementjob.py b/buildstream/_scheduler/jobs/elementjob.py
index fcad20ce4b0c373d26272b87c3909f2a92e23b0e..8ce5c062fb1d48f4fcf1d2b8932e752c5ff9acca 100644
--- a/buildstream/_scheduler/jobs/elementjob.py
+++ b/buildstream/_scheduler/jobs/elementjob.py
@@ -109,13 +109,7 @@ class ElementJob(Job):
         data = {}
 
         workspace = self._element._get_workspace()
-        artifact_size = self._element._get_artifact_size()
-        cache_size = self._element._get_artifact_cache().calculate_cache_size()
-
         if workspace is not None:
             data['workspace'] = workspace.to_dict()
-        if artifact_size is not None:
-            data['artifact_size'] = artifact_size
-        data['cache_size'] = cache_size
 
         return data
diff --git a/buildstream/_scheduler/jobs/job.py b/buildstream/_scheduler/jobs/job.py
index 922ce56131e75b9c95b984f8eea01899b1af39bc..a1b90a080e8a1132f554f398124bac400eb8b1bd 100644
--- a/buildstream/_scheduler/jobs/job.py
+++ b/buildstream/_scheduler/jobs/job.py
@@ -31,7 +31,7 @@ import multiprocessing
 import psutil
 
 # BuildStream toplevel imports
-from ..._exceptions import ImplError, BstError, set_last_task_error
+from ..._exceptions import ImplError, BstError, set_last_task_error, SkipJob
 from ..._message import Message, MessageType, unconditional_messages
 from ... import _signals, utils
 
@@ -40,6 +40,7 @@ from ... import _signals, utils
 RC_OK = 0
 RC_FAIL = 1
 RC_PERM_FAIL = 2
+RC_SKIPPED = 3
 
 
 # Used to distinguish between status messages and return values
@@ -109,7 +110,7 @@ class Job():
         # Private members
         #
         self._scheduler = scheduler            # The scheduler
-        self._queue = multiprocessing.Queue()  # A message passing queue
+        self._queue = None                     # A message passing queue
         self._process = None                   # The Process object
         self._watcher = None                   # Child process watcher
         self._listening = False                # Whether the parent is currently listening
@@ -117,6 +118,8 @@ class Job():
         self._max_retries = max_retries        # Maximum number of automatic retries
         self._result = None                    # Return value of child action in the parent
         self._tries = 0                        # Try count, for retryable jobs
+        self._skipped_flag = False             # Indicate whether the job was skipped.
+        self._terminated = False               # Whether this job has been explicitly terminated
 
         # If False, a retry will not be attempted regardless of whether _tries is less than _max_retries.
         #
@@ -130,6 +133,8 @@ class Job():
     #
     def spawn(self):
 
+        self._queue = multiprocessing.Queue()
+
         self._tries += 1
         self._parent_start_listening()
 
@@ -187,6 +192,8 @@ class Job():
         # Terminate the process using multiprocessing API pathway
         self._process.terminate()
 
+        self._terminated = True
+
     # terminate_wait()
     #
     # Wait for terminated jobs to complete
@@ -270,11 +277,23 @@ class Job():
     # running the integration commands).
     #
     # Args:
-    #     (int): The plugin identifier for this task
+    #     task_id (int): The plugin identifier for this task
     #
     def set_task_id(self, task_id):
         self._task_id = task_id
 
+    # skipped
+    #
+    # This will evaluate to True if the job was skipped
+    # during processing, or if it was forcefully terminated.
+    #
+    # Returns:
+    #    (bool): Whether the job should appear as skipped
+    #
+    @property
+    def skipped(self):
+        return self._skipped_flag or self._terminated
+
     #######################################################
     #                  Abstract Methods                   #
     #######################################################
@@ -396,6 +415,13 @@ class Job():
             try:
                 # Try the task action
                 result = self.child_process()
+            except SkipJob as e:
+                elapsed = datetime.datetime.now() - starttime
+                self.message(MessageType.SKIPPED, str(e),
+                             elapsed=elapsed, logfile=filename)
+
+                # Alert parent of skip by return code
+                self._child_shutdown(RC_SKIPPED)
             except BstError as e:
                 elapsed = datetime.datetime.now() - starttime
                 self._retry_flag = e.temporary
@@ -403,7 +429,7 @@ class Job():
                 if self._retry_flag and (self._tries <= self._max_retries):
                     self.message(MessageType.FAIL,
                                  "Try #{} failed, retrying".format(self._tries),
-                                 elapsed=elapsed)
+                                 elapsed=elapsed, logfile=filename)
                 else:
                     self.message(MessageType.FAIL, str(e),
                                  elapsed=elapsed, detail=e.detail,
@@ -430,7 +456,8 @@ class Job():
                 self.message(MessageType.BUG, self.action_name,
                              elapsed=elapsed, detail=detail,
                              logfile=filename)
-                self._child_shutdown(RC_FAIL)
+                # Unhandled exceptions should permanently fail
+                self._child_shutdown(RC_PERM_FAIL)
 
             else:
                 # No exception occurred in the action
@@ -509,11 +536,6 @@ class Job():
         message.action_name = self.action_name
         message.task_id = self._task_id
 
-        if (message.message_type == MessageType.FAIL and
-                self._tries <= self._max_retries and self._retry_flag):
-            # Job will be retried, display failures as warnings in the frontend
-            message.message_type = MessageType.WARN
-
         # Send to frontend if appropriate
         if context.silent_messages() and (message.message_type not in unconditional_messages):
             return
@@ -547,14 +569,21 @@ class Job():
         # We don't want to retry if we got OK or a permanent fail.
         # This is set in _child_action but must also be set for the parent.
         #
-        self._retry_flag = returncode not in (RC_OK, RC_PERM_FAIL)
+        self._retry_flag = returncode == RC_FAIL
+
+        # Set the flag to alert Queue that this job was skipped.
+        self._skipped_flag = returncode == RC_SKIPPED
 
         if self._retry_flag and (self._tries <= self._max_retries) and not self._scheduler.terminated:
             self.spawn()
             return
 
-        self.parent_complete(returncode == RC_OK, self._result)
-        self._scheduler.job_completed(self, returncode == RC_OK)
+        success = returncode in (RC_OK, RC_SKIPPED)
+        self.parent_complete(success, self._result)
+        self._scheduler.job_completed(self, success)
+
+        # Force the deletion of the queue and process objects to try and clean up FDs
+        self._queue = self._process = None
 
     # _parent_process_envelope()
     #
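
The job changes above replace the old convention of returning a boolean from `Queue.done()` with an explicit `SkipJob` exception raised in the child process and a dedicated `RC_SKIPPED` return code observed by the parent. A condensed sketch of the control flow follows, with the multiprocessing plumbing reduced to plain function calls; the `RC_*` values and `SkipJob` mirror job.py, everything else is simplified for illustration.

# Condensed sketch of the skip-by-return-code flow.
RC_OK = 0
RC_FAIL = 1
RC_SKIPPED = 3

class SkipJob(Exception):
    """Raised by a queue's process() implementation to mark the job skipped."""

def child_action(process):
    try:
        process()
    except SkipJob:
        return RC_SKIPPED          # alert the parent via the return code
    except Exception:
        return RC_FAIL
    return RC_OK

def parent_child_completed(returncode):
    skipped = returncode == RC_SKIPPED
    success = returncode in (RC_OK, RC_SKIPPED)
    return skipped, success

def pull():
    raise SkipJob("Pull")          # e.g. nothing needed to be downloaded

print(parent_child_completed(child_action(pull)))  # (True, True)
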
diff --git a/buildstream/_scheduler/queues/buildqueue.py b/buildstream/_scheduler/queues/buildqueue.py
index 2009fce977a3656ef4bd1e525e48c9373d767a0e..984a5457ab72f9dc59c8d3b3738eb356938c221f 100644
--- a/buildstream/_scheduler/queues/buildqueue.py
+++ b/buildstream/_scheduler/queues/buildqueue.py
@@ -32,7 +32,7 @@ class BuildQueue(Queue):
 
     action_name = "Build"
     complete_name = "Built"
-    resources = [ResourceType.PROCESS]
+    resources = [ResourceType.PROCESS, ResourceType.CACHE]
 
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -46,6 +46,7 @@ class BuildQueue(Queue):
                 to_queue.append(element)
                 continue
 
+            # XXX: Fix this; see https://mail.gnome.org/archives/buildstream-list/2018-September/msg00029.html
             # Bypass queue processing entirely the first time it's tried.
             self._tried.add(element)
             _, description, detail = element._get_build_result()
@@ -67,8 +68,7 @@ class BuildQueue(Queue):
         return super().enqueue(to_queue)
 
     def process(self, element):
-        element._assemble()
-        return element._get_unique_id()
+        return element._assemble()
 
     def status(self, element):
         # state of dependencies may have changed, recalculate element state
@@ -87,18 +87,22 @@ class BuildQueue(Queue):
 
         return QueueStatus.READY
 
-    def _check_cache_size(self, job, element):
-        if not job.child_data:
-            return
+    def _check_cache_size(self, job, element, artifact_size):
 
-        artifact_size = job.child_data.get('artifact_size', False)
+        # After completing a build job, add the artifact size
+        # as returned from Element._assemble() to the estimated
+        # artifact cache size
+        #
+        context = self._scheduler.context
+        artifacts = context.artifactcache
 
-        if artifact_size:
-            cache = element._get_artifact_cache()
-            cache._add_artifact_size(artifact_size)
+        artifacts.add_artifact_size(artifact_size)
 
-            if cache.get_approximate_cache_size() > cache.cache_quota:
-                self._scheduler._check_cache_size_real()
+        # If the estimated size outgrows the quota, ask the scheduler
+        # to queue a job to actually check the real cache size.
+        #
+        if artifacts.has_quota_exceeded():
+            self._scheduler.check_cache_size()
 
     def done(self, job, element, result, success):
 
@@ -106,8 +110,6 @@ class BuildQueue(Queue):
             # Inform element in main process that assembly is done
             element._assemble_done()
 
-        # This has to be done after _assemble_done, such that the
-        # element may register its cache key as required
-        self._check_cache_size(job, element)
-
-        return True
+            # This has to be done after _assemble_done, such that the
+            # element may register its cache key as required
+            self._check_cache_size(job, element, result)
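
The reworked `_check_cache_size()` above feeds the artifact size returned by `Element._assemble()` into a running estimate, and only schedules the real, expensive cache-size job once that estimate exceeds the quota. A toy sketch of the accounting follows; only the method names `add_artifact_size()` and `has_quota_exceeded()` mirror this diff, the class, quota and sizes are invented.

# Toy sketch of the estimate-then-verify accounting.
class ExampleCache:

    def __init__(self, quota):
        self._quota = quota
        self._estimated_size = 0

    def add_artifact_size(self, artifact_size):
        self._estimated_size += artifact_size

    def has_quota_exceeded(self):
        return self._estimated_size > self._quota

cache = ExampleCache(quota=10 * 1024 * 1024)
for artifact_size in (4 << 20, 4 << 20, 4 << 20):   # three 4 MiB build results
    cache.add_artifact_size(artifact_size)
    if cache.has_quota_exceeded():
        # Reached on the third iteration only
        print("estimate over quota, scheduling a real cache-size job")
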
diff --git a/buildstream/_scheduler/queues/fetchqueue.py b/buildstream/_scheduler/queues/fetchqueue.py
index bd90a13b6c0eb6d375f49c3bc00b9732c4007ce4..446dbbd3b21984161157d53679517031799d227f 100644
--- a/buildstream/_scheduler/queues/fetchqueue.py
+++ b/buildstream/_scheduler/queues/fetchqueue.py
@@ -72,11 +72,9 @@ class FetchQueue(Queue):
     def done(self, _, element, result, success):
 
         if not success:
-            return False
+            return
 
         element._update_state()
 
         # Successful fetch, we must be CACHED now
         assert element._get_consistency() == Consistency.CACHED
-
-        return True
diff --git a/buildstream/_scheduler/queues/pullqueue.py b/buildstream/_scheduler/queues/pullqueue.py
index 5d732fcf82bb280f8e9a769514762f4c1ae47a71..2842c5e21fc46a814404aa57a66b427efa01196b 100644
--- a/buildstream/_scheduler/queues/pullqueue.py
+++ b/buildstream/_scheduler/queues/pullqueue.py
@@ -21,6 +21,7 @@
 # Local imports
 from . import Queue, QueueStatus
 from ..resources import ResourceType
+from ..._exceptions import SkipJob
 
 
 # A queue which pulls element artifacts
@@ -29,11 +30,12 @@ class PullQueue(Queue):
 
     action_name = "Pull"
     complete_name = "Pulled"
-    resources = [ResourceType.DOWNLOAD]
+    resources = [ResourceType.DOWNLOAD, ResourceType.CACHE]
 
     def process(self, element):
         # returns whether an artifact was downloaded or not
-        return element._pull()
+        if not element._pull():
+            raise SkipJob(self.action_name)
 
     def status(self, element):
         # state of dependencies may have changed, recalculate element state
@@ -62,8 +64,4 @@ class PullQueue(Queue):
         # Build jobs will check the "approximate" size first. Since we
         # do not get an artifact size from pull jobs, we have to
         # actually check the cache size.
-        self._scheduler._check_cache_size_real()
-
-        # Element._pull() returns True if it downloaded an artifact,
-        # here we want to appear skipped if we did not download.
-        return result
+        self._scheduler.check_cache_size()
diff --git a/buildstream/_scheduler/queues/pushqueue.py b/buildstream/_scheduler/queues/pushqueue.py
index 568e053d667ea731363a53684debfcf09e38518a..35532d23da9bf5151eaaea16c14e0118f3cd7be0 100644
--- a/buildstream/_scheduler/queues/pushqueue.py
+++ b/buildstream/_scheduler/queues/pushqueue.py
@@ -21,6 +21,7 @@
 # Local imports
 from . import Queue, QueueStatus
 from ..resources import ResourceType
+from ..._exceptions import SkipJob
 
 
 # A queue which pushes element artifacts
@@ -33,20 +34,11 @@ class PushQueue(Queue):
 
     def process(self, element):
         # returns whether an artifact was uploaded or not
-        return element._push()
+        if not element._push():
+            raise SkipJob(self.action_name)
 
     def status(self, element):
         if element._skip_push():
             return QueueStatus.SKIP
 
         return QueueStatus.READY
-
-    def done(self, _, element, result, success):
-
-        if not success:
-            return False
-
-        # Element._push() returns True if it uploaded an artifact,
-        # here we want to appear skipped if the remote already had
-        # the artifact.
-        return result
diff --git a/buildstream/_scheduler/queues/queue.py b/buildstream/_scheduler/queues/queue.py
index 28da17711d7769cddbc99663df891fb6e63da31d..909cebb44399e24433ebe9f6635f1bfd71a73eb7 100644
--- a/buildstream/_scheduler/queues/queue.py
+++ b/buildstream/_scheduler/queues/queue.py
@@ -136,10 +136,6 @@ class Queue():
     #    success (bool): True if the process() implementation did not
     #                    raise any exception
     #
-    # Returns:
-    #    (bool): True if the element should appear to be processsed,
-    #            Otherwise False will count the element as "skipped"
-    #
     def done(self, job, element, result, success):
         pass
 
@@ -212,7 +208,7 @@ class Queue():
     # This will have different results for elements depending
     # on the Queue.status() implementation.
     #
-    #   o Elements which are QueueStatus.WAIT will not be effected
+    #   o Elements which are QueueStatus.WAIT will not be affected
     #
     #   o Elements which are QueueStatus.SKIP will move directly
     #     to the dequeue pool
@@ -301,15 +297,12 @@ class Queue():
         # Update values that need to be synchronized in the main task
         # before calling any queue implementation
         self._update_workspaces(element, job)
-        if job.child_data:
-            element._get_artifact_cache().cache_size = job.child_data.get('cache_size')
 
         # Give the result of the job to the Queue implementor,
         # and determine if it should be considered as processed
         # or skipped.
         try:
-            processed = self.done(job, element, result, success)
-
+            self.done(job, element, result, success)
         except BstError as e:
 
             # Report error and mark as failed
@@ -333,16 +326,20 @@ class Queue():
                           detail=traceback.format_exc())
             self.failed_elements.append(element)
         else:
-
-            # No exception occured, handle the success/failure state in the normal way
             #
+            # No exception occurred in post processing
+            #
+
+            # All jobs get placed on the done queue for later processing.
             self._done_queue.append(job)
 
-            if success:
-                if processed:
-                    self.processed_elements.append(element)
-                else:
-                    self.skipped_elements.append(element)
+            # A job can be skipped whether or not it has failed; we
+            # only want to bookkeep it as processed or failed if it
+            # was not skipped.
+            if job.skipped:
+                self.skipped_elements.append(element)
+            elif success:
+                self.processed_elements.append(element)
             else:
                 self.failed_elements.append(element)
 
diff --git a/buildstream/_scheduler/queues/trackqueue.py b/buildstream/_scheduler/queues/trackqueue.py
index c7a8f4cc7939022a797418f9693a2e58d4bc6343..133655e143ced62b98012e8d1bd91a139c7600f8 100644
--- a/buildstream/_scheduler/queues/trackqueue.py
+++ b/buildstream/_scheduler/queues/trackqueue.py
@@ -51,27 +51,11 @@ class TrackQueue(Queue):
     def done(self, _, element, result, success):
 
         if not success:
-            return False
-
-        changed = False
+            return
 
         # Set the new refs in the main process one by one as they complete
         for unique_id, new_ref in result:
             source = _plugin_lookup(unique_id)
-            try:
-                # We appear processed if at least one source has changed
-                if source._save_ref(new_ref):
-                    changed = True
-            except SourceError as e:
-                # FIXME: We currently dont have a clear path to
-                #        fail the scheduler from the main process, so
-                #        this will just warn and BuildStream will exit
-                #        with a success code.
-                #
-                source.warn("Failed to update project file",
-                            detail="{}".format(e))
+            source._save_ref(new_ref)
 
         element._tracking_done()
-
-        # We'll appear as a skipped element if tracking resulted in no change
-        return changed
diff --git a/buildstream/_scheduler/resources.py b/buildstream/_scheduler/resources.py
index bbf851b06c5c01a5a3b354758c5dfecba8156326..fcf10d7bd119753de6258ea1b8dd00fce181744f 100644
--- a/buildstream/_scheduler/resources.py
+++ b/buildstream/_scheduler/resources.py
@@ -8,7 +8,7 @@ class ResourceType():
 class Resources():
     def __init__(self, num_builders, num_fetchers, num_pushers):
         self._max_resources = {
-            ResourceType.CACHE: 1,
+            ResourceType.CACHE: 0,
             ResourceType.DOWNLOAD: num_fetchers,
             ResourceType.PROCESS: num_builders,
             ResourceType.UPLOAD: num_pushers
diff --git a/buildstream/_scheduler/scheduler.py b/buildstream/_scheduler/scheduler.py
index 38d38be4837e4d78bead398459fe1bd38b650871..b76c7308e1964198d5dc846c0d9a9a2ecf93ed80 100644
--- a/buildstream/_scheduler/scheduler.py
+++ b/buildstream/_scheduler/scheduler.py
@@ -29,7 +29,6 @@ from contextlib import contextmanager
 # Local imports
 from .resources import Resources, ResourceType
 from .jobs import CacheSizeJob, CleanupJob
-from .._platform import Platform
 
 
 # A decent return code for Scheduler.run()
@@ -241,6 +240,25 @@ class Scheduler():
         self._schedule_queue_jobs()
         self._sched()
 
+    # check_cache_size():
+    #
+    # Queues a cache size calculation job; once the cache size
+    # has been calculated, a cleanup job will be run automatically
+    # if needed.
+    #
+    # FIXME: This should ensure that only one cache size job
+    #        is ever pending at a given time. If a cache size
+    #        job is already running, it is correct to queue
+    #        a new one; it is incorrect, however, to have more
+    #        than one of these jobs pending at a given time.
+    #
+    def check_cache_size(self):
+        job = CacheSizeJob(self, 'cache_size', 'cache_size/cache_size',
+                           resources=[ResourceType.CACHE,
+                                      ResourceType.PROCESS],
+                           complete_cb=self._run_cleanup)
+        self.schedule_jobs([job])
+
     #######################################################
     #                  Local Private Methods              #
     #######################################################
@@ -316,24 +334,29 @@ class Scheduler():
         self.schedule_jobs(ready)
         self._sched()
 
+    # _run_cleanup()
+    #
+    # Schedules the cache cleanup job if the passed size
+    # exceeds the cache quota.
+    #
+    # Args:
+    #    cache_size (int): The calculated cache size (ignored)
+    #
+    # NOTE: This runs in response to completion of the cache size
+    #       calculation job launched by Scheduler.check_cache_size(),
+    #       which will report the calculated cache size.
+    #
     def _run_cleanup(self, cache_size):
-        platform = Platform.get_platform()
-        if cache_size and cache_size < platform.artifactcache.cache_quota:
+        context = self.context
+        artifacts = context.artifactcache
+
+        if not artifacts.has_quota_exceeded():
             return
 
-        job = CleanupJob(self, 'cleanup', 'cleanup',
+        job = CleanupJob(self, 'cleanup', 'cleanup/cleanup',
                          resources=[ResourceType.CACHE,
                                     ResourceType.PROCESS],
-                         exclusive_resources=[ResourceType.CACHE],
-                         complete_cb=None)
-        self.schedule_jobs([job])
-
-    def _check_cache_size_real(self):
-        job = CacheSizeJob(self, 'cache_size', 'cache_size',
-                           resources=[ResourceType.CACHE,
-                                      ResourceType.PROCESS],
-                           exclusive_resources=[ResourceType.CACHE],
-                           complete_cb=self._run_cleanup)
+                         exclusive_resources=[ResourceType.CACHE])
         self.schedule_jobs([job])
 
     # _suspend_jobs()
@@ -364,6 +387,15 @@ class Scheduler():
     # A loop registered event callback for keyboard interrupts
     #
     def _interrupt_event(self):
+
+        # FIXME: This should not be needed, but for some reason we receive an
+        #        additional SIGINT event when the user hits ^C a second time
+        #        to inform us that they really intend to terminate; even though
+        #        we have disconnected our handlers at this time.
+        #
+        if self.terminated:
+            return
+
         # Leave this to the frontend to decide, if no
         # interrrupt callback was specified, then just terminate.
         if self._interrupt_callback:
diff --git a/buildstream/_site.py b/buildstream/_site.py
index ff169180fb9985be5462545b0daa570b6c1924ee..30e1000d48d7c13b802e651ce859b8e466968dfc 100644
--- a/buildstream/_site.py
+++ b/buildstream/_site.py
@@ -78,18 +78,12 @@ def check_bwrap_version(major, minor, patch):
         if not bwrap_path:
             return False
         cmd = [bwrap_path, "--version"]
-        version = str(subprocess.check_output(cmd).split()[1], "utf-8")
+        try:
+            version = str(subprocess.check_output(cmd).split()[1], "utf-8")
+        except subprocess.CalledProcessError:
+            # Failure trying to run bubblewrap
+            return False
         _bwrap_major, _bwrap_minor, _bwrap_patch = map(int, version.split("."))
 
     # Check whether the installed version meets the requirements
-    if _bwrap_major > major:
-        return True
-    elif _bwrap_major < major:
-        return False
-    else:
-        if _bwrap_minor > minor:
-            return True
-        elif _bwrap_minor < minor:
-            return False
-        else:
-            return _bwrap_patch >= patch
+    return (_bwrap_major, _bwrap_minor, _bwrap_patch) >= (major, minor, patch)
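The rewritten check relies on Python's lexicographic tuple comparison: tuples are compared element by element from the left, so a (major, minor, patch) triple orders the same way a version number reads. A minimal illustration of the idiom (the version numbers below are arbitrary):

    # Tuples compare left to right, so the first differing component decides.
    def version_at_least(installed, required):
        return tuple(installed) >= tuple(required)

    assert version_at_least((0, 3, 1), (0, 1, 8))       # 3 > 1 decides it
    assert not version_at_least((0, 1, 8), (0, 3, 1))
    assert version_at_least((1, 0, 0), (0, 99, 99))     # major wins first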
diff --git a/buildstream/_stream.py b/buildstream/_stream.py
index cceb3d3a5e9bb5e59b1afe901aed883424c5a5bb..e7a71978bb20e4ccc0453b1d2d54ebff9ac54418 100644
--- a/buildstream/_stream.py
+++ b/buildstream/_stream.py
@@ -32,7 +32,6 @@ from ._exceptions import StreamError, ImplError, BstError, set_last_task_error
 from ._message import Message, MessageType
 from ._scheduler import Scheduler, SchedStatus, TrackQueue, FetchQueue, BuildQueue, PullQueue, PushQueue
 from ._pipeline import Pipeline, PipelineSelection
-from ._platform import Platform
 from . import utils, _yaml, _site
 from . import Scope, Consistency
 
@@ -71,8 +70,7 @@ class Stream():
         #
         # Private members
         #
-        self._platform = Platform.get_platform()
-        self._artifacts = self._platform.artifactcache
+        self._artifacts = context.artifactcache
         self._context = context
         self._project = project
         self._pipeline = Pipeline(context, project, self._artifacts)
@@ -703,6 +701,7 @@ class Stream():
 
         # Create a temporary directory to build the source tree in
         builddir = self._context.builddir
+        os.makedirs(builddir, exist_ok=True)
         prefix = "{}-".format(target.normal_name)
 
         with TemporaryDirectory(prefix=prefix, dir=builddir) as tempdir:
@@ -938,13 +937,10 @@ class Stream():
         # Set the "required" artifacts that should not be removed
         # while this pipeline is active
         #
-        # FIXME: The set of required artifacts is only really needed
-        #        for build and pull tasks.
+        # It must include all the artifacts which are required by the
+        # final product. Note that this is a superset of the build plan.
         #
-        #        It must include all the artifacts which are required by the
-        #        final product. Note that this is a superset of the build plan.
-        #
-        self._artifacts.append_required_artifacts((e for e in self._pipeline.dependencies(elements, Scope.ALL)))
+        self._artifacts.mark_required_elements(self._pipeline.dependencies(elements, Scope.ALL))
 
         if selection == PipelineSelection.PLAN and dynamic_plan:
             # We use a dynamic build plan, only request artifacts of top-level targets,
@@ -1088,6 +1084,7 @@ class Stream():
         for element in elements:
             source_dir = os.path.join(directory, "source")
             element_source_dir = os.path.join(source_dir, element.normal_name)
+            os.makedirs(element_source_dir)
 
             element._stage_sources_at(element_source_dir)
 
diff --git a/buildstream/_variables.py b/buildstream/_variables.py
index 8299f1c1e50a0d20a9d6b097aaad5280132c7f5f..1a52b5680a95c9a7d30477a410f08a5705789aa9 100644
--- a/buildstream/_variables.py
+++ b/buildstream/_variables.py
@@ -61,7 +61,7 @@ class Variables():
     #    LoadError, if the string contains unresolved variable references.
     #
     def subst(self, string):
-        substitute, unmatched = self._subst(string, self.variables)
+        substitute, unmatched, _ = self._subst(string, self.variables)
         unmatched = list(set(unmatched))
         if unmatched:
             if len(unmatched) == 1:
@@ -82,6 +82,7 @@ class Variables():
         def subst_callback(match):
             nonlocal variables
             nonlocal unmatched
+            nonlocal matched
 
             token = match.group(0)
             varname = match.group(1)
@@ -91,6 +92,7 @@ class Variables():
                 # We have to check if the inner string has variables
                 # and return unmatches for those
                 unmatched += re.findall(_VARIABLE_MATCH, value)
+                matched += [varname]
             else:
                 # Return unmodified token
                 unmatched += [varname]
@@ -98,10 +100,11 @@ class Variables():
 
             return value
 
+        matched = []
         unmatched = []
         replacement = re.sub(_VARIABLE_MATCH, subst_callback, string)
 
-        return (replacement, unmatched)
+        return (replacement, unmatched, matched)
 
     # Variable resolving code
     #
@@ -131,7 +134,15 @@ class Variables():
                 # Ensure stringness of the value before substitution
                 value = _yaml.node_get(variables, str, key)
 
-                resolved_var, item_unmatched = self._subst(value, variables)
+                resolved_var, item_unmatched, matched = self._subst(value, variables)
+
+                if _wrap_variable(key) in resolved_var:
+                    referenced_through = find_recursive_variable(key, matched, variables)
+                    raise LoadError(LoadErrorReason.RECURSIVE_VARIABLE,
+                                    "{}: ".format(_yaml.node_get_provenance(variables, key)) +
+                                    ("Variable '{}' expands to contain a reference to itself. " +
+                                     "Perhaps '{}' contains '{}").format(key, referenced_through, _wrap_variable(key)))
+
                 resolved[key] = resolved_var
                 unmatched += item_unmatched
 
@@ -168,8 +179,21 @@ class Variables():
     # Helper function to fetch information about the node referring to a variable
     #
     def _find_references(self, varname):
-        fullname = '%{' + varname + '}'
+        fullname = _wrap_variable(varname)
         for key, value in _yaml.node_items(self.original):
             if fullname in value:
                 provenance = _yaml.node_get_provenance(self.original, key)
                 yield (key, provenance)
+
+
+def find_recursive_variable(variable, matched_variables, all_vars):
+    matched_values = (_yaml.node_get(all_vars, str, key) for key in matched_variables)
+    for key, value in zip(matched_variables, matched_values):
+        if _wrap_variable(variable) in value:
+            return key
+    else:
+        return None
+
+
+def _wrap_variable(var):
+    return "%{" + var + "}"
diff --git a/buildstream/_versions.py b/buildstream/_versions.py
index 713cb9d678f2dbf6625371e816759a681ba3f23b..8ad2f8cb7df65080b95287e8fe15de20b4553aa7 100644
--- a/buildstream/_versions.py
+++ b/buildstream/_versions.py
@@ -23,7 +23,7 @@
 # This version is bumped whenever enhancements are made
 # to the `project.conf` format or the core element format.
 #
-BST_FORMAT_VERSION = 16
+BST_FORMAT_VERSION = 17
 
 
 # The base BuildStream artifact version
@@ -33,4 +33,4 @@ BST_FORMAT_VERSION = 16
 # or if buildstream was changed in a way which can cause
 # the same cache key to produce something that is no longer
 # the same.
-BST_CORE_ARTIFACT_VERSION = 5
+BST_CORE_ARTIFACT_VERSION = 6
diff --git a/buildstream/_yaml.py b/buildstream/_yaml.py
index 66500fbade9c126e7b3be45aa7ddf5d679cae57e..30fc77291b265b3030bc295b1645eaf26b7e1fc5 100644
--- a/buildstream/_yaml.py
+++ b/buildstream/_yaml.py
@@ -183,20 +183,32 @@ class CompositeTypeError(CompositeError):
 #    shortname (str): The filename in shorthand for error reporting (or None)
 #    copy_tree (bool): Whether to make a copy, preserving the original toplevels
 #                      for later serialization
+#    yaml_cache (YamlCache): A yaml cache to consult rather than parsing
 #
 # Returns (dict): A loaded copy of the YAML file with provenance information
 #
 # Raises: LoadError
 #
-def load(filename, shortname=None, copy_tree=False, *, project=None):
+def load(filename, shortname=None, copy_tree=False, *, project=None, yaml_cache=None):
     if not shortname:
         shortname = filename
 
     file = ProvenanceFile(filename, shortname, project)
 
     try:
+        data = None
         with open(filename) as f:
-            return load_data(f, file, copy_tree=copy_tree)
+            contents = f.read()
+        if yaml_cache:
+            data, key = yaml_cache.get(project, filename, contents, copy_tree)
+
+        if not data:
+            data = load_data(contents, file, copy_tree=copy_tree)
+
+        if yaml_cache:
+            yaml_cache.put_from_key(project, filename, key, data)
+
+        return data
     except FileNotFoundError as e:
         raise LoadError(LoadErrorReason.MISSING_FILE,
                         "Could not find file at {}".format(filename)) from e
@@ -467,7 +479,7 @@ def node_get_project_path(node, key, project_dir, *,
                         "{}: Specified path '{}' does not exist"
                         .format(provenance, path_str))
 
-    is_inside = project_dir_path in full_resolved_path.parents or (
+    is_inside = project_dir_path.resolve() in full_resolved_path.parents or (
         full_resolved_path == project_dir_path)
 
     if path.is_absolute() or not is_inside:
@@ -960,7 +972,7 @@ def node_validate(node, valid_keys):
 #
 # The purpose of this is to create a virtual copy-on-write
 # copy of a dictionary, so that mutating it in any way does
-# not effect the underlying dictionaries.
+# not affect the underlying dictionaries.
 #
 # collections.ChainMap covers this already mostly, but fails
 # to record internal state so as to hide keys which have been
diff --git a/buildstream/_yamlcache.py b/buildstream/_yamlcache.py
new file mode 100644
index 0000000000000000000000000000000000000000..39b24cccc7c3eb748f42a39f24fc55d0839f67af
--- /dev/null
+++ b/buildstream/_yamlcache.py
@@ -0,0 +1,348 @@
+#
+#  Copyright 2018 Bloomberg Finance LP
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Jonathan Maw <jonathan.maw@codethink.co.uk>
+
+import os
+import pickle
+import hashlib
+import io
+
+import sys
+
+from contextlib import contextmanager
+from collections import namedtuple
+
+from ._cachekey import generate_key
+from ._context import Context
+from . import utils, _yaml
+
+
+YAML_CACHE_FILENAME = "yaml_cache.pickle"
+
+
+# YamlCache()
+#
+# A cache that wraps around the loading of yaml in projects.
+#
+# The recommended way to use a YamlCache is:
+#   with YamlCache.open(context) as yamlcache:
+#     # Load all the yaml
+#     ...
+#
+# Args:
+#    context (Context): The invocation Context
+#
+class YamlCache():
+
+    def __init__(self, context):
+        self._project_caches = {}
+        self._context = context
+
+    ##################
+    # Public Methods #
+    ##################
+
+    # is_cached():
+    #
+    # Checks whether a file is cached.
+    #
+    # Args:
+    #    project (Project): The project this file is in.
+    #    filepath (str): The path to the file, *relative to the project's directory*.
+    #
+    # Returns:
+    #    (bool): Whether the file is cached.
+    def is_cached(self, project, filepath):
+        cache_path = self._get_filepath(project, filepath)
+        project_name = project.name if project else ""
+        try:
+            project_cache = self._project_caches[project_name]
+            if cache_path in project_cache.elements:
+                return True
+        except KeyError:
+            pass
+        return False
+
+    # open():
+    #
+    # Return an instance of the YamlCache which writes to disk when it leaves scope.
+    #
+    # Args:
+    #    context (Context): The context.
+    #    cachefile (str): The path to the cache file.
+    #
+    # Returns:
+    #    (YamlCache): A YamlCache.
+    @staticmethod
+    @contextmanager
+    def open(context, cachefile):
+        # Try to load from disk first
+        cache = None
+        if os.path.exists(cachefile):
+            try:
+                with open(cachefile, "rb") as f:
+                    cache = BstUnpickler(f, context).load()
+            except EOFError:
+                # The file was empty
+                pass
+            except pickle.UnpicklingError as e:
+                sys.stderr.write("Failed to load YamlCache, {}\n".format(e))
+
+        # Failed to load from disk, create a new one
+        if not cache:
+            cache = YamlCache(context)
+
+        yield cache
+
+        cache._write(cachefile)
+
+    # get_cache_file():
+    #
+    # Retrieves a path to the yaml cache file.
+    #
+    # Returns:
+    #   (str): The path to the cache file
+    @staticmethod
+    def get_cache_file(top_dir):
+        return os.path.join(top_dir, ".bst", YAML_CACHE_FILENAME)
+
+    # get():
+    #
+    # Gets a parsed file from the cache.
+    #
+    # Args:
+    #    project (Project) or None: The project this file is in, if it exists.
+    #    filepath (str): The absolute path to the file.
+    #    contents (str): The contents of the file to be cached
+    #    copy_tree (bool): Whether the data should make a copy when it's being generated
+    #                      (i.e. exactly as when called in yaml)
+    #
+    # Returns:
+    #    (decorated dict): The parsed yaml from the cache, or None if the file isn't in the cache.
+    #    (str):            The key used to look up the parsed yaml in the cache
+    def get(self, project, filepath, contents, copy_tree):
+        key = self._calculate_key(contents, copy_tree)
+        data = self._get(project, filepath, key)
+        return data, key
+
+    # put():
+    #
+    # Puts a parsed file into the cache.
+    #
+    # Args:
+    #    project (Project): The project this file is in.
+    #    filepath (str): The path to the file.
+    #    contents (str): The contents of the file that has been cached
+    #    copy_tree (bool): Whether the data should make a copy when it's being generated
+    #                      (i.e. exactly as when called in yaml)
+    #    value (decorated dict): The data to put into the cache.
+    def put(self, project, filepath, contents, copy_tree, value):
+        key = self._calculate_key(contents, copy_tree)
+        self.put_from_key(project, filepath, key, value)
+
+    # put_from_key():
+    #
+    # Put a parsed file into the cache when given a key.
+    #
+    # Args:
+    #    project (Project): The project this file is in.
+    #    filepath (str): The path to the file.
+    #    key (str): The key to the file within the cache. Typically, this is the
+    #               value of `calculate_key()` with the file's unparsed contents
+    #               and any relevant metadata passed in.
+    #    value (decorated dict): The data to put into the cache.
+    def put_from_key(self, project, filepath, key, value):
+        cache_path = self._get_filepath(project, filepath)
+        project_name = project.name if project else ""
+        try:
+            project_cache = self._project_caches[project_name]
+        except KeyError:
+            project_cache = self._project_caches[project_name] = CachedProject({})
+
+        project_cache.elements[cache_path] = CachedYaml(key, value)
+
+    ###################
+    # Private Methods #
+    ###################
+
+    # Writes the yaml cache to the specified path.
+    #
+    # Args:
+    #    path (str): The path to the cache file.
+    def _write(self, path):
+        parent_dir = os.path.dirname(path)
+        os.makedirs(parent_dir, exist_ok=True)
+        with open(path, "wb") as f:
+            BstPickler(f).dump(self)
+
+    # _get_filepath():
+    #
+    # Returns a file path relative to a project if passed, or the original path if
+    # the project is None
+    #
+    # Args:
+    #    project (Project) or None: The project the filepath exists within
+    #    full_path (str): The path that the returned path is based on
+    #
+    # Returns:
+    #    (str): The path to the file, relative to a project if it exists
+    def _get_filepath(self, project, full_path):
+        if project:
+            assert full_path.startswith(project.directory)
+            filepath = os.path.relpath(full_path, project.directory)
+        else:
+            filepath = full_path
+        return filepath
+
+    # _calculate_key():
+    #
+    # Calculates a key for putting into the cache.
+    #
+    # Args:
+    #    (basic object)... : Any number of strictly-ordered basic objects
+    #
+    # Returns:
+    #   (str): A key made out of every arg passed in
+    @staticmethod
+    def _calculate_key(*args):
+        string = pickle.dumps(args)
+        return hashlib.sha1(string).hexdigest()
+
+    # _get():
+    #
+    # Gets a parsed file from the cache when given a key.
+    #
+    # Args:
+    #    project (Project): The project this file is in.
+    #    filepath (str): The path to the file.
+    #    key (str): The key to the file within the cache. Typically, this is the
+    #               value of `calculate_key()` with the file's unparsed contents
+    #               and any relevant metadata passed in.
+    #
+    # Returns:
+    #    (decorated dict): The parsed yaml from the cache, or None if the file isn't in the cache.
+    def _get(self, project, filepath, key):
+        cache_path = self._get_filepath(project, filepath)
+        project_name = project.name if project else ""
+        try:
+            project_cache = self._project_caches[project_name]
+            try:
+                cachedyaml = project_cache.elements[cache_path]
+                if cachedyaml._key == key:
+                    # We've unpickled the YamlCache, but not the specific file
+                    if cachedyaml._contents is None:
+                        cachedyaml._contents = BstUnpickler.loads(cachedyaml._pickled_contents, self._context)
+                    return cachedyaml._contents
+            except KeyError:
+                pass
+        except KeyError:
+            pass
+        return None
+
+
+CachedProject = namedtuple('CachedProject', ['elements'])
+
+
+class CachedYaml():
+    def __init__(self, key, contents):
+        self._key = key
+        self.set_contents(contents)
+
+    # Sets the contents of the CachedYaml.
+    #
+    # Args:
+    #    contents (provenanced dict): The contents to put in the cache.
+    #
+    def set_contents(self, contents):
+        self._contents = contents
+        self._pickled_contents = BstPickler.dumps(contents)
+
+    # Pickling helper method, prevents 'contents' from being serialised
+    def __getstate__(self):
+        data = self.__dict__.copy()
+        data['_contents'] = None
+        return data
+
+
+# In _yaml.load, we have a ProvenanceFile that stores the project the file
+# came from. Projects can't be pickled, but it's always going to be the same
+# project between invocations (unless the entire project is moved but the
+# file stayed in the same place)
+class BstPickler(pickle.Pickler):
+    def persistent_id(self, obj):
+        if isinstance(obj, _yaml.ProvenanceFile):
+            if obj.project:
+                # ProvenanceFile's project object cannot be stored as it is.
+                project_tag = obj.project.name
+                # ProvenanceFile's filename must be stored relative to the
+                # project, as the project dir may move.
+                name = os.path.relpath(obj.name, obj.project.directory)
+            else:
+                project_tag = None
+                name = obj.name
+            return ("ProvenanceFile", name, obj.shortname, project_tag)
+        elif isinstance(obj, Context):
+            return ("Context",)
+        else:
+            return None
+
+    @staticmethod
+    def dumps(obj):
+        stream = io.BytesIO()
+        BstPickler(stream).dump(obj)
+        stream.seek(0)
+        return stream.read()
+
+
+class BstUnpickler(pickle.Unpickler):
+    def __init__(self, file, context):
+        super().__init__(file)
+        self._context = context
+
+    def persistent_load(self, pid):
+        if pid[0] == "ProvenanceFile":
+            _, tagged_name, shortname, project_tag = pid
+
+            if project_tag is not None:
+                project = None
+                for p in self._context.get_projects():
+                    if project_tag == p.name:
+                        project = p
+                        break
+
+                if not project:
+                    projects = [p.name for p in self._context.get_projects()]
+                    raise pickle.UnpicklingError("No project with name {} found in {}"
+                                                 .format(project_tag, projects))
+
+                name = os.path.join(project.directory, tagged_name)
+            else:
+                project = None
+                name = tagged_name
+
+            return _yaml.ProvenanceFile(name, shortname, project)
+        elif pid[0] == "Context":
+            return self._context
+        else:
+            raise pickle.UnpicklingError("Unsupported persistent object, {}".format(pid))
+
+    @staticmethod
+    def loads(text, context):
+        stream = io.BytesIO()
+        stream.write(bytes(text))
+        stream.seek(0)
+        return BstUnpickler(stream, context).load()
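Putting the new pieces together, the intended usage of the cache with ``_yaml.load()`` looks roughly like the sketch below. The ``load_element_files()`` helper, its arguments and the file list are illustrative placeholders; the real call sites live in the loader, which threads a single ``YamlCache`` through all of its loads:

    from buildstream import _yaml
    from buildstream._yamlcache import YamlCache

    def load_element_files(context, project, filenames):
        # Parsed results are stored back in the cache, which is pickled to
        # <project>/.bst/yaml_cache.pickle when the context manager exits.
        cachefile = YamlCache.get_cache_file(project.directory)
        loaded = []
        with YamlCache.open(context, cachefile) as yaml_cache:
            for filename in filenames:
                data = _yaml.load(filename, copy_tree=False,
                                  project=project, yaml_cache=yaml_cache)
                loaded.append(data)
        return loaded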
diff --git a/buildstream/buildelement.py b/buildstream/buildelement.py
index d729eaa81138641f21fa3301a8702b5949f4ad48..3edc6d2a1123cf9bcc8d4c4fc14d38a9bcab897f 100644
--- a/buildstream/buildelement.py
+++ b/buildstream/buildelement.py
@@ -23,6 +23,50 @@ BuildElement - Abstract class for build elements
 The BuildElement class is a convenience element one can derive from for
 implementing the most common case of element.
 
+Built-in functionality
+----------------------
+
+The BuildElement base class provides built-in functionality that may be
+overridden by individual plugins.
+
+This section gives a brief summary of how some of the common features work;
+some of them, or the variables they use, are detailed further in the
+following sections.
+
+Location for running commands
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``command-subdir`` variable sets where the build commands will be executed.
+It is defined relative to the buildroot, and the directory will be created if
+it does not already exist.
+
+Location for configuring the project
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The ``conf-root`` variable is defined by default as ``.`` and is the location
+that a specific build element can use to look for build configuration files.
+It is used by elements such as autotools, cmake, distutils, meson, pip and
+qmake.
+
+The configuration commands are run in ``command-subdir``; since ``conf-root``
+defaults to ``.``, the configuration files in ``command-subdir`` are used
+unless ``conf-root`` is set to point elsewhere.
+
+For example, by setting ``conf-root`` to ``"%{build-root}/Source/conf_location"``
+and your source's ``directory`` variable to ``Source``, the configuration files
+in the ``conf_location`` directory within your sources will be used.
+The current working directory when your configuration command is run will still
+be wherever you set ``command-subdir`` to be, regardless of where the configure
+scripts are located via ``conf-root``.
+
+.. note::
+
+   The ``conf-root`` variable is available since :ref:`format version 17 <project_format_version>`
+
+Install Location
+~~~~~~~~~~~~~~~~
+
+You should not change the ``install-root`` variable as it is a special
+writeable location in the sandbox. It is, however, useful when writing custom
+install instructions, as it may need to be supplied as the ``DESTDIR``; see
+the :mod:`cmake <elements.cmake>` build element for an example.
 
 Abstract method implementations
 -------------------------------
@@ -132,7 +176,7 @@ class BuildElement(Element):
 
         # Specifying notparallel for a given element effects the
         # cache key, while having the side effect of setting max-jobs to 1,
-        # which is normally automatically resolved and does not effect
+        # which is normally automatically resolved and does not affect
         # the cache key.
         if self.get_variable('notparallel'):
             dictionary['notparallel'] = True
@@ -155,6 +199,9 @@ class BuildElement(Element):
             command_dir = build_root
         sandbox.set_work_directory(command_dir)
 
+        # Tell sandbox which directory is preserved in the finished artifact
+        sandbox.set_output_directory(install_root)
+
         # Setup environment
         sandbox.set_environment(self.get_environment())
 
@@ -233,14 +280,13 @@ class BuildElement(Element):
         return commands
 
     def __run_command(self, sandbox, cmd, cmd_name):
-        with self.timed_activity("Running {}".format(cmd_name)):
-            self.status("Running {}".format(cmd_name), detail=cmd)
-
-            # Note the -e switch to 'sh' means to exit with an error
-            # if any untested command fails.
-            #
-            exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'],
-                                   SandboxFlags.ROOT_READ_ONLY)
-            if exitcode != 0:
-                raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode),
-                                   collect=self.get_variable('install-root'))
+        self.status("Running {}".format(cmd_name), detail=cmd)
+
+        # Note the -e switch to 'sh' means to exit with an error
+        # if any untested command fails.
+        #
+        exitcode = sandbox.run(['sh', '-c', '-e', cmd + '\n'],
+                               SandboxFlags.ROOT_READ_ONLY)
+        if exitcode != 0:
+            raise ElementError("Command '{}' failed with exitcode {}".format(cmd, exitcode),
+                               collect=self.get_variable('install-root'))
diff --git a/buildstream/data/projectconfig.yaml b/buildstream/data/projectconfig.yaml
index c1ad2d147b2bcca1236db842ca9e024cd8932c0f..bc9e5147d2b9af77fca99a888244bc509c0132fb 100644
--- a/buildstream/data/projectconfig.yaml
+++ b/buildstream/data/projectconfig.yaml
@@ -16,21 +16,7 @@ ref-storage: inline
 # Variable Configuration
 #
 variables:
-
-  # Maximum number of parallel build processes within a given
-  # build, support for this is conditional on the element type
-  # and the build system used (any element using 'make' can
-  # implement this).
-  #
-  # Note: this value defaults to the number of cores available
-  max-jobs: 4
-
-  # Note: These variables are defined later on in element.py and _project.py
-  element-name: ""
-  project-name: ""
-
   # Path configuration, to be used in build instructions.
-  #
   prefix: "/usr"
   exec_prefix: "%{prefix}"
   bindir: "%{exec_prefix}/bin"
@@ -52,6 +38,9 @@ variables:
   # normally staged
   build-root: /buildstream/%{project-name}/%{element-name}
 
+  # Indicates where the build system should look for configuration files
+  conf-root: .
+
   # Indicates the build installation directory in the sandbox
   install-root: /buildstream-install
 
@@ -89,7 +78,6 @@ variables:
     find "%{install-root}" -name '*.pyc' -exec \
       dd if=/dev/zero of={} bs=1 count=4 seek=4 conv=notrunc ';'
 
-
 # Base sandbox environment, can be overridden by plugins
 environment:
   PATH: /usr/bin:/bin:/usr/sbin:/sbin
@@ -204,3 +192,6 @@ shell:
   # Command to run when `bst shell` does not provide a command
   #
   command: [ 'sh', '-i' ]
+
+remote-execution:
+  url: ""
\ No newline at end of file
diff --git a/buildstream/data/userconfig.yaml b/buildstream/data/userconfig.yaml
index 5f9b01120a7b9595c279b3d002ad79af8822d27a..efe419cfc62357f958d03bc55675250ff5fd1c0b 100644
--- a/buildstream/data/userconfig.yaml
+++ b/buildstream/data/userconfig.yaml
@@ -26,8 +26,13 @@ logdir: ${XDG_CACHE_HOME}/buildstream/logs
 #    Cache
 #
 cache:
-  # Size of the artifact cache - BuildStream will attempt to keep the
+  # Size of the artifact cache in bytes - BuildStream will attempt to keep the
   # artifact cache within this size.
+  # If the value is suffixed with K, M, G or T, the specified memory size is
+  # parsed as Kilobytes, Megabytes, Gigabytes, or Terabytes (with the base
+  # 1024), respectively.
+  # Alternatively, a percentage value may be specified, which is taken relative
+  # to the size of the file system containing the cache.
   quota: infinity
 
 #
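For reference, the quota forms described in the comment above resolve to byte counts along these lines. This is only a sketch of the arithmetic (base-1024 suffixes, or a percentage of the filesystem holding the cache); it is not BuildStream's actual configuration parser and the helper name is invented:

    import os

    _SUFFIXES = {'K': 2 ** 10, 'M': 2 ** 20, 'G': 2 ** 30, 'T': 2 ** 40}

    def resolve_quota(text, cache_dir):
        text = text.strip()
        if text == 'infinity':
            return None                    # no limit
        if text.endswith('%'):
            # Percentage of the filesystem containing the artifact cache
            stat = os.statvfs(cache_dir)
            return int(stat.f_blocks * stat.f_frsize * float(text[:-1]) / 100)
        if text[-1].upper() in _SUFFIXES:
            return int(float(text[:-1]) * _SUFFIXES[text[-1].upper()])
        return int(text)                   # plain byte count

    # resolve_quota('4G', '/path/to/cache') == 4 * 2 ** 30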
diff --git a/buildstream/element.py b/buildstream/element.py
index a34b1ca36863a9c37586abbb5ea28802b8092a55..de1988d2ae9f1549df4ab8c2dad95b1372668b1a 100644
--- a/buildstream/element.py
+++ b/buildstream/element.py
@@ -76,7 +76,8 @@ import os
 import re
 import stat
 import copy
-from collections import Mapping, OrderedDict
+from collections import OrderedDict
+from collections.abc import Mapping
 from contextlib import contextmanager
 import tempfile
 import shutil
@@ -86,7 +87,7 @@ from ._variables import Variables
 from ._versions import BST_CORE_ARTIFACT_VERSION
 from ._exceptions import BstError, LoadError, LoadErrorReason, ImplError, ErrorDomain
 from .utils import UtilError
-from . import Plugin, Consistency
+from . import Plugin, Consistency, Scope
 from . import SandboxFlags
 from . import utils
 from . import _cachekey
@@ -95,11 +96,12 @@ from . import _site
 from ._platform import Platform
 from .plugin import CoreWarnings
 from .sandbox._config import SandboxConfig
+from .sandbox._sandboxremote import SandboxRemote
+from .types import _KeyStrength
 
 from .storage.directory import Directory
 from .storage._filebaseddirectory import FileBasedDirectory
 from .storage.directory import VirtualDirectoryError
-from .element_enums import _KeyStrength, Scope
 
 
 class ElementError(BstError):
@@ -199,7 +201,6 @@ class Element(Plugin):
         self.__strict_cache_key = None          # Our cached cache key for strict builds
         self.__artifacts = artifacts            # Artifact cache
         self.__consistency = Consistency.INCONSISTENT  # Cached overall consistency state
-        self.__cached = None                    # Whether we have a cached artifact
         self.__strong_cached = None             # Whether we have a cached artifact
         self.__weak_cached = None               # Whether we have a cached artifact
         self.__assemble_scheduled = False       # Element is scheduled to be assembled
@@ -212,8 +213,7 @@ class Element(Plugin):
         self.__staged_sources_directory = None  # Location where Element.stage_sources() was called
         self.__tainted = None                   # Whether the artifact is tainted and should not be shared
         self.__required = False                 # Whether the artifact is required in the current session
-        self.__artifact_size = None             # The size of data committed to the artifact cache
-        self.__build_result = None              # The result of assembling this Element
+        self.__build_result = None              # The result of assembling this Element (success, description, detail)
         self._build_log_path = None            # The path of the build log for this Element
 
         # hash tables of loaded artifact metadata, hashed by key
@@ -245,11 +245,25 @@ class Element(Plugin):
         # Collect the composited element configuration and
         # ask the element to configure itself.
         self.__config = self.__extract_config(meta)
-        self.configure(self.__config)
+        self._configure(self.__config)
+
+        # Extract remote execution URL
+        if not self.__is_junction:
+            self.__remote_execution_url = project.remote_execution_url
+        else:
+            self.__remote_execution_url = None
 
         # Extract Sandbox config
         self.__sandbox_config = self.__extract_sandbox_config(meta)
 
+        self.__sandbox_config_supported = True
+        if not self.__use_remote_execution():
+            platform = Platform.get_platform()
+            if not platform.check_sandbox_config(self.__sandbox_config):
+                # Local sandbox does not fully support specified sandbox config.
+                # This will taint the artifact, disable pushing.
+                self.__sandbox_config_supported = False
+
     def __lt__(self, other):
         return self.name < other.name
 
@@ -854,8 +868,8 @@ class Element(Plugin):
 
     # _new_from_meta():
     #
-    # Recursively instantiate a new Element instance, it's sources
-    # and it's dependencies from a meta element.
+    # Recursively instantiate a new Element instance, its sources
+    # and its dependencies from a meta element.
     #
     # Args:
     #    artifacts (ArtifactCache): The artifact cache
@@ -1101,9 +1115,12 @@ class Element(Plugin):
             # until the full cache query below.
             if (not self.__assemble_scheduled and not self.__assemble_done and
                     not self.__cached_success(keystrength=_KeyStrength.WEAK) and
-                    not self._pull_pending() and self._is_required()):
-                self._schedule_assemble()
-                return
+                    not self._pull_pending()):
+                # For uncached workspaced elements, assemble is required
+                # even if we only need the cache key
+                if self._is_required() or self._get_workspace():
+                    self._schedule_assemble()
+                    return
 
         if self.__strict_cache_key is None:
             dependencies = [
@@ -1117,8 +1134,6 @@ class Element(Plugin):
 
         # Query caches now that the weak and strict cache keys are available
         key_for_cache_lookup = self.__strict_cache_key if context.get_strict() else self.__weak_cache_key
-        if not self.__cached:
-            self.__cached = self.__artifacts.contains(self, key_for_cache_lookup)
         if not self.__strong_cached:
             self.__strong_cached = self.__artifacts.contains(self, self.__strict_cache_key)
         if key_for_cache_lookup == self.__weak_cache_key:
@@ -1126,13 +1141,17 @@ class Element(Plugin):
                 self.__weak_cached = self.__artifacts.contains(self, self.__weak_cache_key)
 
         if (not self.__assemble_scheduled and not self.__assemble_done and
-                not self._cached_success() and not self._pull_pending() and self._is_required()):
+                not self._cached_success() and not self._pull_pending()):
             # Workspaced sources are considered unstable if a build is pending
             # as the build will modify the contents of the workspace.
             # Determine as early as possible if a build is pending to discard
             # unstable cache keys.
-            self._schedule_assemble()
-            return
+
+            # For uncached workspaced elements, assemble is required
+            # even if we only need the cache key
+            if self._is_required() or self._get_workspace():
+                self._schedule_assemble()
+                return
 
         if self.__cache_key is None:
             # Calculate strong cache key
@@ -1199,7 +1218,7 @@ class Element(Plugin):
     # _preflight():
     #
     # A wrapper for calling the abstract preflight() method on
-    # the element and it's sources.
+    # the element and its sources.
     #
     def _preflight(self):
 
@@ -1298,7 +1317,8 @@ class Element(Plugin):
     #
     @contextmanager
     def _prepare_sandbox(self, scope, directory, deps='run', integrate=True):
-        with self.__sandbox(directory, config=self.__sandbox_config) as sandbox:
+        # bst shell and bst checkout require a local sandbox.
+        with self.__sandbox(directory, config=self.__sandbox_config, allow_remote=False) as sandbox:
 
             # Configure always comes first, and we need it.
             self.configure_sandbox(sandbox)
@@ -1361,8 +1381,12 @@ class Element(Plugin):
             if not vdirectory.is_empty():
                 raise ElementError("Staging directory '{}' is not empty".format(vdirectory))
 
-            with tempfile.TemporaryDirectory() as temp_staging_directory:
+            # It's advantageous to have this temporary directory on
+            # the same file system as the rest of our cache.
+            temp_staging_location = os.path.join(self._get_context().artifactdir, "staging_temp")
+            temp_staging_directory = tempfile.mkdtemp(prefix=temp_staging_location)
 
+            try:
                 workspace = self._get_workspace()
                 if workspace:
                     # If mount_workspaces is set and we're doing incremental builds,
@@ -1377,6 +1401,19 @@ class Element(Plugin):
                         source._stage(temp_staging_directory)
 
                 vdirectory.import_files(temp_staging_directory)
+
+            finally:
+                # Staging may produce directories with less than 'rwx' permissions
+                # for the owner, which will break tempfile, so we need to use chmod
+                # occasionally.
+                def make_dir_writable(fn, path, excinfo):
+                    os.chmod(os.path.dirname(path), 0o777)
+                    if os.path.isdir(path):
+                        os.rmdir(path)
+                    else:
+                        os.remove(path)
+                shutil.rmtree(temp_staging_directory, onerror=make_dir_writable)
+
         # Ensure deterministic mtime of sources at build time
         vdirectory.set_deterministic_mtime()
         # Ensure deterministic owners of sources at build time
@@ -1413,7 +1450,6 @@ class Element(Plugin):
     # in a subprocess.
     #
     def _schedule_assemble(self):
-        assert self._is_required()
         assert not self.__assemble_scheduled
         self.__assemble_scheduled = True
 
@@ -1421,6 +1457,8 @@ class Element(Plugin):
         for dep in self.dependencies(Scope.BUILD, recurse=False):
             dep._set_required()
 
+        self._set_required()
+
         # Invalidate workspace key as the build modifies the workspace directory
         workspace = self._get_workspace()
         if workspace:
@@ -1443,11 +1481,13 @@ class Element(Plugin):
 
         self._update_state()
 
-        if self._get_workspace() and self._cached():
+        if self._get_workspace() and self._cached_success():
+            assert utils._is_main_process(), \
+                "Attempted to save workspace configuration from child process"
             #
             # Note that this block can only happen in the
-            # main process, since `self._cached()` cannot
-            # be true when assembly is completed in the task.
+            # main process, since `self._cached_success()` cannot
+            # be true when assembly is successful in the task.
             #
             # For this reason, it is safe to update and
             # save the workspaces configuration
@@ -1458,15 +1498,20 @@ class Element(Plugin):
             workspace.clear_running_files()
             self._get_context().get_workspaces().save_config()
 
-            # We also need to update the required artifacts, since
-            # workspaced dependencies do not have a fixed cache key
-            # when the build starts.
+            # This element will have already been marked as
+            # required, but we bump the atime again, in case
+            # we did not know the cache key until now.
             #
-            # This does *not* cause a race condition, because
-            # _assemble_done is called before a cleanup job may be
-            # launched.
+            # FIXME: This is not exactly correct, we should be
+            #        doing this at the time which we have discovered
+            #        a new cache key, this just happens to be the
+            #        last place where that can happen.
             #
-            self.__artifacts.append_required_artifacts([self])
+            #        Ultimately, we should be refactoring
+            #        Element._update_state() such that we know
+            #        when a cache key is actually discovered.
+            #
+            self.__artifacts.mark_required_elements([self])
 
     # _assemble():
     #
@@ -1477,6 +1522,9 @@ class Element(Plugin):
     #   - Call the public abstract methods for the build phase
     #   - Cache the resulting artifact
     #
+    # Returns:
+    #    (int): The size of the newly cached artifact
+    #
     def _assemble(self):
 
         # Assert call ordering
@@ -1485,6 +1533,11 @@ class Element(Plugin):
         context = self._get_context()
         with self._output_file() as output_file:
 
+            if not self.__sandbox_config_supported:
+                self.warn("Sandbox configuration is not supported by the platform.",
+                          detail="Falling back to UID {} GID {}. Artifact will not be pushed."
+                          .format(self.__sandbox_config.build_uid, self.__sandbox_config.build_gid))
+
             # Explicitly clean it up, keep the build dir around if exceptions are raised
             os.makedirs(context.builddir, exist_ok=True)
             rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
@@ -1496,8 +1549,6 @@ class Element(Plugin):
             with _signals.terminator(cleanup_rootdir), \
                 self.__sandbox(rootdir, output_file, output_file, self.__sandbox_config) as sandbox:  # nopep8
 
-                sandbox_vroot = sandbox.get_virtual_directory()
-
                 # By default, the dynamic public data is the same as the static public data.
                 # The plugin's assemble() method may modify this, though.
                 self.__dynamic_public = _yaml.node_copy(self.__public)
@@ -1545,6 +1596,7 @@ class Element(Plugin):
                 finally:
                     if collect is not None:
                         try:
+                            sandbox_vroot = sandbox.get_virtual_directory()
                             collectvdir = sandbox_vroot.descend(collect.lstrip(os.sep).split(os.sep))
                         except VirtualDirectoryError:
                             # No collect directory existed
@@ -1568,6 +1620,7 @@ class Element(Plugin):
                         collectvdir.export_files(filesdir, can_link=True)
 
                     try:
+                        sandbox_vroot = sandbox.get_virtual_directory()
                         sandbox_build_dir = sandbox_vroot.descend(
                             self.get_variable('build-root').lstrip(os.sep).split(os.sep))
                         # Hard link files from build-root dir to buildtreedir directory
@@ -1621,7 +1674,7 @@ class Element(Plugin):
                     }), os.path.join(metadir, 'workspaced-dependencies.yaml'))
 
                     with self.timed_activity("Caching artifact"):
-                        self.__artifact_size = utils._get_dir_size(assembledir)
+                        artifact_size = utils._get_dir_size(assembledir)
                         self.__artifacts.commit(self, assembledir, self.__get_cache_keys_for_commit())
 
                     if collect is not None and collectvdir is None:
@@ -1633,6 +1686,8 @@ class Element(Plugin):
             # Finally cleanup the build dir
             cleanup_rootdir()
 
+        return artifact_size
+
     def _get_build_log(self):
         return self._build_log_path
 
@@ -1644,6 +1699,10 @@ class Element(Plugin):
     #   (bool): Whether a pull operation is pending
     #
     def _pull_pending(self):
+        if self._get_workspace():
+            # Workspace builds are never pushed to artifact servers
+            return False
+
         if self.__strong_cached:
             # Artifact already in local cache
             return False
@@ -1716,8 +1775,6 @@ class Element(Plugin):
             return False
 
         # Notify successfull download
-        display_key = self._get_brief_display_key()
-        self.info("Downloaded artifact {}".format(display_key))
         return True
 
     # _skip_push():
@@ -1756,16 +1813,13 @@ class Element(Plugin):
             self.warn("Not pushing tainted artifact.")
             return False
 
-        display_key = self._get_brief_display_key()
-        with self.timed_activity("Pushing artifact {}".format(display_key)):
-            # Push all keys used for local commit
-            pushed = self.__artifacts.push(self, self.__get_cache_keys_for_commit())
-            if not pushed:
-                return False
+        # Push all keys used for local commit
+        pushed = self.__artifacts.push(self, self.__get_cache_keys_for_commit())
+        if not pushed:
+            return False
 
-            # Notify successful upload
-            self.info("Pushed artifact {}".format(display_key))
-            return True
+        # Notify successful upload
+        return True
 
     # _shell():
     #
@@ -1870,25 +1924,6 @@ class Element(Plugin):
         workspaces = self._get_context().get_workspaces()
         return workspaces.get_workspace(self._get_full_name())
 
-    # _get_artifact_size()
-    #
-    # Get the size of the artifact produced by this element in the
-    # current pipeline - if this element has not been assembled or
-    # pulled, this will be None.
-    #
-    # Note that this is the size of an artifact *before* committing it
-    # to the cache, the size on disk may differ. It can act as an
-    # approximate guide for when to do a proper size calculation.
-    #
-    # Returns:
-    #    (int|None): The size of the artifact
-    #
-    def _get_artifact_size(self):
-        return self.__artifact_size
-
-    def _get_artifact_cache(self):
-        return self.__artifacts
-
     # _write_script():
     #
     # Writes a script to the given directory.
@@ -1965,7 +2000,7 @@ class Element(Plugin):
         if workspace:
 
             # A workspace is considered inconsistent in the case
-            # that it's directory went missing
+            # that its directory went missing
             #
             fullpath = workspace.get_absolute_path()
             if not os.path.exists(fullpath):
@@ -2056,7 +2091,7 @@ class Element(Plugin):
 
     def __is_cached(self, keystrength):
         if keystrength is None:
-            return self.__cached
+            keystrength = _KeyStrength.STRONG if self._get_context().get_strict() else _KeyStrength.WEAK
 
         return self.__strong_cached if keystrength == _KeyStrength.STRONG else self.__weak_cached
 
@@ -2064,7 +2099,7 @@ class Element(Plugin):
     #
     # Raises an error if the artifact is not cached.
     #
-    def __assert_cached(self, keystrength=_KeyStrength.STRONG):
+    def __assert_cached(self, keystrength=None):
         assert self.__is_cached(keystrength=keystrength), "{}: Missing artifact {}".format(
             self, self._get_brief_display_key())
 
@@ -2092,10 +2127,19 @@ class Element(Plugin):
             workspaced_dependencies = self.__get_artifact_metadata_workspaced_dependencies()
 
             # Other conditions should be or-ed
-            self.__tainted = workspaced or workspaced_dependencies
+            self.__tainted = (workspaced or workspaced_dependencies or
+                              not self.__sandbox_config_supported)
 
         return self.__tainted
 
+    # __use_remote_execution():
+    #
+    # Returns True if remote execution is configured and the element plugin
+    # supports it.
+    #
+    def __use_remote_execution(self):
+        return self.__remote_execution_url and self.BST_VIRTUAL_DIRECTORY
+
     # __sandbox():
     #
     # A context manager to prepare a Sandbox object at the specified directory,
@@ -2107,17 +2151,38 @@ class Element(Plugin):
     #    stdout (fileobject): The stream for stdout for the sandbox
     #    stderr (fileobject): The stream for stderr for the sandbox
     #    config (SandboxConfig): The SandboxConfig object
+    #    allow_remote (bool): Whether the sandbox is allowed to be remote
     #
     # Yields:
     #    (Sandbox): A usable sandbox
     #
     @contextmanager
-    def __sandbox(self, directory, stdout=None, stderr=None, config=None):
+    def __sandbox(self, directory, stdout=None, stderr=None, config=None, allow_remote=True):
         context = self._get_context()
         project = self._get_project()
         platform = Platform.get_platform()
 
-        if directory is not None and os.path.exists(directory):
+        if directory is not None and allow_remote and self.__use_remote_execution():
+
+            self.info("Using a remote sandbox for artifact {} with directory '{}'".format(self.name, directory))
+
+            sandbox = SandboxRemote(context, project,
+                                    directory,
+                                    stdout=stdout,
+                                    stderr=stderr,
+                                    config=config,
+                                    server_url=self.__remote_execution_url,
+                                    allow_real_directory=False)
+            yield sandbox
+
+        elif directory is not None and os.path.exists(directory):
+            if allow_remote and self.__remote_execution_url:
+                self.warn("Artifact {} is configured to use remote execution but element plugin does not support it."
+                          .format(self.name), detail="Element plugin '{kind}' does not support virtual directories."
+                          .format(kind=self.get_kind()), warning_token="remote-failure")
+
+                self.info("Falling back to local sandbox for artifact {}".format(self.name))
+
             sandbox = platform.create_sandbox(context, project,
                                               directory,
                                               stdout=stdout,
@@ -2131,7 +2196,8 @@ class Element(Plugin):
             rootdir = tempfile.mkdtemp(prefix="{}-".format(self.normal_name), dir=context.builddir)
 
             # Recursive contextmanager...
-            with self.__sandbox(rootdir, stdout=stdout, stderr=stderr, config=config) as sandbox:
+            with self.__sandbox(rootdir, stdout=stdout, stderr=stderr, config=config,
+                                allow_remote=allow_remote) as sandbox:
                 yield sandbox
 
             # Cleanup the build dir
@@ -2235,7 +2301,8 @@ class Element(Plugin):
     # substituting command strings to be run in the sandbox
     #
     def __extract_variables(self, meta):
-        default_vars = _yaml.node_get(self.__defaults, Mapping, 'variables', default_value={})
+        default_vars = _yaml.node_get(self.__defaults, Mapping, 'variables',
+                                      default_value={})
 
         project = self._get_project()
         if self.__is_junction:
@@ -2248,6 +2315,13 @@ class Element(Plugin):
         _yaml.composite(variables, meta.variables)
         _yaml.node_final_assertions(variables)
 
+        for var in ('project-name', 'element-name', 'max-jobs'):
+            provenance = _yaml.node_get_provenance(variables, var)
+            if provenance and provenance.filename != '':
+                raise LoadError(LoadErrorReason.PROTECTED_VARIABLE_REDEFINED,
+                                "{}: invalid redefinition of protected variable '{}'"
+                                .format(provenance, var))
+
         return variables
 
     # This will resolve the final configuration to be handed
diff --git a/buildstream/plugin.py b/buildstream/plugin.py
index a65db4d4272240941c584d0c5b3a45d241708fd1..65b1b6e9d5f42af41eb88fe84bf2d0ec28332d88 100644
--- a/buildstream/plugin.py
+++ b/buildstream/plugin.py
@@ -169,7 +169,7 @@ class Plugin():
 
         For elements, this is the project relative bst filename,
         for sources this is the owning element's name with a suffix
-        indicating it's index on the owning element.
+        indicating its index on the owning element.
 
         For sources this is for display purposes only.
         """
@@ -179,6 +179,7 @@ class Plugin():
         self.__provenance = provenance  # The Provenance information
         self.__type_tag = type_tag      # The type of plugin (element or source)
         self.__unique_id = _plugin_register(self)  # Unique ID
+        self.__configuring = False      # Whether we are currently configuring
 
         # Infer the kind identifier
         modulename = type(self).__module__
@@ -207,8 +208,8 @@ class Plugin():
            node (dict): The loaded configuration dictionary
 
         Raises:
-           :class:`.SourceError`: If its a :class:`.Source` implementation
-           :class:`.ElementError`: If its an :class:`.Element` implementation
+           :class:`.SourceError`: If it's a :class:`.Source` implementation
+           :class:`.ElementError`: If it's an :class:`.Element` implementation
 
         Plugin implementors should implement this method to read configuration
         data and store it.
@@ -237,8 +238,8 @@ class Plugin():
         """Preflight Check
 
         Raises:
-           :class:`.SourceError`: If its a :class:`.Source` implementation
-           :class:`.ElementError`: If its an :class:`.Element` implementation
+           :class:`.SourceError`: If it's a :class:`.Source` implementation
+           :class:`.ElementError`: If it's an :class:`.Element` implementation
 
         This method is run after :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
         and after the pipeline is fully constructed.
@@ -265,7 +266,7 @@ class Plugin():
         such as an sha256 sum of a tarball content.
 
         Elements and Sources should implement this by collecting any configurations
-        which could possibly effect the output and return a dictionary of these settings.
+        which could possibly affect the output and return a dictionary of these settings.
 
         For Sources, this is guaranteed to only be called if
         :func:`Source.get_consistency() <buildstream.source.Source.get_consistency>`
@@ -682,7 +683,32 @@ class Plugin():
         else:
             yield log
 
+    # _configure():
+    #
+    # Calls configure() for the plugin, this must be called by
+    # the core instead of configure() directly, so that the
+    # _get_configuring() state is up to date.
+    #
+    # Args:
+    #    node (dict): The loaded configuration dictionary
+    #
+    def _configure(self, node):
+        self.__configuring = True
+        self.configure(node)
+        self.__configuring = False
+
+    # _get_configuring():
+    #
+    # Checks whether the plugin is in the middle of having
+    # its Plugin.configure() method called
+    #
+    # Returns:
+    #    (bool): Whether we are currently configuring
+    def _get_configuring(self):
+        return self.__configuring
+
     # _preflight():
+    #
     # Calls preflight() for the plugin, and allows generic preflight
     # checks to be added
     #
@@ -690,6 +716,7 @@ class Plugin():
     #    SourceError: If it's a Source implementation
     #    ElementError: If it's an Element implementation
     #    ProgramNotFoundError: If a required host tool is not found
+    #
     def _preflight(self):
         self.preflight()
 
diff --git a/buildstream/plugins/elements/autotools.py b/buildstream/plugins/elements/autotools.py
index 14d04d9a3f54a18967a3a36f2da2a98603d33eb1..cf5e856611e5fa77c9b664b3064c8930dd29bd79 100644
--- a/buildstream/plugins/elements/autotools.py
+++ b/buildstream/plugins/elements/autotools.py
@@ -57,7 +57,8 @@ from buildstream import BuildElement
 
 # Element implementation for the 'autotools' kind.
 class AutotoolsElement(BuildElement):
-    pass
+    # Supports virtual directories (required for remote execution)
+    BST_VIRTUAL_DIRECTORY = True
 
 
 # Plugin entry point
diff --git a/buildstream/plugins/elements/autotools.yaml b/buildstream/plugins/elements/autotools.yaml
index 021d3815c2f94daed8f0f9e0a63785a9b35a1858..85f7393e7b8e6e62a52ab7b2deaa258f67d2f90c 100644
--- a/buildstream/plugins/elements/autotools.yaml
+++ b/buildstream/plugins/elements/autotools.yaml
@@ -6,11 +6,11 @@ variables:
     export NOCONFIGURE=1;
 
     if [ -x %{conf-cmd} ]; then true;
-    elif [ -x autogen ]; then ./autogen;
-    elif [ -x autogen.sh ]; then ./autogen.sh;
-    elif [ -x bootstrap ]; then ./bootstrap;
-    elif [ -x bootstrap.sh ]; then ./bootstrap.sh;
-    else autoreconf -ivf;
+    elif [ -x %{conf-root}/autogen ]; then %{conf-root}/autogen;
+    elif [ -x %{conf-root}/autogen.sh ]; then %{conf-root}/autogen.sh;
+    elif [ -x %{conf-root}/bootstrap ]; then %{conf-root}/bootstrap;
+    elif [ -x %{conf-root}/bootstrap.sh ]; then %{conf-root}/bootstrap.sh;
+    else autoreconf -ivf %{conf-root};
     fi
 
   # Project-wide extra arguments to be passed to `configure`
@@ -22,7 +22,8 @@ variables:
   # For backwards compatibility only, do not use.
   conf-extra: ''
 
-  conf-cmd: ./configure
+  conf-cmd: "%{conf-root}/configure"
+
   conf-args: |
 
     --prefix=%{prefix} \
@@ -50,6 +51,40 @@ variables:
   #
   # notparallel: True
 
+
+  # Automatically remove libtool archive files
+  #
+  # Set remove-libtool-modules to "true" to remove .la files for
+  # modules intended to be opened with lt_dlopen()
+  #
+  # Set remove-libtool-libraries to "true" to remove .la files for
+  # libraries
+  #
+  # Value must be "true" or "false"
+  remove-libtool-modules: "false"
+  remove-libtool-libraries: "false"
+
+  delete-libtool-archives: |
+    if %{remove-libtool-modules} || %{remove-libtool-libraries}; then
+      find "%{install-root}" -name "*.la" -print0 | while read -d '' -r file; do
+        if grep '^shouldnotlink=yes$' "${file}" &>/dev/null; then
+          if %{remove-libtool-modules}; then
+            echo "Removing ${file}."
+            rm "${file}"
+          else
+            echo "Not removing ${file}."
+          fi
+        else
+          if %{remove-libtool-libraries}; then
+            echo "Removing ${file}."
+            rm "${file}"
+          else
+            echo "Not removing ${file}."
+          fi
+        fi
+      done
+    fi
+
 config:
 
   # Commands for configuring the software
@@ -72,6 +107,8 @@ config:
   install-commands:
   - |
     %{make-install}
+  - |
+    %{delete-libtool-archives}
 
   # Commands for stripping debugging information out of
   # installed binaries
@@ -86,7 +123,7 @@ environment:
   V: 1
 
 # And dont consider MAKEFLAGS or V as something which may
-# effect build output.
+# affect build output.
 environment-nocache:
 - MAKEFLAGS
 - V
diff --git a/buildstream/plugins/elements/cmake.py b/buildstream/plugins/elements/cmake.py
index 8126a80acbdc35c9ca995e6d725c8394d166629e..2cb2601aef81a2593ad4a287b63577a42d2eef25 100644
--- a/buildstream/plugins/elements/cmake.py
+++ b/buildstream/plugins/elements/cmake.py
@@ -56,7 +56,8 @@ from buildstream import BuildElement
 
 # Element implementation for the 'cmake' kind.
 class CMakeElement(BuildElement):
-    pass
+    # Supports virtual directories (required for remote execution)
+    BST_VIRTUAL_DIRECTORY = True
 
 
 # Plugin entry point
diff --git a/buildstream/plugins/elements/cmake.yaml b/buildstream/plugins/elements/cmake.yaml
index b51727b04288f175ffa443038c8904acec7f7853..0fb2e715a74cc54e23affdcf21119324f3c7423b 100644
--- a/buildstream/plugins/elements/cmake.yaml
+++ b/buildstream/plugins/elements/cmake.yaml
@@ -23,7 +23,7 @@ variables:
 
   cmake: |
 
-    cmake -B%{build-dir} -H. -G"%{generator}" %{cmake-args}
+    cmake -B%{build-dir} -H"%{conf-root}" -G"%{generator}" %{cmake-args}
 
   make: cmake --build %{build-dir} -- ${JOBS}
   make-install: env DESTDIR="%{install-root}" cmake --build %{build-dir} --target install
@@ -66,7 +66,7 @@ environment:
   V: 1
 
 # And dont consider JOBS or V as something which may
-# effect build output.
+# affect build output.
 environment-nocache:
 - JOBS
 - V
diff --git a/buildstream/plugins/elements/compose.py b/buildstream/plugins/elements/compose.py
index a206dd6d71ab16a0bd8da58add63c32d7b34f254..6b99947d552d954b95ae39be77b069c08aa553f7 100644
--- a/buildstream/plugins/elements/compose.py
+++ b/buildstream/plugins/elements/compose.py
@@ -41,7 +41,7 @@ from buildstream import Element, Scope
 class ComposeElement(Element):
     # pylint: disable=attribute-defined-outside-init
 
-    # The compose element's output is it's dependencies, so
+    # The compose element's output is its dependencies, so
     # we must rebuild if the dependencies change even when
     # not in strict build plans.
     #
diff --git a/buildstream/plugins/elements/distutils.yaml b/buildstream/plugins/elements/distutils.yaml
index 7cb6f3a8dccd4750ecaedce9480b285ea4cfa6a6..cec7da6e9699ea5a353c56069d21977aa2abedb8 100644
--- a/buildstream/plugins/elements/distutils.yaml
+++ b/buildstream/plugins/elements/distutils.yaml
@@ -8,7 +8,7 @@ variables:
 
   python-build: |
 
-    %{python} setup.py build
+    %{python} %{conf-root}/setup.py build
 
   install-args: |
 
@@ -17,7 +17,7 @@ variables:
 
   python-install: |
 
-    %{python} setup.py install %{install-args}
+    %{python} %{conf-root}/setup.py install %{install-args}
 
 
 config:
diff --git a/buildstream/plugins/elements/filter.py b/buildstream/plugins/elements/filter.py
index 22fddd14f5015b93778551aa1dd145662a8fb441..672325304ee9fe2a65563b82acbfd57a5a24f0eb 100644
--- a/buildstream/plugins/elements/filter.py
+++ b/buildstream/plugins/elements/filter.py
@@ -47,7 +47,7 @@ from buildstream import Element, ElementError, Scope
 class FilterElement(Element):
     # pylint: disable=attribute-defined-outside-init
 
-    # The filter element's output is it's dependencies, so
+    # The filter element's output is its dependencies, so
     # we must rebuild if the dependencies change even when
     # not in strict build plans.
     BST_STRICT_REBUILD = True
diff --git a/buildstream/plugins/elements/junction.py b/buildstream/plugins/elements/junction.py
index ee5ed24d5b9e969d54fd33a04d287ebb44e9704c..7f9817359f55d499486aea504d68e4e3007d3ce8 100644
--- a/buildstream/plugins/elements/junction.py
+++ b/buildstream/plugins/elements/junction.py
@@ -124,7 +124,7 @@ the user to resolve possibly conflicting nested junctions by creating a junction
 with the same name in the top-level project, which then takes precedence.
 """
 
-from collections import Mapping
+from collections.abc import Mapping
 from buildstream import Element
 from buildstream._pipeline import PipelineError
 
diff --git a/buildstream/plugins/elements/make.py b/buildstream/plugins/elements/make.py
index 1f37cb412ec864ca04cd720af997176ff80fd422..6c500f3f91ef808203f68f2b03bf26ee6a35751c 100644
--- a/buildstream/plugins/elements/make.py
+++ b/buildstream/plugins/elements/make.py
@@ -38,7 +38,8 @@ from buildstream import BuildElement
 
 # Element implementation for the 'make' kind.
 class MakeElement(BuildElement):
-    pass
+    # Supports virtual directories (required for remote execution)
+    BST_VIRTUAL_DIRECTORY = True
 
 
 # Plugin entry point
diff --git a/buildstream/plugins/elements/make.yaml b/buildstream/plugins/elements/make.yaml
index 1438bb52b3908be5e7c44a5c79bd42bb96851a94..83e5c658f2f94c1243ccd78efff1a211c5baf962 100644
--- a/buildstream/plugins/elements/make.yaml
+++ b/buildstream/plugins/elements/make.yaml
@@ -36,7 +36,7 @@ environment:
   V: 1
 
 # And dont consider MAKEFLAGS or V as something which may
-# effect build output.
+# affect build output.
 environment-nocache:
 - MAKEFLAGS
 - V
diff --git a/buildstream/plugins/elements/manual.yaml b/buildstream/plugins/elements/manual.yaml
index 32ebf2be72a82d67fdb153075121565bec4fdb1c..cba5608cc8c05bf0a11e2fbfe2fc0175e5704d4e 100644
--- a/buildstream/plugins/elements/manual.yaml
+++ b/buildstream/plugins/elements/manual.yaml
@@ -35,7 +35,7 @@ environment:
   V: 1
 
 # And dont consider MAKEFLAGS or V as something which may
-# effect build output.
+# affect build output.
 environment-nocache:
 - MAKEFLAGS
 - V
diff --git a/buildstream/plugins/elements/meson.py b/buildstream/plugins/elements/meson.py
index 228e90ad1f9e6c111d7f81f1d0238ca9ef1fde1c..9e0edf19eea44a137ea7670d25b0e3d55423e73e 100644
--- a/buildstream/plugins/elements/meson.py
+++ b/buildstream/plugins/elements/meson.py
@@ -53,7 +53,8 @@ from buildstream import BuildElement
 
 # Element implementation for the 'meson' kind.
 class MesonElement(BuildElement):
-    pass
+    # Supports virtual directories (required for remote execution)
+    BST_VIRTUAL_DIRECTORY = True
 
 
 # Plugin entry point
diff --git a/buildstream/plugins/elements/meson.yaml b/buildstream/plugins/elements/meson.yaml
index 7af9a76e465af1b1b3cc57ec06c5f69d314d988b..2172cb34c3f97c95d5b4d56965aedcd497db2e5d 100644
--- a/buildstream/plugins/elements/meson.yaml
+++ b/buildstream/plugins/elements/meson.yaml
@@ -28,7 +28,7 @@ variables:
     --mandir=%{mandir} \
     --infodir=%{infodir} %{meson-extra} %{meson-global} %{meson-local}
 
-  meson: meson %{build-dir} %{meson-args}
+  meson: meson %{conf-root} %{build-dir} %{meson-args}
 
   ninja: |
     ninja -j ${NINJAJOBS} -C %{build-dir}
@@ -74,6 +74,6 @@ environment:
     %{max-jobs}
 
 # And dont consider NINJAJOBS as something which may
-# effect build output.
+# affect build output.
 environment-nocache:
 - NINJAJOBS
diff --git a/buildstream/plugins/elements/pip.yaml b/buildstream/plugins/elements/pip.yaml
index 19a226e00e34509ac5b77036a66252648c9fd907..b2b3d38572aebdd59b2e4373a7b36ee65b0818f5 100644
--- a/buildstream/plugins/elements/pip.yaml
+++ b/buildstream/plugins/elements/pip.yaml
@@ -14,7 +14,7 @@ config:
   #
   install-commands:
   - |
-    %{pip} install --no-deps --root=%{install-root} --prefix=%{prefix} .
+    %{pip} install --no-deps --root=%{install-root} --prefix=%{prefix} %{conf-root}
 
   # Commands for stripping debugging information out of
   # installed binaries
diff --git a/buildstream/plugins/elements/qmake.py b/buildstream/plugins/elements/qmake.py
index 7896692a63ac7c8b20888e643507fb9a68656c4e..9f5bc4018ed642db7e26563f5029346c99990f81 100644
--- a/buildstream/plugins/elements/qmake.py
+++ b/buildstream/plugins/elements/qmake.py
@@ -33,7 +33,8 @@ from buildstream import BuildElement
 
 # Element implementation for the 'qmake' kind.
 class QMakeElement(BuildElement):
-    pass
+    # Supports virtual directories (required for remote execution)
+    BST_VIRTUAL_DIRECTORY = True
 
 
 # Plugin entry point
diff --git a/buildstream/plugins/elements/qmake.yaml b/buildstream/plugins/elements/qmake.yaml
index e527d45b9c951c1dbb80e31d5b161f01534ce57d..4ac31932e829b3b1ec9bdf9ca5d2836f59250168 100644
--- a/buildstream/plugins/elements/qmake.yaml
+++ b/buildstream/plugins/elements/qmake.yaml
@@ -2,7 +2,7 @@
 
 variables:
 
-  qmake: qmake -makefile
+  qmake: qmake -makefile %{conf-root}
   make: make
   make-install: make -j1 INSTALL_ROOT="%{install-root}" install
 
@@ -44,7 +44,7 @@ environment:
   V: 1
 
 # And dont consider MAKEFLAGS or V as something which may
-# effect build output.
+# affect build output.
 environment-nocache:
 - MAKEFLAGS
 - V
diff --git a/buildstream/plugins/sources/_downloadablefilesource.py b/buildstream/plugins/sources/_downloadablefilesource.py
index b4c7582fce35a43f7d63aa557a6478f748c4718d..7d1fc07bff93e0fc4fc8cf6515f35c5f81c77b88 100644
--- a/buildstream/plugins/sources/_downloadablefilesource.py
+++ b/buildstream/plugins/sources/_downloadablefilesource.py
@@ -152,7 +152,9 @@ class DownloadableFileSource(Source):
             raise SourceError("{}: Error mirroring {}: {}"
                               .format(self, self.url, e), temporary=True) from e
 
-        except (urllib.error.URLError, urllib.error.ContentTooShortError, OSError) as e:
+        except (urllib.error.URLError, urllib.error.ContentTooShortError, OSError, ValueError) as e:
+            # Note that urllib.request.Request in the try block may throw a
+            # ValueError for unknown url types, so we handle it here.
             raise SourceError("{}: Error mirroring {}: {}"
                               .format(self, self.url, e), temporary=True) from e
 
diff --git a/buildstream/plugins/sources/deb.py b/buildstream/plugins/sources/deb.py
index 1cf8beb22658b1a0acb010eb8c25323d9a141bdb..e1d4b65d13b934d8d09f1d529051e1f406517246 100644
--- a/buildstream/plugins/sources/deb.py
+++ b/buildstream/plugins/sources/deb.py
@@ -44,13 +44,13 @@ deb - stage files from .deb packages
    # Specify the ref. It's a sha256sum of the file you download.
    ref: 6c9f6f68a131ec6381da82f2bff978083ed7f4f7991d931bfa767b7965ebc94b
 
-   # Specify the basedir to return only the specified dir and it's children
+   # Specify the basedir to return only the specified dir and its children
    base-dir: ''
 
 """
 
 import tarfile
-from contextlib import contextmanager, ExitStack
+from contextlib import contextmanager
 import arpy                                       # pylint: disable=import-error
 
 from .tar import TarSource
@@ -69,8 +69,7 @@ class DebSource(TarSource):
 
     @contextmanager
     def _get_tar(self):
-        with ExitStack() as context:
-            deb_file = context.enter_context(open(self._get_mirror_file(), 'rb'))
+        with open(self._get_mirror_file(), 'rb') as deb_file:
             arpy_archive = arpy.Archive(fileobj=deb_file)
             arpy_archive.read_all_headers()
             data_tar_arpy = [v for k, v in arpy_archive.archived_files.items() if b"data.tar" in k][0]
diff --git a/buildstream/plugins/sources/git.py b/buildstream/plugins/sources/git.py
index b3bc9cac7b3e8a658d3a3b0378827964b06439f6..dbed8789921a9066cb17adad9af78ea456dc865a 100644
--- a/buildstream/plugins/sources/git.py
+++ b/buildstream/plugins/sources/git.py
@@ -43,6 +43,12 @@ git - stage files from a git repository
    # will be used to update the 'ref' when refreshing the pipeline.
    track: master
 
+   # Optionally specify the ref format used for tracking.
+   # The default is 'sha1' for the raw commit hash.
+   # If you specify 'git-describe', the commit hash will be prefixed
+   # with the closest tag.
+   ref-format: sha1
+
    # Specify the commit ref, this must be specified in order to
    # checkout sources and build, but can be automatically updated
    # if the 'track' attribute was specified.
@@ -74,19 +80,23 @@ This plugin provides the following configurable warnings:
 
 - 'git:inconsistent-submodule' - A submodule was found to be missing from the underlying git repository.
 
+This plugin also utilises the following configurable core plugin warnings:
+
+- 'ref-not-in-track' - The provided ref was not found in the provided track in the element's git repository.
 """
 
 import os
 import errno
 import re
 import shutil
-from collections import Mapping
+from collections.abc import Mapping
 from io import StringIO
 
 from configparser import RawConfigParser
 
 from buildstream import Source, SourceError, Consistency, SourceFetcher
 from buildstream import utils
+from buildstream.plugin import CoreWarnings
 
 GIT_MODULES = '.gitmodules'
 
@@ -100,13 +110,14 @@ INCONSISTENT_SUBMODULE = "inconsistent-submodules"
 #
 class GitMirror(SourceFetcher):
 
-    def __init__(self, source, path, url, ref):
+    def __init__(self, source, path, url, ref, *, primary=False):
 
         super().__init__()
         self.source = source
         self.path = path
         self.url = url
         self.ref = ref
+        self.primary = primary
         self.mirror = os.path.join(source.get_mirror_directory(), utils.url_directory_name(url))
         self.mark_download_url(url)
 
@@ -124,7 +135,8 @@ class GitMirror(SourceFetcher):
             # system configured tmpdir is not on the same partition.
             #
             with self.source.tempdir() as tmpdir:
-                url = self.source.translate_url(self.url, alias_override=alias_override)
+                url = self.source.translate_url(self.url, alias_override=alias_override,
+                                                primary=self.primary)
                 self.source.call([self.source.host_git, 'clone', '--mirror', '-n', url, tmpdir],
                                  fail="Failed to clone git repository {}".format(url),
                                  fail_temporarily=True)
@@ -146,7 +158,9 @@ class GitMirror(SourceFetcher):
                                           .format(self.source, url, tmpdir, self.mirror, e)) from e
 
     def _fetch(self, alias_override=None):
-        url = self.source.translate_url(self.url, alias_override=alias_override)
+        url = self.source.translate_url(self.url,
+                                        alias_override=alias_override,
+                                        primary=self.primary)
 
         if alias_override:
             remote_name = utils.url_directory_name(alias_override)
@@ -170,10 +184,18 @@ class GitMirror(SourceFetcher):
                          cwd=self.mirror)
 
     def fetch(self, alias_override=None):
-        self.ensure(alias_override)
-        if not self.has_ref():
-            self._fetch(alias_override)
-        self.assert_ref()
+        # Resolve the URL for the message
+        resolved_url = self.source.translate_url(self.url,
+                                                 alias_override=alias_override,
+                                                 primary=self.primary)
+
+        with self.source.timed_activity("Fetching from {}"
+                                        .format(resolved_url),
+                                        silent_nested=True):
+            self.ensure(alias_override)
+            if not self.has_ref():
+                self._fetch(alias_override)
+            self.assert_ref()
 
     def has_ref(self):
         if not self.ref:
@@ -197,9 +219,20 @@ class GitMirror(SourceFetcher):
             [self.source.host_git, 'rev-parse', tracking],
             fail="Unable to find commit for specified branch name '{}'".format(tracking),
             cwd=self.mirror)
-        return output.rstrip('\n')
+        ref = output.rstrip('\n')
 
-    def stage(self, directory):
+        if self.source.ref_format == 'git-describe':
+            # Prefix the ref with the closest tag, if available,
+            # to make the ref human readable
+            exit_code, output = self.source.check_output(
+                [self.source.host_git, 'describe', '--tags', '--abbrev=40', '--long', ref],
+                cwd=self.mirror)
+            if exit_code == 0:
+                ref = output.rstrip('\n')
+
+        return ref
+
+    def stage(self, directory, track=None):
         fullpath = os.path.join(directory, self.path)
 
         # Using --shared here avoids copying the objects into the checkout, in any
@@ -213,10 +246,14 @@ class GitMirror(SourceFetcher):
                          fail="Failed to checkout git ref {}".format(self.ref),
                          cwd=fullpath)
 
+        # Check that the user-specified ref exists in the track, if a track was provided and not yet tracked
+        if track:
+            self.assert_ref_in_track(fullpath, track)
+
         # Remove .git dir
         shutil.rmtree(os.path.join(fullpath, ".git"))
 
-    def init_workspace(self, directory):
+    def init_workspace(self, directory, track=None):
         fullpath = os.path.join(directory, self.path)
         url = self.source.translate_url(self.url)
 
@@ -232,6 +269,10 @@ class GitMirror(SourceFetcher):
                          fail="Failed to checkout git ref {}".format(self.ref),
                          cwd=fullpath)
 
+        # Check that the user-specified ref exists in the track, if a track was provided and not yet tracked
+        if track:
+            self.assert_ref_in_track(fullpath, track)
+
     # List the submodules (path/url tuples) present at the given ref of this repo
     def submodule_list(self):
         modules = "{}:{}".format(self.ref, GIT_MODULES)
@@ -296,6 +337,28 @@ class GitMirror(SourceFetcher):
 
             return None
 
+    # Assert that ref exists in track, if track has been specified.
+    def assert_ref_in_track(self, fullpath, track):
+        _, branch = self.source.check_output([self.source.host_git, 'branch', '--list', track,
+                                              '--contains', self.ref],
+                                             cwd=fullpath,)
+        if branch:
+            return True
+        else:
+            _, tag = self.source.check_output([self.source.host_git, 'tag', '--list', track,
+                                               '--contains', self.ref],
+                                              cwd=fullpath,)
+            if tag:
+                return True
+
+        detail = "The ref provided for the element does not exist locally in the provided track branch / tag " + \
+                 "'{}'.\nYou may wish to track the element to update the ref from '{}' ".format(track, track) + \
+                 "with `bst track`,\nor examine the upstream at '{}' for the specific ref.".format(self.url)
+
+        self.source.warn("{}: expected ref '{}' was not found in given track '{}' for staged repository: '{}'\n"
+                         .format(self.source, self.ref, track, self.url),
+                         detail=detail, warning_token=CoreWarnings.REF_NOT_IN_TRACK)
+
 
 class GitSource(Source):
     # pylint: disable=attribute-defined-outside-init
@@ -303,13 +366,18 @@ class GitSource(Source):
     def configure(self, node):
         ref = self.node_get_member(node, str, 'ref', None)
 
-        config_keys = ['url', 'track', 'ref', 'submodules', 'checkout-submodules']
+        config_keys = ['url', 'track', 'ref', 'submodules', 'checkout-submodules', 'ref-format']
         self.node_validate(node, config_keys + Source.COMMON_CONFIG_KEYS)
 
         self.original_url = self.node_get_member(node, str, 'url')
-        self.mirror = GitMirror(self, '', self.original_url, ref)
+        self.mirror = GitMirror(self, '', self.original_url, ref, primary=True)
         self.tracking = self.node_get_member(node, str, 'track', None)
 
+        self.ref_format = self.node_get_member(node, str, 'ref-format', 'sha1')
+        if self.ref_format not in ['sha1', 'git-describe']:
+            provenance = self.node_provenance(node, member_name='ref-format')
+            raise SourceError("{}: Unexpected value for ref-format: {}".format(provenance, self.ref_format))
+
         # At this point we now know if the source has a ref and/or a track.
         # If it is missing both then we will be unable to track or build.
         if self.mirror.ref is None and self.tracking is None:
@@ -327,12 +395,18 @@ class GitSource(Source):
         for path, _ in self.node_items(modules):
             submodule = self.node_get_member(modules, Mapping, path)
             url = self.node_get_member(submodule, str, 'url', None)
+
+            # Make sure to mark all URLs that are specified in the configuration
+            if url:
+                self.mark_download_url(url, primary=False)
+
             self.submodule_overrides[path] = url
             if 'checkout' in submodule:
                 checkout = self.node_get_member(submodule, bool, 'checkout')
                 self.submodule_checkout_overrides[path] = checkout
 
         self.mark_download_url(self.original_url)
+        self.tracked = False
 
     def preflight(self):
         # Check if git is installed, get the binary at the same time
@@ -341,7 +415,7 @@ class GitSource(Source):
     def get_unique_key(self):
         # Here we want to encode the local name of the repository and
         # the ref, if the user changes the alias to fetch the same sources
-        # from another location, it should not effect the cache key.
+        # from another location, it should not affect the cache key.
         key = [self.original_url, self.mirror.ref]
 
         # Only modify the cache key with checkout_submodules if it's something
@@ -387,8 +461,10 @@ class GitSource(Source):
                                   detail=detail, reason="track-attempt-no-track")
             return None
 
+        # Resolve the URL for the message
+        resolved_url = self.translate_url(self.mirror.url)
         with self.timed_activity("Tracking {} from {}"
-                                 .format(self.tracking, self.mirror.url),
+                                 .format(self.tracking, resolved_url),
                                  silent_nested=True):
             self.mirror.ensure()
             self.mirror._fetch()
@@ -396,6 +472,8 @@ class GitSource(Source):
             # Update self.mirror.ref and node.ref from the self.tracking branch
             ret = self.mirror.latest_commit(self.tracking)
 
+        # Set the tracked attribute, used to determine whether self.mirror.assert_ref_in_track() is needed
+        self.tracked = True
         return ret
 
     def init_workspace(self, directory):
@@ -403,7 +481,7 @@ class GitSource(Source):
         self.refresh_submodules()
 
         with self.timed_activity('Setting up workspace "{}"'.format(directory), silent_nested=True):
-            self.mirror.init_workspace(directory)
+            self.mirror.init_workspace(directory, track=(self.tracking if not self.tracked else None))
             for mirror in self.submodules:
                 mirror.init_workspace(directory)
 
@@ -419,7 +497,7 @@ class GitSource(Source):
         # Stage the main repo in the specified directory
         #
         with self.timed_activity("Staging {}".format(self.mirror.url), silent_nested=True):
-            self.mirror.stage(directory)
+            self.mirror.stage(directory, track=(self.tracking if not self.tracked else None))
             for mirror in self.submodules:
                 if mirror.path in self.submodule_checkout_overrides:
                     checkout = self.submodule_checkout_overrides[mirror.path]
diff --git a/buildstream/plugins/sources/local.py b/buildstream/plugins/sources/local.py
index 7c19e1f9015ebc6318bf0ed1c853bbd0299e5552..8b18f9b5948a1090bfa245df7ec349e24a14d010 100644
--- a/buildstream/plugins/sources/local.py
+++ b/buildstream/plugins/sources/local.py
@@ -133,7 +133,7 @@ def unique_key(filename):
     if os.path.isdir(filename):
         return "0"
     elif os.path.islink(filename):
-        # For a symbolic link, use the link target as it's unique identifier
+        # For a symbolic link, use the link target as its unique identifier
         return os.readlink(filename)
 
     return utils.sha256sum(filename)
diff --git a/buildstream/plugins/sources/ostree.py b/buildstream/plugins/sources/ostree.py
index 526a91aa03a3d755fada340740bcffe175af8b87..177bdcc4f89c32da44ba0c8f69f8a3f9a2fa71af 100644
--- a/buildstream/plugins/sources/ostree.py
+++ b/buildstream/plugins/sources/ostree.py
@@ -105,7 +105,7 @@ class OSTreeSource(Source):
         node['ref'] = self.ref = ref
 
     def track(self):
-        # If self.tracking is not specified its' not an error, just silently return
+        # If self.tracking is not specified it's not an error, just silently return
         if not self.tracking:
             return None
 
@@ -151,7 +151,7 @@ class OSTreeSource(Source):
             # The target directory is guaranteed to exist, here we must move the
             # content of out checkout into the existing target directory.
             #
-            # We may not be able to create the target directory as it's parent
+            # We may not be able to create the target directory as its parent
             # may be readonly, and the directory itself is often a mount point.
             #
             try:
diff --git a/buildstream/plugins/sources/tar.py b/buildstream/plugins/sources/tar.py
index e32cc3dc88a4583807baf044f7ef50bb4a0824f3..8bbc8fce511aa60efae4818cf4980021cea90059 100644
--- a/buildstream/plugins/sources/tar.py
+++ b/buildstream/plugins/sources/tar.py
@@ -57,7 +57,7 @@ tar - stage files from tar archives
 
 import os
 import tarfile
-from contextlib import contextmanager, ExitStack
+from contextlib import contextmanager
 from tempfile import TemporaryFile
 
 from buildstream import SourceError
@@ -88,8 +88,7 @@ class TarSource(DownloadableFileSource):
     def _run_lzip(self):
         assert self.host_lzip
         with TemporaryFile() as lzip_stdout:
-            with ExitStack() as context:
-                lzip_file = context.enter_context(open(self._get_mirror_file(), 'r'))
+            with open(self._get_mirror_file(), 'r') as lzip_file:
                 self.call([self.host_lzip, '-d'],
                           stdin=lzip_file,
                           stdout=lzip_stdout)
@@ -127,7 +126,7 @@ class TarSource(DownloadableFileSource):
         if not base_dir.endswith(os.sep):
             base_dir = base_dir + os.sep
 
-        l = len(base_dir)
+        L = len(base_dir)
         for member in tar.getmembers():
 
             # First, ensure that a member never starts with `./`
@@ -145,9 +144,9 @@ class TarSource(DownloadableFileSource):
                 #       base directory.
                 #
                 if member.type == tarfile.LNKTYPE:
-                    member.linkname = member.linkname[l:]
+                    member.linkname = member.linkname[L:]
 
-                member.path = member.path[l:]
+                member.path = member.path[L:]
                 yield member
 
     # We want to iterate over all paths of a tarball, but getmembers()
diff --git a/buildstream/plugins/sources/zip.py b/buildstream/plugins/sources/zip.py
index d3ce0f16dfbd10ade95172cb3f9bb31c957d3d17..0bbb2cd2781abb872d72d01e7021df8ed05dcd5e 100644
--- a/buildstream/plugins/sources/zip.py
+++ b/buildstream/plugins/sources/zip.py
@@ -121,13 +121,13 @@ class ZipSource(DownloadableFileSource):
         if not base_dir.endswith(os.sep):
             base_dir = base_dir + os.sep
 
-        l = len(base_dir)
+        L = len(base_dir)
         for member in archive.infolist():
             if member.filename == base_dir:
                 continue
 
             if member.filename.startswith(base_dir):
-                member.filename = member.filename[l:]
+                member.filename = member.filename[L:]
                 yield member
 
     # We want to iterate over all paths of an archive, but namelist()
diff --git a/buildstream/sandbox/__init__.py b/buildstream/sandbox/__init__.py
index 53e170fbdf24df617ea92ff071045568419c94a9..5999aba7a2e37bed9200dd5560825397e51c1e5c 100644
--- a/buildstream/sandbox/__init__.py
+++ b/buildstream/sandbox/__init__.py
@@ -18,5 +18,5 @@
 #        Tristan Maat <tristan.maat@codethink.co.uk>
 
 from .sandbox import Sandbox, SandboxFlags
-from ._sandboxchroot import SandboxChroot
-from ._sandboxbwrap import SandboxBwrap
+from ._sandboxremote import SandboxRemote
+from ._sandboxdummy import SandboxDummy
diff --git a/buildstream/sandbox/_mount.py b/buildstream/sandbox/_mount.py
index 49068fe92fc8799166133cb2369166591d3c454f..2dc3df2b5ec13d87207b969a39ca74d8f53ae658 100644
--- a/buildstream/sandbox/_mount.py
+++ b/buildstream/sandbox/_mount.py
@@ -30,7 +30,7 @@ from .._fuse import SafeHardlinks
 # Helper data object representing a single mount point in the mount map
 #
 class Mount():
-    def __init__(self, sandbox, mount_point, safe_hardlinks):
+    def __init__(self, sandbox, mount_point, safe_hardlinks, fuse_mount_options={}):
         scratch_directory = sandbox._get_scratch_directory()
         # Getting _get_underlying_directory() here is acceptable as
         # we're part of the sandbox code. This will fail if our
@@ -39,8 +39,9 @@ class Mount():
 
         self.mount_point = mount_point
         self.safe_hardlinks = safe_hardlinks
+        self._fuse_mount_options = fuse_mount_options
 
-        # FIXME: When the criteria for mounting something and it's parent
+        # FIXME: When the criteria for mounting something and its parent
         #        mount is identical, then there is no need to mount an additional
         #        fuse layer (i.e. if the root is read-write and there is a directory
         #        marked for staged artifacts directly within the rootfs, they can
@@ -82,7 +83,7 @@ class Mount():
     @contextmanager
     def mounted(self, sandbox):
         if self.safe_hardlinks:
-            mount = SafeHardlinks(self.mount_origin, self.mount_tempdir)
+            mount = SafeHardlinks(self.mount_origin, self.mount_tempdir, self._fuse_mount_options)
             with mount.mounted(self.mount_source):
                 yield
         else:
@@ -100,12 +101,12 @@ class Mount():
 #
 class MountMap():
 
-    def __init__(self, sandbox, root_readonly):
+    def __init__(self, sandbox, root_readonly, fuse_mount_options={}):
         # We will be doing the mounts in the order in which they were declared.
         self.mounts = OrderedDict()
 
         # We want safe hardlinks on rootfs whenever root is not readonly
-        self.mounts['/'] = Mount(sandbox, '/', not root_readonly)
+        self.mounts['/'] = Mount(sandbox, '/', not root_readonly, fuse_mount_options)
 
         for mark in sandbox._get_marked_directories():
             directory = mark['directory']
@@ -113,7 +114,7 @@ class MountMap():
 
             # We want safe hardlinks for any non-root directory where
             # artifacts will be staged to
-            self.mounts[directory] = Mount(sandbox, directory, artifact)
+            self.mounts[directory] = Mount(sandbox, directory, artifact, fuse_mount_options)
 
     # get_mount_source()
     #
diff --git a/buildstream/sandbox/_sandboxbwrap.py b/buildstream/sandbox/_sandboxbwrap.py
index ea7254c1b7ea604a073cf9aa4f6d55cfa4f48838..8c406e53ed4b2c08329fba82e5bffbff70fd07f2 100644
--- a/buildstream/sandbox/_sandboxbwrap.py
+++ b/buildstream/sandbox/_sandboxbwrap.py
@@ -63,11 +63,8 @@ class SandboxBwrap(Sandbox):
         # Fallback to the sandbox default settings for
         # the cwd and env.
         #
-        if cwd is None:
-            cwd = self._get_work_directory()
-
-        if env is None:
-            env = self._get_environment()
+        cwd = self._get_work_directory(cwd=cwd)
+        env = self._get_environment(cwd=cwd, env=env)
 
         if not self._has_command(command[0], env):
             raise SandboxError("Staged artifacts do not provide command "
@@ -83,9 +80,6 @@ class SandboxBwrap(Sandbox):
         mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY)
         root_mount_source = mount_map.get_mount_source('/')
 
-        if cwd is None:
-            cwd = '/'
-
         # Grab the full path of the bwrap binary
         bwrap_command = [utils.get_host_tool('bwrap')]
 
@@ -114,9 +108,6 @@ class SandboxBwrap(Sandbox):
             bwrap_command += ['--unshare-uts', '--hostname', 'buildstream']
             bwrap_command += ['--unshare-ipc']
 
-        if cwd is not None:
-            bwrap_command += ['--chdir', cwd]
-
         # Give it a proc and tmpfs
         bwrap_command += [
             '--proc', '/proc',
@@ -157,6 +148,10 @@ class SandboxBwrap(Sandbox):
         if flags & SandboxFlags.ROOT_READ_ONLY:
             bwrap_command += ["--remount-ro", "/"]
 
+        if cwd is not None:
+            bwrap_command += ['--dir', cwd]
+            bwrap_command += ['--chdir', cwd]
+
         # Set UID and GUI
         if self.user_ns_available:
             bwrap_command += ['--unshare-user']
@@ -185,11 +180,6 @@ class SandboxBwrap(Sandbox):
         with ExitStack() as stack:
             stack.enter_context(mount_map.mounted(self))
 
-            # Ensure the cwd exists
-            if cwd is not None:
-                workdir = os.path.join(root_mount_source, cwd.lstrip(os.sep))
-                os.makedirs(workdir, exist_ok=True)
-
             # If we're interactive, we want to inherit our stdin,
             # otherwise redirect to /dev/null, ensuring process
             # disconnected from terminal.
diff --git a/buildstream/sandbox/_sandboxchroot.py b/buildstream/sandbox/_sandboxchroot.py
index a902f22ad385d34d3c912e99a7532e4b249694db..f19052b236590e3a5b99af697e122a74af4c0c59 100644
--- a/buildstream/sandbox/_sandboxchroot.py
+++ b/buildstream/sandbox/_sandboxchroot.py
@@ -35,6 +35,9 @@ from . import Sandbox, SandboxFlags
 
 
 class SandboxChroot(Sandbox):
+
+    _FUSE_MOUNT_OPTIONS = {'dev': True}
+
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
 
@@ -48,15 +51,11 @@ class SandboxChroot(Sandbox):
 
     def run(self, command, flags, *, cwd=None, env=None):
 
-        # Default settings
-        if cwd is None:
-            cwd = self._get_work_directory()
-
-        if cwd is None:
-            cwd = '/'
-
-        if env is None:
-            env = self._get_environment()
+        # Fallback to the sandbox default settings for
+        # the cwd and env.
+        #
+        cwd = self._get_work_directory(cwd=cwd)
+        env = self._get_environment(cwd=cwd, env=env)
 
         if not self._has_command(command[0], env):
             raise SandboxError("Staged artifacts do not provide command "
@@ -71,7 +70,8 @@ class SandboxChroot(Sandbox):
 
         # Create the mount map, this will tell us where
         # each mount point needs to be mounted from and to
-        self.mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY)
+        self.mount_map = MountMap(self, flags & SandboxFlags.ROOT_READ_ONLY,
+                                  self._FUSE_MOUNT_OPTIONS)
         root_mount_source = self.mount_map.get_mount_source('/')
 
         # Create a sysroot and run the command inside it
@@ -100,9 +100,8 @@ class SandboxChroot(Sandbox):
 
             # Ensure the cwd exists
             if cwd is not None:
-                workdir = os.path.join(root_mount_source, cwd.lstrip(os.sep))
+                workdir = os.path.join(rootfs, cwd.lstrip(os.sep))
                 os.makedirs(workdir, exist_ok=True)
-
             status = self.chroot(rootfs, command, stdin, stdout,
                                  stderr, cwd, env, flags)
 
diff --git a/buildstream/sandbox/_sandboxdummy.py b/buildstream/sandbox/_sandboxdummy.py
new file mode 100644
index 0000000000000000000000000000000000000000..29ff4bf69ce259d58a2cfe7cd1f7f1236b2fe260
--- /dev/null
+++ b/buildstream/sandbox/_sandboxdummy.py
@@ -0,0 +1,41 @@
+#
+#  Copyright (C) 2017 Codethink Limited
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+
+from .._exceptions import SandboxError
+from . import Sandbox
+
+
+class SandboxDummy(Sandbox):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self._reason = kwargs.get("dummy_reason", "no reason given")
+
+    def run(self, command, flags, *, cwd=None, env=None):
+
+        # Fallback to the sandbox default settings for
+        # the cwd and env.
+        #
+        cwd = self._get_work_directory(cwd=cwd)
+        env = self._get_environment(cwd=cwd, env=env)
+
+        if not self._has_command(command[0], env):
+            raise SandboxError("Staged artifacts do not provide command "
+                               "'{}'".format(command[0]),
+                               reason='missing-command')
+
+        raise SandboxError("This platform does not support local builds: {}".format(self._reason))
diff --git a/buildstream/sandbox/_sandboxremote.py b/buildstream/sandbox/_sandboxremote.py
new file mode 100644
index 0000000000000000000000000000000000000000..f522cc772a70f055944469e13ae54c3f6981c2d9
--- /dev/null
+++ b/buildstream/sandbox/_sandboxremote.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python3
+#
+#  Copyright (C) 2018 Bloomberg LP
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Jim MacArthur <jim.macarthur@codethink.co.uk>
+
+import os
+from urllib.parse import urlparse
+
+import grpc
+
+from . import Sandbox
+from ..storage._filebaseddirectory import FileBasedDirectory
+from ..storage._casbaseddirectory import CasBasedDirectory
+from .._protos.build.bazel.remote.execution.v2 import remote_execution_pb2, remote_execution_pb2_grpc
+from .._protos.google.rpc import code_pb2
+
+
+class SandboxError(Exception):
+    pass
+
+
+# SandboxRemote()
+#
+# This isn't really a sandbox, it's a stub which sends all the sources and build
+# commands to a remote server and retrieves the results from it.
+#
+class SandboxRemote(Sandbox):
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        url = urlparse(kwargs['server_url'])
+        if not url.scheme or not url.hostname or not url.port:
+            raise SandboxError("Configured remote URL '{}' does not match the expected layout. "
+                               .format(kwargs['server_url']) +
+                               "It should be of the form <protocol>://<domain name>:<port>.")
+        elif url.scheme != 'http':
+            raise SandboxError("Configured remote '{}' uses an unsupported protocol. "
+                               "Only plain HTTP is currenlty supported (no HTTPS).")
+
+        self.server_url = '{}:{}'.format(url.hostname, url.port)
+
+    def run_remote_command(self, command, input_root_digest, working_directory, environment):
+        # Sends an execution request to the remote execution server.
+        #
+        # This function blocks until it gets a response from the server.
+        #
+        environment_variables = [remote_execution_pb2.Command.
+                                 EnvironmentVariable(name=k, value=v)
+                                 for (k, v) in environment.items()]
+
+        # Create and send the Command object.
+        remote_command = remote_execution_pb2.Command(arguments=command,
+                                                      working_directory=working_directory,
+                                                      environment_variables=environment_variables,
+                                                      output_files=[],
+                                                      output_directories=[self._output_directory],
+                                                      platform=None)
+        context = self._get_context()
+        cascache = context.artifactcache
+        # Upload the Command message to the remote CAS server
+        command_digest = cascache.push_message(self._get_project(), remote_command)
+        if not command_digest or not cascache.verify_digest_pushed(self._get_project(), command_digest):
+            raise SandboxError("Failed pushing build command to remote CAS.")
+
+        # Create and send the action.
+        action = remote_execution_pb2.Action(command_digest=command_digest,
+                                             input_root_digest=input_root_digest,
+                                             timeout=None,
+                                             do_not_cache=False)
+
+        # Upload the Action message to the remote CAS server
+        action_digest = cascache.push_message(self._get_project(), action)
+        if not action_digest or not cascache.verify_digest_pushed(self._get_project(), action_digest):
+            raise SandboxError("Failed pushing build action to remote CAS.")
+
+        # Next, try to create a communication channel to the BuildGrid server.
+        channel = grpc.insecure_channel(self.server_url)
+        stub = remote_execution_pb2_grpc.ExecutionStub(channel)
+        request = remote_execution_pb2.ExecuteRequest(action_digest=action_digest,
+                                                      skip_cache_lookup=False)
+
+        def __run_remote_command(stub, execute_request=None, running_operation=None):
+            try:
+                last_operation = None
+                if execute_request is not None:
+                    operation_iterator = stub.Execute(execute_request)
+                else:
+                    request = remote_execution_pb2.WaitExecutionRequest(name=running_operation.name)
+                    operation_iterator = stub.WaitExecution(request)
+
+                for operation in operation_iterator:
+                    if operation.done:
+                        return operation
+                    else:
+                        last_operation = operation
+            except grpc.RpcError as e:
+                status_code = e.code()
+                if status_code == grpc.StatusCode.UNAVAILABLE:
+                    raise SandboxError("Failed contacting remote execution server at {}."
+                                       .format(self.server_url))
+
+                elif status_code in (grpc.StatusCode.INVALID_ARGUMENT,
+                                     grpc.StatusCode.FAILED_PRECONDITION,
+                                     grpc.StatusCode.RESOURCE_EXHAUSTED,
+                                     grpc.StatusCode.INTERNAL,
+                                     grpc.StatusCode.DEADLINE_EXCEEDED):
+                    raise SandboxError("{} ({}).".format(e.details(), status_code.name))
+
+                elif running_operation and status_code == grpc.StatusCode.UNIMPLEMENTED:
+                    raise SandboxError("Failed trying to recover from connection loss: "
+                                       "server does not support operation status polling recovery.")
+
+            return last_operation
+
+        operation = None
+        with self._get_context().timed_activity("Waiting for the remote build to complete"):
+            operation = __run_remote_command(stub, execute_request=request)
+            if operation is None:
+                return None
+            elif operation.done:
+                return operation
+
+            while operation is not None and not operation.done:
+                operation = __run_remote_command(stub, running_operation=operation)
+
+        return operation
+
+    def process_job_output(self, output_directories, output_files):
+        # Reads the remote execution server response to an execution request.
+        #
+        # output_directories is an array of OutputDirectory objects.
+        # output_files is an array of OutputFile objects.
+        #
+        # We only specify one output_directory, so it's an error
+        # for there to be any output files or more than one directory at the moment.
+        #
+        if output_files:
+            raise SandboxError("Output files were returned when we didn't request any.")
+        elif not output_directories:
+            error_text = "No output directory was returned from the build server."
+            raise SandboxError(error_text)
+        elif len(output_directories) > 1:
+            error_text = "More than one output directory was returned from the build server: {}."
+            raise SandboxError(error_text.format(output_directories))
+
+        tree_digest = output_directories[0].tree_digest
+        if tree_digest is None or not tree_digest.hash:
+            raise SandboxError("Output directory structure had no digest attached.")
+
+        context = self._get_context()
+        cascache = context.artifactcache
+        # Now do a pull to ensure we have the necessary parts.
+        dir_digest = cascache.pull_tree(self._get_project(), tree_digest)
+        if dir_digest is None or not dir_digest.hash or not dir_digest.size_bytes:
+            raise SandboxError("Output directory structure pulling from remote failed.")
+
+        path_components = os.path.split(self._output_directory)
+
+        # Now what we have is a digest for the output. Once we return, the calling process will
+        # attempt to descend into our directory and find that directory, so we need to overwrite
+        # that.
+
+        if not path_components:
+            # The artifact wants the whole directory; we could just return the returned hash in its
+            # place, but we don't have a means to do that yet.
+            raise SandboxError("Unimplemented: Output directory is empty or equal to the sandbox root.")
+
+        # At the moment, we will get the whole directory back in the first directory argument and we need
+        # to replace the sandbox's virtual directory with that. Creating a new virtual directory object
+        # from another hash will be interesting, though...
+
+        new_dir = CasBasedDirectory(self._get_context(), ref=dir_digest)
+        self._set_virtual_directory(new_dir)
+
+    def run(self, command, flags, *, cwd=None, env=None):
+        # Upload sources
+        upload_vdir = self.get_virtual_directory()
+
+        if isinstance(upload_vdir, FileBasedDirectory):
+            # Make a new temporary directory to put source in
+            upload_vdir = CasBasedDirectory(self._get_context(), ref=None)
+            upload_vdir.import_files(self.get_virtual_directory()._get_underlying_directory())
+
+        upload_vdir.recalculate_hash()
+
+        context = self._get_context()
+        cascache = context.artifactcache
+        # Now, push that key (without necessarily needing a ref) to the remote.
+        cascache.push_directory(self._get_project(), upload_vdir)
+        if not cascache.verify_digest_pushed(self._get_project(), upload_vdir.ref):
+            raise SandboxError("Failed to verify that source has been pushed to the remote artifact cache.")
+
+        # Fallback to the sandbox default settings for
+        # the cwd and env.
+        #
+        cwd = self._get_work_directory(cwd=cwd)
+        env = self._get_environment(cwd=cwd, env=env)
+
+        # We want command args as a list of strings
+        if isinstance(command, str):
+            command = [command]
+
+        # Now transmit the command to execute
+        operation = self.run_remote_command(command, upload_vdir.ref, cwd, env)
+
+        if operation is None:
+            # Failure of remote execution, usually due to an error in BuildStream
+            raise SandboxError("No response returned from server")
+
+        assert not operation.HasField('error') and operation.HasField('response')
+
+        execution_response = remote_execution_pb2.ExecuteResponse()
+        # The response is expected to be an ExecutionResponse message
+        assert operation.response.Is(execution_response.DESCRIPTOR)
+
+        operation.response.Unpack(execution_response)
+
+        if execution_response.status.code != code_pb2.OK:
+            # An unexpected error during execution: the remote execution
+            # system failed at processing the execution request.
+            if execution_response.status.message:
+                raise SandboxError(execution_response.status.message)
+            else:
+                raise SandboxError("Remote server failed at executing the build request.")
+
+        action_result = execution_response.result
+
+        if action_result.exit_code != 0:
+            # A normal error during the build: the remote execution system
+            # has worked correctly but the command failed.
+            # action_result.stdout and action_result.stderr also contain
+            # build command outputs which we ignore at the moment.
+            return action_result.exit_code
+
+        self.process_job_output(action_result.output_directories, action_result.output_files)
+
+        return 0
diff --git a/buildstream/sandbox/sandbox.py b/buildstream/sandbox/sandbox.py
index 87a2fb9c9bf7fa05368b3710f431c678e99704be..83714efdd10c9003e72678f5d35e58f43652b5fc 100644
--- a/buildstream/sandbox/sandbox.py
+++ b/buildstream/sandbox/sandbox.py
@@ -99,15 +99,21 @@ class Sandbox():
         self.__stdout = kwargs['stdout']
         self.__stderr = kwargs['stderr']
 
-        # Setup the directories. Root should be available to subclasses, hence
-        # being single-underscore. The others are private to this class.
+        # Setup the directories. Root and output_directory should be
+        # available to subclasses, hence being single-underscore. The
+        # others are private to this class.
         self._root = os.path.join(directory, 'root')
+        self._output_directory = None
         self.__directory = directory
         self.__scratch = os.path.join(self.__directory, 'scratch')
         for directory_ in [self._root, self.__scratch]:
             os.makedirs(directory_, exist_ok=True)
         self._vdir = None
 
+        # This is set if anyone requests access to the underlying
+        # directory via get_directory.
+        self._never_cache_vdirs = False
+
     def get_directory(self):
         """Fetches the sandbox root directory
 
@@ -120,35 +126,45 @@ class Sandbox():
 
         """
         if self.__allow_real_directory:
+            self._never_cache_vdirs = True
             return self._root
         else:
             raise BstError("You can't use get_directory")
 
     def get_virtual_directory(self):
-        """Fetches the sandbox root directory
+        """Fetches the sandbox root directory as a virtual Directory.
 
         The root directory is where artifacts for the base
-        runtime environment should be staged. Only works if
-        BST_VIRTUAL_DIRECTORY is not set.
+        runtime environment should be staged.
+
+        Use caution if you use get_directory and
+        get_virtual_directory.  If you alter the contents of the
+        directory returned by get_directory, all objects returned by
+        get_virtual_directory or derived from them are invalid and you
+        must call get_virtual_directory again to get a new copy.
 
         Returns:
-           (str): The sandbox root directory
+           (Directory): The sandbox root directory
 
         """
-        if not self._vdir:
-            # BST_CAS_DIRECTORIES is a deliberately hidden environment variable which
-            # can be used to switch on CAS-based directories for testing.
+        if self._vdir is None or self._never_cache_vdirs:
             if 'BST_CAS_DIRECTORIES' in os.environ:
                 self._vdir = CasBasedDirectory(self.__context, ref=None)
             else:
                 self._vdir = FileBasedDirectory(self._root)
         return self._vdir
 
+    def _set_virtual_directory(self, virtual_directory):
+        """ Sets virtual directory. Useful after remote execution
+        has rewritten the working directory.
+        """
+        self._vdir = virtual_directory
+
     def set_environment(self, environment):
         """Sets the environment variables for the sandbox
 
         Args:
-           directory (dict): The environment variables to use in the sandbox
+           environment (dict): The environment variables to use in the sandbox
         """
         self.__env = environment
 
@@ -160,6 +176,15 @@ class Sandbox():
         """
         self.__cwd = directory
 
+    def set_output_directory(self, directory):
+        """Sets the output directory - the directory which is preserved
+        as an artifact after assembly.
+
+        Args:
+           directory (str): An absolute path within the sandbox
+        """
+        self._output_directory = directory
+
     def mark_directory(self, directory, *, artifact=False):
         """Marks a sandbox directory and ensures it will exist
 
@@ -198,7 +223,9 @@ class Sandbox():
         .. note::
 
            The optional *cwd* argument will default to the value set with
-           :func:`~buildstream.sandbox.Sandbox.set_work_directory`
+           :func:`~buildstream.sandbox.Sandbox.set_work_directory` and this
+           function must make sure the directory will be created if it does
+           not exist yet, even if a workspace is being used.
         """
         raise ImplError("Sandbox of type '{}' does not implement run()"
                         .format(type(self).__name__))
@@ -262,20 +289,38 @@ class Sandbox():
     # Fetches the environment variables for running commands
     # in the sandbox.
     #
+    # Args:
+    #    cwd (str): The working directory the command has been requested to run in, if any.
+    #    env (str): The environment the command has been requested to run in, if any.
+    #
     # Returns:
     #    (str): The sandbox work directory
-    def _get_environment(self):
-        return self.__env
+    def _get_environment(self, *, cwd=None, env=None):
+        cwd = self._get_work_directory(cwd=cwd)
+        if env is None:
+            env = self.__env
+
+        # Naive getcwd implementations can break when bind-mounts to different
+        # paths on the same filesystem are present. Letting the command know
+        # what directory it is in makes it unnecessary to call the faulty
+        # getcwd.
+        env = dict(env)
+        env['PWD'] = cwd
+
+        return env
 
     # _get_work_directory()
     #
     # Fetches the working directory for running commands
     # in the sandbox.
     #
+    # Args:
+    #    cwd (str): The working directory the command has been requested to run in, if any.
+    #
     # Returns:
     #    (str): The sandbox work directory
-    def _get_work_directory(self):
-        return self.__cwd
+    def _get_work_directory(self, *, cwd=None):
+        return cwd or self.__cwd or '/'
 
     # _get_scratch_directory()
     #
diff --git a/buildstream/scriptelement.py b/buildstream/scriptelement.py
index 21240205815d940fdc309470b368bb87539bac51..3a5d914d07245d5d370eedb7255a738a441961bb 100644
--- a/buildstream/scriptelement.py
+++ b/buildstream/scriptelement.py
@@ -45,7 +45,7 @@ class ScriptElement(Element):
     __commands = None
     __layout = []
 
-    # The compose element's output is it's dependencies, so
+    # The compose element's output is its dependencies, so
     # we must rebuild if the dependencies change even when
     # not in strict build plans.
     #
@@ -135,7 +135,7 @@ class ScriptElement(Element):
            be made available at the specified location.
         """
         #
-        # Even if this is an empty list by default, make sure that it's
+        # Even if this is an empty list by default, make sure that it is
         # instance data instead of appending stuff directly onto class data.
         #
         if not self.__layout:
diff --git a/buildstream/source.py b/buildstream/source.py
index 6d5640532f24bb18a0a85790c8c6123cd4c22e5c..e42bad1a57bec59e93efd844b8401ec8d6b6a942 100644
--- a/buildstream/source.py
+++ b/buildstream/source.py
@@ -20,6 +20,19 @@
 Source - Base source class
 ==========================
 
+Built-in functionality
+----------------------
+
+The Source base class provides built-in functionality that may be overridden
+by individual plugins.
+
+* Directory
+
+  The ``directory`` variable can be set for all sources of a type in project.conf
+  or per source within an element.
+
+  This sets the location within the build root into which the content of the
+  source will be loaded. If the location does not exist, it will be created.
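+
+  For example, a hypothetical element might stage its source under a
+  subdirectory of the build root like this (the kind, url and directory
+  shown here are purely illustrative):
+
+  .. code:: yaml
+
+     sources:
+     - kind: tar
+       url: upstream:hello.tar.gz
+       directory: src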
 
 .. _core_source_abstract_methods:
 
@@ -28,6 +41,18 @@ Abstract Methods
 For loading and configuration purposes, Sources must implement the
 :ref:`Plugin base class abstract methods <core_plugin_abstract_methods>`.
 
+.. attention::
+
+   In order to ensure that all configuration data is processed at
+   load time, it is important that all URLs have been processed during
+   :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`.
+
+   Source implementations *must* either call
+   :func:`Source.translate_url() <buildstream.source.Source.translate_url>` or
+   :func:`Source.mark_download_url() <buildstream.source.Source.mark_download_url>`
+   for every URL that has been specified in the configuration during
+   :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`.
+
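+   For instance, a hypothetical source plugin might satisfy this rule in
+   its ``configure()`` implementation like so (the ``url`` and ``ref``
+   keys shown here are illustrative only):
+
+   .. code:: python
+
+      def configure(self, node):
+          self.node_validate(node, ['url', 'ref'] + Source.COMMON_CONFIG_KEYS)
+          self.original_url = self.node_get_member(node, str, 'url')
+          self.ref = self.node_get_member(node, str, 'ref', None)
+
+          # translate_url() resolves the alias and marks the URL as a
+          # side effect, satisfying the rule above
+          self.url = self.translate_url(self.original_url)
+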
 Sources expose the following abstract methods. Unless explicitly mentioned,
 these methods are mandatory to implement.
 
@@ -130,38 +155,15 @@ Class Reference
 """
 
 import os
-from collections import Mapping
+from collections.abc import Mapping
 from contextlib import contextmanager
 
-from . import Plugin
+from . import Plugin, Consistency
 from . import _yaml, utils
 from ._exceptions import BstError, ImplError, ErrorDomain
 from ._projectrefs import ProjectRefStorage
 
 
-class Consistency():
-    INCONSISTENT = 0
-    """Inconsistent
-
-    Inconsistent sources have no explicit reference set. They cannot
-    produce a cache key, be fetched or staged. They can only be tracked.
-    """
-
-    RESOLVED = 1
-    """Resolved
-
-    Resolved sources have a reference and can produce a cache key and
-    be fetched, however they cannot be staged.
-    """
-
-    CACHED = 2
-    """Cached
-
-    Cached sources have a reference which is present in the local
-    source cache. Only cached sources can be staged.
-    """
-
-
 class SourceError(BstError):
     """This exception should be raised by :class:`.Source` implementations
     to report errors to the user.
@@ -184,6 +186,13 @@ class SourceFetcher():
     fetching and substituting aliases.
 
     *Since: 1.2*
+
+    .. attention::
+
+       When implementing a SourceFetcher, remember to call
+       :func:`Source.mark_download_url() <buildstream.source.Source.mark_download_url>`
+       for every URL found in the configuration data at
+       :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` time.
     """
     def __init__(self):
         self.__alias = None
@@ -206,7 +215,7 @@ class SourceFetcher():
         Implementors should raise :class:`.SourceError` if the there is some
         network error or if the source reference could not be matched.
         """
-        raise ImplError("Source fetcher '{}' does not implement fetch()".format(type(self)))
+        raise ImplError("SourceFetcher '{}' does not implement fetch()".format(type(self)))
 
     #############################################################
     #                       Public Methods                      #
@@ -219,10 +228,7 @@ class SourceFetcher():
         Args:
            url (str): The url used to download.
         """
-        # Not guaranteed to be a valid alias yet.
-        # Ensuring it's a valid alias currently happens in Project.get_alias_uris
-        alias, _ = url.split(utils._ALIAS_SEPARATOR, 1)
-        self.__alias = alias
+        self.__alias = _extract_alias(url)
 
     #############################################################
     #            Private Methods used in BuildStream            #
@@ -280,8 +286,11 @@ class Source(Plugin):
         self.__element_kind = meta.element_kind         # The kind of the element owning this source
         self.__directory = meta.directory               # Staging relative directory
         self.__consistency = Consistency.INCONSISTENT   # Cached consistency state
+
+        # The alias_override is only set on a re-instantiated Source
         self.__alias_override = alias_override          # Tuple of alias and its override to use instead
-        self.__expected_alias = None                    # A hacky way to store the first alias used
+        self.__expected_alias = None                    # The primary alias
+        self.__marked_urls = set()                      # Set of marked download URLs
 
         # FIXME: Reconstruct a MetaSource from a Source instead of storing it.
         self.__meta = meta                              # MetaSource stored so we can copy this source later.
@@ -292,7 +301,7 @@ class Source(Plugin):
         self.__config = self.__extract_config(meta)
         self.__first_pass = meta.first_pass
 
-        self.configure(self.__config)
+        self._configure(self.__config)
 
     COMMON_CONFIG_KEYS = ['kind', 'directory']
     """Common source config keys
@@ -354,10 +363,10 @@ class Source(Plugin):
         Args:
            ref (simple object): The internal source reference to set, or ``None``
            node (dict): The same dictionary which was previously passed
-                        to :func:`~buildstream.source.Source.configure`
+                        to :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>`
 
-        See :func:`~buildstream.source.Source.get_ref` for a discussion on
-        the *ref* parameter.
+        See :func:`Source.get_ref() <buildstream.source.Source.get_ref>`
+        for a discussion on the *ref* parameter.
 
         .. note::
 
@@ -387,8 +396,8 @@ class Source(Plugin):
         backend store allows one to query for a new ref from a symbolic
         tracking data without downloading then that is desirable.
 
-        See :func:`~buildstream.source.Source.get_ref` for a discussion on
-        the *ref* parameter.
+        See :func:`Source.get_ref() <buildstream.source.Source.get_ref>`
+        for a discussion on the *ref* parameter.
         """
         # Allow a non implementation
         return None
@@ -438,7 +447,7 @@ class Source(Plugin):
            :class:`.SourceError`
 
         Default implementation is to call
-        :func:`~buildstream.source.Source.stage`.
+        :func:`Source.stage() <buildstream.source.Source.stage>`.
 
         Implementors overriding this method should assume that *directory*
         already exists.
@@ -448,20 +457,6 @@ class Source(Plugin):
         """
         self.stage(directory)
 
-    def mark_download_url(self, url):
-        """Identifies the URL that this Source uses to download
-
-        This must be called during :func:`~buildstream.plugin.Plugin.configure` if
-        :func:`~buildstream.source.Source.translate_url` is not called.
-
-        Args:
-           url (str): The url used to download
-
-        *Since: 1.2*
-        """
-        alias, _ = url.split(utils._ALIAS_SEPARATOR, 1)
-        self.__expected_alias = alias
-
     def get_source_fetchers(self):
         """Get the objects that are used for fetching
 
@@ -470,8 +465,15 @@ class Source(Plugin):
         is recommended.
 
         Returns:
-           list: A list of SourceFetchers. If SourceFetchers are not supported,
-                 this will be an empty list.
+           iterable: The Source's SourceFetchers, if any.
+
+        .. note::
+
+           Implementors can implement this as a generator.
+
+           The :func:`SourceFetcher.fetch() <buildstream.source.SourceFetcher.fetch>`
+           method will be called on the returned fetchers one by one,
+           before consuming the next fetcher in the list.
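+
+           A hypothetical generator-based implementation might look like
+           this (the ``submodules`` attribute is purely illustrative):
+
+           .. code:: python
+
+              def get_source_fetchers(self):
+                  for submodule in self.submodules:
+                      yield submodule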
 
         *Since: 1.2*
         """
@@ -494,17 +496,27 @@ class Source(Plugin):
         os.makedirs(directory, exist_ok=True)
         return directory
 
-    def translate_url(self, url, *, alias_override=None):
+    def translate_url(self, url, *, alias_override=None, primary=True):
         """Translates the given url which may be specified with an alias
         into a fully qualified url.
 
         Args:
-           url (str): A url, which may be using an alias
+           url (str): A URL, which may be using an alias
            alias_override (str): Optionally, an URI to override the alias with. (*Since: 1.2*)
+           primary (bool): Whether this is the primary URL for the source. (*Since: 1.2*)
 
         Returns:
-           str: The fully qualified url, with aliases resolved
+           str: The fully qualified URL, with aliases resolved
+
+        .. note::
+
+           This must be called for every URL in the configuration during
+           :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` if
+           :func:`Source.mark_download_url() <buildstream.source.Source.mark_download_url>`
+           is not called.
         """
+        # Ensure that the download URL is also marked
+        self.mark_download_url(url, primary=primary)
+
         # Alias overriding can happen explicitly (by command-line) or
         # implicitly (the Source being constructed with an __alias_override).
         if alias_override or self.__alias_override:
@@ -523,14 +535,56 @@ class Source(Plugin):
                         url = override_url + url_body
             return url
         else:
-            # Sneakily store the alias if it hasn't already been stored
-            if not self.__expected_alias and url and utils._ALIAS_SEPARATOR in url:
-                url_alias, _ = url.split(utils._ALIAS_SEPARATOR, 1)
-                self.__expected_alias = url_alias
-
             project = self._get_project()
             return project.translate_url(url, first_pass=self.__first_pass)
 
+    def mark_download_url(self, url, *, primary=True):
+        """Identifies the URL that this Source uses to download
+
+        Args:
+           url (str): The URL used to download
+           primary (bool): Whether this is the primary URL for the source
+
+        .. note::
+
+           This must be called for every URL in the configuration during
+           :func:`Plugin.configure() <buildstream.plugin.Plugin.configure>` if
+           :func:`Source.translate_url() <buildstream.source.Source.translate_url>`
+           is not called.
+
+        *Since: 1.2*
+        """
+        # Only mark the Source level aliases on the main instance, not in
+        # a reinstantiated instance in mirroring.
+        if not self.__alias_override:
+            if primary:
+                expected_alias = _extract_alias(url)
+
+                assert (self.__expected_alias is None or
+                        self.__expected_alias == expected_alias), \
+                    "Primary URL marked twice with different URLs"
+
+                self.__expected_alias = expected_alias
+
+        # Enforce proper behaviour of plugins by ensuring that all
+        # aliased URLs have been marked at Plugin.configure() time.
+        #
+        if self._get_configuring():
+            # Record marked urls while configuring
+            #
+            self.__marked_urls.add(url)
+        else:
+            # If an unknown aliased URL is seen after configuring,
+            # this is an error.
+            #
+            # It is still possible that a URL that was not mentioned
+            # in the element configuration can be marked, this is
+            # the case for git submodules which might be automatically
+            # discovered.
+            #
+            assert (url in self.__marked_urls or not _extract_alias(url)), \
+                "URL was not seen at configure time: {}".format(url)
+
     def get_project_directory(self):
         """Fetch the project base directory
 
@@ -774,7 +828,8 @@ class Source(Plugin):
         #
         # Step 2 - Set the ref in memory, and determine changed state
         #
-        changed = self._set_ref(new_ref, node)
+        if not self._set_ref(new_ref, node):
+            return False
 
         def do_save_refs(refs):
             try:
@@ -811,7 +866,7 @@ class Source(Plugin):
                                   .format(provenance.filename.shortname),
                                   reason="tracking-junction-fragment")
 
-        return changed
+        return True
 
     # Wrapper for track()
     #
@@ -865,26 +920,86 @@ class Source(Plugin):
     #                   Local Private Methods                   #
     #############################################################
 
+    # __clone_for_uri()
+    #
+    # Clone the source with an alternative URI setup for the alias
+    # which this source uses.
+    #
+    # This is used for iteration over source mirrors.
+    #
+    # Args:
+    #    uri (str): The alternative URI for this source's alias
+    #
+    # Returns:
+    #    (Source): A new clone of this Source, with the specified URI
+    #              as the value of the alias this Source has marked as
+    #              primary with either mark_download_url() or
+    #              translate_url().
+    #
+    def __clone_for_uri(self, uri):
+        project = self._get_project()
+        context = self._get_context()
+        alias = self._get_alias()
+        source_kind = type(self)
+
+        clone = source_kind(context, project, self.__meta, alias_override=(alias, uri))
+
+        # Do the necessary post instantiation routines here
+        #
+        clone._preflight()
+        clone._load_ref()
+        clone._update_state()
+
+        return clone
+
     # Tries to call fetch for every mirror, stopping once it succeeds
     def __do_fetch(self, **kwargs):
         project = self._get_project()
-        source_fetchers = self.get_source_fetchers()
+        context = self._get_context()
+
+        # Silence the STATUS messages which might happen as a result
+        # of checking the source fetchers.
+        with context.silence():
+            source_fetchers = self.get_source_fetchers()
+
+        # Use the source fetchers if they are provided
+        #
         if source_fetchers:
-            for fetcher in source_fetchers:
-                alias = fetcher._get_alias()
-                success = False
-                for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
-                    try:
-                        fetcher.fetch(uri)
-                    # FIXME: Need to consider temporary vs. permanent failures,
-                    #        and how this works with retries.
-                    except BstError as e:
-                        last_error = e
-                        continue
-                    success = True
-                    break
-                if not success:
-                    raise last_error
+
+            # Use a contorted loop here so that we can silence the
+            # messages which can result from consuming the items of
+            # source_fetchers, if it happens to be a generator.
+            #
+            source_fetchers = iter(source_fetchers)
+            try:
+
+                while True:
+
+                    with context.silence():
+                        fetcher = next(source_fetchers)
+
+                    alias = fetcher._get_alias()
+                    for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
+                        try:
+                            fetcher.fetch(uri)
+                        # FIXME: Need to consider temporary vs. permanent failures,
+                        #        and how this works with retries.
+                        except BstError as e:
+                            last_error = e
+                            continue
+
+                        # No error, we're done with this fetcher
+                        break
+
+                    else:
+                        # No break occurred, raise the last detected error
+                        raise last_error
+
+            except StopIteration:
+                pass
+
+        # Default codepath is to reinstantiate the Source
+        #
         else:
             alias = self._get_alias()
             if self.__first_pass:
@@ -895,12 +1010,8 @@ class Source(Plugin):
                 self.fetch(**kwargs)
                 return
 
-            context = self._get_context()
-            source_kind = type(self)
             for uri in project.get_alias_uris(alias, first_pass=self.__first_pass):
-                new_source = source_kind(context, project, self.__meta,
-                                         alias_override=(alias, uri))
-                new_source._preflight()
+                new_source = self.__clone_for_uri(uri)
                 try:
                     new_source.fetch(**kwargs)
                 # FIXME: Need to consider temporary vs. permanent failures,
@@ -908,18 +1019,22 @@ class Source(Plugin):
                 except BstError as e:
                     last_error = e
                     continue
+
+                # No error, we're done here
                 return
+
+            # Re-raise the last detected error
             raise last_error
 
     # Tries to call track for every mirror, stopping once it succeeds
     def __do_track(self, **kwargs):
         project = self._get_project()
-        # If there are no mirrors, or no aliases to replace, there's nothing to do here.
         alias = self._get_alias()
         if self.__first_pass:
             mirrors = project.first_pass_config.mirrors
         else:
             mirrors = project.config.mirrors
+        # If there are no mirrors, or no aliases to replace, there's nothing to do here.
         if not mirrors or not alias:
             return self.track(**kwargs)
 
@@ -929,9 +1044,7 @@ class Source(Plugin):
         # NOTE: We are assuming here that tracking only requires substituting the
         #       first alias used
         for uri in reversed(project.get_alias_uris(alias, first_pass=self.__first_pass)):
-            new_source = source_kind(context, project, self.__meta,
-                                     alias_override=(alias, uri))
-            new_source._preflight()
+            new_source = self.__clone_for_uri(uri)
             try:
                 ref = new_source.track(**kwargs)
             # FIXME: Need to consider temporary vs. permanent failures,
@@ -988,3 +1101,11 @@ class Source(Plugin):
 
             if src.get_consistency() == Consistency.RESOLVED:
                 src._fetch(previous_sources[0:index])
+
+
+def _extract_alias(url):
+    parts = url.split(utils._ALIAS_SEPARATOR, 1)
+    if len(parts) > 1 and parts[0].lower() not in utils._URI_SCHEMES:
+        return parts[0]
+    else:
+        return ""
diff --git a/buildstream/storage/_casbaseddirectory.py b/buildstream/storage/_casbaseddirectory.py
index 5ca1007935df446e13c6baec321a2322d68c8b82..07fd206edfad2a16e0c7d9318828c136281852f8 100644
--- a/buildstream/storage/_casbaseddirectory.py
+++ b/buildstream/storage/_casbaseddirectory.py
@@ -38,7 +38,6 @@ from .._exceptions import BstError
 from .directory import Directory, VirtualDirectoryError
 from ._filebaseddirectory import FileBasedDirectory
 from ..utils import FileListResult, safe_copy, list_relative_paths
-from .._artifactcache.cascache import CASCache
 
 
 class IndexEntry():
@@ -80,7 +79,7 @@ class CasBasedDirectory(Directory):
         self.filename = filename
         self.common_name = common_name
         self.pb2_directory = remote_execution_pb2.Directory()
-        self.cas_cache = CASCache(context)
+        self.cas_cache = context.artifactcache
         if ref:
             with open(self.cas_cache.objpath(ref), 'rb') as f:
                 self.pb2_directory.ParseFromString(f.read())
@@ -543,6 +542,15 @@ class CasBasedDirectory(Directory):
                 filelist.append(k)
         return filelist
 
+    def recalculate_hash(self):
+        """ Recalcuates the hash for this directory and store the results in
+        the cache. If this directory has a parent, tell it to
+        recalculate (since changing this directory changes an entry in
+        the parent). Hashes for subdirectories also get recalculated.
+        """
+        self._recalculate_recursing_up()
+        self._recalculate_recursing_down()
+
     def _get_identifier(self):
         path = ""
         if self.parent:
diff --git a/buildstream/element_enums.py b/buildstream/types.py
similarity index 65%
rename from buildstream/element_enums.py
rename to buildstream/types.py
index 2f2fb54d261d729f6aca8a2056a07415bdcc77eb..7bc7a16641dccba208510ea99b69bb6073eadf2e 100644
--- a/buildstream/element_enums.py
+++ b/buildstream/types.py
@@ -19,31 +19,19 @@
 #        Jim MacArthur <jim.macarthur@codethink.co.uk>
 
 """
-Element - Globally visible enumerations
-=======================================
+Foundation types
+================
 
 """
 
 from enum import Enum
 
 
-# _KeyStrength():
-#
-# Strength of cache key
-#
-class _KeyStrength(Enum):
-
-    # Includes strong cache keys of all build dependencies and their
-    # runtime dependencies.
-    STRONG = 1
-
-    # Includes names of direct build dependencies but does not include
-    # cache keys of dependencies.
-    WEAK = 2
-
-
 class Scope(Enum):
-    """Types of scope for a given element"""
+    """Defines the scope of dependencies to include for a given element
+    when iterating over the dependency graph in APIs like
+    :func:`Element.dependencies() <buildstream.element.Element.dependencies>`
+    """
 
     ALL = 1
     """All elements which the given element depends on, following
@@ -59,3 +47,44 @@ class Scope(Enum):
     """All elements required for running the element. Including the element
     itself.
     """
+
+
+class Consistency():
+    """Defines the various consistency states of a :class:`.Source`.
+    """
+
+    INCONSISTENT = 0
+    """Inconsistent
+
+    Inconsistent sources have no explicit reference set. They cannot
+    produce a cache key, be fetched or staged. They can only be tracked.
+    """
+
+    RESOLVED = 1
+    """Resolved
+
+    Resolved sources have a reference and can produce a cache key and
+    be fetched, however they cannot be staged.
+    """
+
+    CACHED = 2
+    """Cached
+
+    Cached sources have a reference which is present in the local
+    source cache. Only cached sources can be staged.
+    """
+
+
+# _KeyStrength():
+#
+# Strength of cache key
+#
+class _KeyStrength(Enum):
+
+    # Includes strong cache keys of all build dependencies and their
+    # runtime dependencies.
+    STRONG = 1
+
+    # Includes names of direct build dependencies but does not include
+    # cache keys of dependencies.
+    WEAK = 2
diff --git a/buildstream/utils.py b/buildstream/utils.py
index 943346689c1594b236f505c691b194b6e88db434..7f2f9d5fd1ecbbb1cfc545d61a00d1b728481723 100644
--- a/buildstream/utils.py
+++ b/buildstream/utils.py
@@ -35,6 +35,7 @@ import tempfile
 import itertools
 import functools
 from contextlib import contextmanager
+from stat import S_ISDIR
 
 import psutil
 
@@ -47,6 +48,7 @@ _magic_timestamp = calendar.timegm([2011, 11, 11, 11, 11, 11])
 
 # The separator we use for user specified aliases
 _ALIAS_SEPARATOR = ':'
+_URI_SCHEMES = ["http", "https", "ftp", "file", "git", "sftp", "ssh"]
 
 
 class UtilError(BstError):
@@ -327,27 +329,25 @@ def safe_remove(path):
     Raises:
        UtilError: In the case of unexpected system call failures
     """
-    if os.path.lexists(path):
-
-        # Try to remove anything that is in the way, but issue
-        # a warning instead if it removes a non empty directory
-        try:
+    try:
+        if S_ISDIR(os.lstat(path).st_mode):
+            os.rmdir(path)
+        else:
             os.unlink(path)
-        except OSError as e:
-            if e.errno != errno.EISDIR:
-                raise UtilError("Failed to remove '{}': {}"
-                                .format(path, e))
-
-            try:
-                os.rmdir(path)
-            except OSError as e:
-                if e.errno == errno.ENOTEMPTY:
-                    return False
-                else:
-                    raise UtilError("Failed to remove '{}': {}"
-                                    .format(path, e))
 
-    return True
+        # File removed/unlinked successfully
+        return True
+
+    except OSError as e:
+        if e.errno == errno.ENOTEMPTY:
+            # Path is non-empty directory
+            return False
+        elif e.errno == errno.ENOENT:
+            # Path does not exist
+            return True
+
+        raise UtilError("Failed to remove '{}': {}"
+                        .format(path, e))
 
 
 def copy_files(src, dest, *, files=None, ignore_missing=False, report_written=False):
@@ -371,6 +371,8 @@ def copy_files(src, dest, *, files=None, ignore_missing=False, report_written=Fa
        Directories in `dest` are replaced with files from `src`,
        unless the existing directory in `dest` is not empty in which
        case the path will be reported in the return value.
+
+       UNIX domain socket files from `src` are ignored.
     """
     presorted = False
     if files is None:
@@ -413,6 +415,8 @@ def link_files(src, dest, *, files=None, ignore_missing=False, report_written=Fa
 
        If a hardlink cannot be created due to crossing filesystems,
        then the file will be copied instead.
+
+       UNIX domain socket files from `src` are ignored.
     """
     presorted = False
     if files is None:
@@ -498,7 +502,7 @@ def get_bst_version():
 
 @contextmanager
 def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
-                     errors=None, newline=None, closefd=True, opener=None):
+                     errors=None, newline=None, closefd=True, opener=None, tempdir=None):
     """Save a file with a temporary name and rename it into place when ready.
 
     This is a context manager which is meant for saving data to files.
@@ -525,8 +529,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
     # https://bugs.python.org/issue8604
 
     assert os.path.isabs(filename), "The utils.save_file_atomic() parameter ``filename`` must be an absolute path"
-    dirname = os.path.dirname(filename)
-    fd, tempname = tempfile.mkstemp(dir=dirname)
+    if tempdir is None:
+        tempdir = os.path.dirname(filename)
+    fd, tempname = tempfile.mkstemp(dir=tempdir)
     os.close(fd)
 
     f = open(tempname, mode=mode, buffering=buffering, encoding=encoding,
@@ -558,6 +563,9 @@ def save_file_atomic(filename, mode='w', *, buffering=-1, encoding=None,
 #
 # Get the disk usage of a given directory in bytes.
 #
+# This function assumes that files do not inadvertently
+# disappear while this function is running.
+#
 # Arguments:
 #     (str) The path whose size to check.
 #
@@ -645,6 +653,7 @@ def _pretty_size(size, dec_places=0):
             psize /= 1024
     return "{size:g}{unit}".format(size=round(psize, dec_places), unit=unit)
 
+
 # A sentinel to be used as a default argument for functions that need
 # to distinguish between a kwarg set to None and an unset kwarg.
 _sentinel = object()
@@ -677,7 +686,7 @@ def _force_rmtree(rootpath, **kwargs):
 
     try:
         shutil.rmtree(rootpath, **kwargs)
-    except shutil.Error as e:
+    except OSError as e:
         raise UtilError("Failed to remove cache directory '{}': {}"
                         .format(rootpath, e))
 
@@ -839,6 +848,13 @@ def _process_list(srcdir, destdir, filelist, actionfunc, result,
             os.mknod(destpath, file_stat.st_mode, file_stat.st_rdev)
             os.chmod(destpath, file_stat.st_mode)
 
+        elif stat.S_ISFIFO(mode):
+            os.mkfifo(destpath, mode)
+
+        elif stat.S_ISSOCK(mode):
+            # We can't duplicate the process serving the socket anyway
+            pass
+
         else:
             # Unsupported type.
             raise UtilError('Cannot extract {} into staging-area. Unsupported type.'.format(srcpath))
@@ -977,7 +993,7 @@ def _tempdir(suffix="", prefix="tmp", dir=None):  # pylint: disable=redefined-bu
 
 # _kill_process_tree()
 #
-# Brutally murder a process and all of it's children
+# Brutally murder a process and all of its children
 #
 # Args:
 #    pid (int): Process ID
diff --git a/contrib/bst-docker-import b/contrib/bst-docker-import
new file mode 100755
index 0000000000000000000000000000000000000000..dfb16d7ce46aec8b7c8940fb61b41c92fc08ee37
--- /dev/null
+++ b/contrib/bst-docker-import
@@ -0,0 +1,102 @@
+#!/bin/bash
+#
+#  Copyright 2018 Bloomberg Finance LP
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Chandan Singh <csingh43@bloomberg.net>
+
+# This is a helper script to generate Docker images using checkouts of
+# BuildStream elements.
+
+usage() {
+    cat <<EOF
+
+USAGE: $(basename "$0") [-c BST_CMD] [-m MESSAGE] [-t TAG] [-h] ELEMENT
+
+Create a Docker image from a bst checkout of an element.
+
+OPTIONS:
+    -c BST_CMD    Path to BuildStream command (default: bst).
+    -m MESSAGE    Commit message for the imported image.
+    -t TAG        Tag of the imported image.
+    -h            Print this help text and exit.
+
+EXAMPLES:
+
+    # Import hello.bst as a Docker image with tag "bst-hello" and message "hello"
+    $(basename "$0") -m hello -t bst-hello hello.bst
+
+    # Import hello.bst as a Docker image with tag "bst-hello" using bst-here
+    $(basename "$0") -c bst-here -t bst-hello hello.bst
+
+EOF
+    exit "$1"
+}
+
+die() {
+    echo "FATAL: $1" >&2
+    exit 1
+}
+
+bst_cmd=bst
+docker_import_cmd=(docker import)
+docker_image_tag=
+
+while getopts c:m:t:h arg
+do
+    case $arg in
+    c)
+        bst_cmd="$OPTARG"
+        ;;
+    m)
+        docker_import_cmd+=('-m' "$OPTARG")
+        ;;
+    t)
+        docker_image_tag="$OPTARG"
+        ;;
+    h)
+        usage 0
+        ;;
+    \?)
+        usage 1
+    esac
+done
+
+shift $((OPTIND-1))
+if [[ "$#" != 1 ]]; then
+    echo "$0: No element specified" >&2
+    usage 1
+fi
+element="$1"
+
+# Dump to a temporary file in the current directory.
+# NOTE: We use current directory to try to ensure compatibility with scripts
+# like bst-here, assuming that the current working directory is mounted
+# inside the container.
+
+checkout_tar="bst-checkout-$(basename "$element")-$RANDOM.tar"
+
+echo "INFO: Checking out $element ..." >&2
+$bst_cmd checkout --tar "$element" "$checkout_tar" || die "Failed to checkout $element"
+echo "INFO: Successfully checked out $element" >&2
+
+echo "INFO: Importing Docker image ..." >&2
+"${docker_import_cmd[@]}" "$checkout_tar" "$docker_image_tag" || die "Failed to import Docker image from tarball"
+echo "INFO: Successfully import Docker image $docker_image_tag" >&2
+
+echo "INFO: Cleaning up ..." >&2
+rm "$checkout_tar" || die "Failed to remove $checkout_tar"
+echo "INFO: Clean up finished" >&2
diff --git a/dev-requirements.txt b/dev-requirements.txt
index ee2db0351d627cbcb711729eb04a3cd7a9926ff7..c88b4c7237a199b1a0f5fe2295ec48b972375d32 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -8,3 +8,4 @@ pytest-env
 pytest-pep8
 pytest-pylint
 pytest-xdist
+pytest-timeout
diff --git a/doc/Makefile b/doc/Makefile
index 51d4513ce3b3ddf14827959eadcb1a41b7acdd9a..f52b869efaa7dbd3789a752b2be77605e832a58b 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -35,7 +35,7 @@ endif
 PYTHONPATH=$(CURDIR)/..:$(CURDIR)/../buildstream/plugins
 
 
-.PHONY: all clean templates templates-clean sessions sessions-prep sessions-clean html devhelp
+.PHONY: all clean templates templates-clean sessions sessions-prep sessions-clean badges badges-clean html devhelp
 
 # Canned recipe for generating plugin api skeletons
 #   $1 = the plugin directory
@@ -70,9 +70,13 @@ endef
 
 all: html devhelp
 
-clean: templates-clean sessions-clean
+clean: templates-clean sessions-clean badges-clean
 	rm -rf build
 
+############################################################
+#                 Plugin doc templates                     #
+############################################################
+
 # Generate rst templates for the docs using a mix of sphinx-apidoc and
 # our 'plugin-doc-skeleton' routine for plugin pages.
 templates:
@@ -86,6 +90,10 @@ templates-clean:
 	rm -rf source/elements
 	rm -rf source/sources
 
+############################################################
+#                   Session captures                       #
+############################################################
+
 # Stage the stored sessions into the place where they will
 # be used in the build.
 #
@@ -111,10 +119,27 @@ sessions: sessions-prep
 sessions-clean:
 	rm -rf source/sessions
 
+
+############################################################
+#                  Generate release badges                 #
+############################################################
+badges-clean:
+	rm -rf source/badges
+
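+# Generate the release and snapshot version badges; badges.py inspects
+# the git tags of this checkout to determine the latest versions.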
+badges:
+	mkdir -p source/badges
+	$(CURDIR)/badges.py > source/badges/snapshot.svg
+	$(CURDIR)/badges.py --release > source/badges/release.svg
+
+
+############################################################
+#                    Main sphinx build                     #
+############################################################
+
 # Targets which generate docs with sphinx build
 #
 #
-html devhelp: templates sessions
+html devhelp: templates sessions badges
 	@echo "Building $@..."
 	PYTHONPATH=$(PYTHONPATH) \
 	    $(SPHINXBUILD) -b $@ $(ALLSPHINXOPTS) "$(BUILDDIR)/$@" \
diff --git a/doc/badges.py b/doc/badges.py
new file mode 100755
index 0000000000000000000000000000000000000000..5e20dc7f972f6c700e00b97883461feae96ce300
--- /dev/null
+++ b/doc/badges.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+#
+#  Copyright (C) 2018 Codethink Limited
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors:
+#        Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>
+#
+import click
+import subprocess
+import re
+
+# The badge template is modeled after the gitlab badge svgs
+#
+BADGE_TEMPLATE = """
+<svg xmlns="http://www.w3.org/2000/svg"
+     xmlns:xlink="http://www.w3.org/1999/xlink"
+     width="116" height="20">
+  <a xlink:href="{url_target}">
+    <linearGradient id="{badge_name}_b" x2="0" y2="100%">
+      <stop offset="0" stop-color="#bbb" stop-opacity=".1"/>
+      <stop offset="1" stop-opacity=".1"/>
+    </linearGradient>
+
+    <mask id="{badge_name}_a">
+      <rect width="116" height="20" rx="3" fill="#fff"/>
+    </mask>
+
+    <g mask="url(#{badge_name}_a)">
+      <path fill="#555"
+            d="M0 0 h62 v20 H0 z"/>
+      <path fill="{color}"
+            d="M62 0 h54 v20 H62 z"/>
+      <path fill="url(#{badge_name}_b)"
+            d="M0 0 h116 v20 H0 z"/>
+    </g>
+
+    <g fill="#fff" text-anchor="middle">
+      <g font-family="DejaVu Sans,Verdana,Geneva,sans-serif" font-size="11">
+        <text x="31" y="15" fill="#010101" fill-opacity=".3">
+          {badge_name}
+        </text>
+        <text x="31" y="14">
+          {badge_name}
+        </text>
+        <text x="89" y="15" fill="#010101" fill-opacity=".3">
+          {version}
+        </text>
+        <text x="89" y="14">
+          {version}
+        </text>
+      </g>
+    </g>
+  </a>
+</svg>
+"""
+
+URL_FORMAT = 'https://download.gnome.org/sources/BuildStream/{brief_version}/BuildStream-{full_version}.tar.xz'
+RELEASE_COLOR = '#0040FF'
+SNAPSHOT_COLOR = '#FF8000'
+VERSION_TAG_MATCH = r'([0-9]*)\.([0-9]*)\.([0-9]*)'
+
+
+# Parse a release tag and return a three tuple
+# of the major, minor and micro version.
+#
+# Tags which do not follow the release tag format
+# will just be returned as (0, 0, 0)
+#
+def parse_tag(tag):
+    match = re.search(VERSION_TAG_MATCH, tag)
+    if match:
+        major = match.group(1)
+        minor = match.group(2)
+        micro = match.group(3)
+        return (int(major), int(minor), int(micro))
+
+    return (0, 0, 0)
+
+
+# Call out to git and guess the latest version;
+# this will just return (0, 0, 0) in case of any error.
+#
+def guess_version(release):
+    try:
+        tags_output = subprocess.check_output(['git', 'tag'])
+    except subprocess.CalledProcessError:
+        return (0, 0, 0)
+
+    # Parse the `git tag` output into a list of integer tuples
+    tags_output = tags_output.decode('UTF-8')
+    all_tags = tags_output.splitlines()
+    all_versions = [parse_tag(tag) for tag in all_tags]
+
+    # Filter the list by the minor point version; if
+    # we are checking for the latest "release" version, then
+    # only pick up even numbered minor points.
+    #
+    filtered_versions = [
+        version for version in all_versions
+        if (version[1] % 2) == (not release)
+    ]
+
+    # Make sure they are sorted, and take the last one
+    sorted_versions = sorted(filtered_versions)
+    latest_version = sorted_versions[-1]
+
+    return latest_version
+
+
+@click.command(short_help="Generate the version badges")
+@click.option('--release', is_flag=True, default=False,
+              help="Whether to generate the badge for the release version")
+def generate_badges(release):
+    """Generate the version badge svg files
+    """
+    major, minor, micro = guess_version(release)
+
+    if release:
+        badge_name = 'release'
+        color = RELEASE_COLOR
+    else:
+        badge_name = 'snapshot'
+        color = SNAPSHOT_COLOR
+
+    brief_version = '{major}.{minor}'.format(major=major, minor=minor)
+    full_version = '{major}.{minor}.{micro}'.format(major=major, minor=minor, micro=micro)
+    url_target = URL_FORMAT.format(brief_version=brief_version, full_version=full_version)
+    badge = BADGE_TEMPLATE.format(badge_name=badge_name,
+                                  version=full_version,
+                                  color=color,
+                                  url_target=url_target)
+    click.echo(badge, nl=False)
+    return 0
+
+
+if __name__ == '__main__':
+    generate_badges()
diff --git a/doc/bst2html.py b/doc/bst2html.py
index 7bbcf6c9a2b14d803907b5e137f6edc098ab404e..af35efe24948caf346334a7973ffc0d89eb7606f 100755
--- a/doc/bst2html.py
+++ b/doc/bst2html.py
@@ -29,7 +29,7 @@ import sys
 import re
 import shlex
 import subprocess
-from collections import Mapping
+from collections.abc import Mapping
 from contextlib import contextmanager
 from tempfile import TemporaryDirectory
 
@@ -96,8 +96,8 @@ def _ansi2html_get_styles(palette):
 
         for g in range(24):
             i = g + 232
-            l = g * 10 + 8
-            indexed_style['%s' % i] = ''.join('%02X' % c if 0 <= c <= 255 else None for c in (l, l, l))
+            L = g * 10 + 8
+            indexed_style['%s' % i] = ''.join('%02X' % c if 0 <= c <= 255 else None for c in (L, L, L))
 
         _ANSI2HTML_STYLES[palette] = (regular_style, bold_style, indexed_style)
     return _ANSI2HTML_STYLES[palette]
@@ -440,12 +440,12 @@ def run_session(description, tempdir, source_cache, palette, config_file, force)
 @click.option('--palette', '-p', default='tango',
               type=click.Choice(['solarized', 'solarized-xterm', 'tango', 'xterm', 'console']),
               help="Selects a palette for the output style")
-@click.argument('description', click.Path(file_okay=True, dir_okay=False, readable=True))
+@click.argument('description', type=click.Path(file_okay=True, dir_okay=False, readable=True))
 def run_bst(directory, force, source_cache, description, palette):
     """Run a bst command and capture stdout/stderr in html
 
-    This command normally takes a description yaml file, see the HACKING
-    file for information on it's format.
+    This command normally takes a description yaml file, see the CONTRIBUTING
+    file for information on its format.
     """
     if not source_cache and os.environ.get('BST_SOURCE_CACHE'):
         source_cache = os.environ['BST_SOURCE_CACHE']
@@ -455,6 +455,7 @@ def run_bst(directory, force, source_cache, description, palette):
 
     return 0
 
+
 if __name__ == '__main__':
     try:
         run_bst()
diff --git a/doc/examples/autotools/elements/hello.bst b/doc/examples/autotools/elements/hello.bst
index 510f5b9759a95d4719405ef94dc378f98785f961..ce3ea7466fd424b3342573253069738f6ac1f0f3 100644
--- a/doc/examples/autotools/elements/hello.bst
+++ b/doc/examples/autotools/elements/hello.bst
@@ -8,7 +8,7 @@ variables:
   # The hello world example lives in the doc/amhello folder.
   #
   # Set the %{command-subdir} variable to that location
-  # and just have the autotools element run it's commands there.
+  # and just have the autotools element run its commands there.
   #
   command-subdir: doc/amhello
 
diff --git a/doc/examples/autotools/project.conf b/doc/examples/autotools/project.conf
index 726e7dd5ab13b640ac2ea76588cd876cfe17ae1d..96e0284e021102dfe7da84617d469891e7cf00d4 100644
--- a/doc/examples/autotools/project.conf
+++ b/doc/examples/autotools/project.conf
@@ -9,5 +9,5 @@ element-path: elements
 
 # Define some aliases for the tarballs we download
 aliases:
-  alpine: https://gnome7.codethink.co.uk/tarballs/
+  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
   gnu: http://ftpmirror.gnu.org/gnu/automake/
diff --git a/doc/examples/developing/project.conf b/doc/examples/developing/project.conf
index f799881ce44da48ee2d499309131e9f29c401c23..1ae2e08be8413fbc59991b3c9d24a729cb030525 100644
--- a/doc/examples/developing/project.conf
+++ b/doc/examples/developing/project.conf
@@ -9,4 +9,4 @@ element-path: elements
 
 # Define an alias for our alpine tarball
 aliases:
-  alpine: https://gnome7.codethink.co.uk/tarballs/
+  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
diff --git a/doc/examples/flatpak-autotools/files/src/aclocal.m4 b/doc/examples/flatpak-autotools/files/src/aclocal.m4
index f3018f6c870ba5fb0590a4ffe78c096f541d3d70..b0616b92c8eefc34d0cd3700744059daa7f37111 100644
--- a/doc/examples/flatpak-autotools/files/src/aclocal.m4
+++ b/doc/examples/flatpak-autotools/files/src/aclocal.m4
@@ -150,7 +150,7 @@ fi])])
 
 # There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be
 # written in clear, in which case automake, when reading aclocal.m4,
-# will think it sees a *use*, and therefore will trigger all it's
+# will think it sees a *use*, and therefore will trigger all its
 # C support machinery.  Also note that it means that autoscan, seeing
 # CC etc. in the Makefile, will ask for an AC_PROG_CC use...
 
diff --git a/doc/examples/integration-commands/project.conf b/doc/examples/integration-commands/project.conf
index b33267005fc49cbc2b57c1a0fc7392cf67311dcc..9ae5b2a3caa18fbb88e2d2612d15e8df3487197f 100644
--- a/doc/examples/integration-commands/project.conf
+++ b/doc/examples/integration-commands/project.conf
@@ -9,4 +9,4 @@ element-path: elements
 
 # Define an alias for our alpine tarball
 aliases:
-  alpine: https://gnome7.codethink.co.uk/tarballs/
+  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
diff --git a/doc/examples/junctions/autotools/elements/hello.bst b/doc/examples/junctions/autotools/elements/hello.bst
index 510f5b9759a95d4719405ef94dc378f98785f961..ce3ea7466fd424b3342573253069738f6ac1f0f3 100644
--- a/doc/examples/junctions/autotools/elements/hello.bst
+++ b/doc/examples/junctions/autotools/elements/hello.bst
@@ -8,7 +8,7 @@ variables:
   # The hello world example lives in the doc/amhello folder.
   #
   # Set the %{command-subdir} variable to that location
-  # and just have the autotools element run it's commands there.
+  # and just have the autotools element run its commands there.
   #
   command-subdir: doc/amhello
 
diff --git a/doc/examples/junctions/autotools/project.conf b/doc/examples/junctions/autotools/project.conf
index 4cfc0e692f8f47b7a2163d586e6f37d9db2a4d22..2cf58245d3d36ae806d2602a84858a37a848a680 100644
--- a/doc/examples/junctions/autotools/project.conf
+++ b/doc/examples/junctions/autotools/project.conf
@@ -9,5 +9,5 @@ element-path: elements
 
 # Define some aliases for the tarballs we download
 aliases:
-  alpine: https://gnome7.codethink.co.uk/tarballs/
+  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
   gnu: https://ftpmirror.gnu.org/gnu/automake/
diff --git a/doc/examples/running-commands/project.conf b/doc/examples/running-commands/project.conf
index de266178e760ee951adccac017f7df39a7c42354..7127b0db943812640c0051690e0cc8269f01775c 100644
--- a/doc/examples/running-commands/project.conf
+++ b/doc/examples/running-commands/project.conf
@@ -9,4 +9,4 @@ element-path: elements
 
 # Define an alias for our alpine tarball
 aliases:
-  alpine: https://gnome7.codethink.co.uk/tarballs/
+  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
diff --git a/doc/source/CONTRIBUTING.rst b/doc/source/CONTRIBUTING.rst
new file mode 120000
index 0000000000000000000000000000000000000000..e9a8ba642365b012e8808fdfc994c73601d51ba0
--- /dev/null
+++ b/doc/source/CONTRIBUTING.rst
@@ -0,0 +1 @@
+../../CONTRIBUTING.rst
\ No newline at end of file
diff --git a/doc/source/HACKING.rst b/doc/source/HACKING.rst
deleted file mode 120000
index a2f06b723dcd66757fbb67808f66c55e8510e4f7..0000000000000000000000000000000000000000
--- a/doc/source/HACKING.rst
+++ /dev/null
@@ -1 +0,0 @@
-../../HACKING.rst
\ No newline at end of file
diff --git a/doc/source/additional_docker.rst b/doc/source/additional_docker.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0cddad2dbfc39142b359c3081cf8102e1ad9f353
--- /dev/null
+++ b/doc/source/additional_docker.rst
@@ -0,0 +1,52 @@
+
+.. _bst_and_docker:
+
+
+BuildStream and Docker
+======================
+BuildStream integrates with Docker in multiple ways. This page describes
+those integrations.
+
+
+Run BuildStream inside Docker
+-----------------------------
+Refer to the `BuildStream inside Docker <https://buildstream.build/docker_install.html>`_
+documentation for instructions on how to run BuildStream as a Docker container.
+
+
+Generate Docker images
+----------------------
+The
+`bst-docker-import script <https://gitlab.com/BuildStream/buildstream/blob/master/contrib/bst-docker-import>`_
+can be used to generate a Docker image from built artifacts.
+
+You can download it and make it executable like this:
+
+.. code:: bash
+
+  mkdir -p ~/.local/bin
+  curl --get https://gitlab.com/BuildStream/buildstream/raw/master/contrib/bst-docker-import > ~/.local/bin/bst-docker-import
+  chmod +x ~/.local/bin/bst-docker-import
+
+Check if ``~/.local/bin`` appears in your PATH environment variable -- if it
+doesn't, you should
+`edit your ~/.profile so that it does <https://stackoverflow.com/questions/14637979/>`_.
+
+Once the script is available in your PATH and assuming you have Docker
+installed, you can start using the ``bst-docker-import`` script. Here is a
+minimal example to generate an image called ``bst-hello`` from an element
+called ``hello.bst`` assuming it is already built:
+
+.. code:: bash
+
+  bst-docker-import -t bst-hello hello.bst
+
+This script can also be used if you are running BuildStream inside Docker. In
+this case, you will need to supply the command that you are using to run
+BuildStream using the ``-c`` option.  If you are using the
+`bst-here wrapper script <https://gitlab.com/BuildStream/buildstream/blob/master/contrib/bst-here>`_,
+you can achieve the same results as the above example like this:
+
+.. code:: bash
+
+  bst-docker-import -c bst-here -t bst-hello hello.bst
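+
+The imported image can then be run with Docker in the usual way. For example,
+assuming the ``bst-hello`` image generated above contains a shell, a quick
+sanity check might look like this:
+
+.. code:: bash
+
+  docker run --rm -it bst-hello /bin/sh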
diff --git a/doc/source/advanced-features/junction-elements.rst b/doc/source/advanced-features/junction-elements.rst
index d2d223b88b4871975458b37131f33980cc103503..b6065d3442e04be89d7f906f86bc4a5babdf29b2 100644
--- a/doc/source/advanced-features/junction-elements.rst
+++ b/doc/source/advanced-features/junction-elements.rst
@@ -43,7 +43,7 @@ Building callHello.bst,
 .. raw:: html
    :file: ../sessions/junctions-build.html
 
-You can see that the hello.bst element and it's dependencies from the autotools
+You can see that the hello.bst element and its dependencies from the autotools
 project have been built as part of the pipeline for callHello.bst.
 
 We can now invoke `bst shell`
diff --git a/doc/source/conf.py b/doc/source/conf.py
index e95a114e18b4500a6a56a5bd06b7befec0715a47..5aaaed28054dcf5506cbf0899363c42246207087 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -19,10 +19,10 @@
 #
 import os
 import sys
-sys.path.insert(0, os.path.abspath('..'))
-
 from buildstream import __version__
 
+sys.path.insert(0, os.path.abspath('..'))
+
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
@@ -112,7 +112,7 @@ add_module_names = False
 pygments_style = 'sphinx'
 
 # A list of ignored prefixes for module index sorting.
-modindex_common_prefix = [ 'buildstream.' ]
+modindex_common_prefix = ['buildstream.']
 
 # If true, keep warnings as "system message" paragraphs in the built documents.
 # keep_warnings = False
@@ -160,7 +160,7 @@ html_theme = 'sphinx_rtd_theme'
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = []
+html_static_path = ['badges']
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
diff --git a/doc/source/core_additional.rst b/doc/source/core_additional.rst
index 76e523ee809fad36de09b900a029b5176e0e0aaa..08c4456306e4858e8d171cb233e863b0c16aac11 100644
--- a/doc/source/core_additional.rst
+++ b/doc/source/core_additional.rst
@@ -8,3 +8,4 @@ Additional writings
 
    additional_cachekeys
    additional_sandboxing
+   additional_docker
diff --git a/doc/source/core_framework.rst b/doc/source/core_framework.rst
index c3b84a9b1bd21523063c241e9abba1256fa87b96..a66f3640fea64000adb8346b292ce5b14f989972 100644
--- a/doc/source/core_framework.rst
+++ b/doc/source/core_framework.rst
@@ -12,6 +12,7 @@ useful for working on BuildStream itself.
 .. toctree::
    :maxdepth: 1
 
+   buildstream.types
    buildstream.plugin
    buildstream.source
    buildstream.element
diff --git a/doc/source/developing/workspaces.rst b/doc/source/developing/workspaces.rst
index a545bb6613a48575a791650d7d295d5fbc4fe7a3..b5ed64b2c62c6f958c6c47a13d2cd62c381464e4 100644
--- a/doc/source/developing/workspaces.rst
+++ b/doc/source/developing/workspaces.rst
@@ -4,9 +4,9 @@
 
 Workspaces
 ==========
-
 In this section we will cover the use of BuildStream's workspaces feature when developing on a
 BuildStream project.
+
 .. note::
 
    This example is distributed with BuildStream
@@ -25,7 +25,6 @@ make changes to the source code of Buildstream elements by making use of
 BuildStream's workspace command.
 
 
-
 Opening a workspace
 -------------------
 First we need to open a workspace, we can do this by running
@@ -37,12 +36,12 @@ This command has created the workspace_hello directory in which you can see
 the source for the hello.bst element, i.e. hello.c and the corresponding
 makefile.
 
-
 You can view existing workspaces using
 
 .. raw:: html
    :file: ../sessions/developing-workspace-list.html
 
+
 Making code changes
 -------------------
 Let's say we want to alter the message printed when the hello command is run.
@@ -66,6 +65,7 @@ This gives us the new message we changed in hello.c.
 From this point we have several options. If the source is under version control
 we can commit our changes and push them to the remote repository.
 
+
 Closing your workspace
 ----------------------
 If we want to close the workspace and come back to our changes later, we can
diff --git a/doc/source/format_declaring.rst b/doc/source/format_declaring.rst
index 38c107c1b38bc76b0ebd181630d58d8a96afa96b..e1ad4f7201b8111093e30107fad607b1164f62fc 100644
--- a/doc/source/format_declaring.rst
+++ b/doc/source/format_declaring.rst
@@ -484,3 +484,25 @@ dependency and that all referenced variables are declared, the following is fine
      install-commands:
      - |
        %{make-install} RELEASE_TEXT="%{release-text}"
+
+
+Variables declared by BuildStream
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+BuildStream declares a set of :ref:`builtin <project_builtin_defaults>`
+variables that may be overridden. In addition, the following
+read-only variables are also dynamically declared by BuildStream:
+
+* ``element-name``
+
+  The name of the element being processed (e.g. ``base/alpine.bst``).
+
+* ``project-name``
+
+  The name of the project in which BuildStream is being used.
+
+* ``max-jobs``
+
+  The maximum number of parallel build processes to use within a given
+  build. Support for this is conditional on the element type and the
+  build system used (for example, any element using ``make`` can
+  implement this).
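+
+As a brief illustration of how such a variable might be used, an element could
+pass ``%{max-jobs}`` through to its build system. The element kind and commands
+below are only a sketch, not a prescribed pattern:
+
+.. code:: yaml
+
+  kind: manual
+
+  config:
+    build-commands:
+    - make -j %{max-jobs}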
diff --git a/doc/source/format_project.rst b/doc/source/format_project.rst
index b43e67005e96bf25738180af6b8dcf271f8a7876..469f99747873dbec04c618e77db603b9569a93be 100644
--- a/doc/source/format_project.rst
+++ b/doc/source/format_project.rst
@@ -57,7 +57,7 @@ the ``format-version`` field, e.g.:
   # The minimum base BuildStream format
   format-version: 0
 
-BuildStream will increment it's core YAML format version at least once
+BuildStream will increment its core YAML format version at least once
 in any given minor point release where the format has been extended
 to support a new feature.
 
@@ -204,6 +204,24 @@ with an artifact share.
 You can also specify a list of caches here; earlier entries in the list
 will have higher priority than later ones.
 
+Remote execution
+~~~~~~~~~~~~~~~~
+BuildStream supports remote execution using the Google Remote Execution API
+(REAPI). A description of how remote execution works is beyond the scope
+of this document, but you can specify a remote server complying with the REAPI
+using the ``remote-execution`` option:
+
+.. code:: yaml
+
+  remote-execution:
+
+    # A url defining a remote execution server
+    url: http://buildserver.example.com:50051
+
+The URL should contain a hostname and port separated by ':'. Only plain HTTP is
+currently supported (no HTTPS).
+
+The Remote Execution API can be found at https://github.com/bazelbuild/remote-apis.
 
 .. _project_essentials_mirrors:
 
diff --git a/doc/source/index.rst b/doc/source/index.rst
index ac4587f10c3b001e29ce767966c1431d2446ddbf..494e90c67212e2af52b5f32cb031fb8c1a91235b 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -13,20 +13,13 @@ They begin with a basic introduction to BuildStream, background
 information on basic concepts, and a guide to the BuildStream command line interface.
 Later sections provide detailed information on BuildStream internals.
 
-
 .. toctree::
    :maxdepth: 1
 
    main_about
-   main_install
    main_using
    main_core
-   HACKING
-
+   CONTRIBUTING
 
-Resources
----------
-* GitLab repository: https://gitlab.com/BuildStream/buildstream
-* Bug Tracking: https://gitlab.com/BuildStream/buildstream/issues
-* Mailing list: https://mail.gnome.org/mailman/listinfo/buildstream-list
-* IRC Channel: irc://irc.gnome.org/#buildstream
+For any other information, including `how to install BuildStream <https://buildstream.build/install.html>`_,
+refer to `the BuildStream website <https://buildstream.build>`_.
diff --git a/doc/source/install_docker.rst b/doc/source/install_docker.rst
deleted file mode 100644
index c1ca7a2984e36961bde0db1ba61a005cfbe5bcf9..0000000000000000000000000000000000000000
--- a/doc/source/install_docker.rst
+++ /dev/null
@@ -1,45 +0,0 @@
-
-
-.. _docker:
-
-BuildStream inside Docker
--------------------------
-If your system cannot provide the base system requirements for BuildStream, then it is possible to run buildstream within a Docker image.
-
-The BuildStream project provides
-`Docker images <https://hub.docker.com/r/buildstream/buildstream-fedora>`_
-containing BuildStream and its dependencies.
-This gives you an easy way to get started using BuildStream on any Unix-like
-platform where Docker is available, including Mac OS X.
-
-We recommend using the
-`bst-here wrapper script <https://gitlab.com/BuildStream/buildstream/blob/master/contrib/bst-here>`_
-which automates the necessary container setup. You can download it and make
-it executable like this:
-
-.. code:: bash
-
-  mkdir -p ~/.local/bin
-  curl --get https://gitlab.com/BuildStream/buildstream/raw/master/contrib/bst-here > ~/.local/bin/bst-here
-  chmod +x ~/.local/bin/bst-here
-
-Check if ``~/.local/bin`` appears in your PATH environment variable -- if it
-doesn't, you should
-`edit your ~/.profile so that it does <https://stackoverflow.com/questions/14637979/>`_.
-
-Once the script is available in your PATH, you can run ``bst-here`` to open a
-shell session inside a new container based off the latest version of the
-buildstream-fedora Docker image. The current working directory will be mounted
-inside the container at ``/src``.
-
-You can also run individual BuildStream commands as ``bst-here COMMAND``. For
-example: ``bst-here show systems/my-system.bst``. Note that BuildStream won't
-be able to integrate with Bash tab-completion if you invoke it in this way.
-
-Two Docker volumes are set up by the ``bst-here`` script:
-
- * ``buildstream-cache --`` mounted at ``~/.cache/buildstream``
- * ``buildstream-config --`` mounted at ``~/.config/``
-
-These are necessary so that your BuildStream cache and configuration files
-persist between invocations of ``bst-here``.
diff --git a/doc/source/install_linux_distro.rst b/doc/source/install_linux_distro.rst
deleted file mode 100644
index 0d21bfd463d7cd51424a0bf39f24788ae843781f..0000000000000000000000000000000000000000
--- a/doc/source/install_linux_distro.rst
+++ /dev/null
@@ -1,248 +0,0 @@
-
-.. _install:
-
-Installing BuildStream on a Linux distro
-========================================
-BuildStream requires the following base system requirements:
-
-* python3 >= 3.5
-* bubblewrap >= 0.1.2
-* fuse2
-
-BuildStream also depends on the host tools for the :mod:`Source <buildstream.source>` plugins.
-Refer to the respective :ref:`source plugin <plugins_sources>` documentation for host tool
-requirements of specific plugins.
-
-The default plugins with extra host dependencies are:
-
-* bzr
-* deb
-* git
-* ostree
-* patch
-* tar
-
-If you intend to push built artifacts to a remote artifact server,
-which requires special permissions, you will also need:
-
-* ssh
-
-
-Installing from source (recommended)
-------------------------------------
-Until BuildStream is available in your distro, you will need to install
-it yourself from the `git repository <https://gitlab.com/BuildStream/buildstream.git>`_
-using python's ``pip`` package manager.
-
-For the purpose of installing BuildStream while there are no distro packages,
-you will additionally need:
-
-* pip for python3 (only required for setup)
-* Python 3 development libraries and headers
-* git (to checkout buildstream)
-
-
-Installing dependencies
-~~~~~~~~~~~~~~~~~~~~~~~
-
-
-Arch Linux
-++++++++++
-Install the dependencies with::
-
-  sudo pacman -S \
-      python fuse2 bubblewrap \
-      python-pip git
-
-For the default plugins::
-
-  sudo pacman -S \
-      bzr git lzip ostree patch python-gobject
-
-
-The package *python-arpy* is required by the deb source plugin. This is not
-obtainable via `pacman`, you must get *python-arpy* from AUR:
-https://aur.archlinux.org/packages/python-arpy/
-
-To install::
-
-  wget https://aur.archlinux.org/cgit/aur.git/snapshot/python-arpy.tar.gz
-  tar -xvf python-arpy.tar.gz
-  cd python-arpy
-  makepkg -si
-
-Debian
-++++++
-Install the dependencies with::
-
-  sudo apt-get install \
-      python3 fuse bubblewrap \
-      python3-pip python3-dev git
-
-For the default plugins:
-
-Stretch
-^^^^^^^
-With stretch, you first need to ensure that you have the backports repository
-setup as described `here <https://backports.debian.org/Instructions/>`_
-
-By adding the following line to your sources.list::
-
-  deb http://deb.debian.org/debian stretch-backports main
-
-And then running::
-
-  sudo apt update
-
-At this point you should be able to get the system requirements for the default plugins with::
-
-  sudo apt install \
-      bzr git lzip patch python3-arpy python3-gi
-  sudo apt install -t stretch-backports \
-      gir1.2-ostree-1.0 ostree
-
-Buster or Sid
-^^^^^^^^^^^^^
-For debian unstable or testing, only the following line should be enough
-to get the system requirements for the default plugins installed::
-
-  sudo apt-get install \
-      lzip gir1.2-ostree-1.0 git bzr ostree patch python3-arpy python3-gi
-
-
-Fedora
-++++++
-For recent fedora systems, the following line should get you the system
-requirements you need::
-
-  dnf install -y \
-      python3 fuse bubblewrap \
-      python3-pip python3-devel git
-
-For the default plugins::
-
-  dnf install -y \
-      bzr git lzip patch ostree python3-gobject
-  pip3 install --user arpy
-
-
-Ubuntu
-++++++
-
-Ubuntu 18.04 LTS or later
-^^^^^^^^^^^^^^^^^^^^^^^^^
-Install the dependencies with::
-
-  sudo apt install \
-      python3 fuse bubblewrap \
-      python3-pip python3-dev git
-
-For the default plugins::
-
-  sudo apt install \
-      bzr gir1.2-ostree-1.0 git lzip ostree patch python3-arpy python3-gi
-
-Ubuntu 16.04 LTS
-^^^^^^^^^^^^^^^^
-On Ubuntu 16.04, neither `bubblewrap <https://github.com/projectatomic/bubblewrap/>`_
-or `ostree <https://github.com/ostreedev/ostree>`_ are available in the official repositories.
-You will need to install them in whichever way you see fit. Refer the the upstream documentation
-for advice on this.
-
-
-Installing
-~~~~~~~~~~
-Once you have the base system dependencies, you can clone the BuildStream
-git repository and install it as a regular user::
-
-  git clone https://gitlab.com/BuildStream/buildstream.git
-  cd buildstream
-  pip3 install --user -e .
-
-This will install buildstream's pure python dependencies into
-your user's homedir in ``~/.local`` and will run BuildStream directly
-from the git checkout directory.
-
-Keep following the instructions below to ensure that the ``bst``
-command is in your ``PATH`` and to enable bash completions for it.
-
-.. note::
-
-   We recommend the ``-e`` option because you can upgrade your
-   installation by simply updating the checked out git repository.
-
-   If you want a full installation that is not linked to your
-   git checkout, just omit the ``-e`` option from the above commands.
-
-
-Adjust PATH
-~~~~~~~~~~~
-Since BuildStream is now installed under your local user's install directories,
-you need to ensure that ``PATH`` is adjusted.
-
-A regular way to do this is to add the following line to the end of your ``~/.bashrc``::
-
-  export PATH="${PATH}:${HOME}/.local/bin"
-
-.. note::
-
-   You will have to restart your terminal in order for these changes to take effect.
-
-
-Bash completions
-~~~~~~~~~~~~~~~~
-Bash completions are supported by sourcing the ``buildstream/data/bst``
-script found in the BuildStream repository. On many systems this script
-can be installed into a completions directory but when installing BuildStream
-without a package manager this is not an option.
-
-To enable completions for an installation of BuildStream you
-installed yourself from git, just append the script verbatim
-to your ``~/.bash_completion``:
-
-.. literalinclude:: ../../buildstream/data/bst
-   :language: yaml
-
-
-Upgrading BuildStream
-~~~~~~~~~~~~~~~~~~~~~
-Assuming you have followed the default instructions above, all
-you need to do to upgrade BuildStream is to update your local git
-checkout::
-
-  cd /path/to/buildstream
-  git pull --rebase
-
-If you did not specify the ``-e`` option at install time or the dependancies
-have changed, you will need to cleanly reinstall BuildStream::
-
-  pip3 uninstall buildstream
-  cd /path/to/buildstream
-  git pull --rebase
-  pip3 install --user .
-
-
-Installing from distro packages
--------------------------------
-
-
-Arch Linux
-~~~~~~~~~~
-Packages for Arch exist in `AUR <https://wiki.archlinux.org/index.php/Arch_User_Repository#Installing_packages>`_.
-Two different package versions are available:
-
-* Latest release: `buildstream <https://aur.archlinux.org/packages/buildstream>`_
-* Latest development snapshot: `buildstream-git <https://aur.archlinux.org/packages/buildstream-git>`_
-
-
-Fedora
-~~~~~~
-
-BuildStream is not yet in the official Fedora repositories, but you can
-install it from a Copr::
-
-  sudo dnf copr enable bochecha/buildstream
-  sudo dnf install buildstream
-
-Optionally, install the ``buildstream-docs`` package to have the BuildStream
-documentation in Devhelp or GNOME Builder.
diff --git a/doc/source/main_install.rst b/doc/source/main_install.rst
deleted file mode 100644
index 1a48959033287f38220ff32878c41991f1c9440c..0000000000000000000000000000000000000000
--- a/doc/source/main_install.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-Install
-=======
-This section covers how to install BuildStream onto your machine, how to run
-BuildStream inside a docker image and also how to configure an artifact server.
-
-.. note::
-
-   BuildStream is not currently supported natively on macOS and Windows. Windows
-   and macOS users should refer to :ref:`docker`.
-
-.. toctree::
-   :maxdepth: 2
-
-   install_linux_distro
-   install_docker
-   install_artifacts
diff --git a/doc/source/main_using.rst b/doc/source/main_using.rst
index 65786e9ebc3fb4dd9713265e4f206db14c888b52..d56023e7469bfe75e4dcf411b9b39742b83bb6e8 100644
--- a/doc/source/main_using.rst
+++ b/doc/source/main_using.rst
@@ -1,5 +1,7 @@
 
 
+.. _using:
+
 Using
 =====
 This section includes user facing documentation including tutorials,
@@ -15,3 +17,4 @@ guides and information on user preferences and configuration.
    using_examples
    using_config
    using_commands
+   using_configuring_artifact_server
diff --git a/doc/source/sessions/developing-build-after-changes.html b/doc/source/sessions-stored/developing-build-after-changes.html
similarity index 100%
rename from doc/source/sessions/developing-build-after-changes.html
rename to doc/source/sessions-stored/developing-build-after-changes.html
diff --git a/doc/source/sessions/developing-close-workspace.html b/doc/source/sessions-stored/developing-close-workspace.html
similarity index 100%
rename from doc/source/sessions/developing-close-workspace.html
rename to doc/source/sessions-stored/developing-close-workspace.html
diff --git a/doc/source/sessions/developing-discard-workspace.html b/doc/source/sessions-stored/developing-discard-workspace.html
similarity index 100%
rename from doc/source/sessions/developing-discard-workspace.html
rename to doc/source/sessions-stored/developing-discard-workspace.html
diff --git a/doc/source/sessions/developing-reopen-workspace.html b/doc/source/sessions-stored/developing-reopen-workspace.html
similarity index 100%
rename from doc/source/sessions/developing-reopen-workspace.html
rename to doc/source/sessions-stored/developing-reopen-workspace.html
diff --git a/doc/source/sessions/developing-reset-workspace.html b/doc/source/sessions-stored/developing-reset-workspace.html
similarity index 100%
rename from doc/source/sessions/developing-reset-workspace.html
rename to doc/source/sessions-stored/developing-reset-workspace.html
diff --git a/doc/source/sessions/developing-shell-after-changes.html b/doc/source/sessions-stored/developing-shell-after-changes.html
similarity index 100%
rename from doc/source/sessions/developing-shell-after-changes.html
rename to doc/source/sessions-stored/developing-shell-after-changes.html
diff --git a/doc/source/sessions/developing-workspace-list.html b/doc/source/sessions-stored/developing-workspace-list.html
similarity index 100%
rename from doc/source/sessions/developing-workspace-list.html
rename to doc/source/sessions-stored/developing-workspace-list.html
diff --git a/doc/source/sessions/developing-workspace-open.html b/doc/source/sessions-stored/developing-workspace-open.html
similarity index 100%
rename from doc/source/sessions/developing-workspace-open.html
rename to doc/source/sessions-stored/developing-workspace-open.html
diff --git a/doc/source/tutorial/autotools.rst b/doc/source/tutorial/autotools.rst
index 8d24ca59c5fccf60d78181e9f9eabcbe583f1429..ea2835838c91c9f7d82ff0fa8ba02a749e2bee88 100644
--- a/doc/source/tutorial/autotools.rst
+++ b/doc/source/tutorial/autotools.rst
@@ -88,7 +88,7 @@ As :ref:`the documentation <format_composition>` mentions:
 The variable we needed to override was ``command-subdir``, which is an
 automatic variable provided by the :mod:`BuildElement <buildstream.buildelement>`
 abstract class. This variable simply instructs the :mod:`BuildElement <buildstream.buildelement>`
-in which subdirectory of the ``%{build-root}`` to run it's commands in.
+in which subdirectory of the ``%{build-root}`` to run its commands.
 
 One can always display the resolved set of variables for a given
 element's configuration using :ref:`bst show <invoking_show>`:
diff --git a/doc/source/tutorial/first-project.rst b/doc/source/tutorial/first-project.rst
index 1791b198b7518300e9b1562bee06ea3b20db1e06..18df85bb739a3cfadc2b8ce6a9af6d2bf8f03006 100644
--- a/doc/source/tutorial/first-project.rst
+++ b/doc/source/tutorial/first-project.rst
@@ -73,7 +73,7 @@ The element
 ~~~~~~~~~~~
 The :mod:`import <elements.import>` element can be used to simply add content
 directly to the output artifacts. In this case, it simply takes the ``hello.world`` file
-provided by it's source and stages it directly to the artifact output root.
+provided by its source and stages it directly to the artifact output root.
 
 .. tip::
 
diff --git a/doc/source/tutorial/integration-commands.rst b/doc/source/tutorial/integration-commands.rst
index 8d80eb9a2795dac26ae8e15d599bf4b0f867d400..ead5be8b29407edf91cc3a289533b3ade251270e 100644
--- a/doc/source/tutorial/integration-commands.rst
+++ b/doc/source/tutorial/integration-commands.rst
@@ -7,11 +7,11 @@ performed at installation time, otherwise it will not run properly.
 
 This is especially true in cases where a daemon or library interoperates
 with third party extensions and needs to maintain a system wide cache whenever
-it's extensions are installed or removed; system wide font caches are an example
+its extensions are installed or removed; system wide font caches are an example
 of this.
 
 In these cases we use :ref:`integration commands <public_integration>` to
-ensure that a runtime is ready to run after all of it's components have been *staged*.
+ensure that a runtime is ready to run after all of its components have been *staged*.
 
 .. note::
 
@@ -60,10 +60,10 @@ we've used any :ref:`builtin public data <public_builtin>`.
 
 Public data is a free form portion of an element's configuration and
 is not necessarily understood by the element on which it is declared, public
-data is intended to be read by it's reverse dependency elements.
+data is intended to be read by its reverse dependency elements.
 
 This allows annotations on some elements to inform elements later in
-the dependency chain about details of it's artifact, or to suggest how
+the dependency chain about details of its artifact, or to suggest how
 it should be processed.
 
 
diff --git a/doc/source/install_artifacts.rst b/doc/source/using_configuring_artifact_server.rst
similarity index 84%
rename from doc/source/install_artifacts.rst
rename to doc/source/using_configuring_artifact_server.rst
index 38c81ed3814cdcf561d647deb8a0e55a3cf055f4..813952cb368fd9eb4e7f37fff08f370e6a769f5c 100644
--- a/doc/source/install_artifacts.rst
+++ b/doc/source/using_configuring_artifact_server.rst
@@ -2,8 +2,8 @@
 
 .. _artifacts:
 
-Installing an artifact server
-=============================
+Configuring an artifact server
+==============================
 BuildStream caches the results of builds in a local artifact cache, and will
 avoid building an element if there is a suitable build already present in the
 local artifact cache.
@@ -40,7 +40,7 @@ them in a specific order:
 
 When an artifact is built locally, BuildStream will try to push it to all the
 caches which have the ``push: true`` flag set. You can also manually push
-artifacts to a specific cache using the :ref:`bst pull command <commands>`.
+artifacts to a specific cache using the :ref:`bst push command <invoking_push>`.
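+
+For example, to push the artifact of a single element by hand, something like
+the following could be used (a minimal sketch; the ``--remote`` option is
+assumed here to be available for selecting a specific cache):
+
+.. code:: bash
+
+  bst push --remote https://artifacts.com:11002 target.bst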
 
 Artifacts are identified using the element's :ref:`cache key <cachekeys>` so
 the builds provided by a cache should be interchangeable with those provided
@@ -72,14 +72,14 @@ Installing the server
 ~~~~~~~~~~~~~~~~~~~~~
 You will also need to install BuildStream on the artifact server in order
 to receive uploaded artifacts over ssh. Follow the instructions for installing
-BuildStream :ref:`here <install>`
+BuildStream `here <https://buildstream.build/install.html>`_.
 
 When installing BuildStream on the artifact server, it must be installed
 in a system wide location, with ``pip3 install .`` in the BuildStream
 checkout directory.
 
 Otherwise, some tinkering is required to ensure BuildStream is available
-in ``PATH`` when it's companion ``bst-artifact-server`` program is run
+in ``PATH`` when its companion ``bst-artifact-server`` program is run
 remotely.
 
 You can install only the artifact server companion program without
@@ -110,6 +110,15 @@ You can also use a key pair obtained from a trusted certificate authority instea
 
     openssl req -new -newkey rsa:4096 -x509 -sha256 -days 3650 -nodes -batch -subj "/CN=artifacts.com" -out server.crt -keyout server.key
 
+.. note::
+
+    In the ``-subj "/CN=<foo>"`` argument, ``/CN`` is the *certificate common name*,
+    and as such ``<foo>`` should be the public hostname of the server. IP addresses will
+    **not** provide you with working authentication.
+
+    In addition, ensure that the server's hostname can be resolved by the client.
+    You may need to add a line of the form ``<ip address> <hostname>``
+    (for example, ``192.0.2.10 artifacts.com``) to your ``/etc/hosts`` file.
 
 Authenticating users
 ~~~~~~~~~~~~~~~~~~~~
@@ -161,13 +170,13 @@ Below are two examples of how to run the cache server as a systemd service, one
 
    [Service]
    Environment="LC_ALL=C.UTF-8"
-   ExecStart=/usr/local/bin/bst-artifact-server --port 11001 --server-key {{certs_path}}/privkey.pem --
-   server-cert {{certs_path}}/fullchain.pem {{artifacts_path}}
+   ExecStart=/usr/local/bin/bst-artifact-server --port 11001 --server-key {{certs_path}}/server.key --server-cert {{certs_path}}/server.crt {{artifacts_path}}
    User=artifacts
 
    [Install]
    WantedBy=multi-user.target
 
+.. code:: ini
 
    #
    # Pull/Push
@@ -178,9 +187,7 @@ Below are two examples of how to run the cache server as a systemd service, one
 
    [Service]
    Environment="LC_ALL=C.UTF-8"
-   ExecStart=/usr/local/bin/bst-artifact-server --port 11002 --server-key {{certs_path}}/privkey.pem --
-   server-cert {{certs_path}}/fullchain.pem --client-certs /home/artifacts/authorized.crt --enable-push /
-   {{artifacts_path}}
+   ExecStart=/usr/local/bin/bst-artifact-server --port 11002 --server-key {{certs_path}}/server.key --server-cert {{certs_path}}/server.crt --client-certs {{certs_path}}/authorized.crt --enable-push {{artifacts_path}}
    User=artifacts
 
    [Install]
@@ -188,11 +195,16 @@ Below are two examples of how to run the cache server as a systemd service, one
 
 Here we define when systemd should start the service (after the networking stack has been started), and how to run the cache server with the desired configuration, under the artifacts user. The {{ }} placeholders denote where you should change these files to point to your desired locations.
 
+For more information on systemd services, see
+`Creating Systemd Service Files <https://www.devdungeon.com/content/creating-systemd-service-files>`_.
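+
+Assuming the pull-only unit above were saved as
+``/etc/systemd/system/bst-artifact-server-pull.service`` (the file name here is
+just an example), it could be enabled and started like this:
+
+.. code:: bash
+
+  sudo systemctl daemon-reload
+  sudo systemctl enable --now bst-artifact-server-pull.service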
+
 User configuration
 ~~~~~~~~~~~~~~~~~~
 The user configuration for artifacts is documented with the rest
 of the :ref:`user configuration documentation <user_config>`.
 
+Note that for self-signed certificates, the public key fields are mandatory.
+
 Assuming you have the same setup used in this document, and that your
 host is reachable on the internet as ``artifacts.com`` (for example),
 then a user can use the following user configuration:
@@ -230,3 +242,8 @@ Pull and push:
      client-cert: client.crt
 
      push: true
+
+.. note::
+
+    Equivalent statements can be declared in a project's configuration file
+    (the ``project.conf``).
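+
+As a minimal sketch, and assuming the same ``artifacts.com`` setup used in this
+document, the corresponding pull-only declaration in ``project.conf`` might
+look like this:
+
+.. code:: yaml
+
+  artifacts:
+    url: https://artifacts.com:11001
+    server-cert: server.crt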
diff --git a/doc/source/using_examples.rst b/doc/source/using_examples.rst
index 622b09e32c5f448cce01af6c51fbf8c3fc72a8de..18b15c7114f2201ae0257e5edd4e25070c97a708 100644
--- a/doc/source/using_examples.rst
+++ b/doc/source/using_examples.rst
@@ -1,4 +1,7 @@
 
+
+.. _examples:
+
 Examples
 ========
 This page contains documentation for real examples of BuildStream projects,
diff --git a/doc/source/using_tutorial.rst b/doc/source/using_tutorial.rst
index 190c94c8da3534efc59eb261c46a8f848faea0f5..297f2341c0bc9d874f6ed9ba6b75c0b37a7651a1 100644
--- a/doc/source/using_tutorial.rst
+++ b/doc/source/using_tutorial.rst
@@ -1,4 +1,7 @@
 
+
+.. _tutorial:
+
 Tutorial
 ========
 This is a step-by-step walkthrough meant to help the user quickly get
diff --git a/setup.cfg b/setup.cfg
index 7a27a2f58ada8f21a1945da21ae215d44c77bbc5..7d40c3d254ad4b10728eafa28ab54ed14613d489 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -11,7 +11,7 @@ parentdir_prefix = BuildStream-
 test=pytest
 
 [tool:pytest]
-addopts = --verbose --basetemp ./tmp --pep8 --pylint --pylint-rcfile=.pylintrc --cov=buildstream --cov-config .coveragerc
+addopts = --verbose --basetemp ./tmp --pep8 --pylint --pylint-rcfile=.pylintrc --cov=buildstream --cov-config .coveragerc --durations=20
 norecursedirs = tests/integration/project integration-cache tmp __pycache__ .eggs
 python_files = tests/*/*.py
 pep8maxlinelength = 119
diff --git a/setup.py b/setup.py
index f64186f13ef6694fd9f31abc775f9afdaf8feed6..2f7247aeb5d4f275223631ceac24595b6061ca1d 100755
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,14 @@ import subprocess
 import sys
 import versioneer
 
-if sys.version_info[0] != 3 or sys.version_info[1] < 5:
+
+##################################################################
+# Python requirements
+##################################################################
+REQUIRED_PYTHON_MAJOR = 3
+REQUIRED_PYTHON_MINOR = 5
+
+if sys.version_info[0] != REQUIRED_PYTHON_MAJOR or sys.version_info[1] < REQUIRED_PYTHON_MINOR:
     print("BuildStream requires Python >= 3.5")
     sys.exit(1)
 
@@ -47,12 +54,13 @@ REQUIRED_BWRAP_MINOR = 1
 REQUIRED_BWRAP_PATCH = 2
 
 
-def exit_bwrap(reason):
+def warn_bwrap(reason):
     print(reason +
-          "\nBuildStream requires Bubblewrap (bwrap) for"
-          " sandboxing the build environment. Install it using your package manager"
-          " (usually bwrap or bubblewrap)")
-    sys.exit(1)
+          "\nBuildStream requires Bubblewrap (bwrap {}.{}.{} or better),"
+          " during local builds, for"
+          " sandboxing the build environment.\nInstall it using your package manager"
+          " (usually bwrap or bubblewrap) otherwise you will be limited to"
+          " remote builds only.".format(REQUIRED_BWRAP_MAJOR, REQUIRED_BWRAP_MINOR, REQUIRED_BWRAP_PATCH))
 
 
 def bwrap_too_old(major, minor, patch):
@@ -69,18 +77,19 @@ def bwrap_too_old(major, minor, patch):
         return False
 
 
-def assert_bwrap():
+def check_for_bwrap():
     platform = os.environ.get('BST_FORCE_BACKEND', '') or sys.platform
     if platform.startswith('linux'):
         bwrap_path = shutil.which('bwrap')
         if not bwrap_path:
-            exit_bwrap("Bubblewrap not found")
+            warn_bwrap("Bubblewrap not found")
+            return
 
         version_bytes = subprocess.check_output([bwrap_path, "--version"]).split()[1]
         version_string = str(version_bytes, "utf-8")
         major, minor, patch = map(int, version_string.split("."))
         if bwrap_too_old(major, minor, patch):
-            exit_bwrap("Bubblewrap too old")
+            warn_bwrap("Bubblewrap too old")
 
 
 ###########################################
@@ -119,7 +128,7 @@ bst_install_entry_points = {
 }
 
 if not os.environ.get('BST_ARTIFACTS_ONLY', ''):
-    assert_bwrap()
+    check_for_bwrap()
     bst_install_entry_points['console_scripts'] += [
         'bst = buildstream._frontend:cli'
     ]
@@ -242,11 +251,29 @@ setup(name='BuildStream',
 
       author='BuildStream Developers',
       author_email='buildstream-list@gnome.org',
+      classifiers=[
+          'Environment :: Console',
+          'Intended Audience :: Developers',
+          'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
+          'Operating System :: POSIX',
+          'Programming Language :: Python :: 3',
+          'Programming Language :: Python :: 3.5',
+          'Programming Language :: Python :: 3.6',
+          'Programming Language :: Python :: 3.7',
+          'Topic :: Software Development :: Build Tools'
+      ],
       description='A framework for modelling build pipelines in YAML',
       license='LGPL',
       long_description=long_description,
       long_description_content_type='text/x-rst; charset=UTF-8',
-      url='https://gitlab.com/BuildStream/buildstream',
+      url='https://buildstream.build',
+      project_urls={
+          'Source': 'https://gitlab.com/BuildStream/buildstream',
+          'Documentation': 'https://docs.buildstream.build',
+          'Tracker': 'https://gitlab.com/BuildStream/buildstream/issues',
+          'Mailing List': 'https://mail.gnome.org/mailman/listinfo/buildstream-list'
+      },
+      python_requires='~={}.{}'.format(REQUIRED_PYTHON_MAJOR, REQUIRED_PYTHON_MINOR),
       packages=find_packages(exclude=('tests', 'tests.*')),
       package_data={'buildstream': ['plugins/*/*.py', 'plugins/*/*.yaml',
                                     'data/*.yaml', 'data/*.sh.in']},
@@ -273,7 +300,6 @@ setup(name='BuildStream',
           'ruamel.yaml < 0.15.52',
           'pluginbase',
           'Click',
-          'blessings',
           'jinja2 >= 2.10',
           'protobuf >= 3.5',
           'grpcio >= 1.10',
diff --git a/tests/artifactcache/config.py b/tests/artifactcache/config.py
index f594747085c4eb455a6217516f9433a35d17a15c..df40d1073fcb83ffb32e998e8130059c38147f23 100644
--- a/tests/artifactcache/config.py
+++ b/tests/artifactcache/config.py
@@ -9,8 +9,12 @@ from buildstream._context import Context
 from buildstream._project import Project
 from buildstream.utils import _deduplicate
 from buildstream import _yaml
+from buildstream._exceptions import ErrorDomain, LoadErrorReason
 
+from tests.testutils.runcli import cli
 
+
+DATA_DIR = os.path.dirname(os.path.realpath(__file__))
 cache1 = ArtifactCacheSpec(url='https://example.com/cache1', push=True)
 cache2 = ArtifactCacheSpec(url='https://example.com/cache2', push=False)
 cache3 = ArtifactCacheSpec(url='https://example.com/cache3', push=False)
@@ -106,3 +110,33 @@ def test_artifact_cache_precedence(tmpdir, override_caches, project_caches, user
     # Verify that it was correctly read.
     expected_cache_specs = list(_deduplicate(itertools.chain(override_caches, project_caches, user_caches)))
     assert parsed_cache_specs == expected_cache_specs
+
+
+# Assert that if either the client key or client cert is specified
+# without specifying its counterpart, we get a comprehensive LoadError
+# instead of an unhandled exception.
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize('config_key, config_value', [
+    ('client-cert', 'client.crt'),
+    ('client-key', 'client.key')
+])
+def test_missing_certs(cli, datafiles, config_key, config_value):
+    project = os.path.join(datafiles.dirname, datafiles.basename, 'missing-certs')
+
+    project_conf = {
+        'name': 'test',
+
+        'artifacts': {
+            'url': 'https://cache.example.com:12345',
+            'push': 'true',
+            config_key: config_value
+        }
+    }
+    project_conf_file = os.path.join(project, 'project.conf')
+    _yaml.dump(project_conf, project_conf_file)
+
+    # Use `pull` here to ensure we try to initialize the remotes, triggering the error
+    #
+    # This does not happen for a simple `bst show`.
+    result = cli.run(project=project, args=['pull', 'element.bst'])
+    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
diff --git a/tests/artifactcache/expiry.py b/tests/artifactcache/expiry.py
index c12f32879f767a397dc7100bc40c27007cdfb2a6..e0b0bcf7410864c97e4725a9c54fff3a73da1284 100644
--- a/tests/artifactcache/expiry.py
+++ b/tests/artifactcache/expiry.py
@@ -25,7 +25,7 @@ import pytest
 from buildstream import _yaml
 from buildstream._exceptions import ErrorDomain, LoadErrorReason
 
-from tests.testutils import cli, create_element_size, wait_for_cache_granularity
+from tests.testutils import cli, create_element_size, update_element_size, wait_for_cache_granularity
 
 
 DATA_DIR = os.path.join(
@@ -94,6 +94,7 @@ def test_artifact_too_large(cli, datafiles, tmpdir, size):
     create_element_size('target.bst', project, element_path, [], size)
     res = cli.run(project=project, args=['build', 'target.bst'])
     res.assert_main_error(ErrorDomain.STREAM, None)
+    res.assert_task_error(ErrorDomain.ARTIFACT, 'cache-too-full')
 
 
 @pytest.mark.datafiles(DATA_DIR)
@@ -198,47 +199,107 @@ def test_keep_dependencies(cli, datafiles, tmpdir):
 
 # Assert that we never delete a dependency required for a build tree
 @pytest.mark.datafiles(DATA_DIR)
-def test_never_delete_dependencies(cli, datafiles, tmpdir):
+def test_never_delete_required(cli, datafiles, tmpdir):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     element_path = 'elements'
 
     cli.configure({
         'cache': {
             'quota': 10000000
+        },
+        'scheduler': {
+            'builders': 1
         }
     })
 
-    # Create a build tree
-    create_element_size('dependency.bst', project,
-                        element_path, [], 8000000)
-    create_element_size('related.bst', project,
-                        element_path, ['dependency.bst'], 8000000)
-    create_element_size('target.bst', project,
-                        element_path, ['related.bst'], 8000000)
-    create_element_size('target2.bst', project,
-                        element_path, ['target.bst'], 8000000)
+    # Create a linear build tree
+    create_element_size('dep1.bst', project, element_path, [], 8000000)
+    create_element_size('dep2.bst', project, element_path, ['dep1.bst'], 8000000)
+    create_element_size('dep3.bst', project, element_path, ['dep2.bst'], 8000000)
+    create_element_size('target.bst', project, element_path, ['dep3.bst'], 8000000)
 
     # We try to build this pipeline, but it's too big for the
     # cache. Since all elements are required, the build should fail.
-    res = cli.run(project=project, args=['build', 'target2.bst'])
+    res = cli.run(project=project, args=['build', 'target.bst'])
     res.assert_main_error(ErrorDomain.STREAM, None)
+    res.assert_task_error(ErrorDomain.ARTIFACT, 'cache-too-full')
 
-    assert cli.get_element_state(project, 'dependency.bst') == 'cached'
+    # Only the first artifact fits in the cache, but we expect
+    # that the first *two* artifacts will be cached.
+    #
+    # This is because after caching the first artifact we must
+    # proceed to build the next artifact, and we cannot really
+    # know how large an artifact will be until we try to cache it.
+    #
+    # In this case, we deem it more acceptable to not delete an
+    # artifact which caused the cache to outgrow the quota.
+    #
+    # Note that this test only works because we have forced
+    # the configuration to build one element at a time; in real
+    # life there may potentially be one cached artifact per builder
+    # exceeding the quota.
+    #
+    assert cli.get_element_state(project, 'dep1.bst') == 'cached'
+    assert cli.get_element_state(project, 'dep2.bst') == 'cached'
+
+    assert cli.get_element_state(project, 'dep3.bst') != 'cached'
+    assert cli.get_element_state(project, 'target.bst') != 'cached'
+
+
+# Assert that we never delete a dependency required for a build tree,
+# even when the artifact cache was previously populated with
+# artifacts we do not require, and the new build is run with dynamic tracking.
+#
+@pytest.mark.datafiles(DATA_DIR)
+def test_never_delete_required_track(cli, datafiles, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_path = 'elements'
+
+    cli.configure({
+        'cache': {
+            'quota': 10000000
+        },
+        'scheduler': {
+            'builders': 1
+        }
+    })
+
+    # Create a linear build tree
+    repo_dep1 = create_element_size('dep1.bst', project, element_path, [], 2000000)
+    repo_dep2 = create_element_size('dep2.bst', project, element_path, ['dep1.bst'], 2000000)
+    repo_dep3 = create_element_size('dep3.bst', project, element_path, ['dep2.bst'], 2000000)
+    repo_target = create_element_size('target.bst', project, element_path, ['dep3.bst'], 2000000)
+
+    # This should all fit into the artifact cache
+    res = cli.run(project=project, args=['build', 'target.bst'])
+    res.assert_success()
+
+    # They should all be cached
+    assert cli.get_element_state(project, 'dep1.bst') == 'cached'
+    assert cli.get_element_state(project, 'dep2.bst') == 'cached'
+    assert cli.get_element_state(project, 'dep3.bst') == 'cached'
+    assert cli.get_element_state(project, 'target.bst') == 'cached'
 
-    # This is *technically* above the cache limit. BuildStream accepts
-    # some fuzziness, since it's hard to assert that we don't create
-    # an artifact larger than the cache quota. We would have to remove
-    # the artifact after-the-fact, but since it is required for the
-    # current build and nothing broke yet, it's nicer to keep it
-    # around.
+    # Now increase the size of all the elements
     #
-    # This scenario is quite unlikely, and the cache overflow will be
-    # resolved if the user does something about it anyway.
+    update_element_size('dep1.bst', project, repo_dep1, 8000000)
+    update_element_size('dep2.bst', project, repo_dep2, 8000000)
+    update_element_size('dep3.bst', project, repo_dep3, 8000000)
+    update_element_size('target.bst', project, repo_target, 8000000)
+
+    # Now repeat the same test we did in test_never_delete_required(),
+    # except this time let's add dynamic tracking
     #
-    assert cli.get_element_state(project, 'related.bst') == 'cached'
+    res = cli.run(project=project, args=['build', '--track-all', 'target.bst'])
+    res.assert_main_error(ErrorDomain.STREAM, None)
+    res.assert_task_error(ErrorDomain.ARTIFACT, 'cache-too-full')
 
+    # Expect the same result that we did in test_never_delete_required()
+    #
+    assert cli.get_element_state(project, 'dep1.bst') == 'cached'
+    assert cli.get_element_state(project, 'dep2.bst') == 'cached'
+    assert cli.get_element_state(project, 'dep3.bst') != 'cached'
     assert cli.get_element_state(project, 'target.bst') != 'cached'
-    assert cli.get_element_state(project, 'target2.bst') != 'cached'
 
 
 # Ensure that only valid cache quotas make it through the loading
@@ -290,3 +351,38 @@ def test_invalid_cache_quota(cli, datafiles, tmpdir, quota, success):
         res.assert_success()
     else:
         res.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_DATA)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_extract_expiry(cli, datafiles, tmpdir):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_path = 'elements'
+
+    cli.configure({
+        'cache': {
+            'quota': 10000000,
+        }
+    })
+
+    create_element_size('target.bst', project, element_path, [], 6000000)
+    res = cli.run(project=project, args=['build', 'target.bst'])
+    res.assert_success()
+    assert cli.get_element_state(project, 'target.bst') == 'cached'
+
+    # Force creating extract
+    res = cli.run(project=project, args=['checkout', 'target.bst', os.path.join(str(tmpdir), 'checkout')])
+    res.assert_success()
+
+    extractdir = os.path.join(project, 'cache', 'artifacts', 'extract', 'test', 'target')
+    extracts = os.listdir(extractdir)
+    assert len(extracts) == 1
+    extract = os.path.join(extractdir, extracts[0])
+
+    # Remove target.bst from artifact cache
+    create_element_size('target2.bst', project, element_path, [], 6000000)
+    res = cli.run(project=project, args=['build', 'target2.bst'])
+    res.assert_success()
+    assert cli.get_element_state(project, 'target.bst') != 'cached'
+
+    # Now the extract should be removed.
+    assert not os.path.exists(extract)
diff --git a/tests/artifactcache/missing-certs/certificates/client.crt b/tests/artifactcache/missing-certs/certificates/client.crt
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/artifactcache/missing-certs/certificates/client.key b/tests/artifactcache/missing-certs/certificates/client.key
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/artifactcache/missing-certs/element.bst b/tests/artifactcache/missing-certs/element.bst
new file mode 100644
index 0000000000000000000000000000000000000000..3c29b4ea1334556c03b724ce505bc234e80a1892
--- /dev/null
+++ b/tests/artifactcache/missing-certs/element.bst
@@ -0,0 +1 @@
+kind: autotools
diff --git a/tests/artifactcache/project/elements/compose-all.bst b/tests/artifactcache/project/elements/compose-all.bst
new file mode 100644
index 0000000000000000000000000000000000000000..ba47081b3ce496804ca4a236a6c021451c4c076e
--- /dev/null
+++ b/tests/artifactcache/project/elements/compose-all.bst
@@ -0,0 +1,12 @@
+kind: compose
+
+depends:
+- filename: import-bin.bst
+  type: build
+- filename: import-dev.bst
+  type: build
+
+config:
+  # Don't try running the sandbox, we don't have a
+  # runtime to run anything in this context.
+  integrate: False
diff --git a/tests/artifactcache/project/elements/import-bin.bst b/tests/artifactcache/project/elements/import-bin.bst
new file mode 100644
index 0000000000000000000000000000000000000000..a847c0c23de84a3b792fccf8506cb9cebc5aa1ea
--- /dev/null
+++ b/tests/artifactcache/project/elements/import-bin.bst
@@ -0,0 +1,4 @@
+kind: import
+sources:
+- kind: local
+  path: files/bin-files
diff --git a/tests/artifactcache/project/elements/import-dev.bst b/tests/artifactcache/project/elements/import-dev.bst
new file mode 100644
index 0000000000000000000000000000000000000000..152a54667fe9de84c37971819fbb7fafb5df23c1
--- /dev/null
+++ b/tests/artifactcache/project/elements/import-dev.bst
@@ -0,0 +1,4 @@
+kind: import
+sources:
+- kind: local
+  path: files/dev-files
diff --git a/tests/artifactcache/project/elements/target.bst b/tests/artifactcache/project/elements/target.bst
new file mode 100644
index 0000000000000000000000000000000000000000..ba489f1e856633bd929db86bdac09dfca00f6415
--- /dev/null
+++ b/tests/artifactcache/project/elements/target.bst
@@ -0,0 +1,9 @@
+kind: stack
+description: |
+
+  Main stack target for the bst build test
+
+depends:
+- import-bin.bst
+- import-dev.bst
+- compose-all.bst
diff --git a/tests/artifactcache/project/files/bin-files/usr/bin/hello b/tests/artifactcache/project/files/bin-files/usr/bin/hello
new file mode 100755
index 0000000000000000000000000000000000000000..f534a40837ced35eed6c6079228387302a4c9d65
--- /dev/null
+++ b/tests/artifactcache/project/files/bin-files/usr/bin/hello
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+echo "Hello !"
diff --git a/tests/artifactcache/project/files/dev-files/usr/include/pony.h b/tests/artifactcache/project/files/dev-files/usr/include/pony.h
new file mode 100644
index 0000000000000000000000000000000000000000..40bd0c2e768fbd9238ce3a7f332c245fd950b64f
--- /dev/null
+++ b/tests/artifactcache/project/files/dev-files/usr/include/pony.h
@@ -0,0 +1,12 @@
+#ifndef __PONY_H__
+#define __PONY_H__
+
+#define PONY_BEGIN "Once upon a time, there was a pony."
+#define PONY_END "And they lived happily ever after, the end."
+
+#define MAKE_PONY(story)  \
+  PONY_BEGIN \
+  story \
+  PONY_END
+
+#endif /* __PONY_H__ */
diff --git a/tests/artifactcache/project/project.conf b/tests/artifactcache/project/project.conf
new file mode 100644
index 0000000000000000000000000000000000000000..854e38693f391e65b8f9af10ee35679e4907ccb0
--- /dev/null
+++ b/tests/artifactcache/project/project.conf
@@ -0,0 +1,4 @@
+# Project config for frontend build test
+name: test
+
+element-path: elements
diff --git a/tests/artifactcache/pull.py b/tests/artifactcache/pull.py
new file mode 100644
index 0000000000000000000000000000000000000000..e76dc5ca7b736c0d2152fdef88001e7c741bc10a
--- /dev/null
+++ b/tests/artifactcache/pull.py
@@ -0,0 +1,316 @@
+import hashlib
+import multiprocessing
+import os
+import signal
+
+import pytest
+
+from buildstream import _yaml, _signals, utils
+from buildstream._context import Context
+from buildstream._project import Project
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+
+from tests.testutils import cli, create_artifact_share
+
+
+# Project directory
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
+# Handle messages from the pipeline
+def message_handler(message, context):
+    pass
+
+
+def tree_maker(cas, tree, directory):
+    if tree.root.ByteSize() == 0:
+        tree.root.CopyFrom(directory)
+
+    for directory_node in directory.directories:
+        child_directory = tree.children.add()
+
+        with open(cas.objpath(directory_node.digest), 'rb') as f:
+            child_directory.ParseFromString(f.read())
+
+        tree_maker(cas, tree, child_directory)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_pull(cli, tmpdir, datafiles):
+    project_dir = str(datafiles)
+
+    # Set up an artifact cache.
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        # Configure artifact share
+        artifact_dir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        user_config_file = str(tmpdir.join('buildstream.conf'))
+        user_config = {
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            }
+        }
+
+        # Write down the user configuration file
+        _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file)
+        # Ensure CLI calls will use it
+        cli.configure(user_config)
+
+        # First build the project with the artifact cache configured
+        result = cli.run(project=project_dir, args=['build', 'target.bst'])
+        result.assert_success()
+
+        # Assert that we are now cached locally
+        assert cli.get_element_state(project_dir, 'target.bst') == 'cached'
+        # Assert that we shared/pushed the cached artifact
+        element_key = cli.get_element_key(project_dir, 'target.bst')
+        assert share.has_artifact('test', 'target.bst', element_key)
+
+        # Delete the artifact locally
+        cli.remove_artifact_from_cache(project_dir, 'target.bst')
+
+        # Assert that we are not cached locally anymore
+        assert cli.get_element_state(project_dir, 'target.bst') != 'cached'
+
+        # Fake minimal context
+        context = Context()
+        context.load(config=user_config_file)
+        context.artifactdir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        context.set_message_handler(message_handler)
+
+        # Load the project and CAS cache
+        project = Project(project_dir, context)
+        project.ensure_fully_loaded()
+        cas = context.artifactcache
+
+        # Assert that the element's artifact is **not** cached
+        element = project.load_elements(['target.bst'], cas)[0]
+        element_key = cli.get_element_key(project_dir, 'target.bst')
+        assert not cas.contains(element, element_key)
+
+        queue = multiprocessing.Queue()
+        # Use a subprocess to avoid creating gRPC threads in the main BuildStream process
+        # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
+        process = multiprocessing.Process(target=_test_pull,
+                                          args=(user_config_file, project_dir, artifact_dir,
+                                                'target.bst', element_key, queue))
+
+        try:
+            # Keep SIGINT blocked in the child process
+            with _signals.blocked([signal.SIGINT], ignore=False):
+                process.start()
+
+            error = queue.get()
+            process.join()
+        except KeyboardInterrupt:
+            utils._kill_process_tree(process.pid)
+            raise
+
+        assert not error
+        assert cas.contains(element, element_key)
+
+
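+# Child-process helper for test_pull(): pulls the element's artifact
+# from the configured remote and reports an error string (or None on
+# success) through the queue.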
+def _test_pull(user_config_file, project_dir, artifact_dir,
+               element_name, element_key, queue):
+    # Fake minimal context
+    context = Context()
+    context.load(config=user_config_file)
+    context.artifactdir = artifact_dir
+    context.set_message_handler(message_handler)
+
+    # Load the project manually
+    project = Project(project_dir, context)
+    project.ensure_fully_loaded()
+
+    # Create a local CAS cache handle
+    cas = context.artifactcache
+
+    # Load the target element
+    element = project.load_elements([element_name], cas)[0]
+
+    # Manually set up the CAS remote
+    cas.setup_remotes(use_config=True)
+
+    if cas.has_push_remotes(element=element):
+        # Pull the element's artifact
+        if not cas.pull(element, element_key):
+            queue.put("Pull operation failed")
+        else:
+            queue.put(None)
+    else:
+        queue.put("No remote configured for element {}".format(element_name))
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_pull_tree(cli, tmpdir, datafiles):
+    project_dir = str(datafiles)
+
+    # Set up an artifact cache.
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        # Configure artifact share
+        artifact_dir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        user_config_file = str(tmpdir.join('buildstream.conf'))
+        user_config = {
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            }
+        }
+
+        # Write down the user configuration file
+        _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file)
+        # Ensure CLI calls will use it
+        cli.configure(user_config)
+
+        # First build the project with the artifact cache configured
+        result = cli.run(project=project_dir, args=['build', 'target.bst'])
+        result.assert_success()
+
+        # Assert that we are now cached locally
+        assert cli.get_element_state(project_dir, 'target.bst') == 'cached'
+        # Assert that we shared/pushed the cached artifact
+        element_key = cli.get_element_key(project_dir, 'target.bst')
+        assert share.has_artifact('test', 'target.bst', element_key)
+
+        # Fake minimal context
+        context = Context()
+        context.load(config=user_config_file)
+        context.artifactdir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        context.set_message_handler(message_handler)
+
+        # Load the project and CAS cache
+        project = Project(project_dir, context)
+        project.ensure_fully_loaded()
+        cas = context.artifactcache
+
+        # Assert that the element's artifact is cached
+        element = project.load_elements(['target.bst'], cas)[0]
+        element_key = cli.get_element_key(project_dir, 'target.bst')
+        assert cas.contains(element, element_key)
+
+        # Retrieve the Directory object from the cached artifact
+        artifact_ref = cas.get_artifact_fullname(element, element_key)
+        artifact_digest = cas.resolve_ref(artifact_ref)
+
+        queue = multiprocessing.Queue()
+        # Use a subprocess to avoid creating gRPC threads in the main BuildStream process
+        # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
+        process = multiprocessing.Process(target=_test_push_tree,
+                                          args=(user_config_file, project_dir, artifact_dir,
+                                                artifact_digest, queue))
+
+        try:
+            # Keep SIGINT blocked in the child process
+            with _signals.blocked([signal.SIGINT], ignore=False):
+                process.start()
+
+            tree_hash, tree_size = queue.get()
+            process.join()
+        except KeyboardInterrupt:
+            utils._kill_process_tree(process.pid)
+            raise
+
+        assert tree_hash and tree_size
+
+        # Now delete the artifact locally
+        cli.remove_artifact_from_cache(project_dir, 'target.bst')
+
+        # Assert that we are not cached locally anymore
+        assert cli.get_element_state(project_dir, 'target.bst') != 'cached'
+
+        tree_digest = remote_execution_pb2.Digest(hash=tree_hash,
+                                                  size_bytes=tree_size)
+
+        queue = multiprocessing.Queue()
+        # Use a subprocess to avoid creating gRPC threads in the main BuildStream process
+        process = multiprocessing.Process(target=_test_pull_tree,
+                                          args=(user_config_file, project_dir, artifact_dir,
+                                                tree_digest, queue))
+
+        try:
+            # Keep SIGINT blocked in the child process
+            with _signals.blocked([signal.SIGINT], ignore=False):
+                process.start()
+
+            directory_hash, directory_size = queue.get()
+            process.join()
+        except KeyboardInterrupt:
+            utils._kill_process_tree(process.pid)
+            raise
+
+        assert directory_hash and directory_size
+
+        directory_digest = remote_execution_pb2.Digest(hash=directory_hash,
+                                                       size_bytes=directory_size)
+
+        # Ensure the entire Tree structure has been pulled
+        assert os.path.exists(cas.objpath(directory_digest))
+
+
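+# Child-process helper for test_pull_tree(): reconstructs a Tree message
+# from the cached artifact and pushes it to the remote, reporting the
+# digest of the pushed Tree through the queue.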
+def _test_push_tree(user_config_file, project_dir, artifact_dir, artifact_digest, queue):
+    # Fake minimal context
+    context = Context()
+    context.load(config=user_config_file)
+    context.artifactdir = artifact_dir
+    context.set_message_handler(message_handler)
+
+    # Load the project manually
+    project = Project(project_dir, context)
+    project.ensure_fully_loaded()
+
+    # Create a local CAS cache handle
+    cas = context.artifactcache
+
+    # Manually set up the CAS remote
+    cas.setup_remotes(use_config=True)
+
+    if cas.has_push_remotes():
+        directory = remote_execution_pb2.Directory()
+
+        with open(cas.objpath(artifact_digest), 'rb') as f:
+            directory.ParseFromString(f.read())
+
+        # Build the Tree object while we are still cached
+        tree = remote_execution_pb2.Tree()
+        tree_maker(cas, tree, directory)
+
+        # Push the Tree as a regular message
+        tree_digest = cas.push_message(project, tree)
+
+        queue.put((tree_digest.hash, tree_digest.size_bytes))
+    else:
+        queue.put("No remote configured")
+
+
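+# Child-process helper for test_pull_tree(): pulls the artifact by its
+# Tree digest and reports the digest of the resulting Directory through
+# the queue.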
+def _test_pull_tree(user_config_file, project_dir, artifact_dir, artifact_digest, queue):
+    # Fake minimal context
+    context = Context()
+    context.load(config=user_config_file)
+    context.artifactdir = artifact_dir
+    context.set_message_handler(message_handler)
+
+    # Load the project manually
+    project = Project(project_dir, context)
+    project.ensure_fully_loaded()
+
+    # Create a local CAS cache handle
+    cas = context.artifactcache
+
+    # Manually set up the CAS remote
+    cas.setup_remotes(use_config=True)
+
+    if cas.has_push_remotes():
+        # Pull the artifact using the Tree object
+        directory_digest = cas.pull_tree(project, artifact_digest)
+        queue.put((directory_digest.hash, directory_digest.size_bytes))
+    else:
+        queue.put("No remote configured")
diff --git a/tests/artifactcache/push.py b/tests/artifactcache/push.py
new file mode 100644
index 0000000000000000000000000000000000000000..c95aac3efddf1a929d5cc13f30d015c2e7d5858c
--- /dev/null
+++ b/tests/artifactcache/push.py
@@ -0,0 +1,311 @@
+import multiprocessing
+import os
+import signal
+
+import pytest
+
+from pluginbase import PluginBase
+from buildstream import _yaml, _signals, utils
+from buildstream._context import Context
+from buildstream._project import Project
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
+from buildstream.storage._casbaseddirectory import CasBasedDirectory
+
+from tests.testutils import cli, create_artifact_share
+
+
+# Project directory
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
+# Handle messages from the pipeline
+def message_handler(message, context):
+    pass
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_push(cli, tmpdir, datafiles):
+    project_dir = str(datafiles)
+
+    # First build the project without the artifact cache configured
+    result = cli.run(project=project_dir, args=['build', 'target.bst'])
+    result.assert_success()
+
+    # Assert that we are now cached locally
+    assert cli.get_element_state(project_dir, 'target.bst') == 'cached'
+
+    # Set up an artifact cache.
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        # Configure artifact share
+        artifact_dir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        user_config_file = str(tmpdir.join('buildstream.conf'))
+        user_config = {
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            }
+        }
+
+        # Write down the user configuration file
+        _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file)
+
+        # Fake minimal context
+        context = Context()
+        context.load(config=user_config_file)
+        context.artifactdir = artifact_dir
+        context.set_message_handler(message_handler)
+
+        # Load the project manually
+        project = Project(project_dir, context)
+        project.ensure_fully_loaded()
+
+        # Create a local CAS cache handle
+        cas = context.artifactcache
+
+        # Assert that the element's artifact is cached
+        element = project.load_elements(['target.bst'], cas)[0]
+        element_key = cli.get_element_key(project_dir, 'target.bst')
+        assert cas.contains(element, element_key)
+
+        queue = multiprocessing.Queue()
+        # Use a subprocess to avoid creating gRPC threads in the main BuildStream process
+        # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
+        process = multiprocessing.Process(target=_test_push,
+                                          args=(user_config_file, project_dir, artifact_dir,
+                                                'target.bst', element_key, queue))
+
+        try:
+            # Keep SIGINT blocked in the child process
+            with _signals.blocked([signal.SIGINT], ignore=False):
+                process.start()
+
+            error = queue.get()
+            process.join()
+        except KeyboardInterrupt:
+            utils._kill_process_tree(process.pid)
+            raise
+
+        assert not error
+        assert share.has_artifact('test', 'target.bst', element_key)
+
+
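+# Child-process helper for test_push(): pushes the element's artifact
+# to the configured remote and reports an error string (or None on
+# success) through the queue.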
+def _test_push(user_config_file, project_dir, artifact_dir,
+               element_name, element_key, queue):
+    # Fake minimal context
+    context = Context()
+    context.load(config=user_config_file)
+    context.artifactdir = artifact_dir
+    context.set_message_handler(message_handler)
+
+    # Load the project manually
+    project = Project(project_dir, context)
+    project.ensure_fully_loaded()
+
+    # Create a local CAS cache handle
+    cas = context.artifactcache
+
+    # Load the target element
+    element = project.load_elements([element_name], cas)[0]
+
+    # Manually set up the CAS remote
+    cas.setup_remotes(use_config=True)
+    cas.initialize_remotes()
+
+    if cas.has_push_remotes(element=element):
+        # Push the element's artifact
+        if not cas.push(element, [element_key]):
+            queue.put("Push operation failed")
+        else:
+            queue.put(None)
+    else:
+        queue.put("No remote configured for element {}".format(element_name))
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_push_directory(cli, tmpdir, datafiles):
+    project_dir = str(datafiles)
+
+    # First build the project without the artifact cache configured
+    result = cli.run(project=project_dir, args=['build', 'target.bst'])
+    result.assert_success()
+
+    # Assert that we are now cached locally
+    assert cli.get_element_state(project_dir, 'target.bst') == 'cached'
+
+    # Set up an artifact cache.
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        # Configure artifact share
+        artifact_dir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        user_config_file = str(tmpdir.join('buildstream.conf'))
+        user_config = {
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            }
+        }
+
+        # Write down the user configuration file
+        _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file)
+
+        # Fake minimal context
+        context = Context()
+        context.load(config=user_config_file)
+        context.artifactdir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        context.set_message_handler(message_handler)
+
+        # Load the project and CAS cache
+        project = Project(project_dir, context)
+        project.ensure_fully_loaded()
+        cas = context.artifactcache
+
+        # Assert that the element's artifact is cached
+        element = project.load_elements(['target.bst'], cas)[0]
+        element_key = cli.get_element_key(project_dir, 'target.bst')
+        assert cas.contains(element, element_key)
+
+        # Manually set up the CAS remote
+        cas.setup_remotes(use_config=True)
+        cas.initialize_remotes()
+        assert cas.has_push_remotes(element=element)
+
+        # Recreate the CasBasedDirectory object from the cached artifact
+        artifact_ref = cas.get_artifact_fullname(element, element_key)
+        artifact_digest = cas.resolve_ref(artifact_ref)
+
+        queue = multiprocessing.Queue()
+        # Use a subprocess to avoid creating gRPC threads in the main BuildStream process
+        # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
+        process = multiprocessing.Process(target=_test_push_directory,
+                                          args=(user_config_file, project_dir, artifact_dir,
+                                                artifact_digest, queue))
+
+        try:
+            # Keep SIGINT blocked in the child process
+            with _signals.blocked([signal.SIGINT], ignore=False):
+                process.start()
+
+            directory_hash = queue.get()
+            process.join()
+        except KeyboardInterrupt:
+            utils._kill_process_tree(process.pid)
+            raise
+
+        assert directory_hash
+        assert artifact_digest.hash == directory_hash
+        assert share.has_object(artifact_digest)
+
+
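+# Child-process helper for test_push_directory(): pushes the
+# CasBasedDirectory referenced by artifact_digest and reports its
+# hash through the queue.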
+def _test_push_directory(user_config_file, project_dir, artifact_dir, artifact_digest, queue):
+    # Fake minimal context
+    context = Context()
+    context.load(config=user_config_file)
+    context.artifactdir = artifact_dir
+    context.set_message_handler(message_handler)
+
+    # Load the project manually
+    project = Project(project_dir, context)
+    project.ensure_fully_loaded()
+
+    # Create a local CAS cache handle
+    cas = context.artifactcache
+
+    # Manually set up the CAS remote
+    cas.setup_remotes(use_config=True)
+    cas.initialize_remotes()
+
+    if cas.has_push_remotes():
+        # Create a CasBasedDirectory from local CAS cache content
+        directory = CasBasedDirectory(context, ref=artifact_digest)
+
+        # Push the CasBasedDirectory object
+        cas.push_directory(project, directory)
+
+        queue.put(directory.ref.hash)
+    else:
+        queue.put("No remote configured")
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_push_message(cli, tmpdir, datafiles):
+    project_dir = str(datafiles)
+
+    # Set up an artifact cache.
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+        # Configure artifact share
+        artifact_dir = os.path.join(str(tmpdir), 'cache', 'artifacts')
+        user_config_file = str(tmpdir.join('buildstream.conf'))
+        user_config = {
+            'scheduler': {
+                'pushers': 1
+            },
+            'artifacts': {
+                'url': share.repo,
+                'push': True,
+            }
+        }
+
+        # Write down the user configuration file
+        _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file)
+
+        queue = multiprocessing.Queue()
+        # Use a subprocess to avoid creating gRPC threads in the main BuildStream process
+        # See https://github.com/grpc/grpc/blob/master/doc/fork_support.md for details
+        process = multiprocessing.Process(target=_test_push_message,
+                                          args=(user_config_file, project_dir, artifact_dir, queue))
+
+        try:
+            # Keep SIGINT blocked in the child process
+            with _signals.blocked([signal.SIGINT], ignore=False):
+                process.start()
+
+            message_hash, message_size = queue.get()
+            process.join()
+        except KeyboardInterrupt:
+            utils._kill_process_tree(process.pid)
+            raise
+
+        assert message_hash and message_size
+        message_digest = remote_execution_pb2.Digest(hash=message_hash,
+                                                     size_bytes=message_size)
+        assert share.has_object(message_digest)
+
+
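+# Child-process helper for test_push_message(): pushes an example
+# Command message to the remote and reports the digest of the pushed
+# message through the queue.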
+def _test_push_message(user_config_file, project_dir, artifact_dir, queue):
+    # Fake minimal context
+    context = Context()
+    context.load(config=user_config_file)
+    context.artifactdir = artifact_dir
+    context.set_message_handler(message_handler)
+
+    # Load the project manually
+    project = Project(project_dir, context)
+    project.ensure_fully_loaded()
+
+    # Create a local CAS cache handle
+    cas = context.artifactcache
+
+    # Manually set up the CAS remote
+    cas.setup_remotes(use_config=True)
+    cas.initialize_remotes()
+
+    if cas.has_push_remotes():
+        # Create an example message object
+        command = remote_execution_pb2.Command(arguments=['/usr/bin/gcc', '--help'],
+                                               working_directory='/buildstream-build',
+                                               output_directories=['/buildstream-install'])
+
+        # Push the message object
+        command_digest = cas.push_message(project, command)
+
+        queue.put((command_digest.hash, command_digest.size_bytes))
+    else:
+        queue.put("No remote configured")
diff --git a/tests/cachekey/cachekey.py b/tests/cachekey/cachekey.py
index 21beef8fb9136a13f8665ca3235a05ba23074964..b1f8a9140a1c150347e68534afa8410e4da7e571 100644
--- a/tests/cachekey/cachekey.py
+++ b/tests/cachekey/cachekey.py
@@ -129,6 +129,7 @@ def assert_cache_keys(project_dir, output):
                              "Use tests/cachekey/update.py to automatically " +
                              "update this test case")
 
+
 ##############################################
 #             Test Entry Point               #
 ##############################################
diff --git a/tests/cachekey/project/elements/build1.expected b/tests/cachekey/project/elements/build1.expected
index 5b3a9ad71673ed479a3bbfb57382edb025e12be0..42d0261e367400397baaa350003b34845ba533df 100644
--- a/tests/cachekey/project/elements/build1.expected
+++ b/tests/cachekey/project/elements/build1.expected
@@ -1 +1 @@
-05429485dff08bdb968f7d10c2cdda63be49c8a783d54863a0d4abce44bbebe9
\ No newline at end of file
+dd5e29baefb84f68eb4abac3a1befc332077ec4c97bb2572e57f3ca98ba46707
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/build2.expected b/tests/cachekey/project/elements/build2.expected
index 9641bd05bca289df39f260158743ad11f24a587f..cc20260645d55cd0eea78c1005e3db6f7a83845b 100644
--- a/tests/cachekey/project/elements/build2.expected
+++ b/tests/cachekey/project/elements/build2.expected
@@ -1 +1 @@
-4155c7bc836cdb092de3241fa92883bd8c7dd94c55affa406e559aeb6252c669
\ No newline at end of file
+99d80454cce44645597c885800edf0bf254d1c3606d869f2ccdd5043ec7685cb
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose1.expected b/tests/cachekey/project/elements/compose1.expected
index ee82ebb6a494f4e9171a60769c0c96a3ed4ebd8b..a76ce029f27098ebd51fce0cb474065c821edddc 100644
--- a/tests/cachekey/project/elements/compose1.expected
+++ b/tests/cachekey/project/elements/compose1.expected
@@ -1 +1 @@
-4e4c719242aa45fed398cc2fb8936195a1fcae9326d808de7fee340ae48862ea
\ No newline at end of file
+b63c517f604e8ca64e973476f687190d14a813a0bf77573b93a557f5fb7ae214
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose2.expected b/tests/cachekey/project/elements/compose2.expected
index f7b2d396bb92e426523e6d4ed21aec3e5ade917d..cc17908c77afdcc7310083a7e9c5ac6cdf3814c9 100644
--- a/tests/cachekey/project/elements/compose2.expected
+++ b/tests/cachekey/project/elements/compose2.expected
@@ -1 +1 @@
-2fc1dd398f6c6f6e1d7ca48d88557e133d2130278882e14cd1105b15a600cd7a
\ No newline at end of file
+6676f1cce86166eb66ab83254fe2deb43be93644967de110dd42713dea181508
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose3.expected b/tests/cachekey/project/elements/compose3.expected
index e6339b17596fbcc864f717980fdcba9866c7eb6c..cef7e620a449ee5afb6674c4b3f954f057d97140 100644
--- a/tests/cachekey/project/elements/compose3.expected
+++ b/tests/cachekey/project/elements/compose3.expected
@@ -1 +1 @@
-4d7c9e2e1e8cfcc4b300a11693767f41f22c7829db9063dec10856328d03ccc3
\ No newline at end of file
+0f8f444566c097067f2dfa54f26100abff85cc49bf9acf0081129f53244bc144
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose4.expected b/tests/cachekey/project/elements/compose4.expected
index de299e8926014dd8eb7fa7157dc94e72515ab4d0..96cb1c4a0055e748570e6b3c80b24d143715f7d9 100644
--- a/tests/cachekey/project/elements/compose4.expected
+++ b/tests/cachekey/project/elements/compose4.expected
@@ -1 +1 @@
-cad8f3b622f4a906f9fc3f5187a7703e2b17dfc550dd5a07479ca3ebffbd5c86
\ No newline at end of file
+aa72331d42f647e845243e8a77389febfb78acff09f70771a3545bdf0d4d70ad
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/compose5.expected b/tests/cachekey/project/elements/compose5.expected
index d7c4c3ef89983c2d021e3182d59d5b3f9c459ff9..6fec106a329ee86841ce3b19408a5db8026c8b4a 100644
--- a/tests/cachekey/project/elements/compose5.expected
+++ b/tests/cachekey/project/elements/compose5.expected
@@ -1 +1 @@
-4fd21699827aa16da8d7a1525020f9fd45422f0431749510947ff472d76c1802
\ No newline at end of file
+37bb4486f42e04b8a1c9f9cb9358adfd0d4dae0bb3b2a4072e090848cd2b955d
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/import1.expected b/tests/cachekey/project/elements/import1.expected
index bc9c1c93f8c4fc9678d05baa66e370f176684d82..8e3c582e7e7c7f617e3bf845dceb1916cbacdb0e 100644
--- a/tests/cachekey/project/elements/import1.expected
+++ b/tests/cachekey/project/elements/import1.expected
@@ -1 +1 @@
-aa443ea4607d7dd5a0c99646a1b827e3165862772fc1b26e20195aadd2ab8885
\ No newline at end of file
+ce2dce59ad7fa810c945e7385cc25d4c8992adf71fbdc44336cf136330fe2b16
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/import2.expected b/tests/cachekey/project/elements/import2.expected
index 1013f5826c424dafaf0c4f6afdc372cf442176f2..5ad1b58166a64df6e4ab577cf9fe2c613b1b5643 100644
--- a/tests/cachekey/project/elements/import2.expected
+++ b/tests/cachekey/project/elements/import2.expected
@@ -1 +1 @@
-18ea6bbb968ca6945c8c2941650f447b8df58673be7270c967c8152730eff036
\ No newline at end of file
+4fd32ee29026ecbcee717c8f04a0b807934a7042d67b8786e0eb9326757c845d
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/import3.expected b/tests/cachekey/project/elements/import3.expected
index 9a851772899692ab0f90a7dc60b680a76d94abad..c5d55728f23c7a8f52b063b9bbc8f032a1a3be30 100644
--- a/tests/cachekey/project/elements/import3.expected
+++ b/tests/cachekey/project/elements/import3.expected
@@ -1 +1 @@
-34ce4816b0307f0691302460367ab24b4e1f86e61c0e307e68bcd6833946c1f1
\ No newline at end of file
+e24dd31bda628616138014391a94040490da0820a2c42ab10ec6dfad1b694df9
\ No newline at end of file
diff --git a/tests/cachekey/project/elements/script1.expected b/tests/cachekey/project/elements/script1.expected
index e36a3df70206bf777b40406fc13f1e824ae19ee7..83dbba96468c5c651243d618cfc27d0c11fe7a7a 100644
--- a/tests/cachekey/project/elements/script1.expected
+++ b/tests/cachekey/project/elements/script1.expected
@@ -1 +1 @@
-c48922b3d80d36e6d30bed7581aa1473a5e463321b3a19606b603c055d2b4be4
\ No newline at end of file
+a139b184c8dd6a6e08231822ca2d886688e5d7720dc2290f8876d485bdb920b5
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/bzr1.expected b/tests/cachekey/project/sources/bzr1.expected
index fecc86c740e7d73b53122b8f9eecff4114fb0389..81bcac7ec4248238649a6608549940b00d542029 100644
--- a/tests/cachekey/project/sources/bzr1.expected
+++ b/tests/cachekey/project/sources/bzr1.expected
@@ -1 +1 @@
-ee271c8f469cd33330229d8dcc44e26f3480a9f47b55db46f42d1a396a94609e
\ No newline at end of file
+d2aeb3715c5842461384bea6a9bcf452310d1626ae40b6e7a9f51adc66a270fd
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/git1.expected b/tests/cachekey/project/sources/git1.expected
index eab44b627a53a8fd8d7f47092ddce17336a96e34..dca2d04f028ad218bbba3faec84af9f5821da98a 100644
--- a/tests/cachekey/project/sources/git1.expected
+++ b/tests/cachekey/project/sources/git1.expected
@@ -1 +1 @@
-39fdc83c2760589c2577fb859cc617a8fdd7ac4cf113f9d4e5c723d70cae3c09
\ No newline at end of file
+4e03d21335e578034b09191ebf4977f0f537425c3031805dfb2f835ff77925cd
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/git2.expected b/tests/cachekey/project/sources/git2.expected
index 868439c9b73ee637c7c638ab2df37df9b94875e4..d32c44557a5a5bc731b15ced4e01e5f56fc3f585 100644
--- a/tests/cachekey/project/sources/git2.expected
+++ b/tests/cachekey/project/sources/git2.expected
@@ -1 +1 @@
-8bac8c7d3b8bbd264083db8e6f3aa8894625af5396bbe62589d1ab726a87cccd
\ No newline at end of file
+75c96f6c8d3ca3ffe164cd51f42689287021e60ef524f56340539feadd5a9fb8
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/local1.expected b/tests/cachekey/project/sources/local1.expected
index bc9c1c93f8c4fc9678d05baa66e370f176684d82..8e3c582e7e7c7f617e3bf845dceb1916cbacdb0e 100644
--- a/tests/cachekey/project/sources/local1.expected
+++ b/tests/cachekey/project/sources/local1.expected
@@ -1 +1 @@
-aa443ea4607d7dd5a0c99646a1b827e3165862772fc1b26e20195aadd2ab8885
\ No newline at end of file
+ce2dce59ad7fa810c945e7385cc25d4c8992adf71fbdc44336cf136330fe2b16
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/local2.expected b/tests/cachekey/project/sources/local2.expected
index 610d3fe5df14205cc5297e6ee960483261005779..ffa2c5d5196c76f687e4b8a785f8a64806012e7e 100644
--- a/tests/cachekey/project/sources/local2.expected
+++ b/tests/cachekey/project/sources/local2.expected
@@ -1 +1 @@
-51296c83a0d5989a67f40391afcbf420cbbd76c1e6c07aa43fe2aef2e88941e3
\ No newline at end of file
+de18b7d9ee2358d6924db5a9f72257e2e2a3d5f8450cb8891f8984bfd1101345
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/ostree1.expected b/tests/cachekey/project/sources/ostree1.expected
index 891bea41bb79ce7f897f883c83c41a78ff185aa8..f12da1ba1607a3e4f56c617be768c2d53dcdaa4c 100644
--- a/tests/cachekey/project/sources/ostree1.expected
+++ b/tests/cachekey/project/sources/ostree1.expected
@@ -1 +1 @@
-cd8b506c38c116d6bea8999720a82afb8844453d5ad05385302eabc7d858859c
\ No newline at end of file
+b8414e0077057fcac4e10291d88d898d7132dc591e3b265afee1ad59831815ca
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch1.expected b/tests/cachekey/project/sources/patch1.expected
index 76c2a2a91186d481f93eb5e8229d1c828e0eb507..b193eca9f247ca596e5e0076589d048c295be8d5 100644
--- a/tests/cachekey/project/sources/patch1.expected
+++ b/tests/cachekey/project/sources/patch1.expected
@@ -1 +1 @@
-bf2fe787df6f263cfd7dbd4aa91909af4186e252da722c3d2e2383533fbc7057
\ No newline at end of file
+a426c94443da29b060af0aed3f2ffdd4470e1ce9cb0592d8696c55a767e448c1
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch2.expected b/tests/cachekey/project/sources/patch2.expected
index ea97daf8774e1fc65c09cb79f8989fd2f8f6eea9..94e975ae216f2919d34fe38d80a70b5267c6fdd5 100644
--- a/tests/cachekey/project/sources/patch2.expected
+++ b/tests/cachekey/project/sources/patch2.expected
@@ -1 +1 @@
-8f040542ebb9b1c690af99d2db4ffc0a54cb13868a364af4771d19615b9e3b02
\ No newline at end of file
+b884e246b61cc930f33216055e99a82a47dcf42435b860622039555a159fc255
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/patch3.expected b/tests/cachekey/project/sources/patch3.expected
index 96e7d4a13dbdbb4f9692ed18e5b18f8c0278c401..4e11c37cebbb0d45050e892cb213e733c43337ca 100644
--- a/tests/cachekey/project/sources/patch3.expected
+++ b/tests/cachekey/project/sources/patch3.expected
@@ -1 +1 @@
-fb3985a6527f2498974ffa418cc6832b716d9862c713d7b8dc1c22df45857ee5
\ No newline at end of file
+79e297df970b6faaa1cfd64e5a6b6c8b4611b9128a19a7f22a2ee051174fccc9
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/pip1.expected b/tests/cachekey/project/sources/pip1.expected
index 11d7c5faeb243d69bb4a48329857a7b69bdb2a74..1fe5a50c4469cd25f440f02ef714102e568d9577 100644
--- a/tests/cachekey/project/sources/pip1.expected
+++ b/tests/cachekey/project/sources/pip1.expected
@@ -1 +1 @@
-880d0dc27d6683725cfd68d60156058115a9a53793b14b727fc6d0588a473763
\ No newline at end of file
+d8bdc8848e4d2e3d70a1267e73bf0e63afa778e4c905cad1a94308634176fb87
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/remote1.expected b/tests/cachekey/project/sources/remote1.expected
index 84b807b42a0d9d662c19c70bec381ecb19d78dae..7518163226c71d407002f4a577992fa685514990 100644
--- a/tests/cachekey/project/sources/remote1.expected
+++ b/tests/cachekey/project/sources/remote1.expected
@@ -1 +1 @@
-1fe04362ce6b1e65a0907749a8b11dd2838b2505d2f0c7fee01c005bd43cd63a
\ No newline at end of file
+2ab4d2a2490dabafadfc44d95b78f690105e0f0d1cb58665a6a332920172741e
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/remote2.expected b/tests/cachekey/project/sources/remote2.expected
index 31e849a824b985a7249aefa10f326061a5d3f277..9b0428eb64957304bc6daf7a5ce056d2cb8f5336 100644
--- a/tests/cachekey/project/sources/remote2.expected
+++ b/tests/cachekey/project/sources/remote2.expected
@@ -1 +1 @@
-105c814f5c88c72e2681a39e1b01a0f2009342afa2b1c1a21c1cc4a664eced29
\ No newline at end of file
+642cbafb3020ab80dae274a983ade81757cf3a1fa4fbba01f621599830be50fd
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/tar1.expected b/tests/cachekey/project/sources/tar1.expected
index 7df0789f76c6dccc5698a5b7bf9fbedbe3ef4dae..64addbfe1165ed71ae7b61895a165ac4880a7142 100644
--- a/tests/cachekey/project/sources/tar1.expected
+++ b/tests/cachekey/project/sources/tar1.expected
@@ -1 +1 @@
-29331729ccb0f67507f9b1a315410d794d62bda6d16ee1fabd927a39808265a7
\ No newline at end of file
+d32bf753f0507f07c8b660ed8fc4428434faf7d07049de92ee203256db0149a3
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/tar2.expected b/tests/cachekey/project/sources/tar2.expected
index 044625e7d6408a894cec288c4e4baa6c73562011..9b4372330c7bb3eaa383911926f725f00bcc5ca1 100644
--- a/tests/cachekey/project/sources/tar2.expected
+++ b/tests/cachekey/project/sources/tar2.expected
@@ -1 +1 @@
-37e135a6a6270245ef0fcfda96cada821095f819b57e701f635e83a6d47b83a9
\ No newline at end of file
+41844c597dbffb4f3dcfaae2e5553836816b1b77744db01e47671ab14276795a
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/zip1.expected b/tests/cachekey/project/sources/zip1.expected
index c4aca98c43a24beb9ff9e9c393999cefe26d2dac..ac53a32d5e06c002f0b7cb2cd9777d1cd95428fc 100644
--- a/tests/cachekey/project/sources/zip1.expected
+++ b/tests/cachekey/project/sources/zip1.expected
@@ -1 +1 @@
-5834df0bd373aebd3e74fe57534dfbefbad02a0bfc391ea9b67a6c7c63823ba0
\ No newline at end of file
+a68328d4ad389a4cdc690103bc6b0bb4d2252eb4f738f6cd004645eb478fcf41
\ No newline at end of file
diff --git a/tests/cachekey/project/sources/zip2.expected b/tests/cachekey/project/sources/zip2.expected
index 220f321d93221fb7bda05fabff580dacbd50bcbe..3da5f68a754939c7a7c0b71ce412d788b9289b31 100644
--- a/tests/cachekey/project/sources/zip2.expected
+++ b/tests/cachekey/project/sources/zip2.expected
@@ -1 +1 @@
-7f8bb32b8fd8526c1909fbb239706abd7d1ab96911f17eb9f6e96a6f55833c04
\ No newline at end of file
+30c104c539200d568f9157549dd3c8a15a157cb5c56632638b99986e4edf0576
\ No newline at end of file
diff --git a/tests/cachekey/project/target.expected b/tests/cachekey/project/target.expected
index 3e7cc81db41cacf6809d51c9b4f5037ba1911366..70dcca36388e559ddb290aada8f930d7add25294 100644
--- a/tests/cachekey/project/target.expected
+++ b/tests/cachekey/project/target.expected
@@ -1 +1 @@
-f5affaacd3ac724f5415a7a8349c6dca6122841dd7f9769de4f9d6cb7185f9b8
\ No newline at end of file
+29a1252ec30dd6ae73c772381f0eb417e3874c75710d08be819f5715dcaa942b
\ No newline at end of file
diff --git a/tests/cachekey/update.py b/tests/cachekey/update.py
index 09cf19657636b03e889cb395ba69e8ec50ff6151..d574d07b31c32a9e43ff659ff383ac0d38add510 100755
--- a/tests/cachekey/update.py
+++ b/tests/cachekey/update.py
@@ -5,7 +5,7 @@
 #
 # Simply run without any arguments, from anywhere, e.g.:
 #
-#   ./tests/cachekey/update.py
+#   PYTHONPATH=. ./tests/cachekey/update.py
 #
 # After this, add any files which were newly created and commit
 # the result in order to adjust the cache key test to changed
@@ -65,5 +65,6 @@ def update_keys():
 
                 write_expected_key(element_name, actual_keys[element_name])
 
+
 if __name__ == '__main__':
     update_keys()
diff --git a/tests/completions/completions.py b/tests/completions/completions.py
index 50b41f7b30446d3e94a76bc5153a8b90feaadb8c..e6d15e68a0347842ef94be3b2145446c5d8a5fa6 100644
--- a/tests/completions/completions.py
+++ b/tests/completions/completions.py
@@ -76,7 +76,7 @@ def assert_completion(cli, cmd, word_idx, expected, cwd=None):
         words = result.output.splitlines()
 
     # The order is meaningless, bash will
-    # take the results and order it by it's
+    # take the results and order it by its
     # own little heuristics
     words = sorted(words)
     expected = sorted(expected)
diff --git a/tests/format/option-overrides/element.bst b/tests/format/option-overrides/element.bst
new file mode 100644
index 0000000000000000000000000000000000000000..3c29b4ea1334556c03b724ce505bc234e80a1892
--- /dev/null
+++ b/tests/format/option-overrides/element.bst
@@ -0,0 +1 @@
+kind: autotools
diff --git a/tests/format/option-overrides/project.conf b/tests/format/option-overrides/project.conf
new file mode 100644
index 0000000000000000000000000000000000000000..c8058f0761dd5bf1c975589b9f4e406470d22c0f
--- /dev/null
+++ b/tests/format/option-overrides/project.conf
@@ -0,0 +1,19 @@
+# Test case ensuring that we can use options
+# in the element overrides.
+#
+name: test
+
+options:
+  arch:
+    type: arch
+    description: architecture
+    values: [i686, x86_64]
+
+elements:
+  autotools:
+    variables:
+      (?):
+      - arch == 'i686':
+          conf-global: --host=i686-unknown-linux-gnu
+      - arch == 'x86_64':
+          conf-global: --host=x86_64-unknown-linux-gnu
diff --git a/tests/format/optionoverrides.py b/tests/format/optionoverrides.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5c37b3a515867df106b2af086ee4b00d0d00071
--- /dev/null
+++ b/tests/format/optionoverrides.py
@@ -0,0 +1,29 @@
+import os
+import pytest
+from buildstream import _yaml
+from tests.testutils.runcli import cli
+
+# Project directory
+DATA_DIR = os.path.dirname(os.path.realpath(__file__))
+
+
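+# Test that options can be used in the project.conf element overrides,
+# by checking the resolved conf-global variable for each architecture.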
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("arch", [('i686'), ('x86_64')])
+def test_override(cli, datafiles, arch):
+    project = os.path.join(datafiles.dirname, datafiles.basename, 'option-overrides')
+
+    bst_args = ['--option', 'arch', arch]
+    bst_args += [
+        'show',
+        '--deps', 'none',
+        '--format', '%{vars}',
+        'element.bst'
+    ]
+    result = cli.run(project=project, silent=True, args=bst_args)
+    result.assert_success()
+
+    # See the associated project.conf for the expected values
+    expected_value = '--host={}-unknown-linux-gnu'.format(arch)
+
+    loaded = _yaml.load_data(result.output)
+    assert loaded['conf-global'] == expected_value
diff --git a/tests/format/project.py b/tests/format/project.py
index df1a2364b4966de4805bc3f8c52c95333266d838..46145e578adb233d02359cffd2f5b15f03ee8ec4 100644
--- a/tests/format/project.py
+++ b/tests/format/project.py
@@ -40,6 +40,13 @@ def test_invalid_project_name(cli, datafiles):
     result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_SYMBOL_NAME)
 
 
+@pytest.mark.datafiles(os.path.join(DATA_DIR))
+def test_invalid_yaml(cli, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename, "invalid-yaml")
+    result = cli.run(project=project, args=['workspace', 'list'])
+    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.INVALID_YAML)
+
+
 @pytest.mark.datafiles(os.path.join(DATA_DIR))
 def test_load_default_project(cli, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename, "default")
@@ -181,3 +188,15 @@ def test_project_refs_options(cli, datafiles):
 
     # Assert that the cache keys are different
     assert result1.output != result2.output
+
+
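+# Test that elements are loaded correctly when the project directory
+# is reached through a symlink.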
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'element-path'))
+def test_element_path_project_path_contains_symlinks(cli, datafiles, tmpdir):
+    real_project = str(datafiles)
+    linked_project = os.path.join(str(tmpdir), 'linked')
+    os.symlink(real_project, linked_project)
+    os.makedirs(os.path.join(real_project, 'elements'), exist_ok=True)
+    with open(os.path.join(real_project, 'elements', 'element.bst'), 'w') as f:
+        f.write("kind: manual\n")
+    result = cli.run(project=linked_project, args=['show', 'element.bst'])
+    result.assert_success()
diff --git a/tests/format/project/invalid-yaml/manual.bst b/tests/format/project/invalid-yaml/manual.bst
new file mode 100644
index 0000000000000000000000000000000000000000..4d7f7026665231e5e58625bbbe9e4f3619163b13
--- /dev/null
+++ b/tests/format/project/invalid-yaml/manual.bst
@@ -0,0 +1 @@
+kind: manual
diff --git a/tests/format/project/invalid-yaml/project.conf b/tests/format/project/invalid-yaml/project.conf
new file mode 100644
index 0000000000000000000000000000000000000000..5f9282bbf961a90c7f56c7d55cca19760f033366
--- /dev/null
+++ b/tests/format/project/invalid-yaml/project.conf
@@ -0,0 +1,8 @@
+# Basic project configuration that doesn't override anything
+#
+
+name: pony
+
+variables:
+  sbindir: "%{bindir}
+
diff --git a/tests/format/variables.py b/tests/format/variables.py
index d570bf01dfef9a81bb570a65dfbe097d3f055967..26bb3db983f4da4d978f670716c2c33ff980d124 100644
--- a/tests/format/variables.py
+++ b/tests/format/variables.py
@@ -1,5 +1,6 @@
 import os
+import sys
 import pytest
 from buildstream import _yaml
 from buildstream._exceptions import ErrorDomain, LoadErrorReason
 from tests.testutils.runcli import cli
@@ -18,10 +19,10 @@ DATA_DIR = os.path.join(
 @pytest.mark.parametrize("target,varname,expected", [
     ('autotools.bst', 'make-install', "make -j1 DESTDIR=\"/buildstream-install\" install"),
     ('cmake.bst', 'cmake',
-     "cmake -B_builddir -H. -G\"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX:PATH=\"/usr\" \\\n" +
+     "cmake -B_builddir -H\".\" -G\"Unix Makefiles\" -DCMAKE_INSTALL_PREFIX:PATH=\"/usr\" \\\n" +
      "-DCMAKE_INSTALL_LIBDIR=lib   "),
     ('distutils.bst', 'python-install',
-     "python3 setup.py install --prefix \"/usr\" \\\n" +
+     "python3 ./setup.py install --prefix \"/usr\" \\\n" +
      "--root \"/buildstream-install\""),
     ('makemaker.bst', 'configure', "perl Makefile.PL PREFIX=/buildstream-install/usr"),
     ('modulebuild.bst', 'configure', "perl Build.PL --prefix \"/buildstream-install/usr\""),
@@ -44,10 +45,10 @@ def test_defaults(cli, datafiles, tmpdir, target, varname, expected):
 @pytest.mark.parametrize("target,varname,expected", [
     ('autotools.bst', 'make-install', "make -j1 DESTDIR=\"/custom/install/root\" install"),
     ('cmake.bst', 'cmake',
-     "cmake -B_builddir -H. -G\"Ninja\" -DCMAKE_INSTALL_PREFIX:PATH=\"/opt\" \\\n" +
+     "cmake -B_builddir -H\".\" -G\"Ninja\" -DCMAKE_INSTALL_PREFIX:PATH=\"/opt\" \\\n" +
      "-DCMAKE_INSTALL_LIBDIR=lib   "),
     ('distutils.bst', 'python-install',
-     "python3 setup.py install --prefix \"/opt\" \\\n" +
+     "python3 ./setup.py install --prefix \"/opt\" \\\n" +
      "--root \"/custom/install/root\""),
     ('makemaker.bst', 'configure', "perl Makefile.PL PREFIX=/custom/install/root/opt"),
     ('modulebuild.bst', 'configure', "perl Build.PL --prefix \"/custom/install/root/opt\""),
@@ -72,3 +73,20 @@ def test_missing_variable(cli, datafiles, tmpdir):
     ])
     result.assert_main_error(ErrorDomain.LOAD,
                              LoadErrorReason.UNRESOLVED_VARIABLE)
+
+
+@pytest.mark.timeout(3, method="signal")
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'cyclic_variables'))
+def test_cyclic_variables(cli, datafiles):
+    print_warning("Performing cyclic test; if this test times out it will " +
+                  "exit the test sequence")
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    result = cli.run(project=project, silent=True, args=[
+        "build", "cyclic.bst"
+    ])
+    result.assert_main_error(ErrorDomain.LOAD, LoadErrorReason.RECURSIVE_VARIABLE)
+
+
+def print_warning(msg):
+    RED, END = "\033[91m", "\033[0m"
+    print(("\n{}{}{}").format(RED, msg, END), file=sys.stderr)
diff --git a/tests/format/variables/cyclic_variables/cyclic.bst b/tests/format/variables/cyclic_variables/cyclic.bst
new file mode 100644
index 0000000000000000000000000000000000000000..a05a40b274e2c3dfa0fe23edea56151ac85ff281
--- /dev/null
+++ b/tests/format/variables/cyclic_variables/cyclic.bst
@@ -0,0 +1,5 @@
+kind: manual
+
+variables:
+  a: "%{prefix}/a"
+  prefix: "%{a}/some_prefix/"
\ No newline at end of file
diff --git a/tests/format/variables/cyclic_variables/project.conf b/tests/format/variables/cyclic_variables/project.conf
new file mode 100644
index 0000000000000000000000000000000000000000..b3275362517072a94da46b9f7966cdbc97ddc670
--- /dev/null
+++ b/tests/format/variables/cyclic_variables/project.conf
@@ -0,0 +1 @@
+name: test
diff --git a/tests/frontend/buildcheckout.py b/tests/frontend/buildcheckout.py
index d0f52d6a73e366defebc1e0e5fc03910ddd8d0d0..4d409cdfed3ef486b58d54cda93d08d1b969c3ec 100644
--- a/tests/frontend/buildcheckout.py
+++ b/tests/frontend/buildcheckout.py
@@ -288,6 +288,7 @@ def test_build_checkout_force_tarball(datafiles, cli):
     assert os.path.join('.', 'usr', 'bin', 'hello') in tar.getnames()
     assert os.path.join('.', 'usr', 'include', 'pony.h') in tar.getnames()
 
+
 fetch_build_checkout_combos = \
     [("strict", kind) for kind in ALL_REPO_KINDS] + \
     [("non-strict", kind) for kind in ALL_REPO_KINDS]
diff --git a/tests/frontend/logging.py b/tests/frontend/logging.py
index 4c70895a500d56e3127bb64455d46876b504c097..733c7e85d6be3414a3bcdedee34b7d346c77dbcb 100644
--- a/tests/frontend/logging.py
+++ b/tests/frontend/logging.py
@@ -54,8 +54,7 @@ def test_custom_logging(cli, tmpdir, datafiles):
 
     custom_log_format = '%{elapsed},%{elapsed-us},%{wallclock},%{key},%{element},%{action},%{message}'
     user_config = {'logging': {'message-format': custom_log_format}}
-    user_config_file = str(tmpdir.join('buildstream.conf'))
-    _yaml.dump(_yaml.node_sanitize(user_config), filename=user_config_file)
+    cli.configure(user_config)
 
     # Create our repo object of the given source type with
     # the bin files, and then collect the initial ref.
@@ -75,7 +74,7 @@ def test_custom_logging(cli, tmpdir, datafiles):
                             element_name))
 
     # Now try to fetch it
-    result = cli.run(project=project, args=['-c', user_config_file, 'fetch', element_name])
+    result = cli.run(project=project, args=['fetch', element_name])
     result.assert_success()
 
     m = re.search("\d\d:\d\d:\d\d,\d\d:\d\d:\d\d.\d{6},\d\d:\d\d:\d\d,,,SUCCESS,Checking sources", result.stderr)
diff --git a/tests/frontend/mirror.py b/tests/frontend/mirror.py
index f6031cad812452b2afb523a1742167cc297aaba1..91a87433576468fd30e9f2282abc247080e4629e 100644
--- a/tests/frontend/mirror.py
+++ b/tests/frontend/mirror.py
@@ -139,13 +139,85 @@ def test_mirror_fetch(cli, tmpdir, datafiles, kind):
     result.assert_success()
 
 
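+# Test that fetching sources works with both ref-storage modes, whether
+# no mirror, a matching mirror or an additional unrelated mirror is
+# configured.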
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("ref_storage", [("inline"), ("project.refs")])
+@pytest.mark.parametrize("mirror", [("no-mirror"), ("mirror"), ("unrelated-mirror")])
+def test_mirror_fetch_ref_storage(cli, tmpdir, datafiles, ref_storage, mirror):
+    bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
+    dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
+    upstream_repodir = os.path.join(str(tmpdir), 'upstream')
+    mirror_repodir = os.path.join(str(tmpdir), 'mirror')
+    project_dir = os.path.join(str(tmpdir), 'project')
+    os.makedirs(project_dir)
+    element_dir = os.path.join(project_dir, 'elements')
+
+    # Create repo objects of the upstream and mirror
+    upstream_repo = create_repo('tar', upstream_repodir)
+    upstream_ref = upstream_repo.create(bin_files_path)
+    mirror_repo = upstream_repo.copy(mirror_repodir)
+    mirror_ref = upstream_ref
+    upstream_ref = upstream_repo.create(dev_files_path)
+
+    element = {
+        'kind': 'import',
+        'sources': [
+            upstream_repo.source_config(ref=upstream_ref if ref_storage == 'inline' else None)
+        ]
+    }
+    element_name = 'test.bst'
+    element_path = os.path.join(element_dir, element_name)
+    full_repo = element['sources'][0]['url']
+    upstream_map, repo_name = os.path.split(full_repo)
+    alias = 'foo'
+    aliased_repo = alias + ':' + repo_name
+    element['sources'][0]['url'] = aliased_repo
+    full_mirror = mirror_repo.source_config()['url']
+    mirror_map, _ = os.path.split(full_mirror)
+    os.makedirs(element_dir)
+    _yaml.dump(element, element_path)
+
+    if ref_storage == 'project.refs':
+        # Manually set project.refs to avoid caching the repo prematurely
+        project_refs = {'projects': {
+            'test': {
+                element_name: [
+                    {'ref': upstream_ref}
+                ]
+            }
+        }}
+        project_refs_path = os.path.join(project_dir, 'project.refs')
+        _yaml.dump(project_refs, project_refs_path)
+
+    project = {
+        'name': 'test',
+        'element-path': 'elements',
+        'aliases': {
+            alias: upstream_map + "/"
+        },
+        'ref-storage': ref_storage
+    }
+    if mirror != 'no-mirror':
+        mirror_data = [{
+            'name': 'middle-earth',
+            'aliases': {alias: [mirror_map + '/']}
+        }]
+        if mirror == 'unrelated-mirror':
+            mirror_data.insert(0, {
+                'name': 'narnia',
+                'aliases': {'frob': ['http://www.example.com/repo']}
+            })
+        project['mirrors'] = mirror_data
+
+    project_file = os.path.join(project_dir, 'project.conf')
+    _yaml.dump(project, project_file)
+
+    result = cli.run(project=project_dir, args=['fetch', element_name])
+    result.assert_success()
+
+
 @pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
 def test_mirror_fetch_upstream_absent(cli, tmpdir, datafiles, kind):
-    if kind == 'ostree':
-        # FIXME: Mirroring fallback fails with ostree
-        pytest.skip("Bug #538 - ostree mirror fallback breaks assertion")
-
     bin_files_path = os.path.join(str(datafiles), 'files', 'bin-files', 'usr')
     dev_files_path = os.path.join(str(datafiles), 'files', 'dev-files', 'usr')
     upstream_repodir = os.path.join(str(tmpdir), 'upstream')
diff --git a/tests/frontend/project/elements/rebuild-target.bst b/tests/frontend/project/elements/rebuild-target.bst
new file mode 100644
index 0000000000000000000000000000000000000000..49a02c217c774e18e07c6df664b891682da2d498
--- /dev/null
+++ b/tests/frontend/project/elements/rebuild-target.bst
@@ -0,0 +1,4 @@
+kind: compose
+
+build-depends:
+- target.bst
diff --git a/tests/frontend/project/elements/source-bundle/source-bundle-hello.bst b/tests/frontend/project/elements/source-bundle/source-bundle-hello.bst
new file mode 100644
index 0000000000000000000000000000000000000000..98c3a9556da5379bcc75371796d498eae2868bba
--- /dev/null
+++ b/tests/frontend/project/elements/source-bundle/source-bundle-hello.bst
@@ -0,0 +1,6 @@
+kind: import
+description: the kind of this element must implement the generate_script() method
+
+sources:
+- kind: local
+  path: files/source-bundle
diff --git a/tests/frontend/project/files/source-bundle/llamas.txt b/tests/frontend/project/files/source-bundle/llamas.txt
new file mode 100644
index 0000000000000000000000000000000000000000..f98b24871e7bab63126b9ae3aa97e0c69302a7d7
--- /dev/null
+++ b/tests/frontend/project/files/source-bundle/llamas.txt
@@ -0,0 +1 @@
+llamas
diff --git a/tests/frontend/project/sources/fetch_source.py b/tests/frontend/project/sources/fetch_source.py
index ebd3fe7571affb406f56799c608beab085c351f9..10e89960caa2a04223bd602a75065e94a0c21946 100644
--- a/tests/frontend/project/sources/fetch_source.py
+++ b/tests/frontend/project/sources/fetch_source.py
@@ -15,14 +15,17 @@ from buildstream import Source, Consistency, SourceError, SourceFetcher
 
 
 class FetchFetcher(SourceFetcher):
-    def __init__(self, source, url):
+    def __init__(self, source, url, primary=False):
         super().__init__()
         self.source = source
         self.original_url = url
+        self.primary = primary
         self.mark_download_url(url)
 
     def fetch(self, alias_override=None):
-        url = self.source.translate_url(self.original_url, alias_override=alias_override)
+        url = self.source.translate_url(self.original_url,
+                                        alias_override=alias_override,
+                                        primary=self.primary)
         with open(self.source.output_file, "a") as f:
             success = url in self.source.fetch_succeeds and self.source.fetch_succeeds[url]
             message = "Fetch {} {} from {}\n".format(self.original_url,
@@ -37,12 +40,21 @@ class FetchSource(Source):
     # Read config to know which URLs to fetch
     def configure(self, node):
         self.original_urls = self.node_get_member(node, list, 'urls')
-        self.fetchers = [FetchFetcher(self, url) for url in self.original_urls]
         self.output_file = self.node_get_member(node, str, 'output-text')
         self.fetch_succeeds = {}
         if 'fetch-succeeds' in node:
             self.fetch_succeeds = {x[0]: x[1] for x in self.node_items(node['fetch-succeeds'])}
 
+        # First URL is the primary one for this test
+        #
+        primary = True
+        self.fetchers = []
+        for url in self.original_urls:
+            self.mark_download_url(url, primary=primary)
+            fetcher = FetchFetcher(self, url, primary=primary)
+            self.fetchers.append(fetcher)
+            primary = False
+
     def get_source_fetchers(self):
         return self.fetchers
 
diff --git a/tests/frontend/pull.py b/tests/frontend/pull.py
index 9d2d5d1a287cb070c4570d02cdff12d905f24c36..c883e20307dba2fcfdce9e3461b86bdb35dd5f63 100644
--- a/tests/frontend/pull.py
+++ b/tests/frontend/pull.py
@@ -338,3 +338,23 @@ def test_pull_missing_blob(cli, tmpdir, datafiles):
 
         # Assert that no artifacts were pulled
         assert len(result.get_pulled_elements()) == 0
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_pull_missing_notifies_user(caplog, cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    caplog.set_level(1)
+
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+        cli.configure({
+            'artifacts': {'url': share.repo}
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+
+        result.assert_success()
+        assert not result.get_pulled_elements(), \
+            "No elements should have been pulled since the cache was empty"
+
+        assert "INFO    Remote ({}) does not have".format(share.repo) in result.stderr
+        assert "SKIPPED Pull" in result.stderr
diff --git a/tests/frontend/push.py b/tests/frontend/push.py
index f351e33be0d582c6a3063d7127a44f2ebe0ef766..f2d6814d61983eaea200da93301ea95f8c78a766 100644
--- a/tests/frontend/push.py
+++ b/tests/frontend/push.py
@@ -386,3 +386,26 @@ def test_push_cross_junction(cli, tmpdir, datafiles):
 
         cache_key = cli.get_element_key(project, 'junction.bst:import-etc.bst')
         assert share.has_artifact('subtest', 'import-etc.bst', cache_key)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_push_already_cached(caplog, cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    caplog.set_level(1)
+
+    with create_artifact_share(os.path.join(str(tmpdir), 'artifactshare')) as share:
+
+        cli.configure({
+            'artifacts': {'url': share.repo, 'push': True}
+        })
+        result = cli.run(project=project, args=['build', 'target.bst'])
+
+        result.assert_success()
+        assert "SKIPPED Push" not in result.stderr
+
+        result = cli.run(project=project, args=['push', 'target.bst'])
+
+        result.assert_success()
+        assert not result.get_pushed_elements(), "No elements should have been pushed since the cache was populated"
+        assert "INFO    Remote ({}) already has ".format(share.repo) in result.stderr
+        assert "SKIPPED Push" in result.stderr
diff --git a/tests/frontend/rebuild.py b/tests/frontend/rebuild.py
new file mode 100644
index 0000000000000000000000000000000000000000..d93aac0dcf0f5b76f76a04739e8fa73ede7ab5c5
--- /dev/null
+++ b/tests/frontend/rebuild.py
@@ -0,0 +1,36 @@
+import os
+import pytest
+from tests.testutils import cli
+
+# Project directory
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
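+# Prepend --no-strict to the given arguments when not running in strict mode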
+def strict_args(args, strict):
+    if strict != "strict":
+        return ['--no-strict'] + args
+    return args
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("strict", ["strict", "non-strict"])
+def test_rebuild(datafiles, cli, strict):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    checkout = os.path.join(cli.directory, 'checkout')
+
+    # First build intermediate target.bst
+    result = cli.run(project=project, args=strict_args(['build', 'target.bst'], strict))
+    result.assert_success()
+
+    # Modify base import
+    with open(os.path.join(project, 'files', 'dev-files', 'usr', 'include', 'new.h'), "w") as f:
+        f.write("#define NEW")
+
+    # Rebuild base import and build top-level rebuild-target.bst
+    # In non-strict mode, this does not rebuild intermediate target.bst,
+    # which means that a weakly cached target.bst will be staged as a dependency.
+    result = cli.run(project=project, args=strict_args(['build', 'rebuild-target.bst'], strict))
+    result.assert_success()
diff --git a/tests/frontend/source_bundle.py b/tests/frontend/source_bundle.py
new file mode 100644
index 0000000000000000000000000000000000000000..f72e80a3bcfd8674db8a120d2e579de21daf9aff
--- /dev/null
+++ b/tests/frontend/source_bundle.py
@@ -0,0 +1,48 @@
+#
+#  Copyright (C) 2018 Bloomberg Finance LP
+#
+#  This program is free software; you can redistribute it and/or
+#  modify it under the terms of the GNU Lesser General Public
+#  License as published by the Free Software Foundation; either
+#  version 2 of the License, or (at your option) any later version.
+#
+#  This library is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+#  Lesser General Public License for more details.
+#
+#  You should have received a copy of the GNU Lesser General Public
+#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
+#
+#  Authors: Chandan Singh <csingh43@bloomberg.net>
+#
+
+import os
+import tarfile
+
+import pytest
+
+from tests.testutils import cli
+
+# Project directory
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project",
+)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_source_bundle(cli, tmpdir, datafiles):
+    project_path = os.path.join(datafiles.dirname, datafiles.basename)
+    element_name = 'source-bundle/source-bundle-hello.bst'
+    normal_name = 'source-bundle-source-bundle-hello'
+
+    # Verify that we can correctly produce a source-bundle
+    args = ['source-bundle', element_name, '--directory', str(tmpdir)]
+    result = cli.run(project=project_path, args=args)
+    result.assert_success()
+
+    # Verify that the source-bundle contains our sources and a build script
+    with tarfile.open(os.path.join(str(tmpdir), '{}.tar.gz'.format(normal_name))) as bundle:
+        assert os.path.join(normal_name, 'source', normal_name, 'llamas.txt') in bundle.getnames()
+        assert os.path.join(normal_name, 'build.sh') in bundle.getnames()
diff --git a/tests/frontend/track.py b/tests/frontend/track.py
index 73b63ec4c94129623fa439abbe41a3c5f10b001b..c7921fe4cddae0a40f00e85b4790250a5cecd550 100644
--- a/tests/frontend/track.py
+++ b/tests/frontend/track.py
@@ -1,3 +1,4 @@
+import stat
 import os
 import pytest
 from tests.testutils import cli, create_repo, ALL_REPO_KINDS, generate_junction
@@ -634,3 +635,36 @@ def test_track_junction_included(cli, tmpdir, datafiles, ref_storage, kind):
 
     result = cli.run(project=project, args=['track', 'junction.bst'])
     result.assert_success()
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS])
+def test_track_error_cannot_write_file(cli, tmpdir, datafiles, kind):
+    if os.geteuid() == 0:
+        pytest.skip("This is not testable with root permissions")
+
+    project = str(datafiles)
+    dev_files_path = os.path.join(project, 'files', 'dev-files')
+    element_path = os.path.join(project, 'elements')
+    element_name = 'track-test-{}.bst'.format(kind)
+
+    configure_project(project, {
+        'ref-storage': 'inline'
+    })
+
+    repo = create_repo(kind, str(tmpdir))
+    ref = repo.create(dev_files_path)
+
+    element_full_path = os.path.join(element_path, element_name)
+    generate_element(repo, element_full_path)
+
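+    # Remove write permission from the element directory so that the tracked ref cannot be written back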
+    st = os.stat(element_path)
+    try:
+        write_mask = stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH
+        os.chmod(element_path, stat.S_IMODE(st.st_mode) & ~write_mask)
+
+        result = cli.run(project=project, args=['track', element_name])
+        result.assert_main_error(ErrorDomain.STREAM, None)
+        result.assert_task_error(ErrorDomain.SOURCE, 'save-ref-error')
+    finally:
+        os.chmod(element_path, stat.S_IMODE(st.st_mode))
diff --git a/tests/frontend/workspace.py b/tests/frontend/workspace.py
index c7af0a70fd942218ca166760452c57acc57afe90..51b7d608894dd5fac55af329b55e4f67a87cf8ac 100644
--- a/tests/frontend/workspace.py
+++ b/tests/frontend/workspace.py
@@ -43,10 +43,14 @@ DATA_DIR = os.path.join(
 )
 
 
-def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir=None):
+def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir=None,
+                   project_path=None, element_attrs=None):
     if not workspace_dir:
         workspace_dir = os.path.join(str(tmpdir), 'workspace{}'.format(suffix))
-    project_path = os.path.join(datafiles.dirname, datafiles.basename)
+    if not project_path:
+        project_path = os.path.join(datafiles.dirname, datafiles.basename)
+    else:
+        shutil.copytree(os.path.join(datafiles.dirname, datafiles.basename), project_path)
     bin_files_path = os.path.join(project_path, 'files', 'bin-files')
     element_path = os.path.join(project_path, 'elements')
     element_name = 'workspace-test-{}{}.bst'.format(kind, suffix)
@@ -66,6 +70,8 @@ def open_workspace(cli, tmpdir, datafiles, kind, track, suffix='', workspace_dir
             repo.source_config(ref=ref)
         ]
     }
+    if element_attrs:
+        element = {**element, **element_attrs}
     _yaml.dump(element,
                os.path.join(element_path,
                             element_name))
@@ -218,41 +224,42 @@ def test_close(cli, tmpdir, datafiles, kind):
 
 @pytest.mark.datafiles(DATA_DIR)
 def test_close_external_after_move_project(cli, tmpdir, datafiles):
-    tmp_parent = os.path.dirname(str(tmpdir))
-    workspace_dir = os.path.join(tmp_parent, "workspace")
-    element_name, project_path, _ = open_workspace(cli, tmpdir, datafiles, 'git', False, "", workspace_dir)
+    workspace_dir = os.path.join(str(tmpdir), "workspace")
+    project_path = os.path.join(str(tmpdir), 'initial_project')
+    element_name, _, _ = open_workspace(cli, tmpdir, datafiles, 'git', False, "", workspace_dir, project_path)
     assert os.path.exists(workspace_dir)
-    tmp_dir = os.path.join(tmp_parent, 'external_project')
-    shutil.move(project_path, tmp_dir)
-    assert os.path.exists(tmp_dir)
+    moved_dir = os.path.join(str(tmpdir), 'external_project')
+    shutil.move(project_path, moved_dir)
+    assert os.path.exists(moved_dir)
 
     # Close the workspace
-    result = cli.run(configure=False, project=tmp_dir, args=[
+    result = cli.run(project=moved_dir, args=[
         'workspace', 'close', '--remove-dir', element_name
     ])
     result.assert_success()
 
     # Assert the workspace dir has been deleted
     assert not os.path.exists(workspace_dir)
-    # Move directory back inside tmp directory so it can be recognised
-    shutil.move(tmp_dir, project_path)
 
 
 @pytest.mark.datafiles(DATA_DIR)
 def test_close_internal_after_move_project(cli, tmpdir, datafiles):
-    element_name, project, _ = open_workspace(cli, tmpdir, datafiles, 'git', False)
-    tmp_dir = os.path.join(os.path.dirname(str(tmpdir)), 'external_project')
-    shutil.move(str(tmpdir), tmp_dir)
-    assert os.path.exists(tmp_dir)
+    initial_dir = os.path.join(str(tmpdir), 'initial_project')
+    initial_workspace = os.path.join(initial_dir, 'workspace')
+    element_name, _, _ = open_workspace(cli, tmpdir, datafiles, 'git', False,
+                                        workspace_dir=initial_workspace, project_path=initial_dir)
+    moved_dir = os.path.join(str(tmpdir), 'internal_project')
+    shutil.move(initial_dir, moved_dir)
+    assert os.path.exists(moved_dir)
 
     # Close the workspace
-    result = cli.run(configure=False, project=tmp_dir, args=[
+    result = cli.run(project=moved_dir, args=[
         'workspace', 'close', '--remove-dir', element_name
     ])
     result.assert_success()
 
     # Assert the workspace dir has been deleted
-    workspace = os.path.join(tmp_dir, 'workspace')
+    workspace = os.path.join(moved_dir, 'workspace')
     assert not os.path.exists(workspace)
 
 
@@ -767,3 +774,105 @@ def test_list_supported_workspace(cli, tmpdir, datafiles, workspace_cfg, expecte
     # Check that workspace config is converted correctly if necessary
     loaded_config = _yaml.node_sanitize(_yaml.load(workspace_config_path))
     assert loaded_config == parse_dict_as_yaml(expected)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("kind", repo_kinds)
+def test_inconsistent_pipeline_message(cli, tmpdir, datafiles, kind):
+    element_name, project, workspace = open_workspace(cli, tmpdir, datafiles, kind, False)
+
+    shutil.rmtree(workspace)
+
+    result = cli.run(project=project, args=[
+        'build', element_name
+    ])
+    result.assert_main_error(ErrorDomain.PIPELINE, "inconsistent-pipeline-workspaced")
+
+
+@pytest.mark.datafiles(DATA_DIR)
+@pytest.mark.parametrize("strict", [("strict"), ("non-strict")])
+def test_cache_key_workspace_in_dependencies(cli, tmpdir, datafiles, strict):
+    checkout = os.path.join(str(tmpdir), 'checkout')
+    element_name, project, workspace = open_workspace(cli, os.path.join(str(tmpdir), 'repo-a'),
+                                                      datafiles, 'git', False)
+
+    element_path = os.path.join(project, 'elements')
+    back_dep_element_name = 'workspace-test-back-dep.bst'
+
+    # Write out our test target
+    element = {
+        'kind': 'compose',
+        'depends': [
+            {
+                'filename': element_name,
+                'type': 'build'
+            }
+        ]
+    }
+    _yaml.dump(element,
+               os.path.join(element_path,
+                            back_dep_element_name))
+
+    # Modify workspace
+    shutil.rmtree(os.path.join(workspace, 'usr', 'bin'))
+    os.makedirs(os.path.join(workspace, 'etc'))
+    with open(os.path.join(workspace, 'etc', 'pony.conf'), 'w') as f:
+        f.write("PONY='pink'")
+
+    # Configure strict mode
+    strict_mode = True
+    if strict != 'strict':
+        strict_mode = False
+    cli.configure({
+        'projects': {
+            'test': {
+                'strict': strict_mode
+            }
+        }
+    })
+
+    # Build artifact with dependency's modified workspace
+    assert cli.get_element_state(project, element_name) == 'buildable'
+    assert cli.get_element_key(project, element_name) == "{:?<64}".format('')
+    assert cli.get_element_state(project, back_dep_element_name) == 'waiting'
+    assert cli.get_element_key(project, back_dep_element_name) == "{:?<64}".format('')
+    result = cli.run(project=project, args=['build', back_dep_element_name])
+    result.assert_success()
+    assert cli.get_element_state(project, element_name) == 'cached'
+    assert cli.get_element_key(project, element_name) != "{:?<64}".format('')
+    assert cli.get_element_state(project, back_dep_element_name) == 'cached'
+    assert cli.get_element_key(project, back_dep_element_name) != "{:?<64}".format('')
+    result = cli.run(project=project, args=['build', back_dep_element_name])
+    result.assert_success()
+
+    # Checkout the result
+    result = cli.run(project=project, args=[
+        'checkout', back_dep_element_name, checkout
+    ])
+    result.assert_success()
+
+    # Check that the pony.conf from the modified workspace exists
+    filename = os.path.join(checkout, 'etc', 'pony.conf')
+    assert os.path.exists(filename)
+
+    # Check that the original /usr/bin/hello is not in the checkout
+    assert not os.path.exists(os.path.join(checkout, 'usr', 'bin', 'hello'))
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_multiple_failed_builds(cli, tmpdir, datafiles):
+    element_config = {
+        "kind": "manual",
+        "config": {
+            "configure-commands": [
+                "unknown_command_that_will_fail"
+            ]
+        }
+    }
+    element_name, project, _ = open_workspace(cli, tmpdir, datafiles,
+                                              "git", False, element_attrs=element_config)
+
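+    # Build the failing element twice; neither attempt should hit a BUG or report the element as cached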
+    for _ in range(2):
+        result = cli.run(project=project, args=["build", element_name])
+        assert "BUG" not in result.stderr
+        assert cli.get_element_state(project, element_name) != "cached"
diff --git a/tests/frontend/yamlcache.py b/tests/frontend/yamlcache.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b03c70ccd9de1af7fb22c0ac4a7dce1cec5b0ab
--- /dev/null
+++ b/tests/frontend/yamlcache.py
@@ -0,0 +1,142 @@
+import os
+import pytest
+import hashlib
+import tempfile
+from ruamel import yaml
+
+from tests.testutils import cli, generate_junction, create_element_size, create_repo
+from buildstream import _yaml
+from buildstream._yamlcache import YamlCache
+from buildstream._project import Project
+from buildstream._context import Context
+from contextlib import contextmanager
+
+
+def generate_project(tmpdir, ref_storage, with_junction, name="test"):
+    if with_junction == 'junction':
+        subproject_dir = generate_project(
+            tmpdir, ref_storage,
+            'no-junction', name='test-subproject'
+        )
+
+    project_dir = os.path.join(tmpdir, name)
+    os.makedirs(project_dir)
+    # project.conf
+    project_conf_path = os.path.join(project_dir, 'project.conf')
+    elements_path = 'elements'
+    project_conf = {
+        'name': name,
+        'element-path': elements_path,
+        'ref-storage': ref_storage,
+    }
+    _yaml.dump(project_conf, project_conf_path)
+
+    # elements
+    if with_junction == 'junction':
+        junction_name = 'junction.bst'
+        junction_dir = os.path.join(project_dir, elements_path)
+        junction_path = os.path.join(project_dir, elements_path, junction_name)
+        os.makedirs(junction_dir)
+        generate_junction(tmpdir, subproject_dir, junction_path)
+        element_depends = [{'junction': junction_name, 'filename': 'test.bst'}]
+    else:
+        element_depends = []
+
+    element_name = 'test.bst'
+    create_element_size(element_name, project_dir, elements_path, element_depends, 1)
+
+    return project_dir
+
+
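+# Open the YamlCache for a project directory, yielding the cache together with its Project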
+@contextmanager
+def with_yamlcache(project_dir):
+    context = Context()
+    project = Project(project_dir, context)
+    cache_file = YamlCache.get_cache_file(project_dir)
+    with YamlCache.open(context, cache_file) as yamlcache:
+        yield yamlcache, project
+
+
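+# Calculate the key the YamlCache would use for the contents of the given file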
+def yamlcache_key(yamlcache, in_file, copy_tree=False):
+    with open(in_file) as f:
+        key = yamlcache._calculate_key(f.read(), copy_tree)
+    return key
+
+
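+# Write a copy of input_file with an extra 'variables' section appended, returning its path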
+def modified_file(input_file, tmpdir):
+    with open(input_file) as f:
+        data = f.read()
+    assert 'variables' not in data
+    data += '\nvariables: {modified: True}\n'
+    fd, temppath = tempfile.mkstemp(dir=tmpdir, text=True)
+    with os.fdopen(fd, 'w') as f:
+        f.write(data)
+
+    return temppath
+
+
+@pytest.mark.parametrize('ref_storage', ['inline', 'project.refs'])
+@pytest.mark.parametrize('with_junction', ['no-junction', 'junction'])
+@pytest.mark.parametrize('move_project', ['move', 'no-move'])
+def test_yamlcache_used(cli, tmpdir, ref_storage, with_junction, move_project):
+    # Generate the project
+    project = generate_project(str(tmpdir), ref_storage, with_junction)
+    if with_junction == 'junction':
+        result = cli.run(project=project, args=['fetch', '--track', 'junction.bst'])
+        result.assert_success()
+
+    # bst show to put it in the cache
+    result = cli.run(project=project, args=['show', 'test.bst'])
+    result.assert_success()
+
+    element_path = os.path.join(project, 'elements', 'test.bst')
+    with with_yamlcache(project) as (yc, prj):
+        # Check that it's in the cache
+        assert yc.is_cached(prj, element_path)
+
+        # *Absolutely* horrible cache corruption to check it's being used
+        # Modifying the data from the cache is fraught with danger,
+        # so instead I'll load a modified version of the original file
+        temppath = modified_file(element_path, str(tmpdir))
+        contents = _yaml.load(temppath, copy_tree=False, project=prj)
+        key = yamlcache_key(yc, element_path)
+        yc.put_from_key(prj, element_path, key, contents)
+
+    # Show that a variable has been added
+    result = cli.run(project=project, args=['show', '--format', '%{vars}', 'test.bst'])
+    result.assert_success()
+    data = yaml.safe_load(result.output)
+    assert 'modified' in data
+    assert data['modified'] == 'True'
+
+
+@pytest.mark.parametrize('ref_storage', ['inline', 'project.refs'])
+@pytest.mark.parametrize('with_junction', ['junction', 'no-junction'])
+def test_yamlcache_changed_file(cli, tmpdir, ref_storage, with_junction):
+    # i.e. a file is cached, the file is changed, loading the file (with cache) returns new data
+    # inline and junction can only be changed by opening a workspace
+    # Generate the project
+    project = generate_project(str(tmpdir), ref_storage, with_junction)
+    if with_junction == 'junction':
+        result = cli.run(project=project, args=['fetch', '--track', 'junction.bst'])
+        result.assert_success()
+
+    # bst show to put it in the cache
+    result = cli.run(project=project, args=['show', 'test.bst'])
+    result.assert_success()
+
+    element_path = os.path.join(project, 'elements', 'test.bst')
+    with with_yamlcache(project) as (yc, prj):
+        # Check that it's in the cache then modify
+        assert yc.is_cached(prj, element_path)
+        with open(element_path, "a") as f:
+            f.write('\nvariables: {modified: True}\n')
+        # Load modified yaml cache file into cache
+        _yaml.load(element_path, copy_tree=False, project=prj, yaml_cache=yc)
+
+    # Show that a variable has been added
+    result = cli.run(project=project, args=['show', '--format', '%{vars}', 'test.bst'])
+    result.assert_success()
+    data = yaml.safe_load(result.output)
+    assert 'modified' in data
+    assert data['modified'] == 'True'
diff --git a/tests/integration/autotools.py b/tests/integration/autotools.py
index 6ea2b667cc4f01c14d83b57a8b50a6ba7b9d8883..3c498136511add2bbabf1252bad6d03e8796c952 100644
--- a/tests/integration/autotools.py
+++ b/tests/integration/autotools.py
@@ -38,6 +38,30 @@ def test_autotools_build(cli, tmpdir, datafiles):
                                '/usr/share/doc/amhello/README'])
 
 
+# Test that an autotools build with conf-root set 'works' - we use the
+# autotools sample amhello project for this.
+@pytest.mark.integration
+@pytest.mark.datafiles(DATA_DIR)
+def test_autotools_confroot_build(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    checkout = os.path.join(cli.directory, 'checkout')
+    element_name = 'autotools/amhelloconfroot.bst'
+
+    result = cli.run(project=project, args=['build', element_name])
+    assert result.exit_code == 0
+
+    result = cli.run(project=project, args=['checkout', element_name, checkout])
+    assert result.exit_code == 0
+
+    assert_contains(checkout, ['/usr', '/usr/lib', '/usr/bin',
+                               '/usr/share', '/usr/lib/debug',
+                               '/usr/lib/debug/usr', '/usr/lib/debug/usr/bin',
+                               '/usr/lib/debug/usr/bin/hello',
+                               '/usr/bin/hello', '/usr/share/doc',
+                               '/usr/share/doc/amhello',
+                               '/usr/share/doc/amhello/README'])
+
+
 # Test running an executable built with autotools
 @pytest.mark.datafiles(DATA_DIR)
 def test_autotools_run(cli, tmpdir, datafiles):
diff --git a/tests/integration/cachedfail.py b/tests/integration/cachedfail.py
index f4cabb32c1275889d54525feff152223c3368ba6..4d89ca11af21282c86e216ce3924218f38a3f2c0 100644
--- a/tests/integration/cachedfail.py
+++ b/tests/integration/cachedfail.py
@@ -121,7 +121,7 @@ def test_build_depend_on_cached_fail(cli, tmpdir, datafiles):
 
 @pytest.mark.skipif(not IS_LINUX, reason='Only available on linux')
 @pytest.mark.datafiles(DATA_DIR)
-@pytest.mark.parametrize("on_error", ("continue",))
+@pytest.mark.parametrize("on_error", ("continue", "quit"))
 def test_push_cached_fail(cli, tmpdir, datafiles, on_error):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     element_path = os.path.join(project, 'elements', 'element.bst')
diff --git a/tests/integration/cmake.py b/tests/integration/cmake.py
index 3c16b29b979573d85550e2d10def5abd11542f18..e74958b919914c40ccfd2365d356c66e97417601 100644
--- a/tests/integration/cmake.py
+++ b/tests/integration/cmake.py
@@ -32,6 +32,24 @@ def test_cmake_build(cli, tmpdir, datafiles):
                                '/usr/lib/debug/usr/bin/hello'])
 
 
+@pytest.mark.datafiles(DATA_DIR)
+def test_cmake_confroot_build(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    checkout = os.path.join(cli.directory, 'checkout')
+    element_name = 'cmake/cmakeconfroothello.bst'
+
+    result = cli.run(project=project, args=['build', element_name])
+    assert result.exit_code == 0
+
+    result = cli.run(project=project, args=['checkout', element_name, checkout])
+    assert result.exit_code == 0
+
+    assert_contains(checkout, ['/usr', '/usr/bin', '/usr/bin/hello',
+                               '/usr/lib/debug', '/usr/lib/debug/usr',
+                               '/usr/lib/debug/usr/bin',
+                               '/usr/lib/debug/usr/bin/hello'])
+
+
 @pytest.mark.datafiles(DATA_DIR)
 def test_cmake_run(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
diff --git a/tests/integration/manual.py b/tests/integration/manual.py
index e71ccdd79cb82db065642ca2845a4ae801ff19dd..241ea37a800b62827af387581e6a07da43699cca 100644
--- a/tests/integration/manual.py
+++ b/tests/integration/manual.py
@@ -64,7 +64,7 @@ strip
 
 
 @pytest.mark.datafiles(DATA_DIR)
-def test_manual_element_noparallel(cli, tmpdir, datafiles):
+def test_manual_element_environment(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     checkout = os.path.join(cli.directory, 'checkout')
     element_path = os.path.join(project, 'elements')
@@ -72,15 +72,11 @@ def test_manual_element_noparallel(cli, tmpdir, datafiles):
 
     create_manual_element(element_name, element_path, {
         'install-commands': [
-            "echo $MAKEFLAGS >> test",
             "echo $V >> test",
             "cp test %{install-root}"
         ]
     }, {
-        'max-jobs': 2,
-        'notparallel': True
     }, {
-        'MAKEFLAGS': '-j%{max-jobs} -Wall',
         'V': 2
     })
 
@@ -93,13 +89,11 @@ def test_manual_element_noparallel(cli, tmpdir, datafiles):
     with open(os.path.join(checkout, 'test')) as f:
         text = f.read()
 
-    assert text == """-j1 -Wall
-2
-"""
+    assert text == "2\n"
 
 
 @pytest.mark.datafiles(DATA_DIR)
-def test_manual_element_environment(cli, tmpdir, datafiles):
+def test_manual_element_noparallel(cli, tmpdir, datafiles):
     project = os.path.join(datafiles.dirname, datafiles.basename)
     checkout = os.path.join(cli.directory, 'checkout')
     element_path = os.path.join(project, 'elements')
@@ -112,7 +106,7 @@ def test_manual_element_environment(cli, tmpdir, datafiles):
             "cp test %{install-root}"
         ]
     }, {
-        'max-jobs': 2
+        'notparallel': True
     }, {
         'MAKEFLAGS': '-j%{max-jobs} -Wall',
         'V': 2
@@ -127,6 +121,6 @@ def test_manual_element_environment(cli, tmpdir, datafiles):
     with open(os.path.join(checkout, 'test')) as f:
         text = f.read()
 
-    assert text == """-j2 -Wall
+    assert text == """-j1 -Wall
 2
 """
diff --git a/tests/integration/project/elements/autotools/amhelloconfroot.bst b/tests/integration/project/elements/autotools/amhelloconfroot.bst
new file mode 100644
index 0000000000000000000000000000000000000000..28926446b2b78ab2fa7052b9c2260984de827713
--- /dev/null
+++ b/tests/integration/project/elements/autotools/amhelloconfroot.bst
@@ -0,0 +1,15 @@
+kind: autotools
+description: Autotools test
+
+depends:
+- base.bst
+
+sources:
+- kind: tar
+  url: project_dir:/files/amhello.tar.gz
+  ref: 9ba123fa4e660929e9a0aa99f0c487b7eee59c5e7594f3284d015640b90f5590
+  directory: SourceFile
+
+variables:
+  conf-root: "%{build-root}/SourceFile"
+  command-subdir: build
diff --git a/tests/integration/project/elements/base/base-alpine.bst b/tests/integration/project/elements/base/base-alpine.bst
index 6a2313018181b09db450106b20714966d99410a1..687588f7ca3e9c3293b2d7e10394197be600599d 100644
--- a/tests/integration/project/elements/base/base-alpine.bst
+++ b/tests/integration/project/elements/base/base-alpine.bst
@@ -7,6 +7,6 @@ description: |
 
 sources:
   - kind: tar
-    url: sysroot:tarballs/integration-tests-base.v1.x86_64.tar.xz
+    url: alpine:integration-tests-base.v1.x86_64.tar.xz
     base-dir: ''
     ref: 3eb559250ba82b64a68d86d0636a6b127aa5f6d25d3601a79f79214dc9703639
diff --git a/tests/integration/project/elements/cmake/cmakeconfroothello.bst b/tests/integration/project/elements/cmake/cmakeconfroothello.bst
new file mode 100644
index 0000000000000000000000000000000000000000..cd33dee99e27a42ae8a9bfc84a506a56f113b3ff
--- /dev/null
+++ b/tests/integration/project/elements/cmake/cmakeconfroothello.bst
@@ -0,0 +1,15 @@
+kind: cmake
+description: CMake test
+
+depends:
+  - base.bst
+
+sources:
+  - kind: tar
+    directory: Source
+    url: project_dir:/files/cmakehello.tar.gz
+    ref: 508266f40dbc5875293bd24c4e50a9eb6b88cbacab742033f7b92f8c087b64e5
+
+variables:
+  conf-root: "%{build-root}/Source"
+  command-subdir: build
diff --git a/tests/integration/project/elements/integration.bst b/tests/integration/project/elements/integration.bst
new file mode 100644
index 0000000000000000000000000000000000000000..be21ae31be02625ea60e7f6417faaf5762a7a38c
--- /dev/null
+++ b/tests/integration/project/elements/integration.bst
@@ -0,0 +1,9 @@
+kind: manual
+depends:
+- base.bst
+
+public:
+  bst:
+    integration-commands:
+    - |
+      echo noise >/dev/null
diff --git a/tests/integration/project/elements/sockets/make-builddir-socket.bst b/tests/integration/project/elements/sockets/make-builddir-socket.bst
new file mode 100644
index 0000000000000000000000000000000000000000..c19cd85b09ab1476332126b6e8893deabdab73bb
--- /dev/null
+++ b/tests/integration/project/elements/sockets/make-builddir-socket.bst
@@ -0,0 +1,14 @@
+kind: manual
+
+depends:
+- filename: base.bst
+  type: build
+
+config:
+  build-commands:
+    - |
+      python3 -c '
+      from socket import socket, AF_UNIX, SOCK_STREAM
+      s = socket(AF_UNIX, SOCK_STREAM)
+      s.bind("testsocket")
+      '
diff --git a/tests/integration/project/elements/sockets/make-install-root-socket.bst b/tests/integration/project/elements/sockets/make-install-root-socket.bst
new file mode 100644
index 0000000000000000000000000000000000000000..85171bf54f32a541e0ff1bd4bf2c3dd825f2a981
--- /dev/null
+++ b/tests/integration/project/elements/sockets/make-install-root-socket.bst
@@ -0,0 +1,16 @@
+kind: manual
+
+depends:
+- filename: base.bst
+  type: build
+
+config:
+  install-commands:
+    - |
+      python3 -c '
+      from os.path import join
+      from sys import argv
+      from socket import socket, AF_UNIX, SOCK_STREAM
+      s = socket(AF_UNIX, SOCK_STREAM)
+      s.bind(join(argv[1], "testsocket"))
+      ' %{install-root}
diff --git a/tests/integration/project/elements/workspace/workspace-commanddir.bst b/tests/integration/project/elements/workspace/workspace-commanddir.bst
new file mode 100644
index 0000000000000000000000000000000000000000..d963346d71f7dc23c6e6c8a765427585671aa72f
--- /dev/null
+++ b/tests/integration/project/elements/workspace/workspace-commanddir.bst
@@ -0,0 +1,17 @@
+kind: manual
+description: Workspace mount test
+
+depends:
+  - filename: base.bst
+    type: build
+
+sources:
+  - kind: local
+    path: files/workspace-mount-src/
+
+variables:
+  command-subdir: build
+
+config:
+  build-commands:
+    - cc -c ../hello.c
diff --git a/tests/integration/project/project.conf b/tests/integration/project/project.conf
index c1a1c62ceeafdf4c5dc4e784152272cb3a82bd89..3bf128543f40abf35bfb0d01b0b86aa0fccdc4c6 100644
--- a/tests/integration/project/project.conf
+++ b/tests/integration/project/project.conf
@@ -2,7 +2,7 @@
 name: test
 element-path: elements
 aliases:
-  sysroot: https://gnome7.codethink.co.uk/
+  alpine: https://bst-integration-test-images.ams3.cdn.digitaloceanspaces.com/
   project_dir: file://{project_dir}
 options:
   linux:
diff --git a/tests/integration/shell.py b/tests/integration/shell.py
index 18953aa2d3cbde7536d390f1ddf759739a5733c4..947650ff1f8644b5f42206064ed687a31d146e38 100644
--- a/tests/integration/shell.py
+++ b/tests/integration/shell.py
@@ -342,3 +342,13 @@ def test_sysroot_workspace_visible(cli, tmpdir, datafiles):
     ])
     assert result.exit_code == 0
     assert result.output == workspace_hello
+
+
+# Test system integration commands can access devices in /dev
+@pytest.mark.datafiles(DATA_DIR)
+def test_integration_devices(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_name = 'integration.bst'
+
+    result = execute_shell(cli, project, ["true"], element=element_name)
+    assert result.exit_code == 0
diff --git a/tests/integration/sockets.py b/tests/integration/sockets.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2685062d8caa86852549c98efd28511e60f2da8
--- /dev/null
+++ b/tests/integration/sockets.py
@@ -0,0 +1,33 @@
+import os
+import pytest
+
+from buildstream import _yaml
+
+from tests.testutils import cli_integration as cli
+from tests.testutils.integration import assert_contains
+
+
+pytestmark = pytest.mark.integration
+
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    "project"
+)
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_builddir_socket_ignored(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_name = 'sockets/make-builddir-socket.bst'
+
+    result = cli.run(project=project, args=['build', element_name])
+    assert result.exit_code == 0
+
+
+@pytest.mark.datafiles(DATA_DIR)
+def test_install_root_socket_ignored(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    element_name = 'sockets/make-install-root-socket.bst'
+
+    result = cli.run(project=project, args=['build', element_name])
+    assert result.exit_code == 0
diff --git a/tests/integration/source-determinism.py b/tests/integration/source-determinism.py
index b60bc25f76900f35d26d6842436087f6f43040a5..d1760c26703979bf287b5fad8148e4deb2453930 100644
--- a/tests/integration/source-determinism.py
+++ b/tests/integration/source-determinism.py
@@ -2,7 +2,8 @@ import os
 import pytest
 
 from buildstream import _yaml, utils
-from tests.testutils import cli, create_repo, ALL_REPO_KINDS
+from tests.testutils import create_repo, ALL_REPO_KINDS
+from tests.testutils import cli_integration as cli
 
 
 DATA_DIR = os.path.join(
@@ -28,7 +29,7 @@ def create_test_directory(*path, mode=0o644):
 @pytest.mark.integration
 @pytest.mark.datafiles(DATA_DIR)
 @pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS] + ['local'])
-def test_deterministic_source_umask(cli, tmpdir, datafiles, kind):
+def test_deterministic_source_umask(cli, tmpdir, datafiles, kind, integration_cache):
     project = str(datafiles)
     element_name = 'list'
     element_path = os.path.join(project, 'elements', element_name)
@@ -91,14 +92,16 @@ def test_deterministic_source_umask(cli, tmpdir, datafiles, kind):
                 return f.read()
         finally:
             os.umask(old_umask)
-            cli.remove_artifact_from_cache(project, element_name)
+            cache_dir = os.path.join(integration_cache, 'artifacts')
+            cli.remove_artifact_from_cache(project, element_name,
+                                           cache_dir=cache_dir)
 
     assert get_value_for_umask(0o022) == get_value_for_umask(0o077)
 
 
 @pytest.mark.integration
 @pytest.mark.datafiles(DATA_DIR)
-def test_deterministic_source_local(cli, tmpdir, datafiles):
+def test_deterministic_source_local(cli, tmpdir, datafiles, integration_cache):
     """Only user rights should be considered for local source.
     """
     project = str(datafiles)
@@ -150,6 +153,8 @@ def test_deterministic_source_local(cli, tmpdir, datafiles):
             with open(os.path.join(checkoutdir, 'ls-l'), 'r') as f:
                 return f.read()
         finally:
-            cli.remove_artifact_from_cache(project, element_name)
+            cache_dir = os.path.join(integration_cache, 'artifacts')
+            cli.remove_artifact_from_cache(project, element_name,
+                                           cache_dir=cache_dir)
 
     assert get_value_for_mask(0o7777) == get_value_for_mask(0o0700)
diff --git a/tests/integration/workspace.py b/tests/integration/workspace.py
index 102d053fc26101618d5cc613712dd80e74ff1189..bcbcd674bf5e962de5067d6a4e9f78633960bfd2 100644
--- a/tests/integration/workspace.py
+++ b/tests/integration/workspace.py
@@ -32,6 +32,23 @@ def test_workspace_mount(cli, tmpdir, datafiles):
     assert os.path.exists(os.path.join(cli.directory, 'workspace'))
 
 
+@pytest.mark.integration
+@pytest.mark.datafiles(DATA_DIR)
+def test_workspace_commanddir(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+    workspace = os.path.join(cli.directory, 'workspace')
+    element_name = 'workspace/workspace-commanddir.bst'
+
+    res = cli.run(project=project, args=['workspace', 'open', element_name, workspace])
+    assert res.exit_code == 0
+
+    res = cli.run(project=project, args=['build', element_name])
+    assert res.exit_code == 0
+
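+    # The command-subdir 'build' directory should have been created inside the workspace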
+    assert os.path.exists(os.path.join(cli.directory, 'workspace'))
+    assert os.path.exists(os.path.join(cli.directory, 'workspace', 'build'))
+
+
 @pytest.mark.integration
 @pytest.mark.datafiles(DATA_DIR)
 def test_workspace_updated_dependency(cli, tmpdir, datafiles):
diff --git a/tests/loader/dependencies.py b/tests/loader/dependencies.py
index cb750fcb1fa0471bba64ed954681957078f8261f..98374f6d03d54803622a15f729d29a617eaf8563 100644
--- a/tests/loader/dependencies.py
+++ b/tests/loader/dependencies.py
@@ -110,6 +110,7 @@ def test_circular_dependency(datafiles):
         element = loader.load(['elements/circulartarget.bst'])[0]
 
     assert (exc.value.reason == LoadErrorReason.CIRCULAR_DEPENDENCY)
+    assert ("seconddep" in exc.value.args[0])
 
 
 @pytest.mark.datafiles(DATA_DIR)
diff --git a/tests/loader/variables.py b/tests/loader/variables.py
new file mode 100644
index 0000000000000000000000000000000000000000..9871d63c6bfa21b1c4aa860d7940c51467651f0a
--- /dev/null
+++ b/tests/loader/variables.py
@@ -0,0 +1,99 @@
+import os
+import pytest
+
+from buildstream import _yaml
+from buildstream._exceptions import ErrorDomain, LoadErrorReason
+from tests.testutils import cli
+
+DATA_DIR = os.path.join(
+    os.path.dirname(os.path.realpath(__file__)),
+    'variables',
+)
+
+PROTECTED_VARIABLES = [('project-name'), ('element-name'), ('max-jobs')]
+
+
+@pytest.mark.parametrize('protected_var', PROTECTED_VARIABLES)
+@pytest.mark.datafiles(DATA_DIR)
+def test_use_of_protected_var_project_conf(cli, tmpdir, datafiles, protected_var):
+    project = os.path.join(str(datafiles), 'simple')
+
+    conf = {
+        'name': 'test',
+        'variables': {
+            protected_var: 'some-value'
+        }
+    }
+    _yaml.dump(conf, os.path.join(project, 'project.conf'))
+
+    element = {
+        'kind': 'import',
+        'sources': [
+            {
+                'kind': 'local',
+                'path': 'foo.txt'
+            }
+        ],
+    }
+    _yaml.dump(element, os.path.join(project, 'target.bst'))
+
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    result.assert_main_error(ErrorDomain.LOAD,
+                             LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
+
+
+@pytest.mark.parametrize('protected_var', PROTECTED_VARIABLES)
+@pytest.mark.datafiles(DATA_DIR)
+def test_use_of_protected_var_element_overrides(cli, tmpdir, datafiles, protected_var):
+    project = os.path.join(str(datafiles), 'simple')
+
+    conf = {
+        'name': 'test',
+        'elements': {
+            'manual': {
+                'variables': {
+                    protected_var: 'some-value'
+                }
+            }
+        }
+    }
+    _yaml.dump(conf, os.path.join(project, 'project.conf'))
+
+    element = {
+        'kind': 'manual',
+        'sources': [
+            {
+                'kind': 'local',
+                'path': 'foo.txt'
+            }
+        ],
+    }
+    _yaml.dump(element, os.path.join(project, 'target.bst'))
+
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    result.assert_main_error(ErrorDomain.LOAD,
+                             LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
+
+
+@pytest.mark.parametrize('protected_var', PROTECTED_VARIABLES)
+@pytest.mark.datafiles(DATA_DIR)
+def test_use_of_protected_var_in_element(cli, tmpdir, datafiles, protected_var):
+    project = os.path.join(str(datafiles), 'simple')
+
+    element = {
+        'kind': 'import',
+        'sources': [
+            {
+                'kind': 'local',
+                'path': 'foo.txt'
+            }
+        ],
+        'variables': {
+            protected_var: 'some-value'
+        }
+    }
+    _yaml.dump(element, os.path.join(project, 'target.bst'))
+
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    result.assert_main_error(ErrorDomain.LOAD,
+                             LoadErrorReason.PROTECTED_VARIABLE_REDEFINED)
diff --git a/tests/loader/variables/simple/foo.txt b/tests/loader/variables/simple/foo.txt
new file mode 100644
index 0000000000000000000000000000000000000000..257cc5642cb1a054f08cc83f2d943e56fd3ebe99
--- /dev/null
+++ b/tests/loader/variables/simple/foo.txt
@@ -0,0 +1 @@
+foo
diff --git a/tests/loader/variables/simple/project.conf b/tests/loader/variables/simple/project.conf
new file mode 100644
index 0000000000000000000000000000000000000000..5a240e3ed2eed285939b773f2e7c8b20d27e921c
--- /dev/null
+++ b/tests/loader/variables/simple/project.conf
@@ -0,0 +1 @@
+name: foo
diff --git a/tests/plugins/filter.py b/tests/plugins/filter.py
index 4a5ff340270238aea66dc540e52c4cad2193dbc4..559815a8b3749145c2a96795730f2c84d0d5ac06 100644
--- a/tests/plugins/filter.py
+++ b/tests/plugins/filter.py
@@ -174,9 +174,8 @@ def test_filter_workspace_reset(datafiles, cli, tmpdir):
 
 
 @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
-@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
-def test_filter_track(datafiles, cli, tmpdir, kind):
-    repo = create_repo(kind, str(tmpdir))
+def test_filter_track(datafiles, cli, tmpdir):
+    repo = create_repo('git', str(tmpdir))
     ref = repo.create(os.path.join(str(datafiles), "files"))
     elements_dir = os.path.join(str(tmpdir), "elements")
     project = str(tmpdir)
@@ -228,9 +227,8 @@ def test_filter_track(datafiles, cli, tmpdir, kind):
 
 
 @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
-@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
-def test_filter_track_excepted(datafiles, cli, tmpdir, kind):
-    repo = create_repo(kind, str(tmpdir))
+def test_filter_track_excepted(datafiles, cli, tmpdir):
+    repo = create_repo('git', str(tmpdir))
     ref = repo.create(os.path.join(str(datafiles), "files"))
     elements_dir = os.path.join(str(tmpdir), "elements")
     project = str(tmpdir)
@@ -282,9 +280,8 @@ def test_filter_track_excepted(datafiles, cli, tmpdir, kind):
 
 
 @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
-@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
-def test_filter_track_multi_to_one(datafiles, cli, tmpdir, kind):
-    repo = create_repo(kind, str(tmpdir))
+def test_filter_track_multi_to_one(datafiles, cli, tmpdir):
+    repo = create_repo('git', str(tmpdir))
     ref = repo.create(os.path.join(str(datafiles), "files"))
     elements_dir = os.path.join(str(tmpdir), "elements")
     project = str(tmpdir)
@@ -336,9 +333,8 @@ def test_filter_track_multi_to_one(datafiles, cli, tmpdir, kind):
 
 
 @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
-@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
-def test_filter_track_multi(datafiles, cli, tmpdir, kind):
-    repo = create_repo(kind, str(tmpdir))
+def test_filter_track_multi(datafiles, cli, tmpdir):
+    repo = create_repo('git', str(tmpdir))
     ref = repo.create(os.path.join(str(datafiles), "files"))
     elements_dir = os.path.join(str(tmpdir), "elements")
     project = str(tmpdir)
@@ -398,9 +394,8 @@ def test_filter_track_multi(datafiles, cli, tmpdir, kind):
 
 
 @pytest.mark.datafiles(os.path.join(DATA_DIR, 'basic'))
-@pytest.mark.parametrize("kind", [(kind) for kind in ALL_REPO_KINDS if kind not in ("patch", "local")])
-def test_filter_track_multi_exclude(datafiles, cli, tmpdir, kind):
-    repo = create_repo(kind, str(tmpdir))
+def test_filter_track_multi_exclude(datafiles, cli, tmpdir):
+    repo = create_repo('git', str(tmpdir))
     ref = repo.create(os.path.join(str(datafiles), "files"))
     elements_dir = os.path.join(str(tmpdir), "elements")
     project = str(tmpdir)
diff --git a/tests/sources/deb.py b/tests/sources/deb.py
index 9df0dadf0a7cd837e6f23af8a230bef6e24b7da6..b5b0311616e7220e89ac71d46cf007519e2c4a79 100644
--- a/tests/sources/deb.py
+++ b/tests/sources/deb.py
@@ -56,7 +56,7 @@ def test_fetch_bad_url(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=[
         'fetch', 'target.bst'
     ])
-    assert "Try #" in result.stderr
+    assert "FAILURE Try #" in result.stderr
     result.assert_main_error(ErrorDomain.STREAM, None)
     result.assert_task_error(ErrorDomain.SOURCE, None)
 
diff --git a/tests/sources/git.py b/tests/sources/git.py
index 781d6d4d16984f4488da60ed947a58908aec7931..7ab32a6b5a286cb8be695ff3ea3529dffbf01ad0 100644
--- a/tests/sources/git.py
+++ b/tests/sources/git.py
@@ -25,6 +25,7 @@ import pytest
 
 from buildstream._exceptions import ErrorDomain
 from buildstream import _yaml
+from buildstream.plugin import CoreWarnings
 
 from tests.testutils import cli, create_repo
 from tests.testutils.site import HAVE_GIT
@@ -408,3 +409,117 @@ def test_submodule_track_no_ref_or_track(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=['show', 'target.bst'])
     result.assert_main_error(ErrorDomain.SOURCE, "missing-track-and-ref")
     result.assert_task_error(None, None)
+
+
+@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'template'))
+def test_ref_not_in_track_warn(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+
+    # Create the repo from 'repofiles', create a branch without latest commit
+    repo = create_repo('git', str(tmpdir))
+    ref = repo.create(os.path.join(project, 'repofiles'))
+
+    gitsource = repo.source_config(ref=ref)
+
+    # Overwrite the track value to the added branch
+    gitsource['track'] = 'foo'
+
+    # Write out our test target
+    element = {
+        'kind': 'import',
+        'sources': [
+            gitsource
+        ]
+    }
+    _yaml.dump(element, os.path.join(project, 'target.bst'))
+
+    # Assert the warning is raised as ref is not in branch foo.
+    # Assert warning not error to the user, when not set as fatal.
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    assert "The ref provided for the element does not exist locally" in result.stderr
+
+
+@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'template'))
+def test_ref_not_in_track_warn_error(cli, tmpdir, datafiles):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+
+    # Add fatal-warnings ref-not-in-track to project.conf
+    project_template = {
+        "name": "foo",
+        "fatal-warnings": [CoreWarnings.REF_NOT_IN_TRACK]
+    }
+
+    _yaml.dump(project_template, os.path.join(project, 'project.conf'))
+
+    # Create the repo from 'repofiles', create a branch without latest commit
+    repo = create_repo('git', str(tmpdir))
+    ref = repo.create(os.path.join(project, 'repofiles'))
+
+    gitsource = repo.source_config(ref=ref)
+
+    # Overwrite the track value to the added branch
+    gitsource['track'] = 'foo'
+
+    # Write out our test target
+    element = {
+        'kind': 'import',
+        'sources': [
+            gitsource
+        ]
+    }
+    _yaml.dump(element, os.path.join(project, 'target.bst'))
+
+    # Assert that build raises a warning here that is captured
+    # as plugin error, due to the fatal warning being set
+    result = cli.run(project=project, args=['build', 'target.bst'])
+    result.assert_main_error(ErrorDomain.STREAM, None)
+    result.assert_task_error(ErrorDomain.PLUGIN, CoreWarnings.REF_NOT_IN_TRACK)
+
+
+@pytest.mark.skipif(HAVE_GIT is False, reason="git is not available")
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'template'))
+@pytest.mark.parametrize("ref_format", ['sha1', 'git-describe'])
+@pytest.mark.parametrize("tag,extra_commit", [(False, False), (True, False), (True, True)])
+def test_track_fetch(cli, tmpdir, datafiles, ref_format, tag, extra_commit):
+    project = os.path.join(datafiles.dirname, datafiles.basename)
+
+    # Create the repo from 'repofiles' subdir
+    repo = create_repo('git', str(tmpdir))
+    ref = repo.create(os.path.join(project, 'repofiles'))
+    if tag:
+        repo.add_tag('tag')
+    if extra_commit:
+        repo.add_commit()
+
+    # Write out our test target
+    element = {
+        'kind': 'import',
+        'sources': [
+            repo.source_config()
+        ]
+    }
+    element['sources'][0]['ref-format'] = ref_format
+    element_path = os.path.join(project, 'target.bst')
+    _yaml.dump(element, element_path)
+
+    # Track it
+    result = cli.run(project=project, args=['track', 'target.bst'])
+    result.assert_success()
+
+    element = _yaml.load(element_path)
+    new_ref = element['sources'][0]['ref']
+
+    if ref_format == 'git-describe' and tag:
+        # Check and strip prefix
+        prefix = 'tag-{}-g'.format(0 if not extra_commit else 1)
+        assert new_ref.startswith(prefix)
+        new_ref = new_ref[len(prefix):]
+
+    # 40 chars for SHA-1
+    assert len(new_ref) == 40
+
+    # Fetch it
+    result = cli.run(project=project, args=['fetch', 'target.bst'])
+    result.assert_success()
diff --git a/tests/sources/tar.py b/tests/sources/tar.py
index fb02de3064a1fdd2035d481655b51976dd9d666a..1fd79f10bdd5981a9b146b968ca3309ca8026e52 100644
--- a/tests/sources/tar.py
+++ b/tests/sources/tar.py
@@ -3,6 +3,7 @@ import pytest
 import tarfile
 import tempfile
 import subprocess
+from shutil import copyfile, rmtree
 
 from buildstream._exceptions import ErrorDomain
 from buildstream import _yaml
@@ -66,7 +67,7 @@ def test_fetch_bad_url(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=[
         'fetch', 'target.bst'
     ])
-    assert "Try #" in result.stderr
+    assert "FAILURE Try #" in result.stderr
     result.assert_main_error(ErrorDomain.STREAM, None)
     result.assert_task_error(ErrorDomain.SOURCE, None)
 
@@ -257,3 +258,47 @@ def test_stage_default_basedir_lzip(cli, tmpdir, datafiles, srcdir):
     original_contents = list_dir_contents(original_dir)
     checkout_contents = list_dir_contents(checkoutdir)
     assert(checkout_contents == original_contents)
+
+
+# Test that a tarball containing a read-only dir works
+@pytest.mark.datafiles(os.path.join(DATA_DIR, 'read-only'))
+def test_read_only_dir(cli, tmpdir, datafiles):
+    try:
+        project = os.path.join(datafiles.dirname, datafiles.basename)
+        generate_project(project, tmpdir)
+
+        # Get the tarball in tests/sources/tar/read-only/content
+        #
+        # NOTE that we need to do this because tarfile.open and tar.add()
+        # are packing the tar up with writeable files and dirs
+        tarball = os.path.join(str(datafiles), 'content', 'a.tar.gz')
+        if not os.path.exists(tarball):
+            raise FileNotFoundError('{} does not exist'.format(tarball))
+        copyfile(tarball, os.path.join(str(tmpdir), 'a.tar.gz'))
+
+        # Because this test can potentially leave directories behind
+        # which are difficult to remove, ask buildstream to use
+        # our temp directory, so we can clean up.
+        tmpdir_str = str(tmpdir)
+        if not tmpdir_str.endswith(os.path.sep):
+            tmpdir_str += os.path.sep
+        env = {"TMP": tmpdir_str}
+
+        # Track, fetch, build, checkout
+        result = cli.run(project=project, args=['track', 'target.bst'], env=env)
+        result.assert_success()
+        result = cli.run(project=project, args=['fetch', 'target.bst'], env=env)
+        result.assert_success()
+        result = cli.run(project=project, args=['build', 'target.bst'], env=env)
+        result.assert_success()
+
+    finally:
+
+        # Make tmpdir deletable no matter what happens
+        def make_dir_writable(fn, path, excinfo):
+            os.chmod(os.path.dirname(path), 0o777)
+            if os.path.isdir(path):
+                os.rmdir(path)
+            else:
+                os.remove(path)
+        rmtree(str(tmpdir), onerror=make_dir_writable)
diff --git a/tests/sources/tar/read-only/content/a.tar.gz b/tests/sources/tar/read-only/content/a.tar.gz
new file mode 100644
index 0000000000000000000000000000000000000000..70922627b1b037cf1349c98f8ee13f80efd15b1b
Binary files /dev/null and b/tests/sources/tar/read-only/content/a.tar.gz differ
diff --git a/tests/sources/tar/read-only/target.bst b/tests/sources/tar/read-only/target.bst
new file mode 100644
index 0000000000000000000000000000000000000000..ad413a2f48343bbacf08a12ce5214de02cf1a7d1
--- /dev/null
+++ b/tests/sources/tar/read-only/target.bst
@@ -0,0 +1,6 @@
+kind: import
+description: The kind of this element is irrelevant.
+sources:
+- kind: tar
+  url: tmpdir:/a.tar.gz
+  ref: foo
diff --git a/tests/sources/zip.py b/tests/sources/zip.py
index a168d529b1a8b7b18b1260f635701dd8897f0010..73767ee7993d7baaa8c7c9df5548ff3047788591 100644
--- a/tests/sources/zip.py
+++ b/tests/sources/zip.py
@@ -53,7 +53,7 @@ def test_fetch_bad_url(cli, tmpdir, datafiles):
     result = cli.run(project=project, args=[
         'fetch', 'target.bst'
     ])
-    assert "Try #" in result.stderr
+    assert "FAILURE Try #" in result.stderr
     result.assert_main_error(ErrorDomain.STREAM, None)
     result.assert_task_error(ErrorDomain.SOURCE, None)
 
diff --git a/tests/testutils/__init__.py b/tests/testutils/__init__.py
index c2fae1cc4f71431660df1d441fd159cc3bdc38d2..4a79c3be2a26ba0f38a5115c2e16cfce763b53fc 100644
--- a/tests/testutils/__init__.py
+++ b/tests/testutils/__init__.py
@@ -26,6 +26,6 @@
 from .runcli import cli, cli_integration
 from .repo import create_repo, ALL_REPO_KINDS
 from .artifactshare import create_artifact_share
-from .element_generators import create_element_size
+from .element_generators import create_element_size, update_element_size
 from .junction import generate_junction
 from .runner_integration import wait_for_cache_granularity
diff --git a/tests/testutils/artifactshare.py b/tests/testutils/artifactshare.py
index 05e87a499248cbbf0bda9ed39633d0788b2b4195..a8af599055fc29823f576200a6169ab192d30c09 100644
--- a/tests/testutils/artifactshare.py
+++ b/tests/testutils/artifactshare.py
@@ -11,10 +11,10 @@ from multiprocessing import Process, Queue
 import pytest_cov
 
 from buildstream import _yaml
-from buildstream._artifactcache.cascache import CASCache
 from buildstream._artifactcache.casserver import create_server
 from buildstream._context import Context
 from buildstream._exceptions import ArtifactError
+from buildstream._protos.build.bazel.remote.execution.v2 import remote_execution_pb2
 
 
 # ArtifactShare()
@@ -32,7 +32,7 @@ class ArtifactShare():
     def __init__(self, directory, *, total_space=None, free_space=None):
 
         # The working directory for the artifact share (in case it
-        # needs to do something outside of it's backend's storage folder).
+        # needs to do something outside of its backend's storage folder).
         #
         self.directory = os.path.abspath(directory)
 
@@ -48,7 +48,7 @@ class ArtifactShare():
         context = Context()
         context.artifactdir = self.repodir
 
-        self.cas = CASCache(context)
+        self.cas = context.artifactcache
 
         self.total_space = total_space
         self.free_space = free_space
@@ -87,6 +87,23 @@ class ArtifactShare():
         # Sleep until termination by signal
         signal.pause()
 
+    # has_object():
+    #
+    # Checks whether the object is present in the share
+    #
+    # Args:
+    #    digest (str): The object's digest
+    #
+    # Returns:
+    #    (bool): True if the object exists in the share, otherwise false.
+    def has_object(self, digest):
+
+        assert isinstance(digest, remote_execution_pb2.Digest)
+
+        object_path = self.cas.objpath(digest)
+
+        return os.path.exists(object_path)
+
     # has_artifact():
     #
     # Checks whether the artifact is present in the share
@@ -105,9 +122,8 @@ class ArtifactShare():
         #       same algo for creating an artifact reference
         #
 
-        # Chop off the .bst suffix first
-        assert element_name.endswith('.bst')
-        element_name = element_name[:-4]
+        # Replace path separator and chop off the .bst suffix
+        element_name = os.path.splitext(element_name.replace(os.sep, '-'))[0]
 
         valid_chars = string.digits + string.ascii_letters + '-._'
         element_name = ''.join([
diff --git a/tests/testutils/element_generators.py b/tests/testutils/element_generators.py
index 49f235c61b9a59214f517da959960df8519c9745..448c8571a2d5ddf5c08bee2cb1bacfca9e7366b3 100644
--- a/tests/testutils/element_generators.py
+++ b/tests/testutils/element_generators.py
@@ -1,41 +1,100 @@
 import os
 
 from buildstream import _yaml
+from buildstream import utils
+
+from . import create_repo
 
 
 # create_element_size()
 #
-# This will open a "<name>_data" file for writing and write
-# <size> MB of urandom (/dev/urandom) "stuff" into the file.
-# A bst import element file is then created: <name>.bst
+# Creates an import element with a git repo, using random
+# data to create a file in that repo of the specified size,
+# such that building it will add an artifact of the specified
+# size to the artifact cache.
 #
 # Args:
-#  name: (str) of the element name (e.g. target.bst)
-#  path: (str) pathway to the project/elements directory
-#  dependencies: A list of strings (can also be an empty list)
-#  size: (int) size of the element in bytes
+#    name: (str) of the element name (e.g. target.bst)
+#    project_dir (str): The path to the project
+#    element_path (str): The element path within the project
+#    dependencies: A list of strings (can also be an empty list)
+#    size: (int) size of the element in bytes
 #
 # Returns:
-#  Nothing (creates a .bst file of specified size)
+#    (Repo): A git repo which can be used to introduce trackable changes
+#            by using the update_element_size() function below.
 #
 def create_element_size(name, project_dir, elements_path, dependencies, size):
     full_elements_path = os.path.join(project_dir, elements_path)
     os.makedirs(full_elements_path, exist_ok=True)
 
-    # Create a file to be included in this element's artifact
-    with open(os.path.join(project_dir, name + '_data'), 'wb+') as f:
-        f.write(os.urandom(size))
+    # Create a git repo
+    repodir = os.path.join(project_dir, 'repos')
+    repo = create_repo('git', repodir, subdir=name)
+
+    with utils._tempdir(dir=project_dir) as tmp:
+
+        # We use a data/ subdir in the git repo we create,
+        # and we set the import element to only extract that
+        # part; this ensures we never include a .git/ directory
+        # in the cached artifacts for these sized elements.
+        #
+        datadir = os.path.join(tmp, 'data')
+        os.makedirs(datadir)
+
+        # Use /dev/urandom to create the sized file in the datadir
+        with open(os.path.join(datadir, name), 'wb+') as f:
+            f.write(os.urandom(size))
+
+        # Create the git repo from the temp directory
+        ref = repo.create(tmp)
 
-    # Simplest case: We want this file (of specified size) to just
-    # be an import element.
     element = {
         'kind': 'import',
         'sources': [
-            {
-                'kind': 'local',
-                'path': name + '_data'
-            }
+            repo.source_config(ref=ref)
         ],
+        'config': {
+            # Extract only the data directory
+            'source': 'data'
+        },
         'depends': dependencies
     }
     _yaml.dump(element, os.path.join(project_dir, elements_path, name))
+
+    # Return the repo, so that it can later be used to add commits
+    return repo
+
+
+# update_element_size()
+#
+# Adds a new commit to a repo returned by create_element_size(),
+# completely replacing the element's content with new random data
+# of the specified size.
+#
+# The name and project_dir arguments must match the arguments
+# previously given to create_element_size()
+#
+# Args:
+#    name (str): The element name (e.g. target.bst)
+#    project_dir (str): The path to the project
+#    repo (Repo): The Repo returned by create_element_size()
+#    size (int): The new size which the element generates, in bytes
+#
+def update_element_size(name, project_dir, repo, size):
+
+    with utils._tempdir(dir=project_dir) as tmp:
+
+        new_file = os.path.join(tmp, name)
+
+        # Use /dev/urandom to create the sized file in the temporary directory
+        with open(new_file, 'wb+') as f:
+            f.write(os.urandom(size))
+
+        # Modify the git repo with a new commit to the same path,
+        # replacing the original file with a new one.
+        repo.modify_file(new_file, os.path.join('data', name))
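A rough usage sketch of the two helpers together, assuming they are imported from tests.testutils.element_generators; the element name, elements path, sizes and `project_dir` variable below are illustrative only.

    from tests.testutils.element_generators import create_element_size, update_element_size

    # Create a ~1 MB import element backed by a git repo
    repo = create_element_size('target.bst', project_dir, 'elements', [], 1000000)

    # Later, replace its content entirely so that after re-tracking the
    # element builds a new artifact of ~2 MB
    update_element_size('target.bst', project_dir, repo, 2000000)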
diff --git a/tests/testutils/repo/bzr.py b/tests/testutils/repo/bzr.py
index 8b0b6c295ee1ca746020310bc1dcced57c46d124..e8abdfee018d16c19b72910e7dfd81ceb25e6d99 100644
--- a/tests/testutils/repo/bzr.py
+++ b/tests/testutils/repo/bzr.py
@@ -2,6 +2,7 @@ import os
 import subprocess
 import pytest
 
+from buildstream import utils
 from .repo import Repo
 from ..site import HAVE_BZR
 
@@ -16,15 +17,16 @@ class Bzr(Repo):
         if not HAVE_BZR:
             pytest.skip("bzr is not available")
         super(Bzr, self).__init__(directory, subdir)
+        self.bzr = utils.get_host_tool('bzr')
 
     def create(self, directory):
         branch_dir = os.path.join(self.repo, 'trunk')
 
-        subprocess.call(['bzr', 'init-repo', self.repo], env=BZR_ENV)
-        subprocess.call(['bzr', 'init', branch_dir], env=BZR_ENV)
+        subprocess.call([self.bzr, 'init-repo', self.repo], env=BZR_ENV)
+        subprocess.call([self.bzr, 'init', branch_dir], env=BZR_ENV)
         self.copy_directory(directory, branch_dir)
-        subprocess.call(['bzr', 'add', '.'], env=BZR_ENV, cwd=branch_dir)
-        subprocess.call(['bzr', 'commit', '--message="Initial commit"'],
+        subprocess.call([self.bzr, 'add', '.'], env=BZR_ENV, cwd=branch_dir)
+        subprocess.call([self.bzr, 'commit', '--message="Initial commit"'],
                         env=BZR_ENV, cwd=branch_dir)
 
         return self.latest_commit()
@@ -42,7 +44,7 @@ class Bzr(Repo):
 
     def latest_commit(self):
         output = subprocess.check_output([
-            'bzr', 'version-info',
+            self.bzr, 'version-info',
             '--custom', '--template={revno}',
             os.path.join(self.repo, 'trunk')
         ], env=BZR_ENV)
diff --git a/tests/testutils/repo/git.py b/tests/testutils/repo/git.py
index eea43d608c5cc79186c9d274483c79ddad88a2ae..bc2dae691d9deb07d7777fff375305eca204a1e0 100644
--- a/tests/testutils/repo/git.py
+++ b/tests/testutils/repo/git.py
@@ -26,23 +26,39 @@ class Git(Repo):
 
         super(Git, self).__init__(directory, subdir)
 
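+    # _run_git():
+    #
+    # Runs a git command in this repository; by default the command is
+    # run with cwd set to the repo, with GIT_ENV (plus PWD) in the
+    # environment, and with check=True so that failures raise instead
+    # of being silently ignored.
+    #
+    # Args:
+    #    args: The git command line arguments
+    #    kwargs: Extra keyword arguments for subprocess.run()
+    #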
+    def _run_git(self, *args, **kwargs):
+        argv = ['git']
+        argv.extend(args)
+        if 'env' not in kwargs:
+            kwargs['env'] = dict(GIT_ENV, PWD=self.repo)
+        kwargs.setdefault('cwd', self.repo)
+        kwargs.setdefault('check', True)
+        return subprocess.run(argv, **kwargs)
+
     def create(self, directory):
         self.copy_directory(directory, self.repo)
-        subprocess.call(['git', 'init', '.'], env=GIT_ENV, cwd=self.repo)
-        subprocess.call(['git', 'add', '.'], env=GIT_ENV, cwd=self.repo)
-        subprocess.call(['git', 'commit', '-m', 'Initial commit'], env=GIT_ENV, cwd=self.repo)
+        self._run_git('init', '.')
+        self._run_git('add', '.')
+        self._run_git('commit', '-m', 'Initial commit')
         return self.latest_commit()
 
+    def add_tag(self, tag):
+        self._run_git('tag', tag)
+
     def add_commit(self):
-        subprocess.call(['git', 'commit', '--allow-empty', '-m', 'Additional commit'],
-                        env=GIT_ENV, cwd=self.repo)
+        self._run_git('commit', '--allow-empty', '-m', 'Additional commit')
         return self.latest_commit()
 
     def add_file(self, filename):
         shutil.copy(filename, self.repo)
-        subprocess.call(['git', 'add', os.path.basename(filename)], env=GIT_ENV, cwd=self.repo)
+        self._run_git('add', os.path.basename(filename))
+        self._run_git('commit', '-m', 'Added {}'.format(os.path.basename(filename)))
+        return self.latest_commit()
+
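+    # modify_file():
+    #
+    # Overwrites the tracked file at 'path' (relative to the repo root)
+    # with the contents of 'new_file' and commits the change.
+    #
+    # Returns:
+    #    (str): The new commit sha
+    #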
+    def modify_file(self, new_file, path):
+        shutil.copy(new_file, os.path.join(self.repo, path))
-        subprocess.call([
-            'git', 'commit', '-m', 'Added {}'.format(os.path.basename(filename))
-        ], env=GIT_ENV, cwd=self.repo)
+        self._run_git('commit', path, '-m', 'Modified {}'.format(os.path.basename(path)))
         return self.latest_commit()
 
@@ -53,8 +69,8 @@ class Git(Repo):
         if url is not None:
             submodule['url'] = url
         self.submodules[subdir] = submodule
-        subprocess.call(['git', 'submodule', 'add', url, subdir], env=GIT_ENV, cwd=self.repo)
-        subprocess.call(['git', 'commit', '-m', 'Added the submodule'], env=GIT_ENV, cwd=self.repo)
+        self._run_git('submodule', 'add', url, subdir)
+        self._run_git('commit', '-m', 'Added the submodule')
         return self.latest_commit()
 
     def source_config(self, ref=None, checkout_submodules=None):
@@ -74,10 +90,8 @@ class Git(Repo):
         return config
 
     def latest_commit(self):
-        output = subprocess.check_output([
-            'git', 'rev-parse', 'master'
-        ], env=GIT_ENV, cwd=self.repo)
+        output = self._run_git('rev-parse', 'master', stdout=subprocess.PIPE).stdout
         return output.decode('UTF-8').strip()
 
     def branch(self, branch_name):
-        subprocess.call(['git', 'checkout', '-b', branch_name], env=GIT_ENV, cwd=self.repo)
+        self._run_git('checkout', '-b', branch_name)
diff --git a/tests/testutils/runcli.py b/tests/testutils/runcli.py
index 96d4ea457b75ec3f18c4255241c3fe627fa493b6..3535e94eaabbb42598b30e77a3494f835e60819c 100644
--- a/tests/testutils/runcli.py
+++ b/tests/testutils/runcli.py
@@ -94,14 +94,28 @@ class Result():
     #    error_reason (any): The reason field of the error which occurred
     #    fail_message (str): An optional message to override the automatic
     #                        assertion error messages
+    #    debug (bool): If True, prints information about the exit state of the result
     # Raises:
     #    (AssertionError): If any of the assertions fail
     #
     def assert_main_error(self,
                           error_domain,
                           error_reason,
-                          fail_message=''):
-
+                          fail_message='',
+                          *, debug=False):
+        if debug:
+            print(
+                """
+                Exit code: {}
+                Exception: {}
+                Domain:    {}
+                Reason:    {}
+                """.format(
+                    self.exit_code,
+                    self.exception,
+                    self.exception.domain if self.exception else None,
+                    self.exception.reason if self.exception else None
+                ))
         assert self.exit_code == -1, fail_message
         assert self.exc is not None, fail_message
         assert self.exception is not None, fail_message
@@ -164,7 +178,7 @@ class Result():
         return list(pushed)
 
     def get_pulled_elements(self):
-        pulled = re.findall(r'\[\s*pull:(\S+)\s*\]\s*INFO\s*Downloaded artifact', self.stderr)
+        pulled = re.findall(r'\[\s*pull:(\S+)\s*\]\s*INFO\s*Pulled artifact', self.stderr)
         if pulled is None:
             return []
 
@@ -198,8 +212,10 @@ class Cli():
         for key, val in config.items():
             self.config[key] = val
 
-    def remove_artifact_from_cache(self, project, element_name):
-        cache_dir = os.path.join(project, 'cache', 'artifacts')
+    def remove_artifact_from_cache(self, project, element_name,
+                                   *, cache_dir=None):
+        if not cache_dir:
+            cache_dir = os.path.join(project, 'cache', 'artifacts')
 
         cache_dir = os.path.join(cache_dir, 'cas', 'refs', 'heads')
 
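For illustration only, a sketch of how a test might use the two new keyword arguments; the error domain, element name and cache directory here are assumptions rather than values taken from the patch.

    # Dump diagnostics while investigating a failing assertion
    result.assert_main_error(ErrorDomain.STREAM, None, debug=True)

    # Remove an artifact from a non-default artifact cache location
    cli.remove_artifact_from_cache(project, 'target.bst',
                                   cache_dir=os.path.join(project, 'custom-cache'))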
diff --git a/tests/yaml/data/implicitoverwrite.yaml b/tests/yaml/data/implicitoverwrite.yaml
index 20e5eb76ada7c05653284083e6c116ff9ec8a4a5..957d2c084003a3af5bc8b93c0d77efd025284edb 100644
--- a/tests/yaml/data/implicitoverwrite.yaml
+++ b/tests/yaml/data/implicitoverwrite.yaml
@@ -1,4 +1,4 @@
-# Composited on top of basics.yaml, overwriting it's children list
+# Composited on top of basics.yaml, overwriting its children list
 children:
 - name: overwrite1
   mood: overwrite1
diff --git a/tests/yaml/data/listappend.yaml b/tests/yaml/data/listappend.yaml
index 889ed80b276b96ef7c14f295d6e86a0f653f052b..169308c54f4980afe781624d7b465eb1cf83637e 100644
--- a/tests/yaml/data/listappend.yaml
+++ b/tests/yaml/data/listappend.yaml
@@ -1,4 +1,4 @@
-# Composited on top of basics.yaml, appending to it's children list
+# Composited on top of basics.yaml, appending to its children list
 children:
   (>):
   - name: appended1
diff --git a/tests/yaml/data/listappendprepend.yaml b/tests/yaml/data/listappendprepend.yaml
index 18471b36c022ea079ef1102a85469e3b856f039d..7e4b59a9cd6d1aa5c1b146d7409975748eb6191d 100644
--- a/tests/yaml/data/listappendprepend.yaml
+++ b/tests/yaml/data/listappendprepend.yaml
@@ -1,4 +1,4 @@
-# Composited on top of basics.yaml, prepending and appending to it's children list
+# Composited on top of basics.yaml, prepending and appending to its children list
 children:
   (<):
   - name: prepended1
diff --git a/tests/yaml/data/listoverwrite.yaml b/tests/yaml/data/listoverwrite.yaml
index 3efdfa7cb242404f5c46b5b7ac10d0a93e0235e8..eb9d7b1d9f6a9bdbb2e9be1ce3ac54b74f3d57d4 100644
--- a/tests/yaml/data/listoverwrite.yaml
+++ b/tests/yaml/data/listoverwrite.yaml
@@ -1,4 +1,4 @@
-# Composited on top of basics.yaml, overwriting it's children list
+# Composited on top of basics.yaml, overwriting its children list
 children:
   (=):
   - name: overwrite1
diff --git a/tests/yaml/data/listprepend.yaml b/tests/yaml/data/listprepend.yaml
index 3934c93beb7068c542ac200ee9e03f01608f5e54..a19a9d5d722a3ed1683d84a17512d648238d1687 100644
--- a/tests/yaml/data/listprepend.yaml
+++ b/tests/yaml/data/listprepend.yaml
@@ -1,4 +1,4 @@
-# Composited on top of basics.yaml, prepending to it's children list
+# Composited on top of basics.yaml, prepending to its children list
 children:
   (<):
   - name: prepended1
diff --git a/tests/yaml/data/secondappend.yaml b/tests/yaml/data/secondappend.yaml
index 376c4a707e69ffbe213ebf0122e4e4efd9ceb363..20cfc1ad6de993de9e2039adf4a7e2edecbc7006 100644
--- a/tests/yaml/data/secondappend.yaml
+++ b/tests/yaml/data/secondappend.yaml
@@ -1,4 +1,4 @@
-# Composited on top of listappend.yaml, appending to it's children list
+# Composited on top of listappend.yaml, appending to its children list
 children:
   (>):
   - name: secondappend1
diff --git a/tests/yaml/data/secondprepend.yaml b/tests/yaml/data/secondprepend.yaml
index 58276c3814b0191442d55ae28967c966a62d49af..5be8b9e72da63eb2d05c5e997501a8528f56223c 100644
--- a/tests/yaml/data/secondprepend.yaml
+++ b/tests/yaml/data/secondprepend.yaml
@@ -1,4 +1,4 @@
-# Composited on top of listprepend.yaml, prepending to it's children list
+# Composited on top of listprepend.yaml, prepending to its children list
 children:
   (<):
   - name: secondprepend1
diff --git a/tests/yaml/yaml.py b/tests/yaml/yaml.py
index 78176371730f23388764325401cfba11f7888ad7..26515caee06d4ca003367d53c199d669bb8f7d76 100644
--- a/tests/yaml/yaml.py
+++ b/tests/yaml/yaml.py
@@ -1,9 +1,12 @@
 import os
 import pytest
-from collections import Mapping
+import tempfile
+from collections.abc import Mapping
 
 from buildstream import _yaml
 from buildstream._exceptions import LoadError, LoadErrorReason
+from buildstream._context import Context
+from buildstream._yamlcache import YamlCache
 
 DATA_DIR = os.path.join(
     os.path.dirname(os.path.realpath(__file__)),
@@ -150,6 +153,21 @@ def test_composite_preserve_originals(datafiles):
     assert(_yaml.node_get(orig_extra, str, 'old') == 'new')
 
 
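+# load_yaml_file()
+#
+# Loads a yaml file for the composition tests below, either directly
+# ('raw') or twice through a YamlCache ('cached'), so that the second
+# load exercises the cache hit path.
+#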
+def load_yaml_file(filename, *, cache_path, shortname=None, from_cache='raw'):
+
+    fd, temppath = tempfile.mkstemp(dir=os.path.join(cache_path.dirname, cache_path.basename), text=True)
+    os.close(fd)
+    context = Context()
+
+    with YamlCache.open(context, temppath) as yc:
+        if from_cache == 'raw':
+            return _yaml.load(filename, shortname)
+        elif from_cache == 'cached':
+            _yaml.load(filename, shortname, yaml_cache=yc)
+            return _yaml.load(filename, shortname, yaml_cache=yc)
+        else:
+            assert False
+
+
 # Tests for list composition
 #
 # Each test composits a filename on top of basics.yaml, and tests
@@ -165,6 +183,7 @@ def test_composite_preserve_originals(datafiles):
 #    prov_col: The expected provenance column of "mood"
 #
 @pytest.mark.datafiles(os.path.join(DATA_DIR))
+@pytest.mark.parametrize('caching', ['raw', 'cached'])
 @pytest.mark.parametrize("filename,index,length,mood,prov_file,prov_line,prov_col", [
 
     # Test results of compositing with the (<) prepend directive
@@ -195,14 +214,15 @@ def test_composite_preserve_originals(datafiles):
     ('implicitoverwrite.yaml', 0, 2, 'overwrite1', 'implicitoverwrite.yaml', 4, 8),
     ('implicitoverwrite.yaml', 1, 2, 'overwrite2', 'implicitoverwrite.yaml', 6, 8),
 ])
-def test_list_composition(datafiles, filename,
+def test_list_composition(datafiles, filename, tmpdir,
                           index, length, mood,
-                          prov_file, prov_line, prov_col):
-    base = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml')
-    overlay = os.path.join(datafiles.dirname, datafiles.basename, filename)
+                          prov_file, prov_line, prov_col, caching):
+    base_file = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml')
+    overlay_file = os.path.join(datafiles.dirname, datafiles.basename, filename)
+
+    base = load_yaml_file(base_file, cache_path=tmpdir, shortname='basics.yaml', from_cache=caching)
+    overlay = load_yaml_file(overlay_file, cache_path=tmpdir, shortname=filename, from_cache=caching)
 
-    base = _yaml.load(base, shortname='basics.yaml')
-    overlay = _yaml.load(overlay, shortname=filename)
     _yaml.composite_dict(base, overlay)
 
     children = _yaml.node_get(base, list, 'children')
@@ -254,6 +274,7 @@ def test_list_deletion(datafiles):
 #    prov_col: The expected provenance column of "mood"
 #
 @pytest.mark.datafiles(os.path.join(DATA_DIR))
+@pytest.mark.parametrize('caching', ['raw', 'cached'])
 @pytest.mark.parametrize("filename1,filename2,index,length,mood,prov_file,prov_line,prov_col", [
 
     # Test results of compositing literal list with (>) and then (<)
@@ -310,9 +331,9 @@ def test_list_deletion(datafiles):
     ('listoverwrite.yaml', 'listprepend.yaml', 2, 4, 'overwrite1', 'listoverwrite.yaml', 5, 10),
     ('listoverwrite.yaml', 'listprepend.yaml', 3, 4, 'overwrite2', 'listoverwrite.yaml', 7, 10),
 ])
-def test_list_composition_twice(datafiles, filename1, filename2,
+def test_list_composition_twice(datafiles, tmpdir, filename1, filename2,
                                 index, length, mood,
-                                prov_file, prov_line, prov_col):
+                                prov_file, prov_line, prov_col, caching):
     file_base = os.path.join(datafiles.dirname, datafiles.basename, 'basics.yaml')
     file1 = os.path.join(datafiles.dirname, datafiles.basename, filename1)
     file2 = os.path.join(datafiles.dirname, datafiles.basename, filename2)
@@ -320,9 +341,9 @@ def test_list_composition_twice(datafiles, filename1, filename2,
     #####################
     # Round 1 - Fight !
     #####################
-    base = _yaml.load(file_base, shortname='basics.yaml')
-    overlay1 = _yaml.load(file1, shortname=filename1)
-    overlay2 = _yaml.load(file2, shortname=filename2)
+    base = load_yaml_file(file_base, cache_path=tmpdir, shortname='basics.yaml', from_cache=caching)
+    overlay1 = load_yaml_file(file1, cache_path=tmpdir, shortname=filename1, from_cache=caching)
+    overlay2 = load_yaml_file(file2, cache_path=tmpdir, shortname=filename2, from_cache=caching)
 
     _yaml.composite_dict(base, overlay1)
     _yaml.composite_dict(base, overlay2)
@@ -337,9 +358,9 @@ def test_list_composition_twice(datafiles, filename1, filename2,
     #####################
     # Round 2 - Fight !
     #####################
-    base = _yaml.load(file_base, shortname='basics.yaml')
-    overlay1 = _yaml.load(file1, shortname=filename1)
-    overlay2 = _yaml.load(file2, shortname=filename2)
+    base = load_yaml_file(file_base, cache_path=tmpdir, shortname='basics.yaml', from_cache=caching)
+    overlay1 = load_yaml_file(file1, cache_path=tmpdir, shortname=filename1, from_cache=caching)
+    overlay2 = load_yaml_file(file2, cache_path=tmpdir, shortname=filename2, from_cache=caching)
 
     _yaml.composite_dict(overlay1, overlay2)
     _yaml.composite_dict(base, overlay1)
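As a quick sanity sketch (not part of the patch), both loading modes should yield equivalent content; node_sanitize() is used here on the assumption that it strips provenance data before comparison.

    raw = load_yaml_file(base_file, cache_path=tmpdir, shortname='basics.yaml', from_cache='raw')
    cached = load_yaml_file(base_file, cache_path=tmpdir, shortname='basics.yaml', from_cache='cached')
    assert _yaml.node_sanitize(raw) == _yaml.node_sanitize(cached)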
diff --git a/versioneer.py b/versioneer.py
index 48e9b2e25141ff93c8072553f14323597e5c42bb..1c97e022716b5ebd09316722f726c4115057c4af 100644
--- a/versioneer.py
+++ b/versioneer.py
@@ -1494,7 +1494,7 @@ def get_cmdclass():
         # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
         # sys.modules by the time B's setup.py is executed, causing B to run
         # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
-        # sandbox that restores sys.modules to it's pre-build state, so the
+        # sandbox that restores sys.modules to its pre-build state, so the
         # parent is protected against the child's "import versioneer". By
         # removing ourselves from sys.modules here, before the child build
         # happens, we protect the child from the parent's versioneer too.