#
#  Copyright (C) 2016-2018 Codethink Limited
#
#  This program is free software; you can redistribute it and/or
#  modify it under the terms of the GNU Lesser General Public
#  License as published by the Free Software Foundation; either
#  version 2 of the License, or (at your option) any later version.
#
#  This library is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
#  Lesser General Public License for more details.
#
#  You should have received a copy of the GNU Lesser General Public
#  License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
#  Authors:
#        Tristan Van Berkom <tristan.vanberkom@codethink.co.uk>

import os
import shutil
import datetime
from collections import deque
from collections.abc import Mapping
from contextlib import contextmanager
from . import utils
from . import _cachekey
from . import _signals
from . import _site
from . import _yaml
from ._exceptions import LoadError, LoadErrorReason, BstError
from ._message import Message, MessageType
from ._profile import Topics, PROFILER
from ._artifactcache import ArtifactCache
from ._sourcecache import SourceCache
from ._cas import CASCache, CASQuota, CASCacheUsage
from ._workspaces import Workspaces, WorkspaceProjectCache
from .plugin import Plugin
from .sandbox import SandboxRemote


# Context()
#
# The Context object holds all of the user preferences
# and context for a given invocation of BuildStream.
#
# This is a collection of data from configuration files and command
# line arguments and consists of information such as where to store
# logs and artifacts, where to perform builds and cache downloaded sources,
# verbosity levels and basically anything pertaining to the context
# in which BuildStream was invoked.
#
# Args:
#    directory (str): The directory that buildstream was invoked in
#
class Context():

    def __init__(self, directory=None):

        # Filename indicating which configuration file was used, or None for the defaults
        self.config_origin = None

        # The directory under which other directories are based
        self.cachedir = None

        # The directory where various sources are stored
        self.sourcedir = None

        # Specs for source cache remotes
        self.source_cache_specs = None

        # The directory where build sandboxes will be created
        self.builddir = None

        # The directory for CAS
        self.casdir = None

        # The directory for temporary files
        self.tmpdir = None

        # Default root location for workspaces
        self.workspacedir = None

        # The locations from which to push and pull prebuilt artifacts
        self.artifact_cache_specs = None

        # The global remote execution configuration
        self.remote_execution_specs = None

        # The directory to store build logs
        self.logdir = None

        # The abbreviated cache key length to display in the UI
        self.log_key_length = None

        # Whether debug mode is enabled
        self.log_debug = None

        # Whether verbose mode is enabled
        self.log_verbose = None

        # Maximum number of lines to print from build logs
        self.log_error_lines = None

        # Maximum number of lines to print in the master log for a detailed message
        self.log_message_lines = None

        # Format string for printing the pipeline at startup time
        self.log_element_format = None

        # Format string for printing message lines in the master log
        self.log_message_format = None

        # Maximum number of fetch or refresh tasks
        self.sched_fetchers = None

        # Maximum number of build tasks
        self.sched_builders = None

        # Maximum number of push tasks
        self.sched_pushers = None

        # Maximum number of retries for network tasks
        self.sched_network_retries = None

        # What to do when a build fails in non-interactive mode
        self.sched_error_action = None

        # Size of the artifact cache in bytes
        self.config_cache_quota = None

        # User specified cache quota, used for display messages
        self.config_cache_quota_string = None

        # Whether or not to attempt to pull build trees globally
        self.pull_buildtrees = None

        # Whether or not to cache build trees on artifact creation
        self.cache_buildtrees = None

        # Whether directory trees are required for all artifacts in the local cache
        self.require_artifact_directories = True

        # Whether file contents are required for all artifacts in the local cache
        self.require_artifact_files = True

        # Whether elements must be rebuilt when their dependencies have changed
        self._strict_build_plan = None

        # Make sure the XDG vars are set in the environment before loading anything
        self._init_xdg()

        # Private variables
        self._cache_key = None
        self._message_handler = None
        self._message_depth = deque()
        self._artifactcache = None
        self._sourcecache = None
        self._projects = []
        self._project_overrides = _yaml.new_empty_node()
        self._workspaces = None
        self._workspace_project_cache = WorkspaceProjectCache()
        self._log_handle = None
        self._log_filename = None
        self._cascache = None
        self._casquota = None
        self._directory = directory

    # load()
    #
    # Loads the configuration files
    #
    # Args:
    #    config (filename): The user specified configuration file, if any
    #
    # Raises:
    #   LoadError
    #
    # This will first load the BuildStream default configuration and then
    # override that configuration with the configuration file indicated
    # by *config*, if any was specified.
    #
    @PROFILER.profile(Topics.LOAD_CONTEXT, "load")
    def load(self, config=None):
        # If a specific config file is not specified, default to trying
        # a $XDG_CONFIG_HOME/buildstream.conf file
        #
        if not config:
            default_config = os.path.join(os.environ['XDG_CONFIG_HOME'],
                                          'buildstream.conf')
            if os.path.exists(default_config):
                config = default_config

        # Load default config
        #
        defaults = _yaml.load(_site.default_user_config)

        if config:
            self.config_origin = os.path.abspath(config)
            user_config = _yaml.load(config)
            _yaml.composite(defaults, user_config)

        # Give obsoletion warnings
        if 'builddir' in defaults:
            raise LoadError(LoadErrorReason.INVALID_DATA,
                            "builddir is obsolete, use cachedir")

        if 'artifactdir' in defaults:
            raise LoadError(LoadErrorReason.INVALID_DATA,
                            "artifactdir is obsolete")

        _yaml.node_validate(defaults, [
            'cachedir', 'sourcedir', 'builddir', 'logdir', 'scheduler',
            'artifacts', 'source-caches', 'logging', 'projects', 'cache', 'prompt',
            'workspacedir', 'remote-execution',
        ])
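
        # For reference, a hypothetical user configuration accepted by the
        # validation above might look like this (the values are illustrative
        # assumptions, not defaults):
        #
        #     cachedir: ~/.cache/buildstream
        #     logdir: ~/.cache/buildstream/logs
        #     scheduler:
        #       builders: 4
        #     logging:
        #       verbose: true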

        for directory in ['cachedir', 'sourcedir', 'logdir', 'workspacedir']:
            # Allow the ~ tilde expansion and any environment variables in
            # path specification in the config files.
            #
            path = _yaml.node_get(defaults, str, directory)
            path = os.path.expanduser(path)
            path = os.path.expandvars(path)
            path = os.path.normpath(path)
            setattr(self, directory, path)

        # Add directories not set by users
        self.tmpdir = os.path.join(self.cachedir, 'tmp')
        self.casdir = os.path.join(self.cachedir, 'cas')
        self.builddir = os.path.join(self.cachedir, 'build')

        # Move old artifact cas to cas if it exists and create symlink
        old_casdir = os.path.join(self.cachedir, 'artifacts', 'cas')
        if (os.path.exists(old_casdir) and not os.path.islink(old_casdir) and
                not os.path.exists(self.casdir)):
            os.rename(old_casdir, self.casdir)
            os.symlink(self.casdir, old_casdir)

        # Cleanup old extract directories
        old_extractdirs = [os.path.join(self.cachedir, 'artifacts', 'extract'),
                           os.path.join(self.cachedir, 'extract')]
        for old_extractdir in old_extractdirs:
            if os.path.isdir(old_extractdir):
                shutil.rmtree(old_extractdir, ignore_errors=True)

        # Load quota configuration
        # We need to find the first existing directory in the path of our
        # cachedir - the cachedir may not have been created yet.
        cache = _yaml.node_get(defaults, Mapping, 'cache')
        _yaml.node_validate(cache, ['quota', 'pull-buildtrees', 'cache-buildtrees'])

        self.config_cache_quota_string = _yaml.node_get(cache, str, 'quota')
        try:
            self.config_cache_quota = utils._parse_size(self.config_cache_quota_string,
                                                        self.casdir)
        except utils.UtilError as e:
            raise LoadError(LoadErrorReason.INVALID_DATA,
                            "{}\nPlease specify the value in bytes or as a % of full disk space.\n"
                            "\nValid values are, for example: 800M 10G 1T 50%\n"
                            .format(str(e))) from e
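
        # For example, a quota of '10G' is parsed as an absolute size, while
        # '50%' is resolved against the disk holding casdir (a sketch of the
        # behaviour; see utils._parse_size() for the exact semantics).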

        # Load artifact share configuration
        self.artifact_cache_specs = ArtifactCache.specs_from_config_node(defaults)

        # Load source cache config
        self.source_cache_specs = SourceCache.specs_from_config_node(defaults)

        self.remote_execution_specs = SandboxRemote.specs_from_config_node(defaults)

        # Load pull build trees configuration
        self.pull_buildtrees = _yaml.node_get(cache, bool, 'pull-buildtrees')

        # Load cache build trees configuration
        self.cache_buildtrees = _node_get_option_str(
            cache, 'cache-buildtrees', ['always', 'auto', 'never'])

        # Load logging config
        logging = _yaml.node_get(defaults, Mapping, 'logging')
        _yaml.node_validate(logging, [
            'key-length', 'verbose',
            'error-lines', 'message-lines',
            'debug', 'element-format', 'message-format'
        ])
        self.log_key_length = _yaml.node_get(logging, int, 'key-length')
        self.log_debug = _yaml.node_get(logging, bool, 'debug')
        self.log_verbose = _yaml.node_get(logging, bool, 'verbose')
        self.log_error_lines = _yaml.node_get(logging, int, 'error-lines')
        self.log_message_lines = _yaml.node_get(logging, int, 'message-lines')
        self.log_element_format = _yaml.node_get(logging, str, 'element-format')
        self.log_message_format = _yaml.node_get(logging, str, 'message-format')

        # Load scheduler config
        scheduler = _yaml.node_get(defaults, Mapping, 'scheduler')
        _yaml.node_validate(scheduler, [
            'on-error', 'fetchers', 'builders',
            'pushers', 'network-retries'
        ])
        self.sched_error_action = _node_get_option_str(
            scheduler, 'on-error', ['continue', 'quit', 'terminate'])
        self.sched_fetchers = _yaml.node_get(scheduler, int, 'fetchers')
        self.sched_builders = _yaml.node_get(scheduler, int, 'builders')
        self.sched_pushers = _yaml.node_get(scheduler, int, 'pushers')
        self.sched_network_retries = _yaml.node_get(scheduler, int, 'network-retries')

        # Load per-project overrides
        self._project_overrides = _yaml.node_get(defaults, dict, 'projects', default_value={})

        # Shallow validation of overrides; parts of buildstream which rely
        # on the overrides are expected to validate elsewhere.
        for _, overrides in _yaml.node_items(self._project_overrides):
            _yaml.node_validate(overrides,
                                ['artifacts', 'source-caches', 'options',
                                 'strict', 'default-mirror',
                                 'remote-execution'])

    @property
    def artifactcache(self):
        if not self._artifactcache:
            self._artifactcache = ArtifactCache(self)

        return self._artifactcache

    # get_cache_usage()
    #
    # Fetches the current usage of the artifact cache
    #
    # Returns:
    #     (CASCacheUsage): The current status
    #
    def get_cache_usage(self):
        return CASCacheUsage(self.get_casquota())

    @property
    def sourcecache(self):
        if not self._sourcecache:
            self._sourcecache = SourceCache(self)

        return self._sourcecache

    # add_project():
    #
    # Add a project to the context.
    #
    # Args:
    #    project (Project): The project to add
    #
    def add_project(self, project):
        if not self._projects:
            self._workspaces = Workspaces(project, self._workspace_project_cache)
        self._projects.append(project)

    # get_projects():
    #
    # Return the list of projects in the context.
    #
    # Returns:
    #    (list): The list of projects
    #
    def get_projects(self):
        return self._projects

    # get_toplevel_project():
    #
    # Return the toplevel project, the one which BuildStream was
    # invoked with, as opposed to a junctioned subproject.
    #
    # Returns:
    #    (Project): The Project object
    #
    def get_toplevel_project(self):
        return self._projects[0]

    # get_workspaces():
    #
    # Return a Workspaces object containing a list of workspaces.
    #
    # Returns:
    #    (Workspaces): The Workspaces object
    #
    def get_workspaces(self):
        return self._workspaces

    # get_workspace_project_cache():
    #
    # Return the WorkspaceProjectCache object used for this BuildStream invocation
    #
    # Returns:
    #    (WorkspaceProjectCache): The WorkspaceProjectCache object
    #
    def get_workspace_project_cache(self):
        return self._workspace_project_cache

    # get_overrides():
    #
    # Fetch the override dictionary for the active project. This returns
    # a node loaded from YAML and as such, values loaded from the returned
    # node should be loaded using the _yaml.node_get() family of functions.
    #
    # Args:
    #    project_name (str): The project name
    #
    # Returns:
    #    (Mapping): The overrides dictionary for the specified project
    #
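    # A hypothetical usage sketch (assumes a project named 'myproject'):
    #
    #     overrides = context.get_overrides('myproject')
    #     strict = _yaml.node_get(overrides, bool, 'strict', default_value=True)
    #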
    def get_overrides(self, project_name):
        return _yaml.node_get(self._project_overrides, Mapping, project_name, default_value={})

    # get_strict():
    #
    # Fetch whether we are strict or not
    #
    # Returns:
    #    (bool): Whether or not to use strict build plan
    #
    def get_strict(self):
        if self._strict_build_plan is None:
            # Either we're not overridden or we've never worked it out before,
            # so work out if we should be strict, and then cache the result
            toplevel = self.get_toplevel_project()
            overrides = self.get_overrides(toplevel.name)
            self._strict_build_plan = _yaml.node_get(overrides, bool, 'strict', default_value=True)

        # If it was set by the CLI, it overrides any config.
        # Ditto if we've already computed this: we return the computed
        # value which we cache here too.
        return self._strict_build_plan

    # get_cache_key():
    #
    # Returns the cache key, calculating it if necessary
    #
    # Returns:
    #    (str): A hex digest cache key for the Context
    #
    def get_cache_key(self):
        if self._cache_key is None:

            # Anything that alters the build goes into the unique key
            self._cache_key = _cachekey.generate_key(_yaml.new_empty_node())

        return self._cache_key

    # set_message_handler()
    #
    # Sets the handler for any status messages propagated through
    # the context.
    #
    # The message handler should have the same signature as
    # the message() method
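    #
    # A hypothetical sketch (the handler is invoked as handler(message, context=context)):
    #
    #     def handler(message, context):
    #         ...  # e.g. forward the message to the frontend
    #
    #     context.set_message_handler(handler)
    #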
    def set_message_handler(self, handler):
        self._message_handler = handler

    # silent_messages():
    #
    # Returns:
    #    (bool): Whether messages are currently being silenced
    #
    def silent_messages(self):
        for silent in self._message_depth:
            if silent:
                return True
        return False

    # message():
    #
    # Proxies a message back to the caller; this is the central
    # point through which all messages pass.
    #
    # Args:
    #    message: A Message object
    #
    def message(self, message):

        # Tag message only once
        if message.depth is None:
            message.depth = len(list(self._message_depth))

        # If we are recording messages, dump a copy into the open log file.
        self._record_message(message)

        # Send it off to the log handler (can be the frontend,
        # or it can be the child task which will propagate
        # to the frontend)
        assert self._message_handler

        self._message_handler(message, context=self)

    # silence()
    #
    # A context manager to silence messages; this behaves in
    # the same way as the `silent_nested` argument of the
    # Context.timed_activity() context manager: especially
    # important messages will not be silenced.
    #
    @contextmanager
    def silence(self):
        self._push_message_depth(True)
        try:
            yield
        finally:
            self._pop_message_depth()

    # timed_activity()
    #
    # Context manager for performing timed activities and logging those
    #
    # Args:
    #    activity_name (str): The name of the activity
    #    unique_id (int): An optional unique id to associate with the generated messages
    #    detail (str): An optional detailed message, can be multiline output
    #    silent_nested (bool): If specified, nested messages will be silenced
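    #
    # A hypothetical usage sketch:
    #
    #     with context.timed_activity("Fetching sources", silent_nested=True):
    #         fetch()   # START and SUCCESS (or FAIL) messages frame this block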
    #
    @contextmanager
    def timed_activity(self, activity_name, *, unique_id=None, detail=None, silent_nested=False):

        starttime = datetime.datetime.now()
        stopped_time = None

        def stop_time():
            nonlocal stopped_time
            stopped_time = datetime.datetime.now()

        def resume_time():
            nonlocal stopped_time
            nonlocal starttime
            sleep_time = datetime.datetime.now() - stopped_time
            starttime += sleep_time

        with _signals.suspendable(stop_time, resume_time):
            try:
                # Push activity depth for status messages
                message = Message(unique_id, MessageType.START, activity_name, detail=detail)
                self.message(message)
                self._push_message_depth(silent_nested)
                yield

            except BstError:
                # Note the failure in status messages and reraise; the scheduler
                # expects an error when there is an error.
                elapsed = datetime.datetime.now() - starttime
                message = Message(unique_id, MessageType.FAIL, activity_name, elapsed=elapsed)
                self._pop_message_depth()
                self.message(message)
                raise

            elapsed = datetime.datetime.now() - starttime
            message = Message(unique_id, MessageType.SUCCESS, activity_name, elapsed=elapsed)
            self._pop_message_depth()
            self.message(message)

    # recorded_messages()
    #
    # Records all messages in a log file while the context manager
    # is active.
    #
    # In addition to automatically writing all messages to the
    # specified logging file, an open file handle for process stdout
    # and stderr will be available via the Context.get_log_handle() API,
    # and the full logfile path will be available via the
    # Context.get_log_filename() API.
    #
    # Args:
    #     filename (str): A logging directory relative filename,
    #                     the pid and .log extension will be automatically
    #                     appended
    #
    # Yields:
    #     (str): The fully qualified log filename
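    #
    # A hypothetical usage sketch (from within a child task):
    #
    #     with context.recorded_messages('build/element-name') as filename:
    #         ...  # messages are also recorded to the returned log file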
    #
    @contextmanager
    def recorded_messages(self, filename):

        # We don't allow recursing in this context manager, and
        # we also do not allow it in the main process.
        assert self._log_handle is None
        assert self._log_filename is None
        assert not utils._is_main_process()

        # Create the fully qualified logfile in the log directory,
        # appending the pid and .log extension at the end.
        self._log_filename = os.path.join(self.logdir,
                                          '{}.{}.log'.format(filename, os.getpid()))

        # Ensure the directory exists first
        directory = os.path.dirname(self._log_filename)
        os.makedirs(directory, exist_ok=True)

        with open(self._log_filename, 'a') as logfile:

            # Write one last line to the log and flush it to disk
            def flush_log():

                # If the process currently has something happening in the I/O stack
                # then trying to reenter the I/O stack will fire a runtime error.
                #
                # So just try to flush as well as we can at SIGTERM time
                try:
                    logfile.write('\n\nForcefully terminated\n')
                    logfile.flush()
                except RuntimeError:
                    os.fsync(logfile.fileno())

            self._log_handle = logfile
            with _signals.terminator(flush_log):
                yield self._log_filename

            self._log_handle = None
            self._log_filename = None

    # get_log_handle()
    #
    # Fetches the active log handle, this will return the active
    # log file handle when the Context.recorded_messages() context
    # manager is active
    #
    # Returns:
    #     (file): The active logging file handle, or None
    #
    def get_log_handle(self):
        return self._log_handle

    # get_log_filename()
    #
    # Fetches the active log filename, this will return the active
    # log filename when the Context.recorded_messages() context
    # manager is active
    #
    # Returns:
    #     (str): The active logging filename, or None
    #
    def get_log_filename(self):
        return self._log_filename

    # set_artifact_directories_optional()
    #
    # This indicates that the current context (command or configuration)
    # does not require directory trees of all artifacts to be available in the
    # local cache.
    #
    def set_artifact_directories_optional(self):
        self.require_artifact_directories = False
        self.require_artifact_files = False

    # set_artifact_files_optional()
    #
    # This indicates that the current context (command or configuration)
    # does not require file contents of all artifacts to be available in the
    # local cache.
    #
    def set_artifact_files_optional(self):
        self.require_artifact_files = False

    # _record_message()
    #
    # Records the message if recording is enabled
    #
    # Args:
    #    message (Message): The message to record
    #
    def _record_message(self, message):

        if self._log_handle is None:
            return

        INDENT = "    "
        EMPTYTIME = "--:--:--"
        template = "[{timecode: <8}] {type: <7}"

        # If this message is associated with a plugin, print what
        # we know about the plugin.
        plugin_name = ""
        if message.unique_id:
            template += " {plugin}"
            plugin = Plugin._lookup(message.unique_id)
            plugin_name = plugin.name

        template += ": {message}"

        detail = ''
        if message.detail is not None:
            template += "\n\n{detail}"
            detail = message.detail.rstrip('\n')
            detail = INDENT + INDENT.join(detail.splitlines(True))

        timecode = EMPTYTIME
        if message.message_type in (MessageType.SUCCESS, MessageType.FAIL):
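            # For example, an elapsed time of 3723 seconds renders as "01:02:03"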
            hours, remainder = divmod(int(message.elapsed.total_seconds()), 60**2)
            minutes, seconds = divmod(remainder, 60)
            timecode = "{0:02d}:{1:02d}:{2:02d}".format(hours, minutes, seconds)

        text = template.format(timecode=timecode,
                               plugin=plugin_name,
                               type=message.message_type.upper(),
                               message=message.message,
                               detail=detail)

        # Write to the open log file
        self._log_handle.write('{}\n'.format(text))
        self._log_handle.flush()

    # _push_message_depth() / _pop_message_depth()
    #
    # For status messages, send the depth of timed
    # activities inside a given task through the message
    #
    def _push_message_depth(self, silent_nested):
        self._message_depth.appendleft(silent_nested)

    def _pop_message_depth(self):
        assert self._message_depth
        self._message_depth.popleft()

    # Force the resolved XDG variables into the environment,
    # this is so that they can be used directly to specify
    # preferred locations of things from user configuration
    # files.
    def _init_xdg(self):
        if not os.environ.get('XDG_CACHE_HOME'):
            os.environ['XDG_CACHE_HOME'] = os.path.expanduser('~/.cache')
        if not os.environ.get('XDG_CONFIG_HOME'):
            os.environ['XDG_CONFIG_HOME'] = os.path.expanduser('~/.config')
        if not os.environ.get('XDG_DATA_HOME'):
            os.environ['XDG_DATA_HOME'] = os.path.expanduser('~/.local/share')

    def get_cascache(self):
        if self._cascache is None:
            self._cascache = CASCache(self.cachedir)
        return self._cascache

    def get_casquota(self):
        if self._casquota is None:
            self._casquota = CASQuota(self)
        return self._casquota


# _node_get_option_str()
#
# Like _yaml.node_get(), but also checks that the value is one of the
# allowed option strings: it fetches a value from a dictionary node, and
# makes sure it's one of the pre-defined options.
#
# Args:
#    node (dict): The dictionary node
#    key (str): The key to get a value for in node
#    allowed_options (iterable): Only accept these values
#
# Returns:
#    The value, if found in 'node'.
#
# Raises:
#    LoadError, when the value is not of the expected type, or is not found.
#
def _node_get_option_str(node, key, allowed_options):
    result = _yaml.node_get(node, str, key)
    if result not in allowed_options:
        provenance = _yaml.node_get_provenance(node, key)
        raise LoadError(LoadErrorReason.INVALID_DATA,
                        "{}: {} should be one of: {}".format(
                            provenance, key, ", ".join(allowed_options)))
    return result
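
# A hypothetical usage sketch (mirroring how the scheduler 'on-error'
# option is loaded above):
#
#     action = _node_get_option_str(scheduler, 'on-error',
#                                   ['continue', 'quit', 'terminate'])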