......@@ -17,33 +17,52 @@ variables:
JOB_NAME_REF: $CI_JOB_NAME
stages:
- checks
- clean
- test-benchmarks
- test-debian-like-project
- test-functional-test-simple
- test-functional-test-combined-config
- test-bstgen-100-square
- deploy
# Run pylint
pylint:
stage: checks
# We only want to run pylint on shared runners
tags:
- shared
image: python:3.7-stretch
before_script:
- "pip install -r requirements.txt"
- "pip install pylint"
script:
- "find . -path ./contrib -prune -o -name '*.py' -exec pylint --rcfile=.pylintrc {} +"
when: always
# Remove previous pipeline_caching
job:
stage: clean
tags: [ 'cleanup' ]
script:
- "rm -rf pipeline_cache/*"
- ./verify_run_token.sh
artifacts:
paths: [ 'pipeline_cache' ]
paths: [ 'results_cache', 'pipeline_cache' ]
benchmarks:
stage: test-benchmarks
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
- ./run_walk_ci.sh "-c bst_benchmarks/default.benchmark"
- else
- ./ci_run.sh
- ./ci_run.sh "-c bst_benchmarks/default.benchmark"
- fi
dependencies:
- job
......@@ -59,6 +78,8 @@ test-debian-like-project:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
......@@ -80,12 +101,14 @@ functional-test-simple:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
- ./run_walk_ci.sh "-c configs/simple.benchmark"
- else
- ./ci_run.sh
- ./ci_run.sh "-c configs/simple.benchmark"
- fi
artifacts:
paths: [ 'results_out/', 'results_cache/', 'pipeline_cache' ]
......@@ -95,12 +118,31 @@ functional-test-combined-config:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
- ./run_walk_ci.sh "-c configs/simple.benchmark -c configs/addon-version.benchmark"
- else
- ./ci_run.sh
- ./ci_run.sh "-c configs/simple.benchmark -c configs/addon-version.benchmark"
- fi
artifacts:
paths: [ 'results_out/', 'results_cache/', 'pipeline_cache' ]
test-bstgen-100-square:
stage: test-bstgen-100-square
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
- ./run_walk_ci.sh "-c bst_benchmarks/100_square_bstgen.benchmark"
- else
- ./ci_run.sh "-c bst_benchmarks/100_square_bstgen.benchmark"
- fi
artifacts:
paths: [ 'results_out/', 'results_cache/', 'pipeline_cache' ]
......@@ -110,6 +152,8 @@ trigger_build:
tags: [ 'publish' ]
script:
- if ! [ -z "${PUBLISH_RESULT}" ]; then
- "cp pipeline_cache/*.json results_cache/"
- "cp pipeline_cache/run_token.yml results_cache/"
- "curl --request POST --form token=${IO_TOKEN} --form ref=master --form variables[JOB_REF]=$CI_JOB_URL https://gitlab.com/api/v4/projects/8992068/trigger/pipeline"
- fi
only:
......
#!/usr/bin/env python3
#
# Copyright (C) 2019 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
import tempfile
import argparse
import os
import logging
from shutil import copyfile
import sys
import git
import bst_benchmarks.main
import generate_benchmark_config
import digest_results
# This command-line executable generates and runs a local benchmark CI run. A
# number of parameters are passed in and checked to confirm that the
# configuration settings are tenable: the path to the repository, the
# BuildStream branch, the SHAs to be tested (defaults to the HEAD of the branch
# if not specified), the specific tests to be carried out (defaults to
# default.benchmark if not set), the output file (defaults to stdout if not
# set), and whether master should be tested as a comparison (default is yes).
#
# output_file - the path where the bespoke benchmark file is to be placed
# repo_path - path to the local buildstream repo.
# bs_branch - the branch of buildstream being considered.
# shas - shas to be tested.
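#
# An example invocation might look like the following; the script filename and
# the placeholder SHA are assumptions shown purely for illustration:
#
#   ./run_local_benchmark.py bst_benchmarks/default.benchmark \
#       -r file:///home/user/src/buildstream -b master \
#       -s <sha-to-test> -o digest.mdwn --results-file results.json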
def main():
# Define all the defaults explicitly
repo_path = 'https://gitlab.com/BuildStream/buildstream.git'
bs_branch = 'master'
shas_to_be_tested = []
docker_tag = "30-latest"
docker_image = 'registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora'
tests_to_run = []
debug = False
keep_results = False
keep_path = "results.json"
def make_help(message):
return message + " (Default: %(default)s)"
# Resolve commandline arguments
parser = argparse.ArgumentParser(description="Automatically set up test environments for a set "
"of buildstream benchmarks and run them locally using docker.")
parser.add_argument("tests_to_run",
help=make_help("The benchmark tests to run"),
nargs="*",
default=["bst_benchmarks/default.benchmark"])
parser.add_argument("-o", "--output_file",
help=make_help("The file to write benchmark results to"),
type=argparse.FileType("w"),
default="-")
parser.add_argument("-r", "--repo_path",
help=make_help("The repository from which to clone branches to compare "
"against the defaults set in the test. Note that this can"
" be a local path using file://"),
default=repo_path,
type=str)
parser.add_argument("-b", "--bs_branch",
help=make_help("The branch to clone from the set repository"),
default=bs_branch,
type=str)
parser.add_argument("-s", "--shas_to_be_tested",
help=make_help("SHAs to clone from the set repository"),
action='append')
parser.add_argument("-d", "--docker_tag",
help=make_help("The tag to use for the buildstream image"),
default=docker_tag,
type=str)
parser.add_argument("-p", "--docker_image",
help=make_help("The docker image to use"),
default=docker_image,
type=str)
parser.add_argument("-g", "--debug",
help=make_help("Show debug messages"),
default=debug,
action='store_true')
parser.add_argument("-k", "--keep_results",
help=make_help("Retain raw benchmarking results"),
default=keep_results,
action='store_true')
parser.add_argument('--results-file',
help=make_help("The file to store benchmarking results in; Implies"
" --keep-results"),
default=keep_path,
type=str)
args = parser.parse_args()
if bool(args.output_file):
output_file = args.output_file
if bool(args.repo_path):
repo_path = args.repo_path
if bool(args.bs_branch):
bs_branch = args.bs_branch
if bool(args.shas_to_be_tested):
shas_to_be_tested = args.shas_to_be_tested
if bool(args.docker_tag):
docker_tag = args.docker_tag
if bool(args.tests_to_run):
tests_to_run = args.tests_to_run
if bool(args.debug):
debug = True
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
else:
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
if bool(args.keep_results):
keep_results = args.keep_results
if bool(args.results_file):
keep_path = os.path.abspath(args.results_file)
keep_results = True
commits = list()
# Create a temporary directory for all work
with tempfile.TemporaryDirectory(prefix='temp_staging_location') as temp_staging_area:
# Get a reference to the requested repository, cloning it if it is remote
try:
if os.path.exists(repo_path):
logging.info("Repo path resolves locally: %s", repo_path)
repo = git.Repo.init(repo_path, bare=False)
else:
logging.info("Repo path resolves remotely: %s", repo_path)
repo = git.Repo.clone_from(repo_path, temp_staging_area)
except git.exc.GitError as err: # pylint: disable=no-member
logging.error("Unable to access git repository: %s", err)
sys.exit(1)
# Iterate over the commits in the requested branch and add any that match the
# command line selection to the list to be processed. Keep a list of all SHAs
# that have been found.
shas_found = []
try:
for commit in repo.iter_commits(bs_branch):
if commit.hexsha in shas_to_be_tested:
commits.append(commit)
shas_found.append(commit.hexsha)
except git.exc.GitCommandError as err: # pylint: disable=no-member
logging.error("Could not find commits in repository '%s' for branch '%s':\n%s",
repo_path, bs_branch, err)
sys.exit(1)
# Check list of found SHAs against original list and flag any missing
shas_not_found = [sha for sha in shas_to_be_tested if sha not in shas_found]
if shas_not_found:
logging.error("SHA(s) could not be found: %s", shas_not_found)
sys.exit(1)
# Create a temporary file reference for the benchmark versioning configuration
# file that will be processed together with the selected benchmark test(s).
output_tmp_file = os.path.join(temp_staging_area, 'output_file.benchmark')
# Generate the benchmark versioning configuration file for the selected parameters
try:
generate_benchmark_config.generate_benchmark_configuration(
output_file=output_tmp_file,
list_of_shas=commits,
docker_version=docker_tag,
bs_branch=bs_branch,
bs_path=repo_path,
docker_path=docker_image)
# pylint: disable=broad-except
except Exception as err:
logging.error("Creating benchmarking configuration failed:\n%s", err)
sys.exit(1)
test_set = []
# Add tests to run
for test in tests_to_run:
if test in test_set:
logging.error("Duplicate benchmarking test will be ignored: %s", test)
else:
test_set.append(test)
# Combine the selected benchmarking test(s) with the generated benchmark
# versioning configuration to form the set of configuration files to run.
test_set.append(output_tmp_file)
results_tmp_file = os.path.join(temp_staging_area, 'tmp_result')
try:
bst_benchmarks.main.run(config_files=test_set, _debug=True,
keep_images=False, reuse_images=False,
output_file=results_tmp_file)
# pylint: disable=broad-except
except Exception as err:
logging.error("Benchmarking failed:\n%s", err)
sys.exit(1)
# Copy results to keep path if set
if keep_results:
os.makedirs(os.path.dirname(keep_path), exist_ok=True)
copyfile(results_tmp_file, keep_path)
# Create a temporary file for the results digest
tmp_output = os.path.join(temp_staging_area, 'tmp_digest')
# Create a temporary file for errors raised while producing the digest
tmp_error = os.path.join(temp_staging_area, 'tmp_error')
# TODO: _error_file is not actually in use
digest_results.parse(files=[results_tmp_file], output_file=tmp_output, _error_file=tmp_error)
# Write output to requested outfile
with open(tmp_output, "r") as fin:
output_file.write(fin.read())
output_file.close()
if __name__ == "__main__":
main()
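# The helper below is a condensed, hypothetical restatement of the flow that
# main() implements above, shown for readers who want to drive the same steps
# from another script. The keyword arguments mirror the calls made in main();
# "shas" is expected to be a list of git.Commit objects.
def run_benchmarks_once(shas, tests, digest_path="digest.mdwn"):
    with tempfile.TemporaryDirectory() as tmp:
        # Generate the per-commit versioning configuration
        version_cfg = os.path.join(tmp, "versions.benchmark")
        generate_benchmark_config.generate_benchmark_configuration(
            output_file=version_cfg, list_of_shas=shas)
        # Run the selected benchmark tests against the generated versions
        results = os.path.join(tmp, "results.json")
        bst_benchmarks.main.run(config_files=tests + [version_cfg],
                                _debug=False, keep_images=False,
                                reuse_images=False, output_file=results)
        # Digest the raw JSON results into a human-readable summary
        digest_results.parse(files=[results], output_file=digest_path,
                             _error_file=os.path.join(tmp, "errors"))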
# bstgen configuration for BuildStream benchmarking tool.
# List of versions to test.
#
# Each version is tested in a separate Docker container, which ensures that each
# version can have the correct dependency set available, and we can track
# exactly what that dependency set is.
version_defaults:
base_docker_image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora
buildstream_repo: https://gitlab.com/BuildStream/BuildStream
versions:
- name: master
base_docker_ref: 30-latest
buildstream_ref: master
test_defaults:
measurements_file: /root/measurements.json
repeats: 3
log_file: /root/log.txt
tests:
- name: Hundred Square bstgen
description: |
Create one file in one hundred projects shaped as lines
script: |
git clone https://gitlab.com/BuildStream/benchmarks.git
pip3 install benchmarks/contrib/bstgen/
bstgen simple --num-file=100 --file-size=100 -w 100 -l 10 -s square test
cd test
/usr/bin/bst_measure \
-o /root/measurements.json \
-l /root/log.txt \
bst --log-file='/root/log.txt' --config='/root/usr_config_bst.yml' build
repeats: 3
......@@ -20,16 +20,32 @@ def main():
parser.add_argument("-o", "--output_file",
help="Output file for results",
type=Path, default=Path("/measurements.json"))
parser.add_argument("-l", "--log_file",
help="Log file for buildstream results",
type=Path)
parser.add_argument("command", help="The command to execute",
nargs=argparse.REMAINDER)
args = parser.parse_args()
results = time_process(args.command)
log_data = ""
if args.log_file:
try:
with open(args.log_file, 'r') as file:
log_data = file.read()
except FileNotFoundError:
log_data = "Log file not found"
except Exception as err:
log_data = "Unable to get log data: {}".format(err)
with args.output_file.open("w") as outfile:
json.dump({
"total-time": results["rtime"],
"max-rss-kb": results["maxrss"]
"max-rss-kb": results["maxrss"],
"bs-sha": str(os.environ["BUILDSTREAM_COMMIT"]),
"bs-ref": str(os.environ["BUILDSTREAM_REF"]),
"logs": log_data
}, outfile)
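# With the fields added above, the resulting measurements file would look
# roughly like the following (the values shown are invented for illustration;
# the keys come from the json.dump call):
#
# {"total-time": 42.7, "max-rss-kb": 534912,
#  "bs-sha": "<commit sha from BUILDSTREAM_COMMIT>",
#  "bs-ref": "<ref from BUILDSTREAM_REF>",
#  "logs": "<contents of the --log_file, if given>"}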
......
......@@ -12,13 +12,14 @@ version_defaults:
versions:
- name: master
base_docker_ref: 28-latest
base_docker_ref: 30-latest
buildstream_ref: master
test_defaults:
measurements_file: /root/measurements.json
repeats: 3
log_file: /root/log.txt
tests:
- name: Show of Debian like project for x86_64
......@@ -35,6 +36,7 @@ tests:
# Added in case yaml_cache is present
rm -rf .bst/
/usr/bin/bst_measure -o '/root/measurements.json' bst show debian-stack.bst
/usr/bin/bst_measure -o '/root/measurements.json' -l /root/log.txt \
bst --log-file='/root/log.txt' --config='/root/usr_config_bst.yml' show debian-stack.bst
repeats: 3
......@@ -11,17 +11,18 @@ version_defaults:
versions:
- name: stable
base_docker_ref: 28-latest
base_docker_ref: 30-latest
buildstream_ref: bst-1.2
- name: master
base_docker_ref: 28-latest
base_docker_ref: 30-latest
buildstream_ref: master
test_defaults:
measurements_file: /root/measurements.json
repeats: 3
log_file: /root/log.txt
tests:
- name: Startup time
......@@ -38,9 +39,9 @@ tests:
- volume: baserock-source
path: /mnt/baserock-source
script: |
git clone https://gitlab.com/BuildStream/bst-external/
cd bst-external
git checkout c4cc10467d116450471ca9f756617ede7572814c
git clone https://gitlab.com/BuildStream/bst-plugins-experimental/
cd bst-plugins-experimental
git checkout e7952f2afe58a835a71d3b8093970a20fa758990
pip3 install --user .
cd ..
......@@ -52,16 +53,24 @@ tests:
git clone https://gitlab.com/baserock/definitions
cd definitions
git checkout f1855b9376c8b711905e8640bd864c1d6ba3c5f3 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-and-fix
#TODO - The rationale behind using this historically based branch needs revisiting
git checkout e9a44856d967b9c74029be256b89f7b7b9c2f035 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-modify-for-plugin-changes
# Use pre-created source cache to avoid measuring the network fetch time.
mkdir -p ~/.config
echo "sourcedir: /mnt/baserock-source" > ~/.config/buildstream.conf
cp /root/usr_config_bst.yml ~/.config/buildstream.conf
echo "sourcedir: /mnt/baserock-source" >> ~/.config/buildstream.conf
# Disable the artifact cache to ensure a full build.
sed -e '/artifacts:/,/^$/ d' -i project.conf
bst_measure -o '/root/measurements.json' bst build gnu-toolchain/stage1-binutils.bst
# TODO - temporary fix for missing dependencies in bst-plugins-experimental
pip3 install pycairo PyGObject
bst_measure \
-o /root/measurements.json \
-l /root/log.txt \
bst --log-file='/root/log.txt' build gnu-toolchain/stage1-binutils.bst
# Or, for a more complete test: bst build systems/minimal-system-image-x86_64.bst
repeats: 3
......@@ -81,11 +90,12 @@ volumes:
Volume holding all sources needed for the Baserock components that we build.
prepare:
version: master
repo: https://gitlab.com/BuildStream/BuildStream
path: /mnt/baserock-source
script: |
git clone https://gitlab.com/BuildStream/bst-external/
cd bst-external
git checkout c4cc10467d116450471ca9f756617ede7572814c
git clone https://gitlab.com/BuildStream/bst-plugins-experimental/
cd bst-plugins-experimental
git checkout e7952f2afe58a835a71d3b8093970a20fa758990
pip3 install --user .
cd ..
......@@ -94,7 +104,11 @@ volumes:
git clone https://gitlab.com/baserock/definitions /mnt/baserock-source/definitions
cd /mnt/baserock-source/definitions
git checkout f1855b9376c8b711905e8640bd864c1d6ba3c5f3 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-and-fix
#TODO - The rationale behind using this historically based branch needs revisiting
git checkout e9a44856d967b9c74029be256b89f7b7b9c2f035 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-modify-for-plugin-changes
# TODO - temporary fix for missing dependencies in bst-plugins-experimental
pip3 install pycairo PyGObject
bst source fetch gnu-toolchain/stage1-binutils.bst
......
......@@ -18,48 +18,47 @@
# Authors:
# Sam Thursfield <sam.thursfield@codethink.co.uk>
import collections
import os
import psutil
import subprocess
import psutil
# Returns a dict containing all of the collected host info
def get_host_info():
return {
'total_system_memory_GiB': get_memory_info(),
'processor_info': get_processor(),
'kernel_release': get_kernel_info()}
return {
'total_system_memory_GiB': get_memory_info(),
'processor_info': get_processor(),
'kernel_release': get_kernel_info()
}
# This returns a number
def get_memory_info():
return bytes_to_gib(psutil.virtual_memory().total)
return bytes_to_gib(psutil.virtual_memory().total)
# This argument is a number
# This returns a number
def bytes_to_gib(value): # Bytes to Gibibytes
return round(value / (1024 * 1024 * 1024), 2)
return round(value / (1024 * 1024 * 1024), 2)
# This returns a utf-8 string
def get_kernel_info():
cmd = ["uname", "-r"]
output = subprocess.check_output(cmd)
return str(output, encoding="utf-8").rstrip()
cmd = ["uname", "-r"]
output = subprocess.check_output(cmd)
return str(output, encoding="utf-8").rstrip()
# This returns a dict with the CPU model name and the core count
def get_processor():
cores = 0
with open('/proc/cpuinfo') as f:
for line in f:
if line.strip():
if line.rstrip('\n').startswith('model name'):
model_name = line.rstrip('\n').split(':')[1]
cores += 1
# This returns the model name as a string
# This is done as the name cannot be reliably parsed
# Due to different vendors and models having different naming conventions
return {"cpu_model_name": model_name, "cpu_cores": cores}
cores = 0
with open('/proc/cpuinfo') as f:
for line in f:
if line.strip():
if line.rstrip('\n').startswith('model name'):
model_name = line.rstrip('\n').split(':')[1]
cores += 1
# The model name is returned as a raw string (alongside the core count)
# because it cannot be reliably parsed: different vendors and models use
# different naming conventions.
return {"cpu_model_name": model_name, "cpu_cores": cores}
logging:
message-format: '%{elapsed},%{elapsed-us},%{wallclock},%{key},%{element},%{action},%{message}'
......@@ -13,6 +13,7 @@ GRAPH_PATH="$DEFAULT_RESULT_PATH/graph_set/"
RESULTS_FILE_NAME="results-$NOW-$NOWT.json"
DEFAULT_FILE_NAME="results.json"
WEB_PAGE_OUTPUT_PATH="$DEFAULT_RESULT_PATH/web_pages/$JOB_NAME"
PLOT_DAYS="1"
echo "Removing $WEB_PAGE_OUTPUT_PATH"
rm -rf "$WEB_PAGE_OUTPUT_PATH"
......@@ -25,33 +26,59 @@ if ! [ -z "${BENCHMARK_CONFIG}" ]; then
BENCHMARK_CONF="${BENCHMARK_CONFIG}"
fi
if ! [ -z "${DAYS_TO_PLOT}" ]; then
PLOT_DAYS="${DAYS_TO_PLOT}"
fi
# Uninstall and re-install due to dependency issues with numpy and
# path conflicts between different numpy versions
pip3 uninstall -y matplotlib
pip3 uninstall -y numpy
pip3 install matplotlib
if [ "$BRANCH" != "master" ] && [ -z "${BESPOKE_WRITE}" ] ; then
# This function constructs filtering parameters for graph results, restricting
# the generated graphs to the tests that relate to the current job only.
create_graph_config() {
tests=""
quote='"'
while IFS= read -r line; do
tests+="-l ${quote}${line}${quote} "
done < <( ./list_tests_in_results_file.py -f "$1")
echo "$tests"
}
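# For example, if list_tests_in_results_file.py printed the two names
# "Startup time" and "Show of Debian like project for x86_64", the function
# above would echo:
#   -l "Startup time" -l "Show of Debian like project for x86_64"
# which is appended to the graphresults.py invocations below so that only the
# graphs for this job's tests are regenerated.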
if [ "$BRANCH" != "master" ] && [ -z "${BESPOKE_WRITE}" ] && [ -z "${DEBUG_MASTER}" ]; then
python3 -m bst_benchmarks $BENCHMARK_CONF -o $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME
if ! [ -e $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME ]; then
exit 1
fi
python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -o $GRAPH_PATH
line=$( create_graph_config $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME )
com="python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -t $PLOT_DAYS -o $GRAPH_PATH $line"
echo $com
eval $com
cat $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME
else
if ! [ -z "${PUBLISH_RESULT}" ]; then
FILE="$CACHE_PATH/$RESULTS_FILE_NAME"
if ! [ -z "${PUBLISH_RESULT}" ] || ! [ -z "${DEBUG_MASTER}" ]; then
FILE="$PIPELINE_CACHE_PATH/$RESULTS_FILE_NAME"
python3 -m bst_benchmarks $BENCHMARK_CONF -o $FILE
if ! [ -e $FILE ]; then
exit 1
fi
python3 graphresults.py -d $CACHE_PATH/ -o $GRAPH_PATH
line=$( create_graph_config $FILE )
echo $line
com="python3 graphresults.py -d $CACHE_PATH/ -i $FILE -t $PLOT_DAYS -o $GRAPH_PATH $line"
echo $com
eval $com
else
python3 -m bst_benchmarks $BENCHMARK_CONF -o $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME
if ! [ -e $FILE ]; then
exit 1
fi
python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -o $GRAPH_PATH
line=$( create_graph_config $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME )
echo $line
com="python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -t $PLOT_DAYS -o $GRAPH_PATH $line"
echo $com
eval $com
fi
fi
......@@ -66,7 +93,7 @@ python3 publishresults.py -d "$WEB_PAGE_OUTPUT_PATH/graph_set/" -o "$WEB_PAGE_OU
rm -rf "$WEB_PAGE_OUTPUT_PATH/graph_set"
if ! [ -z "${PUBLISH_RESULT}" ]; then
if ! [ -z "${PUBLISH_RESULT}" ] || ! [ -z "${DEBUG_MASTER}" ]; then
cp --verbose -r "$WEB_PAGE_OUTPUT_PATH" "$PIPELINE_CACHE_PATH/public/"
fi
......@@ -7,6 +7,6 @@
# specified in other config files.
versions:
- name: 1.1.0
base_docker_ref: latest
buildstream_ref: 1.1.0
- name: stable
base_docker_ref: 30-latest
buildstream_ref: bst-1.2
......@@ -4,22 +4,18 @@
# benchmark configuration.
version_defaults:
base_docker_image: docker.io/buildstream/buildstream-fedora
base_docker_image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora
buildstream_repo: https://gitlab.com/BuildStream/BuildStream
test_defaults:
measurements_file: /root/measurements.json
repeats: 3
log_file: /root/log.txt
tests:
- name: Startup time
# FIXME: Rather than bodging /usr/bin/time to generate JSON, we should
# have the 'bst-measure' log parser script take on this functionality.
script: |
/usr/bin/time \
-o /root/measurements.json \
-f '{ "total-time": %e, "max-rss-kb": %M }' \
-- bst --help
bst_measure -o '/root/measurements.json' bst --help
- name: Build of Baserock stage1-binutils for x86_64
description: |
......@@ -29,31 +25,38 @@ tests:
it can still be built with BuildStream 1.0.0.
mounts:
- volume: baserock-source
path: /src
path: /mnt/baserock-source
script: |
git clone https://gitlab.com/BuildStream/bst-external/
cd bst-external
git checkout c4cc10467d116450471ca9f756617ede7572814c
git clone https://gitlab.com/BuildStream/bst-plugins-experimental/
cd bst-plugins-experimental
git checkout e7952f2afe58a835a71d3b8093970a20fa758990
pip3 install --user .
cd ..
if [ ! -d /mnt/baserock-source/definitions ]
then
echo "baserock source does not exist"
exit 1
fi
git clone https://gitlab.com/baserock/definitions
cd definitions
git checkout f1855b9376c8b711905e8640bd864c1d6ba3c5f3 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-and-fix
git checkout e9a44856d967b9c74029be256b89f7b7b9c2f035 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-modify-for-plugin-changes
# Use pre-created source cache to avoid measuring the network fetch time.
mkdir -p ~/.config
echo "sourcedir: /src" > ~/.config/buildstream.conf
echo "sourcedir: /mnt/baserock-source" > ~/.config/buildstream.conf
# Disable the artifact cache to ensure a full build.
sed -e '/artifacts:/,/^$/ d' -i project.conf
# Run the build.
# FIXME: again, the log parser script will allow for more detailed results
/usr/bin/time \
# TODO - temporary fix for missing dependencies in bst-plugins-experimental
pip3 install pycairo PyGObject
bst_measure \
-o /root/measurements.json \
-f '{ "total-time": %e, "max-rss-kb": %M }' \
-- bst build gnu-toolchain/stage1-binutils.bst
-l /root/log.txt \
bst --log-file='/root/log.txt' --config='/root/usr_config_bst.yml' build gnu-toolchain/stage1-binutils.bst
# Or, for a more complete test: bst build systems/minimal-system-image-x86_64.bst
repeats: 3
......@@ -72,21 +75,25 @@ volumes:
description: |
Volume holding all sources needed for the Baserock components that we build.
prepare:
version: version-under-test
path: /src
version: master
repo: https://gitlab.com/BuildStream/BuildStream
path: /mnt/baserock-source
script: |
git clone https://gitlab.com/BuildStream/bst-external/
cd bst-external
git checkout c4cc10467d116450471ca9f756617ede7572814c
git clone https://gitlab.com/BuildStream/bst-plugins-experimental/
cd bst-plugins-experimental
git checkout e7952f2afe58a835a71d3b8093970a20fa758990
pip3 install --user .
cd ..
mkdir -p ~/.config
echo "sourcedir: /src" > ~/.config/buildstream.conf
echo "sourcedir: /mnt/baserock-source" > ~/.config/buildstream.conf
git clone https://gitlab.com/baserock/definitions /mnt/baserock-source/definitions
cd /mnt/baserock-source/definitions
git checkout e9a44856d967b9c74029be256b89f7b7b9c2f035 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-modify-for-plugin-changes
git clone https://gitlab.com/baserock/definitions
cd definitions
git checkout f1855b9376c8b711905e8640bd864c1d6ba3c5f3 # branch lachlanmackenzie/sam-bst-1.0.0-recreate-and-fix
# TODO - temporary fix for missing dependencies in bst-plugins-experimental
pip3 install pycairo PyGObject
bst source fetch gnu-toolchain/stage1-binutils.bst
......
......@@ -6,12 +6,12 @@
# version can have the correct dependency set available, and we can track
# exactly what that dependency set is.
version_defaults:
base_docker_image: docker.io/buildstream/buildstream-fedora
base_docker_image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora
buildstream_repo: https://gitlab.com/BuildStream/BuildStream
versions:
- name: master
base_docker_ref: latest
base_docker_ref: 30-latest
buildstream_ref: master
test_defaults:
......@@ -23,8 +23,5 @@ tests:
# FIXME: Rather than bodging /usr/bin/time to generate JSON, we should
# have the 'bst-measure' log parser script take on this functionality.
script: |
/usr/bin/time \
-o /root/measurements.json \
-f '{ "total-time": %e, "max-rss-kb": %M }' \
-- bst --help
bst_measure -o '/root/measurements.json' bst --help
#!/usr/bin/env python3
#
# Copyright (C) 2019 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
import json
import os
import logging
import argparse
import sys
import time
import datetime
import statistics
def main():
output_file = 'digest.mdwn'
error_file = 'error.mdwn'
files = list()
parser = argparse.ArgumentParser()
parser.add_argument(
"-d",
"--directory",
help="Directory containing multiple results files (*.json), default is current directory.",
type=str)
parser.add_argument("-s", "--specific_results",
help="Path to a specific results set",
type=str)
parser.add_argument("-o", "--output_file",
help="Output file for results digest",
type=str)
args = parser.parse_args()
if bool(args.directory):
if os.path.isdir(args.directory):
for entry in os.scandir(args.directory):
if entry.name.endswith(".json"):
files.append(entry.path)
else:
logging.error("Specified directory does not exist: %s", args.directory)
sys.exit(1)
if bool(args.specific_results):
if os.path.isfile(args.specific_results):
if args.specific_results.endswith(".json"):
files.append(args.specific_results)
else:
logging.error("Specific results file does not exist: %s", args.specific_results)
sys.exit(1)
if bool(args.output_file):
output_file = args.output_file
parse(files, output_file, error_file)
def parse(files, output_file, _error_file):
resultsdict = dict([])
error_buffer = []
for entry in files:
try:
with open(entry, "r") as f:
file = f.read()
data = json.loads(file)
try:
measurement_time = data["end_timestamp"]
version_ref = {}
version_date = {}
version_stale = {}
v_index = ''
for version in data["versions"]:
if "unique_ref" in version:
v_index = "unique_ref"
else:
v_index = "name"
version_ref[str(version[v_index])] = version["buildstream_commit"]
if 'buildstream_commit_date' in version:
if version["buildstream_commit_date"]:
version_date[str(version["buildstream_commit"])] = float(
version["buildstream_commit_date"])
else:
version_date[str(version["buildstream_commit"])] = measurement_time
else:
version_date[str(version[v_index])] = measurement_time
for test in data["tests"]:
name = test["name"]
for result in test["results"]:
version = result["version"]
if version in version_stale:
continue
# Check if measurements have been made
if "measurements" not in result:
logging.warning("Measurement corruption in: %s", f.name)
error_buffer.append("Measurement corruption in: {}\n".format(f.name))
continue
bs_ref = None
bs_commit = None
times = []
rss_kbs = []
# Iterate measurements and add
for measurement in result["measurements"]:
times.append(measurement["total-time"])
rss_kbs.append(measurement["max-rss-kb"])
if ("bs-ref" in result) and ("bs-sha" in result):
if not bs_ref:
bs_ref = result["bs-ref"]
bs_commit = result["bs-sha"]
else:
if result["bs-ref"] != bs_ref:
logging.error(
"Buildstream reference changed from %s to %s: ",
bs_ref, result["bs-ref"])
bs_ref = result["bs-ref"]
if result["bs-sha"] != bs_commit:
logging.error(
"Buildstream commit changed from %s to %s: ",
bs_commit, result["bs-sha"])
bs_commit = result["bs-sha"]
if str(version) in version_ref:
commit = version_ref[str(version)]
else:
commit = version_ref[str(commit)]
if str(commit) in version_date:
commit_time = version_date[str(commit)]
else:
commit_time = version_date[str(version)]
# Calculate averages
average_time = statistics.mean(times)
average_max_rss_kb = statistics.mean(rss_kbs)
# Standard deviations
times_sd = statistics.stdev(times)
rss_kbs_sd = statistics.stdev(rss_kbs)
# Create a key based on version and name
key = {}
key['version'] = version
key['name'] = name
ice_key = frozenset(key.items())
# Create a value for the entry
value = [
version,
commit,
measurement_time,
average_time,
average_max_rss_kb,
commit_time,
times_sd,
rss_kbs_sd]
# Add the value to the accumulated values for a given key
if ice_key not in resultsdict:
resultsdict[ice_key] = []
resultsdict[ice_key].append(value)
else:
resultsdict[ice_key].append(value)
logging.debug("%s %s %s %s %s %s %s %s", version,
name, commit, measurement_time, average_time,
times_sd, average_max_rss_kb, rss_kbs_sd)
except ValueError as error:
logging.error("Error during parse of %s: %s", file, error)
except ValueError as error:
logging.error("Failure to load %s as json file: %s", file, error)
with open(output_file, 'w') as results_file:
timestr = time.strftime("%Y%m%d-%H%M%S")
results_file.write("Benchmarking Test Results on: {}\n\n".format(timestr))
if error_buffer:
results_file.write("Errors in Results Found:\n")
results_file.writelines(error_buffer)
results_file.write('\n')
for key, value in resultsdict.items():
dict_k = dict(key)
test_description = "Test Version: {}, Test Name: {}\n".format(
dict_k['version'], dict_k['name'])
results_file.write(test_description)
list_a = list(value)
list_a.sort(key=lambda x: x[1])
for data_set in list_a:
results_file.write(
"time: {}, "
"average time to complete test (s): {}, "
"time standard deviation: {}, "
"average resident set size (kb):, {}, "
"rss standard deviation: {}\n" .format(
datetime.datetime.fromtimestamp(
data_set[5]),
data_set[3],
data_set[6],
data_set[4],
data_set[7]))
results_file.write('\n')
if __name__ == "__main__":
main()
......@@ -18,87 +18,52 @@
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
import os
import logging
import shutil
import tempfile
import yaml
from bst_benchmarks.config import DEFAULT_BASE_DOCKER_IMAGE, DEFAULT_BUILDSTREAM_REPO
from distutils.file_util import copy_file
# This function generates a benchmark configuration file that allows for
# multiple buildstream commits to be benchmarked individually.
#
# output_file - the full path for the generated benchmark configuration file
# template_file - the full path to the template file that is to be used to
# generate the benchmark configuration file.
# list_of_shas - list of Buildstream commits that need to be processed
# docker_version - the docker version to be used in the configuration, set
# to latest, but might be more restricted in the future
# bs_branch - the Buildstream branch that is being considered, defaults to
# master
# bs_path - path to the Buildstream repo (url or local directory)
# docker_path - path to the Docker Image to be used
def generate_benchmark_configuration(output_file="generated.benchmark", template_file="bst_benchmarks/template.benchmark", list_of_shas=[], docker_version="28-latest", bs_branch='master'):
# Create temporary staging area for generated benchmark configuration
temp_staging_area = tempfile.mkdtemp(prefix='temp_staging_location')
temp_benchmark = os.path.join(temp_staging_area, 'temp_benchmark.benchmark')
def generate_benchmark_configuration(
output_file="generated.benchmark",
list_of_shas=None,
docker_version="30-latest",
bs_branch='master',
bs_path=DEFAULT_BUILDSTREAM_REPO,
docker_path=DEFAULT_BASE_DOCKER_IMAGE):
try:
# Check template file exists and then copy into temporary generated benchmark
# configuration file for processing
if os.path.isfile(template_file):
shutil.copyfile(template_file, temp_benchmark)
logging.info("Template file is: ", template_file)
else:
logging.error("Specified template file does not exist: ", template_file)
raise Exception("Specified template file does not exist: ", template_file)
if not list_of_shas:
list_of_shas = []
# Open the temporary benchmark configuration file and read
with open(temp_benchmark) as f:
page = f.readlines()
# Iterate through the list of shas and populate the stubbed entries
# with sha data from the entry, then write each entry to the new file
# entry
with open(output_file, 'w') as yaml_file:
# New benchmarking data
new_file = ""
# Header for versions
generic_version_entry = ''
# Go through each line of the template and copy everything apart from the
# stubbed generic version entry to the new benchmarking data. The stubbed
# entries are copied into a generic version entry for later use.
for line in page:
if '<name_stub>' in line:
generic_version_entry += line
elif '<docker_stub>' in line:
generic_version_entry += line
elif '<buildstream_ref_stub>' in line:
generic_version_entry += line
elif '<buildstream_stub>' in line:
generic_version_entry += line
elif '<buildstream_time_stub>' in line:
generic_version_entry += line
else:
new_file += line
generic_version_entry += '\n\n'
version_default = {'version_defaults': {'base_docker_image': docker_path,
'buildstream_repo': bs_path}}
yaml.dump(version_default, yaml_file, default_flow_style=False)
yaml_file.write('\n\n')
# Iterate through the list of shas and populate the stubbed entries
# with sha data from the entry, then write each entry to the new file
# entry
configs = []
for entry in list_of_shas:
new_file += generic_version_entry.replace('<name_stub>', str(entry)) \
.replace('<docker_stub>', docker_version) \
.replace('<buildstream_stub>', str(entry)) \
.replace('<buildstream_ref_stub>', bs_branch) \
.replace('<buildstream_time_stub>', str(entry.committed_date))
# Write the new file entry back to the temporary file
with open(temp_benchmark, 'w') as f:
f.write(new_file)
# Copy the temporary benchmark file to the requested destination
try:
copy_file(temp_benchmark, output_file)
except OSError as err:
logging.error("Unable to copy pages to target: ", args.output_path)
raise Exception('Unable to create target configuration file: ', err)
finally:
shutil.rmtree(temp_staging_area)
configs.append({'name': str(entry),
'base_docker_ref': docker_version,
'buildstream_ref': bs_branch,
'buildstream_commit': str(entry),
'buildstream_commit_date': str(entry.committed_date)})
version = {'versions': configs}
yaml.dump(version, yaml_file, default_flow_style=False)
yaml_file.write('\n\n')
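# For a single commit, the generated file would contain something along these
# lines (keys taken from the dictionaries dumped above; the SHA and the commit
# date are invented for illustration):
#
# version_defaults:
#   base_docker_image: registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora
#   buildstream_repo: https://gitlab.com/BuildStream/BuildStream
#
# versions:
# - base_docker_ref: 30-latest
#   buildstream_commit: '<sha>'
#   buildstream_commit_date: '1555000000'
#   buildstream_ref: master
#   name: '<sha>'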
......@@ -20,15 +20,14 @@
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
import git
import tempfile
import argparse
import shutil
import re
import os
import logging
import sys
import git
# This function generates a list of SHAs for a given repo and branch, taking a
# start commit (the last commit that was processed, immediately prior to the
# first commit to be returned) and a last commit, which might be the current
# head as determined at
......@@ -40,6 +39,7 @@ import sys
# latest_commit - the last commit that needs to be added to the returned
# list.
def main():
repo = "https://gitlab.com/BuildStream/buildstream.git"
branch = 'master'
......@@ -75,17 +75,18 @@ def main():
try:
commits = get_list_of_commits(repo, branch, last_commit, latest_commit)
except git.exc.GitError as err:
except git.exc.GitError as err: # pylint: disable=no-member
print("Unable to extract commits: ", err)
sys.exit(1)
except Exception as ex_err:
# TODO: Come on, surely there's a million other things that could cause this?
except Exception as ex_err: # pylint: disable=broad-except
print("Nothing to extract: ", ex_err)
sys.exit(1)
print(commits)
def get_list_of_commits(repo_path, branch, lastCommit, latestCommit):
def get_list_of_commits(repo_path, branch, last_commit, latest_commit):
commits = list()
......@@ -95,21 +96,21 @@ def get_list_of_commits(repo_path, branch, lastCommit, latestCommit):
repo = git.Repo.init(repo_path, bare=False)
else:
repo = git.Repo.clone_from(repo_path, temp_staging_area)
except git.exc.GitError as err:
logging.error("Unable to access git repository: ", err)
except git.exc.GitError as err: # pylint: disable=no-member
logging.error("Unable to access git repository: %s", err)
raise
start = False
for commit in repo.iter_commits(branch):
if commit.hexsha == latestCommit:
commits.append(commit)
start = True
if commit.hexsha == lastCommit:
break
elif commit.hexsha == lastCommit:
break
elif start == True:
commits.append(commit)
if commit.hexsha == latest_commit:
commits.append(commit)
start = True
if commit.hexsha == last_commit:
break
elif commit.hexsha == last_commit:
break
elif start:
commits.append(commit)
return commits
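# Note on the traversal above: repo.iter_commits() yields commits newest-first,
# so collection begins at latest_commit (inclusive) and stops once last_commit
# is reached, meaning the returned list is ordered newest-first and excludes
# last_commit itself.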
......
#!/usr/bin/env python3
#
# Copyright (C) 2019 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
# Commandline executable that takes a file containing benchmarking results
# and gives back the tests that were run to generate the results.
import argparse
import logging
import os
import json
import sys
def main():
results_file = "results.json"
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--results_file",
help="File containing results to be checked",
type=str)
args = parser.parse_args()
if bool(args.results_file):
if os.path.isfile(args.results_file):
results_file = args.results_file
else:
logging.error("Specified file does not exist: %s", args.results_file)
sys.exit(1)
with open(results_file, "r") as f:
file = f.read()
data = json.loads(file)
tests = []
try:
for test in data["tests"]:
tests.append(test["name"])
except ValueError:
logging.error("Unable to resolve tests in: %s", results_file)
results = '\n'.join(tests)
print(results)
if __name__ == "__main__":
main()
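# Usage sketch: "./list_tests_in_results_file.py -f results.json" prints one
# test name per line; run_walk_ci.sh reads that output in create_graph_config
# to build the "-l <name>" filter arguments passed on to graphresults.py.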
......@@ -33,10 +33,11 @@ from distutils.dir_util import copy_tree
# using a supplied template form and substituting the graph page references
# into generated copies; these are then transferred to a given output path.
def main():
import logging
import sys
directory = '.'
graphs_directory = directory
def main():
output_path = '.'
template_path = '.'
files = list()
......@@ -51,29 +52,26 @@ def main():
help="Template path",
type=str)
args = parser.parse_args()
with tempfile.