@@ -39,6 +39,8 @@ benchmarks:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
@@ -60,6 +62,8 @@ test-debian-like-project:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
@@ -81,6 +85,8 @@ functional-test-simple:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
@@ -96,6 +102,8 @@ functional-test-combined-config:
before_script:
- "docker image prune -a -f"
- "docker container prune -f"
- "docker volume prune -f"
- "rm -rf results_out/web_pages/${JOB_NAME_REF}/*"
tags: [ 'benchmarks' ]
script:
- if ! [ -z "${FOLLOW_BUILDSTREAM}" ]; then
......
#!/usr/bin/env python3
#
# Copyright (C) 2019 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
import tempfile
import argparse
import os
import logging
from shutil import copyfile
import sys
import git
import bst_benchmarks.main
import generate_benchmark_config
import digest_results
# This command-line executable generates and runs a benchmark CI run locally.
# A number of parameters are passed in to check that the configuration settings
# are tenable: the path to the repository, the buildstream branch, the SHAs to
# be tested (defaults to the HEAD of the branch if not specified), the specific
# tests to be carried out (defaults to default.benchmark if not set), the output
# file (defaults to stdout if not set) and whether master should be tested as a
# comparison (default is yes).
#
# output_file - the path where the bespoke benchmark file is to be placed
# repo_path - path to the local buildstream repo.
# bs_branch - the branch of buildstream being considered.
# shas - shas to be tested.
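#
# A typical invocation might look like the following (the script filename and
# the SHA are placeholders here; every flag maps to an argparse option defined
# in main() below):
#
#   ./local_benchmark.py bst_benchmarks/default.benchmark \
#       -r file:///path/to/buildstream -b master \
#       -s <sha-to-test> -o digest.mdwn --results-file results.json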
def main():
# Define all the defaults explicitly
repo_path = 'https://gitlab.com/BuildStream/buildstream.git'
bs_branch = 'master'
shas_to_be_tested = []
docker_tag = "28-latest"
docker_image = 'registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora'
tests_to_run = []
debug = False
keep_results = False
keep_path = "results.json"
def make_help(message):
return message + " (Default: %(default)s)"
# Resolve commandline arguments
parser = argparse.ArgumentParser(description="Automatically set up test environments for a set "
"of buildstream benchmarks and run them locally using docker.")
parser.add_argument("tests_to_run",
help=make_help("The benchmark tests to run"),
nargs="*",
default=["bst_benchmarks/default.benchmark"])
parser.add_argument("-o", "--output_file",
help=make_help("The file to write benchmark results to"),
type=argparse.FileType("w"),
default="-")
parser.add_argument("-r", "--repo_path",
help=make_help("The repository from which to clone branches to compare "
"against the defaults set in the test. Note that this can"
" be a local path using file://"),
default=repo_path,
type=str)
parser.add_argument("-b", "--bs_branch",
help=make_help("The branch to clone from the set repository"),
default=bs_branch,
type=str)
parser.add_argument("-s", "--shas_to_be_tested",
help=make_help("SHAs to clone from the set repository"),
action='append')
parser.add_argument("-d", "--docker_tag",
help=make_help("The tag to use for the buildstream image"),
default=docker_tag,
type=str)
parser.add_argument("-p", "--docker_image",
help=make_help("The docker image to use"),
default=docker_image,
type=str)
parser.add_argument("-g", "--debug",
help=make_help("Show debug messages"),
default=debug,
action='store_true')
parser.add_argument("-k", "--keep_results",
help=make_help("Retain raw benchmarking results"),
default=keep_results,
action='store_true')
parser.add_argument('--results-file',
help=make_help("The file to store benchmarking results in; Implies"
" --keep-results"),
default=keep_path,
type=str)
args = parser.parse_args()
if bool(args.output_file):
output_file = args.output_file
if bool(args.repo_path):
repo_path = args.repo_path
if bool(args.bs_branch):
bs_branch = args.bs_branch
if bool(args.shas_to_be_tested):
shas_to_be_tested = args.shas_to_be_tested
if bool(args.docker_tag):
docker_tag = args.docker_tag
if bool(args.tests_to_run):
tests_to_run = args.tests_to_run
if bool(args.debug):
debug = True
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
else:
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
if bool(args.keep_results):
keep_results = args.keep_results
if bool(args.results_file):
keep_path = os.path.abspath(args.results_file)
keep_results = True
commits = list()
# Create a temporary directory for all work
with tempfile.TemporaryDirectory(prefix='temp_staging_location') as temp_staging_area:
# Get a reference to the requested repository, cloning it if it is remote
try:
if os.path.exists(repo_path):
logging.info("Repo path resolves locally: %s", repo_path)
repo = git.Repo.init(repo_path, bare=False)
else:
logging.info("Repo path resolves remotely: %s", repo_path)
repo = git.Repo.clone_from(repo_path, temp_staging_area)
except git.exc.GitError as err:
logging.error("Unable to access git repository: %s", err)
sys.exit(1)
# Iterate over the commits in the requested branch and add those matching
# the command line selection to the list to be processed. Keep a list of
# all SHAs that have been found.
shas_found = []
try:
for commit in repo.iter_commits(bs_branch):
if commit.hexsha in shas_to_be_tested:
commits.append(commit)
shas_found.append(commit.hexsha)
except git.exc.GitCommandError as err:
logging.error("Could not find commits in repository '%s' for branch '%s':\n%s",
repo_path, bs_branch, err)
sys.exit(1)
# Check list of found SHAs against original list and flag any missing
shas_not_found = [sha for sha in shas_to_be_tested if sha not in shas_found]
if shas_not_found:
logging.error("SHA(s) could not be found: %s", shas_not_found)
sys.exit(1)
# Create a temporary file reference for the benchmark versioning configuration
# file that will be processed together with the selected benchmark test(s).
output_tmp_file = os.path.join(temp_staging_area, 'output_file.benchmark')
# Generate the benchmark versioning configuration file for the selected parameters
try:
generate_benchmark_config.generate_benchmark_configuration(
output_file=output_tmp_file,
list_of_shas=commits,
docker_version=docker_tag,
bs_branch=bs_branch,
bs_path=repo_path,
docker_path=docker_image)
# pylint: disable=broad-except
except Exception as err:
logging.error("Creating benchmarking configuration failed:\n%s", err)
sys.exit(1)
test_set = []
# Add tests to run
for test in tests_to_run:
if test in test_set:
logging.error("Duplicate benchmarking test will be ignored: {}".format(test))
else:
test_set.append(test)
# Combine the generated benchmark versioning configuration with the selected
# benchmarking test(s) to form the full set of configuration files.
test_set.append(output_tmp_file)
results_tmp_file = os.path.join(temp_staging_area, 'tmp_result')
try:
bst_benchmarks.main.run(config_files=test_set, debug=True,
keep_images=False, reuse_images=False,
output_file=results_tmp_file)
# pylint: disable=broad-except
except Exception as err:
logging.error("Benchmarking failed:\n%s", err)
sys.exit(1)
# Copy results to keep path if set
if keep_results:
os.makedirs(os.path.dirname(keep_path), exist_ok=True)
copyfile(results_tmp_file, keep_path)
# Create temporary file for results digest
tmp_output = os.path.join(temp_staging_area, 'tmp_output')
# Create temporary file for the error digest
tmp_error = os.path.join(temp_staging_area, 'tmp_error')
digest_results.parse(files=[results_tmp_file], output_file=tmp_output, error_file=tmp_error)
# Write output to requested outfile
with open(tmp_output, "r") as fin:
output_file.write(fin.read())
output_file.close()
if __name__ == "__main__":
main()
@@ -29,7 +29,9 @@ def main():
with args.output_file.open("w") as outfile:
json.dump({
"total-time": results["rtime"],
"max-rss-kb": results["maxrss"]
"max-rss-kb": results["maxrss"],
"bs-sha": str(os.environ["BUILDSTREAM_COMMIT"]),
"bs-ref": str(os.environ["BUILDSTREAM_REF"])
}, outfile)
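For reference, a single measurement entry written by this script would then look roughly as follows (values are illustrative; the keys match the json.dump call above):

{"total-time": 42.7, "max-rss-kb": 1048576, "bs-sha": "<commit sha>", "bs-ref": "master"}

The added bs-sha and bs-ref fields let the digest and graphing scripts further down detect when the BuildStream commit behind a set of measurements changes.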
......
@@ -50,6 +50,7 @@ class BstVersionSpec():
self.buildstream_ref = buildstream_ref or name
self.buildstream_commit = buildstream_commit
self.buildstream_commit_date = buildstream_commit_date
self.unique_ref = name + '.' + self.buildstream_repo.replace(':', '.').replace('/', '.').lower()
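# For illustration (values assumed, not taken from a real run): with name
# "master" and buildstream_repo "https://gitlab.com/BuildStream/buildstream",
# unique_ref becomes "master.https...gitlab.com.buildstream.buildstream".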
class TestSpec():
@@ -69,6 +70,9 @@ class TestSpec():
repeats (int): Number of times to repeat the test.
mounts (list): Optional list of VolumeMountSpec entries
description (str): Textual summary of the test
unique id (str): The same test can be run across multiple repositories; this
provides a unique reference point for results for the same test carried out
on separate repos.
"""
def __init__(self, name, script, measurements_file, repeats, mounts, description=""):
self.name = name
@@ -120,8 +124,9 @@ class VolumePrepareSpec():
path (str): Path to mount the volume in the container doing preparation.
script (str): Shell commands to run to do the preparation.
"""
def __init__(self, version, path, script):
def __init__(self, version, path, script, repo):
self.version = version
self.repo = repo
self.path = path
self.script = script
......
@@ -81,6 +81,7 @@ volumes:
Volume holding all sources needed for the Baserock components that we build.
prepare:
version: master
repo: https://gitlab.com/BuildStream/BuildStream
path: /mnt/baserock-source
script: |
git clone https://gitlab.com/BuildStream/bst-external/
......
@@ -31,12 +31,26 @@ pip3 uninstall -y matplotlib
pip3 uninstall -y numpy
pip3 install matplotlib
# This function constructs filtering parameters for graph results, restricting
# the generated graphs to those tests that relate to the current job only.
create_graph_config() {
tests=""
quote='"'
while IFS= read -r line; do
tests+="-l ${quote}${line}${quote} "
done < <( ./list_tests_in_results_file.py -f "$1")
echo "$tests"
}
if [ "$BRANCH" != "master" ] && [ -z "${BESPOKE_WRITE}" ] && [ -z "${DEBUG_MASTER}" ]; then
python3 -m bst_benchmarks $BENCHMARK_CONF -o $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME
if ! [ -e $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME ]; then
exit 1
fi
python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -o $GRAPH_PATH
line=$( create_graph_config $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME )
com="python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -o $GRAPH_PATH $line"
echo $com
eval $com
cat $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME
else
if ! [ -z "${PUBLISH_RESULT}" ] || ! [ -z "${DEBUG_MASTER}" ]; then
@@ -45,13 +59,21 @@ else
if ! [ -e $FILE ]; then
exit 1
fi
python3 graphresults.py -d $CACHE_PATH/ -i $FILE -o $GRAPH_PATH
line=$( create_graph_config $FILE )
echo $line
com="python3 graphresults.py -d $CACHE_PATH/ -o $GRAPH_PATH $line"
echo $com
eval $com
else
python3 -m bst_benchmarks $BENCHMARK_CONF -o $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME
if ! [ -e $FILE ]; then
exit 1
fi
python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -o $GRAPH_PATH
line=$( create_graph_config $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME )
echo $line
com="python3 graphresults.py -d $CACHE_PATH/ -i $DEFAULT_RESULT_PATH/$DEFAULT_FILE_NAME -o $GRAPH_PATH $line"
echo $com
eval $com
fi
fi
......
#!/usr/bin/env python3
#
# Copyright (C) 2019 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
import json
import os
import logging
import argparse
import sys
import time
import datetime
import statistics
def main():
directory = '.'
output_file = 'digest.mdwn'
error_file = 'error.mdwn'
files = list()
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--directory",
help="Directory containing multiple results files (*.json), default is current directory.",
type=str)
parser.add_argument("-s", "--specific_results",
help="Path to a specific results set",
type=str)
parser.add_argument("-o", "--output_file",
help="Output file for results digest",
type=str)
args = parser.parse_args()
if bool(args.directory):
if os.path.isdir(args.directory):
for entry in os.scandir(args.directory):
if entry.name.endswith(".json"):
files.append(entry.path)
else:
logging.error("Specified directory does not exist", args.directory)
sys.exit(1)
if bool(args.specific_results):
if os.path.isfile(args.specific_results):
if args.specific_results.endswith(".json"):
files.append(args.specific_results)
else:
logging.error("Specific results file does not exist", args.specific_results)
sys.exit(1)
if bool(args.output_file):
output_file = args.output_file
parse(files, output_file, error_file)
def parse(files, output_file, error_file):
resultsdict = dict([])
error_buffer = []
for entry in files:
try:
with open(entry, "r") as f:
file = f.read()
data = json.loads(file)
try:
measurement_time = data["end_timestamp"]
version_ref = {}
version_date = {}
version_stale = {}
v_index = ''
for version in data["versions"]:
if "unique_ref" in version:
v_index = "unique_ref"
else:
v_index = "name"
version_ref[str(version[v_index])] = version["buildstream_commit"]
if 'buildstream_commit_date' in version:
if version["buildstream_commit_date"]:
version_date[str(version["buildstream_commit"])] = float(version["buildstream_commit_date"])
else:
version_date[str(version["buildstream_commit"])] = measurement_time
else:
version_date[str(version[v_index])] = measurement_time
for test in data["tests"]:
name = test["name"]
for result in test["results"]:
version = result["version"]
if version in version_stale:
continue
# Check if measurements have been made
if "measurements" not in result:
logging.warning("Measurement corruption in: {}".format(f.name))
error_buffer.append("Measurement corruption in: {}\n".format(f.name))
continue
bs_ref = None
bs_commit = None
unique_ref = result["version"]
times = []
rss_kbs = []
# Iterate measurements and add
for measurement in result["measurements"]:
times.append(measurement["total-time"])
rss_kbs.append(measurement["max-rss-kb"])
if ("bs-ref" in result) and ("bs-sha" in result):
if not bs_ref:
bs_ref = result["bs-ref"]
bs_commit = result["bs-sha"]
else:
if result["bs-ref"] != bs_ref:
logging.error("Buildstream reference changed from {} to {}: ".format(bs_ref, result["bs-ref"]))
bs_ref = result["bs-ref"]
if result["bs-sha"] != bs_commit:
logging.error("Buildstream commit changed from {} to {}: ".format(bs_commit, result["bs-sha"]))
bs_commit = result["bs-sha"]
if str(version) in version_ref:
commit = version_ref[str(version)]
else:
commit = version_ref[str(commit)]
if str(commit) in version_date:
commit_time = version_date[str(commit)]
else:
commit_time = version_date[str(version)]
# Calculate averages
average_time = statistics.mean(times)
average_max_rss_kb = statistics.mean(rss_kbs)
# Standard deviations
times_sd = statistics.stdev(times)
rss_kbs_sd = statistics.stdev(rss_kbs)
# Create a key based on version and name
key = {}
key['version'] = version
key['name'] = name
ice_key = frozenset(key.items())
# Create a value for the entry
value = [version, commit, measurement_time, average_time, average_max_rss_kb, commit_time, times_sd, rss_kbs_sd]
# Add the value to the accumulated values for a given key
if ice_key not in resultsdict:
resultsdict[ice_key] = []
resultsdict[ice_key].append(value)
else:
resultsdict[ice_key].append(value)
logging.debug(str(version) + ' ' + name + ' ' + str(commit) + ' '
+ str(measurement_time) + ' ' + str(average_time) + ' '
+ str(times_sd) + ' ' + str(average_max_rss_kb) + ' '
+ str(rss_kbs_sd))
except ValueError as error:
logging.error("Error during parse of {}: {}".format(file, error))
except ValueError as error:
logging.error("Failure to load {} as json file".format(file, error))
with open(output_file, 'w') as results_file:
timestr = time.strftime("%Y%m%d-%H%M%S")
results_file.write("Benchmarking Test Results on: {}\n\n".format(timestr))
if error_buffer:
results_file.write("Errors in Results Found:\n")
results_file.writelines(error_buffer)
results_file.write('\n')
for key, value in resultsdict.items():
dict_k = dict(key)
test_description = "Test Version: {}, Test Name: {}\n".format(dict_k['version'], dict_k['name'])
results_file.write(test_description)
list_a = list(value)
list_a.sort(key=lambda x: x[1])
times = list()
average_times = list()
average_kb = list()
for data_set in list_a:
results_file.write("time:, {}, average time to complete test (s):, {}, time standard deviation:, {}, average resident set size (kb):, {}, rss standard deviation {}\n"
.format(datetime.datetime.fromtimestamp(data_set[5]), data_set[3], data_set[6], data_set[4], data_set[7]))
results_file.write('\n')
if __name__ == "__main__":
main()
@@ -22,6 +22,7 @@ import os
import logging
import shutil
import tempfile
import yaml
from distutils.file_util import copy_file
@@ -29,76 +30,36 @@ from distutils.file_util import copy_file
# multiple buildstream commits to be benchmarked individually.
#
# output_file - the full path for the generated benchmark configuration file
# template_file - the full path to the template file that is to be used to
# generate the benchmark configuration file.
# list_of_shas - list of Buildstream commits that need to be processed
# docker_version - the docker version to be used in the configuration, set
# to latest, but might be more restricted in the future
# bs_branch - the Buildstream branch that is being considered, defaults to
# master
# bs_path - path to the Buildstream repo (url or local directory)
# docker_path - path to the Docker Image to be used
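# A call using the new signature might look like this (hypothetical values;
# `commits` would be a list of GitPython commit objects as returned by
# repo.iter_commits()):
#
#   generate_benchmark_configuration(
#       output_file='generated.benchmark',
#       list_of_shas=commits,
#       docker_version='28-latest',
#       bs_branch='master',
#       bs_path='https://gitlab.com/BuildStream/buildstream',
#       docker_path='registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora')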
def generate_benchmark_configuration(output_file="generated.benchmark", template_file="bst_benchmarks/template.benchmark", list_of_shas=[], docker_version="28-latest", bs_branch='master'):
def generate_benchmark_configuration(output_file="generated.benchmark", list_of_shas=[], docker_version="28-latest", bs_branch='master', bs_path='https://gitlab.com/BuildStream/buildstream', docker_path='registry.gitlab.com/buildstream/buildstream-docker-images/testsuite-fedora'):
# Create temporary staging area for generated benchmark configuration
temp_staging_area = tempfile.mkdtemp(prefix='temp_staging_location')
temp_benchmark = os.path.join(temp_staging_area, 'temp_benchmark.benchmark')
# Iterate through the list of shas, writing the version defaults and then a
# version entry for each sha to the new configuration file
with open(output_file, 'w') as yaml_file:
try:
# Check template file exists and then copy into temporary generated benchmark
# configuration file for processing
if os.path.isfile(template_file):
shutil.copyfile(template_file, temp_benchmark)
logging.info("Template file is: ", template_file)
else:
logging.error("Specified template file does not exist: ", template_file)
raise Exception("Specified template file does not exist: ", template_file)
version_default = {'version_defaults' : {'base_docker_image': docker_path,
'buildstream_repo': bs_path}}
yaml.dump(version_default, yaml_file, default_flow_style=False)
yaml_file.write('\n\n')
# Open the temporary benchmark configuration file and read
with open(temp_benchmark) as f:
page = f.readlines()
# New benchmarking data
new_file = ""
# Header for versions
generic_version_entry = ''
# Go through each line of the template and copy everything apart from the
# stubbed generic version entry to the new benchmarking data. The stubbed
# entries are copied into a generic version entry for later use.
for line in page:
if '<name_stub>' in line:
generic_version_entry += line
elif '<docker_stub>' in line:
generic_version_entry += line
elif '<buildstream_ref_stub>' in line:
generic_version_entry += line
elif '<buildstream_stub>' in line:
generic_version_entry += line
elif '<buildstream_time_stub>' in line:
generic_version_entry += line
else:
new_file += line
generic_version_entry += '\n\n'
# Iterate through the list of shas and populate the stubbed entries
# with sha data from the entry, then write each entry to the new file
# entry
configs = []
for entry in list_of_shas:
new_file += generic_version_entry.replace('<name_stub>', str(entry)) \
.replace('<docker_stub>', docker_version) \
.replace('<buildstream_stub>', str(entry)) \
.replace('<buildstream_ref_stub>', bs_branch) \
.replace('<buildstream_time_stub>', str(entry.committed_date))
configs.append({'name': str(entry),
'base_docker_ref': docker_version,
'buildstream_ref': bs_branch,
'buildstream_commit': str(entry),
'buildstream_commit_date': str(entry.committed_date)})
version = {'versions': configs}
yaml.dump(version, yaml_file, default_flow_style=False)
yaml_file.write('\n\n')
# Write the new file entry back to the temporary file
with open(temp_benchmark, 'w') as f:
f.write(new_file)
# Copy the temporary benchmark file to the requested destination
try:
copy_file(temp_benchmark, output_file)
except OSError as err:
logging.error("Unable to copy pages to target: ", args.output_path)
raise Exception('Unable to create target configuration file: ', err)
finally:
shutil.rmtree(temp_staging_area)
@@ -60,6 +60,9 @@ def main():
parser.add_argument("-t", "--time_span",
help="Maximum number of days a specific commit result " \
"should be plotted", type=int)
parser.add_argument("-l", "--limit_results",
help="Specify what subset of test results should be " \
"considered.", action='append')
args = parser.parse_args()
if bool(args.directory):
@@ -117,55 +120,89 @@ def main():
version_ref = {}
version_date = {}
version_stale = {}
version_name = {}
v_index = ''
for version in data["versions"]:
version_ref[version["name"]] = version["buildstream_commit"]
if "unique_ref" in version:
v_index = "unique_ref"
else:
v_index = "name"
version_ref[str(version[v_index])] = version["buildstream_commit"]
version_name[str(version[v_index])] = version["name"]
if 'buildstream_commit_date' in version:
if version["buildstream_commit_date"]:
version_date[version["name"]] = float(version["buildstream_commit_date"])
if version["name"] == version["buildstream_commit"]:
version_date[str(version["buildstream_commit"])] = float(version["buildstream_commit_date"])
if str(version[v_index]) == str(version["buildstream_commit"]):
now_time = datetime.datetime.now()
past_measurement = datetime.datetime.fromtimestamp(measurement_time)
if (now_time - past_measurement) > datetime.timedelta(days=max_plot_days):
version_stale[version["name"]] = "True"
version_stale[version[v_index]] = version["buildstream_ref"]
else:
version_date[version["name"]] = measurement_time
version_date[str(version["buildstream_commit"])] = measurement_time
else:
version_date[version["name"]] = measurement_time
version_date[str(version["buildstream_commit"])] = measurement_time
for test in data["tests"]:
name = test["name"]
if bool(args.limit_results):
if name not in args.limit_results:
continue
for result in test["results"]:
version = result["version"]
if version in version_stale:
continue
logging.info("Version marked as stale %s", version)
version = version_stale[version]
commit = version_ref[str(version)]
commit_time = version_date[str(version)]
total_time = 0.0
total_max_rss_kb = 0.0
count = 0
# Check if measurements have been made
if "measurements" not in result:
logging.error("Measurement corruption in: ", entry)
break
logging.error("Measurement corruption in: {}".format(str(entry)))
continue
bs_ref = None
bs_commit = None
unique_ref = result["version"]
# Iterate measurements and add
for measurement in result["measurements"]:
total_time = total_time + measurement["total-time"]
total_max_rss_kb = total_max_rss_kb + measurement["max-rss-kb"]
if ("bs-ref" in measurement) and ("bs-sha" in measurement):
if bs_ref is None:
bs_ref = measurement["bs-ref"]
bs_commit = measurement["bs-sha"]
else:
if measurement["bs-ref"] != bs_ref:
logging.error("Buildstream reference changed from {} to {}: ".format(bs_ref, measurement["bs-ref"]))
bs_ref = result["bs-ref"]
if measurement["bs-sha"] != bs_commit:
logging.error("Buildstream commit changed from {} to {}: ".format(bs_commit, measurement["bs-sha"]))
bs_commit = result["bs-sha"]
count += 1
if str(version) in version_ref:
commit = version_ref[str(version)]
else:
commit = version_ref[str(commit)]
if str(commit) in version_date:
commit_time = version_date[str(commit)]
else:
commit_time = version_date[str(version)]
# Calculate average
average_time = total_time / count
average_max_rss_kb = total_max_rss_kb / count
# Create a key based on version and name
key = {}
key['version'] = version
key['version'] = version_name[str(version)]
key['name'] = name
ice_key = frozenset(key.items())
......
#!/usr/bin/env python3
#
# Copyright (C) 2019 Codethink Limited
# Copyright (C) 2019 Bloomberg Finance LP
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
#
# Authors:
# Lachlan Mackenzie <lachlan.mackenzie@codethink.co.uk>
# Commandline executable that takes a file containing benchmarking results
# and gives back the tests that were run to generate the results.
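# Example usage (the results file is whatever bst_benchmarks produced; the CI
# shell script above feeds each printed name to graphresults.py as a
# "-l <name>" filter):
#
#   ./list_tests_in_results_file.py -f results.json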
import argparse
import logging
import os
import sys
import json
def main():
results_file = "results.json"
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--results_file",
help="File containing results to be checked",
type=str)
args = parser.parse_args()
if bool(args.results_file):
if os.path.isfile(args.results_file):
results_file = args.results_file
else:
logging.error("Specified file does not exist", args.results_file)
sys.exit(1)
with open(results_file, "r") as f:
file = f.read()
data = json.loads(file)
tests = []
try:
for test in data["tests"]:
tests.append(test["name"])
except ValueError as error:
logging.error("Unable to resolve tests in: {}".format(results_file))
results = '\n'.join(tests)
print(results)
if __name__ == "__main__":
main()