Skip to content
Snippets Groups Projects
Commit 5af9256d authored by Massimiliano Culpo's avatar Massimiliano Culpo Committed by Todd Gamblin
Browse files

Cleaned up JUnit report generation on install (#6977)

* Cleaned up JUnit report generation on install

The generation of a JUnit report was previously part of the install
command. This commit factors the logic into its own module, and uses
a template for the generation of the report.

It also improves report generation, which can now deal with multiple
specs installed at once. Finally, extending the list of supported
formats is much easier than before, as it entails just writing a
new template.

* Polished report generation + added tests for failures and errors

The generation of a JUnit report has been polished, so that the
stacktrace is correctly displayed with the Jenkins JUnit plugin.
Standard error is still not used.

Added unit tests to cover for installation failures and installation
errors.
parent 784234ae
No related branches found
No related tags found
No related merge requests found
......@@ -23,24 +23,20 @@
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import codecs
import functools
import os
import platform
import shutil
import sys
import time
import xml.dom.minidom
import xml.etree.ElementTree as ET
import llnl.util.filesystem as fs
import llnl.util.tty as tty
import spack
import spack.build_environment
import spack.cmd
import spack.cmd.common.arguments as arguments
from spack.build_environment import InstallError
from spack.fetch_strategy import FetchError
from spack.package import PackageBase
import spack.fetch_strategy
import spack.report
description = "build and install packages"
section = "build"
......@@ -120,7 +116,7 @@ def setup_parser(subparser):
subparser.add_argument(
'--log-format',
default=None,
choices=['junit'],
choices=spack.report.valid_formats,
help="format to be used for log files"
)
subparser.add_argument(
......@@ -131,186 +127,6 @@ def setup_parser(subparser):
arguments.add_common_arguments(subparser, ['yes_to_all'])
# Needed for test cases
class TestResult(object):
    """Enum-like constants describing the outcome of a single
    package installation in the JUnit report."""
    PASSED = 0
    FAILED = 1
    SKIPPED = 2
    ERRORED = 3
class TestSuite(object):
    """Accumulates TestCase results for one spec and serializes
    them into a pretty-printed JUnit XML file."""

    def __init__(self, spec):
        self.root = ET.Element('testsuite')  # root <testsuite> node
        self.tests = []  # TestCase objects collected so far
        self.spec = spec  # spec this suite reports on

    def append(self, item):
        """Append a TestCase to the suite; any other type is rejected."""
        if not isinstance(item, TestCase):
            raise TypeError(
                'only TestCase instances may be appended to TestSuite'
            )
        self.tests.append(item)

    def dump(self, filename):
        """Write the whole suite, pretty-printed, into ``filename``."""

        def tally(kind):
            # Count the collected cases with the given result type
            return sum(case.result_type == kind for case in self.tests)

        # Header attributes summarizing the entire suite
        self.root.set('errors', str(tally(TestResult.ERRORED)))
        self.root.set('failures', str(tally(TestResult.FAILED)))
        self.root.set('tests', str(len(self.tests)))
        self.root.set('name', self.spec.short_spec)
        self.root.set('hostname', platform.node())

        for case in self.tests:
            self.root.append(case.element)

        pretty = xml.dom.minidom.parseString(
            ET.tostring(self.root)
        ).toprettyxml()
        with codecs.open(filename, 'wb', 'utf-8') as handle:
            handle.write(pretty)
class TestCase(object):
    """A single <testcase> entry in the JUnit report."""

    # Maps a TestResult constant to the XML tag of the sub-element
    # added under <testcase> (PASSED needs no sub-element at all).
    results = {
        TestResult.PASSED: None,
        TestResult.SKIPPED: 'skipped',
        TestResult.FAILED: 'failure',
        TestResult.ERRORED: 'error',
    }

    def __init__(self, classname, name):
        self.element = ET.Element('testcase')
        self.element.set('classname', str(classname))
        self.element.set('name', str(name))
        # Set by set_result(); None until an outcome is recorded
        self.result_type = None

    def set_duration(self, duration):
        """Record the elapsed time (in seconds) for this test case."""
        self.element.set('time', str(duration))

    def set_result(self, result_type,
                   message=None, error_type=None, text=None):
        """Record the outcome of this test case.

        Args:
            result_type: one of the TestResult constants
            message: short message set on the outcome sub-element
            error_type: value for the sub-element 'type' attribute
            text: body of the sub-element (e.g. a build log)
        """
        self.result_type = result_type
        result = TestCase.results[self.result_type]
        # NOTE: the original condition also tested
        # ``result is not TestResult.PASSED``, but ``result`` here is the
        # mapped tag name (a string or None), never a TestResult constant,
        # so that comparison was always true and has been dropped.
        if result is not None:
            subelement = ET.SubElement(self.element, result)
            if error_type is not None:
                subelement.set('type', error_type)
            if message is not None:
                subelement.set('message', str(message))
            if text is not None:
                subelement.text = text
def fetch_text(path):
    """Return the whitespace-stripped lines of the file at ``path``
    joined with newlines, or an empty string if it does not exist.
    """
    if not os.path.exists(path):
        return ''
    # Iterate the file lazily instead of materializing readlines()
    # into an intermediate list before joining.
    with codecs.open(path, 'rb', 'utf-8') as f:
        return '\n'.join(line.strip() for line in f)
def junit_output(spec, test_suite):
    """Return a decorator factory that instruments an install function so
    every (recursive) installation in ``spec``'s DAG is recorded into
    ``test_suite`` as a TestCase.
    """
    # Cycle once and for all on the dependencies and skip
    # the ones that are already installed. This ensures that
    # for the same spec, the same number of entries will be
    # displayed in the XML report
    for x in spec.traverse(order='post'):
        package = spack.repo.get(x)
        if package.installed:
            test_case = TestCase(package.name, x.short_spec)
            test_case.set_duration(0.0)
            test_case.set_result(
                TestResult.SKIPPED,
                message='Skipped [already installed]',
                error_type='already_installed'
            )
            test_suite.append(test_case)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            # Check if the package has been installed already
            # (already-installed specs were recorded as SKIPPED above)
            if self.installed:
                return
            test_case = TestCase(self.name, self.spec.short_spec)
            # Try to install the package
            try:
                # If already installed set the spec as skipped
                start_time = time.time()
                # PackageBase.do_install
                func(self, *args, **kwargs)
                duration = time.time() - start_time
                test_case.set_duration(duration)
                test_case.set_result(TestResult.PASSED)
            except InstallError:
                # Check if the package relies on dependencies that
                # did not install
                duration = time.time() - start_time
                test_case.set_duration(duration)
                if [x for x in self.spec.dependencies(('link', 'run')) if not spack.repo.get(x).installed]:  # NOQA: ignore=E501
                    # Failed dependency: this spec never got a chance
                    test_case.set_duration(0.0)
                    test_case.set_result(
                        TestResult.SKIPPED,
                        message='Skipped [failed dependencies]',
                        error_type='dep_failed'
                    )
                else:
                    # An InstallError is considered a failure (the recipe
                    # didn't work correctly)
                    text = fetch_text(self.build_log_path)
                    test_case.set_result(
                        TestResult.FAILED,
                        message='Installation failure',
                        text=text
                    )
            except FetchError:
                # A FetchError is considered an error as
                # we didn't even start building
                # NOTE(review): despite the comment above, the result is
                # recorded as FAILED, not ERRORED -- confirm intent.
                duration = time.time() - start_time
                test_case.set_duration(duration)
                text = fetch_text(self.build_log_path)
                test_case.set_result(
                    TestResult.FAILED,
                    message='Unable to fetch package',
                    text=text
                )
            except Exception:
                # Anything else is also an error
                duration = time.time() - start_time
                test_case.set_duration(duration)
                text = fetch_text(self.build_log_path)
                test_case.set_result(
                    TestResult.FAILED,
                    message='Unexpected exception thrown during install',
                    text=text
                )
            except BaseException:
                # Anything else is also an error
                # NOTE(review): this swallows SystemExit/KeyboardInterrupt
                # without re-raising, so Ctrl-C during an install is
                # silently converted into a FAILED test case -- verify
                # this is intended.
                duration = time.time() - start_time
                test_case.set_duration(duration)
                text = fetch_text(self.build_log_path)
                test_case.set_result(
                    TestResult.FAILED,
                    message='Unknown error',
                    text=text
                )
            # Try to get the log
            test_suite.append(test_case)
        return wrapper
    return decorator
def default_log_file(spec):
"""Computes the default filename for the log file and creates
the corresponding directory if not present
......@@ -323,42 +139,19 @@ def default_log_file(spec):
def install_spec(cli_args, kwargs, spec):
saved_do_install = PackageBase.do_install
decorator = lambda fn: fn
# Check if we were asked to produce some log for dashboards
if cli_args.log_format is not None:
# Compute the filename for logging
log_filename = cli_args.log_file
if not log_filename:
log_filename = default_log_file(spec)
# Create the test suite in which to log results
test_suite = TestSuite(spec)
# Temporarily decorate PackageBase.do_install to monitor
# recursive calls.
decorator = junit_output(spec, test_suite)
# Do the actual installation
try:
# decorate the install if necessary
PackageBase.do_install = decorator(PackageBase.do_install)
if cli_args.things_to_install == 'dependencies':
# Install dependencies as-if they were installed
# for root (explicit=False in the DB)
kwargs['explicit'] = False
for s in spec.dependencies():
p = spack.repo.get(s)
p.do_install(**kwargs)
s.package.do_install(**kwargs)
else:
package = spack.repo.get(spec)
kwargs['explicit'] = True
package.do_install(**kwargs)
spec.package.do_install(**kwargs)
except InstallError as e:
except spack.build_environment.InstallError as e:
if cli_args.show_log_on_error:
e.print_context()
if not os.path.exists(e.pkg.build_log_path):
......@@ -369,13 +162,6 @@ def install_spec(cli_args, kwargs, spec):
shutil.copyfileobj(log, sys.stderr)
raise
finally:
PackageBase.do_install = saved_do_install
# Dump test output if asked to
if cli_args.log_format is not None:
test_suite.dump(log_filename)
def install(parser, args, **kwargs):
if not args.package and not args.specfiles:
......@@ -386,7 +172,7 @@ def install(parser, args, **kwargs):
tty.die("The -j option must be a positive integer!")
if args.no_checksum:
spack.do_checksum = False # TODO: remove this global.
spack.do_checksum = False # TODO: remove this global.
# Parse cli arguments and construct a dictionary
# that will be passed to Package.do_install API
......@@ -466,5 +252,7 @@ def install(parser, args, **kwargs):
else:
for spec in specs:
install_spec(args, kwargs, spec)
filename = args.log_file or default_log_file(specs[0])
with spack.report.collect_info(specs, args.log_format, filename):
for spec in specs:
install_spec(args, kwargs, spec)
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
"""Tools to produce reports of spec installations"""
import collections
import functools
import itertools
import os.path
import time
import traceback
import llnl.util.lang
import spack.build_environment
import spack.fetch_strategy
import spack.package
templates = {
'junit': os.path.join('reports', 'junit.xml')
}
#: Allowed report formats
valid_formats = list(templates.keys())
__all__ = [
'valid_formats',
'collect_info'
]
def fetch_package_log(pkg):
    """Return the contents of a package's build log, or a diagnostic
    message if the log cannot be read.

    Args:
        pkg: package object exposing ``build_log_path`` and
            ``spec.cshort_spec``
    """
    try:
        # read() is equivalent to ''.join(f.readlines()) without
        # materializing the intermediate list of lines
        with open(pkg.build_log_path, 'r') as f:
            return f.read()
    except Exception:
        # Deliberately broad: a missing/unreadable log must never
        # break report generation
        return 'Cannot open build log for {0}'.format(
            pkg.spec.cshort_spec
        )
class InfoCollector(object):
    """Decorates PackageBase.do_install to collect information
    on the installation of certain specs.

    When exiting the context this change will be rolled-back.

    The data collected is available through the ``test_suites``
    attribute once exited, and it's organized as a list where
    each item represents the installation of one of the spec.

    Args:
        specs (list of Spec): specs whose install information will
            be recorded
    """
    #: Backup of PackageBase.do_install, restored on __exit__
    _backup_do_install = spack.package.PackageBase.do_install

    def __init__(self, specs):
        #: Specs that will be installed
        self.specs = specs
        #: Context that will be used to stamp the report from
        #: the template file; one dict per spec, filled on exit
        self.test_suites = []

    def __enter__(self):
        # Initialize the test suites with the data that
        # is available upfront
        for spec in self.specs:
            name_fmt = '{0}_{1}'
            name = name_fmt.format(spec.name, spec.dag_hash(length=7))
            # Counters (nerrors/nfailures/ntests/time) are left as None
            # here and computed from the collected testcases in __exit__
            suite = {
                'name': name,
                'nerrors': None,
                'nfailures': None,
                'ntests': None,
                'time': None,
                'timestamp': time.strftime(
                    "%a, %d %b %Y %H:%M:%S", time.gmtime()
                ),
                'properties': [],
                'testcases': []
            }
            self.test_suites.append(suite)
            Property = collections.namedtuple('Property', ['name', 'value'])
            suite['properties'].append(
                Property('architecture', spec.architecture)
            )
            suite['properties'].append(Property('compiler', spec.compiler))
            # Check which specs are already installed and mark them as skipped
            for dep in filter(lambda x: x.package.installed, spec.traverse()):
                test_case = {
                    'name': dep.name,
                    'id': dep.dag_hash(),
                    'elapsed_time': '0.0',
                    'result': 'skipped',
                    'message': 'Spec already installed'
                }
                suite['testcases'].append(test_case)

        def gather_info(do_install):
            """Decorates do_install to gather useful information for
            a CI report.

            It's defined here to capture the environment and build
            this context as the installations proceed.
            """
            @functools.wraps(do_install)
            def wrapper(pkg, *args, **kwargs):
                # We accounted before for what is already installed
                installed_on_entry = pkg.installed
                test_case = {
                    'name': pkg.name,
                    'id': pkg.spec.dag_hash(),
                    'elapsed_time': None,
                    'result': None,
                    'message': None
                }
                start_time = time.time()
                value = None
                try:
                    value = do_install(pkg, *args, **kwargs)
                    test_case['result'] = 'success'
                    # NOTE(review): this early return still runs the
                    # ``finally`` block below, so the case is appended to
                    # the suite even for specs installed on entry -- the
                    # skip recorded in __enter__ may be duplicated; verify.
                    if installed_on_entry:
                        return
                except spack.build_environment.InstallError as e:
                    # An InstallError is considered a failure (the recipe
                    # didn't work correctly)
                    test_case['result'] = 'failure'
                    test_case['stdout'] = fetch_package_log(pkg)
                    test_case['message'] = e.message or 'Installation failure'
                    test_case['exception'] = e.traceback
                except (Exception, BaseException) as e:
                    # Everything else is an error (the installation
                    # failed outside of the child process)
                    # NOTE: ``Exception`` is redundant here since
                    # BaseException already covers it; the pair makes the
                    # intent to also catch KeyboardInterrupt etc. explicit
                    test_case['result'] = 'error'
                    test_case['stdout'] = fetch_package_log(pkg)
                    test_case['message'] = str(e) or 'Unknown error'
                    test_case['exception'] = traceback.format_exc()
                finally:
                    test_case['elapsed_time'] = time.time() - start_time
                # Append the case to the correct test suites. In some
                # cases it may happen that a spec that is asked to be
                # installed explicitly will also be installed as a
                # dependency of another spec. In this case append to both
                # test suites.
                # NOTE(review): ``name_fmt`` is a closure over the loop
                # variable set in __enter__ above; if ``self.specs`` is
                # empty it is never bound and this raises NameError --
                # confirm callers always pass a non-empty list.
                for s in llnl.util.lang.dedupe([pkg.spec.root, pkg.spec]):
                    name = name_fmt.format(s.name, s.dag_hash(length=7))
                    try:
                        item = next((
                            x for x in self.test_suites
                            if x['name'] == name
                        ))
                        item['testcases'].append(test_case)
                    except StopIteration:
                        # The spec does not belong to any monitored suite
                        pass
                return value
            return wrapper
        spack.package.PackageBase.do_install = gather_info(
            spack.package.PackageBase.do_install
        )

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore the original method in PackageBase
        spack.package.PackageBase.do_install = InfoCollector._backup_do_install
        # Derive the suite-level counters from the collected cases
        for suite in self.test_suites:
            suite['ntests'] = len(suite['testcases'])
            suite['nfailures'] = len(
                [x for x in suite['testcases'] if x['result'] == 'failure']
            )
            suite['nerrors'] = len(
                [x for x in suite['testcases'] if x['result'] == 'error']
            )
            suite['time'] = sum([
                float(x['elapsed_time']) for x in suite['testcases']
            ])
class collect_info(object):
    """Collects information to build a report while installing
    and dumps it on exit.

    If the format name is not ``None``, this context manager
    decorates PackageBase.do_install when entering the context
    and unrolls the change when exiting.

    Within the context, only the specs that are passed to it
    on initialization will be recorded for the report. Data from
    other specs will be discarded.

    Examples:

        .. code-block:: python

            # The file 'junit.xml' is written when exiting
            # the context
            specs = [Spec('hdf5').concretized()]
            with collect_info(specs, 'junit', 'junit.xml'):
                # A report will be generated for these specs...
                for spec in specs:
                    spec.do_install()
                # ...but not for this one
                Spec('zlib').concretized().do_install()

    Args:
        specs (list of Spec): specs to be installed
        format_name (str or None): one of the supported formats
        filename (str or None): name of the file where the report will
            be eventually written

    Raises:
        ValueError: when ``format_name`` is not in ``valid_formats``
    """
    def __init__(self, specs, format_name, filename):
        self.specs = specs
        self.format_name = format_name
        # Check that the format is valid (None disables reporting)
        if format_name not in itertools.chain(valid_formats, [None]):
            raise ValueError('invalid report type: {0}'.format(format_name))
        self.filename = filename
        # No collector at all when reporting is disabled
        self.collector = InfoCollector(specs) if self.format_name else None

    def __enter__(self):
        if self.format_name:
            # Start the collector and patch PackageBase.do_install
            self.collector.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.format_name:
            # Close the collector and restore the
            # original PackageBase.do_install
            self.collector.__exit__(exc_type, exc_val, exc_tb)
            # Write the report by rendering the format's template
            # with the collected test suites
            with open(self.filename, 'w') as f:
                # NOTE(review): ``spack.tengine`` is not among this
                # module's visible imports -- presumably reachable via
                # the ``spack`` package import chain; verify.
                env = spack.tengine.make_environment()
                t = env.get_template(templates[self.format_name])
                f.write(t.render({'test_suites': self.collector.test_suites}))
......@@ -32,6 +32,7 @@
import spack
import spack.cmd.install
import spack.package
from spack.spec import Spec
from spack.main import SpackCommand, SpackCommandError
......@@ -275,6 +276,79 @@ def test_install_from_file(spec, concretize, error_code, tmpdir):
assert install.returncode == error_code
@pytest.mark.disable_clean_stage_check
@pytest.mark.usefixtures(
    'builtin_mock', 'mock_archive', 'mock_fetch', 'config', 'install_mockery'
)
@pytest.mark.parametrize('exc_typename,msg', [
    ('RuntimeError', 'something weird happened'),
    ('ValueError', 'spec is not concrete')
])
def test_junit_output_with_failures(tmpdir, exc_typename, msg):
    """An exception raised by the package recipe must be reported
    as a JUnit <failure> with the build output attached."""
    with tmpdir.as_cwd():
        install(
            '--log-format=junit', '--log-file=test.xml',
            'raiser',
            'exc_type={0}'.format(exc_typename),
            'msg="{0}"'.format(msg)
        )

    report = tmpdir.join('test.xml')
    assert report in tmpdir.listdir()

    data = report.open().read()

    # Exactly one test, counted as a failure (not an error)
    for fragment in ('tests="1"', 'failures="1"', 'errors="0"'):
        assert fragment in data

    # Standard output must be captured and carry the message
    assert '<system-out>' in data
    assert msg in data
@pytest.mark.disable_clean_stage_check
@pytest.mark.usefixtures(
    'builtin_mock', 'mock_archive', 'mock_fetch', 'config', 'install_mockery'
)
@pytest.mark.parametrize('exc_typename,msg', [
    ('RuntimeError', 'something weird happened'),
    ('KeyboardInterrupt', 'Ctrl-C strikes again')
])
def test_junit_output_with_errors(tmpdir, monkeypatch, exc_typename, msg):
    """An exception raised outside the recipe (here: by a patched
    do_install) must be reported as a JUnit <error>."""
    def just_throw(*args, **kwargs):
        # Raise the parametrized builtin exception before any
        # installation work happens
        from six.moves import builtins
        exc_type = getattr(builtins, exc_typename)
        raise exc_type(msg)

    monkeypatch.setattr(spack.package.PackageBase, 'do_install', just_throw)

    with tmpdir.as_cwd():
        install(
            '--log-format=junit', '--log-file=test.xml',
            'libdwarf',
            fail_on_error=False
        )

    files = tmpdir.listdir()
    filename = tmpdir.join('test.xml')
    assert filename in files

    content = filename.open().read()

    # Count failures and errors correctly: the exception escaped the
    # install machinery, so it must be an error, not a failure
    assert 'tests="1"' in content
    assert 'failures="0"' in content
    assert 'errors="1"' in content

    # We want to have both stdout and stderr
    assert '<system-out>' in content
    assert msg in content
@pytest.mark.usefixtures('noop_install', 'config')
@pytest.mark.parametrize('clispecs,filespecs', [
[[], ['mpi']],
......
<?xml version="1.0" encoding="UTF-8"?>
<!--
This file has been modeled after the basic
specifications at this url:

http://help.catchsoftware.com/display/ET/JUnit+Format
-->
{# Rendered with a context containing 'test_suites': one dict per
   installed spec, as built by spack.report.InfoCollector #}
<testsuites>
{% for suite in test_suites %}
  <testsuite name="{{ suite.name }}"
             errors="{{ suite.nerrors }}"
             tests="{{ suite.ntests }}"
             failures="{{ suite.nfailures }}"
             time="{{ suite.time }}"
             timestamp="{{ suite.timestamp }}" >
    <properties>
    {% for property in suite.properties %}
      <property name="{{ property.name }}" value="{{ property.value }}" />
    {% endfor %}
    </properties>
    {# One <testcase> per package install attempt #}
    {% for test in suite.testcases %}
    <testcase classname="{{ test.name }}"
              name="{{ test.id }}"
              time="{{ test.elapsed_time }}">
    {% if test.result == 'failure' %}
      <failure message="{{ test.message }}">
{{ test.exception }}
      </failure>
    {% elif test.result == 'error' %}
      <error message="{{ test.message }}">
{{ test.exception }}
      </error>
    {% elif test.result == 'skipped' %}
      <skipped />
    {% endif %}
    {% if test.stdout %}
      <system-out>
{{ test.stdout }}
      </system-out>
    {% endif %}
    {% if test.stderr %}
      <system-err>
{{ test.stderr }}
      </system-err>
    {% endif %}
    </testcase>
    {% endfor %}
    {# Add an error tag? #}
  </testsuite>
{% endfor %}
</testsuites>
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from six.moves import builtins
from spack import *
class Raiser(Package):
    """A package that can raise a built-in exception
    of any kind with any message
    """

    homepage = "http://www.example.com"
    url = "http://www.example.com/a-1.0.tar.gz"

    version('1.0', '0123456789abcdef0123456789abcdef')
    version('2.0', '2.0_a_hash')

    # Name of the builtin exception class to raise; validated only
    # as being a string, resolution happens at install time
    variant(
        'exc_type',
        values=lambda x: isinstance(x, str),
        default='RuntimeError',
        description='type of the exception to be raised',
        multi=False
    )

    # Message attached to the raised exception
    variant(
        'msg',
        values=lambda x: isinstance(x, str),
        default='Unknown Exception',
        description='message that will be tied to the exception',
        multi=False
    )

    def install(self, spec, prefix):
        """Look up the requested builtin exception type and raise it
        with the configured message instead of installing anything."""
        print('Raiser will raise ')
        exc_typename = self.spec.variants['exc_type'].value
        # getattr on builtins turns the variant string into the class
        exc_type = getattr(builtins, exc_typename)
        msg = self.spec.variants['msg'].value
        raise exc_type(msg)
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment