diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py
index aab7c0abc747780a5564c15468da6912592cb769..417e07e9c45da4eff49c7f871ee5ae680c3792c4 100644
--- a/lib/spack/spack/cmd/install.py
+++ b/lib/spack/spack/cmd/install.py
@@ -23,11 +23,20 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import argparse
+import codecs
+import functools
+import os
+import time
+import xml.dom.minidom
+import xml.etree.ElementTree as ET
 
+import llnl.util.filesystem as fs
 import llnl.util.tty as tty
-
 import spack
 import spack.cmd
+from spack.build_environment import InstallError
+from spack.fetch_strategy import FetchError
+from spack.package import PackageBase
 
 description = "Build and install packages"
 
@@ -71,7 +80,207 @@ def setup_parser(subparser):
     )
     subparser.add_argument(
         '--run-tests', action='store_true', dest='run_tests',
-        help="Run tests during installation of a package.")
+        help="Run package level tests during installation."
+    )
+    subparser.add_argument(
+        '--log-format',
+        default=None,
+        choices=['junit'],
+        help="Format to be used for log files."
+    )
+    subparser.add_argument(
+        '--log-file',
+        default=None,
+        help="Filename for the log file. If not passed a default will be used."
+    )
+
+
+# Needed for test cases
+class TestResult(object):
+    PASSED = 0
+    FAILED = 1
+    SKIPPED = 2
+    ERRORED = 3
+
+
+class TestSuite(object):
+    def __init__(self):
+        self.root = ET.Element('testsuite')
+        self.tests = []
+
+    def append(self, item):
+        if not isinstance(item, TestCase):
+            raise TypeError(
+                'only TestCase instances may be appended to TestSuite'
+            )
+        self.tests.append(item)  # Append the item to the list of tests
+
+    def dump(self, filename):
+        # Prepare the header for the entire test suite
+        number_of_errors = sum(
+            x.result_type == TestResult.ERRORED for x in self.tests
+        )
+        self.root.set('errors', str(number_of_errors))
+        number_of_failures = sum(
+            x.result_type == TestResult.FAILED for x in self.tests
+        )
+        self.root.set('failures', str(number_of_failures))
+        self.root.set('tests', str(len(self.tests)))
+
+        for item in self.tests:
+            self.root.append(item.element)
+
+        with codecs.open(filename, 'wb', 'utf-8') as file:
+            xml_string = ET.tostring(self.root)
+            xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml()
+            file.write(xml_string)
+
+
+class TestCase(object):
+
+    results = {
+        TestResult.PASSED: None,
+        TestResult.SKIPPED: 'skipped',
+        TestResult.FAILED: 'failure',
+        TestResult.ERRORED: 'error',
+    }
+
+    def __init__(self, classname, name):
+        self.element = ET.Element('testcase')
+        self.element.set('classname', str(classname))
+        self.element.set('name', str(name))
+        self.result_type = None
+
+    def set_duration(self, duration):
+        self.element.set('time', str(duration))
+
+    def set_result(self, result_type,
+                   message=None, error_type=None, text=None):
+        self.result_type = result_type
+        result = TestCase.results[self.result_type]
+        if result is not None:
+            subelement = ET.SubElement(self.element, result)
+            if error_type is not None:
+                subelement.set('type', error_type)
+            if message is not None:
+                subelement.set('message', str(message))
+            if text is not None:
+                subelement.text = text
+
+
+def fetch_text(path):
+    if not os.path.exists(path):
+        return ''
+
+    with codecs.open(path, 'rb', 'utf-8') as f:
+        return '\n'.join(
+            line.strip() for line in f.readlines()
+        )
+
+
+def junit_output(spec, test_suite):
+    # Cycle once and for all on the dependencies and skip
+    # the ones that are already installed. This ensures that
+    # for the same spec, the same number of entries will be
+    # displayed in the XML report
+    for x in spec.traverse(order='post'):
+        package = spack.repo.get(x)
+        if package.installed:
+            test_case = TestCase(package.name, x.short_spec)
+            test_case.set_duration(0.0)
+            test_case.set_result(
+                TestResult.SKIPPED,
+                message='Skipped [already installed]',
+                error_type='already_installed'
+            )
+            test_suite.append(test_case)
+
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(self, *args, **kwargs):
+
+            # Check if the package has been installed already
+            if self.installed:
+                return
+
+            test_case = TestCase(self.name, self.spec.short_spec)
+            # Try to install the package
+            try:
+                # If already installed set the spec as skipped
+                start_time = time.time()
+                # PackageBase.do_install
+                func(self, *args, **kwargs)
+                duration = time.time() - start_time
+                test_case.set_duration(duration)
+                test_case.set_result(TestResult.PASSED)
+            except InstallError:
+                # Check if the package relies on dependencies that
+                # did not install
+                duration = time.time() - start_time
+                test_case.set_duration(duration)
+                if any(not spack.repo.get(x).installed for x in self.spec.dependencies(('link', 'run'))):  # NOQA: ignore=E501
+                    test_case.set_duration(0.0)
+                    test_case.set_result(
+                        TestResult.SKIPPED,
+                        message='Skipped [failed dependencies]',
+                        error_type='dep_failed'
+                    )
+                else:
+                    # An InstallError is considered a failure (the recipe
+                    # didn't work correctly)
+                    text = fetch_text(self.build_log_path)
+                    test_case.set_result(
+                        TestResult.FAILED,
+                        message='Installation failure',
+                        text=text
+                    )
+            except FetchError:
+                # A FetchError is considered an error as
+                # we didn't even start building
+                duration = time.time() - start_time
+                test_case.set_duration(duration)
+                text = fetch_text(self.build_log_path)
+                test_case.set_result(
+                    TestResult.ERRORED,
+                    message='Unable to fetch package',
+                    text=text
+                )
+            except Exception:
+                # Anything else is also an error
+                duration = time.time() - start_time
+                test_case.set_duration(duration)
+                text = fetch_text(self.build_log_path)
+                test_case.set_result(
+                    TestResult.ERRORED,
+                    message='Unexpected exception thrown during install',
+                    text=text
+                )
+            except BaseException:
+                # Only KeyboardInterrupt/SystemExit and friends reach here
+                # (the 'except Exception' above catches everything else).
+                # Record the event, then re-raise so the interrupt is not
+                # silently swallowed by the wrapper.
+                test_case.set_duration(time.time() - start_time)
+                test_case.set_result(
+                    TestResult.ERRORED,
+                    message='Unknown error',
+                    text=fetch_text(self.build_log_path)
+                )
+
+            # Try to get the log
+            test_suite.append(test_case)
+        return wrapper
+    return decorator
+
+
+def default_log_file(spec):
+    """Computes the default filename for the log file and creates
+    the corresponding directory if not present
+    """
+    fmt = 'test-{x.name}-{x.version}-{hash}.xml'
+    basename = fmt.format(x=spec, hash=spec.dag_hash())
+    dirname = fs.join_path(spack.var_path, 'junit-report')
+    fs.mkdirp(dirname)
+    return fs.join_path(dirname, basename)
 
 
 def install(parser, args, **kwargs):
@@ -104,6 +313,20 @@ def install(parser, args, **kwargs):
         tty.error('only one spec can be installed at a time.')
     spec = specs.pop()
 
+    # Check if we were asked to produce some log for dashboards
+    if args.log_format is not None:
+        # Compute the filename for logging
+        log_filename = args.log_file
+        if not log_filename:
+            log_filename = default_log_file(spec)
+        # Create the test suite in which to log results
+        test_suite = TestSuite()
+        # Decorate PackageBase.do_install to get installation status
+        PackageBase.do_install = junit_output(
+            spec, test_suite
+        )(PackageBase.do_install)
+
+    # Do the actual installation
     if args.things_to_install == 'dependencies':
         # Install dependencies as-if they were installed
         # for root (explicit=False in the DB)
@@ -115,3 +338,7 @@ def install(parser, args, **kwargs):
         package = spack.repo.get(spec)
         kwargs['explicit'] = True
         package.do_install(**kwargs)
+
+    # Dump log file if asked to
+    if args.log_format is not None:
+        test_suite.dump(log_filename)
diff --git a/lib/spack/spack/cmd/test_install.py b/lib/spack/spack/cmd/test_install.py
deleted file mode 100644
index f962c5988a9da9bf5eee0adca20fd0490566910c..0000000000000000000000000000000000000000
--- a/lib/spack/spack/cmd/test_install.py
+++ /dev/null
@@ -1,245 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import argparse
-import codecs
-import os
-import time
-import xml.dom.minidom
-import xml.etree.ElementTree as ET
-
-import llnl.util.tty as tty
-import spack
-import spack.cmd
-from llnl.util.filesystem import *
-from spack.build_environment import InstallError
-from spack.fetch_strategy import FetchError
-
-description = "Run package install as a unit test, output formatted results."
-
-
-def setup_parser(subparser):
-    subparser.add_argument(
-        '-j', '--jobs', action='store', type=int,
-        help="Explicitly set number of make jobs.  Default is #cpus.")
-
-    subparser.add_argument(
-        '-n', '--no-checksum', action='store_true', dest='no_checksum',
-        help="Do not check packages against checksum")
-
-    subparser.add_argument(
-        '-o', '--output', action='store',
-        help="test output goes in this file")
-
-    subparser.add_argument(
-        'package', nargs=argparse.REMAINDER,
-        help="spec of package to install")
-
-
-class TestResult(object):
-    PASSED = 0
-    FAILED = 1
-    SKIPPED = 2
-    ERRORED = 3
-
-
-class TestSuite(object):
-
-    def __init__(self, filename):
-        self.filename = filename
-        self.root = ET.Element('testsuite')
-        self.tests = []
-
-    def __enter__(self):
-        return self
-
-    def append(self, item):
-        if not isinstance(item, TestCase):
-            raise TypeError(
-                'only TestCase instances may be appended to TestSuite')
-        self.tests.append(item)  # Append the item to the list of tests
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        # Prepare the header for the entire test suite
-        number_of_errors = sum(
-            x.result_type == TestResult.ERRORED for x in self.tests)
-        self.root.set('errors', str(number_of_errors))
-        number_of_failures = sum(
-            x.result_type == TestResult.FAILED for x in self.tests)
-        self.root.set('failures', str(number_of_failures))
-        self.root.set('tests', str(len(self.tests)))
-
-        for item in self.tests:
-            self.root.append(item.element)
-
-        with open(self.filename, 'wb') as file:
-            xml_string = ET.tostring(self.root)
-            xml_string = xml.dom.minidom.parseString(xml_string).toprettyxml()
-            file.write(xml_string)
-
-
-class TestCase(object):
-
-    results = {
-        TestResult.PASSED: None,
-        TestResult.SKIPPED: 'skipped',
-        TestResult.FAILED: 'failure',
-        TestResult.ERRORED: 'error',
-    }
-
-    def __init__(self, classname, name, time=None):
-        self.element = ET.Element('testcase')
-        self.element.set('classname', str(classname))
-        self.element.set('name', str(name))
-        if time is not None:
-            self.element.set('time', str(time))
-        self.result_type = None
-
-    def set_result(self, result_type,
-                   message=None, error_type=None, text=None):
-        self.result_type = result_type
-        result = TestCase.results[self.result_type]
-        if result is not None and result is not TestResult.PASSED:
-            subelement = ET.SubElement(self.element, result)
-            if error_type is not None:
-                subelement.set('type', error_type)
-            if message is not None:
-                subelement.set('message', str(message))
-            if text is not None:
-                subelement.text = text
-
-
-def fetch_log(path):
-    if not os.path.exists(path):
-        return list()
-    with codecs.open(path, 'rb', 'utf-8') as F:
-        return list(line.strip() for line in F.readlines())
-
-
-def failed_dependencies(spec):
-    def get_deps(deptype):
-        return set(item for item in spec.dependencies(deptype)
-                   if not spack.repo.get(item).installed)
-    link_deps = get_deps('link')
-    run_deps = get_deps('run')
-    return link_deps.union(run_deps)
-
-
-def get_top_spec_or_die(args):
-    specs = spack.cmd.parse_specs(args.package, concretize=True)
-    if len(specs) > 1:
-        tty.die("Only 1 top-level package can be specified")
-    top_spec = iter(specs).next()
-    return top_spec
-
-
-def install_single_spec(spec, number_of_jobs):
-    package = spack.repo.get(spec)
-
-    # If it is already installed, skip the test
-    if spack.repo.get(spec).installed:
-        testcase = TestCase(package.name, package.spec.short_spec, time=0.0)
-        testcase.set_result(
-            TestResult.SKIPPED,
-            message='Skipped [already installed]',
-            error_type='already_installed')
-        return testcase
-
-    # If it relies on dependencies that did not install, skip
-    if failed_dependencies(spec):
-        testcase = TestCase(package.name, package.spec.short_spec, time=0.0)
-        testcase.set_result(
-            TestResult.SKIPPED,
-            message='Skipped [failed dependencies]',
-            error_type='dep_failed')
-        return testcase
-
-    # Otherwise try to install the spec
-    try:
-        start_time = time.time()
-        package.do_install(keep_prefix=False,
-                           keep_stage=True,
-                           install_deps=True,
-                           make_jobs=number_of_jobs,
-                           verbose=True,
-                           fake=False)
-        duration = time.time() - start_time
-        testcase = TestCase(package.name, package.spec.short_spec, duration)
-        testcase.set_result(TestResult.PASSED)
-    except InstallError:
-        # An InstallError is considered a failure (the recipe didn't work
-        # correctly)
-        duration = time.time() - start_time
-        # Try to get the log
-        lines = fetch_log(package.build_log_path)
-        text = '\n'.join(lines)
-        testcase = TestCase(package.name, package.spec.short_spec, duration)
-        testcase.set_result(TestResult.FAILED,
-                            message='Installation failure', text=text)
-
-    except FetchError:
-        # A FetchError is considered an error (we didn't even start building)
-        duration = time.time() - start_time
-        testcase = TestCase(package.name, package.spec.short_spec, duration)
-        testcase.set_result(TestResult.ERRORED,
-                            message='Unable to fetch package')
-
-    return testcase
-
-
-def get_filename(args, top_spec):
-    if not args.output:
-        fname = 'test-{x.name}-{x.version}-{hash}.xml'.format(
-            x=top_spec, hash=top_spec.dag_hash())
-        output_directory = join_path(os.getcwd(), 'test-output')
-        if not os.path.exists(output_directory):
-            os.mkdir(output_directory)
-        output_filename = join_path(output_directory, fname)
-    else:
-        output_filename = args.output
-    return output_filename
-
-
-def test_install(parser, args):
-    # Check the input
-    if not args.package:
-        tty.die("install requires a package argument")
-
-    if args.jobs is not None:
-        if args.jobs <= 0:
-            tty.die("The -j option must be a positive integer!")
-
-    if args.no_checksum:
-        spack.do_checksum = False  # TODO: remove this global.
-
-    # Get the one and only top spec
-    top_spec = get_top_spec_or_die(args)
-    # Get the filename of the test
-    output_filename = get_filename(args, top_spec)
-    # TEST SUITE
-    with TestSuite(output_filename) as test_suite:
-        # Traverse in post order : each spec is a test case
-        for spec in top_spec.traverse(order='post'):
-            test_case = install_single_spec(spec, args.jobs)
-            test_suite.append(test_case)
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 52dbd40f6f54f4f12254198e26971c16ad65322c..3a3028885f316fd3a40adddaf7430fffc8b7dd2d 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -1180,7 +1180,9 @@ def do_install(self,
                     verbose=verbose,
                     make_jobs=make_jobs,
                     run_tests=run_tests,
-                    dirty=dirty)
+                    dirty=dirty,
+                    **kwargs
+                )
 
         # Set run_tests flag before starting build.
         self.run_tests = run_tests
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index f6847d29292699ab897df840bfdfb80a47f0d469..457e5db9dc9bbfbbab045379b89e321729037de6 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -41,7 +41,7 @@
     'cc',
     'cmd.find',
     'cmd.module',
-    'cmd.test_install',
+    'cmd.install',
     'cmd.uninstall',
     'concretize',
     'concretize_preferences',
diff --git a/lib/spack/spack/test/cmd/test_install.py b/lib/spack/spack/test/cmd/install.py
similarity index 81%
rename from lib/spack/spack/test/cmd/test_install.py
rename to lib/spack/spack/test/cmd/install.py
index 4734fe1267d3e184e3436453dcf186758428de71..591bf02340a5e47afc0efad2897af7359e95357d 100644
--- a/lib/spack/spack/test/cmd/test_install.py
+++ b/lib/spack/spack/test/cmd/install.py
@@ -23,21 +23,23 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import StringIO
+import argparse
+import codecs
 import collections
-import os
-import unittest
 import contextlib
+import unittest
 
+import llnl.util.filesystem
 import spack
 import spack.cmd
-from spack.cmd import test_install
+import spack.cmd.install as install
 
 FILE_REGISTRY = collections.defaultdict(StringIO.StringIO)
 
 
 # Monkey-patch open to write module files to a StringIO instance
 @contextlib.contextmanager
-def mock_open(filename, mode):
+def mock_open(filename, mode, *args):
     if not mode == 'wb':
         message = 'test.test_install : unexpected opening mode for mock_open'
         raise RuntimeError(message)
@@ -103,6 +105,8 @@ def __init__(self, spec, buildLogPath):
         self.build_log_path = buildLogPath
 
     def do_install(self, *args, **kwargs):
+        for x in self.spec.dependencies():
+            x.package.do_install(*args, **kwargs)
         self.installed = True
 
 
@@ -120,36 +124,28 @@ def get(self, spec):
 def mock_fetch_log(path):
     return []
 
-specX = MockSpec('X', "1.2.0")
-specY = MockSpec('Y', "2.3.8")
+specX = MockSpec('X', '1.2.0')
+specY = MockSpec('Y', '2.3.8')
 specX._dependencies['Y'] = spack.DependencySpec(specY, spack.alldeps)
 pkgX = MockPackage(specX, 'logX')
 pkgY = MockPackage(specY, 'logY')
-
-
-class MockArgs(object):
-
-    def __init__(self, package):
-        self.package = package
-        self.jobs = None
-        self.no_checksum = False
-        self.output = None
+specX.package = pkgX
+specY.package = pkgY
 
 
 # TODO: add test(s) where Y fails to install
-class TestInstallTest(unittest.TestCase):
-    """
-    Tests test-install where X->Y
-    """
+class InstallTestJunitLog(unittest.TestCase):
+    """Tests test-install where X->Y"""
 
     def setUp(self):
-        super(TestInstallTest, self).setUp()
-
+        super(InstallTestJunitLog, self).setUp()
+        install.PackageBase = MockPackage
         # Monkey patch parse specs
+
         def monkey_parse_specs(x, concretize):
-            if x == 'X':
+            if x == ['X']:
                 return [specX]
-            elif x == 'Y':
+            elif x == ['Y']:
                 return [specY]
             return []
 
@@ -157,11 +153,12 @@ def monkey_parse_specs(x, concretize):
         spack.cmd.parse_specs = monkey_parse_specs
 
         # Monkey patch os.mkdirp
-        self.os_mkdir = os.mkdir
-        os.mkdir = lambda x: True
+        self.mkdirp = llnl.util.filesystem.mkdirp
+        llnl.util.filesystem.mkdirp = lambda x: True
 
         # Monkey patch open
-        test_install.open = mock_open
+        self.codecs_open = codecs.open
+        codecs.open = mock_open
 
         # Clean FILE_REGISTRY
         FILE_REGISTRY.clear()
@@ -176,21 +173,24 @@ def monkey_parse_specs(x, concretize):
 
     def tearDown(self):
         # Remove the monkey patched test_install.open
-        test_install.open = open
+        codecs.open = self.codecs_open
 
         # Remove the monkey patched os.mkdir
-        os.mkdir = self.os_mkdir
-        del self.os_mkdir
+        llnl.util.filesystem.mkdirp = self.mkdirp
+        del self.mkdirp
 
         # Remove the monkey patched parse_specs
         spack.cmd.parse_specs = self.parse_specs
         del self.parse_specs
-        super(TestInstallTest, self).tearDown()
+        super(InstallTestJunitLog, self).tearDown()
 
         spack.repo = self.saved_db
 
     def test_installing_both(self):
-        test_install.test_install(None, MockArgs('X'))
+        parser = argparse.ArgumentParser()
+        install.setup_parser(parser)
+        args = parser.parse_args(['--log-format=junit', 'X'])
+        install.install(parser, args)
         self.assertEqual(len(FILE_REGISTRY), 1)
         for _, content in FILE_REGISTRY.items():
             self.assertTrue('tests="2"' in content)
@@ -200,7 +200,10 @@ def test_installing_both(self):
     def test_dependency_already_installed(self):
         pkgX.installed = True
         pkgY.installed = True
-        test_install.test_install(None, MockArgs('X'))
+        parser = argparse.ArgumentParser()
+        install.setup_parser(parser)
+        args = parser.parse_args(['--log-format=junit', 'X'])
+        install.install(parser, args)
         self.assertEqual(len(FILE_REGISTRY), 1)
         for _, content in FILE_REGISTRY.items():
             self.assertTrue('tests="2"' in content)