From 7ea10e768ee1a7deab98ae538d916bbeeb0346b8 Mon Sep 17 00:00:00 2001
From: Massimiliano Culpo <massimiliano.culpo@googlemail.com>
Date: Thu, 29 Dec 2016 16:48:48 +0100
Subject: [PATCH] unit tests: replace nose with pytest  (#2502)

* Porting: substitute nose with pytest

This huge commit substitutes nose with pytest as a testing system. Things done here:

* deleted external/nose as it is no longer used
* moved mock resources in their own directory 'test/mock/'
* ported two tests (cmd/find, build_system) to pytest native syntax as an example
* build_environment, log: used monkeypatch instead of try/catch
* moved global mocking of fetch_cache to an auto-used fixture
* moved global mocking from test/__init__.py to conftest.py
* made `spack test` a wrapper around pytest
* run-unit-tests: avoid running python 2.6 tests under coverage to speed them up
* use `pytest --cov` instead of coverage run to cut down testing time

* mock/packages_test: moved mock yaml configuration to files instead of leaving it in the code as string literals

* concretize.py: ported tests to native pytest, reverted multiprocessing in pytest.ini as it was creating the wrong report for coveralls

* conftest.py, fixtures: added docstrings

* concretize_preferences.py: uses fixtures instead of subclassing MockPackagesTest

* directory_layout.py: uses fixtures instead of subclassing MockPackagesTest

* install.py: uses fixtures instead of subclassing MockPackagesTest

* optional_deps.py: uses fixtures instead of subclassing MockPackagesTest

optional_deps.py: uses fixtures instead of subclassing MockPackagesTest

* packages.py: uses fixtures instead of subclassing MockPackagesTest

* provider_index.py: uses fixtures instead of subclassing MockPackagesTest

* spec_yaml.py: uses fixtures instead of subclassing MockPackagesTest

* multimethod.py: uses fixtures instead of subclassing MockPackagesTest

* install.py: now uses mock_archive_url

* git_fetch.py: uses fixtures instead of subclassing MockPackagesTest

* hg_fetch.py: uses fixtures instead of subclassing MockPackagesTest

* svn_fetch.py, mirror.py: uses fixtures instead of subclassing MockPackagesTest
repo.py: deleted

* test_compiler_cmd.py: uses fixtures instead of subclassing MockPackagesTest

* cmd/module.py, cmd/uninstall.py: uses fixtures instead of subclassing MockDatabase

* database.py: uses fixtures instead of subclassing MockDatabase, removed mock/database

* pytest: uncluttering fixture implementations

* database: changing the scope to 'module'

* config.py: uses fixtures instead of subclassing MockPackagesTest

* spec_dag.py, spec_semantics.py: uses fixtures instead of subclassing MockPackagesTest

* stage.py: uses fixtures instead of subclassing MockPackagesTest. Removed mock directory

* pytest: added docstrings to all the fixtures

* pytest: final cleanup

* build_system_guess.py: fixed naming and docstrings as suggested by @scheibelp

* spec_syntax.py: added expected failure on parsing multiple specs closes #1976

* Add pytest and pytest-cov to Spack externals.

* Make `spack flake8` ignore externals.

* run-unit-tests runs spack test and not pytest.

* Remove all the special stuff for `spack test`

- Remove `conftest.py` magic and all the special case stuff in `bin/spack`

- Spack commands can optionally take unknown arguments, if they want to
  handle them.

- `spack test` is now a command like the others.

- `spack test` now just delegates its arguments to `pytest`, but it does
  it by receiving unknown arguments and NOT taking an explicit
  help argument.

* Fix error in fixtures.

* Improve `spack test` command a bit.

- Now supports an approximation of the old simple interface
- Also supports full pytest options if you want them.

* Use external coverage instead of pytest-cov

* Make coverage use parallel-mode.

* change __init__.py docs to include pytest
---
 .coveragerc                                   |    2 +
 .gitignore                                    |    2 +
 .travis.yml                                   |    8 +-
 bin/spack                                     |   58 +-
 lib/spack/docs/contribution_guide.rst         |    2 +-
 lib/spack/external/__init__.py                |    2 +-
 lib/spack/external/_pytest/AUTHORS            |  141 +
 lib/spack/external/_pytest/LICENSE            |   21 +
 lib/spack/external/_pytest/README.rst         |  102 +
 lib/spack/external/_pytest/__init__.py        |    2 +
 lib/spack/external/_pytest/_argcomplete.py    |  102 +
 lib/spack/external/_pytest/_code/__init__.py  |    9 +
 .../external/_pytest/_code/_py2traceback.py   |   81 +
 lib/spack/external/_pytest/_code/code.py      |  861 +++++++
 lib/spack/external/_pytest/_code/source.py    |  414 +++
 lib/spack/external/_pytest/_pluggy.py         |   11 +
 .../external/_pytest/assertion/__init__.py    |  164 ++
 .../external/_pytest/assertion/rewrite.py     |  945 +++++++
 lib/spack/external/_pytest/assertion/util.py  |  300 +++
 lib/spack/external/_pytest/cacheprovider.py   |  245 ++
 lib/spack/external/_pytest/capture.py         |  491 ++++
 lib/spack/external/_pytest/compat.py          |  230 ++
 lib/spack/external/_pytest/config.py          | 1340 ++++++++++
 lib/spack/external/_pytest/debugging.py       |  124 +
 lib/spack/external/_pytest/deprecated.py      |   24 +
 lib/spack/external/_pytest/doctest.py         |  331 +++
 lib/spack/external/_pytest/fixtures.py        | 1134 ++++++++
 lib/spack/external/_pytest/freeze_support.py  |   45 +
 lib/spack/external/_pytest/helpconfig.py      |  144 ++
 lib/spack/external/_pytest/hookspec.py        |  314 +++
 lib/spack/external/_pytest/junitxml.py        |  413 +++
 lib/spack/external/_pytest/main.py            |  762 ++++++
 lib/spack/external/_pytest/mark.py            |  328 +++
 lib/spack/external/_pytest/monkeypatch.py     |  258 ++
 lib/spack/external/_pytest/nose.py            |   71 +
 lib/spack/external/_pytest/pastebin.py        |   98 +
 lib/spack/external/_pytest/pytester.py        | 1139 +++++++++
 lib/spack/external/_pytest/python.py          | 1578 ++++++++++++
 lib/spack/external/_pytest/recwarn.py         |  226 ++
 lib/spack/external/_pytest/resultlog.py       |  107 +
 lib/spack/external/_pytest/runner.py          |  578 +++++
 lib/spack/external/_pytest/setuponly.py       |   72 +
 lib/spack/external/_pytest/setupplan.py       |   23 +
 lib/spack/external/_pytest/skipping.py        |  375 +++
 lib/spack/external/_pytest/terminal.py        |  593 +++++
 lib/spack/external/_pytest/tmpdir.py          |  124 +
 lib/spack/external/_pytest/unittest.py        |  217 ++
 .../_pytest/vendored_packages/README.md       |   13 +
 .../_pytest/vendored_packages/__init__.py     |    0
 .../pluggy-0.4.0.dist-info/DESCRIPTION.rst    |   11 +
 .../pluggy-0.4.0.dist-info/INSTALLER          |    1 +
 .../pluggy-0.4.0.dist-info/LICENSE.txt        |   22 +
 .../pluggy-0.4.0.dist-info/METADATA           |   40 +
 .../pluggy-0.4.0.dist-info/RECORD             |    9 +
 .../pluggy-0.4.0.dist-info/WHEEL              |    6 +
 .../pluggy-0.4.0.dist-info/metadata.json      |    1 +
 .../pluggy-0.4.0.dist-info/top_level.txt      |    1 +
 .../_pytest/vendored_packages/pluggy.py       |  802 ++++++
 lib/spack/external/nose/LICENSE               |  502 ----
 lib/spack/external/nose/__init__.py           |   15 -
 lib/spack/external/nose/__main__.py           |    8 -
 lib/spack/external/nose/case.py               |  397 ---
 lib/spack/external/nose/commands.py           |  172 --
 lib/spack/external/nose/config.py             |  661 -----
 lib/spack/external/nose/core.py               |  341 ---
 lib/spack/external/nose/exc.py                |    9 -
 lib/spack/external/nose/ext/__init__.py       |    3 -
 lib/spack/external/nose/ext/dtcompat.py       | 2272 -----------------
 lib/spack/external/nose/failure.py            |   42 -
 lib/spack/external/nose/importer.py           |  167 --
 lib/spack/external/nose/inspector.py          |  207 --
 lib/spack/external/nose/loader.py             |  623 -----
 lib/spack/external/nose/plugins/__init__.py   |  190 --
 lib/spack/external/nose/plugins/allmodules.py |   45 -
 lib/spack/external/nose/plugins/attrib.py     |  286 ---
 lib/spack/external/nose/plugins/base.py       |  725 ------
 lib/spack/external/nose/plugins/builtin.py    |   34 -
 lib/spack/external/nose/plugins/capture.py    |  115 -
 lib/spack/external/nose/plugins/collect.py    |   94 -
 lib/spack/external/nose/plugins/cover.py      |  271 --
 lib/spack/external/nose/plugins/debug.py      |   67 -
 lib/spack/external/nose/plugins/deprecated.py |   45 -
 lib/spack/external/nose/plugins/doctests.py   |  455 ----
 lib/spack/external/nose/plugins/errorclass.py |  210 --
 .../external/nose/plugins/failuredetail.py    |   49 -
 lib/spack/external/nose/plugins/isolate.py    |  103 -
 lib/spack/external/nose/plugins/logcapture.py |  245 --
 lib/spack/external/nose/plugins/manager.py    |  460 ----
 .../external/nose/plugins/multiprocess.py     |  835 ------
 lib/spack/external/nose/plugins/plugintest.py |  416 ---
 lib/spack/external/nose/plugins/prof.py       |  154 --
 lib/spack/external/nose/plugins/skip.py       |   63 -
 lib/spack/external/nose/plugins/testid.py     |  311 ---
 lib/spack/external/nose/plugins/xunit.py      |  341 ---
 lib/spack/external/nose/proxy.py              |  188 --
 lib/spack/external/nose/pyversion.py          |  215 --
 lib/spack/external/nose/result.py             |  200 --
 lib/spack/external/nose/selector.py           |  251 --
 lib/spack/external/nose/sphinx/__init__.py    |    1 -
 lib/spack/external/nose/sphinx/pluginopts.py  |  189 --
 lib/spack/external/nose/suite.py              |  609 -----
 lib/spack/external/nose/tools/__init__.py     |   15 -
 lib/spack/external/nose/tools/nontrivial.py   |  151 --
 lib/spack/external/nose/tools/trivial.py      |   54 -
 lib/spack/external/nose/twistedtools.py       |  173 --
 lib/spack/external/nose/usage.txt             |  115 -
 lib/spack/external/nose/util.py               |  668 -----
 lib/spack/external/pyqver2.py                 |    6 +-
 lib/spack/external/pytest.py                  |   28 +
 lib/spack/llnl/util/lang.py                   |   12 +
 lib/spack/llnl/util/tty/log.py                |    5 +-
 lib/spack/spack/__init__.py                   |    6 +
 lib/spack/spack/build_environment.py          |    9 +-
 lib/spack/spack/cmd/__init__.py               |   10 +
 lib/spack/spack/cmd/flake8.py                 |   60 +-
 lib/spack/spack/cmd/test.py                   |  123 +-
 lib/spack/spack/repository.py                 |    2 +-
 lib/spack/spack/test/__init__.py              |  129 -
 lib/spack/spack/test/architecture.py          |  223 +-
 lib/spack/spack/test/build_system_guess.py    |   87 +-
 lib/spack/spack/test/cmd/find.py              |   51 +-
 lib/spack/spack/test/cmd/module.py            |  111 +-
 lib/spack/spack/test/cmd/test_compiler_cmd.py |   90 +-
 lib/spack/spack/test/cmd/uninstall.py         |   47 +-
 lib/spack/spack/test/concretize.py            |  351 ++-
 .../spack/test/concretize_preferences.py      |  133 +-
 lib/spack/spack/test/config.py                |  261 +-
 lib/spack/spack/test/conftest.py              |  515 ++++
 lib/spack/spack/test/data/compilers.yaml      |  116 +
 lib/spack/spack/test/data/config.yaml         |   11 +
 lib/spack/spack/test/data/packages.yaml       |   14 +
 lib/spack/spack/test/database.py              |  547 ++--
 lib/spack/spack/test/directory_layout.py      |  316 ++-
 lib/spack/spack/test/git_fetch.py             |  138 +-
 lib/spack/spack/test/hg_fetch.py              |  124 +-
 lib/spack/spack/test/install.py               |  142 +-
 lib/spack/spack/test/mirror.py                |  230 +-
 lib/spack/spack/test/mock_database.py         |  108 -
 lib/spack/spack/test/mock_packages_test.py    |  281 --
 lib/spack/spack/test/mock_repo.py             |  202 --
 lib/spack/spack/test/modules.py               |  446 ++--
 lib/spack/spack/test/multimethod.py           |  134 +-
 lib/spack/spack/test/optional_deps.py         |  166 +-
 lib/spack/spack/test/packages.py              |  185 +-
 lib/spack/spack/test/provider_index.py        |   76 +-
 lib/spack/spack/test/spec_dag.py              |  267 +-
 lib/spack/spack/test/spec_semantics.py        |  526 ++--
 lib/spack/spack/test/spec_syntax.py           |  144 +-
 lib/spack/spack/test/spec_yaml.py             |  272 +-
 lib/spack/spack/test/stage.py                 |  605 ++---
 lib/spack/spack/test/svn_fetch.py             |  135 +-
 lib/spack/spack/test/tally_plugin.py          |   64 -
 lib/spack/spack/test/url_extrapolate.py       |    7 +-
 pytest.ini                                    |    5 +
 share/spack/qa/changed_files                  |   31 -
 share/spack/qa/run-unit-tests                 |    7 +-
 156 files changed, 19218 insertions(+), 17619 deletions(-)
 create mode 100644 lib/spack/external/_pytest/AUTHORS
 create mode 100644 lib/spack/external/_pytest/LICENSE
 create mode 100644 lib/spack/external/_pytest/README.rst
 create mode 100644 lib/spack/external/_pytest/__init__.py
 create mode 100644 lib/spack/external/_pytest/_argcomplete.py
 create mode 100644 lib/spack/external/_pytest/_code/__init__.py
 create mode 100644 lib/spack/external/_pytest/_code/_py2traceback.py
 create mode 100644 lib/spack/external/_pytest/_code/code.py
 create mode 100644 lib/spack/external/_pytest/_code/source.py
 create mode 100644 lib/spack/external/_pytest/_pluggy.py
 create mode 100644 lib/spack/external/_pytest/assertion/__init__.py
 create mode 100644 lib/spack/external/_pytest/assertion/rewrite.py
 create mode 100644 lib/spack/external/_pytest/assertion/util.py
 create mode 100644 lib/spack/external/_pytest/cacheprovider.py
 create mode 100644 lib/spack/external/_pytest/capture.py
 create mode 100644 lib/spack/external/_pytest/compat.py
 create mode 100644 lib/spack/external/_pytest/config.py
 create mode 100644 lib/spack/external/_pytest/debugging.py
 create mode 100644 lib/spack/external/_pytest/deprecated.py
 create mode 100644 lib/spack/external/_pytest/doctest.py
 create mode 100644 lib/spack/external/_pytest/fixtures.py
 create mode 100644 lib/spack/external/_pytest/freeze_support.py
 create mode 100644 lib/spack/external/_pytest/helpconfig.py
 create mode 100644 lib/spack/external/_pytest/hookspec.py
 create mode 100644 lib/spack/external/_pytest/junitxml.py
 create mode 100644 lib/spack/external/_pytest/main.py
 create mode 100644 lib/spack/external/_pytest/mark.py
 create mode 100644 lib/spack/external/_pytest/monkeypatch.py
 create mode 100644 lib/spack/external/_pytest/nose.py
 create mode 100644 lib/spack/external/_pytest/pastebin.py
 create mode 100644 lib/spack/external/_pytest/pytester.py
 create mode 100644 lib/spack/external/_pytest/python.py
 create mode 100644 lib/spack/external/_pytest/recwarn.py
 create mode 100644 lib/spack/external/_pytest/resultlog.py
 create mode 100644 lib/spack/external/_pytest/runner.py
 create mode 100644 lib/spack/external/_pytest/setuponly.py
 create mode 100644 lib/spack/external/_pytest/setupplan.py
 create mode 100644 lib/spack/external/_pytest/skipping.py
 create mode 100644 lib/spack/external/_pytest/terminal.py
 create mode 100644 lib/spack/external/_pytest/tmpdir.py
 create mode 100644 lib/spack/external/_pytest/unittest.py
 create mode 100644 lib/spack/external/_pytest/vendored_packages/README.md
 create mode 100644 lib/spack/external/_pytest/vendored_packages/__init__.py
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
 create mode 100644 lib/spack/external/_pytest/vendored_packages/pluggy.py
 delete mode 100644 lib/spack/external/nose/LICENSE
 delete mode 100644 lib/spack/external/nose/__init__.py
 delete mode 100644 lib/spack/external/nose/__main__.py
 delete mode 100644 lib/spack/external/nose/case.py
 delete mode 100644 lib/spack/external/nose/commands.py
 delete mode 100644 lib/spack/external/nose/config.py
 delete mode 100644 lib/spack/external/nose/core.py
 delete mode 100644 lib/spack/external/nose/exc.py
 delete mode 100644 lib/spack/external/nose/ext/__init__.py
 delete mode 100644 lib/spack/external/nose/ext/dtcompat.py
 delete mode 100644 lib/spack/external/nose/failure.py
 delete mode 100644 lib/spack/external/nose/importer.py
 delete mode 100644 lib/spack/external/nose/inspector.py
 delete mode 100644 lib/spack/external/nose/loader.py
 delete mode 100644 lib/spack/external/nose/plugins/__init__.py
 delete mode 100644 lib/spack/external/nose/plugins/allmodules.py
 delete mode 100644 lib/spack/external/nose/plugins/attrib.py
 delete mode 100644 lib/spack/external/nose/plugins/base.py
 delete mode 100644 lib/spack/external/nose/plugins/builtin.py
 delete mode 100644 lib/spack/external/nose/plugins/capture.py
 delete mode 100644 lib/spack/external/nose/plugins/collect.py
 delete mode 100644 lib/spack/external/nose/plugins/cover.py
 delete mode 100644 lib/spack/external/nose/plugins/debug.py
 delete mode 100644 lib/spack/external/nose/plugins/deprecated.py
 delete mode 100644 lib/spack/external/nose/plugins/doctests.py
 delete mode 100644 lib/spack/external/nose/plugins/errorclass.py
 delete mode 100644 lib/spack/external/nose/plugins/failuredetail.py
 delete mode 100644 lib/spack/external/nose/plugins/isolate.py
 delete mode 100644 lib/spack/external/nose/plugins/logcapture.py
 delete mode 100644 lib/spack/external/nose/plugins/manager.py
 delete mode 100644 lib/spack/external/nose/plugins/multiprocess.py
 delete mode 100644 lib/spack/external/nose/plugins/plugintest.py
 delete mode 100644 lib/spack/external/nose/plugins/prof.py
 delete mode 100644 lib/spack/external/nose/plugins/skip.py
 delete mode 100644 lib/spack/external/nose/plugins/testid.py
 delete mode 100644 lib/spack/external/nose/plugins/xunit.py
 delete mode 100644 lib/spack/external/nose/proxy.py
 delete mode 100644 lib/spack/external/nose/pyversion.py
 delete mode 100644 lib/spack/external/nose/result.py
 delete mode 100644 lib/spack/external/nose/selector.py
 delete mode 100644 lib/spack/external/nose/sphinx/__init__.py
 delete mode 100644 lib/spack/external/nose/sphinx/pluginopts.py
 delete mode 100644 lib/spack/external/nose/suite.py
 delete mode 100644 lib/spack/external/nose/tools/__init__.py
 delete mode 100644 lib/spack/external/nose/tools/nontrivial.py
 delete mode 100644 lib/spack/external/nose/tools/trivial.py
 delete mode 100644 lib/spack/external/nose/twistedtools.py
 delete mode 100644 lib/spack/external/nose/usage.txt
 delete mode 100644 lib/spack/external/nose/util.py
 create mode 100644 lib/spack/external/pytest.py
 create mode 100644 lib/spack/spack/test/conftest.py
 create mode 100644 lib/spack/spack/test/data/compilers.yaml
 create mode 100644 lib/spack/spack/test/data/config.yaml
 create mode 100644 lib/spack/spack/test/data/packages.yaml
 delete mode 100644 lib/spack/spack/test/mock_database.py
 delete mode 100644 lib/spack/spack/test/mock_packages_test.py
 delete mode 100644 lib/spack/spack/test/mock_repo.py
 delete mode 100644 lib/spack/spack/test/tally_plugin.py
 create mode 100644 pytest.ini
 delete mode 100755 share/spack/qa/changed_files

diff --git a/.coveragerc b/.coveragerc
index a1271a94fc..0201a4b502 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,6 +1,8 @@
 # -*- conf -*-
 # .coveragerc to control coverage.py
 [run]
+parallel = True
+concurrency = multiprocessing
 branch = True
 source = lib
 omit =
diff --git a/.gitignore b/.gitignore
index a451f9e14e..1a95d49377 100644
--- a/.gitignore
+++ b/.gitignore
@@ -20,3 +20,5 @@
 .coverage
 #*
 .#*
+/.cache
+/bin/spackc
diff --git a/.travis.yml b/.travis.yml
index 9553a85771..17549e42ab 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -37,10 +37,10 @@ addons:
 
 # Install various dependencies
 install:
-  - pip install coveralls
-  - pip install flake8
-  - pip install sphinx
-  - pip install mercurial
+  - pip install --upgrade coveralls
+  - pip install --upgrade flake8
+  - pip install --upgrade sphinx
+  - pip install --upgrade mercurial
 
 before_script:
   # Need this for the git tests to succeed.
diff --git a/bin/spack b/bin/spack
index cc9450ade7..2ff55a486b 100755
--- a/bin/spack
+++ b/bin/spack
@@ -31,6 +31,7 @@ if (sys.version_info[0] > 2) or (sys.version_info[:2] < (2, 6)):
              "This is Python %d.%d.%d." % v_info)
 
 import os
+import inspect
 
 # Find spack's location and its prefix.
 SPACK_FILE = os.path.realpath(os.path.expanduser(__file__))
@@ -129,6 +130,7 @@ parser.add_argument('-V', '--version', action='version',
 # subparser for setup.
 subparsers = parser.add_subparsers(metavar='SUBCOMMAND', dest="command")
 
+
 import spack.cmd
 for cmd in spack.cmd.commands:
     module = spack.cmd.get_module(cmd)
@@ -136,16 +138,8 @@ for cmd in spack.cmd.commands:
     subparser = subparsers.add_parser(cmd_name, help=module.description)
     module.setup_parser(subparser)
 
-# Just print help and exit if run with no arguments at all
-if len(sys.argv) == 1:
-    parser.print_help()
-    sys.exit(1)
-
-# actually parse the args.
-args = parser.parse_args()
-
 
-def main():
+def _main(args, unknown_args):
     # Set up environment based on args.
     tty.set_verbose(args.verbose)
     tty.set_debug(args.debug)
@@ -171,8 +165,21 @@ def main():
 
     # Try to load the particular command asked for and run it
     command = spack.cmd.get_command(args.command.replace('-', '_'))
+
+    # Allow commands to inject an optional argument and get unknown args
+    # if they want to handle them.
+    info = dict(inspect.getmembers(command))
+    varnames = info['__code__'].co_varnames
+    argcount = info['__code__'].co_argcount
+
+    # Actually execute the command
     try:
-        return_val = command(parser, args)
+        if argcount == 3 and varnames[2] == 'unknown_args':
+            return_val = command(parser, args, unknown_args)
+        else:
+            if unknown_args:
+                tty.die('unrecognized arguments: %s' % ' '.join(unknown_args))
+            return_val = command(parser, args)
     except SpackError as e:
         e.die()
     except KeyboardInterrupt:
@@ -188,11 +195,26 @@ def main():
         tty.die("Bad return value from command %s: %s"
                 % (args.command, return_val))
 
-if args.profile:
-    import cProfile
-    cProfile.run('main()', sort='time')
-elif args.pdb:
-    import pdb
-    pdb.run('main()')
-else:
-    main()
+
+def main(args):
+    # Just print help and exit if run with no arguments at all
+    if len(args) == 1:
+        parser.print_help()
+        sys.exit(1)
+
+    # actually parse the args.
+    args, unknown = parser.parse_known_args()
+
+    if args.profile:
+        import cProfile
+        cProfile.runctx('_main(args, unknown)', globals(), locals(),
+                        sort='time')
+    elif args.pdb:
+        import pdb
+        pdb.runctx('_main(args, unknown)', globals(), locals())
+    else:
+        _main(args, unknown)
+
+
+if __name__ == '__main__':
+    main(sys.argv)
diff --git a/lib/spack/docs/contribution_guide.rst b/lib/spack/docs/contribution_guide.rst
index 49595fecf8..4abf97ef92 100644
--- a/lib/spack/docs/contribution_guide.rst
+++ b/lib/spack/docs/contribution_guide.rst
@@ -75,7 +75,7 @@ This allows you to develop iteratively: make a change, test that change, make
 another change, test that change, etc. To get a list of all available unit
 tests, run:
 
-.. command-output:: spack test --list
+.. command-output:: spack test --collect-only
 
 Unit tests are crucial to making sure bugs aren't introduced into Spack. If you
 are modifying core Spack libraries or adding new functionality, please consider
diff --git a/lib/spack/external/__init__.py b/lib/spack/external/__init__.py
index 88d39a7654..49886ae595 100644
--- a/lib/spack/external/__init__.py
+++ b/lib/spack/external/__init__.py
@@ -35,7 +35,7 @@
 
     jsonschema:  An implementation of JSON Schema for Python.
 
-    nose:        The nose testing framework.
+    pytest:      Testing framework used by Spack.
 
     ordereddict: We include our own version to be Python 2.6 compatible.
 
diff --git a/lib/spack/external/_pytest/AUTHORS b/lib/spack/external/_pytest/AUTHORS
new file mode 100644
index 0000000000..8c7cb19cee
--- /dev/null
+++ b/lib/spack/external/_pytest/AUTHORS
@@ -0,0 +1,141 @@
+Holger Krekel, holger at merlinux eu
+merlinux GmbH, Germany, office at merlinux eu
+
+Contributors include::
+
+Abdeali JK
+Abhijeet Kasurde
+Ahn Ki-Wook
+Alexei Kozlenok
+Anatoly Bubenkoff
+Andreas Zeidler
+Andrzej Ostrowski
+Andy Freeland
+Anthon van der Neut
+Antony Lee
+Armin Rigo
+Aron Curzon
+Aviv Palivoda
+Ben Webb
+Benjamin Peterson
+Bernard Pratz
+Bob Ippolito
+Brian Dorsey
+Brian Okken
+Brianna Laugher
+Bruno Oliveira
+Cal Leeming
+Carl Friedrich Bolz
+Charles Cloud
+Charnjit SiNGH (CCSJ)
+Chris Lamb
+Christian Boelsen
+Christian Theunert
+Christian Tismer
+Christopher Gilling
+Daniel Grana
+Daniel Hahler
+Daniel Nuri
+Daniel Wandschneider
+Danielle Jenkins
+Dave Hunt
+David Díaz-Barquero
+David Mohr
+David Vierra
+Diego Russo
+Dmitry Dygalo
+Duncan Betts
+Edison Gustavo Muenz
+Edoardo Batini
+Eduardo Schettino
+Elizaveta Shashkova
+Endre Galaczi
+Eric Hunsberger
+Eric Siegerman
+Erik M. Bray
+Feng Ma
+Florian Bruhin
+Floris Bruynooghe
+Gabriel Reis
+Georgy Dyuldin
+Graham Horler
+Greg Price
+Grig Gheorghiu
+Grigorii Eremeev (budulianin)
+Guido Wesdorp
+Harald Armin Massa
+Ian Bicking
+Jaap Broekhuizen
+Jan Balster
+Janne Vanhala
+Jason R. Coombs
+Javier Domingo Cansino
+Javier Romero
+John Towler
+Jon Sonesen
+Jordan Guymon
+Joshua Bronson
+Jurko Gospodnetić
+Justyna Janczyszyn
+Kale Kundert
+Katarzyna Jachim
+Kevin Cox
+Lee Kamentsky
+Lev Maximov
+Lukas Bednar
+Luke Murphy
+Maciek Fijalkowski
+Maho
+Marc Schlaich
+Marcin Bachry
+Mark Abramowitz
+Markus Unterwaditzer
+Martijn Faassen
+Martin K. Scherer
+Martin Prusse
+Mathieu Clabaut
+Matt Bachmann
+Matt Williams
+Matthias Hafner
+mbyt
+Michael Aquilina
+Michael Birtwell
+Michael Droettboom
+Michael Seifert
+Mike Lundy
+Ned Batchelder
+Neven Mundar
+Nicolas Delaby
+Oleg Pidsadnyi
+Oliver Bestwalter
+Omar Kohl
+Pieter Mulder
+Piotr Banaszkiewicz
+Punyashloka Biswal
+Quentin Pradet
+Ralf Schmitt
+Raphael Pierzina
+Raquel Alegre
+Roberto Polli
+Romain Dorgueil
+Roman Bolshakov
+Ronny Pfannschmidt
+Ross Lawley
+Russel Winder
+Ryan Wooden
+Samuele Pedroni
+Simon Gomizelj
+Stefan Farmbauer
+Stefan Zimmermann
+Stefano Taschini
+Steffen Allner
+Stephan Obermann
+Tareq Alayan
+Ted Xiao
+Thomas Grainger
+Tom Viner
+Trevor Bekolay
+Tyler Goodlet
+Vasily Kuznetsov
+Wouter van Ackooy
+Xuecong Liao
diff --git a/lib/spack/external/_pytest/LICENSE b/lib/spack/external/_pytest/LICENSE
new file mode 100644
index 0000000000..9e27bd7841
--- /dev/null
+++ b/lib/spack/external/_pytest/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2004-2016 Holger Krekel and others
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/lib/spack/external/_pytest/README.rst b/lib/spack/external/_pytest/README.rst
new file mode 100644
index 0000000000..d5650af655
--- /dev/null
+++ b/lib/spack/external/_pytest/README.rst
@@ -0,0 +1,102 @@
+.. image:: http://docs.pytest.org/en/latest/_static/pytest1.png
+   :target: http://docs.pytest.org
+   :align: center
+   :alt: pytest
+
+------
+
+.. image:: https://img.shields.io/pypi/v/pytest.svg
+   :target: https://pypi.python.org/pypi/pytest
+.. image:: https://img.shields.io/pypi/pyversions/pytest.svg
+  :target: https://pypi.python.org/pypi/pytest
+.. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg
+   :target: https://coveralls.io/r/pytest-dev/pytest
+.. image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
+    :target: https://travis-ci.org/pytest-dev/pytest
+.. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true
+    :target: https://ci.appveyor.com/project/pytestbot/pytest
+
+The ``pytest`` framework makes it easy to write small tests, yet
+scales to support complex functional testing for applications and libraries.
+
+An example of a simple test:
+
+.. code-block:: python
+
+    # content of test_sample.py
+    def inc(x):
+        return x + 1
+
+    def test_answer():
+        assert inc(3) == 5
+
+
+To execute it::
+
+    $ pytest
+    ============================= test session starts =============================    
+    collected 1 items
+
+    test_sample.py F
+
+    ================================== FAILURES ===================================
+    _________________________________ test_answer _________________________________
+
+        def test_answer():
+    >       assert inc(3) == 5
+    E       assert 4 == 5
+    E        +  where 4 = inc(3)
+
+    test_sample.py:5: AssertionError
+    ========================== 1 failed in 0.04 seconds ===========================
+
+
+Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://docs.pytest.org/en/latest/getting-started.html#our-first-test-run>`_ for more examples.
+
+
+Features
+--------
+
+- Detailed info on failing `assert statements <http://docs.pytest.org/en/latest/assert.html>`_ (no need to remember ``self.assert*`` names);
+
+- `Auto-discovery
+  <http://docs.pytest.org/en/latest/goodpractices.html#python-test-discovery>`_
+  of test modules and functions;
+
+- `Modular fixtures <http://docs.pytest.org/en/latest/fixture.html>`_ for
+  managing small or parametrized long-lived test resources;
+
+- Can run `unittest <http://docs.pytest.org/en/latest/unittest.html>`_ (or trial),
+  `nose <http://docs.pytest.org/en/latest/nose.html>`_ test suites out of the box;
+
+- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
+
+- Rich plugin architecture, with over 150+ `external plugins <http://docs.pytest.org/en/latest/plugins.html#installing-external-plugins-searching>`_ and thriving community;
+
+
+Documentation
+-------------
+
+For full documentation, including installation, tutorials and PDF documents, please see http://docs.pytest.org.
+
+
+Bugs/Requests
+-------------
+
+Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
+
+
+Changelog
+---------
+
+Consult the `Changelog <http://docs.pytest.org/en/latest/changelog.html>`__ page for fixes and enhancements of each version.
+
+
+License
+-------
+
+Copyright Holger Krekel and others, 2004-2016.
+
+Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
+
+.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE
diff --git a/lib/spack/external/_pytest/__init__.py b/lib/spack/external/_pytest/__init__.py
new file mode 100644
index 0000000000..be20d3d41c
--- /dev/null
+++ b/lib/spack/external/_pytest/__init__.py
@@ -0,0 +1,2 @@
+# Version of the vendored pytest package; kept in sync with the upstream release.
+__version__ = '3.0.5'
diff --git a/lib/spack/external/_pytest/_argcomplete.py b/lib/spack/external/_pytest/_argcomplete.py
new file mode 100644
index 0000000000..3ab679d8be
--- /dev/null
+++ b/lib/spack/external/_pytest/_argcomplete.py
@@ -0,0 +1,102 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code).
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn<TAB>"
+instead of the default "dirname ":
+
+   optparser.add_argument(Config._file_or_dir, nargs='*'
+                               ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh )
+uses a python program to determine startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+  # PYTHON_ARGCOMPLETE_OK
+so that the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked  with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+    # PYTHON_ARGCOMPLETE_OK
+  near the top of the main python entry point
+- include in the file calling parse_args():
+    from _argcomplete import try_argcomplete, filescompleter
+   , call try_argcomplete just before parse_args(), and optionally add
+   filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+  completers):
+    export _ARC_DEBUG=1
+- run:
+    python-argcomplete-check-easy-install-script $(which appname)
+    echo $?
+  will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+    _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+  which should throw a KeyError: 'COMPLINE' (which is properly set by the
+  global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+    'Fast file completer class'
+    def __init__(self, directories=True):
+        # 'directories' is accepted for argcomplete's completer interface;
+        # it is stored but not consulted by __call__ below.
+        self.directories = directories
+
+    def __call__(self, prefix, **kwargs):
+        """only called on non option completions"""
+        if os.path.sep in prefix[1:]: # separator past the first char: results are relative to dirname
+            prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+        else:
+            prefix_dir = 0
+        completion = []
+        globbed = []
+        if '*' not in prefix and '?' not in prefix:
+            # no wildcard in the prefix: also pick up dot-files when the
+            # prefix ends in a separator, then glob everything below it
+            if prefix[-1] == os.path.sep:  # we are on unix, otherwise no bash
+                globbed.extend(glob(prefix + '.*'))
+            prefix += '*'
+        globbed.extend(glob(prefix))
+        for x in sorted(globbed):
+            if os.path.isdir(x):
+                x += '/'
+            # append stripping the prefix (like bash, not like compgen)
+            completion.append(x[prefix_dir:])
+        return completion
+
+
+if os.environ.get('_ARGCOMPLETE'):
+    # argcomplete is driving this process (env var set by the bash hook):
+    # wire up the real completers, or bail out if argcomplete is missing
+    try:
+        import argcomplete.completers
+    except ImportError:
+        sys.exit(-1)
+    filescompleter = FastFilesCompleter()
+
+    def try_argcomplete(parser):
+        # must be called directly before ArgumentParser.parse_args()
+        argcomplete.autocomplete(parser)
+else:
+    # normal (non-completion) run: provide no-op stand-ins
+    def try_argcomplete(parser): pass
+    filescompleter = None
diff --git a/lib/spack/external/_pytest/_code/__init__.py b/lib/spack/external/_pytest/_code/__init__.py
new file mode 100644
index 0000000000..3463c11eac
--- /dev/null
+++ b/lib/spack/external/_pytest/_code/__init__.py
@@ -0,0 +1,9 @@
+""" python inspection/code generation API """
+from .code import Code  # noqa
+from .code import ExceptionInfo  # noqa
+from .code import Frame  # noqa
+from .code import Traceback  # noqa
+from .code import getrawcode  # noqa
+from .source import Source  # noqa
+from .source import compile_ as compile  # noqa
+from .source import getfslineno  # noqa
diff --git a/lib/spack/external/_pytest/_code/_py2traceback.py b/lib/spack/external/_pytest/_code/_py2traceback.py
new file mode 100644
index 0000000000..a830d9899a
--- /dev/null
+++ b/lib/spack/external/_pytest/_code/_py2traceback.py
@@ -0,0 +1,81 @@
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+    """Format the exception part of a traceback.
+
+    The arguments are the exception type and value such as given by
+    sys.last_type and sys.last_value. The return value is a list of
+    strings, each ending in a newline.
+
+    Normally, the list contains a single string; however, for
+    SyntaxError exceptions, it contains several lines that (when
+    printed) display detailed information about where the syntax
+    error occurred.
+
+    The message indicating which exception occurred is always the last
+    string in the list.
+
+    """
+
+    # An instance should not have a meaningful value parameter, but
+    # sometimes does, particularly for string exceptions, such as
+    # >>> raise string1, string2  # deprecated
+    #
+    # Clear these out first because issubtype(string1, SyntaxError)
+    # would throw another exception and mask the original problem.
+    if (isinstance(etype, BaseException) or
+        isinstance(etype, types.InstanceType) or
+        etype is None or type(etype) is str):
+        return [_format_final_exc_line(etype, value)]
+
+    stype = etype.__name__
+
+    if not issubclass(etype, SyntaxError):
+        return [_format_final_exc_line(stype, value)]
+
+    # It was a syntax error; show exactly where the problem was found.
+    lines = []
+    try:
+        msg, (filename, lineno, offset, badline) = value.args
+    except Exception:
+        # value.args does not have the usual SyntaxError layout;
+        # fall through and format it like any other exception
+        pass
+    else:
+        filename = filename or "<string>"
+        lines.append('  File "%s", line %d\n' % (filename, lineno))
+        if badline is not None:
+            if isinstance(badline, bytes):  # python 2 only
+                badline = badline.decode('utf-8', 'replace')
+            lines.append(u'    %s\n' % badline.strip())
+            if offset is not None:
+                caretspace = badline.rstrip('\n')[:offset].lstrip()
+                # non-space whitespace (likes tabs) must be kept for alignment
+                caretspace = ((c.isspace() and c or ' ') for c in caretspace)
+                # only three spaces to account for offset1 == pos 0
+                lines.append('   %s^\n' % ''.join(caretspace))
+        # only the bare message remains for the final "Type: msg" line
+        value = msg
+
+    lines.append(_format_final_exc_line(stype, value))
+    return lines
+
+def _format_final_exc_line(etype, value):
+    """Return the final "Type: message" line for format_exception_only.
+
+    Note: despite the upstream comment, this returns a single string
+    (ending in a newline), not a list.
+    """
+    valuestr = _some_str(value)
+    if value is None or not valuestr:
+        line = "%s\n" % etype
+    else:
+        line = "%s: %s\n" % (etype, valuestr)
+    return line
+
+def _some_str(value):
+    """Best-effort conversion of 'value' to a (preferably unicode) string.
+
+    Falls back from unicode() to str() to a placeholder so that
+    formatting a traceback never raises because of a broken __str__.
+    """
+    try:
+        return unicode(value)
+    except Exception:
+        try:
+            return str(value)
+        except Exception:
+            pass
+    return '<unprintable %s object>' % type(value).__name__
diff --git a/lib/spack/external/_pytest/_code/code.py b/lib/spack/external/_pytest/_code/code.py
new file mode 100644
index 0000000000..616d5c4313
--- /dev/null
+++ b/lib/spack/external/_pytest/_code/code.py
@@ -0,0 +1,861 @@
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+import re
+from weakref import ref
+
+import py
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+    from traceback import format_exception_only
+else:
+    from ._py2traceback import format_exception_only
+
+
+class Code(object):
+    """ wrapper around Python code objects """
+    def __init__(self, rawcode):
+        # accept functions/methods etc. as well as raw code objects
+        if not hasattr(rawcode, "co_filename"):
+            rawcode = getrawcode(rawcode)
+        try:
+            self.filename = rawcode.co_filename
+            # NOTE: stored 0-based (co_firstlineno is 1-based)
+            self.firstlineno = rawcode.co_firstlineno - 1
+            self.name = rawcode.co_name
+        except AttributeError:
+            raise TypeError("not a code object: %r" %(rawcode,))
+        self.raw = rawcode
+
+    def __eq__(self, other):
+        return self.raw == other.raw
+
+    # defining __eq__ without a consistent hash: explicitly unhashable
+    __hash__ = None
+
+    def __ne__(self, other):
+        return not self == other
+
+    @property
+    def path(self):
+        """ return a path object pointing to source code (note that it
+        might not point to an actually existing file). """
+        try:
+            p = py.path.local(self.raw.co_filename)
+            # maybe don't try this checking
+            if not p.check():
+                raise OSError("py.path check failed.")
+        except OSError:
+            # XXX maybe try harder like the weird logic
+            # in the standard lib [linecache.updatecache] does?
+            p = self.raw.co_filename
+
+        return p
+
+    @property
+    def fullsource(self):
+        """ return a _pytest._code.Source object for the full source file of the code
+        """
+        from _pytest._code import source
+        full, _ = source.findsource(self.raw)
+        return full
+
+    def source(self):
+        """ return a _pytest._code.Source object for the code object's source only
+        """
+        # return source only for that part of code
+        import _pytest._code
+        return _pytest._code.Source(self.raw)
+
+    def getargs(self, var=False):
+        """ return a tuple with the argument names for the code object
+
+            if 'var' is set True also return the names of the variable and
+            keyword arguments when present
+        """
+        # handfull shortcut for getting args
+        raw = self.raw
+        argcount = raw.co_argcount
+        # CO_VARARGS/CO_VARKEYWORDS each add at most one extra name
+        if var:
+            argcount += raw.co_flags & CO_VARARGS
+            argcount += raw.co_flags & CO_VARKEYWORDS
+        return raw.co_varnames[:argcount]
+
+class Frame(object):
+    """Wrapper around a Python frame holding f_locals and f_globals
+    in which expressions can be evaluated."""
+
+    def __init__(self, frame):
+        # NOTE: stored 0-based (f_lineno is 1-based)
+        self.lineno = frame.f_lineno - 1
+        self.f_globals = frame.f_globals
+        self.f_locals = frame.f_locals
+        self.raw = frame
+        self.code = Code(frame.f_code)
+
+    @property
+    def statement(self):
+        """ statement this frame is at """
+        import _pytest._code
+        if self.code.fullsource is None:
+            return _pytest._code.Source("")
+        return self.code.fullsource.getstatement(self.lineno)
+
+    def eval(self, code, **vars):
+        """ evaluate 'code' in the frame
+
+            'vars' are optional additional local variables
+
+            returns the result of the evaluation
+        """
+        # copy so the caller's extra vars never leak into the real frame
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        return eval(code, self.f_globals, f_locals)
+
+    def exec_(self, code, **vars):
+        """ exec 'code' in the frame
+
+            'vars' are optional additional local variables
+        """
+        f_locals = self.f_locals.copy()
+        f_locals.update(vars)
+        py.builtin.exec_(code, self.f_globals, f_locals )
+
+    def repr(self, object):
+        """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+        """
+        return py.io.saferepr(object)
+
+    def is_true(self, object):
+        return object
+
+    def getargs(self, var=False):
+        """ return a list of tuples (name, value) for all arguments
+
+            if 'var' is set True also include the variable and keyword
+            arguments when present
+        """
+        retval = []
+        for arg in self.code.getargs(var):
+            try:
+                retval.append((arg, self.f_locals[arg]))
+            except KeyError:
+                pass     # this can occur when using Psyco
+        return retval
+
+class TracebackEntry(object):
+    """ a single entry in a traceback """
+
+    _repr_style = None
+    exprinfo = None
+
+    def __init__(self, rawentry, excinfo=None):
+        # excinfo is a weakref to the owning ExceptionInfo (see Traceback)
+        self._excinfo = excinfo
+        self._rawentry = rawentry
+        # NOTE: stored 0-based (tb_lineno is 1-based)
+        self.lineno = rawentry.tb_lineno - 1
+
+    def set_repr_style(self, mode):
+        assert mode in ("short", "long")
+        self._repr_style = mode
+
+    @property
+    def frame(self):
+        import _pytest._code
+        return _pytest._code.Frame(self._rawentry.tb_frame)
+
+    @property
+    def relline(self):
+        # line number relative to the start of the code object
+        return self.lineno - self.frame.code.firstlineno
+
+    def __repr__(self):
+        return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+    @property
+    def statement(self):
+        """ _pytest._code.Source object for the current statement """
+        source = self.frame.code.fullsource
+        return source.getstatement(self.lineno)
+
+    @property
+    def path(self):
+        """ path to the source code """
+        return self.frame.code.path
+
+    def getlocals(self):
+        return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlaying frame")
+
+    def getfirstlinesource(self):
+        # on Jython this firstlineno can be -1 apparently
+        return max(self.frame.code.firstlineno, 0)
+
+    def getsource(self, astcache=None):
+        """ return failing source code. """
+        # we use the passed in astcache to not reparse asttrees
+        # within exception info printing
+        from _pytest._code.source import getstatementrange_ast
+        source = self.frame.code.fullsource
+        if source is None:
+            return None
+        key = astnode = None
+        if astcache is not None:
+            key = self.frame.code.path
+            if key is not None:
+                astnode = astcache.get(key, None)
+        start = self.getfirstlinesource()
+        try:
+            astnode, _, end = getstatementrange_ast(self.lineno, source,
+                                                    astnode=astnode)
+        except SyntaxError:
+            # could not determine the statement range; show one line only
+            end = self.lineno + 1
+        else:
+            if key is not None:
+                astcache[key] = astnode
+        return source[start:end]
+
+    source = property(getsource)
+
+    def ishidden(self):
+        """ return True if the current frame has a var __tracebackhide__
+            resolving to True
+
+            If __tracebackhide__ is a callable, it gets called with the
+            ExceptionInfo instance and can decide whether to hide the traceback.
+
+            mostly for internal use
+        """
+        try:
+            tbh = self.frame.f_locals['__tracebackhide__']
+        except KeyError:
+            try:
+                tbh = self.frame.f_globals['__tracebackhide__']
+            except KeyError:
+                return False
+
+        if py.builtin.callable(tbh):
+            # dereference the weakref; it may already be dead -> pass None
+            return tbh(None if self._excinfo is None else self._excinfo())
+        else:
+            return tbh
+
+    def __str__(self):
+        try:
+            fn = str(self.path)
+        except py.error.Error:
+            fn = '???'
+        name = self.frame.code.name
+        try:
+            line = str(self.statement).lstrip()
+        except KeyboardInterrupt:
+            raise
+        except:
+            # best effort: never let formatting the entry itself blow up
+            line = "???"
+        return "  File %r:%d in %s\n  %s\n" %(fn, self.lineno+1, name, line)
+
+    def name(self):
+        return self.frame.code.raw.co_name
+    name = property(name, None, None, "co_name of underlaying code")
+
+class Traceback(list):
+    """ Traceback objects encapsulate and offer higher level
+        access to Traceback entries.
+    """
+    Entry = TracebackEntry
+    def __init__(self, tb, excinfo=None):
+        """ initialize from given python traceback object and ExceptionInfo """
+        self._excinfo = excinfo
+        if hasattr(tb, 'tb_next'):
+            # raw traceback object: walk the tb_next chain into entries
+            def f(cur):
+                while cur is not None:
+                    yield self.Entry(cur, excinfo=excinfo)
+                    cur = cur.tb_next
+            list.__init__(self, f(tb))
+        else:
+            # already an iterable of entries
+            list.__init__(self, tb)
+
+    def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+        """ return a Traceback instance wrapping part of this Traceback
+
+            by providing any combination of path, lineno and firstlineno, the
+            first frame to start the to-be-returned traceback is determined
+
+            this allows cutting the first part of a Traceback instance e.g.
+            for formatting reasons (removing some uninteresting bits that deal
+            with handling of the exception/traceback)
+        """
+        for x in self:
+            code = x.frame.code
+            codepath = code.path
+            if ((path is None or codepath == path) and
+                (excludepath is None or not hasattr(codepath, 'relto') or
+                 not codepath.relto(excludepath)) and
+                (lineno is None or x.lineno == lineno) and
+                (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+                return Traceback(x._rawentry, self._excinfo)
+        return self
+
+    def __getitem__(self, key):
+        val = super(Traceback, self).__getitem__(key)
+        if isinstance(key, type(slice(0))):
+            # slicing returns a Traceback, not a plain list
+            val = self.__class__(val)
+        return val
+
+    def filter(self, fn=lambda x: not x.ishidden()):
+        """ return a Traceback instance with certain items removed
+
+            fn is a function that gets a single argument, a TracebackEntry
+            instance, and should return True when the item should be added
+            to the Traceback, False when not
+
+            by default this removes all the TracebackEntries which are hidden
+            (see ishidden() above)
+        """
+        return Traceback(filter(fn, self), self._excinfo)
+
+    def getcrashentry(self):
+        """ return last non-hidden traceback entry that lead
+        to the exception of a traceback.
+        """
+        # scan from the innermost entry outwards
+        for i in range(-1, -len(self)-1, -1):
+            entry = self[i]
+            if not entry.ishidden():
+                return entry
+        return self[-1]
+
+    def recursionindex(self):
+        """ return the index of the frame/TracebackEntry where recursion
+            originates if appropriate, None if no recursion occurred
+        """
+        cache = {}
+        for i, entry in enumerate(self):
+            # id for the code.raw is needed to work around
+            # the strange metaprogramming in the decorator lib from pypi
+            # which generates code objects that have hash/value equality
+            #XXX needs a test
+            key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+            #print "checking for recursion at", key
+            l = cache.setdefault(key, [])
+            if l:
+                # same code location seen before: compare locals to confirm
+                f = entry.frame
+                loc = f.f_locals
+                for otherloc in l:
+                    if f.is_true(f.eval(co_equal,
+                        __recursioncache_locals_1=loc,
+                        __recursioncache_locals_2=otherloc)):
+                        return i
+            l.append(entry.frame.f_locals)
+        return None
+
+
+# pre-compiled comparison expression used by Traceback.recursionindex()
+# to test whether two frames have equal locals
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+                   '?', 'eval')
+
+class ExceptionInfo(object):
+    """ wraps sys.exc_info() objects and offers
+        help for navigating the traceback.
+    """
+    # prefix stripped from exconly(tryshort=True) output for plain asserts
+    _striptext = ''
+    def __init__(self, tup=None, exprinfo=None):
+        import _pytest._code
+        if tup is None:
+            # default to the exception currently being handled
+            tup = sys.exc_info()
+            if exprinfo is None and isinstance(tup[1], AssertionError):
+                exprinfo = getattr(tup[1], 'msg', None)
+                if exprinfo is None:
+                    exprinfo = py._builtin._totext(tup[1])
+                if exprinfo and exprinfo.startswith('assert '):
+                    self._striptext = 'AssertionError: '
+        self._excinfo = tup
+        #: the exception class
+        self.type = tup[0]
+        #: the exception instance
+        self.value = tup[1]
+        #: the exception raw traceback
+        self.tb = tup[2]
+        #: the exception type name
+        self.typename = self.type.__name__
+        #: the exception traceback (_pytest._code.Traceback instance)
+        # a weakref is passed so traceback entries do not keep us alive
+        self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self))
+
+    def __repr__(self):
+        return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+    def exconly(self, tryshort=False):
+        """ return the exception as a string
+
+            when 'tryshort' resolves to True, and the exception is a
+            _pytest._code._AssertionError, only the actual exception part of
+            the exception representation is returned (so 'AssertionError: ' is
+            removed from the beginning)
+        """
+        lines = format_exception_only(self.type, self.value)
+        text = ''.join(lines)
+        text = text.rstrip()
+        if tryshort:
+            if text.startswith(self._striptext):
+                text = text[len(self._striptext):]
+        return text
+
+    def errisinstance(self, exc):
+        """ return True if the exception is an instance of exc """
+        return isinstance(self.value, exc)
+
+    def _getreprcrash(self):
+        # location + message of the entry that actually raised
+        exconly = self.exconly(tryshort=True)
+        entry = self.traceback.getcrashentry()
+        path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+        return ReprFileLocation(path, lineno+1, exconly)
+
+    def getrepr(self, showlocals=False, style="long",
+            abspath=False, tbfilter=True, funcargs=False):
+        """ return str()able representation of this exception info.
+            showlocals: show locals per traceback entry
+            style: long|short|no|native traceback style
+            tbfilter: hide entries (where __tracebackhide__ is true)
+
+            in case of style==native, tbfilter and showlocals is ignored.
+        """
+        if style == 'native':
+            # delegate to the stdlib traceback formatting
+            return ReprExceptionInfo(ReprTracebackNative(
+                py.std.traceback.format_exception(
+                    self.type,
+                    self.value,
+                    self.traceback[0]._rawentry,
+                )), self._getreprcrash())
+
+        fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+        return fmt.repr_excinfo(self)
+
+    def __str__(self):
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return str(loc)
+
+    def __unicode__(self):
+        # python 2 only (unicode builtin)
+        entry = self.traceback[-1]
+        loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+        return unicode(loc)
+
+    def match(self, regexp):
+        """
+        Match the regular expression 'regexp' on the string representation of
+        the exception. If it matches then True is returned (so that it is
+        possible to write 'assert excinfo.match()'). If it doesn't match an
+        AssertionError is raised.
+        """
+        __tracebackhide__ = True
+        if not re.search(regexp, str(self.value)):
+            assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
+                regexp, self.value)
+        return True
+
+
+class FormattedExcinfo(object):
+    """ presenting information about failing Functions and Generators. """
+    # for traceback entries: ">" marks the failing source line,
+    # "E" marks exception-message lines
+    flow_marker = ">"
+    fail_marker = "E"
+
+    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+        self.showlocals = showlocals
+        self.style = style
+        self.tbfilter = tbfilter
+        self.funcargs = funcargs
+        self.abspath = abspath
+        # shared AST cache, threaded through entry.getsource() calls
+        self.astcache = {}
+
+    def _getindent(self, source):
+        # figure out indent for given source
+        try:
+            s = str(source.getstatement(len(source)-1))
+        except KeyboardInterrupt:
+            raise
+        except:
+            # broken source: fall back to the raw last line, then to 0
+            try:
+                s = str(source[-1])
+            except KeyboardInterrupt:
+                raise
+            except:
+                return 0
+        return 4 + (len(s) - len(s.lstrip()))
+
+    def _getentrysource(self, entry):
+        source = entry.getsource(self.astcache)
+        if source is not None:
+            source = source.deindent()
+        return source
+
+    def _saferepr(self, obj):
+        return py.io.saferepr(obj)
+
+    def repr_args(self, entry):
+        # returns None unless funcargs display was requested
+        if self.funcargs:
+            args = []
+            for argname, argvalue in entry.frame.getargs(var=True):
+                args.append((argname, self._saferepr(argvalue)))
+            return ReprFuncArgs(args)
+
+    def get_source(self, source, line_index=-1, excinfo=None, short=False):
+        """ return formatted and marked up source lines. """
+        import _pytest._code
+        lines = []
+        if source is None or line_index >= len(source.lines):
+            source = _pytest._code.Source("???")
+            line_index = 0
+        if line_index < 0:
+            line_index += len(source)
+        space_prefix = "    "
+        if short:
+            lines.append(space_prefix + source.lines[line_index].strip())
+        else:
+            # full context: prefix the failing line with the flow marker
+            for line in source.lines[:line_index]:
+                lines.append(space_prefix + line)
+            lines.append(self.flow_marker + "   " + source.lines[line_index])
+            for line in source.lines[line_index+1:]:
+                lines.append(space_prefix + line)
+        if excinfo is not None:
+            indent = 4 if short else self._getindent(source)
+            lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+        return lines
+
+    def get_exconly(self, excinfo, indent=4, markall=False):
+        lines = []
+        indent = " " * indent
+        # get the real exception information out
+        exlines = excinfo.exconly(tryshort=True).split('\n')
+        failindent = self.fail_marker + indent[1:]
+        for line in exlines:
+            lines.append(failindent + line)
+            if not markall:
+                # only the first line carries the fail marker
+                failindent = indent
+        return lines
+
+    def repr_locals(self, locals):
+        if self.showlocals:
+            lines = []
+            # skip pytest-internal "@..." names
+            keys = [loc for loc in locals if loc[0] != "@"]
+            keys.sort()
+            for name in keys:
+                value = locals[name]
+                if name == '__builtins__':
+                    lines.append("__builtins__ = <builtins>")
+                else:
+                    # This formatting could all be handled by the
+                    # _repr() function, which is only reprlib.Repr in
+                    # disguise, so is very configurable.
+                    str_repr = self._saferepr(value)
+                    #if len(str_repr) < 70 or not isinstance(value,
+                    #                            (list, tuple, dict)):
+                    lines.append("%-10s = %s" %(name, str_repr))
+                    #else:
+                    #    self._line("%-10s =\\" % (name,))
+                    #    # XXX
+                    #    py.std.pprint.pprint(value, stream=self.excinfowriter)
+            return ReprLocals(lines)
+
+    def repr_traceback_entry(self, entry, excinfo=None):
+        import _pytest._code
+        source = self._getentrysource(entry)
+        if source is None:
+            source = _pytest._code.Source("???")
+            line_index = 0
+        else:
+            # entry.getfirstlinesource() can be -1, should be 0 on jython
+            line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+        lines = []
+        # per-entry style override (set via set_repr_style) wins
+        style = entry._repr_style
+        if style is None:
+            style = self.style
+        if style in ("short", "long"):
+            short = style == "short"
+            reprargs = self.repr_args(entry) if not short else None
+            s = self.get_source(source, line_index, excinfo, short=short)
+            lines.extend(s)
+            if short:
+                message = "in %s" %(entry.name)
+            else:
+                message = excinfo and excinfo.typename or ""
+            path = self._makepath(entry.path)
+            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+            localsrepr = None
+            if not short:
+                localsrepr =  self.repr_locals(entry.locals)
+            return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+        if excinfo:
+            lines.extend(self.get_exconly(excinfo, indent=4))
+        return ReprEntry(lines, None, None, None, style)
+
+    def _makepath(self, path):
+        # prefer the shorter relative path unless abspath was requested
+        if not self.abspath:
+            try:
+                np = py.path.local().bestrelpath(path)
+            except OSError:
+                return path
+            if len(np) < len(str(path)):
+                path = np
+        return path
+
+    def repr_traceback(self, excinfo):
+        traceback = excinfo.traceback
+        if self.tbfilter:
+            traceback = traceback.filter()
+        recursionindex = None
+        if is_recursion_error(excinfo):
+            recursionindex = traceback.recursionindex()
+        last = traceback[-1]
+        entries = []
+        extraline = None
+        for index, entry in enumerate(traceback):
+            # only the last entry gets the exception message attached
+            einfo = (last == entry) and excinfo or None
+            reprentry = self.repr_traceback_entry(entry, einfo)
+            entries.append(reprentry)
+            if index == recursionindex:
+                extraline = "!!! Recursion detected (same locals & position)"
+                break
+        return ReprTraceback(entries, extraline, style=self.style)
+
+
+    def repr_excinfo(self, excinfo):
+        if sys.version_info[0] < 3:
+            # python 2: no exception chaining, single traceback
+            reprtraceback = self.repr_traceback(excinfo)
+            reprcrash = excinfo._getreprcrash()
+
+            return ReprExceptionInfo(reprtraceback, reprcrash)
+        else:
+            # python 3: walk the __cause__/__context__ chain
+            repr_chain = []
+            e = excinfo.value
+            descr = None
+            while e is not None:
+                if excinfo:
+                    reprtraceback = self.repr_traceback(excinfo)
+                    reprcrash = excinfo._getreprcrash()
+                else:
+                    # fallback to native repr if the exception doesn't have a traceback:
+                    # ExceptionInfo objects require a full traceback to work
+                    reprtraceback = ReprTracebackNative(py.std.traceback.format_exception(type(e), e, None))
+                    reprcrash = None
+
+                repr_chain += [(reprtraceback, reprcrash, descr)]
+                if e.__cause__ is not None:
+                    e = e.__cause__
+                    excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
+                    descr = 'The above exception was the direct cause of the following exception:'
+                elif e.__context__ is not None:
+                    e = e.__context__
+                    excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None
+                    descr = 'During handling of the above exception, another exception occurred:'
+                else:
+                    e = None
+            # oldest exception first, like the interpreter prints them
+            repr_chain.reverse()
+            return ExceptionChainRepr(repr_chain)
+
+
+class TerminalRepr(object):
+    """Base class for objects that can write themselves to a TerminalWriter.
+
+    Subclasses implement ``toterminal(tw)``; string conversion renders
+    through an in-memory writer.
+    """
+
+    def __str__(self):
+        s = self.__unicode__()
+        if sys.version_info[0] < 3:
+            # Python 2 str() must return bytes, not unicode
+            s = s.encode('utf-8')
+        return s
+
+    def __unicode__(self):
+        # FYI this is called from pytest-xdist's serialization of exception
+        # information.
+        io = py.io.TextIO()
+        tw = py.io.TerminalWriter(file=io)
+        self.toterminal(tw)
+        return io.getvalue().strip()
+
+    def __repr__(self):
+        return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ExceptionRepr(TerminalRepr):
+    """TerminalRepr carrying optional named extra sections.
+
+    Sections are (name, content, separator) triples rendered after the
+    main exception output (e.g. captured stdout/stderr).
+    """
+
+    def __init__(self):
+        self.sections = []
+
+    def addsection(self, name, content, sep="-"):
+        # register an extra section; rendered in insertion order
+        self.sections.append((name, content, sep))
+
+    def toterminal(self, tw):
+        for name, content, sep in self.sections:
+            tw.sep(sep, name)
+            tw.line(content)
+
+
+class ExceptionChainRepr(ExceptionRepr):
+    """Repr of a chain of exceptions (Python 3 __cause__/__context__).
+
+    ``chain`` is a list of (reprtraceback, reprcrash, description) tuples
+    ordered oldest-first; the description explains how each exception led
+    to the next one.
+    """
+
+    def __init__(self, chain):
+        super(ExceptionChainRepr, self).__init__()
+        self.chain = chain
+        # reprcrash and reprtraceback of the outermost (the newest) exception
+        # in the chain
+        self.reprtraceback = chain[-1][0]
+        self.reprcrash = chain[-1][1]
+
+    def toterminal(self, tw):
+        for element in self.chain:
+            element[0].toterminal(tw)
+            if element[2] is not None:
+                # chaining description between two exceptions
+                tw.line("")
+                tw.line(element[2], yellow=True)
+        super(ExceptionChainRepr, self).toterminal(tw)
+
+
+class ReprExceptionInfo(ExceptionRepr):
+    """Repr of a single (unchained) exception: traceback plus crash line."""
+
+    def __init__(self, reprtraceback, reprcrash):
+        super(ReprExceptionInfo, self).__init__()
+        self.reprtraceback = reprtraceback
+        self.reprcrash = reprcrash
+
+    def toterminal(self, tw):
+        self.reprtraceback.toterminal(tw)
+        super(ReprExceptionInfo, self).toterminal(tw)
+
+class ReprTraceback(TerminalRepr):
+    """Repr of a whole traceback: a sequence of entry reprs plus an
+    optional extra line (e.g. the recursion-detected notice)."""
+
+    entrysep = "_ "
+
+    def __init__(self, reprentries, extraline, style):
+        self.reprentries = reprentries
+        self.extraline = extraline
+        self.style = style
+
+    def toterminal(self, tw):
+        # the entries might have different styles
+        for i, entry in enumerate(self.reprentries):
+            if entry.style == "long":
+                tw.line("")
+            entry.toterminal(tw)
+            if i < len(self.reprentries) - 1:
+                next_entry = self.reprentries[i+1]
+                # separator only around "long" entries (or before one)
+                if entry.style == "long" or \
+                   entry.style == "short" and next_entry.style == "long":
+                    tw.sep(self.entrysep)
+
+        if self.extraline:
+            tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+    """ReprTraceback built directly from pre-formatted traceback lines
+    (the output of traceback.format_exception)."""
+
+    def __init__(self, tblines):
+        self.style = "native"
+        self.reprentries = [ReprEntryNative(tblines)]
+        self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+    """Single traceback entry holding raw, already-formatted lines."""
+
+    style = "native"
+
+    def __init__(self, tblines):
+        self.lines = tblines
+
+    def toterminal(self, tw):
+        tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+    """Repr of one traceback entry: source lines, optional function args,
+    optional locals, and the file location, rendered according to style
+    ("short" or "long")."""
+
+    localssep = "_ "
+
+    def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+        self.lines = lines
+        self.reprfuncargs = reprfuncargs
+        self.reprlocals = reprlocals
+        self.reprfileloc = filelocrepr
+        self.style = style
+
+    def toterminal(self, tw):
+        if self.style == "short":
+            # short style: location first, then the source/error lines
+            self.reprfileloc.toterminal(tw)
+            for line in self.lines:
+                # lines starting with "E   " are the failing-assertion lines
+                red = line.startswith("E   ")
+                tw.line(line, bold=True, red=red)
+            #tw.line("")
+            return
+        if self.reprfuncargs:
+            self.reprfuncargs.toterminal(tw)
+        for line in self.lines:
+            red = line.startswith("E   ")
+            tw.line(line, bold=True, red=red)
+        if self.reprlocals:
+            #tw.sep(self.localssep, "Locals")
+            tw.line("")
+            self.reprlocals.toterminal(tw)
+        if self.reprfileloc:
+            if self.lines:
+                tw.line("")
+            self.reprfileloc.toterminal(tw)
+
+    def __str__(self):
+        return "%s\n%s\n%s" % ("\n".join(self.lines),
+                               self.reprlocals,
+                               self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+    """Repr of a 'path:lineno: message' location line."""
+
+    def __init__(self, path, lineno, message):
+        self.path = str(path)
+        self.lineno = lineno
+        self.message = message
+
+    def toterminal(self, tw):
+        # filename and lineno output for each entry,
+        # using an output format that most editors understand
+        msg = self.message
+        # only the first line of the message goes on the location line
+        i = msg.find("\n")
+        if i != -1:
+            msg = msg[:i]
+        tw.write(self.path, bold=True, red=True)
+        tw.line(":%s: %s" % (self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+    """Repr of the local variables of a frame, one formatted line each."""
+
+    def __init__(self, lines):
+        self.lines = lines
+
+    def toterminal(self, tw):
+        for line in self.lines:
+            tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+    """Repr of a function's arguments, word-wrapped to terminal width."""
+
+    def __init__(self, args):
+        # args: sequence of (name, value-repr) pairs
+        self.args = args
+
+    def toterminal(self, tw):
+        if self.args:
+            linesofar = ""
+            for name, value in self.args:
+                ns = "%s = %s" %(name, value)
+                # flush the current line if adding this arg would overflow
+                if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+                    if linesofar:
+                        tw.line(linesofar)
+                    linesofar =  ns
+                else:
+                    if linesofar:
+                        linesofar += ", " + ns
+                    else:
+                        linesofar = ns
+            if linesofar:
+                tw.line(linesofar)
+            tw.line("")
+
+
+def getrawcode(obj, trycall=True):
+    """ return code object for given function.
+
+    Unwraps bound methods (im_func), Python 2 functions (func_code),
+    frames (f_code) and plain functions (__code__).  If ``trycall`` is
+    true and no code object was found, callables (but not classes) are
+    probed through their __call__ method.
+    """
+    try:
+        return obj.__code__
+    except AttributeError:
+        obj = getattr(obj, 'im_func', obj)
+        obj = getattr(obj, 'func_code', obj)
+        obj = getattr(obj, 'f_code', obj)
+        obj = getattr(obj, '__code__', obj)
+        if trycall and not hasattr(obj, 'co_firstlineno'):
+            if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+                x = getrawcode(obj.__call__, trycall=False)
+                if hasattr(x, 'co_firstlineno'):
+                    return x
+        # may still not be a code object; callers check co_* attributes
+        return obj
+
+
+if sys.version_info[:2] >= (3, 5):  # RecursionError introduced in 3.5
+    def is_recursion_error(excinfo):
+        """Return True if excinfo wraps a recursion-depth error."""
+        return excinfo.errisinstance(RecursionError)  # noqa
+else:
+    def is_recursion_error(excinfo):
+        """Return True if excinfo wraps a recursion-depth error.
+
+        Pre-3.5 fallback: detect it by message on a plain RuntimeError.
+        """
+        if not excinfo.errisinstance(RuntimeError):
+            return False
+        try:
+            return "maximum recursion depth exceeded" in str(excinfo.value)
+        except UnicodeError:
+            # str() of the exception value itself may fail; treat as not
+            # a recursion error rather than crash during reporting
+            return False
diff --git a/lib/spack/external/_pytest/_code/source.py b/lib/spack/external/_pytest/_code/source.py
new file mode 100644
index 0000000000..fcec0f5ca7
--- /dev/null
+++ b/lib/spack/external/_pytest/_code/source.py
@@ -0,0 +1,414 @@
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+cpy_compile = compile
+
+try:
+    import _ast
+    from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+    _AST_FLAG = 0
+    _ast = None
+
+
+class Source(object):
+    """ an immutable object holding a source code fragment,
+        possibly deindenting it.
+    """
+    # class-wide counter used to invent unique artificial filenames
+    _compilecounter = 0
+    def __init__(self, *parts, **kwargs):
+        self.lines = lines = []
+        de = kwargs.get('deindent', True)
+        rstrip = kwargs.get('rstrip', True)
+        for part in parts:
+            if not part:
+                partlines = []
+            # NOTE(review): deliberately not 'elif' — a falsy Source/list
+            # part falls through and is handled by the branches below;
+            # verify against upstream before changing
+            if isinstance(part, Source):
+                partlines = part.lines
+            elif isinstance(part, (tuple, list)):
+                partlines = [x.rstrip("\n") for x in part]
+            elif isinstance(part, py.builtin._basestring):
+                partlines = part.split('\n')
+                if rstrip:
+                    # drop trailing blank lines
+                    while partlines:
+                        if partlines[-1].strip():
+                            break
+                        partlines.pop()
+            else:
+                # arbitrary object: look up its source via inspect
+                partlines = getsource(part, deindent=de).lines
+            if de:
+                partlines = deindent(partlines)
+            lines.extend(partlines)
+
+    def __eq__(self, other):
+        try:
+            return self.lines == other.lines
+        except AttributeError:
+            if isinstance(other, str):
+                return str(self) == other
+            return False
+
+    # defining __eq__ without __hash__: instances are unhashable
+    __hash__ = None
+
+    def __getitem__(self, key):
+        if isinstance(key, int):
+            return self.lines[key]
+        else:
+            if key.step not in (None, 1):
+                raise IndexError("cannot slice a Source with a step")
+            newsource = Source()
+            newsource.lines = self.lines[key.start:key.stop]
+            return newsource
+
+    def __len__(self):
+        return len(self.lines)
+
+    def strip(self):
+        """ return new source object with trailing
+            and leading blank lines removed.
+        """
+        start, end = 0, len(self)
+        while start < end and not self.lines[start].strip():
+            start += 1
+        while end > start and not self.lines[end-1].strip():
+            end -= 1
+        source = Source()
+        source.lines[:] = self.lines[start:end]
+        return source
+
+    def putaround(self, before='', after='', indent=' ' * 4):
+        """ return a copy of the source object with
+            'before' and 'after' wrapped around it.
+        """
+        before = Source(before)
+        after = Source(after)
+        newsource = Source()
+        lines = [ (indent + line) for line in self.lines]
+        newsource.lines = before.lines + lines +  after.lines
+        return newsource
+
+    def indent(self, indent=' ' * 4):
+        """ return a copy of the source object with
+            all lines indented by the given indent-string.
+        """
+        newsource = Source()
+        newsource.lines = [(indent+line) for line in self.lines]
+        return newsource
+
+    def getstatement(self, lineno, assertion=False):
+        """ return Source statement which contains the
+            given linenumber (counted from 0).
+        """
+        start, end = self.getstatementrange(lineno, assertion)
+        return self[start:end]
+
+    def getstatementrange(self, lineno, assertion=False):
+        """ return (start, end) tuple which spans the minimal
+            statement region which containing the given lineno.
+        """
+        if not (0 <= lineno < len(self)):
+            raise IndexError("lineno out of range")
+        ast, start, end = getstatementrange_ast(lineno, self)
+        return start, end
+
+    def deindent(self, offset=None):
+        """ return a new source object deindented by offset.
+            If offset is None then guess an indentation offset from
+            the first non-blank line.  Subsequent lines which have a
+            lower indentation offset will be copied verbatim as
+            they are assumed to be part of multilines.
+        """
+        # XXX maybe use the tokenizer to properly handle multiline
+        #     strings etc.pp?
+        newsource = Source()
+        newsource.lines[:] = deindent(self.lines, offset)
+        return newsource
+
+    def isparseable(self, deindent=True):
+        """ return True if source is parseable, heuristically
+            deindenting it by default.
+        """
+        try:
+            import parser
+        except ImportError:
+            # no parser module (e.g. Jython): fall back to compile()
+            syntax_checker = lambda x: compile(x, 'asd', 'exec')
+        else:
+            syntax_checker = parser.suite
+
+        if deindent:
+            source = str(self.deindent())
+        else:
+            source = str(self)
+        try:
+            #compile(source+'\n', "x", "exec")
+            syntax_checker(source+'\n')
+        except KeyboardInterrupt:
+            raise
+        except Exception:
+            return False
+        else:
+            return True
+
+    def __str__(self):
+        return "\n".join(self.lines)
+
+    def compile(self, filename=None, mode='exec',
+                flag=generators.compiler_flag,
+                dont_inherit=0, _genframe=None):
+        """ return compiled code object. if filename is None
+            invent an artificial filename which displays
+            the source/line position of the caller frame.
+        """
+        if not filename or py.path.local(filename).check(file=0):
+            if _genframe is None:
+                _genframe = sys._getframe(1) # the caller
+            fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+            base = "<%d-codegen " % self._compilecounter
+            self.__class__._compilecounter += 1
+            if not filename:
+                filename = base + '%s:%d>' % (fn, lineno)
+            else:
+                filename = base + '%r %s:%d>' % (filename, fn, lineno)
+        source = "\n".join(self.lines) + '\n'
+        try:
+            co = cpy_compile(source, filename, mode, flag)
+        except SyntaxError:
+            ex = sys.exc_info()[1]
+            # re-represent syntax errors from parsing python strings
+            msglines = self.lines[:ex.lineno]
+            if ex.offset:
+                msglines.append(" "*ex.offset + '^')
+            msglines.append("(code was compiled probably from here: %s)" % filename)
+            newex = SyntaxError('\n'.join(msglines))
+            newex.offset = ex.offset
+            newex.lineno = ex.lineno
+            newex.text = ex.text
+            raise newex
+        else:
+            if flag & _AST_FLAG:
+                return co
+            # register the source in linecache so tracebacks can show it
+            lines = [(x + "\n") for x in self.lines]
+            py.std.linecache.cache[filename] = (1, None, lines, filename)
+            return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+            generators.compiler_flag, dont_inherit=0):
+    """ compile the given source to a raw code object,
+        and maintain an internal cache which allows later
+        retrieval of the source code for the code object
+        and any recursively created code objects.
+    """
+    if _ast is not None and isinstance(source, _ast.AST):
+        # XXX should Source support having AST?
+        return cpy_compile(source, filename, mode, flags, dont_inherit)
+    _genframe = sys._getframe(1) # the caller
+    s = Source(source)
+    co = s.compile(filename, mode, flags, _genframe=_genframe)
+    return co
+
+
+def getfslineno(obj):
+    """ Return source location (path, lineno) for the given object.
+    If the source cannot be determined return ("", -1)
+    """
+    import _pytest._code
+    try:
+        code = _pytest._code.Code(obj)
+    except TypeError:
+        # obj has no code object; fall back to inspect-based lookup
+        try:
+            fn = (py.std.inspect.getsourcefile(obj) or
+                  py.std.inspect.getfile(obj))
+        except TypeError:
+            return "", -1
+
+        fspath = fn and py.path.local(fn) or None
+        lineno = -1
+        if fspath:
+            try:
+                _, lineno = findsource(obj)
+            except IOError:
+                pass
+    else:
+        fspath = code.path
+        lineno = code.firstlineno
+    assert isinstance(lineno, int)
+    return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+    """Return (Source, lineno) for obj, or (None, -1) on any failure."""
+    try:
+        sourcelines, lineno = py.std.inspect.findsource(obj)
+    except py.builtin._sysex:
+        # system-exiting exceptions must propagate
+        raise
+    except:
+        # NOTE(review): deliberately broad — inspect.findsource can raise
+        # a variety of errors for synthetic/odd objects
+        return None, -1
+    source = Source()
+    source.lines = [line.rstrip() for line in sourcelines]
+    return source, lineno
+
+
+def getsource(obj, **kwargs):
+    """Return a Source for obj's underlying code object."""
+    import _pytest._code
+    obj = _pytest._code.getrawcode(obj)
+    try:
+        strsrc = inspect.getsource(obj)
+    except IndentationError:
+        strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+    assert isinstance(strsrc, str)
+    return Source(strsrc, **kwargs)
+
+
+def deindent(lines, offset=None):
+    """Remove ``offset`` leading spaces from lines, token-aware.
+
+    If offset is None it is guessed from the first non-blank line.
+    Lines belonging to multiline tokens (e.g. triple-quoted strings)
+    are copied verbatim.
+    """
+    if offset is None:
+        for line in lines:
+            line = line.expandtabs()
+            s = line.lstrip()
+            if s:
+                offset = len(line)-len(s)
+                break
+        else:
+            offset = 0
+    if offset == 0:
+        return list(lines)
+    newlines = []
+
+    def readline_generator(lines):
+        # tokenize wants a readline-like callable; pad with '' at EOF
+        for line in lines:
+            yield line + '\n'
+        while True:
+            yield ''
+
+    it = readline_generator(lines)
+
+    try:
+        for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+            if sline > len(lines):
+                break # End of input reached
+            if sline > len(newlines):
+                line = lines[sline - 1].expandtabs()
+                if line.lstrip() and line[:offset].isspace():
+                    line = line[offset:] # Deindent
+                newlines.append(line)
+
+            for i in range(sline, eline):
+                # Don't deindent continuing lines of
+                # multiline tokens (i.e. multiline strings)
+                newlines.append(lines[i])
+    except (IndentationError, tokenize.TokenError):
+        pass
+    # Add any lines we didn't see. E.g. if an exception was raised.
+    newlines.extend(lines[len(newlines):])
+    return newlines
+
+
+def get_statement_startend2(lineno, node):
+    """Return the (start, end) 0-based line span of the statement that
+    contains ``lineno``, derived from the AST ``node``; end may be None
+    when the statement is the last one."""
+    import ast
+    # flatten all statements and except handlers into one lineno-list
+    # AST's line numbers start indexing at 1
+    l = []
+    for x in ast.walk(node):
+        if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+            l.append(x.lineno - 1)
+            for name in "finalbody", "orelse":
+                val = getattr(x, name, None)
+                if val:
+                    # treat the finally/orelse part as its own statement
+                    l.append(val[0].lineno - 1 - 1)
+    l.sort()
+    insert_index = bisect_right(l, lineno)
+    start = l[insert_index - 1]
+    if insert_index >= len(l):
+        end = None
+    else:
+        end = l[insert_index]
+    return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+    """Return (astnode, start, end) for the statement containing lineno,
+    parsing ``source`` to an AST if no node was supplied."""
+    if astnode is None:
+        content = str(source)
+        if sys.version_info < (2,7):
+            content += "\n"
+        try:
+            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
+        except ValueError:
+            start, end = getstatementrange_old(lineno, source, assertion)
+            return None, start, end
+    start, end = get_statement_startend2(lineno, astnode)
+    # we need to correct the end:
+    # - ast-parsing strips comments
+    # - there might be empty lines
+    # - we might have lesser indented code blocks at the end
+    if end is None:
+        end = len(source.lines)
+
+    if end > start + 1:
+        # make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper used which inspect.getsource() uses itself
+        block_finder = inspect.BlockFinder()
+        # if we start with an indented line, put blockfinder to "started" mode
+        block_finder.started = source.lines[start][0].isspace()
+        it = ((x + "\n") for x in source.lines[start:end])
+        try:
+            for tok in tokenize.generate_tokens(lambda: next(it)):
+                block_finder.tokeneater(*tok)
+        except (inspect.EndOfBlock, IndentationError):
+            end = block_finder.last + start
+        except Exception:
+            # best effort: keep the AST-derived end on tokenizer trouble
+            pass
+
+    # the end might still point to a comment or empty line, correct it
+    while end:
+        line = source.lines[end - 1].lstrip()
+        if line.startswith("#") or not line:
+            end -= 1
+        else:
+            break
+    return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+    """ return (start, end) tuple which spans the minimal
+        statement region which containing the given lineno.
+        raise an IndexError if no such statementrange can be found.
+    """
+    # XXX this logic is only used on python2.4 and below
+    # 1. find the start of the statement
+    from codeop import compile_command
+    for start in range(lineno, -1, -1):
+        if assertion:
+            line = source.lines[start]
+            # the following lines are not fully tested, change with care
+            if 'super' in line and 'self' in line and '__init__' in line:
+                raise IndexError("likely a subclass")
+            if "assert" not in line and "raise" not in line:
+                continue
+        trylines = source.lines[start:lineno+1]
+        # quick hack to prepare parsing an indented line with
+        # compile_command() (which errors on "return" outside defs)
+        trylines.insert(0, 'def xxx():')
+        trysource = '\n '.join(trylines)
+        #              ^ space here
+        try:
+            compile_command(trysource)
+        except (SyntaxError, OverflowError, ValueError):
+            continue
+
+        # 2. find the end of the statement
+        for end in range(lineno+1, len(source)+1):
+            trysource = source[start:end]
+            if trysource.isparseable():
+                return start, end
+    raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
diff --git a/lib/spack/external/_pytest/_pluggy.py b/lib/spack/external/_pytest/_pluggy.py
new file mode 100644
index 0000000000..87d32cf8dd
--- /dev/null
+++ b/lib/spack/external/_pytest/_pluggy.py
@@ -0,0 +1,11 @@
+"""
+imports symbols from vendored "pluggy" if available, otherwise
+falls back to importing "pluggy" from the default namespace.
+"""
+
+try:
+    from _pytest.vendored_packages.pluggy import *  # noqa
+    from _pytest.vendored_packages.pluggy import __version__  # noqa
+except ImportError:
+    from pluggy import *  # noqa
+    from pluggy import __version__  # noqa
diff --git a/lib/spack/external/_pytest/assertion/__init__.py b/lib/spack/external/_pytest/assertion/__init__.py
new file mode 100644
index 0000000000..3f14a7ae76
--- /dev/null
+++ b/lib/spack/external/_pytest/assertion/__init__.py
@@ -0,0 +1,164 @@
+"""
+support for presenting detailed information in failing assertions.
+"""
+import py
+import os
+import sys
+
+from _pytest.assertion import util
+from _pytest.assertion import rewrite
+
+
+def pytest_addoption(parser):
+    """Add the --assert command line option (rewrite/plain modes)."""
+    group = parser.getgroup("debugconfig")
+    group.addoption('--assert',
+                    action="store",
+                    dest="assertmode",
+                    choices=("rewrite", "plain",),
+                    default="rewrite",
+                    metavar="MODE",
+                    help="""Control assertion debugging tools.  'plain'
+                            performs no assertion debugging.  'rewrite'
+                            (the default) rewrites assert statements in
+                            test modules on import to provide assert
+                            expression information.""")
+
+
+def pytest_namespace():
+    """Expose register_assert_rewrite under the pytest namespace."""
+    return {'register_assert_rewrite': register_assert_rewrite}
+
+
+def register_assert_rewrite(*names):
+    """Register one or more module names to be rewritten on import.
+
+    This function will make sure that this module or all modules inside
+    the package will get their assert statements rewritten.
+    Thus you should make sure to call this before the module is
+    actually imported, usually in your __init__.py if you are a plugin
+    using a package.
+
+    :raise TypeError: if the given module names are not strings.
+    """
+    for name in names:
+        if not isinstance(name, str):
+            msg = 'expected module names as *args, got {0} instead'
+            raise TypeError(msg.format(repr(names)))
+    # find the installed rewriting import hook, if any
+    for hook in sys.meta_path:
+        if isinstance(hook, rewrite.AssertionRewritingHook):
+            importhook = hook
+            break
+    else:
+        # rewriting disabled: record the request on a no-op hook
+        importhook = DummyRewriteHook()
+    importhook.mark_rewrite(*names)
+
+
+class DummyRewriteHook(object):
+    """A no-op import hook for when rewriting is disabled."""
+
+    def mark_rewrite(self, *names):
+        # intentionally ignore: rewriting is not active
+        pass
+
+
+class AssertionState:
+    """State for the assertion plugin."""
+
+    def __init__(self, config, mode):
+        # mode: the assertmode option value ('rewrite' or 'plain')
+        self.mode = mode
+        self.trace = config.trace.root.get("assertion")
+        # set later to the installed AssertionRewritingHook, if any
+        self.hook = None
+
+
+def install_importhook(config):
+    """Try to install the rewrite hook, raise SystemError if it fails."""
+    # Both Jython and CPython 2.6.0 have AST bugs that make the
+    # assertion rewriting hook malfunction.
+    if (sys.platform.startswith('java') or
+            sys.version_info[:3] == (2, 6, 0)):
+        raise SystemError('rewrite not supported')
+
+    config._assertstate = AssertionState(config, 'rewrite')
+    config._assertstate.hook = hook = rewrite.AssertionRewritingHook(config)
+    # front of meta_path so the rewriting hook sees imports first
+    sys.meta_path.insert(0, hook)
+    config._assertstate.trace('installed rewrite import hook')
+
+    def undo():
+        # remove the hook again at config teardown
+        hook = config._assertstate.hook
+        if hook is not None and hook in sys.meta_path:
+            sys.meta_path.remove(hook)
+
+    config.add_cleanup(undo)
+    return hook
+
+
+def pytest_collection(session):
+    """Attach the current session to the rewrite hook during collection."""
+    # this hook is only called when test modules are collected
+    # so for example not in the master process of pytest-xdist
+    # (which does not collect test modules)
+    assertstate = getattr(session.config, '_assertstate', None)
+    if assertstate:
+        if assertstate.hook is not None:
+            assertstate.hook.set_session(session)
+
+
+def _running_on_ci():
+    """Check if we're currently running on a CI system."""
+    # common CI environment markers (generic CI + Jenkins)
+    env_vars = ['CI', 'BUILD_NUMBER']
+    return any(var in os.environ for var in env_vars)
+
+
+def pytest_runtest_setup(item):
+    """Setup the pytest_assertrepr_compare hook
+
+    The newinterpret and rewrite modules will use util._reprcompare if
+    it exists to use custom reporting via the
+    pytest_assertrepr_compare hook.  This sets up this custom
+    comparison for the test.
+    """
+    def callbinrepr(op, left, right):
+        """Call the pytest_assertrepr_compare hook and prepare the result
+
+        This uses the first result from the hook and then ensures the
+        following:
+        * Overly verbose explanations are dropped unless -vv was used or
+          running on a CI.
+        * Embedded newlines are escaped to help util.format_explanation()
+          later.
+        * If the rewrite mode is used embedded %-characters are replaced
+          to protect later % formatting.
+
+        The result can be formatted by util.format_explanation() for
+        pretty printing.
+        """
+        hook_result = item.ihook.pytest_assertrepr_compare(
+            config=item.config, op=op, left=left, right=right)
+        for new_expl in hook_result:
+            if new_expl:
+                # truncate very long explanations unless -vv or on CI
+                if (sum(len(p) for p in new_expl[1:]) > 80*8 and
+                        item.config.option.verbose < 2 and
+                        not _running_on_ci()):
+                    show_max = 10
+                    truncated_lines = len(new_expl) - show_max
+                    new_expl[show_max:] = [py.builtin._totext(
+                        'Detailed information truncated (%d more lines)'
+                        ', use "-vv" to show' % truncated_lines)]
+                new_expl = [line.replace("\n", "\\n") for line in new_expl]
+                res = py.builtin._totext("\n~").join(new_expl)
+                if item.config.getvalue("assertmode") == "rewrite":
+                    # protect % so later %-formatting doesn't break
+                    res = res.replace("%", "%%")
+                return res
+    util._reprcompare = callbinrepr
+
+
+def pytest_runtest_teardown(item):
+    """Unregister the per-test comparison callback installed at setup."""
+    util._reprcompare = None
+
+
+def pytest_sessionfinish(session):
+    """Detach the session from the rewrite hook when the run ends."""
+    assertstate = getattr(session.config, '_assertstate', None)
+    if assertstate:
+        if assertstate.hook is not None:
+            assertstate.hook.set_session(None)
+
+
+# Expose this plugin's implementation for the pytest_assertrepr_compare hook
+# (delegates directly to util.assertrepr_compare)
+pytest_assertrepr_compare = util.assertrepr_compare
diff --git a/lib/spack/external/_pytest/assertion/rewrite.py b/lib/spack/external/_pytest/assertion/rewrite.py
new file mode 100644
index 0000000000..abf5b491fe
--- /dev/null
+++ b/lib/spack/external/_pytest/assertion/rewrite.py
@@ -0,0 +1,945 @@
+"""Rewrite assertion AST to produce nice error messages"""
+
+import ast
+import _ast
+import errno
+import itertools
+import imp
+import marshal
+import os
+import re
+import struct
+import sys
+import types
+from fnmatch import fnmatch
+
+import py
+from _pytest.assertion import util
+
+
+# pytest caches rewritten pycs in __pycache__.
+# The "-PYTEST" suffix keeps them distinct from the interpreter's own cache.
+if hasattr(imp, "get_tag"):
+    PYTEST_TAG = imp.get_tag() + "-PYTEST"
+else:
+    # Older interpreters without imp.get_tag(): build a tag by hand.
+    if hasattr(sys, "pypy_version_info"):
+        impl = "pypy"
+    elif sys.platform == "java":
+        impl = "jython"
+    else:
+        impl = "cpython"
+    ver = sys.version_info
+    PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
+    del ver, impl
+
+# ".pyc" normally, ".pyo" when __debug__ is false (python -O).
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
+PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
+
+# True on interpreters whose parser requires *nix newlines (not 2.7, not 3.2+).
+REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
+
+# ast.Call dropped its starargs/kwargs constructor slots in Python 3.5.
+if sys.version_info >= (3,5):
+    ast_Call = ast.Call
+else:
+    ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
+
+
+class AssertionRewritingHook(object):
+    """PEP302 Import hook which rewrites asserts."""
+
+    def __init__(self, config):
+        self.config = config
+        # Glob patterns from the python_files ini option naming test modules.
+        self.fnpats = config.getini("python_files")
+        self.session = None
+        # name -> (code object, pyc path), filled by find_module() and
+        # consumed by load_module().
+        self.modules = {}
+        self._rewritten_names = set()
+        self._register_with_pkg_resources()
+        # Import-name prefixes explicitly marked for rewriting.
+        self._must_rewrite = set()
+
+    def set_session(self, session):
+        # When set, _should_rewrite() also rewrites files that were passed
+        # on the command line (session.isinitpath).
+        self.session = session
+
+    def find_module(self, name, path=None):
+        """PEP302 finder: return self if *name* should be rewritten, else None.
+
+        Locates the module source, rewrites its asserts (or reuses a cached
+        rewritten pyc) and stashes the code object for load_module().
+        """
+        state = self.config._assertstate
+        state.trace("find_module called for: %s" % name)
+        names = name.rsplit(".", 1)
+        lastname = names[-1]
+        pth = None
+        if path is not None:
+            # Starting with Python 3.3, path is a _NamespacePath(), which
+            # causes problems if not converted to list.
+            path = list(path)
+            if len(path) == 1:
+                pth = path[0]
+        if pth is None:
+            try:
+                fd, fn, desc = imp.find_module(lastname, path)
+            except ImportError:
+                return None
+            if fd is not None:
+                fd.close()
+            tp = desc[2]
+            if tp == imp.PY_COMPILED:
+                if hasattr(imp, "source_from_cache"):
+                    try:
+                        fn = imp.source_from_cache(fn)
+                    except ValueError:
+                        # Python 3 doesn't like orphaned but still-importable
+                        # .pyc files.
+                        fn = fn[:-1]
+                else:
+                    fn = fn[:-1]
+            elif tp != imp.PY_SOURCE:
+                # Don't know what this is.
+                return None
+        else:
+            fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
+
+        fn_pypath = py.path.local(fn)
+        if not self._should_rewrite(name, fn_pypath, state):
+            return None
+
+        self._rewritten_names.add(name)
+
+        # The requested module looks like a test file, so rewrite it. This is
+        # the most magical part of the process: load the source, rewrite the
+        # asserts, and load the rewritten source. We also cache the rewritten
+        # module code in a special pyc. We must be aware of the possibility of
+        # concurrent pytest processes rewriting and loading pycs. To avoid
+        # tricky race conditions, we maintain the following invariant: The
+        # cached pyc is always a complete, valid pyc. Operations on it must be
+        # atomic. POSIX's atomic rename comes in handy.
+        write = not sys.dont_write_bytecode
+        cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
+        if write:
+            try:
+                os.mkdir(cache_dir)
+            except OSError:
+                e = sys.exc_info()[1].errno
+                if e == errno.EEXIST:
+                    # Either the __pycache__ directory already exists (the
+                    # common case) or it's blocked by a non-dir node. In the
+                    # latter case, we'll ignore it in _write_pyc.
+                    pass
+                elif e in [errno.ENOENT, errno.ENOTDIR]:
+                    # One of the path components was not a directory, likely
+                    # because we're in a zip file.
+                    write = False
+                elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
+                    state.trace("read only directory: %r" % fn_pypath.dirname)
+                    write = False
+                else:
+                    raise
+        cache_name = fn_pypath.basename[:-3] + PYC_TAIL
+        pyc = os.path.join(cache_dir, cache_name)
+        # Notice that even if we're in a read-only directory, I'm going
+        # to check for a cached pyc. This may not be optimal...
+        co = _read_pyc(fn_pypath, pyc, state.trace)
+        if co is None:
+            state.trace("rewriting %r" % (fn,))
+            source_stat, co = _rewrite_test(self.config, fn_pypath)
+            if co is None:
+                # Probably a SyntaxError in the test.
+                return None
+            if write:
+                _make_rewritten_pyc(state, source_stat, pyc, co)
+        else:
+            state.trace("found cached rewritten pyc for %r" % (fn,))
+        self.modules[name] = co, pyc
+        return self
+
+    def _should_rewrite(self, name, fn_pypath, state):
+        """Return True if the module *name* at *fn_pypath* must be rewritten."""
+        # always rewrite conftest files
+        fn = str(fn_pypath)
+        if fn_pypath.basename == 'conftest.py':
+            state.trace("rewriting conftest file: %r" % (fn,))
+            return True
+
+        if self.session is not None:
+            if self.session.isinitpath(fn):
+                state.trace("matched test file (was specified on cmdline): %r" %
+                            (fn,))
+                return True
+
+        # modules not passed explicitly on the command line are only
+        # rewritten if they match the naming convention for test files
+        for pat in self.fnpats:
+            # use fnmatch instead of fn_pypath.fnmatch because the
+            # latter might trigger an import to fnmatch.fnmatch
+            # internally, which would cause this method to be
+            # called recursively
+            if fnmatch(fn_pypath.basename, pat):
+                state.trace("matched test file %r" % (fn,))
+                return True
+
+        for marked in self._must_rewrite:
+            if name.startswith(marked):
+                state.trace("matched marked file %r (from %r)" % (name, marked))
+                return True
+
+        return False
+
+    def mark_rewrite(self, *names):
+        """Mark import names as needing to be re-written.
+
+        The named module or package as well as any nested modules will
+        be re-written on import.
+        """
+        # Warn for modules that were imported before they could be rewritten.
+        already_imported = set(names).intersection(set(sys.modules))
+        if already_imported:
+            for name in already_imported:
+                if name not in self._rewritten_names:
+                    self._warn_already_imported(name)
+        self._must_rewrite.update(names)
+
+    def _warn_already_imported(self, name):
+        # Emit a pytest config warning (code "P1") via the config object.
+        self.config.warn(
+            'P1',
+            'Module already imported so can not be re-written: %s' % name)
+
+    def load_module(self, name):
+        """PEP302 loader: execute the code object stashed by find_module()."""
+        # If there is an existing module object named 'fullname' in
+        # sys.modules, the loader must use that existing module. (Otherwise,
+        # the reload() builtin will not work correctly.)
+        if name in sys.modules:
+            return sys.modules[name]
+
+        co, pyc = self.modules.pop(name)
+        # I wish I could just call imp.load_compiled here, but __file__ has to
+        # be set properly. In Python 3.2+, this all would be handled correctly
+        # by load_compiled.
+        mod = sys.modules[name] = imp.new_module(name)
+        try:
+            mod.__file__ = co.co_filename
+            # Normally, this attribute is 3.2+.
+            mod.__cached__ = pyc
+            mod.__loader__ = self
+            py.builtin.exec_(co, mod.__dict__)
+        except:
+            # Undo the sys.modules entry so a failed import can be retried.
+            del sys.modules[name]
+            raise
+        return sys.modules[name]
+
+
+
+    def is_package(self, name):
+        """Optional PEP302 API: whether *name* resolves to a package."""
+        try:
+            fd, fn, desc = imp.find_module(name)
+        except ImportError:
+            return False
+        if fd is not None:
+            fd.close()
+        tp = desc[2]
+        return tp == imp.PKG_DIRECTORY
+
+    @classmethod
+    def _register_with_pkg_resources(cls):
+        """
+        Ensure package resources can be loaded from this loader. May be called
+        multiple times, as the operation is idempotent.
+        """
+        try:
+            import pkg_resources
+            # access an attribute in case a deferred importer is present
+            pkg_resources.__name__
+        except ImportError:
+            return
+
+        # Since pytest tests are always located in the file system, the
+        #  DefaultProvider is appropriate.
+        pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+    def get_data(self, pathname):
+        """Optional PEP302 get_data API.
+        """
+        with open(pathname, 'rb') as f:
+            return f.read()
+
+
+def _write_pyc(state, co, source_stat, pyc):
+    """Write code object *co* as a pyc-format file at path *pyc*.
+
+    Returns True on success, False when the file could not be opened;
+    write failures are deliberately ignored (best-effort cache).
+    """
+    # Technically, we don't have to have the same pyc format as
+    # (C)Python, since these "pycs" should never be seen by builtin
+    # import. However, there's little reason deviate, and I hope
+    # sometime to be able to use imp.load_compiled to load them. (See
+    # the comment in load_module above.)
+    try:
+        fp = open(pyc, "wb")
+    except IOError:
+        err = sys.exc_info()[1].errno
+        state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+        # we ignore any failure to write the cache file
+        # there are many reasons, permission-denied, __pycache__ being a
+        # file etc.
+        return False
+    try:
+        # pyc header: imp magic number, then 32-bit source mtime and size.
+        fp.write(imp.get_magic())
+        mtime = int(source_stat.mtime)
+        size = source_stat.size & 0xFFFFFFFF
+        fp.write(struct.pack("<ll", mtime, size))
+        marshal.dump(co, fp)
+    finally:
+        fp.close()
+    return True
+
+
+# Byte forms of Windows and *nix line endings, used for newline rewriting.
+RN = "\r\n".encode("utf-8")
+N = "\n".encode("utf-8")
+
+# PEP263 coding-cookie pattern and the UTF-8 byte-order mark.
+cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
+BOM_UTF8 = '\xef\xbb\xbf'
+
+def _rewrite_test(config, fn):
+    """Try to read and rewrite *fn* and return (stat, code object).
+
+    Returns (None, None) whenever the source cannot be read, decoded,
+    parsed or compiled; the real import will then surface the error.
+    """
+    state = config._assertstate
+    try:
+        stat = fn.stat()
+        source = fn.read("rb")
+    except EnvironmentError:
+        return None, None
+    if ASCII_IS_DEFAULT_ENCODING:
+        # ASCII is the default encoding in Python 2. Without a coding
+        # declaration, Python 2 will complain about any bytes in the file
+        # outside the ASCII range. Sadly, this behavior does not extend to
+        # compile() or ast.parse(), which prefer to interpret the bytes as
+        # latin-1. (At least they properly handle explicit coding cookies.) To
+        # preserve this error behavior, we could force ast.parse() to use ASCII
+        # as the encoding by inserting a coding cookie. Unfortunately, that
+        # messes up line numbers. Thus, we have to check ourselves if anything
+        # is outside the ASCII range in the case no encoding is explicitly
+        # declared. For more context, see issue #269. Yay for Python 3 which
+        # gets this right.
+        end1 = source.find("\n")
+        end2 = source.find("\n", end1 + 1)
+        # A coding cookie may only appear on the first or second line.
+        if (not source.startswith(BOM_UTF8) and
+            cookie_re.match(source[0:end1]) is None and
+            cookie_re.match(source[end1 + 1:end2]) is None):
+            if hasattr(state, "_indecode"):
+                # encodings imported us again, so don't rewrite.
+                return None, None
+            state._indecode = True
+            try:
+                try:
+                    source.decode("ascii")
+                except UnicodeDecodeError:
+                    # Let it fail in real import.
+                    return None, None
+            finally:
+                del state._indecode
+    # On Python versions which are not 2.7 and less than or equal to 3.1, the
+    # parser expects *nix newlines.
+    if REWRITE_NEWLINES:
+        source = source.replace(RN, N) + N
+    try:
+        tree = ast.parse(source)
+    except SyntaxError:
+        # Let this pop up again in the real import.
+        state.trace("failed to parse: %r" % (fn,))
+        return None, None
+    rewrite_asserts(tree, fn, config)
+    try:
+        co = compile(tree, fn.strpath, "exec")
+    except SyntaxError:
+        # It's possible that this error is from some bug in the
+        # assertion rewriting, but I don't know of a fast way to tell.
+        state.trace("failed to compile: %r" % (fn,))
+        return None, None
+    return stat, co
+
+def _make_rewritten_pyc(state, source_stat, pyc, co):
+    """Try to dump rewritten code to *pyc*, atomically where possible."""
+    if sys.platform.startswith("win"):
+        # Windows grants exclusive access to open files and doesn't have atomic
+        # rename, so just write into the final file.
+        _write_pyc(state, co, source_stat, pyc)
+    else:
+        # When not on windows, assume rename is atomic. Dump the code object
+        # into a file specific to this process and atomically replace it.
+        proc_pyc = pyc + "." + str(os.getpid())
+        if _write_pyc(state, co, source_stat, proc_pyc):
+            os.rename(proc_pyc, pyc)
+
+def _read_pyc(source, pyc, trace=lambda x: None):
+    """Possibly read a pytest pyc containing rewritten code.
+
+    Return rewritten code if successful or None if not.
+    """
+    try:
+        fp = open(pyc, "rb")
+    except IOError:
+        return None
+    with fp:
+        try:
+            mtime = int(source.mtime())
+            size = source.size()
+            # Header written by _write_pyc: 4-byte magic + mtime + size.
+            data = fp.read(12)
+        except EnvironmentError as e:
+            trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
+            return None
+        # Check for invalid or out of date pyc file.
+        if (len(data) != 12 or data[:4] != imp.get_magic() or
+                struct.unpack("<ll", data[4:]) != (mtime, size)):
+            trace('_read_pyc(%s): invalid or out of date pyc' % source)
+            return None
+        try:
+            co = marshal.load(fp)
+        except Exception as e:
+            trace('_read_pyc(%s): marshal.load error %s' % (source, e))
+            return None
+        if not isinstance(co, types.CodeType):
+            trace('_read_pyc(%s): not a code object' % source)
+            return None
+        return co
+
+
+def rewrite_asserts(mod, module_path=None, config=None):
+    """Rewrite the assert statements in *mod* (an ast.Module) in place."""
+    AssertionRewriter(module_path, config).run(mod)
+
+
+def _saferepr(obj):
+    """Get a safe repr of an object for assertion error messages.
+
+    The assertion formatting (util.format_explanation()) requires
+    newlines to be escaped since they are a special character for it.
+    Normally assertion.util.format_explanation() does this but for a
+    custom repr it is possible to contain one of the special escape
+    sequences, especially '\n{' and '\n}' are likely to be present in
+    JSON reprs.
+
+    """
+    repr = py.io.saferepr(obj)
+    # Use replacement text of the same type (text vs bytes) as the repr.
+    if py.builtin._istext(repr):
+        t = py.builtin.text
+    else:
+        t = py.builtin.bytes
+    return repr.replace(t("\n"), t("\\n"))
+
+
+from _pytest.assertion.util import format_explanation as _format_explanation # noqa
+
+def _format_assertmsg(obj):
+    """Format the custom assertion message given.
+
+    For strings this simply replaces newlines with '\n~' so that
+    util.format_explanation() will preserve them instead of escaping
+    newlines.  For other objects py.io.saferepr() is used first.
+
+    """
+    # reprlib appears to have a bug which means that if a string
+    # contains a newline it gets escaped, however if an object has a
+    # .__repr__() which contains newlines it does not get escaped.
+    # However in either case we want to preserve the newline.
+    if py.builtin._istext(obj) or py.builtin._isbytes(obj):
+        s = obj
+        is_repr = False
+    else:
+        s = py.io.saferepr(obj)
+        is_repr = True
+    if py.builtin._istext(s):
+        t = py.builtin.text
+    else:
+        t = py.builtin.bytes
+    # Preserve newlines for format_explanation and escape % for the later
+    # %-formatting done in pop_format_context().
+    s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
+    if is_repr:
+        s = s.replace(t("\\n"), t("\n~"))
+    return s
+
+def _should_repr_global_name(obj):
+    """Return True if a global's value (not just its name) should be shown.
+
+    Objects with a __name__ (modules, functions, classes) and callables
+    are displayed by name instead of by repr.
+    """
+    return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
+
+def _format_boolop(explanations, is_or):
+    """Join sub-explanations with ' or '/' and ', escaping % characters."""
+    explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
+    if py.builtin._istext(explanation):
+        t = py.builtin.text
+    else:
+        t = py.builtin.bytes
+    # Escape % so the string survives the later %-formatting pass.
+    return explanation.replace(t('%'), t('%%'))
+
+def _call_reprcompare(ops, results, expls, each_obj):
+    """Return the explanation for the first failing link of a comparison chain.
+
+    Walks the (possibly chained) comparison until a link is falsey or its
+    truth test raises, then asks the installed util._reprcompare hook for
+    a custom explanation, falling back to the pre-built one.
+    """
+    for i, res, expl in zip(range(len(ops)), results, expls):
+        try:
+            done = not res
+        except Exception:
+            done = True
+        if done:
+            break
+    if util._reprcompare is not None:
+        custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
+        if custom is not None:
+            return custom
+    return expl
+
+
+# %-templates used when formatting unary operator explanations.
+unary_map = {
+    ast.Not: "not %s",
+    ast.Invert: "~%s",
+    ast.USub: "-%s",
+    ast.UAdd: "+%s"
+}
+
+# Operator symbols used when formatting binary operator explanations.
+binop_map = {
+    ast.BitOr: "|",
+    ast.BitXor: "^",
+    ast.BitAnd: "&",
+    ast.LShift: "<<",
+    ast.RShift: ">>",
+    ast.Add: "+",
+    ast.Sub: "-",
+    ast.Mult: "*",
+    ast.Div: "/",
+    ast.FloorDiv: "//",
+    ast.Mod: "%%", # escaped for string formatting
+    ast.Eq: "==",
+    ast.NotEq: "!=",
+    ast.Lt: "<",
+    ast.LtE: "<=",
+    ast.Gt: ">",
+    ast.GtE: ">=",
+    ast.Pow: "**",
+    ast.Is: "is",
+    ast.IsNot: "is not",
+    ast.In: "in",
+    ast.NotIn: "not in"
+}
+# Python 3.5+ compatibility
+try:
+    binop_map[ast.MatMult] = "@"
+except AttributeError:
+    pass
+
+# Python 3.4+ compatibility
+if hasattr(ast, "NameConstant"):
+    _NameConstant = ast.NameConstant
+else:
+    def _NameConstant(c):
+        # Fallback: reference True/False/None by name on older Pythons.
+        return ast.Name(str(c), ast.Load())
+
+
+def set_location(node, lineno, col_offset):
+    """Set node location information recursively."""
+    def _fix(node, lineno, col_offset):
+        # Only AST node types carrying location info have these attributes.
+        if "lineno" in node._attributes:
+            node.lineno = lineno
+        if "col_offset" in node._attributes:
+            node.col_offset = col_offset
+        for child in ast.iter_child_nodes(node):
+            _fix(child, lineno, col_offset)
+    _fix(node, lineno, col_offset)
+    return node
+
+
+class AssertionRewriter(ast.NodeVisitor):
+    """Assertion rewriting implementation.
+
+    The main entrypoint is to call .run() with an ast.Module instance,
+    this will then find all the assert statements and re-write them to
+    provide intermediate values and a detailed assertion error.  See
+    http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
+    for an overview of how this works.
+
+    The entry point here is .run() which will iterate over all the
+    statements in an ast.Module and for each ast.Assert statement it
+    finds call .visit() with it.  Then .visit_Assert() takes over and
+    is responsible for creating new ast statements to replace the
+    original assert statement: it re-writes the test of an assertion
+    to provide intermediate values and replace it with an if statement
+    which raises an assertion error with a detailed explanation in
+    case the expression is false.
+
+    For this .visit_Assert() uses the visitor pattern to visit all the
+    AST nodes of the ast.Assert.test field, each visit call returning
+    an AST node and the corresponding explanation string.  During this
+    state is kept in several instance attributes:
+
+    :statements: All the AST statements which will replace the assert
+       statement.
+
+    :variables: This is populated by .variable() with each variable
+       used by the statements so that they can all be set to None at
+       the end of the statements.
+
+    :variable_counter: Counter to create new unique variables needed
+       by statements.  Variables are created using .variable() and
+       have the form of "@py_assert0".
+
+    :on_failure: The AST statements which will be executed if the
+       assertion test fails.  This is the code which will construct
+       the failure message and raises the AssertionError.
+
+    :explanation_specifiers: A dict filled by .explanation_param()
+       with %-formatting placeholders and their corresponding
+       expressions to use in the building of an assertion message.
+       This is used by .pop_format_context() to build a message.
+
+    :stack: A stack of the explanation_specifiers dicts maintained by
+       .push_format_context() and .pop_format_context() which allows
+       to build another %-formatted string while already building one.
+
+    This state is reset on every new assert statement visited and used
+    by the other visitors.
+
+    """
+
+    def __init__(self, module_path, config):
+        """Store the source path and pytest config (used for warnings)."""
+        super(AssertionRewriter, self).__init__()
+        self.module_path = module_path
+        self.config = config
+
+    def run(self, mod):
+        """Find all assert statements in *mod* and rewrite them."""
+        if not mod.body:
+            # Nothing to do.
+            return
+        # Insert some special imports at the top of the module but after any
+        # docstrings and __future__ imports.
+        aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
+                   ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
+        expect_docstring = True
+        pos = 0
+        lineno = 0
+        for item in mod.body:
+            if (expect_docstring and isinstance(item, ast.Expr) and
+                    isinstance(item.value, ast.Str)):
+                doc = item.value.s
+                if "PYTEST_DONT_REWRITE" in doc:
+                    # The module has disabled assertion rewriting.
+                    return
+                # NOTE(review): len(doc) counts characters, not lines; kept
+                # as-is to match the vendored upstream pytest behavior.
+                lineno += len(doc) - 1
+                expect_docstring = False
+            elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
+                  item.module != "__future__"):
+                lineno = item.lineno
+                break
+            pos += 1
+        imports = [ast.Import([alias], lineno=lineno, col_offset=0)
+                   for alias in aliases]
+        mod.body[pos:pos] = imports
+        # Collect asserts.
+        # Iterative depth-first walk over statement lists, replacing each
+        # ast.Assert with the statements produced by visit_Assert().
+        nodes = [mod]
+        while nodes:
+            node = nodes.pop()
+            for name, field in ast.iter_fields(node):
+                if isinstance(field, list):
+                    new = []
+                    for i, child in enumerate(field):
+                        if isinstance(child, ast.Assert):
+                            # Transform assert.
+                            new.extend(self.visit(child))
+                        else:
+                            new.append(child)
+                            if isinstance(child, ast.AST):
+                                nodes.append(child)
+                    setattr(node, name, new)
+                elif (isinstance(field, ast.AST) and
+                      # Don't recurse into expressions as they can't contain
+                      # asserts.
+                      not isinstance(field, ast.expr)):
+                    nodes.append(field)
+
+    def variable(self):
+        """Return a fresh unique name, registered for end-of-assert cleanup."""
+        # Use a character invalid in python identifiers to avoid clashing.
+        name = "@py_assert" + str(next(self.variable_counter))
+        self.variables.append(name)
+        return name
+
+    def assign(self, expr):
+        """Give *expr* a name."""
+        # Emit "<name> = expr" and hand back a Load reference to the name.
+        name = self.variable()
+        self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
+        return ast.Name(name, ast.Load())
+
+    def display(self, expr):
+        """Call py.io.saferepr on the expression (via the _saferepr helper)."""
+        return self.helper("saferepr", expr)
+
+    def helper(self, name, *args):
+        """Call a helper in this module."""
+        # Helpers are reached through the "@pytest_ar" alias imported by run()
+        # and are named with a leading underscore (e.g. _saferepr).
+        py_name = ast.Name("@pytest_ar", ast.Load())
+        attr = ast.Attribute(py_name, "_" + name, ast.Load())
+        return ast_Call(attr, list(args), [])
+
+    def builtin(self, name):
+        """Return the builtin called *name*."""
+        # Looked up through the "@py_builtins" alias imported by run().
+        builtin_name = ast.Name("@py_builtins", ast.Load())
+        return ast.Attribute(builtin_name, name, ast.Load())
+
+    def explanation_param(self, expr):
+        """Return a new named %-formatting placeholder for expr.
+
+        This creates a %-formatting placeholder for expr in the
+        current formatting context, e.g. ``%(py0)s``.  The placeholder
+        and expr are placed in the current format context so that it
+        can be used on the next call to .pop_format_context().
+
+        """
+        specifier = "py" + str(next(self.variable_counter))
+        self.explanation_specifiers[specifier] = expr
+        return "%(" + specifier + ")s"
+
+    def push_format_context(self):
+        """Create a new formatting context.
+
+        The format context is used for when an explanation wants to
+        have a variable value formatted in the assertion message.  In
+        this case the value required can be added using
+        .explanation_param().  Finally .pop_format_context() is used
+        to format a string of %-formatted values as added by
+        .explanation_param().
+
+        """
+        # Contexts nest; the current one is always self.explanation_specifiers.
+        self.explanation_specifiers = {}
+        self.stack.append(self.explanation_specifiers)
+
+    def pop_format_context(self, expl_expr):
+        """Format the %-formatted string with current format context.
+
+        The expl_expr should be an ast.Str instance constructed from
+        the %-placeholders created by .explanation_param().  This will
+        add the required code to format said string to .on_failure and
+        return the ast.Name instance of the formatted string.
+
+        """
+        current = self.stack.pop()
+        if self.stack:
+            # Restore the enclosing context's specifiers.
+            self.explanation_specifiers = self.stack[-1]
+        keys = [ast.Str(key) for key in current.keys()]
+        format_dict = ast.Dict(keys, list(current.values()))
+        # "expl % {...}" -- evaluated only on assertion failure.
+        form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
+        name = "@py_format" + str(next(self.variable_counter))
+        self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
+        return ast.Name(name, ast.Load())
+
+    def generic_visit(self, node):
+        """Handle expressions we don't have custom code for."""
+        # Any unhandled expression is evaluated, named, and shown via repr.
+        assert isinstance(node, ast.expr)
+        res = self.assign(node)
+        return res, self.explanation_param(self.display(res))
+
+    def visit_Assert(self, assert_):
+        """Return the AST statements to replace the ast.Assert instance.
+
+        This re-writes the test of an assertion to provide
+        intermediate values and replace it with an if statement which
+        raises an assertion error with a detailed explanation in case
+        the expression is false.
+
+        """
+        # "assert (x, y)" tests a non-empty tuple and is thus always true.
+        if isinstance(assert_.test, ast.Tuple) and self.config is not None:
+            fslocation = (self.module_path, assert_.lineno)
+            self.config.warn('R1', 'assertion is always true, perhaps '
+                              'remove parentheses?', fslocation=fslocation)
+        # Reset the per-assert rewriting state (see the class docstring).
+        self.statements = []
+        self.variables = []
+        self.variable_counter = itertools.count()
+        self.stack = []
+        self.on_failure = []
+        self.push_format_context()
+        # Rewrite assert into a bunch of statements.
+        top_condition, explanation = self.visit(assert_.test)
+        # Create failure message.
+        body = self.on_failure
+        negation = ast.UnaryOp(ast.Not(), top_condition)
+        self.statements.append(ast.If(negation, body, []))
+        if assert_.msg:
+            assertmsg = self.helper('format_assertmsg', assert_.msg)
+            explanation = "\n>assert " + explanation
+        else:
+            assertmsg = ast.Str("")
+            explanation = "assert " + explanation
+        template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
+        msg = self.pop_format_context(template)
+        fmt = self.helper("format_explanation", msg)
+        err_name = ast.Name("AssertionError", ast.Load())
+        exc = ast_Call(err_name, [fmt], [])
+        if sys.version_info[0] >= 3:
+            raise_ = ast.Raise(exc, None)
+        else:
+            raise_ = ast.Raise(exc, None, None)
+        body.append(raise_)
+        # Clear temporary variables by setting them to None.
+        if self.variables:
+            variables = [ast.Name(name, ast.Store())
+                         for name in self.variables]
+            clear = ast.Assign(variables, _NameConstant(None))
+            self.statements.append(clear)
+        # Fix line numbers.
+        for stmt in self.statements:
+            set_location(stmt, assert_.lineno, assert_.col_offset)
+        return self.statements
+
+    def visit_Name(self, name):
+        """Show a name's value only when it is local or safe to repr."""
+        # Display the repr of the name if it's a local variable or
+        # _should_repr_global_name() thinks it's acceptable.
+        locs = ast_Call(self.builtin("locals"), [], [])
+        inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+        dorepr = self.helper("should_repr_global_name", name)
+        test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
+        expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
+        return name, self.explanation_param(expr)
+
+    def visit_BoolOp(self, boolop):
+        """Rewrite and/or chains, preserving short-circuit evaluation."""
+        res_var = self.variable()
+        # List that accumulates each operand's explanation at runtime.
+        expl_list = self.assign(ast.List([], ast.Load()))
+        app = ast.Attribute(expl_list, "append", ast.Load())
+        is_or = int(isinstance(boolop.op, ast.Or))
+        body = save = self.statements
+        fail_save = self.on_failure
+        levels = len(boolop.values) - 1
+        self.push_format_context()
+        # Process each operand, short-circuting if needed.
+        for i, v in enumerate(boolop.values):
+            if i:
+                fail_inner = []
+                # cond is set in a prior loop iteration below
+                self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
+                self.on_failure = fail_inner
+            self.push_format_context()
+            res, expl = self.visit(v)
+            body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
+            expl_format = self.pop_format_context(ast.Str(expl))
+            call = ast_Call(app, [expl_format], [])
+            self.on_failure.append(ast.Expr(call))
+            if i < levels:
+                # Nest the next operand in an if, so it only runs when the
+                # chain has not short-circuited yet.
+                cond = res
+                if is_or:
+                    cond = ast.UnaryOp(ast.Not(), cond)
+                inner = []
+                self.statements.append(ast.If(cond, inner, []))
+                self.statements = body = inner
+        self.statements = save
+        self.on_failure = fail_save
+        expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
+        expl = self.pop_format_context(expl_template)
+        return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
+
+    def visit_UnaryOp(self, unary):
+        """Rewrite a unary operation, formatting via unary_map."""
+        pattern = unary_map[unary.op.__class__]
+        operand_res, operand_expl = self.visit(unary.operand)
+        res = self.assign(ast.UnaryOp(unary.op, operand_res))
+        return res, pattern % (operand_expl,)
+
+    def visit_BinOp(self, binop):
+        """Rewrite a binary operation, formatting via binop_map."""
+        symbol = binop_map[binop.op.__class__]
+        left_expr, left_expl = self.visit(binop.left)
+        right_expr, right_expl = self.visit(binop.right)
+        explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
+        res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
+        return res, explanation
+
+    def visit_Call_35(self, call):
+        """
+        visit `ast.Call` nodes on Python3.5 and after
+
+        On 3.5+ starred args and **kwargs appear inside ``args``/``keywords``
+        rather than the removed ``starargs``/``kwargs`` node fields.
+        """
+        new_func, func_expl = self.visit(call.func)
+        arg_expls = []
+        new_args = []
+        new_kwargs = []
+        for arg in call.args:
+            res, expl = self.visit(arg)
+            arg_expls.append(expl)
+            new_args.append(res)
+        for keyword in call.keywords:
+            res, expl = self.visit(keyword.value)
+            new_kwargs.append(ast.keyword(keyword.arg, res))
+            if keyword.arg:
+                arg_expls.append(keyword.arg + "=" + expl)
+            else:  # **kwargs entries have a keyword node with an .arg of None
+                arg_expls.append("**" + expl)
+
+        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+        new_call = ast.Call(new_func, new_args, new_kwargs)
+        res = self.assign(new_call)
+        res_expl = self.explanation_param(self.display(res))
+        # Nested "{... = ...}" spans are rendered by util.format_explanation.
+        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+        return res, outer_expl
+
+    def visit_Starred(self, starred):
+        # From Python 3.5, a Starred node can appear in a function call.
+        # Only the inner value is rewritten; the Starred wrapper itself is
+        # returned unchanged so the call site keeps its *arg form.
+        res, expl = self.visit(starred.value)
+        return starred, '*' + expl
+
+    def visit_Call_legacy(self, call):
+        """
+        visit ``ast.Call`` nodes on 3.4 and below
+
+        Pre-3.5 ``ast.Call`` carries separate ``starargs``/``kwargs`` fields,
+        which are visited and passed through to the rebuilt call.
+        """
+        new_func, func_expl = self.visit(call.func)
+        arg_expls = []
+        new_args = []
+        new_kwargs = []
+        new_star = new_kwarg = None
+        for arg in call.args:
+            res, expl = self.visit(arg)
+            new_args.append(res)
+            arg_expls.append(expl)
+        for keyword in call.keywords:
+            res, expl = self.visit(keyword.value)
+            new_kwargs.append(ast.keyword(keyword.arg, res))
+            arg_expls.append(keyword.arg + "=" + expl)
+        if call.starargs:
+            new_star, expl = self.visit(call.starargs)
+            arg_expls.append("*" + expl)
+        if call.kwargs:
+            new_kwarg, expl = self.visit(call.kwargs)
+            arg_expls.append("**" + expl)
+        expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+        new_call = ast.Call(new_func, new_args, new_kwargs,
+                            new_star, new_kwarg)
+        res = self.assign(new_call)
+        res_expl = self.explanation_param(self.display(res))
+        outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+        return res, outer_expl
+
+    # ast.Call signature changed on 3.5,
+    # conditionally change which method is named
+    # visit_Call depending on Python version
+    if sys.version_info >= (3, 5):
+        visit_Call = visit_Call_35
+    else:
+        visit_Call = visit_Call_legacy
+
+
+    def visit_Attribute(self, attr):
+        """Rewrite attribute access; only ``Load`` contexts are rewritten."""
+        if not isinstance(attr.ctx, ast.Load):
+            # Stores/deletes are left to the default traversal.
+            return self.generic_visit(attr)
+        value, value_expl = self.visit(attr.value)
+        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
+        res_expl = self.explanation_param(self.display(res))
+        pat = "%s\n{%s = %s.%s\n}"
+        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
+        return res, expl
+
+    def visit_Compare(self, comp):
+        """Rewrite a (possibly chained) comparison.
+
+        Each pairwise comparison is stored into its own variable; the final
+        result of a chain is the ``and`` of those variables, matching
+        Python's chained-comparison semantics.
+        """
+        self.push_format_context()
+        left_res, left_expl = self.visit(comp.left)
+        if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
+            # Parenthesize nested comparisons/boolops in the explanation.
+            left_expl = "({0})".format(left_expl)
+        res_variables = [self.variable() for i in range(len(comp.ops))]
+        load_names = [ast.Name(v, ast.Load()) for v in res_variables]
+        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
+        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
+        expls = []
+        syms = []
+        results = [left_res]
+        for i, op, next_operand in it:
+            next_res, next_expl = self.visit(next_operand)
+            if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
+                next_expl = "({0})".format(next_expl)
+            results.append(next_res)
+            sym = binop_map[op.__class__]
+            syms.append(ast.Str(sym))
+            expl = "%s %s %s" % (left_expl, sym, next_expl)
+            expls.append(ast.Str(expl))
+            res_expr = ast.Compare(left_res, [op], [next_res])
+            self.statements.append(ast.Assign([store_names[i]], res_expr))
+            # The right operand becomes the left of the next comparison.
+            left_res, left_expl = next_res, next_expl
+        # Use pytest.assertion.util._reprcompare if that's available.
+        expl_call = self.helper("call_reprcompare",
+                                ast.Tuple(syms, ast.Load()),
+                                ast.Tuple(load_names, ast.Load()),
+                                ast.Tuple(expls, ast.Load()),
+                                ast.Tuple(results, ast.Load()))
+        if len(comp.ops) > 1:
+            res = ast.BoolOp(ast.And(), load_names)
+        else:
+            res = load_names[0]
+        return res, self.explanation_param(self.pop_format_context(expl_call))
diff --git a/lib/spack/external/_pytest/assertion/util.py b/lib/spack/external/_pytest/assertion/util.py
new file mode 100644
index 0000000000..4a0a4e4310
--- /dev/null
+++ b/lib/spack/external/_pytest/assertion/util.py
@@ -0,0 +1,300 @@
+"""Utilities for assertion debugging"""
+import pprint
+
+import _pytest._code
+import py
+try:
+    from collections import Sequence
+except ImportError:
+    Sequence = list
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
+
+# The _reprcompare attribute on the util module is used by the new assertion
+# interpretation code and assertion rewriter to detect this plugin was
+# loaded and in turn call the hooks defined here as part of the
+# DebugInterpreter.
+_reprcompare = None
+
+
+# the re-encoding is needed for python2 repr
+# with non-ascii characters (see issue 877 and 1379)
+def ecu(s):
+    # py.builtin._totext raises TypeError for inputs that are already text
+    # (or not bytes); those are returned unchanged.
+    try:
+        return u(s, 'utf-8', 'replace')
+    except TypeError:
+        return s
+
+
+def format_explanation(explanation):
+    """This formats an explanation
+
+    Normally all embedded newlines are escaped, however there are
+    three exceptions: \n{, \n} and \n~.  The first two are intended
+    cover nested explanations, see function and attribute explanations
+    for examples (.visit_Call(), visit_Attribute()).  The last one is
+    for when one explanation needs to span multiple lines, e.g. when
+    displaying diffs.
+    """
+    # Normalize to text first (python2 bytes with non-ascii repr).
+    explanation = ecu(explanation)
+    lines = _split_explanation(explanation)
+    result = _format_lines(lines)
+    return u('\n').join(result)
+
+
+def _split_explanation(explanation):
+    """Return a list of individual lines in the explanation
+
+    This will return a list of lines split on '\n{', '\n}' and '\n~'.
+    Any other newlines will be escaped and appear in the line as the
+    literal '\n' characters.
+    """
+    raw_lines = (explanation or u('')).split('\n')
+    lines = [raw_lines[0]]
+    for l in raw_lines[1:]:
+        # '>' is also preserved as a structural marker (see _format_lines).
+        if l and l[0] in ['{', '}', '~', '>']:
+            lines.append(l)
+        else:
+            # Re-escape the newline and fold into the previous line.
+            lines[-1] += '\\n' + l
+    return lines
+
+
+def _format_lines(lines):
+    """Format the individual lines
+
+    This will replace the '{', '}' and '~' characters of our mini
+    formatting language with the proper 'where ...', 'and ...' and ' +
+    ...' text, taking care of indentation along the way.
+
+    Return a list of formatted lines.
+    """
+    result = lines[:1]
+    # stack holds the result-index of each open '{' block; stackcnt counts
+    # how many sub-explanations were emitted at each nesting level.
+    stack = [0]
+    stackcnt = [0]
+    for line in lines[1:]:
+        if line.startswith('{'):
+            if stackcnt[-1]:
+                s = u('and   ')
+            else:
+                s = u('where ')
+            stack.append(len(result))
+            stackcnt[-1] += 1
+            stackcnt.append(0)
+            result.append(u(' +') + u('  ')*(len(stack)-1) + s + line[1:])
+        elif line.startswith('}'):
+            stack.pop()
+            stackcnt.pop()
+            # Append the closing text onto the line that opened this block.
+            result[stack[-1]] += line[1:]
+        else:
+            assert line[0] in ['~', '>']
+            stack[-1] += 1
+            # '>' lines are indented one level less than '~' lines.
+            indent = len(stack) if line.startswith('~') else len(stack) - 1
+            result.append(u('  ')*indent + line[1:])
+    assert len(stack) == 1
+    return result
+
+
+# Provide basestring in python3 (on py3 the name does not exist,
+# so alias it to str; on py2 the builtin is kept as-is).
+try:
+    basestring = basestring
+except NameError:
+    basestring = str
+
+
+def assertrepr_compare(config, op, left, right):
+    """Return specialised explanations for some operators/operands
+
+    Returns a list of explanation lines headed by a one-line summary, or
+    None when no specialised explanation applies for this op/operand pair.
+    """
+    width = 80 - 15 - len(op) - 2  # 15 chars indentation, 1 space around op
+    left_repr = py.io.saferepr(left, maxsize=int(width//2))
+    right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
+
+    summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
+
+    # Type predicates used to pick the comparison strategy below.
+    issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
+                            not isinstance(x, basestring))
+    istext = lambda x: isinstance(x, basestring)
+    isdict = lambda x: isinstance(x, dict)
+    isset = lambda x: isinstance(x, (set, frozenset))
+
+    def isiterable(obj):
+        try:
+            iter(obj)
+            return not istext(obj)
+        except TypeError:
+            return False
+
+    verbose = config.getoption('verbose')
+    explanation = None
+    try:
+        if op == '==':
+            if istext(left) and istext(right):
+                explanation = _diff_text(left, right, verbose)
+            else:
+                if issequence(left) and issequence(right):
+                    explanation = _compare_eq_sequence(left, right, verbose)
+                elif isset(left) and isset(right):
+                    explanation = _compare_eq_set(left, right, verbose)
+                elif isdict(left) and isdict(right):
+                    explanation = _compare_eq_dict(left, right, verbose)
+                # The full-diff explanation is appended on top of any
+                # container-specific explanation computed above.
+                if isiterable(left) and isiterable(right):
+                    expl = _compare_eq_iterable(left, right, verbose)
+                    if explanation is not None:
+                        explanation.extend(expl)
+                    else:
+                        explanation = expl
+        elif op == 'not in':
+            if istext(left) and istext(right):
+                explanation = _notin_text(left, right, verbose)
+    except Exception:
+        # A faulty __repr__ on the operands must not crash the report.
+        explanation = [
+            u('(pytest_assertion plugin: representation of details failed.  '
+              'Probably an object has a faulty __repr__.)'),
+            u(_pytest._code.ExceptionInfo())]
+
+    if not explanation:
+        return None
+
+    return [summary] + explanation
+
+
+def _diff_text(left, right, verbose=False):
+    """Return the explanation for the diff between text or bytes
+
+    Unless --verbose is used this will skip leading and trailing
+    characters which are identical to keep the diff minimal.
+
+    If the input are bytes they will be safely converted to text.
+    """
+    from difflib import ndiff
+    explanation = []
+    if isinstance(left, py.builtin.bytes):
+        left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+    if isinstance(right, py.builtin.bytes):
+        right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+    if not verbose:
+        i = 0  # just in case left or right has zero length
+        for i in range(min(len(left), len(right))):
+            if left[i] != right[i]:
+                break
+        if i > 42:
+            i -= 10                 # Provide some context
+            explanation = [u('Skipping %s identical leading '
+                             'characters in diff, use -v to show') % i]
+            left = left[i:]
+            right = right[i:]
+        if len(left) == len(right):
+            # NOTE(review): at i == 0, left[-i] is left[0], so this trailing
+            # scan's first comparison looks at the *first* character rather
+            # than the last — appears inherited from upstream pytest; confirm
+            # before changing vendored code.
+            for i in range(len(left)):
+                if left[-i] != right[-i]:
+                    break
+            if i > 42:
+                i -= 10     # Provide some context
+                explanation += [u('Skipping %s identical trailing '
+                                  'characters in diff, use -v to show') % i]
+                left = left[:-i]
+                right = right[:-i]
+    keepends = True
+    explanation += [line.strip('\n')
+                    for line in ndiff(left.splitlines(keepends),
+                                      right.splitlines(keepends))]
+    return explanation
+
+
+def _compare_eq_iterable(left, right, verbose=False):
+    """Return a full ndiff of the pretty-printed operands (verbose only)."""
+    if not verbose:
+        return [u('Use -v to get the full diff')]
+    # dynamic import to speedup pytest
+    import difflib
+
+    try:
+        left_formatting = pprint.pformat(left).splitlines()
+        right_formatting = pprint.pformat(right).splitlines()
+        explanation = [u('Full diff:')]
+    except Exception:
+        # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), ie, calling
+        # sorted() on a list would raise. See issue #718.
+        # As a workaround, the full diff is generated by using the repr() string of each item of each container.
+        left_formatting = sorted(repr(x) for x in left)
+        right_formatting = sorted(repr(x) for x in right)
+        explanation = [u('Full diff (fallback to calling repr on each item):')]
+    explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
+    return explanation
+
+
<br/>
+def _compare_eq_sequence(left, right, verbose=False):
+    """Explain sequence inequality: first differing index and extra items."""
+    explanation = []
+    for i in range(min(len(left), len(right))):
+        if left[i] != right[i]:
+            explanation += [u('At index %s diff: %r != %r')
+                            % (i, left[i], right[i])]
+            # Only the first differing index is reported.
+            break
+    if len(left) > len(right):
+        explanation += [u('Left contains more items, first extra item: %s')
+                        % py.io.saferepr(left[len(right)],)]
+    elif len(left) < len(right):
+        explanation += [
+            u('Right contains more items, first extra item: %s') %
+            py.io.saferepr(right[len(left)],)]
+    return explanation
+
+
+def _compare_eq_set(left, right, verbose=False):
+    """Explain set inequality via the two set differences."""
+    explanation = []
+    diff_left = left - right
+    diff_right = right - left
+    if diff_left:
+        explanation.append(u('Extra items in the left set:'))
+        for item in diff_left:
+            explanation.append(py.io.saferepr(item))
+    if diff_right:
+        explanation.append(u('Extra items in the right set:'))
+        for item in diff_right:
+            explanation.append(py.io.saferepr(item))
+    return explanation
+
+
+def _compare_eq_dict(left, right, verbose=False):
+    """Explain dict inequality: common, differing and extra items."""
+    explanation = []
+    common = set(left).intersection(set(right))
+    same = dict((k, left[k]) for k in common if left[k] == right[k])
+    if same and not verbose:
+        explanation += [u('Omitting %s identical items, use -v to show') %
+                        len(same)]
+    elif same:
+        explanation += [u('Common items:')]
+        explanation += pprint.pformat(same).splitlines()
+    # Keys present in both dicts but mapped to unequal values.
+    diff = set(k for k in common if left[k] != right[k])
+    if diff:
+        explanation += [u('Differing items:')]
+        for k in diff:
+            explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+                            py.io.saferepr({k: right[k]})]
+    extra_left = set(left) - set(right)
+    if extra_left:
+        explanation.append(u('Left contains more items:'))
+        explanation.extend(pprint.pformat(
+            dict((k, left[k]) for k in extra_left)).splitlines())
+    extra_right = set(right) - set(left)
+    if extra_right:
+        explanation.append(u('Right contains more items:'))
+        explanation.extend(pprint.pformat(
+            dict((k, right[k]) for k in extra_right)).splitlines())
+    return explanation
+
+
+def _notin_text(term, text, verbose=False):
+    """Explain a failed ``term not in text`` by diffing text without term."""
+    index = text.find(term)
+    head = text[:index]
+    tail = text[index+len(term):]
+    # Diff the text-with-term-removed against the original, so only the
+    # matched occurrence shows up as an insertion.
+    correct_text = head + tail
+    diff = _diff_text(correct_text, text, verbose)
+    newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
+    for line in diff:
+        if line.startswith(u('Skipping')):
+            continue
+        if line.startswith(u('- ')):
+            continue
+        if line.startswith(u('+ ')):
+            # Keep insertions but drop the '+ ' diff marker.
+            newdiff.append(u('  ') + line[2:])
+        else:
+            newdiff.append(line)
+    return newdiff
diff --git a/lib/spack/external/_pytest/cacheprovider.py b/lib/spack/external/_pytest/cacheprovider.py
new file mode 100644
index 0000000000..0657001f2d
--- /dev/null
+++ b/lib/spack/external/_pytest/cacheprovider.py
@@ -0,0 +1,245 @@
+"""
+merged implementation of the cache provider
+
+the name cache was not choosen to ensure pluggy automatically
+ignores the external pytest-cache
+"""
+
+import py
+import pytest
+import json
+from os.path import sep as _sep, altsep as _altsep
+
+
+class Cache(object):
+    """JSON-backed key/value store under ``<rootdir>/.cache``.
+
+    Values live under ``.cache/v/<key parts>``; plugin-managed directories
+    under ``.cache/d/<name>``.
+    """
+    def __init__(self, config):
+        self.config = config
+        self._cachedir = config.rootdir.join(".cache")
+        self.trace = config.trace.root.get("cache")
+        if config.getvalue("cacheclear"):
+            # --cache-clear: wipe and recreate the cache directory.
+            self.trace("clearing cachedir")
+            if self._cachedir.check():
+                self._cachedir.remove()
+            self._cachedir.mkdir()
+
+    def makedir(self, name):
+        """ return a directory path object with the given name.  If the
+        directory does not yet exist, it will be created.  You can use it
+        to manage files likes e. g. store/retrieve database
+        dumps across test sessions.
+
+        :param name: must be a string not containing a ``/`` separator.
+             Make sure the name contains your plugin or application
+             identifiers to prevent clashes with other cache users.
+        """
+        if _sep in name or _altsep is not None and _altsep in name:
+            raise ValueError("name is not allowed to contain path separators")
+        return self._cachedir.ensure_dir("d", name)
+
+    def _getvaluepath(self, key):
+        # Each '/'-separated key component becomes a path component under v/.
+        return self._cachedir.join('v', *key.split('/'))
+
+    def get(self, key, default):
+        """ return cached value for the given key.  If no value
+        was yet cached or the value cannot be read, the specified
+        default is returned.
+
+        :param key: must be a ``/`` separated value. Usually the first
+             name is the name of your plugin or your application.
+        :param default: must be provided in case of a cache-miss or
+             invalid cache values.
+
+        """
+        path = self._getvaluepath(key)
+        if path.check():
+            try:
+                with path.open("r") as f:
+                    return json.load(f)
+            except ValueError:
+                # Corrupt/unparseable JSON: fall through to the default.
+                self.trace("cache-invalid at %s" % (path,))
+        return default
+
+    def set(self, key, value):
+        """ save value for the given key.
+
+        :param key: must be a ``/`` separated value. Usually the first
+             name is the name of your plugin or your application.
+        :param value: must be of any combination of basic
+               python types, including nested types
+               like e. g. lists of dictionaries.
+        """
+        path = self._getvaluepath(key)
+        try:
+            path.dirpath().ensure_dir()
+        except (py.error.EEXIST, py.error.EACCES):
+            # Best effort: an unwritable cache only produces a warning.
+            self.config.warn(
+                code='I9', message='could not create cache path %s' % (path,)
+            )
+            return
+        try:
+            f = path.open('w')
+        except py.error.ENOTDIR:
+            self.config.warn(
+                code='I9', message='cache could not write path %s' % (path,))
+        else:
+            with f:
+                self.trace("cache-write %s: %r" % (key, value,))
+                json.dump(value, f, indent=2, sort_keys=True)
+
+
+class LFPlugin:
+    """ Plugin which implements the --lf (run last-failing) option """
+    def __init__(self, config):
+        self.config = config
+        # Active when either --lf or --ff was given.
+        active_keys = 'lf', 'failedfirst'
+        self.active = any(config.getvalue(key) for key in active_keys)
+        if self.active:
+            self.lastfailed = config.cache.get("cache/lastfailed", {})
+        else:
+            self.lastfailed = {}
+
+    def pytest_report_header(self):
+        # One-line summary of the run-last-failure mode for the header.
+        if self.active:
+            if not self.lastfailed:
+                mode = "run all (no recorded failures)"
+            else:
+                mode = "rerun last %d failures%s" % (
+                    len(self.lastfailed),
+                    " first" if self.config.getvalue("failedfirst") else "")
+            return "run-last-failure: %s" % mode
+
+    def pytest_runtest_logreport(self, report):
+        # Record failures (xfail excluded); forget a nodeid once its call
+        # phase passes.
+        if report.failed and "xfail" not in report.keywords:
+            self.lastfailed[report.nodeid] = True
+        elif not report.failed:
+            if report.when == "call":
+                self.lastfailed.pop(report.nodeid, None)
+
+    def pytest_collectreport(self, report):
+        passed = report.outcome in ('passed', 'skipped')
+        if passed:
+            if report.nodeid in self.lastfailed:
+                # Collector recovered: replace its entry by its children so
+                # the individual items are rerun.
+                self.lastfailed.pop(report.nodeid)
+                self.lastfailed.update(
+                    (item.nodeid, True)
+                    for item in report.result)
+        else:
+            self.lastfailed[report.nodeid] = True
+
+    def pytest_collection_modifyitems(self, session, config, items):
+        if self.active and self.lastfailed:
+            previously_failed = []
+            previously_passed = []
+            for item in items:
+                if item.nodeid in self.lastfailed:
+                    previously_failed.append(item)
+                else:
+                    previously_passed.append(item)
+            if not previously_failed and previously_passed:
+                # running a subset of all tests with recorded failures outside
+                # of the set of tests currently executing
+                pass
+            elif self.config.getvalue("failedfirst"):
+                items[:] = previously_failed + previously_passed
+            else:
+                # --lf: run only the failures, deselect the rest.
+                items[:] = previously_failed
+                config.hook.pytest_deselected(items=previously_passed)
+
+    def pytest_sessionfinish(self, session):
+        config = self.config
+        # Don't persist from --cache-show runs or xdist slave processes.
+        if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
+            return
+        prev_failed = config.cache.get("cache/lastfailed", None) is not None
+        if (session.testscollected and prev_failed) or self.lastfailed:
+            config.cache.set("cache/lastfailed", self.lastfailed)
+
+
+def pytest_addoption(parser):
+    """Register the cacheprovider command line options."""
+    group = parser.getgroup("general")
+    group.addoption(
+        '--lf', '--last-failed', action='store_true', dest="lf",
+        help="rerun only the tests that failed "
+             "at the last run (or all if none failed)")
+    group.addoption(
+        '--ff', '--failed-first', action='store_true', dest="failedfirst",
+        help="run all tests but run the last failures first.  "
+             "This may re-order tests and thus lead to "
+             "repeated fixture setup/teardown")
+    group.addoption(
+        '--cache-show', action='store_true', dest="cacheshow",
+        help="show cache contents, don't perform collection or tests")
+    group.addoption(
+        '--cache-clear', action='store_true', dest="cacheclear",
+        help="remove all cache contents at start of test run.")
+
+
+def pytest_cmdline_main(config):
+    # --cache-show short-circuits the normal run and dumps cache contents.
+    if config.option.cacheshow:
+        from _pytest.main import wrap_session
+        return wrap_session(config, cacheshow)
+
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_configure(config):
+    # Attach the cache early (tryfirst) so other plugins can use it.
+    config.cache = Cache(config)
+    config.pluginmanager.register(LFPlugin(config), "lfplugin")
+
+
+@pytest.fixture
+def cache(request):
+    """
+    Return a cache object that can persist state between testing sessions.
+
+    cache.get(key, default)
+    cache.set(key, value)
+
+    Keys must be a ``/`` separated value, where the first part is usually the
+    name of your plugin or application to avoid clashes with other cache users.
+
+    Values can be any object handled by the json stdlib module.
+    """
+    return request.config.cache
+
+
+def pytest_report_header(config):
+    # With -v, show the cache directory relative to the invocation dir.
+    if config.option.verbose:
+        relpath = py.path.local().bestrelpath(config.cache._cachedir)
+        return "cachedir: %s" % relpath
+
+
+def cacheshow(config, session):
+    """Print all cached values and directories; session body of --cache-show."""
+    from pprint import pprint
+    tw = py.io.TerminalWriter()
+    tw.line("cachedir: " + str(config.cache._cachedir))
+    if not config.cache._cachedir.check():
+        tw.line("cache is empty")
+        return 0
+    # Sentinel distinguishing "unreadable" from any real cached value.
+    dummy = object()
+    basedir = config.cache._cachedir
+    vdir = basedir.join("v")
+    tw.sep("-", "cache values")
+    for valpath in vdir.visit(lambda x: x.isfile()):
+        key = valpath.relto(vdir).replace(valpath.sep, "/")
+        val = config.cache.get(key, dummy)
+        if val is dummy:
+            tw.line("%s contains unreadable content, "
+                  "will be ignored" % key)
+        else:
+            tw.line("%s contains:" % key)
+            stream = py.io.TextIO()
+            pprint(val, stream=stream)
+            for line in stream.getvalue().splitlines():
+                tw.line("  " + line)
+
+    ddir = basedir.join("d")
+    if ddir.isdir() and ddir.listdir():
+        tw.sep("-", "cache directories")
+        for p in basedir.join("d").visit():
+            #if p.check(dir=1):
+            #    print("%s/" % p.relto(basedir))
+            if p.isfile():
+                key = p.relto(basedir)
+                tw.line("%s is a file of length %d" % (
+                        key, p.size()))
+    return 0
diff --git a/lib/spack/external/_pytest/capture.py b/lib/spack/external/_pytest/capture.py
new file mode 100644
index 0000000000..eea81ca187
--- /dev/null
+++ b/lib/spack/external/_pytest/capture.py
@@ -0,0 +1,491 @@
+"""
+per-test stdout/stderr capturing mechanism.
+
+"""
+from __future__ import with_statement
+
+import contextlib
+import sys
+import os
+from tempfile import TemporaryFile
+
+import py
+import pytest
+
+from py.io import TextIO
+unicode = py.builtin.text
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
+
+def pytest_addoption(parser):
+    """Register the capture options (--capture, -s)."""
+    group = parser.getgroup("general")
+    group._addoption(
+        '--capture', action="store",
+        default="fd" if hasattr(os, "dup") else "sys",
+        metavar="method", choices=['fd', 'sys', 'no'],
+        help="per-test capturing method: one of fd|sys|no.")
+    group._addoption(
+        '-s', action="store_const", const="no", dest="capture",
+        help="shortcut for --capture=no.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_load_initial_conftests(early_config, parser, args):
+    # Install the CaptureManager before conftests load so their output is
+    # captured too; replay it only if conftest loading raised.
+    _readline_workaround()
+    ns = early_config.known_args_namespace
+    pluginmanager = early_config.pluginmanager
+    capman = CaptureManager(ns.capture)
+    pluginmanager.register(capman, "capturemanager")
+
+    # make sure that capturemanager is properly reset at final shutdown
+    early_config.add_cleanup(capman.reset_capturings)
+
+    # make sure logging does not raise exceptions at the end
+    def silence_logging_at_shutdown():
+        if "logging" in sys.modules:
+            sys.modules["logging"].raiseExceptions = False
+    early_config.add_cleanup(silence_logging_at_shutdown)
+
+    # finally trigger conftest loading but while capturing (issue93)
+    capman.init_capturings()
+    outcome = yield
+    out, err = capman.suspendcapture()
+    if outcome.excinfo is not None:
+        # conftest loading failed: forward the captured output.
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+
+
+class CaptureManager:
+    """Drives global stdout/stderr capturing around each test phase.
+
+    ``method`` is one of ``fd``/``sys``/``no`` as chosen via --capture.
+    """
+    def __init__(self, method):
+        self._method = method
+
+    def _getcapture(self, method):
+        # Map the capture method name onto a configured MultiCapture.
+        if method == "fd":
+            return MultiCapture(out=True, err=True, Capture=FDCapture)
+        elif method == "sys":
+            return MultiCapture(out=True, err=True, Capture=SysCapture)
+        elif method == "no":
+            return MultiCapture(out=False, err=False, in_=False)
+        else:
+            raise ValueError("unknown capturing method: %r" % method)
+
+    def init_capturings(self):
+        # Must only be called once per manager.
+        assert not hasattr(self, "_capturing")
+        self._capturing = self._getcapture(self._method)
+        self._capturing.start_capturing()
+
+    def reset_capturings(self):
+        cap = self.__dict__.pop("_capturing", None)
+        if cap is not None:
+            cap.pop_outerr_to_orig()
+            cap.stop_capturing()
+
+    def resumecapture(self):
+        self._capturing.resume_capturing()
+
+    def suspendcapture(self, in_=False):
+        # Returns the (out, err) captured so far, or None if capturing was
+        # never initialized.
+        self.deactivate_funcargs()
+        cap = getattr(self, "_capturing", None)
+        if cap is not None:
+            try:
+                outerr = cap.readouterr()
+            finally:
+                cap.suspend_capturing(in_=in_)
+            return outerr
+
+    def activate_funcargs(self, pyfuncitem):
+        # Start the per-test capsys/capfd fixture capture, if requested.
+        capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None)
+        if capfuncarg is not None:
+            capfuncarg._start()
+            self._capfuncarg = capfuncarg
+
+    def deactivate_funcargs(self):
+        capfuncarg = self.__dict__.pop("_capfuncarg", None)
+        if capfuncarg is not None:
+            capfuncarg.close()
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_make_collect_report(self, collector):
+        # Capture output produced while collecting test files and attach it
+        # to the collect report.
+        if isinstance(collector, pytest.File):
+            self.resumecapture()
+            outcome = yield
+            out, err = self.suspendcapture()
+            rep = outcome.get_result()
+            if out:
+                rep.sections.append(("Captured stdout", out))
+            if err:
+                rep.sections.append(("Captured stderr", err))
+        else:
+            yield
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_runtest_setup(self, item):
+        self.resumecapture()
+        yield
+        self.suspendcapture_item(item, "setup")
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_runtest_call(self, item):
+        self.resumecapture()
+        self.activate_funcargs(item)
+        yield
+        #self.deactivate_funcargs() called from suspendcapture()
+        self.suspendcapture_item(item, "call")
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_runtest_teardown(self, item):
+        self.resumecapture()
+        yield
+        self.suspendcapture_item(item, "teardown")
+
+    @pytest.hookimpl(tryfirst=True)
+    def pytest_keyboard_interrupt(self, excinfo):
+        # Restore the real streams so the interrupt traceback is visible.
+        self.reset_capturings()
+
+    @pytest.hookimpl(tryfirst=True)
+    def pytest_internalerror(self, excinfo):
+        self.reset_capturings()
+
+    def suspendcapture_item(self, item, when, in_=False):
+        # Stash the captured output on the item for the report of phase
+        # ``when`` (setup/call/teardown).
+        out, err = self.suspendcapture(in_=in_)
+        item.add_report_section(when, "stdout", out)
+        item.add_report_section(when, "stderr", err)
+
+
+# Shared message for the mutually-exclusive capsys/capfd fixtures.
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
+@pytest.fixture
+def capsys(request):
+    """Enable capturing of writes to sys.stdout/sys.stderr and make
+    captured output available via ``capsys.readouterr()`` method calls
+    which return a ``(out, err)`` tuple.
+    """
+    if "capfd" in request.fixturenames:
+        # NOTE(review): raiseerror itself raises, so the extra ``raise`` on
+        # its return value looks redundant — matches upstream; confirm
+        # before changing vendored code.
+        raise request.raiseerror(error_capsysfderror)
+    request.node._capfuncarg = c = CaptureFixture(SysCapture, request)
+    return c
+
+@pytest.fixture
+def capfd(request):
+    """Enable capturing of writes to file descriptors 1 and 2 and make
+    captured output available via ``capfd.readouterr()`` method calls
+    which return a ``(out, err)`` tuple.
+    """
+    if "capsys" in request.fixturenames:
+        request.raiseerror(error_capsysfderror)
+    if not hasattr(os, 'dup'):
+        # FD capturing needs os.dup (absent on some platforms).
+        pytest.skip("capfd funcarg needs os.dup")
+    request.node._capfuncarg = c = CaptureFixture(FDCapture, request)
+    return c
+
+
class CaptureFixture:
    """Object returned by the ``capsys``/``capfd`` fixtures; manages a
    per-test MultiCapture and exposes ``readouterr()``."""

    def __init__(self, captureclass, request):
        # captureclass is SysCapture (capsys) or FDCapture (capfd).
        self.captureclass = captureclass
        self.request = request

    def _start(self):
        """Create and start the underlying out/err capture."""
        self._capture = MultiCapture(out=True, err=True, in_=False,
                                     Capture=self.captureclass)
        self._capture.start_capturing()

    def close(self):
        """Stop capturing; keep a final (out, err) snapshot around."""
        cap = self.__dict__.pop("_capture", None)
        if cap is not None:
            # Flush pending output to the original streams and remember
            # it so readouterr() still works after close().
            self._outerr = cap.pop_outerr_to_orig()
            cap.stop_capturing()

    def readouterr(self):
        """Return an ``(out, err)`` tuple of text captured so far."""
        try:
            return self._capture.readouterr()
        except AttributeError:
            # Capture was already closed: return the final snapshot.
            return self._outerr

    @contextlib.contextmanager
    def disabled(self):
        """Context manager that temporarily disables all capturing."""
        capmanager = self.request.config.pluginmanager.getplugin('capturemanager')
        capmanager.suspendcapture_item(self.request.node, "call", in_=True)
        try:
            yield
        finally:
            capmanager.resumecapture()
+
+
def safe_text_dupfile(f, mode, default_encoding="UTF8"):
    """ return an open text file object that's a duplicate of f on the
        FD-level if possible.
    """
    encoding = getattr(f, "encoding", None)
    try:
        fd = f.fileno()
    except Exception:
        if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
            # we seem to have a text stream, let's just use it
            return f
    else:
        # Duplicate at the fd level and reopen unbuffered in binary mode
        # (EncodedFile below handles the text encoding).
        newfd = os.dup(fd)
        if "b" not in mode:
            mode += "b"
        f = os.fdopen(newfd, mode, 0)  # no buffering
    return EncodedFile(f, encoding or default_encoding)
+
+
class EncodedFile(object):
    """Wrap a binary stream, transparently encoding text written to it.

    Text is encoded with ``self.encoding`` (unencodable characters are
    replaced); bytes pass through unchanged.  All other attribute access
    is delegated to the wrapped buffer.
    """
    errors = "strict"  # possibly needed by py3 code (issue555)

    def __init__(self, buffer, encoding):
        self.buffer = buffer
        self.encoding = encoding

    def write(self, obj):
        # BUG FIX: the original tested `isinstance(obj, unicode)`, but
        # the `unicode` builtin does not exist on Python 3, so any write
        # raised NameError there.  On Python 2, `str is bytes`, so
        # "not bytes" is exactly "unicode" — equivalent on py2, correct
        # on py3.
        if not isinstance(obj, bytes):
            obj = obj.encode(self.encoding, "replace")
        self.buffer.write(obj)

    def writelines(self, linelist):
        data = ''.join(linelist)
        self.write(data)

    def __getattr__(self, name):
        # Delegate everything else (fileno, flush, close, ...) to the
        # underlying buffer.
        return getattr(object.__getattribute__(self, "buffer"), name)
+
+
class MultiCapture(object):
    """Bundle up to three per-stream captures (stdin/stdout/stderr) and
    drive them through a common start/suspend/resume/stop lifecycle."""

    # Class-level defaults so unrequested streams read as None.
    out = err = in_ = None

    def __init__(self, out=True, err=True, in_=True, Capture=None):
        # Capture is the per-stream capture class (FDCapture or
        # SysCapture); it is instantiated with the stream's fd number.
        if in_:
            self.in_ = Capture(0)
        if out:
            self.out = Capture(1)
        if err:
            self.err = Capture(2)

    def start_capturing(self):
        """Start every configured per-stream capture."""
        if self.in_:
            self.in_.start()
        if self.out:
            self.out.start()
        if self.err:
            self.err.start()

    def pop_outerr_to_orig(self):
        """ pop current snapshot out/err capture and flush to orig streams. """
        out, err = self.readouterr()
        if out:
            self.out.writeorg(out)
        if err:
            self.err.writeorg(err)
        return out, err

    def suspend_capturing(self, in_=False):
        """Temporarily stop capturing out/err (and optionally stdin)."""
        if self.out:
            self.out.suspend()
        if self.err:
            self.err.suspend()
        if in_ and self.in_:
            self.in_.suspend()
            # Remember stdin was suspended so resume_capturing() knows
            # to restore it too.
            self._in_suspended = True

    def resume_capturing(self):
        """Undo a previous suspend_capturing()."""
        if self.out:
            self.out.resume()
        if self.err:
            self.err.resume()
        if hasattr(self, "_in_suspended"):
            self.in_.resume()
            del self._in_suspended

    def stop_capturing(self):
        """ stop capturing and reset capturing streams """
        # Guard against double-stop, which would restore stale streams.
        if hasattr(self, '_reset'):
            raise ValueError("was already stopped")
        self._reset = True
        if self.out:
            self.out.done()
        if self.err:
            self.err.done()
        if self.in_:
            self.in_.done()

    def readouterr(self):
        """ return snapshot unicode value of stdout/stderr capturings. """
        return (self.out.snap() if self.out is not None else "",
                self.err.snap() if self.err is not None else "")
+
class NoCapture:
    """Do-nothing capture object used where a capture is required by
    the interface but nothing should actually be captured."""

    def __init__(self, *args):
        pass

    def start(self, *args):
        pass

    def done(self, *args):
        pass

    def suspend(self, *args):
        pass

    def resume(self, *args):
        pass
+
class FDCapture:
    """ Capture IO to/from a given os-level filedescriptor. """

    def __init__(self, targetfd, tmpfile=None):
        self.targetfd = targetfd
        try:
            # Keep a duplicate of the original fd so done()/suspend()
            # can restore it later.
            self.targetfd_save = os.dup(self.targetfd)
        except OSError:
            # The fd is invalid (e.g. already closed): degrade to no-op.
            self.start = lambda: None
            self.done = lambda: None
        else:
            if targetfd == 0:
                # stdin is never buffered into a tmpfile; it is replaced
                # by /dev/null at the fd level and a SysCapture stub.
                assert not tmpfile, "cannot set tmpfile with stdin"
                tmpfile = open(os.devnull, "r")
                self.syscapture = SysCapture(targetfd)
            else:
                if tmpfile is None:
                    f = TemporaryFile()
                    with f:
                        tmpfile = safe_text_dupfile(f, mode="wb+")
                if targetfd in patchsysdict:
                    # Also mirror the capture on the sys.std* object.
                    self.syscapture = SysCapture(targetfd, tmpfile)
                else:
                    self.syscapture = NoCapture()
            self.tmpfile = tmpfile
            self.tmpfile_fd = tmpfile.fileno()

    def __repr__(self):
        return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save)

    def start(self):
        """ Start capturing on targetfd using memorized tmpfile. """
        try:
            os.fstat(self.targetfd_save)
        except (AttributeError, OSError):
            raise ValueError("saved filedescriptor not valid anymore")
        # Redirect the target fd into our temporary file.
        os.dup2(self.tmpfile_fd, self.targetfd)
        self.syscapture.start()

    def snap(self):
        """Return text captured so far and reset the capture buffer."""
        f = self.tmpfile
        f.seek(0)
        res = f.read()
        if res:
            enc = getattr(f, "encoding", None)
            if enc and isinstance(res, bytes):
                res = py.builtin._totext(res, enc, "replace")
            f.truncate(0)
            f.seek(0)
            return res
        return ''

    def done(self):
        """ stop capturing, restore streams, return original capture file,
        seeked to position zero. """
        targetfd_save = self.__dict__.pop("targetfd_save")
        os.dup2(targetfd_save, self.targetfd)
        os.close(targetfd_save)
        self.syscapture.done()
        self.tmpfile.close()

    def suspend(self):
        """Temporarily restore the original fd; capture state is kept."""
        self.syscapture.suspend()
        os.dup2(self.targetfd_save, self.targetfd)

    def resume(self):
        """Redirect the fd into the capture file again after suspend()."""
        self.syscapture.resume()
        os.dup2(self.tmpfile_fd, self.targetfd)

    def writeorg(self, data):
        """ write to original file descriptor. """
        if py.builtin._istext(data):
            data = data.encode("utf8") # XXX use encoding of original stream
        os.write(self.targetfd_save, data)
+
+
class SysCapture:
    """Capture a std stream by replacing the corresponding ``sys.std*``
    attribute with an in-memory file object."""

    def __init__(self, fd, tmpfile=None):
        # patchsysdict maps fd numbers (0/1/2) to sys attribute names.
        name = patchsysdict[fd]
        self._old = getattr(sys, name)
        self.name = name
        if tmpfile is None:
            if name == "stdin":
                tmpfile = DontReadFromInput()
            else:
                tmpfile = TextIO()
        self.tmpfile = tmpfile

    def start(self):
        setattr(sys, self.name, self.tmpfile)

    def snap(self):
        """Return text captured so far and reset the capture buffer."""
        f = self.tmpfile
        res = f.getvalue()
        f.truncate(0)
        f.seek(0)
        return res

    def done(self):
        """Restore the original stream and close the capture buffer."""
        setattr(sys, self.name, self._old)
        del self._old
        self.tmpfile.close()

    def suspend(self):
        setattr(sys, self.name, self._old)

    def resume(self):
        setattr(sys, self.name, self.tmpfile)

    def writeorg(self, data):
        """Write *data* to the original (uncaptured) stream."""
        self._old.write(data)
        self._old.flush()
+
+
class DontReadFromInput:
    """Temporary stub class.  Ideally when stdin is accessed, the
    capturing should be turned off, with possibly all data captured
    so far sent to the screen.  This should be configurable, though,
    because in automated test runs it is better to crash than
    hang indefinitely.
    """

    encoding = None

    def read(self, *args):
        raise IOError("reading from stdin while output is captured")

    # Every read-like operation fails the same way.
    readline = read
    readlines = read
    __iter__ = read

    def fileno(self):
        raise ValueError("redirected Stdin is pseudofile, has no fileno()")

    def isatty(self):
        return False

    def close(self):
        pass

    @property
    def buffer(self):
        # On Python 3 expose ourselves as the binary layer as well.
        if sys.version_info >= (3, 0):
            return self
        raise AttributeError('redirected stdin has no attribute buffer')
+
+
+def _readline_workaround():
+    """
+    Ensure readline is imported so that it attaches to the correct stdio
+    handles on Windows.
+
+    Pdb uses readline support where available--when not running from the Python
+    prompt, the readline module is not imported until running the pdb REPL.  If
+    running pytest with the --pdb option this means the readline module is not
+    imported until after I/O capture has been started.
+
+    This is a problem for pyreadline, which is often used to implement readline
+    support on Windows, as it does not attach to the correct handles for stdout
+    and/or stdin if they have been redirected by the FDCapture mechanism.  This
+    workaround ensures that readline is imported before I/O capture is setup so
+    that it can attach to the actual stdin/out for the console.
+
+    See https://github.com/pytest-dev/pytest/pull/1281
+    """
+
+    if not sys.platform.startswith('win32'):
+        return
+    try:
+        import readline  # noqa
+    except ImportError:
+        pass
diff --git a/lib/spack/external/_pytest/compat.py b/lib/spack/external/_pytest/compat.py
new file mode 100644
index 0000000000..51fc3bc5c1
--- /dev/null
+++ b/lib/spack/external/_pytest/compat.py
@@ -0,0 +1,230 @@
+"""
+python version compatibility code
+"""
+import sys
+import inspect
+import types
+import re
+import functools
+
+import py
+
+import  _pytest
+
+
+
+try:
+    import enum
+except ImportError:  # pragma: no cover
+    # Only available in Python 3.4+ or as a backport
+    enum = None
+
# True on Python 3.x interpreters, False on Python 2.x.
_PY3 = sys.version_info > (3, 0)
_PY2 = not _PY3


NoneType = type(None)
# Sentinel for "no value supplied" where None is itself a valid value.
NOTSET = object()

if hasattr(inspect, 'signature'):
    def _format_args(func):
        # Python 3.3+: inspect.signature renders the full signature.
        return str(inspect.signature(func))
else:
    def _format_args(func):
        # Older interpreters: fall back to the legacy argspec API.
        return inspect.formatargspec(*inspect.getargspec(func))

isfunction = inspect.isfunction
isclass = inspect.isclass
# used to work around a python2 exception info leak
exc_clear = getattr(sys, 'exc_clear', lambda: None)
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(''))
+
+
def is_generator(func):
    """Return a truthy value if *func* compiles to a generator function."""
    try:
        # Bit 0x20 (CO_GENERATOR) in co_flags marks generator code objects.
        return _pytest._code.getrawcode(func).co_flags & 32 # generator function
    except AttributeError: # builtin functions have no bytecode
        # assume them to not be generators
        return False
+
+
def getlocation(function, curdir):
    """Return a ``"filename:lineno"`` string locating *function*.

    The path is made relative to *curdir* when possible and the line
    number is reported 1-based.
    """
    # NOTE: the original re-imported inspect locally, shadowing the
    # module-level import for no reason; the redundant import is removed.
    fn = py.path.local(inspect.getfile(function))
    lineno = py.builtin._getcode(function).co_firstlineno
    if fn.relto(curdir):
        fn = fn.relto(curdir)
    return "%s:%d" % (fn, lineno + 1)
+
+
def num_mock_patch_args(function):
    """ return number of arguments used up by mock arguments (if any) """
    patchings = getattr(function, "patchings", None)
    if not patchings:
        return 0
    # Prefer the standalone 'mock' package, falling back to the stdlib
    # unittest.mock when it has been imported.
    mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
    if mock is None:
        return len(patchings)
    return len([p for p in patchings
                if not p.attribute_name and p.new is mock.DEFAULT])
+
+
def getfuncargnames(function, startindex=None):
    """Return the argument names *function* requests (as a tuple),
    skipping ``self``, mock-patch injected arguments and arguments that
    have defaults."""
    # XXX merge with main.py's varnames
    #assert not isclass(function)
    realfunction = function
    while hasattr(realfunction, "__wrapped__"):
        realfunction = realfunction.__wrapped__
    if startindex is None:
        # Skip 'self' for bound methods.
        startindex = inspect.ismethod(function) and 1 or 0
    if realfunction != function:
        # Arguments consumed by @mock.patch decorators are injected,
        # not requested by the caller.
        startindex += num_mock_patch_args(function)
        function = realfunction
    if isinstance(function, functools.partial):
        argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
        partial = function
        # Drop arguments already bound positionally or by keyword.
        argnames = argnames[len(partial.args):]
        if partial.keywords:
            for kw in partial.keywords:
                argnames.remove(kw)
    else:
        argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
    # func_defaults is the Python 2 spelling, __defaults__ Python 3.
    defaults = getattr(function, 'func_defaults',
                       getattr(function, '__defaults__', None)) or ()
    numdefaults = len(defaults)
    if numdefaults:
        # Trailing arguments with defaults are not fixture requests.
        return tuple(argnames[startindex:-numdefaults])
    return tuple(argnames[startindex:])
+
+
+
if  sys.version_info[:2] == (2, 6):
    def isclass(object):
        """ Return true if the object is a class. Overrides inspect.isclass for
        python 2.6 because it will return True for objects which always return
        something on __getattr__ calls (see #1035).
        Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
        """
        # types.ClassType covers old-style classes (Python 2 only).
        return isinstance(object, (type, types.ClassType))
+
+
if _PY3:
    import codecs

    # Types accepted where "a string" is expected (Python 3).
    STRING_TYPES = bytes, str

    def _escape_strings(val):
        """If val is pure ascii, returns it as a str().  Otherwise, escapes
        bytes objects into a sequence of escaped bytes:

        b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'

        and escapes unicode objects into a sequence of escaped unicode
        ids, e.g.:

        '4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'

        note:
           the obvious "v.decode('unicode-escape')" will return
           valid utf-8 unicode if it finds them in bytes, but we
           want to return escaped bytes for any byte, even if they match
           a utf-8 string.

        """
        if isinstance(val, bytes):
            if val:
                # source: http://goo.gl/bGsnwC
                encoded_bytes, _ = codecs.escape_encode(val)
                return encoded_bytes.decode('ascii')
            else:
                # empty bytes crashes codecs.escape_encode (#1087)
                return ''
        else:
            return val.encode('unicode_escape').decode('ascii')
else:
    # Python 2: str is bytes and unicode is a separate type.
    STRING_TYPES = bytes, str, unicode

    def _escape_strings(val):
        """In py2 bytes and str are the same type, so return if it's a bytes
        object, return it unchanged if it is a full ascii string,
        otherwise escape it into its binary form.

        If it's a unicode string, change the unicode characters into
        unicode escapes.

        """
        if isinstance(val, bytes):
            try:
                # Implicit ascii decode: raises UnicodeDecodeError for
                # non-ascii bytes (Python 2 semantics).
                return val.encode('ascii')
            except UnicodeDecodeError:
                return val.encode('string-escape')
        else:
            return val.encode('unicode-escape')
+
+
def get_real_func(obj):
    """ gets the real function object of the (possibly) wrapped object by
    functools.wraps or functools.partial.
    """
    unwrapped = obj
    # Follow the __wrapped__ chain left by functools.wraps.
    while hasattr(unwrapped, "__wrapped__"):
        unwrapped = unwrapped.__wrapped__
    if isinstance(unwrapped, functools.partial):
        return unwrapped.func
    return unwrapped
+
+
def getfslineno(obj):
    """Return the (filesystem path, 0-based lineno) pair for *obj*,
    unwrapping decorators and honoring an explicit ``place_as`` hint."""
    # xxx let decorators etc specify a sane ordering
    obj = get_real_func(obj)
    if hasattr(obj, 'place_as'):
        # Objects can redirect where they should be reported to live.
        obj = obj.place_as
    fslineno = _pytest._code.getfslineno(obj)
    assert isinstance(fslineno[1], int), obj
    return fslineno
+
+
def getimfunc(func):
    """Return the plain function underlying a bound/unbound method,
    or *func* itself when it is not a method."""
    # __func__ is the Python 3 (and new-style py2) attribute, im_func
    # the legacy Python 2 one; hasattr only swallows AttributeError.
    if hasattr(func, '__func__'):
        return func.__func__
    if hasattr(func, 'im_func'):
        return func.im_func
    return func
+
+
def safe_getattr(object, name, default):
    """ Like getattr but return default upon any Exception.

    Attribute access can potentially fail for 'evil' Python objects.
    See issue214
    """
    # getattr's own default only covers AttributeError; anything else
    # raised by a property/__getattr__ is mapped to the default here.
    try:
        result = getattr(object, name, default)
    except Exception:
        result = default
    return result
+
+
+def _is_unittest_unexpected_success_a_failure():
+    """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+    """
+    return sys.version_info >= (3, 4)
+
+
if _PY3:
    def safe_str(v):
        """returns v as string"""
        return str(v)
else:
    def safe_str(v):
        """returns v as string, converting to ascii if necessary"""
        try:
            return str(v)
        except UnicodeError:
            # str() of a non-ascii unicode raises on Python 2; fall back
            # to ascii with unmappable characters replaced.
            errors = 'replace'
            return v.encode('ascii', errors)
diff --git a/lib/spack/external/_pytest/config.py b/lib/spack/external/_pytest/config.py
new file mode 100644
index 0000000000..fe386ed0b1
--- /dev/null
+++ b/lib/spack/external/_pytest/config.py
@@ -0,0 +1,1340 @@
+""" command line options, ini-file and conftest.py processing. """
+import argparse
+import shlex
+import traceback
+import types
+import warnings
+
+import py
+# Don't import pytest here because it causes import cycle troubles
+import sys, os
+import _pytest._code
+import _pytest.hookspec  # the extension point definitions
+import _pytest.assertion
+from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
+from _pytest.compat import safe_str
+
# Decorators for declaring pytest hook implementations and hook
# specifications, bound to the "pytest" project name.
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")

# pytest startup
#
+
+
class ConftestImportFailure(Exception):
    """Raised when importing a conftest.py file fails.

    Carries the offending *path* and the ``sys.exc_info()`` triple of
    the original error.
    """

    def __init__(self, path, excinfo):
        Exception.__init__(self, path, excinfo)
        self.path = path
        self.excinfo = excinfo

    def __str__(self):
        etype, evalue, etb = self.excinfo
        # The level of the tracebacks we want to print is hand crafted :(
        frames = traceback.format_tb(etb)[2:]
        return repr(evalue) + '\n' + ''.join(frames)
+
+
def main(args=None, plugins=None):
    """ return exit code, after performing an in-process test run.

    :arg args: list of command line arguments.

    :arg plugins: list of plugin objects to be auto-registered during
                  initialization.
    """
    try:
        try:
            config = _prepareconfig(args, plugins)
        except ConftestImportFailure as e:
            # A conftest failed to import: print its traceback and exit
            # with the "usage error" code.
            tw = py.io.TerminalWriter(sys.stderr)
            for line in traceback.format_exception(*e.excinfo):
                tw.line(line.rstrip(), red=True)
            tw.line("ERROR: could not load %s\n" % (e.path), red=True)
            return 4
        else:
            try:
                config.pluginmanager.check_pending()
                return config.hook.pytest_cmdline_main(config=config)
            finally:
                # Always run unconfigure hooks, even on error.
                config._ensure_unconfigure()
    except UsageError as e:
        for msg in e.args:
            sys.stderr.write("ERROR: %s\n" %(msg,))
        return 4
+
class cmdline:  # compatibility namespace
    # Deprecated alias: ``_pytest.config.cmdline.main`` == ``main``.
    main = staticmethod(main)
+
+
# Raised for bad command line usage; main() converts it to exit code 4.
class UsageError(Exception):
    """ error in pytest usage or invocation"""
+
+
def filename_arg(path, optname):
    """ Argparse type validator for filename arguments.

    :path: path of filename
    :optname: name of the option
    """
    # Reject directories; anything else (including a nonexistent path)
    # is passed through unchanged.
    if not os.path.isdir(path):
        return path
    raise UsageError("{0} must be a filename, given: {1}".format(optname, path))
+
+
def directory_arg(path, optname):
    """Argparse type validator for directory arguments.

    :path: path of directory
    :optname: name of the option
    """
    # Only existing directories are accepted; everything else errors.
    if os.path.isdir(path):
        return path
    raise UsageError("{0} must be a directory, given: {1}".format(optname, path))
+
+
# Config objects created by _preloadplugins(), consumed by get_config().
_preinit = []

# Names of plugin modules (living under the _pytest package) that are
# imported and registered by default for every run.
default_plugins = (
     "mark main terminal runner python fixtures debugging unittest capture skipping "
     "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
     "junitxml resultlog doctest cacheprovider freeze_support "
     "setuponly setupplan").split()

# All plugins importable as "_pytest.<name>"; pytester is opt-in only.
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
+
+
def _preloadplugins():
    """Create a Config eagerly (importing default plugins at startup)
    and stash it for the next get_config() call to pick up."""
    assert not _preinit
    _preinit.append(get_config())
+
def get_config():
    """Return a Config with all default plugins loaded, reusing a
    preloaded one when _preloadplugins() was called earlier."""
    if _preinit:
        return _preinit.pop(0)
    # subsequent calls to main will create a fresh instance
    pluginmanager = PytestPluginManager()
    config = Config(pluginmanager)
    for spec in default_plugins:
        pluginmanager.import_plugin(spec)
    return config
+
def get_plugin_manager():
    """
    Obtain a new instance of the
    :py:class:`_pytest.config.PytestPluginManager`, with default plugins
    already loaded.

    This function can be used by integration with other tools, like hooking
    into pytest to run tests into an IDE.
    """
    # Convenience wrapper: the manager comes from a fully set up Config.
    return get_config().pluginmanager
+
def _prepareconfig(args=None, plugins=None):
    """Build a Config from *args*, register any extra *plugins*, and run
    the pytest_cmdline_parse hook chain; returns the parsed Config."""
    warning = None
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, py.path.local):
        args = [str(args)]
    elif not isinstance(args, (tuple, list)):
        if not isinstance(args, str):
            raise ValueError("not a string or argument list: %r" % (args,))
        # Passing a plain string is deprecated; split it like a shell would.
        args = shlex.split(args, posix=sys.platform != "win32")
        from _pytest import deprecated
        warning = deprecated.MAIN_STR_ARGS
    config = get_config()
    pluginmanager = config.pluginmanager
    try:
        if plugins:
            for plugin in plugins:
                # Strings are treated as plugin names, objects are
                # registered directly.
                if isinstance(plugin, py.builtin._basestring):
                    pluginmanager.consider_pluginarg(plugin)
                else:
                    pluginmanager.register(plugin)
        if warning:
            config.warn('C1', warning)
        return pluginmanager.hook.pytest_cmdline_parse(
                pluginmanager=pluginmanager, args=args)
    except BaseException:
        # Make sure unconfigure hooks run even if parsing blows up.
        config._ensure_unconfigure()
        raise
+
+
+class PytestPluginManager(PluginManager):
+    """
+    Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
+    functionality:
+
+    * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and
+      ``pytest_plugins`` global variables found in plugins being loaded;
+    * ``conftest.py`` loading during start-up;
+    """
+    def __init__(self):
+        super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
+        self._conftest_plugins = set()
+
+        # state related to local conftest plugins
+        self._path2confmods = {}
+        self._conftestpath2mod = {}
+        self._confcutdir = None
+        self._noconftest = False
+        self._duplicatepaths = set()
+
+        self.add_hookspecs(_pytest.hookspec)
+        self.register(self)
+        if os.environ.get('PYTEST_DEBUG'):
+            err = sys.stderr
+            encoding = getattr(err, 'encoding', 'utf8')
+            try:
+                err = py.io.dupfile(err, encoding=encoding)
+            except Exception:
+                pass
+            self.trace.root.setwriter(err.write)
+            self.enable_tracing()
+
+        # Config._consider_importhook will set a real object if required.
+        self.rewrite_hook = _pytest.assertion.DummyRewriteHook()
+
+    def addhooks(self, module_or_class):
+        """
+        .. deprecated:: 2.8
+
+        Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
+        """
+        warning = dict(code="I2",
+                       fslocation=_pytest._code.getfslineno(sys._getframe(1)),
+                       nodeid=None,
+                       message="use pluginmanager.add_hookspecs instead of "
+                               "deprecated addhooks() method.")
+        self._warn(warning)
+        return self.add_hookspecs(module_or_class)
+
+    def parse_hookimpl_opts(self, plugin, name):
+        # pytest hooks are always prefixed with pytest_
+        # so we avoid accessing possibly non-readable attributes
+        # (see issue #1073)
+        if not name.startswith("pytest_"):
+            return
+        # ignore some historic special names which can not be hooks anyway
+        if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
+            return
+
+        method = getattr(plugin, name)
+        opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
+        if opts is not None:
+            for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
+                opts.setdefault(name, hasattr(method, name))
+        return opts
+
+    def parse_hookspec_opts(self, module_or_class, name):
+        opts = super(PytestPluginManager, self).parse_hookspec_opts(
+                                                module_or_class, name)
+        if opts is None:
+            method = getattr(module_or_class, name)
+            if name.startswith("pytest_"):
+                opts = {"firstresult": hasattr(method, "firstresult"),
+                        "historic": hasattr(method, "historic")}
+        return opts
+
+    def _verify_hook(self, hook, hookmethod):
+        super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
+        if "__multicall__" in hookmethod.argnames:
+            fslineno = _pytest._code.getfslineno(hookmethod.function)
+            warning = dict(code="I1",
+                           fslocation=fslineno,
+                           nodeid=None,
+                           message="%r hook uses deprecated __multicall__ "
+                                   "argument" % (hook.name))
+            self._warn(warning)
+
+    def register(self, plugin, name=None):
+        ret = super(PytestPluginManager, self).register(plugin, name)
+        if ret:
+            self.hook.pytest_plugin_registered.call_historic(
+                      kwargs=dict(plugin=plugin, manager=self))
+        return ret
+
+    def getplugin(self, name):
+        # support deprecated naming because plugins (xdist e.g.) use it
+        return self.get_plugin(name)
+
+    def hasplugin(self, name):
+        """Return True if the plugin with the given name is registered."""
+        return bool(self.get_plugin(name))
+
+    def pytest_configure(self, config):
+        # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
+        # we should remove tryfirst/trylast as markers
+        config.addinivalue_line("markers",
+            "tryfirst: mark a hook implementation function such that the "
+            "plugin machinery will try to call it first/as early as possible.")
+        config.addinivalue_line("markers",
+            "trylast: mark a hook implementation function such that the "
+            "plugin machinery will try to call it last/as late as possible.")
+
+    def _warn(self, message):
+        kwargs = message if isinstance(message, dict) else {
+            'code': 'I1',
+            'message': message,
+            'fslocation': None,
+            'nodeid': None,
+        }
+        self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
+
+    #
+    # internal API for local conftest plugin handling
+    #
+    def _set_initial_conftests(self, namespace):
+        """ load initial conftest files given a preparsed "namespace".
+            As conftest files may add their own command line options
+            which have arguments ('--my-opt somepath') we might get some
+            false positives.  All builtin and 3rd party plugins will have
+            been loaded, however, so common options will not confuse our logic
+            here.
+        """
+        current = py.path.local()
+        self._confcutdir = current.join(namespace.confcutdir, abs=True) \
+                                if namespace.confcutdir else None
+        self._noconftest = namespace.noconftest
+        testpaths = namespace.file_or_dir
+        foundanchor = False
+        for path in testpaths:
+            path = str(path)
+            # remove node-id syntax
+            i = path.find("::")
+            if i != -1:
+                path = path[:i]
+            anchor = current.join(path, abs=1)
+            if exists(anchor): # we found some file object
+                self._try_load_conftest(anchor)
+                foundanchor = True
+        if not foundanchor:
+            self._try_load_conftest(current)
+
+    def _try_load_conftest(self, anchor):
+        self._getconftestmodules(anchor)
+        # let's also consider test* subdirs
+        if anchor.check(dir=1):
+            for x in anchor.listdir("test*"):
+                if x.check(dir=1):
+                    self._getconftestmodules(x)
+
+    def _getconftestmodules(self, path):
+        if self._noconftest:
+            return []
+        try:
+            return self._path2confmods[path]
+        except KeyError:
+            if path.isfile():
+                clist = self._getconftestmodules(path.dirpath())
+            else:
+                # XXX these days we may rather want to use config.rootdir
+                # and allow users to opt into looking into the rootdir parent
+                # directories instead of requiring to specify confcutdir
+                clist = []
+                for parent in path.parts():
+                    if self._confcutdir and self._confcutdir.relto(parent):
+                        continue
+                    conftestpath = parent.join("conftest.py")
+                    if conftestpath.isfile():
+                        mod = self._importconftest(conftestpath)
+                        clist.append(mod)
+
+            self._path2confmods[path] = clist
+            return clist
+
+    def _rget_with_confmod(self, name, path):
+        modules = self._getconftestmodules(path)
+        for mod in reversed(modules):
+            try:
+                return mod, getattr(mod, name)
+            except AttributeError:
+                continue
+        raise KeyError(name)
+
+    def _importconftest(self, conftestpath):
+        try:
+            return self._conftestpath2mod[conftestpath]
+        except KeyError:
+            pkgpath = conftestpath.pypkgpath()
+            if pkgpath is None:
+                _ensure_removed_sysmodule(conftestpath.purebasename)
+            try:
+                mod = conftestpath.pyimport()
+            except Exception:
+                raise ConftestImportFailure(conftestpath, sys.exc_info())
+
+            self._conftest_plugins.add(mod)
+            self._conftestpath2mod[conftestpath] = mod
+            dirpath = conftestpath.dirpath()
+            if dirpath in self._path2confmods:
+                for path, mods in self._path2confmods.items():
+                    if path and path.relto(dirpath) or path == dirpath:
+                        assert mod not in mods
+                        mods.append(mod)
+            self.trace("loaded conftestmodule %r" %(mod))
+            self.consider_conftest(mod)
+            return mod
+
+    #
+    # API for bootstrapping plugin loading
+    #
+    #
+
+    def consider_preparse(self, args):
+        for opt1,opt2 in zip(args, args[1:]):
+            if opt1 == "-p":
+                self.consider_pluginarg(opt2)
+
+    def consider_pluginarg(self, arg):
+        if arg.startswith("no:"):
+            name = arg[3:]
+            self.set_blocked(name)
+            if not name.startswith("pytest_"):
+                self.set_blocked("pytest_" + name)
+        else:
+            self.import_plugin(arg)
+
+    def consider_conftest(self, conftestmodule):
+        if self.register(conftestmodule, name=conftestmodule.__file__):
+            self.consider_module(conftestmodule)
+
def consider_env(self):
    """Import plugin specs named by the PYTEST_PLUGINS env variable."""
    env_spec = os.environ.get("PYTEST_PLUGINS")
    self._import_plugin_specs(env_spec)
+
def consider_module(self, mod):
    """Import the plugins declared by a module's ``pytest_plugins``
    attribute, first marking them for assertion rewriting."""
    declared = getattr(mod, 'pytest_plugins', [])
    plugin_names = [declared] if isinstance(declared, str) else declared
    self.rewrite_hook.mark_rewrite(*plugin_names)
    self._import_plugin_specs(plugin_names)
+
+    def _import_plugin_specs(self, spec):
+        if spec:
+            if isinstance(spec, str):
+                spec = spec.split(",")
+            for import_spec in spec:
+                self.import_plugin(import_spec)
+
def import_plugin(self, modname):
    """Import and register plugin ``modname`` unless it is already known.

    ImportError is re-raised with a friendlier message; a ``pytest.skip``
    raised at import time demotes the plugin to a warning instead of an
    error.
    """
    # most often modname refers to builtin modules, e.g. "pytester",
    # "terminal" or "capture".  Those plugins are registered under their
    # basename for historic purposes but must be imported with the
    # _pytest prefix.
    assert isinstance(modname, str)
    if self.get_plugin(modname) is not None:
        return
    if modname in builtin_plugins:
        importspec = "_pytest." + modname
    else:
        importspec = modname
    try:
        __import__(importspec)
    except ImportError as e:
        new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])))
        # copy over name and path attributes
        for attr in ('name', 'path'):
            if hasattr(e, attr):
                setattr(new_exc, attr, getattr(e, attr))
        raise new_exc
    except Exception as e:
        # a plugin module may call pytest.skip() at import time; treat
        # that as "plugin unavailable" rather than a hard failure
        import pytest
        if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
            raise
        self._warn("skipped plugin %r: %s" %((modname, e.msg)))
    else:
        # import succeeded: register under the short name and scan the
        # module for further pytest_plugins declarations
        mod = sys.modules[importspec]
        self.register(mod, modname)
        self.consider_module(mod)
+
+
class Parser:
    """ Parser for command line arguments and ini-file values.

    :ivar extra_info: dict of generic param -> value to display in case
        there's an error processing the command line arguments.
    """

    def __init__(self, usage=None, processopt=None):
        # options registered via addoption() without an explicit group
        self._anonymous = OptionGroup("custom options", parser=self)
        self._groups = []
        self._processopt = processopt
        self._usage = usage
        # ini-option name -> (help, type, default); see addini()
        self._inidict = {}
        # registration order of ini-option names
        self._ininames = []
        self.extra_info = {}

    def processoption(self, option):
        """Forward a newly added option to the processopt callback,
        but only when the option has a destination to store into."""
        if self._processopt:
            if option.dest:
                self._processopt(option)

    def getgroup(self, name, description="", after=None):
        """ get (or create) a named option Group.

        :name: name of the option group.
        :description: long description for --help output.
        :after: name of other group, used for ordering --help output.

        The returned group object has an ``addoption`` method with the same
        signature as :py:func:`parser.addoption
        <_pytest.config.Parser.addoption>` but will be shown in the
        respective group in the output of ``pytest --help``.
        """
        for group in self._groups:
            if group.name == name:
                return group
        group = OptionGroup(name, description, parser=self)
        # insert right after the group named `after` when present,
        # otherwise the new group ends up appended at the end
        i = 0
        for i, grp in enumerate(self._groups):
            if grp.name == after:
                break
        self._groups.insert(i+1, group)
        return group

    def addoption(self, *opts, **attrs):
        """ register a command line option.

        :opts: option names, can be short or long options.
        :attrs: same attributes which the ``add_option()`` function of the
           `argparse library
           <http://docs.python.org/2/library/argparse.html>`_
           accepts.

        After command line parsing options are available on the pytest config
        object via ``config.option.NAME`` where ``NAME`` is usually set
        by passing a ``dest`` attribute, for example
        ``addoption("--long", dest="NAME", ...)``.
        """
        self._anonymous.addoption(*opts, **attrs)

    def parse(self, args, namespace=None):
        """Parse ``args`` strictly, with bash completion support."""
        from _pytest._argcomplete import try_argcomplete
        self.optparser = self._getparser()
        try_argcomplete(self.optparser)
        return self.optparser.parse_args([str(x) for x in args], namespace=namespace)

    def _getparser(self):
        """Build the argparse parser from all registered groups/options."""
        from _pytest._argcomplete import filescompleter
        optparser = MyOptionParser(self, self.extra_info)
        groups = self._groups + [self._anonymous]
        for group in groups:
            if group.options:
                desc = group.description or group.name
                arggroup = optparser.add_argument_group(desc)
                for option in group.options:
                    n = option.names()
                    a = option.attrs()
                    arggroup.add_argument(*n, **a)
        # bash like autocompletion for dirs (appending '/')
        optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
        return optparser

    def parse_setoption(self, args, option, namespace=None):
        """Parse ``args`` and copy every parsed value onto ``option``;
        return the leftover positional (file/dir) arguments."""
        parsedoption = self.parse(args, namespace=namespace)
        for name, value in parsedoption.__dict__.items():
            setattr(option, name, value)
        return getattr(parsedoption, FILE_OR_DIR)

    def parse_known_args(self, args, namespace=None):
        """parses and returns a namespace object with known arguments at this
        point.
        """
        return self.parse_known_and_unknown_args(args, namespace=namespace)[0]

    def parse_known_and_unknown_args(self, args, namespace=None):
        """parses and returns a namespace object with known arguments, and
        the remaining arguments unknown at this point.
        """
        optparser = self._getparser()
        args = [str(x) for x in args]
        return optparser.parse_known_args(args, namespace=namespace)

    def addini(self, name, help, type=None, default=None):
        """ register an ini-file option.

        :name: name of the ini-variable
        :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
               or ``bool``.
        :default: default value if no ini-file option exists but is queried.

        The value of ini-variables can be retrieved via a call to
        :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
        """
        assert type in (None, "pathlist", "args", "linelist", "bool")
        self._inidict[name] = (help, type, default)
        self._ininames.append(name)
+
+
class ArgumentError(Exception):
    """
    Raised if an Argument instance is created with invalid or
    inconsistent arguments.
    """

    def __init__(self, msg, option):
        self.msg = msg
        self.option_id = str(option)

    def __str__(self):
        # prefix the message with the offending option, when known
        prefix = "option %s: " % self.option_id if self.option_id else ""
        return prefix + self.msg
+
+
class Argument:
    """class that mimics the necessary behaviour of optparse.Option

    its currently a least effort implementation
    and ignoring choices and integer prefixes
    https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
    """
    # optparse-style type names mapped to the real callables argparse expects
    _typ_map = {
        'int': int,
        'string': str,
        'float': float,
        'complex': complex,
    }

    def __init__(self, *names, **attrs):
        """store parms in private vars for use in add_argument"""
        self._attrs = attrs
        self._short_opts = []
        self._long_opts = []
        self.dest = attrs.get('dest')
        # optparse used '%default' in help strings; argparse wants '%(default)s'
        if '%default' in (attrs.get('help') or ''):
            warnings.warn(
                'pytest now uses argparse. "%default" should be'
                ' changed to "%(default)s" ',
                DeprecationWarning,
                stacklevel=3)
        try:
            typ = attrs['type']
        except KeyError:
            pass
        else:
            # this might raise a keyerror as well, don't want to catch that
            if isinstance(typ, py.builtin._basestring):
                if typ == 'choice':
                    warnings.warn(
                        'type argument to addoption() is a string %r.'
                        ' For parsearg this is optional and when supplied'
                        ' should be a type.'
                        ' (options: %s)' % (typ, names),
                        DeprecationWarning,
                        stacklevel=3)
                    # argparse expects a type here take it from
                    # the type of the first element
                    attrs['type'] = type(attrs['choices'][0])
                else:
                    warnings.warn(
                        'type argument to addoption() is a string %r.'
                        ' For parsearg this should be a type.'
                        ' (options: %s)' % (typ, names),
                        DeprecationWarning,
                        stacklevel=3)
                    attrs['type'] = Argument._typ_map[typ]
                # used in test_parseopt -> test_parse_defaultgetter
                self.type = attrs['type']
            else:
                self.type = typ
        try:
            # attribute existence is tested in Config._processopt
            self.default = attrs['default']
        except KeyError:
            pass
        self._set_opt_strings(names)
        # derive a destination name from the first long (preferred) or
        # short option when the caller did not supply one explicitly
        if not self.dest:
            if self._long_opts:
                self.dest = self._long_opts[0][2:].replace('-', '_')
            else:
                try:
                    self.dest = self._short_opts[0][1:]
                except IndexError:
                    raise ArgumentError(
                        'need a long or short option', self)

    def names(self):
        """Return all option spellings, short options first."""
        return self._short_opts + self._long_opts

    def attrs(self):
        """Return the attrs dict to pass to argparse's add_argument()."""
        # update any attributes set by processopt
        attrs = 'default dest help'.split()
        if self.dest:
            attrs.append(self.dest)
        for attr in attrs:
            try:
                self._attrs[attr] = getattr(self, attr)
            except AttributeError:
                pass
        if self._attrs.get('help'):
            a = self._attrs['help']
            a = a.replace('%default', '%(default)s')
            #a = a.replace('%prog', '%(prog)s')
            self._attrs['help'] = a
        return self._attrs

    def _set_opt_strings(self, opts):
        """directly from optparse

        might not be necessary as this is passed to argparse later on"""
        for opt in opts:
            if len(opt) < 2:
                raise ArgumentError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise ArgumentError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise ArgumentError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)

    def __repr__(self):
        args = []
        if self._short_opts:
            args += ['_short_opts: ' + repr(self._short_opts)]
        if self._long_opts:
            args += ['_long_opts: ' + repr(self._long_opts)]
        args += ['dest: ' + repr(self.dest)]
        if hasattr(self, 'type'):
            args += ['type: ' + repr(self.type)]
        if hasattr(self, 'default'):
            args += ['default: ' + repr(self.default)]
        return 'Argument({0})'.format(', '.join(args))
+
+
class OptionGroup:
    """A named collection of command line options, rendered together as
    one argument group in ``--help`` output."""

    def __init__(self, name, description="", parser=None):
        self.name = name
        self.description = description
        self.options = []
        self.parser = parser

    def addoption(self, *optnames, **attrs):
        """ add an option to this group.

        if a shortened version of a long option is specified it will
        be suppressed in the help. addoption('--twowords', '--two-words')
        results in help showing '--two-words' only, but --twowords gets
        accepted **and** the automatic destination is in args.twowords
        """
        taken = set()
        for existing in self.options:
            taken.update(existing.names())
        conflict = set(optnames) & taken
        if conflict:
            raise ValueError("option names %s already added" % conflict)
        self._addoption_instance(Argument(*optnames, **attrs),
                                 shortupper=False)

    def _addoption(self, *optnames, **attrs):
        self._addoption_instance(Argument(*optnames, **attrs),
                                 shortupper=True)

    def _addoption_instance(self, option, shortupper=False):
        if not shortupper:
            # lowercase short options are reserved for pytest's own use
            lowered = [opt for opt in option._short_opts
                       if opt[0] == '-' and opt[1].islower()]
            if lowered:
                raise ValueError("lowercase shortoptions reserved")
        if self.parser:
            self.parser.processoption(option)
        self.options.append(option)
+
+
class MyOptionParser(argparse.ArgumentParser):
    """ArgumentParser subclass wired to pytest's Parser: no automatic
    -h/--help, custom help formatting, and extra context on usage errors."""

    def __init__(self, parser, extra_info=None):
        if not extra_info:
            extra_info = {}
        self._parser = parser
        argparse.ArgumentParser.__init__(self, usage=parser._usage,
            add_help=False, formatter_class=DropShorterLongHelpFormatter)
        # extra_info is a dict of (param -> value) to display if there's
        # an usage error to provide more contextual information to the user
        self.extra_info = extra_info

    def parse_args(self, args=None, namespace=None):
        """allow splitting of positional arguments"""
        args, argv = self.parse_known_args(args, namespace)
        if argv:
            for arg in argv:
                # any leftover dash-prefixed token is a genuine usage error;
                # report it together with the extra_info context
                if arg and arg[0] == '-':
                    lines = ['unrecognized arguments: %s' % (' '.join(argv))]
                    for k, v in sorted(self.extra_info.items()):
                        lines.append('  %s: %s' % (k, v))
                    self.error('\n'.join(lines))
            # non-option leftovers become additional file/dir targets
            getattr(args, FILE_OR_DIR).extend(argv)
        return args
+
+
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
    """shorten help for long options that differ only in extra hyphens

    - collapse **long** options that are the same except for extra hyphens
    - special action attribute map_long_option allows suppressing additional
      long options
    - shortcut if there are only two options and one of them is a short one
    - cache result on action object as this is called at least 2 times
    """
    def _format_action_invocation(self, action):
        orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
        if orgstr and orgstr[0] != '-': # only optional arguments
            return orgstr
        # return the cached value computed on a previous call, if any
        res = getattr(action, '_formatted_action_invocation', None)
        if res:
            return res
        options = orgstr.split(', ')
        if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
            # a shortcut for '-h, --help' or '--abc', '-a'
            action._formatted_action_invocation = orgstr
            return orgstr
        return_list = []
        option_map =  getattr(action, 'map_long_option', {})
        if option_map is None:
            option_map = {}
        short_long = {}
        # first pass: for each long option, remember the longest spelling
        # (the one with the most dashes) keyed by its dash-free form
        for option in options:
            if len(option) == 2 or option[2] == ' ':
                continue
            if not option.startswith('--'):
                raise ArgumentError('long optional argument without "--": [%s]'
                                    % (option), self)
            xxoption = option[2:]
            if xxoption.split()[0] not in option_map:
                shortened = xxoption.replace('-', '')
                if shortened not in short_long or \
                   len(short_long[shortened]) < len(xxoption):
                    short_long[shortened] = xxoption
        # now short_long has been filled out to the longest with dashes
        # **and** we keep the right option ordering from add_argument
        for option in options: #
            if len(option) == 2 or option[2] == ' ':
                return_list.append(option)
            if option[2:] == short_long.get(option.replace('-', '')):
                return_list.append(option.replace(' ', '=', 1))
        action._formatted_action_invocation = ', '.join(return_list)
        return action._formatted_action_invocation
+
+
+
+def _ensure_removed_sysmodule(modname):
+    try:
+        del sys.modules[modname]
+    except KeyError:
+        pass
+
class CmdOptions(object):
    """ holds cmdline options as attributes."""

    def __init__(self, values=()):
        self.__dict__.update(values)

    def __repr__(self):
        return "<CmdOptions " + repr(self.__dict__) + ">"

    def copy(self):
        """Return a new CmdOptions with a snapshot of current attributes."""
        return CmdOptions(dict(self.__dict__))
+
class Notset:
    """Sentinel type whose repr makes "no value supplied" explicit."""

    def __repr__(self):
        return "<NOTSET>"
+
+
# module-level sentinel used as the default for Config.getoption()
notset = Notset()
# name of the positional argparse argument collecting file/dir targets
FILE_OR_DIR = 'file_or_dir'
+
+
+class Config(object):
+    """ access to configuration values, pluginmanager and plugin hooks.  """
+
+    def __init__(self, pluginmanager):
+        #: access to command line option as attributes.
+        #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
+        self.option = CmdOptions()
+        _a = FILE_OR_DIR
+        self._parser = Parser(
+            usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
+            processopt=self._processopt,
+        )
+        #: a pluginmanager instance
+        self.pluginmanager = pluginmanager
+        self.trace = self.pluginmanager.trace.root.get("config")
+        self.hook = self.pluginmanager.hook
+        self._inicache = {}
+        self._opt2dest = {}
+        self._cleanup = []
+        self._warn = self.pluginmanager._warn
+        self.pluginmanager.register(self, "pytestconfig")
+        self._configured = False
+
+        def do_setns(dic):
+            import pytest
+            setns(pytest, dic)
+
+        self.hook.pytest_namespace.call_historic(do_setns, {})
+        self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
+
+    def add_cleanup(self, func):
+        """ Add a function to be called when the config object gets out of
+        use (usually coninciding with pytest_unconfigure)."""
+        self._cleanup.append(func)
+
+    def _do_configure(self):
+        assert not self._configured
+        self._configured = True
+        self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
+
+    def _ensure_unconfigure(self):
+        if self._configured:
+            self._configured = False
+            self.hook.pytest_unconfigure(config=self)
+            self.hook.pytest_configure._call_history = []
+        while self._cleanup:
+            fin = self._cleanup.pop()
+            fin()
+
+    def warn(self, code, message, fslocation=None):
+        """ generate a warning for this test session. """
+        self.hook.pytest_logwarning.call_historic(kwargs=dict(
+            code=code, message=message,
+            fslocation=fslocation, nodeid=None))
+
+    def get_terminal_writer(self):
+        return self.pluginmanager.get_plugin("terminalreporter")._tw
+
+    def pytest_cmdline_parse(self, pluginmanager, args):
+        # REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
+        self.parse(args)
+        return self
+
+    def notify_exception(self, excinfo, option=None):
+        if option and option.fulltrace:
+            style = "long"
+        else:
+            style = "native"
+        excrepr = excinfo.getrepr(funcargs=True,
+            showlocals=getattr(option, 'showlocals', False),
+            style=style,
+        )
+        res = self.hook.pytest_internalerror(excrepr=excrepr,
+                                             excinfo=excinfo)
+        if not py.builtin.any(res):
+            for line in str(excrepr).split("\n"):
+                sys.stderr.write("INTERNALERROR> %s\n" %line)
+                sys.stderr.flush()
+
+    def cwd_relative_nodeid(self, nodeid):
+        # nodeid's are relative to the rootpath, compute relative to cwd
+        if self.invocation_dir != self.rootdir:
+            fullpath = self.rootdir.join(nodeid)
+            nodeid = self.invocation_dir.bestrelpath(fullpath)
+        return nodeid
+
+    @classmethod
+    def fromdictargs(cls, option_dict, args):
+        """ constructor useable for subprocesses. """
+        config = get_config()
+        config.option.__dict__.update(option_dict)
+        config.parse(args, addopts=False)
+        for x in config.option.plugins:
+            config.pluginmanager.consider_pluginarg(x)
+        return config
+
+    def _processopt(self, opt):
+        for name in opt._short_opts + opt._long_opts:
+            self._opt2dest[name] = opt.dest
+
+        if hasattr(opt, 'default') and opt.dest:
+            if not hasattr(self.option, opt.dest):
+                setattr(self.option, opt.dest, opt.default)
+
+    @hookimpl(trylast=True)
+    def pytest_load_initial_conftests(self, early_config):
+        self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
+
+    def _initini(self, args):
+        ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
+        r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args, warnfunc=self.warn)
+        self.rootdir, self.inifile, self.inicfg = r
+        self._parser.extra_info['rootdir'] = self.rootdir
+        self._parser.extra_info['inifile'] = self.inifile
+        self.invocation_dir = py.path.local()
+        self._parser.addini('addopts', 'extra command line options', 'args')
+        self._parser.addini('minversion', 'minimally required pytest version')
+
+    def _consider_importhook(self, args, entrypoint_name):
+        """Install the PEP 302 import hook if using assertion re-writing.
+
+        Needs to parse the --assert=<mode> option from the commandline
+        and find all the installed plugins to mark them for re-writing
+        by the importhook.
+        """
+        ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
+        mode = ns.assertmode
+        if mode == 'rewrite':
+            try:
+                hook = _pytest.assertion.install_importhook(self)
+            except SystemError:
+                mode = 'plain'
+            else:
+                import pkg_resources
+                self.pluginmanager.rewrite_hook = hook
+                for entrypoint in pkg_resources.iter_entry_points('pytest11'):
+                    # 'RECORD' available for plugins installed normally (pip install)
+                    # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e)
+                    # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa
+                    # so it shouldn't be an issue
+                    for metadata in ('RECORD', 'SOURCES.txt'):
+                        for entry in entrypoint.dist._get_metadata(metadata):
+                            fn = entry.split(',')[0]
+                            is_simple_module = os.sep not in fn and fn.endswith('.py')
+                            is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py')
+                            if is_simple_module:
+                                module_name, ext = os.path.splitext(fn)
+                                hook.mark_rewrite(module_name)
+                            elif is_package:
+                                package_name = os.path.dirname(fn)
+                                hook.mark_rewrite(package_name)
+        self._warn_about_missing_assertion(mode)
+
+    def _warn_about_missing_assertion(self, mode):
+        try:
+            assert False
+        except AssertionError:
+            pass
+        else:
+            if mode == 'plain':
+                sys.stderr.write("WARNING: ASSERTIONS ARE NOT EXECUTED"
+                                 " and FAILING TESTS WILL PASS.  Are you"
+                                 " using python -O?")
+            else:
+                sys.stderr.write("WARNING: assertions not in test modules or"
+                                 " plugins will be ignored"
+                                 " because assert statements are not executed "
+                                 "by the underlying Python interpreter "
+                                 "(are you using python -O?)\n")
+
+    def _preparse(self, args, addopts=True):
+        self._initini(args)
+        if addopts:
+            args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
+            args[:] = self.getini("addopts") + args
+        self._checkversion()
+        entrypoint_name = 'pytest11'
+        self._consider_importhook(args, entrypoint_name)
+        self.pluginmanager.consider_preparse(args)
+        self.pluginmanager.load_setuptools_entrypoints(entrypoint_name)
+        self.pluginmanager.consider_env()
+        self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
+        confcutdir = self.known_args_namespace.confcutdir
+        if self.known_args_namespace.confcutdir is None and self.inifile:
+            confcutdir = py.path.local(self.inifile).dirname
+            self.known_args_namespace.confcutdir = confcutdir
+        try:
+            self.hook.pytest_load_initial_conftests(early_config=self,
+                    args=args, parser=self._parser)
+        except ConftestImportFailure:
+            e = sys.exc_info()[1]
+            if ns.help or ns.version:
+                # we don't want to prevent --help/--version to work
+                # so just let is pass and print a warning at the end
+                self._warn("could not load initial conftests (%s)\n" % e.path)
+            else:
+                raise
+
+    def _checkversion(self):
+        import pytest
+        minver = self.inicfg.get('minversion', None)
+        if minver:
+            ver = minver.split(".")
+            myver = pytest.__version__.split(".")
+            if myver < ver:
+                raise pytest.UsageError(
+                    "%s:%d: requires pytest-%s, actual pytest-%s'" %(
+                    self.inicfg.config.path, self.inicfg.lineof('minversion'),
+                    minver, pytest.__version__))
+
+    def parse(self, args, addopts=True):
+        # parse given cmdline arguments into this config object.
+        assert not hasattr(self, 'args'), (
+                "can only parse cmdline args at most once per Config object")
+        self._origargs = args
+        self.hook.pytest_addhooks.call_historic(
+                                  kwargs=dict(pluginmanager=self.pluginmanager))
+        self._preparse(args, addopts=addopts)
+        # XXX deprecated hook:
+        self.hook.pytest_cmdline_preparse(config=self, args=args)
+        args = self._parser.parse_setoption(args, self.option, namespace=self.option)
+        if not args:
+            cwd = os.getcwd()
+            if cwd == self.rootdir:
+                args = self.getini('testpaths')
+            if not args:
+                args = [cwd]
+        self.args = args
+
+    def addinivalue_line(self, name, line):
+        """ add a line to an ini-file option. The option must have been
+        declared but might not yet be set in which case the line becomes the
+        the first line in its value. """
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line) # modifies the cached list inline
+
+    def getini(self, name):
+        """ return configuration value from an :ref:`ini file <inifiles>`. If the
+        specified name hasn't been registered through a prior
+        :py:func:`parser.addini <pytest.config.Parser.addini>`
+        call (usually from a plugin), a ValueError is raised. """
+        try:
+            return self._inicache[name]
+        except KeyError:
+            self._inicache[name] = val = self._getini(name)
+            return val
+
+    def _getini(self, name):
+        try:
+            description, type, default = self._parser._inidict[name]
+        except KeyError:
+            raise ValueError("unknown configuration value: %r" %(name,))
+        value = self._get_override_ini_value(name)
+        if value is None:
+            try:
+                value = self.inicfg[name]
+            except KeyError:
+                if default is not None:
+                    return default
+                if type is None:
+                    return ''
+                return []
+        if type == "pathlist":
+            dp = py.path.local(self.inicfg.config.path).dirpath()
+            l = []
+            for relpath in shlex.split(value):
+                l.append(dp.join(relpath, abs=True))
+            return l
+        elif type == "args":
+            return shlex.split(value)
+        elif type == "linelist":
+            return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
+        elif type == "bool":
+            return bool(_strtobool(value.strip()))
+        else:
+            assert type is None
+            return value
+
+    def _getconftest_pathlist(self, name, path):
+        try:
+            mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
+        except KeyError:
+            return None
+        modpath = py.path.local(mod.__file__).dirpath()
+        l = []
+        for relroot in relroots:
+            if not isinstance(relroot, py.path.local):
+                relroot = relroot.replace("/", py.path.local.sep)
+                relroot = modpath.join(relroot, abs=True)
+            l.append(relroot)
+        return l
+
+    def _get_override_ini_value(self, name):
+        value = None
+        # override_ini is a list of list, to support both -o foo1=bar1 foo2=bar2 and
+        # and -o foo1=bar1 -o foo2=bar2 options
+        # always use the last item if multiple value set for same ini-name,
+        # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
+        if self.getoption("override_ini", None):
+            for ini_config_list in self.option.override_ini:
+                for ini_config in ini_config_list:
+                    try:
+                        (key, user_ini_value) = ini_config.split("=", 1)
+                    except ValueError:
+                        raise UsageError("-o/--override-ini expects option=value style.")
+                    if key == name:
+                        value = user_ini_value
+        return value
+
+    def getoption(self, name, default=notset, skip=False):
+        """ return command line option value.
+
+        :arg name: name of the option.  You may also specify
+            the literal ``--OPT`` option instead of the "dest" option name.
+        :arg default: default value if no option of that name exists.
+        :arg skip: if True raise pytest.skip if option does not exists
+            or has a None value.
+        """
+        # translate a literal "--OPT" spelling into its argparse "dest" name
+        name = self._opt2dest.get(name, name)
+        try:
+            val = getattr(self.option, name)
+            if val is None and skip:
+                # treat a None value like a missing option when skip=True
+                raise AttributeError(name)
+            return val
+        except AttributeError:
+            if default is not notset:
+                return default
+            if skip:
+                import pytest
+                pytest.skip("no %r option found" %(name,))
+            raise ValueError("no option named %r" % (name,))
+
+    def getvalue(self, name, path=None):
+        """ (deprecated, use getoption()) """
+        # ``path`` is accepted for backward compatibility but ignored
+        return self.getoption(name)
+
+    def getvalueorskip(self, name, path=None):
+        """ (deprecated, use getoption(skip=True)) """
+        # ``path`` is accepted for backward compatibility but ignored
+        return self.getoption(name, skip=True)
+
+def exists(path, ignore=EnvironmentError):
+    """Return True if ``path`` exists, treating ``ignore`` exceptions
+    (e.g. permission errors) as "does not exist"."""
+    try:
+        return path.check()
+    except ignore:
+        return False
+
+def getcfg(args, warnfunc=None):
+    """
+    Search the list of arguments for a valid ini-file for pytest,
+    and return a tuple of (rootdir, inifile, cfg-dict).
+
+    note: warnfunc is an optional function used to warn
+        about ini-files that use deprecated features.
+        This parameter should be removed when pytest
+        adopts standard deprecation warnings (#1804).
+    """
+    from _pytest.deprecated import SETUP_CFG_PYTEST
+    inibasenames = ["pytest.ini", "tox.ini", "setup.cfg"]
+    # drop command line flags, keep only positional (path) arguments
+    args = [x for x in args if not str(x).startswith("-")]
+    if not args:
+        args = [py.path.local()]
+    for arg in args:
+        arg = py.path.local(arg)
+        # walk from each argument upwards towards the filesystem root
+        for base in arg.parts(reverse=True):
+            for inibasename in inibasenames:
+                p = base.join(inibasename)
+                if exists(p):
+                    iniconfig = py.iniconfig.IniConfig(p)
+                    if 'pytest' in iniconfig.sections:
+                        # a [pytest] section in setup.cfg is deprecated in
+                        # favor of [tool:pytest]
+                        if inibasename == 'setup.cfg' and warnfunc:
+                            warnfunc('C1', SETUP_CFG_PYTEST)
+                        return base, p, iniconfig['pytest']
+                    if inibasename == 'setup.cfg' and 'tool:pytest' in iniconfig.sections:
+                        return base, p, iniconfig['tool:pytest']
+                    elif inibasename == "pytest.ini":
+                        # allowed to be empty
+                        return base, p, {}
+    return None, None, None
+
+
+def get_common_ancestor(args):
+    """Return the closest common ancestor directory of the given paths.
+
+    Flags and non-existing paths are skipped; falls back to the current
+    working directory if no usable argument remains.
+    """
+    # args are what we get after early command line parsing (usually
+    # strings, but can be py.path.local objects as well)
+    common_ancestor = None
+    for arg in args:
+        if str(arg)[0] == "-":
+            continue
+        p = py.path.local(arg)
+        if not p.exists():
+            continue
+        if common_ancestor is None:
+            common_ancestor = p
+        else:
+            if p.relto(common_ancestor) or p == common_ancestor:
+                # p lies inside the current ancestor: nothing to widen
+                continue
+            elif common_ancestor.relto(p):
+                # current ancestor lies inside p: p becomes the new ancestor
+                common_ancestor = p
+            else:
+                shared = p.common(common_ancestor)
+                if shared is not None:
+                    common_ancestor = shared
+    if common_ancestor is None:
+        common_ancestor = py.path.local()
+    elif common_ancestor.isfile():
+        # a file argument contributes its containing directory
+        common_ancestor = common_ancestor.dirpath()
+    return common_ancestor
+
+
+def get_dirs_from_args(args):
+    """Return the existing paths among the non-flag arguments,
+    as py.path.local objects."""
+    return [d for d in (py.path.local(x) for x in args
+                        if not str(x).startswith("-"))
+            if d.exists()]
+
+
+def determine_setup(inifile, args, warnfunc=None):
+    """Determine the rootdir, inifile and ini configuration for a run.
+
+    Returns a ``(rootdir, inifile, inicfg)`` tuple; ``inicfg`` is an empty
+    dict when no usable [pytest] configuration is found.
+    """
+    dirs = get_dirs_from_args(args)
+    if inifile:
+        # an explicit ini-file was given: use it, only the rootdir is derived
+        iniconfig = py.iniconfig.IniConfig(inifile)
+        try:
+            inicfg = iniconfig["pytest"]
+        except KeyError:
+            inicfg = None
+        rootdir = get_common_ancestor(dirs)
+    else:
+        ancestor = get_common_ancestor(dirs)
+        rootdir, inifile, inicfg = getcfg([ancestor], warnfunc=warnfunc)
+        if rootdir is None:
+            # no ini-file found above the ancestor: look for a setup.py
+            for rootdir in ancestor.parts(reverse=True):
+                if rootdir.join("setup.py").exists():
+                    break
+            else:
+                # last resort: search each argument dir, then fall back to
+                # the common ancestor of cwd and the arguments
+                rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc)
+                if rootdir is None:
+                    rootdir = get_common_ancestor([py.path.local(), ancestor])
+                    is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep
+                    if is_fs_root:
+                        # avoid using the filesystem root as rootdir
+                        rootdir = ancestor
+    return rootdir, inifile, inicfg or {}
+
+
+def setns(obj, dic):
+    """Recursively set the names in ``dic`` on ``obj`` and on ``pytest``.
+
+    Dict values create (or reuse) sub-modules registered in ``sys.modules``
+    under ``pytest.<name>``; other values become plain attributes.
+    """
+    import pytest
+    for name, value in dic.items():
+        if isinstance(value, dict):
+            mod = getattr(obj, name, None)
+            if mod is None:
+                modname = "pytest.%s" % name
+                mod = types.ModuleType(modname)
+                sys.modules[modname] = mod
+                mod.__all__ = []
+                setattr(obj, name, mod)
+            obj.__all__.append(name)
+            setns(mod, value)
+        else:
+            setattr(obj, name, value)
+            obj.__all__.append(name)
+            #if obj != pytest:
+            #    pytest.__all__.append(name)
+            # mirror every plain value onto the top-level pytest namespace
+            setattr(pytest, name, value)
+
+
+def create_terminal_writer(config, *args, **kwargs):
+    """Create a TerminalWriter instance configured according to the options
+    in the config object. Every code which requires a TerminalWriter object
+    and has access to a config object should use this function.
+    """
+    tw = py.io.TerminalWriter(*args, **kwargs)
+    # --color=yes/no force markup on/off; any other value keeps the
+    # TerminalWriter's own auto-detection
+    if config.option.color == 'yes':
+        tw.hasmarkup = True
+    if config.option.color == 'no':
+        tw.hasmarkup = False
+    return tw
+
+
+def _strtobool(val):
+    """Convert a string representation of truth to true (1) or false (0).
+
+    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+    are 'n', 'no', 'f', 'false', 'off', and '0'.  Raises ValueError if
+    'val' is anything else.
+
+    .. note:: copied from distutils.util
+    """
+    # comparison is case-insensitive
+    val = val.lower()
+    if val in ('y', 'yes', 't', 'true', 'on', '1'):
+        return 1
+    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+        return 0
+    else:
+        raise ValueError("invalid truth value %r" % (val,))
diff --git a/lib/spack/external/_pytest/debugging.py b/lib/spack/external/_pytest/debugging.py
new file mode 100644
index 0000000000..d96170bd8b
--- /dev/null
+++ b/lib/spack/external/_pytest/debugging.py
@@ -0,0 +1,124 @@
+""" interactive debugging with PDB, the Python Debugger. """
+from __future__ import absolute_import
+import pdb
+import sys
+
+import pytest
+
+
+def pytest_addoption(parser):
+    """Add the --pdb and --pdbcls command line options."""
+    group = parser.getgroup("general")
+    group._addoption(
+        '--pdb', dest="usepdb", action="store_true",
+        help="start the interactive Python debugger on errors.")
+    group._addoption(
+        '--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
+        help="start a custom interactive Python debugger on errors. "
+             "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")
+
+def pytest_namespace():
+    """Expose ``pytest.set_trace`` backed by the capture-aware pytestPDB."""
+    return {'set_trace': pytestPDB().set_trace}
+
+def pytest_configure(config):
+    """Wire up PDB support and monkeypatch ``pdb.set_trace``.
+
+    Registers the PdbInvoke plugin when --pdb/--pdbcls is given, resolves
+    the custom debugger class, and installs a cleanup callback that
+    restores the original state after the session.
+    """
+    if config.getvalue("usepdb") or config.getvalue("usepdb_cls"):
+        config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
+        if config.getvalue("usepdb_cls"):
+            # --pdbcls value has the form "modulename:classname"
+            modname, classname = config.getvalue("usepdb_cls").split(":")
+            __import__(modname)
+            pdb_cls = getattr(sys.modules[modname], classname)
+        else:
+            pdb_cls = pdb.Pdb
+        pytestPDB._pdb_cls = pdb_cls
+
+    old = (pdb.set_trace, pytestPDB._pluginmanager)
+
+    def fin():
+        # restore the globals patched below
+        pdb.set_trace, pytestPDB._pluginmanager = old
+        pytestPDB._config = None
+        pytestPDB._pdb_cls = pdb.Pdb
+
+    pdb.set_trace = pytest.set_trace
+    pytestPDB._pluginmanager = config.pluginmanager
+    pytestPDB._config = config
+    config._cleanup.append(fin)
+
+class pytestPDB:
+    """ Pseudo PDB that defers to the real pdb. """
+    # class-level state installed by pytest_configure and reset by its
+    # cleanup callback
+    _pluginmanager = None
+    _config = None
+    _pdb_cls = pdb.Pdb
+
+    def set_trace(self):
+        """ invoke PDB set_trace debugging, dropping any IO capturing. """
+        import _pytest.config
+        # debug the caller's frame, not this wrapper method
+        frame = sys._getframe().f_back
+        if self._pluginmanager is not None:
+            capman = self._pluginmanager.getplugin("capturemanager")
+            if capman:
+                capman.suspendcapture(in_=True)
+            tw = _pytest.config.create_terminal_writer(self._config)
+            tw.line()
+            tw.sep(">", "PDB set_trace (IO-capturing turned off)")
+            self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
+        self._pdb_cls().set_trace(frame)
+
+
+class PdbInvoke:
+    """Plugin that enters the debugger on test failures and internal errors."""
+
+    def pytest_exception_interact(self, node, call, report):
+        """Flush captured output, then post-mortem debug the failure."""
+        capman = node.config.pluginmanager.getplugin("capturemanager")
+        if capman:
+            out, err = capman.suspendcapture(in_=True)
+            sys.stdout.write(out)
+            sys.stdout.write(err)
+        _enter_pdb(node, call.excinfo, report)
+
+    def pytest_internalerror(self, excrepr, excinfo):
+        """Print the internal error and post-mortem debug it."""
+        for line in str(excrepr).split("\n"):
+            sys.stderr.write("INTERNALERROR> %s\n" %line)
+            sys.stderr.flush()
+        tb = _postmortem_traceback(excinfo)
+        post_mortem(tb)
+
+
+def _enter_pdb(node, excinfo, rep):
+    """Print the failure report and start a post-mortem debugging session.
+
+    Marks the report with ``_pdbshown`` and returns it.
+    """
+    # XXX we re-use the TerminalReporter's terminalwriter
+    # because this seems to avoid some encoding related troubles
+    # for not completely clear reasons.
+    tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
+    tw.line()
+    tw.sep(">", "traceback")
+    rep.toterminal(tw)
+    tw.sep(">", "entering PDB")
+    tb = _postmortem_traceback(excinfo)
+    post_mortem(tb)
+    rep._pdbshown = True
+    return rep
+
+
+def _postmortem_traceback(excinfo):
+    """Return the traceback object to hand to the post-mortem debugger."""
+    # A doctest.UnexpectedException is not useful for post_mortem.
+    # Use the underlying exception instead:
+    from doctest import UnexpectedException
+    if isinstance(excinfo.value, UnexpectedException):
+        return excinfo.value.exc_info[2]
+    else:
+        return excinfo._excinfo[2]
+
+
+def _find_last_non_hidden_frame(stack):
+    """Return the index of the innermost stack entry whose frame is not
+    marked with ``__tracebackhide__``; index 0 if all frames are hidden."""
+    i = max(0, len(stack) - 1)
+    while i and stack[i][0].f_locals.get("__tracebackhide__", False):
+        i -= 1
+    return i
+
+
+def post_mortem(t):
+    """Run a post-mortem debugging session on traceback ``t``."""
+    class Pdb(pytestPDB._pdb_cls):
+        def get_stack(self, f, t):
+            stack, i = pdb.Pdb.get_stack(self, f, t)
+            if f is None:
+                # in post-mortem mode start at the last non-hidden frame
+                i = _find_last_non_hidden_frame(stack)
+            return stack, i
+    p = Pdb()
+    p.reset()
+    p.interaction(None, t)
diff --git a/lib/spack/external/_pytest/deprecated.py b/lib/spack/external/_pytest/deprecated.py
new file mode 100644
index 0000000000..6edc475f6e
--- /dev/null
+++ b/lib/spack/external/_pytest/deprecated.py
@@ -0,0 +1,24 @@
+"""
+This module contains deprecation messages and bits of code used elsewhere in the codebase
+that are planned to be removed in the next pytest release.
+
+Keeping it in a central location makes it easy to track what is deprecated and should
+be removed when the time comes.
+"""
+
+
+# user-facing deprecation messages, referenced from the rest of the codebase
+MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \
+                      'pass a list of arguments instead.'
+
+YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0'
+
+FUNCARG_PREFIX = (
+    '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated '
+    'and scheduled to be removed in pytest 4.0.  '
+    'Please remove the prefix and use the @pytest.fixture decorator instead.')
+
+SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool:pytest] instead.'
+
+GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue"
+
+RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0'
diff --git a/lib/spack/external/_pytest/doctest.py b/lib/spack/external/_pytest/doctest.py
new file mode 100644
index 0000000000..f4782dded5
--- /dev/null
+++ b/lib/spack/external/_pytest/doctest.py
@@ -0,0 +1,331 @@
+""" discover and run doctests in modules and test files."""
+from __future__ import absolute_import
+
+import traceback
+
+import pytest
+from _pytest._code.code import ExceptionInfo, ReprFileLocation, TerminalRepr
+from _pytest.fixtures import FixtureRequest
+
+
+# valid values for the --doctest-report command line option
+DOCTEST_REPORT_CHOICE_NONE = 'none'
+DOCTEST_REPORT_CHOICE_CDIFF = 'cdiff'
+DOCTEST_REPORT_CHOICE_NDIFF = 'ndiff'
+DOCTEST_REPORT_CHOICE_UDIFF = 'udiff'
+DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = 'only_first_failure'
+
+DOCTEST_REPORT_CHOICES = (
+    DOCTEST_REPORT_CHOICE_NONE,
+    DOCTEST_REPORT_CHOICE_CDIFF,
+    DOCTEST_REPORT_CHOICE_NDIFF,
+    DOCTEST_REPORT_CHOICE_UDIFF,
+    DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE,
+)
+
+def pytest_addoption(parser):
+    """Add the doctest ini-option and the --doctest-* command line options."""
+    parser.addini('doctest_optionflags', 'option flags for doctests',
+        type="args", default=["ELLIPSIS"])
+    group = parser.getgroup("collect")
+    group.addoption("--doctest-modules",
+        action="store_true", default=False,
+        help="run doctests in all .py modules",
+        dest="doctestmodules")
+    group.addoption("--doctest-report",
+        type=str.lower, default="udiff",
+        help="choose another output format for diffs on doctest failure",
+        choices=DOCTEST_REPORT_CHOICES,
+        dest="doctestreport")
+    group.addoption("--doctest-glob",
+        action="append", default=[], metavar="pat",
+        help="doctests file matching pattern, default: test*.txt",
+        dest="doctestglob")
+    group.addoption("--doctest-ignore-import-errors",
+        action="store_true", default=False,
+        help="ignore doctest ImportErrors",
+        dest="doctest_ignore_import_errors")
+
+
+def pytest_collect_file(path, parent):
+    """Collect .py modules (under --doctest-modules) and doctest text files."""
+    config = parent.config
+    if path.ext == ".py":
+        if config.option.doctestmodules:
+            return DoctestModule(path, parent)
+    elif _is_doctest(config, path, parent):
+        return DoctestTextfile(path, parent)
+
+
+def _is_doctest(config, path, parent):
+    """Return True if ``path`` is a doctest text file that should be collected:
+    a .txt/.rst given directly on the command line, or any file matching
+    the --doctest-glob patterns (default ``test*.txt``)."""
+    if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
+        return True
+    globs = config.getoption("doctestglob") or ['test*.txt']
+    for glob in globs:
+        if path.check(fnmatch=glob):
+            return True
+    return False
+
+
+class ReprFailDoctest(TerminalRepr):
+    """Terminal representation of a doctest failure: the formatted lines
+    followed by the file location."""
+
+    def __init__(self, reprlocation, lines):
+        self.reprlocation = reprlocation
+        self.lines = lines
+
+    def toterminal(self, tw):
+        for line in self.lines:
+            tw.line(line)
+        self.reprlocation.toterminal(tw)
+
+
+class DoctestItem(pytest.Item):
+    """A single collected doctest, executed through a doctest runner."""
+
+    def __init__(self, name, parent, runner=None, dtest=None):
+        super(DoctestItem, self).__init__(name, parent)
+        self.runner = runner
+        self.dtest = dtest
+        self.obj = None
+        self.fixture_request = None
+
+    def setup(self):
+        """Inject fixture values (and a ``getfixture`` helper) into the
+        doctest's globals."""
+        if self.dtest is not None:
+            self.fixture_request = _setup_fixtures(self)
+            globs = dict(getfixture=self.fixture_request.getfixturevalue)
+            for name, value in self.fixture_request.getfixturevalue('doctest_namespace').items():
+                globs[name] = value
+            self.dtest.globs.update(globs)
+
+    def runtest(self):
+        _check_all_skipped(self.dtest)
+        self.runner.run(self.dtest)
+
+    def repr_failure(self, excinfo):
+        """Return a ReprFailDoctest for doctest failures and unexpected
+        exceptions; defer to the base class for anything else."""
+        import doctest
+        if excinfo.errisinstance((doctest.DocTestFailure,
+                                  doctest.UnexpectedException)):
+            doctestfailure = excinfo.value
+            example = doctestfailure.example
+            test = doctestfailure.test
+            filename = test.filename
+            if test.lineno is None:
+                lineno = None
+            else:
+                # example.lineno is relative to the start of the docstring
+                lineno = test.lineno + example.lineno + 1
+            message = excinfo.type.__name__
+            reprlocation = ReprFileLocation(filename, lineno, message)
+            checker = _get_checker()
+            report_choice = _get_report_choice(self.config.getoption("doctestreport"))
+            if lineno is not None:
+                lines = doctestfailure.test.docstring.splitlines(False)
+                # add line numbers to the left of the error message
+                lines = ["%03d %s" % (i + test.lineno + 1, x)
+                         for (i, x) in enumerate(lines)]
+                # trim docstring error lines to 10
+                lines = lines[example.lineno - 9:example.lineno + 1]
+            else:
+                lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
+                indent = '>>>'
+                for line in example.source.splitlines():
+                    lines.append('??? %s %s' % (indent, line))
+                    indent = '...'
+            if excinfo.errisinstance(doctest.DocTestFailure):
+                lines += checker.output_difference(example,
+                        doctestfailure.got, report_choice).split("\n")
+            else:
+                inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
+                lines += ["UNEXPECTED EXCEPTION: %s" %
+                            repr(inner_excinfo.value)]
+                lines += traceback.format_exception(*excinfo.value.exc_info)
+            return ReprFailDoctest(reprlocation, lines)
+        else:
+            return super(DoctestItem, self).repr_failure(excinfo)
+
+    def reportinfo(self):
+        return self.fspath, None, "[doctest] %s" % self.name
+
+
+def _get_flag_lookup():
+    """Map doctest option-flag names (as used in the ini-file) to their
+    integer flag values."""
+    import doctest
+    return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
+                DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
+                NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
+                ELLIPSIS=doctest.ELLIPSIS,
+                IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
+                COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
+                ALLOW_UNICODE=_get_allow_unicode_flag(),
+                ALLOW_BYTES=_get_allow_bytes_flag(),
+                )
+
+
+def get_optionflags(parent):
+    """OR together the doctest option flags named in the
+    ``doctest_optionflags`` ini-option."""
+    optionflags_str = parent.config.getini("doctest_optionflags")
+    flag_lookup_table = _get_flag_lookup()
+    flag_acc = 0
+    for flag in optionflags_str:
+        flag_acc |= flag_lookup_table[flag]
+    return flag_acc
+
+
+class DoctestTextfile(pytest.Module):
+    """Collector for doctests in a plain text file."""
+    obj = None
+
+    def collect(self):
+        """Parse the text file and yield a single DoctestItem if it
+        contains any examples."""
+        import doctest
+
+        # inspired by doctest.testfile; ideally we would use it directly,
+        # but it doesn't support passing a custom checker
+        text = self.fspath.read()
+        filename = str(self.fspath)
+        name = self.fspath.basename
+        globs = {'__name__': '__main__'}
+
+
+        optionflags = get_optionflags(self)
+        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+                                     checker=_get_checker())
+
+        parser = doctest.DocTestParser()
+        test = parser.get_doctest(text, globs, name, filename, 0)
+        if test.examples:
+            yield DoctestItem(test.name, self, runner, test)
+
+
+def _check_all_skipped(test):
+    """raises pytest.skip() if all examples in the given DocTest have the SKIP
+    option set.
+    """
+    import doctest
+    # doctest.SKIP is a per-example option flag
+    all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
+    if all_skipped:
+        pytest.skip('all tests skipped by +SKIP option')
+
+
+class DoctestModule(pytest.Module):
+    """Collector for doctests inside a Python module."""
+
+    def collect(self):
+        """Yield a DoctestItem for every non-empty doctest found in the
+        module's docstrings."""
+        import doctest
+        if self.fspath.basename == "conftest.py":
+            # conftest files must be imported through the plugin manager
+            module = self.config.pluginmanager._importconftest(self.fspath)
+        else:
+            try:
+                module = self.fspath.pyimport()
+            except ImportError:
+                if self.config.getvalue('doctest_ignore_import_errors'):
+                    pytest.skip('unable to import module %r' % self.fspath)
+                else:
+                    raise
+        # uses internal doctest module parsing mechanism
+        finder = doctest.DocTestFinder()
+        optionflags = get_optionflags(self)
+        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+                                     checker=_get_checker())
+        for test in finder.find(module, module.__name__):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem(test.name, self, runner, test)
+
+
+def _setup_fixtures(doctest_item):
+    """
+    Used by DoctestTextfile and DoctestItem to setup fixture information.
+
+    Returns the filled FixtureRequest for the item.
+    """
+    def func():
+        # dummy function so the fixture machinery has something to inspect
+        pass
+
+    doctest_item.funcargs = {}
+    fm = doctest_item.session._fixturemanager
+    doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
+                                                  cls=None, funcargs=False)
+    fixture_request = FixtureRequest(doctest_item)
+    fixture_request._fillfixtures()
+    return fixture_request
+
+
+def _get_checker():
+    """
+    Returns a doctest.OutputChecker subclass that takes in account the
+    ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
+    to strip b'' prefixes.
+    Useful when the same doctest should run in Python 2 and Python 3.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    # the class is cached on the function object after the first call
+    if hasattr(_get_checker, 'LiteralsOutputChecker'):
+        return _get_checker.LiteralsOutputChecker()
+
+    import doctest
+    import re
+
+    class LiteralsOutputChecker(doctest.OutputChecker):
+        """
+        Copied from doctest_nose_plugin.py from the nltk project:
+            https://github.com/nltk/nltk
+
+        Further extended to also support byte literals.
+        """
+
+        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+
+        def check_output(self, want, got, optionflags):
+            res = doctest.OutputChecker.check_output(self, want, got,
+                                                     optionflags)
+            if res:
+                return True
+
+            allow_unicode = optionflags & _get_allow_unicode_flag()
+            allow_bytes = optionflags & _get_allow_bytes_flag()
+            if not allow_unicode and not allow_bytes:
+                return False
+
+            else:  # pragma: no cover
+                def remove_prefixes(regex, txt):
+                    return re.sub(regex, r'\1\2', txt)
+
+                # retry the comparison with string-literal prefixes stripped
+                if allow_unicode:
+                    want = remove_prefixes(self._unicode_literal_re, want)
+                    got = remove_prefixes(self._unicode_literal_re, got)
+                if allow_bytes:
+                    want = remove_prefixes(self._bytes_literal_re, want)
+                    got = remove_prefixes(self._bytes_literal_re, got)
+                res = doctest.OutputChecker.check_output(self, want, got,
+                                                         optionflags)
+                return res
+
+    _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
+    return _get_checker.LiteralsOutputChecker()
+
+
+def _get_allow_unicode_flag():
+    """
+    Registers and returns the ALLOW_UNICODE flag.
+    """
+    # imported lazily to avoid pulling in doctest during option parsing
+    import doctest
+    return doctest.register_optionflag('ALLOW_UNICODE')
+
+
+def _get_allow_bytes_flag():
+    """
+    Registers and returns the ALLOW_BYTES flag.
+    """
+    # imported lazily to avoid pulling in doctest during option parsing
+    import doctest
+    return doctest.register_optionflag('ALLOW_BYTES')
+
+
+def _get_report_choice(key):
+    """
+    This function returns the actual `doctest` module flag value, we want to do it as late as possible to avoid
+    importing `doctest` and all its dependencies when parsing options, as it adds overhead and breaks tests.
+    """
+    import doctest
+
+    # maps the --doctest-report choice names to doctest REPORT_* flags
+    return {
+        DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF,
+        DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF,
+        DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF,
+        DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE,
+        DOCTEST_REPORT_CHOICE_NONE: 0,
+    }[key]
+
+@pytest.fixture(scope='session')
+def doctest_namespace():
+    """
+    Inject names into the doctest namespace.
+    """
+    # session-scoped so plugins can populate it once per test run
+    return dict()
diff --git a/lib/spack/external/_pytest/fixtures.py b/lib/spack/external/_pytest/fixtures.py
new file mode 100644
index 0000000000..28bcd4d8d7
--- /dev/null
+++ b/lib/spack/external/_pytest/fixtures.py
@@ -0,0 +1,1134 @@
+import sys
+
+from py._code.code import FormattedExcinfo
+
+import py
+import pytest
+import warnings
+
+import inspect
+import _pytest
+from _pytest._code.code import TerminalRepr
+from _pytest.compat import (
+    NOTSET, exc_clear, _format_args,
+    getfslineno, get_real_func,
+    is_generator, isclass, getimfunc,
+    getlocation, getfuncargnames,
+)
+
+def pytest_sessionstart(session):
+    """Create the session's FixtureManager."""
+    session._fixturemanager = FixtureManager(session)
+
+
+# filled in by pytest_namespace(): maps scope name -> collector node class
+scopename2class = {}
+
+
+# which request properties are available in each scope; every scope also
+# inherits the properties of the scopes above it
+scope2props = dict(session=())
+scope2props["module"] = ("fspath", "module")
+scope2props["class"] = scope2props["module"] + ("cls",)
+scope2props["instance"] = scope2props["class"] + ("instance", )
+scope2props["function"] = scope2props["instance"] + ("function", "keywords")
+
+def scopeproperty(name=None, doc=None):
+    """Decorator factory turning a method into a property that raises
+    AttributeError when accessed from a scope that does not support it
+    (per ``scope2props``)."""
+    def decoratescope(func):
+        scopename = name or func.__name__
+
+        def provide(self):
+            if func.__name__ in scope2props[self.scope]:
+                return func(self)
+            raise AttributeError("%s not available in %s-scoped context" % (
+                scopename, self.scope))
+
+        return property(provide, None, None, func.__doc__)
+    return decoratescope
+
+
+def pytest_namespace():
+    """Register the scope->class mapping and expose fixture helpers on the
+    ``pytest`` namespace."""
+    scopename2class.update({
+        'class': pytest.Class,
+        'module': pytest.Module,
+        'function': pytest.Item,
+    })
+    return {
+        'fixture': fixture,
+        'yield_fixture': yield_fixture,
+        'collect': {'_fillfuncargs': fillfixtures}
+    }
+
+
+def get_scope_node(node, scope):
+    """Return the ancestor node of ``node`` corresponding to ``scope``
+    ("session" maps to the session itself); raises ValueError for an
+    unknown scope."""
+    cls = scopename2class.get(scope)
+    if cls is None:
+        if scope == "session":
+            return node.session
+        raise ValueError("unknown scope")
+    return node.getparent(cls)
+
+
+def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
+    """Create artificial FixtureDefs for directly parametrized funcargs."""
+    # this function will transform all collected calls to a functions
+    # if they use direct funcargs (i.e. direct parametrization)
+    # because we want later test execution to be able to rely on
+    # an existing FixtureDef structure for all arguments.
+    # XXX we can probably avoid this algorithm  if we modify CallSpec2
+    # to directly care for creating the fixturedefs within its methods.
+    if not metafunc._calls[0].funcargs:
+        return # this function call does not have direct parametrization
+    # collect funcargs of all callspecs into a list of values
+    arg2params = {}
+    arg2scope = {}
+    for callspec in metafunc._calls:
+        for argname, argvalue in callspec.funcargs.items():
+            assert argname not in callspec.params
+            callspec.params[argname] = argvalue
+            arg2params_list = arg2params.setdefault(argname, [])
+            callspec.indices[argname] = len(arg2params_list)
+            arg2params_list.append(argvalue)
+            if argname not in arg2scope:
+                scopenum = callspec._arg2scopenum.get(argname,
+                                                      scopenum_function)
+                arg2scope[argname] = scopes[scopenum]
+        callspec.funcargs.clear()
+
+    # register artificial FixtureDef's so that later at test execution
+    # time we can rely on a proper FixtureDef to exist for fixture setup.
+    arg2fixturedefs = metafunc._arg2fixturedefs
+    for argname, valuelist in arg2params.items():
+        # if we have a scope that is higher than function we need
+        # to make sure we only ever create an according fixturedef on
+        # a per-scope basis. We thus store and cache the fixturedef on the
+        # node related to the scope.
+        scope = arg2scope[argname]
+        node = None
+        if scope != "function":
+            node = get_scope_node(collector, scope)
+            if node is None:
+                assert scope == "class" and isinstance(collector, pytest.Module)
+                # use module-level collector for class-scope (for now)
+                node = collector
+        if node and argname in node._name2pseudofixturedef:
+            arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
+        else:
+            fixturedef =  FixtureDef(fixturemanager, '', argname,
+                           get_direct_param_fixture_func,
+                           arg2scope[argname],
+                           valuelist, False, False)
+            arg2fixturedefs[argname] = [fixturedef]
+            if node is not None:
+                node._name2pseudofixturedef[argname] = fixturedef
+
+
+
+def getfixturemarker(obj):
+    """ return fixturemarker or None if it doesn't exist or raised
+    exceptions."""
+    try:
+        return getattr(obj, "_pytestfixturefunction", None)
+    except KeyboardInterrupt:
+        raise
+    except Exception:
+        # some objects raise errors like request (from flask import request)
+        # we don't expect them to be fixture functions
+        return None
+
+
+
+def get_parametrized_fixture_keys(item, scopenum):
+    """ return list of keys for all parametrized arguments which match
+    the specified scope. """
+    assert scopenum < scopenum_function  # function
+    try:
+        cs = item.callspec
+    except AttributeError:
+        # item is not parametrized at all
+        pass
+    else:
+        # cs.indices.items() is random order of argnames but
+        # then again different functions (items) can change order of
+        # arguments so it doesn't matter much probably
+        for argname, param_index in cs.indices.items():
+            if cs._arg2scopenum[argname] != scopenum:
+                continue
+            if scopenum == 0:    # session
+                key = (argname, param_index)
+            elif scopenum == 1:  # module
+                key = (argname, param_index, item.fspath)
+            elif scopenum == 2:  # class
+                key = (argname, param_index, item.fspath, item.cls)
+            yield key
+
+
+# algorithm for sorting on a per-parametrized resource setup basis
+# it is called for scopenum==0 (session) first and performs sorting
+# down to the lower scopes such as to minimize number of "high scope"
+# setups and teardowns
+
+def reorder_items(items):
+    """Reorder test items to minimize setup/teardown of parametrized
+    higher-scoped fixtures (see the comment block above)."""
+    argkeys_cache = {}
+    for scopenum in range(0, scopenum_function):
+        argkeys_cache[scopenum] = d = {}
+        for item in items:
+            keys = set(get_parametrized_fixture_keys(item, scopenum))
+            if keys:
+                d[item] = keys
+    return reorder_items_atscope(items, set(), argkeys_cache, 0)
+
+def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
+    """Recursively group ``items`` by parametrized fixture keys at the
+    given scope, descending into lower scopes for each group."""
+    if scopenum >= scopenum_function or len(items) < 3:
+        # nothing to gain from reordering tiny lists / function scope
+        return items
+    items_done = []
+    while 1:
+        items_before, items_same, items_other, newignore = \
+                slice_items(items, ignore, argkeys_cache[scopenum])
+        items_before = reorder_items_atscope(
+                            items_before, ignore, argkeys_cache,scopenum+1)
+        if items_same is None:
+            # nothing to reorder in this scope
+            assert items_other is None
+            return items_done + items_before
+        items_done.extend(items_before)
+        items = items_same + items_other
+        ignore = newignore
+
+
+def slice_items(items, ignore, scoped_argkeys_cache):
+    """Partition ``items`` around the first unseen fixture key at this scope.
+
+    Returns ``(items_before, items_same, items_other, newignore)``, or
+    ``(items, None, None, None)`` when there is nothing left to slice.
+    """
+    # we pick the first item which uses a fixture instance in the
+    # requested scope and which we haven't seen yet.  We slice the input
+    # items list into a list of items_nomatch, items_same and
+    # items_other
+    if scoped_argkeys_cache:  # do we need to do work at all?
+        it = iter(items)
+        # first find a slicing key
+        for i, item in enumerate(it):
+            argkeys = scoped_argkeys_cache.get(item)
+            if argkeys is not None:
+                argkeys = argkeys.difference(ignore)
+                if argkeys:  # found a slicing key
+                    slicing_argkey = argkeys.pop()
+                    items_before = items[:i]
+                    items_same = [item]
+                    items_other = []
+                    # now slice the remainder of the list
+                    for item in it:
+                        argkeys = scoped_argkeys_cache.get(item)
+                        if argkeys and slicing_argkey in argkeys and \
+                            slicing_argkey not in ignore:
+                            items_same.append(item)
+                        else:
+                            items_other.append(item)
+                    newignore = ignore.copy()
+                    newignore.add(slicing_argkey)
+                    return (items_before, items_same, items_other, newignore)
+    return items, None, None, None
+
+
+
+class FuncargnamesCompatAttr:
+    """ helper class so that Metafunc, Function and FixtureRequest
+    don't need to each define the "funcargnames" compatibility attribute.
+    """
+    @property
+    def funcargnames(self):
+        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+        # delegates to the subclass-provided ``fixturenames`` attribute
+        return self.fixturenames
+
+
+def fillfixtures(function):
+    """ fill missing funcargs for a test function. """
+    try:
+        request = function._request
+    except AttributeError:
+        # XXX this special code path is only expected to execute
+        # with the oejskit plugin.  It uses classes with funcargs
+        # and we thus have to work a bit to allow this.
+        fm = function.session._fixturemanager
+        fi = fm.getfixtureinfo(function.parent, function.obj, None)
+        function._fixtureinfo = fi
+        request = function._request = FixtureRequest(function)
+        request._fillfixtures()
+        # prune out funcargs for jstests
+        newfuncargs = {}
+        for name in fi.argnames:
+            newfuncargs[name] = function.funcargs[name]
+        function.funcargs = newfuncargs
+    else:
+        # normal path: a FixtureRequest was already attached at collection
+        request._fillfixtures()
+
+
+
+def get_direct_param_fixture_func(request):
+    """ fixture function used for direct parametrization: simply hands back
+    the current parameter of the request. """
+    return request.param
+
+class FuncFixtureInfo:
+    """ container for the fixture information of one test function. """
+    def __init__(self, argnames, names_closure, name2fixturedefs):
+        # direct fixture arguments of the test function
+        self.argnames = argnames
+        # transitive closure of all fixture names needed for setup
+        self.names_closure = names_closure
+        # mapping of fixture name -> list of matching FixtureDef objects
+        self.name2fixturedefs = name2fixturedefs
+
+
+class FixtureRequest(FuncargnamesCompatAttr):
+    """ A request for a fixture from a test or fixture function.
+
+    A request object gives access to the requesting test context
+    and has an optional ``param`` attribute in case
+    the fixture is parametrized indirectly.
+    """
+
+    def __init__(self, pyfuncitem):
+        self._pyfuncitem = pyfuncitem
+        #: fixture for which this request is being performed
+        self.fixturename = None
+        #: Scope string, one of "function", "class", "module", "session"
+        self.scope = "function"
+        self._fixture_values = {}  # argname -> fixture value
+        self._fixture_defs = {}  # argname -> FixtureDef
+        fixtureinfo = pyfuncitem._fixtureinfo
+        self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
+        self._arg2index = {}
+        self._fixturemanager = pyfuncitem.session._fixturemanager
+
+    @property
+    def fixturenames(self):
+        # backward incompatible note: now a readonly property
+        return list(self._pyfuncitem._fixtureinfo.names_closure)
+
+    @property
+    def node(self):
+        """ underlying collection node (depends on current request scope)"""
+        return self._getscopeitem(self.scope)
+
+
+    def _getnextfixturedef(self, argname):
+        # Return the next-outer FixtureDef for ``argname``, walking the
+        # override chain from the end of the fixturedefs list backwards.
+        fixturedefs = self._arg2fixturedefs.get(argname, None)
+        if fixturedefs is None:
+            # we arrive here because of a dynamic call to
+            # getfixturevalue(argname) usage which was naturally
+            # not known at parsing/collection time
+            parentid = self._pyfuncitem.parent.nodeid
+            fixturedefs = self._fixturemanager.getfixturedefs(argname, parentid)
+            self._arg2fixturedefs[argname] = fixturedefs
+        # fixturedefs list is immutable so we maintain a decreasing index
+        index = self._arg2index.get(argname, 0) - 1
+        if fixturedefs is None or (-index > len(fixturedefs)):
+            raise FixtureLookupError(argname, self)
+        self._arg2index[argname] = index
+        return fixturedefs[index]
+
+    @property
+    def config(self):
+        """ the pytest config object associated with this request. """
+        return self._pyfuncitem.config
+
+
+    @scopeproperty()
+    def function(self):
+        """ test function object if the request has a per-function scope. """
+        return self._pyfuncitem.obj
+
+    @scopeproperty("class")
+    def cls(self):
+        """ class (can be None) where the test function was collected. """
+        clscol = self._pyfuncitem.getparent(pytest.Class)
+        if clscol:
+            return clscol.obj
+
+    @property
+    def instance(self):
+        """ instance (can be None) on which test function was collected. """
+        # unittest support hack, see _pytest.unittest.TestCaseFunction
+        try:
+            return self._pyfuncitem._testcase
+        except AttributeError:
+            function = getattr(self, "function", None)
+            if function is not None:
+                return py.builtin._getimself(function)
+
+    @scopeproperty()
+    def module(self):
+        """ python module object where the test function was collected. """
+        return self._pyfuncitem.getparent(pytest.Module).obj
+
+    @scopeproperty()
+    def fspath(self):
+        """ the file system path of the test module which collected this test. """
+        return self._pyfuncitem.fspath
+
+    @property
+    def keywords(self):
+        """ keywords/markers dictionary for the underlying node. """
+        return self.node.keywords
+
+    @property
+    def session(self):
+        """ pytest session object. """
+        return self._pyfuncitem.session
+
+    def addfinalizer(self, finalizer):
+        """ add finalizer/teardown function to be called after the
+        last test within the requesting test context finished
+        execution. """
+        # XXX usually this method is shadowed by fixturedef specific ones
+        self._addfinalizer(finalizer, scope=self.scope)
+
+    def _addfinalizer(self, finalizer, scope):
+        # register ``finalizer`` on the collection node matching ``scope``
+        colitem = self._getscopeitem(scope)
+        self._pyfuncitem.session._setupstate.addfinalizer(
+            finalizer=finalizer, colitem=colitem)
+
+    def applymarker(self, marker):
+        """ Apply a marker to a single test function invocation.
+        This method is useful if you don't want to have a keyword/marker
+        on all function invocations.
+
+        :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
+            created by a call to ``pytest.mark.NAME(...)``.
+        """
+        try:
+            self.node.keywords[marker.markname] = marker
+        except AttributeError:
+            raise ValueError(marker)
+
+    def raiseerror(self, msg):
+        """ raise a FixtureLookupError with the given message. """
+        raise self._fixturemanager.FixtureLookupError(None, self, msg)
+
+    def _fillfixtures(self):
+        # resolve and store every not-yet-filled fixture value on the item
+        item = self._pyfuncitem
+        fixturenames = getattr(item, "fixturenames", self.fixturenames)
+        for argname in fixturenames:
+            if argname not in item.funcargs:
+                item.funcargs[argname] = self.getfixturevalue(argname)
+
+    def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
+        """ (deprecated) Return a testing resource managed by ``setup`` &
+        ``teardown`` calls.  ``scope`` and ``extrakey`` determine when the
+        ``teardown`` function will be called so that subsequent calls to
+        ``setup`` would recreate the resource.  With pytest-2.3 you often
+        do not need ``cached_setup()`` as you can directly declare a scope
+        on a fixture function and register a finalizer through
+        ``request.addfinalizer()``.
+
+        :arg teardown: function receiving a previously setup resource.
+        :arg setup: a no-argument function creating a resource.
+        :arg scope: a string value out of ``function``, ``class``, ``module``
+            or ``session`` indicating the caching lifecycle of the resource.
+        :arg extrakey: added to internal caching key of (funcargname, scope).
+        """
+        if not hasattr(self.config, '_setupcache'):
+            self.config._setupcache = {} # XXX weakref?
+        cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
+        cache = self.config._setupcache
+        try:
+            val = cache[cachekey]
+        except KeyError:
+            self._check_scope(self.fixturename, self.scope, scope)
+            val = setup()
+            cache[cachekey] = val
+            if teardown is not None:
+                def finalizer():
+                    # drop the cache entry so a later call re-runs setup()
+                    del cache[cachekey]
+                    teardown(val)
+                self._addfinalizer(finalizer, scope=scope)
+        return val
+
+    def getfixturevalue(self, argname):
+        """ Dynamically run a named fixture function.
+
+        Declaring fixtures via function argument is recommended where possible.
+        But if you can only decide whether to use another fixture at test
+        setup time, you may use this function to retrieve it inside a fixture
+        or test function body.
+        """
+        return self._get_active_fixturedef(argname).cached_result[0]
+
+    def getfuncargvalue(self, argname):
+        """ Deprecated, use getfixturevalue. """
+        from _pytest import deprecated
+        warnings.warn(
+            deprecated.GETFUNCARGVALUE,
+            DeprecationWarning)
+        return self.getfixturevalue(argname)
+
+    def _get_active_fixturedef(self, argname):
+        # Return the FixtureDef whose value is active for ``argname``,
+        # executing the fixture (and caching def + value) on first use.
+        try:
+            return self._fixture_defs[argname]
+        except KeyError:
+            try:
+                fixturedef = self._getnextfixturedef(argname)
+            except FixtureLookupError:
+                if argname == "request":
+                    # "request" is special: serve this request object itself
+                    # through a minimal stand-in FixtureDef
+                    class PseudoFixtureDef:
+                        cached_result = (self, [0], None)
+                        scope = "function"
+                    return PseudoFixtureDef
+                raise
+        # remove indent to prevent the python3 exception
+        # from leaking into the call
+        result = self._getfixturevalue(fixturedef)
+        self._fixture_values[argname] = result
+        self._fixture_defs[argname] = fixturedef
+        return fixturedef
+
+    def _get_fixturestack(self):
+        # walk parent requests outwards, collecting the chain of FixtureDefs
+        # that led to this request (outermost first)
+        current = self
+        l = []
+        while 1:
+            fixturedef = getattr(current, "_fixturedef", None)
+            if fixturedef is None:
+                l.reverse()
+                return l
+            l.append(fixturedef)
+            current = current._parent_request
+
+    def _getfixturevalue(self, fixturedef):
+        # prepare a subrequest object before calling fixture function
+        # (latter managed by fixturedef)
+        argname = fixturedef.argname
+        funcitem = self._pyfuncitem
+        scope = fixturedef.scope
+        try:
+            param = funcitem.callspec.getparam(argname)
+        except (AttributeError, ValueError):
+            param = NOTSET
+            param_index = 0
+            if fixturedef.params is not None:
+                # the fixture is parametrized but no param was selected for
+                # this test; report where the bad request came from
+                frame = inspect.stack()[3]
+                frameinfo = inspect.getframeinfo(frame[0])
+                source_path = frameinfo.filename
+                source_lineno = frameinfo.lineno
+                source_path = py.path.local(source_path)
+                if source_path.relto(funcitem.config.rootdir):
+                    source_path = source_path.relto(funcitem.config.rootdir)
+                msg = (
+                    "The requested fixture has no parameter defined for the "
+                    "current test.\n\nRequested fixture '{0}' defined in:\n{1}"
+                    "\n\nRequested here:\n{2}:{3}".format(
+                        fixturedef.argname,
+                        getlocation(fixturedef.func, funcitem.config.rootdir),
+                        source_path,
+                        source_lineno,
+                    )
+                )
+                pytest.fail(msg)
+        else:
+            # indices might not be set if old-style metafunc.addcall() was used
+            param_index = funcitem.callspec.indices.get(argname, 0)
+            # if a parametrize invocation set a scope it will override
+            # the static scope defined with the fixture function
+            paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
+            if paramscopenum is not None:
+                scope = scopes[paramscopenum]
+
+        subrequest = SubRequest(self, scope, param, param_index, fixturedef)
+
+        # check if a higher-level scoped fixture accesses a lower level one
+        subrequest._check_scope(argname, self.scope, scope)
+
+        # clear sys.exc_info before invoking the fixture (python bug?)
+        # if its not explicitly cleared it will leak into the call
+        exc_clear()
+        try:
+            # call the fixture function
+            val = fixturedef.execute(request=subrequest)
+        finally:
+            # if fixture function failed it might have registered finalizers
+            self.session._setupstate.addfinalizer(fixturedef.finish,
+                                                  subrequest.node)
+        return val
+
+    def _check_scope(self, argname, invoking_scope, requested_scope):
+        # fail with a helpful message when a broader-scoped fixture requests
+        # a narrower-scoped one (e.g. session fixture using a function one)
+        if argname == "request":
+            return
+        if scopemismatch(invoking_scope, requested_scope):
+            # try to report something helpful
+            lines = self._factorytraceback()
+            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
+                "fixture %r with a %r scoped request object, "
+                "involved factories\n%s" %(
+                (requested_scope, argname, invoking_scope, "\n".join(lines))),
+                pytrace=False)
+
+    def _factorytraceback(self):
+        # build "path:line:  def name(args)" lines for every fixture factory
+        # involved in reaching this request, for error reporting
+        lines = []
+        for fixturedef in self._get_fixturestack():
+            factory = fixturedef.func
+            fs, lineno = getfslineno(factory)
+            p = self._pyfuncitem.session.fspath.bestrelpath(fs)
+            args = _format_args(factory)
+            lines.append("%s:%d:  def %s%s" %(
+                p, lineno, factory.__name__, args))
+        return lines
+
+    def _getscopeitem(self, scope):
+        if scope == "function":
+            # this might also be a non-function Item despite its attribute name
+            return self._pyfuncitem
+        node = get_scope_node(self._pyfuncitem, scope)
+        if node is None and scope == "class":
+            # fallback to function item itself
+            node = self._pyfuncitem
+        assert node
+        return node
+
+    def __repr__(self):
+        return "<FixtureRequest for %r>" %(self.node)
+
+
+class SubRequest(FixtureRequest):
+    """ a sub request for handling getting a fixture from a
+    test function/fixture. """
+    def __init__(self, request, scope, param, param_index, fixturedef):
+        # note: deliberately does NOT call FixtureRequest.__init__; the
+        # shared caches below are aliased from the parent request so that
+        # fixture values resolved here are visible to the whole request tree
+        self._parent_request = request
+        self.fixturename = fixturedef.argname
+        if param is not NOTSET:
+            self.param = param
+        self.param_index = param_index
+        self.scope = scope
+        self._fixturedef = fixturedef
+        self.addfinalizer = fixturedef.addfinalizer
+        self._pyfuncitem = request._pyfuncitem
+        self._fixture_values  = request._fixture_values
+        self._fixture_defs = request._fixture_defs
+        self._arg2fixturedefs = request._arg2fixturedefs
+        self._arg2index = request._arg2index
+        self._fixturemanager = request._fixturemanager
+
+    def __repr__(self):
+        return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
+
+
+class ScopeMismatchError(Exception):
+    """ A fixture function tries to use a different fixture function
+    which has a lower scope (e.g. a Session one calls a function one)
+    """
+
+
+# fixture scopes ordered from broadest to narrowest; a scope's index in this
+# list is the numeric "scopenum" used throughout this module
+scopes = "session module class function".split()
+scopenum_function = scopes.index("function")
+
+
+def scopemismatch(currentscope, newscope):
+    # True when ``newscope`` is narrower than ``currentscope`` (a broader
+    # scope must never depend on a narrower one)
+    return scopes.index(newscope) > scopes.index(currentscope)
+
+
+def scope2index(scope, descr, where=None):
+    """Look up the index of ``scope`` and raise a descriptive value error
+    if not defined.
+    """
+    try:
+        return scopes.index(scope)
+    except ValueError:
+        # re-raise with context (which fixture, where declared) so the
+        # user can locate the bad scope string
+        raise ValueError(
+            "{0} {1}has an unsupported scope value '{2}'".format(
+                descr, 'from {0} '.format(where) if where else '',
+                scope)
+        )
+
+
+class FixtureLookupError(LookupError):
+    """ could not return a requested Fixture (missing or invalid). """
+    def __init__(self, argname, request, msg=None):
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self):
+        # Build a FixtureLookupErrorRepr showing the source of each fixture
+        # factory involved, plus either the given message or a "not found"
+        # message listing the fixtures visible from the requesting test.
+        tblines = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # the last fixture raise an error, let's present
+            # it at the requesting side
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (IOError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno+1))
+            else:
+                # show the source up to and including the ``def`` line
+                addline("file %s, line %s" % (fspath, lineno+1))
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith('def'):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = []
+            parentid = self.request._pyfuncitem.parent.nodeid
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parentid))
+                if faclist and name not in available:
+                    available.append(name)
+            msg = "fixture %r not found" % (self.argname,)
+            msg += "\n available fixtures: %s" %(", ".join(sorted(available)),)
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+
+        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
+
+
+class FixtureLookupErrorRepr(TerminalRepr):
+    """ terminal representation of a FixtureLookupError: traceback-style
+    lines followed by the error message and the failing location. """
+    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
+        self.tblines = tblines
+        self.errorstring = errorstring
+        self.filename = filename
+        self.firstlineno = firstlineno
+        self.argname = argname
+
+    def toterminal(self, tw):
+        # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
+        for tbline in self.tblines:
+            tw.line(tbline.rstrip())
+        lines = self.errorstring.split("\n")
+        if lines:
+            # first line gets the fail marker, subsequent ones the flow marker
+            tw.line('{0}       {1}'.format(FormattedExcinfo.fail_marker,
+                                           lines[0].strip()), red=True)
+            for line in lines[1:]:
+                tw.line('{0}       {1}'.format(FormattedExcinfo.flow_marker,
+                                               line.strip()), red=True)
+        tw.line()
+        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
+
+
+def fail_fixturefunc(fixturefunc, msg):
+    # fail the test with ``msg`` plus the fixture function's source and
+    # location, without a python traceback (pytrace=False)
+    fs, lineno = getfslineno(fixturefunc)
+    location = "%s:%s" % (fs, lineno+1)
+    source = _pytest._code.Source(fixturefunc)
+    pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
+                pytrace=False)
+
+def call_fixture_func(fixturefunc, request, kwargs):
+    """ invoke a fixture function; for generator (yield) fixtures the yielded
+    value is returned and the generator's continuation is registered as a
+    finalizer so post-yield code runs at teardown. """
+    yieldctx = is_generator(fixturefunc)
+    if yieldctx:
+        it = fixturefunc(**kwargs)
+        res = next(it)
+
+        def teardown():
+            # resuming must exhaust the generator: exactly one yield allowed
+            try:
+                next(it)
+            except StopIteration:
+                pass
+            else:
+                fail_fixturefunc(fixturefunc,
+                    "yield_fixture function has more than one 'yield'")
+
+        request.addfinalizer(teardown)
+    else:
+        res = fixturefunc(**kwargs)
+    return res
+
+
+class FixtureDef:
+    """ A container for a factory definition. """
+    def __init__(self, fixturemanager, baseid, argname, func, scope, params,
+                 unittest=False, ids=None):
+        self._fixturemanager = fixturemanager
+        # nodeid prefix from which this fixture is visible; '' means global
+        self.baseid = baseid or ''
+        self.has_location = baseid is not None
+        self.func = func
+        self.argname = argname
+        self.scope = scope
+        self.scopenum = scope2index(
+            scope or "function",
+            descr='fixture {0}'.format(func.__name__),
+            where=baseid
+        )
+        self.params = params
+        # unittest fixture methods take ``self`` first; skip it
+        startindex = unittest and 1 or None
+        self.argnames = getfuncargnames(func, startindex=startindex)
+        self.unittest = unittest
+        self.ids = ids
+        self._finalizer = []
+
+    def addfinalizer(self, finalizer):
+        self._finalizer.append(finalizer)
+
+    def finish(self):
+        # run registered finalizers in reverse registration order
+        try:
+            while self._finalizer:
+                func = self._finalizer.pop()
+                func()
+        finally:
+            ihook = self._fixturemanager.session.ihook
+            ihook.pytest_fixture_post_finalizer(fixturedef=self)
+            # even if finalization fails, we invalidate
+            # the cached fixture value
+            if hasattr(self, "cached_result"):
+                del self.cached_result
+
+    def execute(self, request):
+        # get required arguments and register our own finish()
+        # with their finalization
+        for argname in self.argnames:
+            fixturedef = request._get_active_fixturedef(argname)
+            if argname != "request":
+                fixturedef.addfinalizer(self.finish)
+
+        # cached_result is (value, param_index, excinfo-or-None)
+        my_cache_key = request.param_index
+        cached_result = getattr(self, "cached_result", None)
+        if cached_result is not None:
+            result, cache_key, err = cached_result
+            if my_cache_key == cache_key:
+                if err is not None:
+                    py.builtin._reraise(*err)
+                else:
+                    return result
+            # we have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one
+            self.finish()
+            assert not hasattr(self, "cached_result")
+
+        ihook = self._fixturemanager.session.ihook
+        return ihook.pytest_fixture_setup(fixturedef=self, request=request)
+
+    def __repr__(self):
+        return ("<FixtureDef name=%r scope=%r baseid=%r >" %
+                (self.argname, self.scope, self.baseid))
+
+def pytest_fixture_setup(fixturedef, request):
+    """ Execution of fixture setup. """
+    # resolve this fixture's own fixture arguments first
+    kwargs = {}
+    for argname in fixturedef.argnames:
+        fixdef = request._get_active_fixturedef(argname)
+        # cached_result is (value, param_index, excinfo-or-None)
+        result, arg_cache_key, exc = fixdef.cached_result
+        request._check_scope(argname, request.scope, fixdef.scope)
+        kwargs[argname] = result
+
+    fixturefunc = fixturedef.func
+    if fixturedef.unittest:
+        if request.instance is not None:
+            # bind the unbound method to the TestCase instance
+            fixturefunc = fixturedef.func.__get__(request.instance)
+    else:
+        # the fixture function needs to be bound to the actual
+        # request.instance so that code working with "fixturedef" behaves
+        # as expected.
+        if request.instance is not None:
+            fixturefunc = getimfunc(fixturedef.func)
+            if fixturefunc != fixturedef.func:
+                fixturefunc = fixturefunc.__get__(request.instance)
+    my_cache_key = request.param_index
+    try:
+        result = call_fixture_func(fixturefunc, request, kwargs)
+    except Exception:
+        # cache the failure so repeated requests re-raise instead of re-running
+        fixturedef.cached_result = (None, my_cache_key, sys.exc_info())
+        raise
+    fixturedef.cached_result = (result, my_cache_key, None)
+    return result
+
+
+class FixtureFunctionMarker:
+    """ marker attached (as ``_pytestfixturefunction``) to functions decorated
+    with @pytest.fixture / @pytest.yield_fixture, recording the fixture's
+    scope, params, autouse flag, ids and name. """
+    def __init__(self, scope, params, autouse=False, ids=None, name=None):
+        self.scope = scope
+        self.params = params
+        self.autouse = autouse
+        self.ids = ids
+        self.name = name
+
+    def __call__(self, function):
+        if isclass(function):
+            raise ValueError(
+                    "class fixtures not supported (may be in the future)")
+        function._pytestfixturefunction = self
+        return function
+
+
+
+def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
+    """ (return a) decorator to mark a fixture factory function.
+
+    This decorator can be used (with or or without parameters) to define
+    a fixture function.  The name of the fixture function can later be
+    referenced to cause its invocation ahead of running tests: test
+    modules or classes can use the pytest.mark.usefixtures(fixturename)
+    marker.  Test functions can directly use fixture names as input
+    arguments in which case the fixture instance returned from the fixture
+    function will be injected.
+
+    :arg scope: the scope for which this fixture is shared, one of
+                "function" (default), "class", "module" or "session".
+
+    :arg params: an optional list of parameters which will cause multiple
+                invocations of the fixture function and all of the tests
+                using it.
+
+    :arg autouse: if True, the fixture func is activated for all tests that
+                can see it.  If False (the default) then an explicit
+                reference is needed to activate the fixture.
+
+    :arg ids: list of string ids each corresponding to the params
+       so that they are part of the test id. If no ids are provided
+       they will be generated automatically from the params.
+
+    :arg name: the name of the fixture. This defaults to the name of the
+               decorated function. If a fixture is used in the same module in
+               which it is defined, the function name of the fixture will be
+               shadowed by the function arg that requests the fixture; one way
+               to resolve this is to name the decorated function
+               ``fixture_<fixturename>`` and then use
+               ``@pytest.fixture(name='<fixturename>')``.
+
+    Fixtures can optionally provide their values to test functions using a ``yield`` statement,
+    instead of ``return``. In this case, the code block after the ``yield`` statement is executed
+    as teardown code regardless of the test outcome. A fixture function must yield exactly once.
+    """
+    if callable(scope) and params is None and autouse == False:
+        # direct decoration
+        return FixtureFunctionMarker(
+                "function", params, autouse, name=name)(scope)
+    if params is not None and not isinstance(params, (list, tuple)):
+        params = list(params)
+    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
+
+
+def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=None):
+    """ (return a) decorator to mark a yield-fixture factory function.
+
+    .. deprecated:: 3.0
+        Use :py:func:`pytest.fixture` directly instead.
+    """
+    # same direct-decoration handling as fixture(): bare ``@yield_fixture``
+    # passes the function itself as ``scope``
+    if callable(scope) and params is None and not autouse:
+        # direct decoration
+        return FixtureFunctionMarker(
+                "function", params, autouse, ids=ids, name=name)(scope)
+    else:
+        return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
+
+
+defaultfuncargprefixmarker = fixture()
+
+
+# built-in session-scoped fixture exposing the Config object
+@fixture(scope="session")
+def pytestconfig(request):
+    """ the pytest config object with access to command line opts."""
+    return request.config
+
+
+class FixtureManager:
+    """
+    pytest fixtures definitions and information is stored and managed
+    from this class.
+
+    During collection fm.parsefactories() is called multiple times to parse
+    fixture function definitions into FixtureDef objects and internal
+    data structures.
+
+    During collection of test functions, metafunc-mechanics instantiate
+    a FuncFixtureInfo object which is cached per node/func-name.
+    This FuncFixtureInfo object is later retrieved by Function nodes
+    which themselves offer a fixturenames attribute.
+
+    The FuncFixtureInfo object holds information about fixtures and FixtureDefs
+    relevant for a particular function.  An initial list of fixtures is
+    assembled like this:
+
+    - ini-defined usefixtures
+    - autouse-marked fixtures along the collection chain up from the function
+    - usefixtures markers at module/class/function level
+    - test function funcargs
+
+    Subsequently the funcfixtureinfo.fixturenames attribute is computed
+    as the closure of the fixtures needed to setup the initial fixtures,
+    i. e. fixtures needed by fixture functions themselves are appended
+    to the fixturenames list.
+
+    Upon the test-setup phases all fixturenames are instantiated, retrieved
+    by a lookup of their FuncFixtureInfo.
+    """
+
+    _argprefix = "pytest_funcarg__"
+    FixtureLookupError = FixtureLookupError
+    FixtureLookupErrorRepr = FixtureLookupErrorRepr
+
+    def __init__(self, session):
+        self.session = session
+        self.config = session.config
+        self._arg2fixturedefs = {}
+        self._holderobjseen = set()
+        self._arg2finish = {}
+        self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
+        session.config.pluginmanager.register(self, "funcmanage")
+
+
+    def getfixtureinfo(self, node, func, cls, funcargs=True):
+        if funcargs and not hasattr(node, "nofuncargs"):
+            if cls is not None:
+                startindex = 1
+            else:
+                startindex = None
+            argnames = getfuncargnames(func, startindex)
+        else:
+            argnames = ()
+        usefixtures = getattr(func, "usefixtures", None)
+        initialnames = argnames
+        if usefixtures is not None:
+            initialnames = usefixtures.args + initialnames
+        fm = node.session._fixturemanager
+        names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
+                                                              node)
+        return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
+
+    def pytest_plugin_registered(self, plugin):
+        nodeid = None
+        try:
+            p = py.path.local(plugin.__file__)
+        except AttributeError:
+            pass
+        else:
+            # construct the base nodeid which is later used to check
+            # what fixtures are visible for particular tests (as denoted
+            # by their test id)
+            if p.basename.startswith("conftest.py"):
+                nodeid = p.dirpath().relto(self.config.rootdir)
+                if p.sep != "/":
+                    nodeid = nodeid.replace(p.sep, "/")
+        self.parsefactories(plugin, nodeid)
+
+    def _getautousenames(self, nodeid):
+        """ return a tuple of fixture names to be used. """
+        autousenames = []
+        for baseid, basenames in self._nodeid_and_autousenames:
+            if nodeid.startswith(baseid):
+                if baseid:
+                    i = len(baseid)
+                    nextchar = nodeid[i:i+1]
+                    if nextchar and nextchar not in ":/":
+                        continue
+                autousenames.extend(basenames)
+        # make sure autousenames are sorted by scope, scopenum 0 is session
+        autousenames.sort(
+            key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
+        return autousenames
+
+    def getfixtureclosure(self, fixturenames, parentnode):
+        # collect the closure of all fixtures , starting with the given
+        # fixturenames as the initial set.  As we have to visit all
+        # factory definitions anyway, we also return an arg2fixturedefs
+        # mapping so that the caller can reuse it and does not have
+        # to re-discover fixturedefs again for each fixturename
+        # (discovering matching fixtures for a given name/node is expensive)
+
+        parentid = parentnode.nodeid
+        fixturenames_closure = self._getautousenames(parentid)
+
+        def merge(otherlist):
+            for arg in otherlist:
+                if arg not in fixturenames_closure:
+                    fixturenames_closure.append(arg)
+
+        merge(fixturenames)
+        arg2fixturedefs = {}
+        lastlen = -1
+        while lastlen != len(fixturenames_closure):
+            lastlen = len(fixturenames_closure)
+            for argname in fixturenames_closure:
+                if argname in arg2fixturedefs:
+                    continue
+                fixturedefs = self.getfixturedefs(argname, parentid)
+                if fixturedefs:
+                    arg2fixturedefs[argname] = fixturedefs
+                    merge(fixturedefs[-1].argnames)
+        return fixturenames_closure, arg2fixturedefs
+
+    def pytest_generate_tests(self, metafunc):
+        for argname in metafunc.fixturenames:
+            faclist = metafunc._arg2fixturedefs.get(argname)
+            if faclist:
+                fixturedef = faclist[-1]
+                if fixturedef.params is not None:
+                    func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
+                    # skip directly parametrized arguments
+                    argnames = func_params[0]
+                    if not isinstance(argnames, (tuple, list)):
+                        argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+                    if argname not in func_params and argname not in argnames:
+                        metafunc.parametrize(argname, fixturedef.params,
+                                             indirect=True, scope=fixturedef.scope,
+                                             ids=fixturedef.ids)
+            else:
+                continue  # will raise FixtureLookupError at setup time
+
+    def pytest_collection_modifyitems(self, items):
+        # separate parametrized setups
+        items[:] = reorder_items(items)
+
+    def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
+        if nodeid is not NOTSET:
+            holderobj = node_or_obj
+        else:
+            holderobj = node_or_obj.obj
+            nodeid = node_or_obj.nodeid
+        if holderobj in self._holderobjseen:
+            return
+        self._holderobjseen.add(holderobj)
+        autousenames = []
+        for name in dir(holderobj):
+            obj = getattr(holderobj, name, None)
+            # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
+            # or are "@pytest.fixture" marked
+            marker = getfixturemarker(obj)
+            if marker is None:
+                if not name.startswith(self._argprefix):
+                    continue
+                if not callable(obj):
+                    continue
+                marker = defaultfuncargprefixmarker
+                from _pytest import deprecated
+                self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name))
+                name = name[len(self._argprefix):]
+            elif not isinstance(marker, FixtureFunctionMarker):
+                # magic globals  with __getattr__ might have got us a wrong
+                # fixture attribute
+                continue
+            else:
+                if marker.name:
+                    name = marker.name
+                msg = 'fixtures cannot have "pytest_funcarg__" prefix ' \
+                      'and be decorated with @pytest.fixture:\n%s' % name
+                assert not name.startswith(self._argprefix), msg
+
+            fixture_def = FixtureDef(self, nodeid, name, obj,
+                                     marker.scope, marker.params,
+                                     unittest=unittest, ids=marker.ids)
+
+            faclist = self._arg2fixturedefs.setdefault(name, [])
+            if fixture_def.has_location:
+                faclist.append(fixture_def)
+            else:
+                # fixturedefs with no location are at the front
+                # so this inserts the current fixturedef after the
+                # existing fixturedefs from external plugins but
+                # before the fixturedefs provided in conftests.
+                i = len([f for f in faclist if not f.has_location])
+                faclist.insert(i, fixture_def)
+            if marker.autouse:
+                autousenames.append(name)
+
+        if autousenames:
+            self._nodeid_and_autousenames.append((nodeid or '', autousenames))
+
+    def getfixturedefs(self, argname, nodeid):
+        """
+        Gets a list of fixtures which are applicable to the given node id.
+
+        :param str argname: name of the fixture to search for
+        :param str nodeid: full node id of the requesting test.
+        :return: list[FixtureDef]
+        """
+        try:
+            fixturedefs = self._arg2fixturedefs[argname]
+        except KeyError:
+            return None
+        else:
+            return tuple(self._matchfactories(fixturedefs, nodeid))
+
+    def _matchfactories(self, fixturedefs, nodeid):
+        for fixturedef in fixturedefs:
+            if nodeid.startswith(fixturedef.baseid):
+                yield fixturedef
+
diff --git a/lib/spack/external/_pytest/freeze_support.py b/lib/spack/external/_pytest/freeze_support.py
new file mode 100644
index 0000000000..f78ccd298e
--- /dev/null
+++ b/lib/spack/external/_pytest/freeze_support.py
@@ -0,0 +1,45 @@
+"""
+Provides a function listing all internal pytest modules, for use by
+freezing tools (e.g. cx_freeze)
+"""
+
+def pytest_namespace():
+    return {'freeze_includes': freeze_includes}
+
+
+def freeze_includes():
+    """
+    Returns a list of module names used by py.test that should be
+    included by cx_freeze.
+    """
+    import py
+    import _pytest
+    result = list(_iter_all_modules(py))
+    result += list(_iter_all_modules(_pytest))
+    return result
+
+
+def _iter_all_modules(package, prefix=''):
+    """
+    Iterates over the names of all modules that can be found in the given
+    package, recursively.
+    Example:
+        _iter_all_modules(_pytest) ->
+            ['_pytest.assertion.newinterpret',
+             '_pytest.capture',
+             '_pytest.core',
+             ...
+            ]
+    """
+    import os
+    import pkgutil
+    if type(package) is not str:
+        path, prefix = package.__path__[0], package.__name__ + '.'
+    else:
+        path = package
+    for _, name, is_package in pkgutil.iter_modules([path]):
+        if is_package:
+            for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
+                yield prefix + m
+        else:
+            yield prefix + name
\ No newline at end of file
diff --git a/lib/spack/external/_pytest/helpconfig.py b/lib/spack/external/_pytest/helpconfig.py
new file mode 100644
index 0000000000..6e66b11c48
--- /dev/null
+++ b/lib/spack/external/_pytest/helpconfig.py
@@ -0,0 +1,144 @@
+""" version info, help messages, tracing configuration.  """
+import py
+import pytest
+import os, sys
+
+def pytest_addoption(parser):
+    group = parser.getgroup('debugconfig')
+    group.addoption('--version', action="store_true",
+            help="display pytest lib version and import information.")
+    group._addoption("-h", "--help", action="store_true", dest="help",
+            help="show help message and configuration info")
+    group._addoption('-p', action="append", dest="plugins", default = [],
+               metavar="name",
+               help="early-load given plugin (multi-allowed). "
+                    "To avoid loading of plugins, use the `no:` prefix, e.g. "
+                    "`no:doctest`.")
+    group.addoption('--traceconfig', '--trace-config',
+               action="store_true", default=False,
+               help="trace considerations of conftest.py files."),
+    group.addoption('--debug',
+               action="store_true", dest="debug", default=False,
+               help="store internal tracing debug information in 'pytestdebug.log'.")
+    group._addoption(
+        '-o', '--override-ini', nargs='*', dest="override_ini",
+        action="append",
+        help="override config option with option=value style, e.g. `-o xfail_strict=True`.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_cmdline_parse():
+    outcome = yield
+    config = outcome.get_result()
+    if config.option.debug:
+        path = os.path.abspath("pytestdebug.log")
+        debugfile = open(path, 'w')
+        debugfile.write("versions pytest-%s, py-%s, "
+                "python-%s\ncwd=%s\nargs=%s\n\n" %(
+            pytest.__version__, py.__version__,
+            ".".join(map(str, sys.version_info)),
+            os.getcwd(), config._origargs))
+        config.trace.root.setwriter(debugfile.write)
+        undo_tracing = config.pluginmanager.enable_tracing()
+        sys.stderr.write("writing pytestdebug information to %s\n" % path)
+
+        def unset_tracing():
+            debugfile.close()
+            sys.stderr.write("wrote pytestdebug information to %s\n" %
+                             debugfile.name)
+            config.trace.root.setwriter(None)
+            undo_tracing()
+
+        config.add_cleanup(unset_tracing)
+
+def pytest_cmdline_main(config):
+    if config.option.version:
+        p = py.path.local(pytest.__file__)
+        sys.stderr.write("This is pytest version %s, imported from %s\n" %
+            (pytest.__version__, p))
+        plugininfo = getpluginversioninfo(config)
+        if plugininfo:
+            for line in plugininfo:
+                sys.stderr.write(line + "\n")
+        return 0
+    elif config.option.help:
+        config._do_configure()
+        showhelp(config)
+        config._ensure_unconfigure()
+        return 0
+
+def showhelp(config):
+    reporter = config.pluginmanager.get_plugin('terminalreporter')
+    tw = reporter._tw
+    tw.write(config._parser.optparser.format_help())
+    tw.line()
+    tw.line()
+    tw.line("[pytest] ini-options in the first "
+            "pytest.ini|tox.ini|setup.cfg file found:")
+    tw.line()
+
+    for name in config._parser._ininames:
+        help, type, default = config._parser._inidict[name]
+        if type is None:
+            type = "string"
+        spec = "%s (%s)" % (name, type)
+        line = "  %-24s %s" %(spec, help)
+        tw.line(line[:tw.fullwidth])
+
+    tw.line()
+    tw.line("environment variables:")
+    vars = [
+        ("PYTEST_ADDOPTS", "extra command line options"),
+        ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
+        ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
+    ]
+    for name, help in vars:
+        tw.line("  %-24s %s" % (name, help))
+    tw.line()
+    tw.line()
+
+    tw.line("to see available markers type: pytest --markers")
+    tw.line("to see available fixtures type: pytest --fixtures")
+    tw.line("(shown according to specified file_or_dir or current dir "
+            "if not specified)")
+
+    for warningreport in reporter.stats.get('warnings', []):
+        tw.line("warning : " + warningreport.message, red=True)
+    return
+
+
+conftest_options = [
+    ('pytest_plugins', 'list of plugin names to load'),
+]
+
+def getpluginversioninfo(config):
+    lines = []
+    plugininfo = config.pluginmanager.list_plugin_distinfo()
+    if plugininfo:
+        lines.append("setuptools registered plugins:")
+        for plugin, dist in plugininfo:
+            loc = getattr(plugin, '__file__', repr(plugin))
+            content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
+            lines.append("  " + content)
+    return lines
+
+def pytest_report_header(config):
+    lines = []
+    if config.option.debug or config.option.traceconfig:
+        lines.append("using: pytest-%s pylib-%s" %
+            (pytest.__version__,py.__version__))
+
+        verinfo = getpluginversioninfo(config)
+        if verinfo:
+            lines.extend(verinfo)
+
+    if config.option.traceconfig:
+        lines.append("active plugins:")
+        items = config.pluginmanager.list_name_plugin()
+        for name, plugin in items:
+            if hasattr(plugin, '__file__'):
+                r = plugin.__file__
+            else:
+                r = repr(plugin)
+            lines.append("    %-20s: %s" %(name, r))
+    return lines
diff --git a/lib/spack/external/_pytest/hookspec.py b/lib/spack/external/_pytest/hookspec.py
new file mode 100644
index 0000000000..b5f51eccf5
--- /dev/null
+++ b/lib/spack/external/_pytest/hookspec.py
@@ -0,0 +1,314 @@
+""" hook specifications for pytest plugins, invoked from main.py and builtin plugins.  """
+
+from _pytest._pluggy import HookspecMarker
+
+hookspec = HookspecMarker("pytest")
+
+# -------------------------------------------------------------------------
+# Initialization hooks called for every plugin
+# -------------------------------------------------------------------------
+
+@hookspec(historic=True)
+def pytest_addhooks(pluginmanager):
+    """called at plugin registration time to allow adding new hooks via a call to
+    pluginmanager.add_hookspecs(module_or_class, prefix)."""
+
+
+@hookspec(historic=True)
+def pytest_namespace():
+    """return dict of name->object to be made globally available in
+    the pytest namespace.  This hook is called at plugin registration
+    time.
+    """
+
+@hookspec(historic=True)
+def pytest_plugin_registered(plugin, manager):
+    """ a new pytest plugin got registered. """
+
+
+@hookspec(historic=True)
+def pytest_addoption(parser):
+    """register argparse-style options and ini-style config values,
+    called once at the beginning of a test run.
+
+    .. note::
+
+        This function should be implemented only in plugins or ``conftest.py``
+        files situated at the tests root directory due to how pytest
+        :ref:`discovers plugins during startup <pluginorder>`.
+
+    :arg parser: To add command line options, call
+        :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
+        To add ini-file values call :py:func:`parser.addini(...)
+        <_pytest.config.Parser.addini>`.
+
+    Options can later be accessed through the
+    :py:class:`config <_pytest.config.Config>` object, respectively:
+
+    - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
+      retrieve the value of a command line option.
+
+    - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
+      a value read from an ini-style file.
+
+    The config object is passed around on many internal objects via the ``.config``
+    attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
+    via (deprecated) ``pytest.config``.
+    """
+
+@hookspec(historic=True)
+def pytest_configure(config):
+    """ called after command line options have been parsed
+    and all plugins and initial conftest files been loaded.
+    This hook is called for every plugin.
+    """
+
+# -------------------------------------------------------------------------
+# Bootstrapping hooks called for plugins registered early enough:
+# internal and 3rd party plugins as well as directly
+# discoverable conftest.py local plugins.
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_cmdline_parse(pluginmanager, args):
+    """return initialized config object, parsing the specified args. """
+
+def pytest_cmdline_preparse(config, args):
+    """(deprecated) modify command line arguments before option parsing. """
+
+@hookspec(firstresult=True)
+def pytest_cmdline_main(config):
+    """ called for performing the main command line action. The default
+    implementation will invoke the configure hooks and runtest_mainloop. """
+
+def pytest_load_initial_conftests(early_config, parser, args):
+    """ implements the loading of initial conftest files ahead
+    of command line option parsing. """
+
+
+# -------------------------------------------------------------------------
+# collection hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_collection(session):
+    """ perform the collection protocol for the given session. """
+
+def pytest_collection_modifyitems(session, config, items):
+    """ called after collection has been performed, may filter or re-order
+    the items in-place."""
+
+def pytest_collection_finish(session):
+    """ called after collection has been performed and modified. """
+
+@hookspec(firstresult=True)
+def pytest_ignore_collect(path, config):
+    """ return True to prevent considering this path for collection.
+    This hook is consulted for all files and directories prior to calling
+    more specific hooks.
+    """
+
+@hookspec(firstresult=True)
+def pytest_collect_directory(path, parent):
+    """ called before traversing a directory for collection files. """
+
+def pytest_collect_file(path, parent):
+    """ return collection Node or None for the given path. Any new node
+    needs to have the specified ``parent`` as a parent."""
+
+# logging hooks for collection
+def pytest_collectstart(collector):
+    """ collector starts collecting. """
+
+def pytest_itemcollected(item):
+    """ we just collected a test item. """
+
+def pytest_collectreport(report):
+    """ collector finished collecting. """
+
+def pytest_deselected(items):
+    """ called for test items deselected by keyword. """
+
+@hookspec(firstresult=True)
+def pytest_make_collect_report(collector):
+    """ perform ``collector.collect()`` and return a CollectReport. """
+
+# -------------------------------------------------------------------------
+# Python test function related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_pycollect_makemodule(path, parent):
+    """ return a Module collector or None for the given path.
+    This hook will be called for each matching test module path.
+    The pytest_collect_file hook needs to be used if you want to
+    create test modules for files that do not match as a test module.
+    """
+
+@hookspec(firstresult=True)
+def pytest_pycollect_makeitem(collector, name, obj):
+    """ return custom item/collector for a python object in a module, or None.  """
+
+@hookspec(firstresult=True)
+def pytest_pyfunc_call(pyfuncitem):
+    """ call underlying test function. """
+
+def pytest_generate_tests(metafunc):
+    """ generate (multiple) parametrized calls to a test function."""
+
+@hookspec(firstresult=True)
+def pytest_make_parametrize_id(config, val):
+    """Return a user-friendly string representation of the given ``val`` that will be used
+    by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
+    """
+
+# -------------------------------------------------------------------------
+# generic runtest related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_runtestloop(session):
+    """ called for performing the main runtest loop
+    (after collection finished). """
+
+def pytest_itemstart(item, node):
+    """ (deprecated, use pytest_runtest_logstart). """
+
+@hookspec(firstresult=True)
+def pytest_runtest_protocol(item, nextitem):
+    """ implements the runtest_setup/call/teardown protocol for
+    the given test item, including capturing exceptions and calling
+    reporting hooks.
+
+    :arg item: test item for which the runtest protocol is performed.
+
+    :arg nextitem: the scheduled-to-be-next test item (or None if this
+                   is the end my friend).  This argument is passed on to
+                   :py:func:`pytest_runtest_teardown`.
+
+    :return boolean: True if no further hook implementations should be invoked.
+    """
+
+def pytest_runtest_logstart(nodeid, location):
+    """ signal the start of running a single test item. """
+
+def pytest_runtest_setup(item):
+    """ called before ``pytest_runtest_call(item)``. """
+
+def pytest_runtest_call(item):
+    """ called to execute the test ``item``. """
+
+def pytest_runtest_teardown(item, nextitem):
+    """ called after ``pytest_runtest_call``.
+
+    :arg nextitem: the scheduled-to-be-next test item (None if no further
+                   test item is scheduled).  This argument can be used to
+                   perform exact teardowns, i.e. calling just enough finalizers
+                   so that nextitem only needs to call setup-functions.
+    """
+
+@hookspec(firstresult=True)
+def pytest_runtest_makereport(item, call):
+    """ return a :py:class:`_pytest.runner.TestReport` object
+    for the given :py:class:`pytest.Item` and
+    :py:class:`_pytest.runner.CallInfo`.
+    """
+
+def pytest_runtest_logreport(report):
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """
+
+# -------------------------------------------------------------------------
+# Fixture related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_fixture_setup(fixturedef, request):
+    """ performs fixture setup execution. """
+
+def pytest_fixture_post_finalizer(fixturedef):
+    """ called after fixture teardown, but before the cache is cleared so
+    the fixture result cache ``fixturedef.cached_result`` can
+    still be accessed."""
+
+# -------------------------------------------------------------------------
+# test session related hooks
+# -------------------------------------------------------------------------
+
+def pytest_sessionstart(session):
+    """ before session.main() is called. """
+
+def pytest_sessionfinish(session, exitstatus):
+    """ whole test run finishes. """
+
+def pytest_unconfigure(config):
+    """ called before test process is exited.  """
+
+
+# -------------------------------------------------------------------------
+# hooks for customising the assert methods
+# -------------------------------------------------------------------------
+
+def pytest_assertrepr_compare(config, op, left, right):
+    """return explanation for comparisons in failing assert expressions.
+
+    Return None for no custom explanation, otherwise return a list
+    of strings.  The strings will be joined by newlines but any newlines
+    *in* a string will be escaped.  Note that all but the first line will
+    be indented slightly, the intention is for the first line to be a summary.
+    """
+
+# -------------------------------------------------------------------------
+# hooks for influencing reporting (invoked from _pytest_terminal)
+# -------------------------------------------------------------------------
+
+def pytest_report_header(config, startdir):
+    """ return a string to be displayed as header info for terminal reporting."""
+
+@hookspec(firstresult=True)
+def pytest_report_teststatus(report):
+    """ return result-category, shortletter and verbose word for reporting."""
+
+def pytest_terminal_summary(terminalreporter, exitstatus):
+    """ add additional section in terminal summary reporting.  """
+
+
+@hookspec(historic=True)
+def pytest_logwarning(message, code, nodeid, fslocation):
+    """ process a warning specified by a message, a code string,
+    a nodeid and fslocation (both of which may be None
+    if the warning is not tied to a particular node/location)."""
+
+# -------------------------------------------------------------------------
+# doctest hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_doctest_prepare_content(content):
+    """ return processed content for a given doctest"""
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
+def pytest_internalerror(excrepr, excinfo):
+    """ called for internal errors. """
+
+def pytest_keyboard_interrupt(excinfo):
+    """ called for keyboard interrupt. """
+
+def pytest_exception_interact(node, call, report):
+    """called when an exception was raised which can potentially be
+    interactively handled.
+
+    This hook is only called if an exception was raised
+    that is not an internal exception like ``skip.Exception``.
+    """
+
+def pytest_enter_pdb(config):
+    """ called upon pdb.set_trace(), can be used by plugins to take special
+    action just before the python debugger enters in interactive mode.
+
+    :arg config: pytest config object
+    :type config: _pytest.config.Config
+    """
diff --git a/lib/spack/external/_pytest/junitxml.py b/lib/spack/external/_pytest/junitxml.py
new file mode 100644
index 0000000000..317382e637
--- /dev/null
+++ b/lib/spack/external/_pytest/junitxml.py
@@ -0,0 +1,413 @@
+"""
+    report test results in JUnit-XML format,
+    for use with Jenkins and build integration servers.
+
+
+Based on initial code from Ross Lawley.
+"""
+# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
+# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+
+import functools
+import py
+import os
+import re
+import sys
+import time
+import pytest
+from _pytest.config import filename_arg
+
+# Python 2.X and 3.X compatibility
+if sys.version_info[0] < 3:
+    from codecs import open
+else:
+    unichr = chr
+    unicode = str
+    long = int
+
+
+class Junit(py.xml.Namespace):
+    pass
+
+
+# We need to get the subset of the invalid unicode ranges according to
+# XML 1.0 which are valid in this python build.  Hence we calculate
+# this dynamically instead of hardcoding it.  The spec range of valid
+# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
+#                    | [#x10000-#x10FFFF]
+_legal_chars = (0x09, 0x0A, 0x0d)
+_legal_ranges = (
+    (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
+)
+_legal_xml_re = [
+    unicode("%s-%s") % (unichr(low), unichr(high))
+    for (low, high) in _legal_ranges if low < sys.maxunicode
+]
+_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
+illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
+del _legal_chars
+del _legal_ranges
+del _legal_xml_re
+
+_py_ext_re = re.compile(r"\.py$")
+
+
+def bin_xml_escape(arg):
+    def repl(matchobj):
+        i = ord(matchobj.group())
+        if i <= 0xFF:
+            return unicode('#x%02X') % i
+        else:
+            return unicode('#x%04X') % i
+
+    return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
+
+
+class _NodeReporter(object):
+    def __init__(self, nodeid, xml):
+
+        self.id = nodeid
+        self.xml = xml
+        self.add_stats = self.xml.add_stats
+        self.duration = 0
+        self.properties = []
+        self.nodes = []
+        self.testcase = None
+        self.attrs = {}
+
+    def append(self, node):
+        self.xml.add_stats(type(node).__name__)
+        self.nodes.append(node)
+
+    def add_property(self, name, value):
+        self.properties.append((str(name), bin_xml_escape(value)))
+
+    def make_properties_node(self):
+        """Return a Junit node containing custom properties, if any.
+        """
+        if self.properties:
+            return Junit.properties([
+                Junit.property(name=name, value=value)
+                for name, value in self.properties
+            ])
+        return ''
+
+    def record_testreport(self, testreport):
+        assert not self.testcase
+        names = mangle_test_address(testreport.nodeid)
+        classnames = names[:-1]
+        if self.xml.prefix:
+            classnames.insert(0, self.xml.prefix)
+        attrs = {
+            "classname": ".".join(classnames),
+            "name": bin_xml_escape(names[-1]),
+            "file": testreport.location[0],
+        }
+        if testreport.location[1] is not None:
+            attrs["line"] = testreport.location[1]
+        self.attrs = attrs
+
+    def to_xml(self):
+        testcase = Junit.testcase(time=self.duration, **self.attrs)
+        testcase.append(self.make_properties_node())
+        for node in self.nodes:
+            testcase.append(node)
+        return testcase
+
+    def _add_simple(self, kind, message, data=None):
+        data = bin_xml_escape(data)
+        node = kind(data, message=message)
+        self.append(node)
+
+    def _write_captured_output(self, report):
+        for capname in ('out', 'err'):
+            content = getattr(report, 'capstd' + capname)
+            if content:
+                tag = getattr(Junit, 'system-' + capname)
+                self.append(tag(bin_xml_escape(content)))
+
+    def append_pass(self, report):
+        self.add_stats('passed')
+        self._write_captured_output(report)
+
+    def append_failure(self, report):
+        # msg = str(report.longrepr.reprtraceback.extraline)
+        if hasattr(report, "wasxfail"):
+            self._add_simple(
+                Junit.skipped,
+                "xfail-marked test passes unexpectedly")
+        else:
+            if hasattr(report.longrepr, "reprcrash"):
+                message = report.longrepr.reprcrash.message
+            elif isinstance(report.longrepr, (unicode, str)):
+                message = report.longrepr
+            else:
+                message = str(report.longrepr)
+            message = bin_xml_escape(message)
+            fail = Junit.failure(message=message)
+            fail.append(bin_xml_escape(report.longrepr))
+            self.append(fail)
+        self._write_captured_output(report)
+
+    def append_collect_error(self, report):
+        # msg = str(report.longrepr.reprtraceback.extraline)
+        self.append(Junit.error(bin_xml_escape(report.longrepr),
+                                message="collection failure"))
+
+    def append_collect_skipped(self, report):
+        self._add_simple(
+            Junit.skipped, "collection skipped", report.longrepr)
+
+    def append_error(self, report):
+        if getattr(report, 'when', None) == 'teardown':
+            msg = "test teardown failure"
+        else:
+            msg = "test setup failure"
+        self._add_simple(
+            Junit.error, msg, report.longrepr)
+        self._write_captured_output(report)
+
    def append_skipped(self, report):
        """Record a skipped test, distinguishing expected failures (xfail)
        from ordinary skips."""
        if hasattr(report, "wasxfail"):
            self._add_simple(
                Junit.skipped, "expected test failure", report.wasxfail
            )
        else:
            # longrepr of a skip is a (filename, lineno, reason) triple
            filename, lineno, skipreason = report.longrepr
            if skipreason.startswith("Skipped: "):
                # strip the "Skipped: " prefix added by the Skipped exception
                # NOTE(review): reasons without the prefix are not
                # xml-escaped here -- upstream quirk, kept as-is
                skipreason = bin_xml_escape(skipreason[9:])
            self.append(
                Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
                              type="pytest.skip",
                              message=skipreason))
        self._write_captured_output(report)
+
    def finalize(self):
        """Serialize this reporter to its final XML fragment and release the
        accumulated report data.

        After this call the instance only answers ``to_xml()``, which replays
        the pre-rendered string as raw xml.
        """
        data = self.to_xml().unicode(indent=0)
        # drop every attribute to free memory early; replace to_xml with a
        # closure over the rendered string
        self.__dict__.clear()
        self.to_xml = lambda: py.xml.raw(data)
+
+
@pytest.fixture
def record_xml_property(request):
    """Fixture yielding a callable ``(name, value)`` that attaches an extra
    xml property to the current test's junit tag, with the value
    xml-encoded automatically.  A no-op when junitxml reporting is off.
    """
    request.node.warn(
        code='C3',
        message='record_xml_property is an experimental feature',
    )
    xml = getattr(request.config, "_xml", None)
    if xml is None:
        # junitxml is not active: hand back a do-nothing recorder
        def add_property_noop(name, value):
            pass

        return add_property_noop
    node_reporter = xml.node_reporter(request.node.nodeid)
    return node_reporter.add_property
+
+
def pytest_addoption(parser):
    """Register the junit-xml command line options."""
    group = parser.getgroup("terminal reporting")
    group.addoption(
        '--junitxml', '--junit-xml',
        action="store",
        dest="xmlpath",
        metavar="path",
        # validate early that the given path is usable as a filename
        type=functools.partial(filename_arg, optname="--junitxml"),
        default=None,
        help="create junit-xml style report file at given path.")
    group.addoption(
        '--junitprefix', '--junit-prefix',
        action="store",
        metavar="str",
        default=None,
        help="prepend prefix to classnames in junit-xml output")
+
+
def pytest_configure(config):
    """Create and register the LogXML plugin when --junitxml was given.

    On xdist slave nodes (identified by ``config.slaveinput``) no log file
    is opened; only the master writes the xml report.
    """
    xmlpath = config.option.xmlpath
    if not xmlpath or hasattr(config, 'slaveinput'):
        return
    config._xml = LogXML(xmlpath, config.option.junitprefix)
    config.pluginmanager.register(config._xml)
+
+
def pytest_unconfigure(config):
    """Tear down the junit-xml plugin, if one was registered."""
    plugin = getattr(config, '_xml', None)
    if plugin:
        del config._xml
        config.pluginmanager.unregister(plugin)
+
+
def mangle_test_address(address):
    """Split a test address like ``pkg/test_x.py::Class::test[param]`` into
    junit-friendly name parts: the file path becomes a dotted module path
    (without ``.py``) and any parametrization suffix is re-attached to the
    last component.
    """
    path, bracket, params = address.partition('[')
    names = path.split("::")
    # drop the synthetic '()' instance marker, if present
    if '()' in names:
        names.remove('()')
    # file path -> dotted path, then strip the .py extension
    names[0] = _py_ext_re.sub("", names[0].replace("/", "."))
    # restore the parametrization part
    names[-1] += bracket + params
    return names
+
+
class LogXML(object):
    """Plugin that accumulates per-test reporters and writes the junit-xml
    log file at session end."""

    def __init__(self, logfile, prefix):
        # normalize the user-supplied path (~, $VARS) to an absolute location
        logfile = os.path.expanduser(os.path.expandvars(logfile))
        self.logfile = os.path.normpath(os.path.abspath(logfile))
        self.prefix = prefix
        # outcome counters feeding the <testsuite> summary attributes
        self.stats = dict.fromkeys([
            'error',
            'passed',
            'failure',
            'skipped',
        ], 0)
        self.node_reporters = {}  # nodeid -> _NodeReporter
        self.node_reporters_ordered = []
        self.global_properties = []

    def finalize(self, report):
        """Serialize and discard the reporter for the given report/nodeid."""
        nodeid = getattr(report, 'nodeid', report)
        # local hack to handle xdist report order
        slavenode = getattr(report, 'node', None)
        reporter = self.node_reporters.pop((nodeid, slavenode))
        if reporter is not None:
            reporter.finalize()

    def node_reporter(self, report):
        """Return the _NodeReporter for ``report`` (a report object or a
        plain nodeid string), creating and remembering it on first use."""
        nodeid = getattr(report, 'nodeid', report)
        # local hack to handle xdist report order
        slavenode = getattr(report, 'node', None)

        key = nodeid, slavenode

        if key in self.node_reporters:
            # TODO: breaks for --dist=each
            return self.node_reporters[key]

        reporter = _NodeReporter(nodeid, self)

        self.node_reporters[key] = reporter
        self.node_reporters_ordered.append(reporter)

        return reporter

    def add_stats(self, key):
        # unknown keys are ignored on purpose
        if key in self.stats:
            self.stats[key] += 1

    def _opentestcase(self, report):
        # fetch (or create) the reporter and seed it with testcase attributes
        reporter = self.node_reporter(report)
        reporter.record_testreport(report)
        return reporter

    def pytest_runtest_logreport(self, report):
        """handle a setup/call/teardown report, generating the appropriate
        xml tags as necessary.

        note: due to plugins like xdist, this hook may be called in interlaced
        order with reports from other nodes. for example:

        usual call order:
            -> setup node1
            -> call node1
            -> teardown node1
            -> setup node2
            -> call node2
            -> teardown node2

        possible call order in xdist:
            -> setup node1
            -> call node1
            -> setup node2
            -> call node2
            -> teardown node2
            -> teardown node1
        """
        if report.passed:
            if report.when == "call":  # ignore setup/teardown
                reporter = self._opentestcase(report)
                reporter.append_pass(report)
        elif report.failed:
            reporter = self._opentestcase(report)
            if report.when == "call":
                reporter.append_failure(report)
            else:
                # failures outside the test call are setup/teardown errors
                reporter.append_error(report)
        elif report.skipped:
            reporter = self._opentestcase(report)
            reporter.append_skipped(report)
        self.update_testcase_duration(report)
        if report.when == "teardown":
            # teardown is the last phase: the testcase can be serialized now
            self.finalize(report)

    def update_testcase_duration(self, report):
        """accumulates total duration for nodeid from given report and updates
        the Junit.testcase with the new total if already created.
        """
        reporter = self.node_reporter(report)
        reporter.duration += getattr(report, 'duration', 0.0)

    def pytest_collectreport(self, report):
        if not report.passed:
            reporter = self._opentestcase(report)
            if report.failed:
                reporter.append_collect_error(report)
            else:
                reporter.append_collect_skipped(report)

    def pytest_internalerror(self, excrepr):
        # internal errors get a synthetic "internal" testcase entry
        reporter = self.node_reporter('internal')
        reporter.attrs.update(classname="pytest", name='internal')
        reporter._add_simple(Junit.error, 'internal error', excrepr)

    def pytest_sessionstart(self):
        self.suite_start_time = time.time()

    def pytest_sessionfinish(self):
        # make sure the directory for the log file exists
        dirname = os.path.dirname(os.path.abspath(self.logfile))
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        # NOTE(review): builtin open() has no `encoding` parameter on
        # Python 2 -- presumably a backported open is in scope here; confirm.
        logfile = open(self.logfile, 'w', encoding='utf-8')
        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time

        numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error']

        logfile.write('<?xml version="1.0" encoding="utf-8"?>')

        logfile.write(Junit.testsuite(
            self._get_global_properties_node(),
            [x.to_xml() for x in self.node_reporters_ordered],
            name="pytest",
            errors=self.stats['error'],
            failures=self.stats['failure'],
            skips=self.stats['skipped'],
            tests=numtests,
            time="%.3f" % suite_time_delta, ).unicode(indent=0))
        logfile.close()

    def pytest_terminal_summary(self, terminalreporter):
        terminalreporter.write_sep("-",
                                   "generated xml file: %s" % (self.logfile))

    def add_global_property(self, name, value):
        self.global_properties.append((str(name), bin_xml_escape(value)))

    def _get_global_properties_node(self):
        """Return a Junit node containing custom properties, if any.
        """
        if self.global_properties:
            return Junit.properties(
                    [
                        Junit.property(name=name, value=value)
                        for name, value in self.global_properties
                    ]
            )
        return ''
diff --git a/lib/spack/external/_pytest/main.py b/lib/spack/external/_pytest/main.py
new file mode 100644
index 0000000000..52876c12a4
--- /dev/null
+++ b/lib/spack/external/_pytest/main.py
@@ -0,0 +1,762 @@
+""" core implementation of testing process: init, session, runtest loop. """
+import functools
+import os
+import sys
+
+import _pytest
+import _pytest._code
+import py
+import pytest
+try:
+    from collections import MutableMapping as MappingMixin
+except ImportError:
+    from UserDict import DictMixin as MappingMixin
+
+from _pytest.config import directory_arg
+from _pytest.runner import collect_one_node
+
+tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
+
+# exitcodes for the command line
+EXIT_OK = 0
+EXIT_TESTSFAILED = 1
+EXIT_INTERRUPTED = 2
+EXIT_INTERNALERROR = 3
+EXIT_USAGEERROR = 4
+EXIT_NOTESTSCOLLECTED = 5
+
def pytest_addoption(parser):
    """Register ini values and command line options for collection and the
    run loop."""
    parser.addini("norecursedirs", "directory patterns to avoid for recursion",
        type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'])
    parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
        type="args", default=[])
    #parser.addini("dirpatterns",
    #    "patterns specifying possible locations of test files",
    #    type="linelist", default=["**/test_*.txt",
    #            "**/test_*.py", "**/*_test.py"]
    #)
    group = parser.getgroup("general", "running and selection options")
    # NOTE(review): the trailing comma after this call makes the statement a
    # one-element tuple -- harmless no-op, kept byte-identical
    group._addoption('-x', '--exitfirst', action="store_const",
               dest="maxfail", const=1,
               help="exit instantly on first error or failed test."),
    group._addoption('--maxfail', metavar="num",
               action="store", type=int, dest="maxfail", default=0,
               help="exit after first num failures or errors.")
    group._addoption('--strict', action="store_true",
               help="run pytest in strict mode, warnings become errors.")
    group._addoption("-c", metavar="file", type=str, dest="inifilename",
               help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
    group._addoption("--continue-on-collection-errors", action="store_true",
               default=False, dest="continue_on_collection_errors",
               help="Force test execution even if collection errors occur.")

    group = parser.getgroup("collect", "collection")
    group.addoption('--collectonly', '--collect-only', action="store_true",
        help="only collect tests, don't execute them."),
    group.addoption('--pyargs', action="store_true",
        help="try to interpret all arguments as python packages.")
    group.addoption("--ignore", action="append", metavar="path",
        help="ignore path during collection (multi-allowed).")
    # when changing this to --conf-cut-dir, config.py Conftest.setinitial
    # needs upgrading as well
    group.addoption('--confcutdir', dest="confcutdir", default=None,
        metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"),
        help="only load conftest.py's relative to specified dir.")
    group.addoption('--noconftest', action="store_true",
        dest="noconftest", default=False,
        help="Don't load any conftest.py files.")
    group.addoption('--keepduplicates', '--keep-duplicates', action="store_true",
        dest="keepduplicates", default=False,
        help="Keep duplicate tests.")

    group = parser.getgroup("debugconfig",
        "test session debugging and configuration")
    group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
               help="base temporary directory for this test run.")
+
+
def pytest_namespace():
    """Expose the collection node classes as the ``pytest.collect``
    namespace."""
    node_classes = dict(
        Item=Item, Collector=Collector, File=File, Session=Session)
    return dict(collect=node_classes)
+
+
def pytest_configure(config):
    # expose the active config as pytest.config for backwards compatibility
    pytest.config = config  # compatibility
+
+
def wrap_session(config, doit):
    """Skeleton command line program: configure, run ``doit(config, session)``
    and translate the outcome (or exceptions) into an exit status."""
    session = Session(config)
    session.exitstatus = EXIT_OK
    # initstate tracks how far startup got, so teardown only runs for stages
    # that actually started: 0 = nothing, 1 = configured, 2 = session started
    initstate = 0
    try:
        try:
            config._do_configure()
            initstate = 1
            config.hook.pytest_sessionstart(session=session)
            initstate = 2
            session.exitstatus = doit(config, session) or 0
        except pytest.UsageError:
            raise
        except KeyboardInterrupt:
            excinfo = _pytest._code.ExceptionInfo()
            if initstate < 2 and isinstance(
                    excinfo.value, pytest.exit.Exception):
                # pytest.exit() before the session started: print the reason
                sys.stderr.write('{0}: {1}\n'.format(
                    excinfo.typename, excinfo.value.msg))
            config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
            session.exitstatus = EXIT_INTERRUPTED
        except:
            # deliberately broad: anything else is an internal error
            excinfo = _pytest._code.ExceptionInfo()
            config.notify_exception(excinfo, config.option)
            session.exitstatus = EXIT_INTERNALERROR
            if excinfo.errisinstance(SystemExit):
                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")

    finally:
        excinfo = None  # Explicitly break reference cycle.
        session.startdir.chdir()
        if initstate >= 2:
            config.hook.pytest_sessionfinish(
                session=session,
                exitstatus=session.exitstatus)
        config._ensure_unconfigure()
    return session.exitstatus
+
def pytest_cmdline_main(config):
    # run the default protocol (_main) inside a fully wrapped session
    return wrap_session(config, _main)
+
def _main(config, session):
    """ default command line protocol for initialization, session,
    running tests and reporting. """
    config.hook.pytest_collection(session=session)
    config.hook.pytest_runtestloop(session=session)

    if session.testsfailed:
        return EXIT_TESTSFAILED
    elif session.testscollected == 0:
        return EXIT_NOTESTSCOLLECTED
    # implicit None: wrap_session maps it to EXIT_OK
+
def pytest_collection(session):
    # delegate to the session, which drives the whole collection protocol
    return session.perform_collect()
+
def pytest_runtestloop(session):
    """Run the collected items one by one, honoring collection errors,
    --collect-only and early-stop requests (``session.shouldstop``)."""
    if (session.testsfailed and
            not session.config.option.continue_on_collection_errors):
        raise session.Interrupted(
            "%d errors during collection" % session.testsfailed)

    if session.config.option.collectonly:
        return True

    items = session.items
    for index, item in enumerate(items):
        # hand the upcoming item to the protocol so fixture teardown can be
        # deferred to the last possible moment
        upcoming = items[index + 1] if index + 1 < len(items) else None
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=upcoming)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True
+
def pytest_ignore_collect(path, config):
    """Return True if ``path`` should be skipped during collection, based on
    conftest ``collect_ignore`` lists, the --ignore option and duplicate
    path tracking."""
    p = path.dirpath()
    ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
    ignore_paths = ignore_paths or []
    excludeopt = config.getoption("ignore")
    if excludeopt:
        ignore_paths.extend([py.path.local(x) for x in excludeopt])

    if path in ignore_paths:
        return True

    # Skip duplicate paths.
    keepduplicates = config.getoption("keepduplicates")
    duplicate_paths = config.pluginmanager._duplicatepaths
    if not keepduplicates:
        if path in duplicate_paths:
            return True
        else:
            # first sighting: remember it so later duplicates get skipped
            duplicate_paths.add(path)

    return False
+
+
class FSHookProxy:
    """Hook caller restricted to the conftest plugins that are relevant for
    one filesystem path.  Resolved hook callers are memoized per instance so
    ``__getattr__`` only fires once per hook name."""

    def __init__(self, fspath, pm, remove_mods):
        self.fspath = fspath
        self.pm = pm
        self.remove_mods = remove_mods

    def __getattr__(self, name):
        # build a hook caller excluding the irrelevant conftest modules and
        # cache it on the instance for subsequent lookups
        caller = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
        self.__dict__[name] = caller
        return caller
+
def compatproperty(name):
    """Return a property that resolves ``pytest.<name>`` on access while
    warning about the deprecated spelling."""
    def _resolve(self):
        import warnings
        msg = "This usage is deprecated, please use pytest.{0} instead".format(name)
        warnings.warn(msg, PendingDeprecationWarning, stacklevel=2)
        return getattr(pytest, name)

    return property(_resolve)
+
class NodeKeywords(MappingMixin):
    """Mapping of keywords/markers for a collection node.

    Lookups fall back to the parent node's keywords, so a marker set anywhere
    up the collection chain is visible on the item.  Writes always go to this
    node's own marker dict; deletion is forbidden.
    """

    def __init__(self, node):
        self.node = node
        self.parent = node.parent
        # a node's own name is always a keyword on itself
        self._markers = {node.name: True}

    def __getitem__(self, key):
        try:
            return self._markers[key]
        except KeyError:
            if self.parent is None:
                raise
            # fall back to the parent chain
            return self.parent.keywords[key]

    def __setitem__(self, key, value):
        self._markers[key] = value

    def __delitem__(self, key):
        raise ValueError("cannot delete key in keywords dict")

    def _all_keys(self):
        """Return the set of keys visible on this node, including keys
        inherited from the parent chain."""
        seen = set(self._markers)
        if self.parent is not None:
            seen.update(self.parent.keywords)
        return seen

    def __iter__(self):
        return iter(self._all_keys())

    def __len__(self):
        # BUG FIX: the previous implementation returned
        # ``len(self.__iter__())`` -- calling len() on a set *iterator*,
        # which raises TypeError.  Measure the materialized key set instead.
        return len(self._all_keys())

    def keys(self):
        return list(self)

    def __repr__(self):
        return "<NodeKeywords for node %s>" % (self.node, )
+
+
class Node(object):
    """ base class for Collector and Item the test collection tree.
    Collector subclasses have children, Items are terminal nodes."""

    def __init__(self, name, parent=None, config=None, session=None):
        #: a unique name within the scope of the parent node
        self.name = name

        #: the parent collector node.
        self.parent = parent

        #: the pytest config object
        self.config = config or parent.config

        #: the session this node is part of
        self.session = session or parent.session

        #: filesystem path where this node was collected from (can be None)
        self.fspath = getattr(parent, 'fspath', None)

        #: keywords/markers collected from all scopes
        self.keywords = NodeKeywords(self)

        #: allow adding of extra keywords to use for matching
        self.extra_keyword_matches = set()

        # used for storing artificial fixturedefs for direct parametrization
        self._name2pseudofixturedef = {}

    @property
    def ihook(self):
        """ fspath sensitive hook proxy used to call pytest hooks"""
        return self.session.gethookproxy(self.fspath)

    # deprecated aliases: accessing e.g. node.Module warns and resolves to
    # pytest.Module (see compatproperty)
    Module = compatproperty("Module")
    Class = compatproperty("Class")
    Instance = compatproperty("Instance")
    Function = compatproperty("Function")
    File = compatproperty("File")
    Item = compatproperty("Item")

    def _getcustomclass(self, name):
        # a subclass overriding the attribute indicates use of the legacy
        # customization API; warn but honor it
        cls = getattr(self, name)
        if cls != getattr(pytest, name):
            py.log._apiwarn("2.0", "use of node.%s is deprecated, "
                "use pytest_pycollect_makeitem(...) to create custom "
                "collection nodes" % name)
        return cls

    def __repr__(self):
        return "<%s %r>" %(self.__class__.__name__,
                           getattr(self, 'name', None))

    def warn(self, code, message):
        """ generate a warning with the given code and message for this
        item. """
        assert isinstance(code, str)
        fslocation = getattr(self, "location", None)
        if fslocation is None:
            fslocation = getattr(self, "fspath", None)
        else:
            # location is a (fspath, lineno, domain) triple; lineno is 0-based
            fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1)

        self.ihook.pytest_logwarning.call_historic(kwargs=dict(
            code=code, message=message,
            nodeid=self.nodeid, fslocation=fslocation))

    # methods for ordering nodes
    @property
    def nodeid(self):
        """ a ::-separated string denoting its collection tree address. """
        try:
            return self._nodeid
        except AttributeError:
            # compute lazily and cache on first access
            self._nodeid = x = self._makeid()
            return x

    def _makeid(self):
        return self.parent.nodeid + "::" + self.name

    def __hash__(self):
        return hash(self.nodeid)

    def setup(self):
        pass

    def teardown(self):
        pass

    def _memoizedcall(self, attrname, function):
        """Call ``function`` once, caching its result -- or the exception it
        raised -- under ``attrname`` so later calls replay the outcome."""
        exattrname = "_ex_" + attrname
        failure = getattr(self, exattrname, None)
        if failure is not None:
            # an earlier call failed: re-raise the stored exception info
            py.builtin._reraise(failure[0], failure[1], failure[2])
        if hasattr(self, attrname):
            return getattr(self, attrname)
        try:
            res = function()
        except py.builtin._sysex:
            # never cache system-exiting exceptions
            raise
        except:
            failure = sys.exc_info()
            setattr(self, exattrname, failure)
            raise
        setattr(self, attrname, res)
        return res

    def listchain(self):
        """ return list of all parent collectors up to self,
            starting from root of collection tree. """
        chain = []
        item = self
        while item is not None:
            chain.append(item)
            item = item.parent
        chain.reverse()
        return chain

    def add_marker(self, marker):
        """ dynamically add a marker object to the node.

        ``marker`` can be a string or pytest.mark.* instance.
        """
        from _pytest.mark import MarkDecorator
        if isinstance(marker, py.builtin._basestring):
            marker = MarkDecorator(marker)
        elif not isinstance(marker, MarkDecorator):
            raise ValueError("is not a string or pytest.mark.* Marker")
        self.keywords[marker.name] = marker

    def get_marker(self, name):
        """ get a marker object from this node or None if
        the node doesn't have a marker with that name. """
        # implicitly returns None for plain (non-marker) keyword values too
        val = self.keywords.get(name, None)
        if val is not None:
            from _pytest.mark import MarkInfo, MarkDecorator
            if isinstance(val, (MarkDecorator, MarkInfo)):
                return val

    def listextrakeywords(self):
        """ Return a set of all extra keywords in self and any parents."""
        extra_keywords = set()
        item = self
        for item in self.listchain():
            extra_keywords.update(item.extra_keyword_matches)
        return extra_keywords

    def listnames(self):
        return [x.name for x in self.listchain()]

    def addfinalizer(self, fin):
        """ register a function to be called when this node is finalized.

        This method can only be called when this node is active
        in a setup chain, for example during self.setup().
        """
        self.session._setupstate.addfinalizer(fin, self)

    def getparent(self, cls):
        """ get the next parent node (including ourself)
        which is an instance of the given class"""
        current = self
        while current and not isinstance(current, cls):
            current = current.parent
        return current

    def _prunetraceback(self, excinfo):
        pass

    def _repr_failure_py(self, excinfo, style=None):
        """Build the failure representation for ``excinfo``, honoring the
        --fulltrace and --tb style options."""
        fm = self.session._fixturemanager
        if excinfo.errisinstance(fm.FixtureLookupError):
            return excinfo.value.formatrepr()
        tbfilter = True
        if self.config.option.fulltrace:
            style="long"
        else:
            # keep the last traceback entry as fallback if pruning empties it
            tb = _pytest._code.Traceback([excinfo.traceback[-1]])
            self._prunetraceback(excinfo)
            if len(excinfo.traceback) == 0:
                excinfo.traceback = tb
            tbfilter = False  # prunetraceback already does it
            if style == "auto":
                style = "long"
        # XXX should excinfo.getrepr record all data and toterminal() process it?
        if style is None:
            if self.config.option.tbstyle == "short":
                style = "short"
            else:
                style = "long"

        try:
            os.getcwd()
            abspath = False
        except OSError:
            # current directory vanished: report absolute paths instead
            abspath = True

        return excinfo.getrepr(funcargs=True, abspath=abspath,
                               showlocals=self.config.option.showlocals,
                               style=style, tbfilter=tbfilter)

    repr_failure = _repr_failure_py
+
class Collector(Node):
    """ Collector instances create children through collect()
        and thus iteratively build a tree.
    """

    class CollectError(Exception):
        """ an error during collection, contains a custom message. """

    def collect(self):
        """ returns a list of children (items and collectors)
            for this collection node.
        """
        raise NotImplementedError("abstract")

    def repr_failure(self, excinfo):
        """ represent a collection failure. """
        if excinfo.errisinstance(self.CollectError):
            # deliberate collection error: show only its message
            exc = excinfo.value
            return str(exc.args[0])
        return self._repr_failure_py(excinfo, style="short")

    def _memocollect(self):
        """ internal helper method to cache results of calling collect(). """
        return self._memoizedcall('_collected', lambda: list(self.collect()))

    def _prunetraceback(self, excinfo):
        if hasattr(self, 'fspath'):
            traceback = excinfo.traceback
            # cut the traceback down to frames from the collected file;
            # if that removes nothing, cut away pytest-internal frames instead
            ntraceback = traceback.cut(path=self.fspath)
            if ntraceback == traceback:
                ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
            excinfo.traceback = ntraceback.filter()
+
class FSCollector(Collector):
    """Base class for collectors whose identity is a filesystem path."""

    def __init__(self, fspath, parent=None, config=None, session=None):
        fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
        name = fspath.basename
        if parent is not None:
            # prefer the parent-relative path as name, with forward slashes
            rel = fspath.relto(parent.fspath)
            if rel:
                name = rel
            name = name.replace(os.sep, "/")
        super(FSCollector, self).__init__(name, parent, config, session)
        self.fspath = fspath

    def _makeid(self):
        # node id is the rootdir-relative path, always with forward slashes
        relpath = self.fspath.relto(self.config.rootdir)
        if os.sep != "/":
            relpath = relpath.replace(os.sep, "/")
        return relpath
+
class File(FSCollector):
    """Base class for collecting tests from a single file."""
+
class Item(Node):
    """ a basic test invocation item. Note that for a single function
    there might be multiple test invocation items.
    """
    # set by the runner protocol to the next scheduled item, if any
    nextitem = None

    def __init__(self, name, parent=None, config=None, session=None):
        super(Item, self).__init__(name, parent, config, session)
        # (when, key, content) tuples of captured output sections
        self._report_sections = []

    def add_report_section(self, when, key, content):
        # empty content is dropped to keep reports compact
        if content:
            self._report_sections.append((when, key, content))

    def reportinfo(self):
        """Return a (fspath, lineno, domaininfo) triple describing this
        item; lineno may be None."""
        return self.fspath, None, ""

    @property
    def location(self):
        """Like reportinfo() but with a rootdir-relative path; computed
        lazily and cached on the instance."""
        try:
            return self._location
        except AttributeError:
            location = self.reportinfo()
            # bestrelpath is a quite slow function
            cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
            try:
                fspath = cache[location[0]]
            except KeyError:
                fspath = self.session.fspath.bestrelpath(location[0])
                cache[location[0]] = fspath
            location = (fspath, location[1], str(location[2]))
            self._location = location
            return location
+
class NoMatch(Exception):
    """Raised when argument matching cannot locate any matching name."""
+
class Interrupted(KeyboardInterrupt):
    """Signals an interrupted test run."""
    # report as builtins.Interrupted so py3 tracebacks do not expose the
    # internal module path
    __module__ = 'builtins'
+
+class Session(FSCollector):
+    Interrupted = Interrupted
+
+    def __init__(self, config):
+        FSCollector.__init__(self, config.rootdir, parent=None,
+                             config=config, session=self)
+        self.testsfailed = 0
+        self.testscollected = 0
+        self.shouldstop = False
+        self.trace = config.trace.root.get("collection")
+        self._norecursepatterns = config.getini("norecursedirs")
+        self.startdir = py.path.local()
+        self.config.pluginmanager.register(self, name="session")
+
+    def _makeid(self):
+        return ""
+
+    @pytest.hookimpl(tryfirst=True)
+    def pytest_collectstart(self):
+        if self.shouldstop:
+            raise self.Interrupted(self.shouldstop)
+
+    @pytest.hookimpl(tryfirst=True)
+    def pytest_runtest_logreport(self, report):
+        if report.failed and not hasattr(report, 'wasxfail'):
+            self.testsfailed += 1
+            maxfail = self.config.getvalue("maxfail")
+            if maxfail and self.testsfailed >= maxfail:
+                self.shouldstop = "stopping after %d failures" % (
+                    self.testsfailed)
+    pytest_collectreport = pytest_runtest_logreport
+
+    def isinitpath(self, path):
+        return path in self._initialpaths
+
+    def gethookproxy(self, fspath):
+        # check if we have the common case of running
+        # hooks with all conftest.py filesall conftest.py
+        pm = self.config.pluginmanager
+        my_conftestmodules = pm._getconftestmodules(fspath)
+        remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
+        if remove_mods:
+            # one or more conftests are not in use at this fspath
+            proxy = FSHookProxy(fspath, pm, remove_mods)
+        else:
+            # all plugis are active for this fspath
+            proxy = self.config.hook
+        return proxy
+
+    def perform_collect(self, args=None, genitems=True):
+        hook = self.config.hook
+        try:
+            items = self._perform_collect(args, genitems)
+            hook.pytest_collection_modifyitems(session=self,
+                config=self.config, items=items)
+        finally:
+            hook.pytest_collection_finish(session=self)
+        self.testscollected = len(items)
+        return items
+
+    def _perform_collect(self, args, genitems):
+        if args is None:
+            args = self.config.args
+        self.trace("perform_collect", self, args)
+        self.trace.root.indent += 1
+        self._notfound = []
+        self._initialpaths = set()
+        self._initialparts = []
+        self.items = items = []
+        for arg in args:
+            parts = self._parsearg(arg)
+            self._initialparts.append(parts)
+            self._initialpaths.add(parts[0])
+        rep = collect_one_node(self)
+        self.ihook.pytest_collectreport(report=rep)
+        self.trace.root.indent -= 1
+        if self._notfound:
+            errors = []
+            for arg, exc in self._notfound:
+                line = "(no name %r in any of %r)" % (arg, exc.args[0])
+                errors.append("not found: %s\n%s" % (arg, line))
+                #XXX: test this
+            raise pytest.UsageError(*errors)
+        if not genitems:
+            return rep.result
+        else:
+            if rep.passed:
+                for node in rep.result:
+                    self.items.extend(self.genitems(node))
+            return items
+
+    def collect(self):
+        for parts in self._initialparts:
+            arg = "::".join(map(str, parts))
+            self.trace("processing argument", arg)
+            self.trace.root.indent += 1
+            try:
+                for x in self._collect(arg):
+                    yield x
+            except NoMatch:
+                # we are inside a make_report hook so
+                # we cannot directly pass through the exception
+                self._notfound.append((arg, sys.exc_info()[1]))
+
+            self.trace.root.indent -= 1
+
+    def _collect(self, arg):
+        # Collect a single argument: a directory is walked breadth-first for
+        # files; a single file may be narrowed further by "::name" parts.
+        names = self._parsearg(arg)
+        path = names.pop(0)
+        if path.check(dir=1):
+            # trailing "::" qualifiers make no sense for directories
+            assert not names, "invalid arg %r" %(arg,)
+            for path in path.visit(fil=lambda x: x.check(file=1),
+                                   rec=self._recurse, bf=True, sort=True):
+                for x in self._collectfile(path):
+                    yield x
+        else:
+            assert path.check(file=1)
+            for x in self.matchnodes(self._collectfile(path), names):
+                yield x
+
+    def _collectfile(self, path):
+        # Ask plugins to collect ``path``.  Paths given explicitly on the
+        # command line (init paths) bypass the pytest_ignore_collect filter.
+        ihook = self.gethookproxy(path)
+        if not self.isinitpath(path):
+            if ihook.pytest_ignore_collect(path=path, config=self.config):
+                return ()
+        return ihook.pytest_collect_file(path=path, parent=self)
+
+    def _recurse(self, path):
+        # Decide whether the directory walk should descend into ``path``.
+        # NOTE: returns None (falsy) when the ignore-collect hook matches and
+        # False for --norecursedirs patterns; both prevent recursion.
+        ihook = self.gethookproxy(path.dirpath())
+        if ihook.pytest_ignore_collect(path=path, config=self.config):
+            return
+        for pat in self._norecursepatterns:
+            if path.check(fnmatch=pat):
+                return False
+        ihook = self.gethookproxy(path)
+        ihook.pytest_collect_directory(path=path, parent=self)
+        return True
+
+    def _tryconvertpyarg(self, x):
+        """Convert a dotted module name to path.
+
+        Returns ``x`` unchanged when it cannot be resolved to a module.
+        """
+        import pkgutil
+        try:
+            loader = pkgutil.find_loader(x)
+        except ImportError:
+            return x
+        if loader is None:
+            return x
+        # This method is sometimes invoked when AssertionRewritingHook, which
+        # does not define a get_filename method, is already in place:
+        try:
+            path = loader.get_filename(x)
+        except AttributeError:
+            # Retrieve path from AssertionRewritingHook:
+            path = loader.modules[x][0].co_filename
+        if loader.is_package(x):
+            # for a package, collect the package directory, not __init__.py
+            path = os.path.dirname(path)
+        return path
+
+    def _parsearg(self, arg):
+        """ return (fspath, names) tuple after checking the file exists. """
+        parts = str(arg).split("::")
+        if self.config.option.pyargs:
+            # --pyargs: the first part may be a dotted module name
+            parts[0] = self._tryconvertpyarg(parts[0])
+        # normalize separators and resolve relative to the invocation dir
+        relpath = parts[0].replace("/", os.sep)
+        path = self.config.invocation_dir.join(relpath, abs=True)
+        if not path.check():
+            if self.config.option.pyargs:
+                raise pytest.UsageError("file or package not found: " + arg + " (missing __init__.py?)")
+            else:
+                raise pytest.UsageError("file not found: " + arg)
+        parts[0] = path
+        return parts
+
+    def matchnodes(self, matching, names):
+        # Tracing wrapper around _matchnodes; raises NoMatch when the
+        # "::"-qualified names selected nothing.
+        self.trace("matchnodes", matching, names)
+        self.trace.root.indent += 1
+        nodes = self._matchnodes(matching, names)
+        num = len(nodes)
+        self.trace("matchnodes finished -> ", num, "nodes")
+        self.trace.root.indent -= 1
+        if num == 0:
+            raise NoMatch(matching, names[:1])
+        return nodes
+
+    def _matchnodes(self, matching, names):
+        # Recursively narrow the ``matching`` nodes by the remaining
+        # "::"-separated ``names``.
+        if not matching or not names:
+            return matching
+        name = names[0]
+        assert name
+        nextnames = names[1:]
+        resultnodes = []
+        for node in matching:
+            if isinstance(node, pytest.Item):
+                # ``names`` is known non-empty here, so a plain Item cannot
+                # satisfy further qualifiers; it is skipped.
+                if not names:
+                    resultnodes.append(node)
+                continue
+            assert isinstance(node, pytest.Collector)
+            rep = collect_one_node(node)
+            if rep.passed:
+                has_matched = False
+                for x in rep.result:
+                    # TODO: remove parametrized workaround once collection structure contains parametrization
+                    if x.name == name or x.name.split("[")[0] == name:
+                        resultnodes.extend(self.matchnodes([x], nextnames))
+                        has_matched = True
+                # XXX accept IDs that don't have "()" for class instances
+                # NOTE(review): ``x`` is the loop variable left over from the
+                # loop above; this is only safe because len(rep.result) == 1
+                # guarantees the loop executed at least once.
+                if not has_matched and len(rep.result) == 1 and x.name == "()":
+                    nextnames.insert(0, name)
+                    resultnodes.extend(self.matchnodes([x], nextnames))
+            node.ihook.pytest_collectreport(report=rep)
+        return resultnodes
+
+    def genitems(self, node):
+        # Depth-first expansion of a collection tree into runnable Items,
+        # emitting a pytest_collectreport for every collector visited.
+        self.trace("genitems", node)
+        if isinstance(node, pytest.Item):
+            node.ihook.pytest_itemcollected(item=node)
+            yield node
+        else:
+            assert isinstance(node, pytest.Collector)
+            rep = collect_one_node(node)
+            if rep.passed:
+                for subnode in rep.result:
+                    for x in self.genitems(subnode):
+                        yield x
+            node.ihook.pytest_collectreport(report=rep)
diff --git a/lib/spack/external/_pytest/mark.py b/lib/spack/external/_pytest/mark.py
new file mode 100644
index 0000000000..357a60492e
--- /dev/null
+++ b/lib/spack/external/_pytest/mark.py
@@ -0,0 +1,328 @@
+""" generic mechanism for marking and selecting python functions. """
+import inspect
+
+
+class MarkerError(Exception):
+
+    """Error in use of a pytest marker/attribute."""
+    # NOTE(review): no raise site in this module; presumably raised by
+    # marker consumers elsewhere in _pytest — confirm before relying on it.
+
+
+def pytest_namespace():
+    # Expose the MarkGenerator singleton as ``pytest.mark``.
+    return {'mark': MarkGenerator()}
+
+
+def pytest_addoption(parser):
+    # Register the -k / -m selection options, the --markers listing option,
+    # and the "markers" ini key used for marker registration.
+    group = parser.getgroup("general")
+    group._addoption(
+        '-k',
+        action="store", dest="keyword", default='', metavar="EXPRESSION",
+        help="only run tests which match the given substring expression. "
+             "An expression is a python evaluatable expression "
+             "where all names are substring-matched against test names "
+             "and their parent classes. Example: -k 'test_method or test_"
+             "other' matches all test functions and classes whose name "
+             "contains 'test_method' or 'test_other'. "
+             "Additionally keywords are matched to classes and functions "
+             "containing extra names in their 'extra_keyword_matches' set, "
+             "as well as functions which have names assigned directly to them."
+    )
+
+    group._addoption(
+        "-m",
+        action="store", dest="markexpr", default="", metavar="MARKEXPR",
+        help="only run tests matching given mark expression.  "
+             "example: -m 'mark1 and not mark2'."
+    )
+
+    group.addoption(
+        "--markers", action="store_true",
+        help="show markers (builtin, plugin and per-project ones)."
+    )
+
+    parser.addini("markers", "markers for test functions", 'linelist')
+
+
+def pytest_cmdline_main(config):
+    # Handle --markers: print every marker registered in the ini file and
+    # return 0 to short-circuit the run instead of collecting tests.
+    import _pytest.config
+    if config.option.markers:
+        config._do_configure()
+        tw = _pytest.config.create_terminal_writer(config)
+        for line in config.getini("markers"):
+            name, rest = line.split(":", 1)
+            tw.write("@pytest.mark.%s:" % name, bold=True)
+            tw.line(rest)
+            tw.line()
+        config._ensure_unconfigure()
+        return 0
+
+
+# run before other pytest_cmdline_main implementations
+pytest_cmdline_main.tryfirst = True
+
+
+def pytest_collection_modifyitems(items, config):
+    # Deselect collected items that do not satisfy the -k / -m expressions.
+    keywordexpr = config.option.keyword.lstrip()
+    matchexpr = config.option.markexpr
+    if not keywordexpr and not matchexpr:
+        return
+    # pytest used to allow "-" for negating
+    # but today we just allow "-" at the beginning, use "not" instead
+    # we probably remove "-" altogether soon
+    if keywordexpr.startswith("-"):
+        keywordexpr = "not " + keywordexpr[1:]
+    selectuntil = False
+    # a trailing ":" selects everything from the first matching item onward
+    if keywordexpr[-1:] == ":":
+        selectuntil = True
+        keywordexpr = keywordexpr[:-1]
+
+    remaining = []
+    deselected = []
+    for colitem in items:
+        if keywordexpr and not matchkeyword(colitem, keywordexpr):
+            deselected.append(colitem)
+        else:
+            if selectuntil:
+                # first match found: stop keyword-filtering further items
+                keywordexpr = None
+            if matchexpr:
+                if not matchmark(colitem, matchexpr):
+                    deselected.append(colitem)
+                    continue
+            remaining.append(colitem)
+
+    if deselected:
+        config.hook.pytest_deselected(items=deselected)
+        items[:] = remaining
+
+
+class MarkMapping:
+    """Provides a local mapping for markers where item access
+    resolves to True if the marker is present. """
+    def __init__(self, keywords):
+        # keep only keys whose value is an actual mark object
+        mymarks = set()
+        for key, value in keywords.items():
+            if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
+                mymarks.add(key)
+        self._mymarks = mymarks
+
+    def __getitem__(self, name):
+        return name in self._mymarks
+
+
+class KeywordMapping:
+    """Provides a local mapping for keywords.
+    Given a list of names, map any substring of one of these names to True.
+    """
+    def __init__(self, names):
+        self._names = names
+
+    def __getitem__(self, subname):
+        # case-sensitive substring match against every known name
+        for name in self._names:
+            if subname in name:
+                return True
+        return False
+
+
+def matchmark(colitem, markexpr):
+    """Tries to match on any marker names, attached to the given colitem."""
+    # markexpr comes from the -m command-line option (trusted local input)
+    return eval(markexpr, {}, MarkMapping(colitem.keywords))
+
+
+def matchkeyword(colitem, keywordexpr):
+    """Tries to match given keyword expression to given collector item.
+
+    Will match on the name of colitem, including the names of its parents.
+    Only matches names of items which are either a :class:`Class` or a
+    :class:`Function`.
+    Additionally, matches on names in the 'extra_keyword_matches' set of
+    any item, as well as names directly assigned to test functions.
+    """
+    mapped_names = set()
+
+    # Add the names of the current item and any parent items
+    import pytest
+    for item in colitem.listchain():
+        # Instance nodes are skipped so their synthetic names do not match
+        if not isinstance(item, pytest.Instance):
+            mapped_names.add(item.name)
+
+    # Add the names added as extra keywords to current or parent items
+    for name in colitem.listextrakeywords():
+        mapped_names.add(name)
+
+    # Add the names attached to the current function through direct assignment
+    if hasattr(colitem, 'function'):
+        for name in colitem.function.__dict__:
+            mapped_names.add(name)
+
+    mapping = KeywordMapping(mapped_names)
+    if " " not in keywordexpr:
+        # special case to allow for simple "-k pass" and "-k 1.3"
+        return mapping[keywordexpr]
+    elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
+        # special case "-k 'not something'" without a full expression
+        return not mapping[keywordexpr[4:]]
+    return eval(keywordexpr, {}, mapping)
+
+
+def pytest_configure(config):
+    import pytest
+    if config.option.strict:
+        # in --strict mode attach the config so MarkGenerator._check can
+        # validate marker names against the "markers" ini registrations
+        pytest.mark._config = config
+
+
+class MarkGenerator:
+    """ Factory for :class:`MarkDecorator` objects - exposed as
+    a ``pytest.mark`` singleton instance.  Example::
+
+         import pytest
+         @pytest.mark.slowtest
+         def test_function():
+            pass
+
+    will set a 'slowtest' :class:`MarkInfo` object
+    on the ``test_function`` object. """
+
+    def __getattr__(self, name):
+        if name[0] == "_":
+            raise AttributeError("Marker name must NOT start with underscore")
+        # _config is only set in --strict mode (see pytest_configure)
+        if hasattr(self, '_config'):
+            self._check(name)
+        return MarkDecorator(name)
+
+    def _check(self, name):
+        # Validate ``name`` against markers registered in the ini file;
+        # the registered set is (re)built on every cache miss.
+        try:
+            if name in self._markers:
+                return
+        except AttributeError:
+            pass
+        self._markers = l = set()
+        for line in self._config.getini("markers"):
+            # only the part before ":" and "(" is the marker name proper
+            beginning = line.split(":", 1)
+            x = beginning[0].split("(", 1)[0]
+            l.add(x)
+        if name not in self._markers:
+            raise AttributeError("%r not a registered marker" % (name,))
+
+def istestfunc(func):
+    # a test function is any callable with a real name (lambdas excluded)
+    return hasattr(func, "__call__") and \
+        getattr(func, "__name__", "<lambda>") != "<lambda>"
+
+class MarkDecorator:
+    """ A decorator for test functions and test classes.  When applied
+    it will create :class:`MarkInfo` objects which may be
+    :ref:`retrieved by hooks as item keywords <excontrolskip>`.
+    MarkDecorator instances are often created like this::
+
+        mark1 = pytest.mark.NAME              # simple MarkDecorator
+        mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
+
+    and can then be applied as decorators to test functions::
+
+        @mark2
+        def test_function():
+            pass
+
+    When a MarkDecorator instance is called it does the following:
+      1. If called with a single class as its only positional argument and no
+         additional keyword arguments, it attaches itself to the class so it
+         gets applied automatically to all test cases found in that class.
+      2. If called with a single function as its only positional argument and
+         no additional keyword arguments, it attaches a MarkInfo object to the
+         function, containing all the arguments already stored internally in
+         the MarkDecorator.
+      3. When called in any other case, it performs a 'fake construction' call,
+         i.e. it returns a new MarkDecorator instance with the original
+         MarkDecorator's content updated with the arguments passed to this
+         call.
+
+    Note: The rules above prevent MarkDecorator objects from storing only a
+    single function or class reference as their positional argument with no
+    additional keyword or positional arguments.
+
+    """
+    def __init__(self, name, args=None, kwargs=None):
+        self.name = name
+        self.args = args or ()
+        self.kwargs = kwargs or {}
+
+    @property
+    def markname(self):
+        return self.name # for backward-compat (2.4.1 had this attr)
+
+    def __repr__(self):
+        d = self.__dict__.copy()
+        name = d.pop('name')
+        return "<MarkDecorator %r %r>" % (name, d)
+
+    def __call__(self, *args, **kwargs):
+        """ if passed a single callable argument: decorate it with mark info.
+            otherwise add *args/**kwargs in-place to mark information. """
+        if args and not kwargs:
+            func = args[0]
+            is_class = inspect.isclass(func)
+            if len(args) == 1 and (istestfunc(func) or is_class):
+                if is_class:
+                    # classes accumulate marks on a ``pytestmark`` list
+                    if hasattr(func, 'pytestmark'):
+                        mark_list = func.pytestmark
+                        if not isinstance(mark_list, list):
+                            mark_list = [mark_list]
+                        # always work on a copy to avoid updating pytestmark
+                        # from a superclass by accident
+                        mark_list = mark_list + [self]
+                        func.pytestmark = mark_list
+                    else:
+                        func.pytestmark = [self]
+                else:
+                    # functions hold a MarkInfo under the mark's own name
+                    holder = getattr(func, self.name, None)
+                    if holder is None:
+                        holder = MarkInfo(
+                            self.name, self.args, self.kwargs
+                        )
+                        setattr(func, self.name, holder)
+                    else:
+                        holder.add(self.args, self.kwargs)
+                return func
+        # 'fake construction' (case 3): return a new decorator with the
+        # original args/kwargs merged with the ones from this call
+        kw = self.kwargs.copy()
+        kw.update(kwargs)
+        args = self.args + args
+        return self.__class__(self.name, args=args, kwargs=kw)
+
+
+def extract_argvalue(maybe_marked_args):
+    # TODO: incorrect mark data, the old code wasn't able to collect lists
+    # individual parametrized argument sets can be wrapped in a series
+    # of markers in which case we unwrap the values and apply the mark
+    # at Function init
+    newmarks = {}
+    argval = maybe_marked_args
+    while isinstance(argval, MarkDecorator):
+        # the wrapped value is the decorator's last positional argument
+        newmark = MarkDecorator(argval.markname,
+                                argval.args[:-1], argval.kwargs)
+        newmarks[newmark.markname] = newmark
+        argval = argval.args[-1]
+    return argval, newmarks
+
+
+class MarkInfo:
+    """ Marking object created by :class:`MarkDecorator` instances. """
+    def __init__(self, name, args, kwargs):
+        #: name of attribute
+        self.name = name
+        #: positional argument list, empty if none specified
+        self.args = args
+        #: keyword argument dictionary, empty if nothing specified
+        self.kwargs = kwargs.copy()
+        # history of every (args, kwargs) application, used by __iter__
+        self._arglist = [(args, kwargs.copy())]
+
+    def __repr__(self):
+        return "<MarkInfo %r args=%r kwargs=%r>" % (
+            self.name, self.args, self.kwargs
+        )
+
+    def add(self, args, kwargs):
+        """ add a MarkInfo with the given args and kwargs. """
+        self._arglist.append((args, kwargs))
+        self.args += args
+        self.kwargs.update(kwargs)
+
+    def __iter__(self):
+        """ yield MarkInfo objects each relating to a marking-call. """
+        for args, kwargs in self._arglist:
+            yield MarkInfo(self.name, args, kwargs)
diff --git a/lib/spack/external/_pytest/monkeypatch.py b/lib/spack/external/_pytest/monkeypatch.py
new file mode 100644
index 0000000000..852e72beda
--- /dev/null
+++ b/lib/spack/external/_pytest/monkeypatch.py
@@ -0,0 +1,258 @@
+""" monkeypatching and mocking functionality.  """
+
+import os, sys
+import re
+
+from py.builtin import _basestring
+
+import pytest
+
+RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
+
+
+@pytest.fixture
+def monkeypatch(request):
+    """The returned ``monkeypatch`` fixture provides these
+    helper methods to modify objects, dictionaries or os.environ::
+
+        monkeypatch.setattr(obj, name, value, raising=True)
+        monkeypatch.delattr(obj, name, raising=True)
+        monkeypatch.setitem(mapping, name, value)
+        monkeypatch.delitem(obj, name, raising=True)
+        monkeypatch.setenv(name, value, prepend=False)
+        monkeypatch.delenv(name, raising=True)
+        monkeypatch.syspath_prepend(path)
+        monkeypatch.chdir(path)
+
+    All modifications will be undone after the requesting
+    test function or fixture has finished. The ``raising``
+    parameter determines if a KeyError or AttributeError
+    will be raised if the set/deletion operation has no target.
+    """
+    mpatch = MonkeyPatch()
+    # undo all recorded changes when the requesting test/fixture finishes
+    request.addfinalizer(mpatch.undo)
+    return mpatch
+
+
+def resolve(name):
+    # simplified from zope.dottedname
+    # Resolve a dotted name to an object, importing intermediate modules
+    # when plain attribute access fails.
+    parts = name.split('.')
+
+    used = parts.pop(0)
+    found = __import__(used)
+    for part in parts:
+        used += '.' + part
+        try:
+            found = getattr(found, part)
+        except AttributeError:
+            pass
+        else:
+            continue
+        # we use explicit un-nesting of the handling block in order
+        # to avoid nested exceptions on python 3
+        try:
+            __import__(used)
+        except ImportError as ex:
+            # str is used for py2 vs py3
+            expected = str(ex).split()[-1]
+            if expected == used:
+                raise
+            else:
+                raise ImportError(
+                    'import error in %s: %s' % (used, ex)
+                )
+        found = annotated_getattr(found, part, used)
+    return found
+
+
+def annotated_getattr(obj, name, ann):
+    # getattr whose error message includes the dotted path ``ann`` for
+    # better diagnostics than a bare AttributeError
+    try:
+        obj = getattr(obj, name)
+    except AttributeError:
+        raise AttributeError(
+                '%r object at %s has no attribute %r' % (
+                    type(obj).__name__, ann, name
+                )
+        )
+    return obj
+
+
+def derive_importpath(import_path, raising):
+    # Split a dotted "pkg.mod.attr" string into (attr, resolved target).
+    if not isinstance(import_path, _basestring) or "." not in import_path:
+        raise TypeError("must be absolute import path string, not %r" %
+                        (import_path,))
+    module, attr = import_path.rsplit('.', 1)
+    target = resolve(module)
+    if raising:
+        # fail early if the attribute does not exist on the target
+        annotated_getattr(target, attr, ann=module)
+    return attr, target
+
+
+class Notset:
+    # sentinel type distinguishing "argument not passed" from explicit None
+    def __repr__(self):
+        return "<notset>"
+
+
+notset = Notset()
+
+
+class MonkeyPatch:
+    """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
+    """
+
+    def __init__(self):
+        # undo stacks: (target, name, old value) / (dict, key, old value)
+        self._setattr = []
+        self._setitem = []
+        self._cwd = None
+        self._savesyspath = None
+
+    def setattr(self, target, name, value=notset, raising=True):
+        """ Set attribute value on target, memorizing the old value.
+        By default raise AttributeError if the attribute did not exist.
+
+        For convenience you can specify a string as ``target`` which
+        will be interpreted as a dotted import path, with the last part
+        being the attribute name.  Example:
+        ``monkeypatch.setattr("os.getcwd", lambda: "/")``
+        would set the ``getcwd`` function of the ``os`` module.
+
+        The ``raising`` value determines if the setattr should fail
+        if the attribute is not already present (defaults to True
+        which means it will raise).
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if value is notset:
+            if not isinstance(target, _basestring):
+                raise TypeError("use setattr(target, name, value) or "
+                                "setattr(target, value) with target being a dotted "
+                                "import string")
+            # two-argument form: target is "pkg.mod.attr", name is the value
+            value = name
+            name, target = derive_importpath(target, raising)
+
+        oldval = getattr(target, name, notset)
+        if raising and oldval is notset:
+            raise AttributeError("%r has no attribute %r" % (target, name))
+
+        # avoid class descriptors like staticmethod/classmethod
+        if inspect.isclass(target):
+            oldval = target.__dict__.get(name, notset)
+        self._setattr.append((target, name, oldval))
+        setattr(target, name, value)
+
+    def delattr(self, target, name=notset, raising=True):
+        """ Delete attribute ``name`` from ``target``, by default raise
+        AttributeError if the attribute did not previously exist.
+
+        If no ``name`` is specified and ``target`` is a string
+        it will be interpreted as a dotted import path with the
+        last part being the attribute name.
+
+        If ``raising`` is set to False, no exception will be raised if the
+        attribute is missing.
+        """
+        __tracebackhide__ = True
+        if name is notset:
+            if not isinstance(target, _basestring):
+                raise TypeError("use delattr(target, name) or "
+                                "delattr(target) with target being a dotted "
+                                "import string")
+            name, target = derive_importpath(target, raising)
+
+        if not hasattr(target, name):
+            if raising:
+                raise AttributeError(name)
+        else:
+            self._setattr.append((target, name, getattr(target, name, notset)))
+            delattr(target, name)
+
+    def setitem(self, dic, name, value):
+        """ Set dictionary entry ``name`` to value. """
+        self._setitem.append((dic, name, dic.get(name, notset)))
+        dic[name] = value
+
+    def delitem(self, dic, name, raising=True):
+        """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.
+
+        If ``raising`` is set to False, no exception will be raised if the
+        key is missing.
+        """
+        if name not in dic:
+            if raising:
+                raise KeyError(name)
+        else:
+            self._setitem.append((dic, name, dic.get(name, notset)))
+            del dic[name]
+
+    def setenv(self, name, value, prepend=None):
+        """ Set environment variable ``name`` to ``value``.  If ``prepend``
+        is a character, read the current environment variable value
+        and prepend the ``value`` adjoined with the ``prepend`` character."""
+        value = str(value)
+        if prepend and name in os.environ:
+            value = value + prepend + os.environ[name]
+        self.setitem(os.environ, name, value)
+
+    def delenv(self, name, raising=True):
+        """ Delete ``name`` from the environment. Raise KeyError if it does
+        not exist.
+
+        If ``raising`` is set to False, no exception will be raised if the
+        environment variable is missing.
+        """
+        self.delitem(os.environ, name, raising=raising)
+
+    def syspath_prepend(self, path):
+        """ Prepend ``path`` to ``sys.path`` list of import locations. """
+        # save the full original list once; undo() restores it wholesale
+        if self._savesyspath is None:
+            self._savesyspath = sys.path[:]
+        sys.path.insert(0, str(path))
+
+    def chdir(self, path):
+        """ Change the current working directory to the specified path.
+        Path can be a string or a py.path.local object.
+        """
+        if self._cwd is None:
+            self._cwd = os.getcwd()
+        if hasattr(path, "chdir"):
+            path.chdir()
+        else:
+            os.chdir(path)
+
+    def undo(self):
+        """ Undo previous changes.  This call consumes the
+        undo stack. Calling it a second time has no effect unless
+        you do more monkeypatching after the undo call.
+
+        There is generally no need to call `undo()`, since it is
+        called automatically during tear-down.
+
+        Note that the same `monkeypatch` fixture is used across a
+        single test function invocation. If `monkeypatch` is used both by
+        the test function itself and one of the test fixtures,
+        calling `undo()` will undo all of the changes made in
+        both functions.
+        """
+        # restore in reverse order so nested patches of the same target
+        # unwind back to the original value
+        for obj, name, value in reversed(self._setattr):
+            if value is not notset:
+                setattr(obj, name, value)
+            else:
+                delattr(obj, name)
+        self._setattr[:] = []
+        for dictionary, name, value in reversed(self._setitem):
+            if value is notset:
+                try:
+                    del dictionary[name]
+                except KeyError:
+                    pass  # was already deleted, so we have the desired state
+            else:
+                dictionary[name] = value
+        self._setitem[:] = []
+        if self._savesyspath is not None:
+            sys.path[:] = self._savesyspath
+            self._savesyspath = None
+
+        if self._cwd is not None:
+            os.chdir(self._cwd)
+            self._cwd = None
diff --git a/lib/spack/external/_pytest/nose.py b/lib/spack/external/_pytest/nose.py
new file mode 100644
index 0000000000..0387468686
--- /dev/null
+++ b/lib/spack/external/_pytest/nose.py
@@ -0,0 +1,71 @@
+""" run test suites written for nose. """
+
+import sys
+
+import py
+import pytest
+from _pytest import unittest
+
+
+def get_skip_exceptions():
+    # Gather SkipTest classes from whichever test frameworks are already
+    # imported; unloaded frameworks are simply absent from sys.modules.
+    skip_classes = set()
+    for module_name in ('unittest', 'unittest2', 'nose'):
+        mod = sys.modules.get(module_name)
+        if hasattr(mod, 'SkipTest'):
+            skip_classes.add(mod.SkipTest)
+    return tuple(skip_classes)
+
+
+def pytest_runtest_makereport(item, call):
+    # Translate nose/unittest SkipTest outcomes into pytest skips.
+    if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
+        # let's substitute the excinfo with a pytest.skip one
+        call2 = call.__class__(lambda:
+                    pytest.skip(str(call.excinfo.value)), call.when)
+        call.excinfo = call2.excinfo
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_runtest_setup(item):
+    # Emulate nose-style setup for plain (non-unittest) test functions.
+    if is_potential_nosetest(item):
+        if isinstance(item.parent, pytest.Generator):
+            gen = item.parent
+            # run the generator-level setup only once per generator
+            if not hasattr(gen, '_nosegensetup'):
+                call_optional(gen.obj, 'setup')
+                if isinstance(gen.parent, pytest.Instance):
+                    call_optional(gen.parent.obj, 'setup')
+                gen._nosegensetup = True
+        if not call_optional(item.obj, 'setup'):
+            # call module level setup if there is no object level one
+            call_optional(item.parent.obj, 'setup')
+        #XXX this implies we only call teardown when setup worked
+        item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
+
+def teardown_nose(item):
+    # Mirror of pytest_runtest_setup: object-level teardown, falling back
+    # to the module-level one.
+    if is_potential_nosetest(item):
+        if not call_optional(item.obj, 'teardown'):
+            call_optional(item.parent.obj, 'teardown')
+        #if hasattr(item.parent, '_nosegensetup'):
+        #    #call_optional(item._nosegensetup, 'teardown')
+        #    del item.parent._nosegensetup
+
+
+def pytest_make_collect_report(collector):
+    # nose-style setup is also honored when collecting generator tests
+    if isinstance(collector, pytest.Generator):
+        call_optional(collector.obj, 'setup')
+
+
+def is_potential_nosetest(item):
+    # extra check needed since we do not do nose style setup/teardown
+    # on direct unittest style classes
+    return isinstance(item, pytest.Function) and \
+        not isinstance(item, unittest.TestCaseFunction)
+
+
+def call_optional(obj, name):
+    # Call obj.<name>() if it exists and is a plain callable (pytest
+    # fixtures are excluded).  Returns True when called, otherwise falls
+    # through and implicitly returns None (falsy).
+    method = getattr(obj, name, None)
+    isfixture = hasattr(method, "_pytestfixturefunction")
+    if method is not None and not isfixture and py.builtin.callable(method):
+        # If there's any problems allow the exception to raise rather than
+        # silently ignoring them
+        method()
+        return True
diff --git a/lib/spack/external/_pytest/pastebin.py b/lib/spack/external/_pytest/pastebin.py
new file mode 100644
index 0000000000..9f1cf90637
--- /dev/null
+++ b/lib/spack/external/_pytest/pastebin.py
@@ -0,0 +1,98 @@
+""" submit failure or test session information to a pastebin service. """
+import pytest
+import sys
+import tempfile
+
+
+def pytest_addoption(parser):
+    # --pastebin=failed|all uploads terminal output to bpaste.net
+    group = parser.getgroup("terminal reporting")
+    group._addoption('--pastebin', metavar="mode",
+        action='store', dest="pastebin", default=None,
+        choices=['failed', 'all'],
+        help="send failed|all info to bpaste.net pastebin service.")
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_configure(config):
+    # For --pastebin=all, tee everything the terminal reporter writes into
+    # a temp file that pytest_unconfigure later uploads.
+    import py
+    if config.option.pastebin == "all":
+        tr = config.pluginmanager.getplugin('terminalreporter')
+        # if no terminal reporter plugin is present, nothing we can do here;
+        # this can happen when this function executes in a slave node
+        # when using pytest-xdist, for example
+        if tr is not None:
+            # pastebin file will be utf-8 encoded binary file
+            config._pastebinfile = tempfile.TemporaryFile('w+b')
+            oldwrite = tr._tw.write
+
+            def tee_write(s, **kwargs):
+                oldwrite(s, **kwargs)
+                if py.builtin._istext(s):
+                    s = s.encode('utf-8')
+                config._pastebinfile.write(s)
+
+            tr._tw.write = tee_write
+
+
+def pytest_unconfigure(config):
+    # Upload the captured session log (--pastebin=all) and restore the
+    # terminal writer patched in pytest_configure.
+    if hasattr(config, '_pastebinfile'):
+        # get terminal contents and delete file
+        config._pastebinfile.seek(0)
+        sessionlog = config._pastebinfile.read()
+        config._pastebinfile.close()
+        del config._pastebinfile
+        # undo our patching in the terminal reporter
+        tr = config.pluginmanager.getplugin('terminalreporter')
+        del tr._tw.__dict__['write']
+        # write summary
+        tr.write_sep("=", "Sending information to Paste Service")
+        pastebinurl = create_new_paste(sessionlog)
+        tr.write_line("pastebin session-log: %s\n" % pastebinurl)
+
+
+def create_new_paste(contents):
+    """
+    Creates a new paste using bpaste.net service.
+
+    :contents: paste contents as utf-8 encoded bytes
+    :returns: url to the pasted contents
+    """
+    import re
+    if sys.version_info < (3, 0):
+        from urllib import urlopen, urlencode
+    else:
+        from urllib.request import urlopen
+        from urllib.parse import urlencode
+
+    params = {
+        'code': contents,
+        'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
+        'expiry': '1week',
+    }
+    url = 'https://bpaste.net'
+    response = urlopen(url, data=urlencode(params).encode('ascii')).read()
+    m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
+    if m:
+        return '%s/show/%s' % (url, m.group(1))
+    else:
+        # BUG(review): ``response`` is bytes; on Python 3 this str + bytes
+        # concatenation raises TypeError instead of returning the message —
+        # should decode the response first. Left as-is (vendored upstream).
+        return 'bad response: ' + response
+
+
+def pytest_terminal_summary(terminalreporter):
+    # For --pastebin=failed, upload one paste per failed test report.
+    import _pytest.config
+    if terminalreporter.config.option.pastebin != "failed":
+        return
+    tr = terminalreporter
+    if 'failed' in tr.stats:
+        terminalreporter.write_sep("=", "Sending information to Paste Service")
+        for rep in terminalreporter.stats.get('failed'):
+            try:
+                msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
+            except AttributeError:
+                # reports without a traceback fall back to the headline
+                msg = tr._getfailureheadline(rep)
+            tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
+            rep.toterminal(tw)
+            s = tw.stringio.getvalue()
+            assert len(s)
+            pastebinurl = create_new_paste(s)
+            tr.write_line("%s --> %s" %(msg, pastebinurl))
diff --git a/lib/spack/external/_pytest/pytester.py b/lib/spack/external/_pytest/pytester.py
new file mode 100644
index 0000000000..17ff529a6c
--- /dev/null
+++ b/lib/spack/external/_pytest/pytester.py
@@ -0,0 +1,1139 @@
+""" (disabled by default) support for testing pytest and pytest plugins. """
+import codecs
+import gc
+import os
+import platform
+import re
+import subprocess
+import sys
+import time
+import traceback
+from fnmatch import fnmatch
+
+from py.builtin import print_
+
+from _pytest._code import Source
+import py
+import pytest
+from _pytest.main import Session, EXIT_OK
+from _pytest.assertion.rewrite import AssertionRewritingHook
+
+
+def pytest_addoption(parser):
+    """Register the pytester-specific command line options
+    (``--lsof`` and ``--runpytest``).
+    """
+    # group = parser.getgroup("pytester", "pytester (self-tests) options")
+    parser.addoption('--lsof',
+           action="store_true", dest="lsof", default=False,
+           help=("run FD checks if lsof is available"))
+
+    parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+           choices=("inprocess", "subprocess", ),
+           help=("run pytest sub runs in tests using an 'inprocess' "
+                 "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+    """Record the absolute path of the ``pytest`` module once and,
+    when ``--lsof`` is given, register the FD leak checker plugin.
+    """
+    # This might be called multiple times. Only take the first.
+    global _pytest_fullpath
+    try:
+        _pytest_fullpath
+    except NameError:
+        # strip 'o'/'c' so a .pyo/.pyc path becomes the .py source path
+        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+    if config.getvalue("lsof"):
+        checker = LsofFdLeakChecker()
+        if checker.matching_platform():
+            config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+    """Plugin that uses the ``lsof`` utility to detect file descriptors
+    leaked by an individual test item (enabled via ``--lsof``).
+    """
+
+    def get_open_files(self):
+        # return list of (fd, filename) tuples currently open by this process
+        out = self._exec_lsof()
+        open_files = self._parse_lsof_output(out)
+        return open_files
+
+    def _exec_lsof(self):
+        pid = os.getpid()
+        # -F fn0: machine-readable output, fields fd + name, NUL-terminated
+        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+    def _parse_lsof_output(self, out):
+        def isopen(line):
+            # keep only real file descriptor lines, skipping mappings,
+            # program text, cwd entries and deleted files
+            return line.startswith('f') and ("deleted" not in line and
+                'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split('\0')
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename.startswith('/'):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self):
+        """Return True if a usable ``lsof`` binary is available."""
+        try:
+            py.process.cmdexec("lsof -v")
+        except (py.process.cmdexec.Error, UnicodeDecodeError):
+            # cmdexec may raise UnicodeDecodeError on Windows systems
+            # with locale other than english:
+            # https://bitbucket.org/pytest-dev/py/issues/66
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_item(self, item):
+        # snapshot open files before and after the test item runs and
+        # fail the item if new descriptors survived it
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+# Default Windows install locations per interpreter name; used by the
+# 'anypython' fixture as a fallback when sysfind() fails on win32.
+winpymap = {
+    'python2.7': r'C:\Python27\python.exe',
+    'python2.6': r'C:\Python26\python.exe',
+    'python3.1': r'C:\Python31\python.exe',
+    'python3.2': r'C:\Python32\python.exe',
+    'python3.3': r'C:\Python33\python.exe',
+    'python3.4': r'C:\Python34\python.exe',
+    'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+    """Return a py.path.local for the interpreter *name*, or None.
+
+    NOTE: the mutable default ``cache`` is intentional here - it memoizes
+    lookups (including negative results) across calls for the process
+    lifetime.
+    """
+    try:
+        return cache[name]
+    except KeyError:
+        executable = py.path.local.sysfind(name)
+        if executable:
+            import subprocess
+            popen = subprocess.Popen([str(executable), "--version"],
+                universal_newlines=True, stderr=subprocess.PIPE)
+            out, err = popen.communicate()
+            if name == "jython":
+                # only jython 2.5.x (except buggy 2.5.2) is usable
+                if not err or "2.5" not in err:
+                    executable = None
+                if "2.5.2" in err:
+                    executable = None # http://bugs.jython.org/issue1790
+            elif popen.returncode != 0:
+                # Handle pyenv's 127.
+                executable = None
+        cache[name] = executable
+        return executable
+
+@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+                        'pypy', 'pypy3'])
+def anypython(request):
+    """Parametrized fixture yielding the executable path of each python
+    interpreter found on the system; skips the parameter when the
+    interpreter is not available (consulting winpymap on win32).
+    """
+    name = request.param
+    executable = getexecutable(name)
+    if executable is None:
+        if sys.platform == "win32":
+            executable = winpymap.get(name, None)
+            if executable:
+                executable = py.path.local(executable)
+                if executable.check():
+                    return executable
+        pytest.skip("no suitable %s found" % (name,))
+    return executable
+
+# used at least by pytest-xdist plugin
+@pytest.fixture
+def _pytest(request):
+    """ Return a helper which offers a gethookrecorder(hook)
+    method which returns a HookRecorder instance which helps
+    to make assertions about called hooks.
+    """
+    return PytestArg(request)
+
+class PytestArg:
+    """Helper returned by the ``_pytest`` fixture; ties HookRecorder
+    lifetime to the requesting test via a finalizer.
+    """
+    def __init__(self, request):
+        self.request = request
+
+    def gethookrecorder(self, hook):
+        # hook._pm is the PluginManager the hook caller belongs to
+        hookrecorder = HookRecorder(hook._pm)
+        self.request.addfinalizer(hookrecorder.finish_recording)
+        return hookrecorder
+
+
+def get_public_names(l):
+    """Only return names from iterator l without a leading underscore."""
+    # assumes items are non-empty strings (x[0] would raise on "")
+    return [x for x in l if x[0] != "_"]
+
+
+class ParsedCall:
+    """A recorded hook call: the hook's keyword arguments become
+    attributes of this object, the hook name is kept in ``_name``.
+    """
+    def __init__(self, name, kwargs):
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self):
+        d = self.__dict__.copy()
+        del d['_name']
+        return "<ParsedCall %r(**%r)>" %(self._name, d)
+
+
+class HookRecorder:
+    """Record all hooks called in a plugin manager.
+
+    This wraps all the hook calls in the plugin manager, recording
+    each call before propagating the normal calls.
+
+    """
+
+    def __init__(self, pluginmanager):
+        self._pluginmanager = pluginmanager
+        self.calls = []
+
+        def before(hook_name, hook_impls, kwargs):
+            self.calls.append(ParsedCall(hook_name, kwargs))
+
+        def after(outcome, hook_name, hook_impls, kwargs):
+            pass
+
+        # returns an undo callable that removes the monitoring again
+        self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
+
+    def finish_recording(self):
+        """Stop recording; restores the unwrapped plugin manager."""
+        self._undo_wrapping()
+
+    def getcalls(self, names):
+        """Return all ParsedCalls whose hook name is in *names*
+        (a whitespace-separated string or a list).
+        """
+        if isinstance(names, str):
+            names = names.split()
+        return [call for call in self.calls if call._name in names]
+
+    def assert_contains(self, entries):
+        """Assert that (hookname, check-expression) pairs appear in order
+        in the recorded calls; check expressions are eval'd with the
+        call's kwargs and the caller's locals in scope.
+        """
+        __tracebackhide__ = True
+        i = 0
+        entries = list(entries)
+        backlocals = sys._getframe(1).f_locals
+        while entries:
+            name, check = entries.pop(0)
+            for ind, call in enumerate(self.calls[i:]):
+                if call._name == name:
+                    print_("NAMEMATCH", name, call)
+                    if eval(check, backlocals, call.__dict__):
+                        print_("CHECKERMATCH", repr(check), "->", call)
+                    else:
+                        # name matched but check failed: keep scanning
+                        print_("NOCHECKERMATCH", repr(check), "-", call)
+                        continue
+                    # resume scanning after the matched call
+                    i += ind + 1
+                    break
+                print_("NONAMEMATCH", name, "with", call)
+            else:
+                pytest.fail("could not find %r check %r" % (name, check))
+
+    def popcall(self, name):
+        """Remove and return the first recorded call named *name*;
+        fail the test if there is none.
+        """
+        __tracebackhide__ = True
+        for i, call in enumerate(self.calls):
+            if call._name == name:
+                del self.calls[i]
+                return call
+        lines = ["could not find call %r, in:" % (name,)]
+        lines.extend(["  %s" % str(x) for x in self.calls])
+        pytest.fail("\n".join(lines))
+
+    def getcall(self, name):
+        """Return the single recorded call named *name* (asserts unique)."""
+        l = self.getcalls(name)
+        assert len(l) == 1, (name, l)
+        return l[0]
+
+    # functionality for test reports
+
+    def getreports(self,
+                   names="pytest_runtest_logreport pytest_collectreport"):
+        return [x.report for x in self.getcalls(names)]
+
+    def matchreport(self, inamepart="",
+        names="pytest_runtest_logreport pytest_collectreport", when=None):
+        """ return a testreport whose dotted import path matches """
+        l = []
+        for rep in self.getreports(names=names):
+            try:
+                if not when and rep.when != "call" and rep.passed:
+                    # setup/teardown passing reports - let's ignore those
+                    continue
+            except AttributeError:
+                pass
+            if when and getattr(rep, 'when', None) != when:
+                continue
+            if not inamepart or inamepart in rep.nodeid.split("::"):
+                l.append(rep)
+        if not l:
+            raise ValueError("could not find test report matching %r: "
+                             "no test reports at all!" % (inamepart,))
+        if len(l) > 1:
+            raise ValueError(
+                "found 2 or more testreports matching %r: %s" %(inamepart, l))
+        return l[0]
+
+    def getfailures(self,
+                    names='pytest_runtest_logreport pytest_collectreport'):
+        return [rep for rep in self.getreports(names) if rep.failed]
+
+    def getfailedcollections(self):
+        return self.getfailures('pytest_collectreport')
+
+    def listoutcomes(self):
+        """Return the (passed, skipped, failed) report lists."""
+        passed = []
+        skipped = []
+        failed = []
+        for rep in self.getreports(
+            "pytest_collectreport pytest_runtest_logreport"):
+            if rep.passed:
+                # only count the 'call' phase as a pass
+                if getattr(rep, "when", None) == "call":
+                    passed.append(rep)
+            elif rep.skipped:
+                skipped.append(rep)
+            elif rep.failed:
+                failed.append(rep)
+        return passed, skipped, failed
+
+    def countoutcomes(self):
+        return [len(x) for x in self.listoutcomes()]
+
+    def assertoutcome(self, passed=0, skipped=0, failed=0):
+        realpassed, realskipped, realfailed = self.listoutcomes()
+        assert passed == len(realpassed)
+        assert skipped == len(realskipped)
+        assert failed == len(realfailed)
+
+    def clear(self):
+        # empty the recorded calls in place
+        self.calls[:] = []
+
+
+@pytest.fixture
+def linecomp(request):
+    """Return a fresh LineComp instance for line comparison assertions."""
+    return LineComp()
+
+
+@pytest.fixture(name='LineMatcher')
+def LineMatcher_fixture(request):
+    """Expose the LineMatcher class itself as a fixture named 'LineMatcher'."""
+    return LineMatcher
+
+
+@pytest.fixture
+def testdir(request, tmpdir_factory):
+    """Return a Testdir helper bound to a fresh temporary directory."""
+    return Testdir(request, tmpdir_factory)
+
+
+# matches "<count> <outcome>" pairs in the terminal summary line
+rex_outcome = re.compile("(\d+) ([\w-]+)")
+class RunResult:
+    """The result of running a command.
+
+    Attributes:
+
+    :ret: The return value.
+    :outlines: List of lines captured from stdout.
+    :errlines: List of lines captures from stderr.
+    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
+       reconstruct stdout or the commonly used
+       ``stdout.fnmatch_lines()`` method.
+    :stderrr: :py:class:`LineMatcher` of stderr.
+    :duration: Duration in seconds.
+
+    """
+    def __init__(self, ret, outlines, errlines, duration):
+        self.ret = ret
+        self.outlines = outlines
+        self.errlines = errlines
+        self.stdout = LineMatcher(outlines)
+        self.stderr = LineMatcher(errlines)
+        self.duration = duration
+
+    def parseoutcomes(self):
+        """ Return a dictionary of outcomestring->num from parsing
+        the terminal output that the test process produced."""
+        # scan backwards for the summary line, e.g. "== 1 passed in 0.01 seconds =="
+        # NOTE: returns None implicitly when no such line is found
+        for line in reversed(self.outlines):
+            if 'seconds' in line:
+                outcomes = rex_outcome.findall(line)
+                if outcomes:
+                    d = {}
+                    for num, cat in outcomes:
+                        d[cat] = int(num)
+                    return d
+
+    def assert_outcomes(self, passed=0, skipped=0, failed=0):
+        """ assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run."""
+        d = self.parseoutcomes()
+        assert passed == d.get("passed", 0)
+        assert skipped == d.get("skipped", 0)
+        assert failed == d.get("failed", 0)
+
+
+
+class Testdir:
+    """Temporary test directory with tools to test/run pytest itself.
+
+    This is based on the ``tmpdir`` fixture but provides a number of
+    methods which aid with testing pytest itself.  Unless
+    :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
+    current working directory.
+
+    Attributes:
+
+    :tmpdir: The :py:class:`py.path.local` instance of the temporary
+       directory.
+
+    :plugins: A list of plugins to use with :py:meth:`parseconfig` and
+       :py:meth:`runpytest`.  Initially this is an empty list but
+       plugins can be added to the list.  The type of items to add to
+       the list depend on the method which uses them so refer to them
+       for details.
+
+    """
+
+    def __init__(self, request, tmpdir_factory):
+        """Create the per-test tmpdir, chdir into it, save interpreter
+        state for later restore and pick the runpytest backend.
+        """
+        self.request = request
+        # XXX remove duplication with tmpdir plugin
+        basetmp = tmpdir_factory.ensuretemp("testdir")
+        name = request.function.__name__
+        # find the first free numbered subdir (name0, name1, ...)
+        for i in range(100):
+            try:
+                tmpdir = basetmp.mkdir(name + str(i))
+            except py.error.EEXIST:
+                continue
+            break
+        self.tmpdir = tmpdir
+        self.plugins = []
+        # snapshot sys.path/sys.meta_path and loaded modules so
+        # finalize()/delete_loaded_modules() can restore them
+        self._savesyspath = (list(sys.path), list(sys.meta_path))
+        self._savemodulekeys = set(sys.modules)
+        self.chdir() # always chdir
+        self.request.addfinalizer(self.finalize)
+        method = self.request.config.getoption("--runpytest")
+        if method == "inprocess":
+            self._runpytest_method = self.runpytest_inprocess
+        elif method == "subprocess":
+            self._runpytest_method = self.runpytest_subprocess
+
+    def __repr__(self):
+        """Show the backing temporary directory for debugging."""
+        return "<Testdir %r>" % (self.tmpdir,)
+
+    def finalize(self):
+        """Clean up global state artifacts.
+
+        Some methods modify the global interpreter state and this
+        tries to clean this up.  It does not remove the temporary
+        directory however so it can be looked at after the test run
+        has finished.
+
+        """
+        # restore sys.path and sys.meta_path in place
+        sys.path[:], sys.meta_path[:] = self._savesyspath
+        if hasattr(self, '_olddir'):
+            self._olddir.chdir()
+        self.delete_loaded_modules()
+
+    def delete_loaded_modules(self):
+        """Delete modules that have been loaded during a test.
+
+        This allows the interpreter to catch module changes in case
+        the module is re-imported.
+        """
+        # only drop modules imported since __init__ snapshotted sys.modules
+        for name in set(sys.modules).difference(self._savemodulekeys):
+            # it seems zope.interfaces is keeping some state
+            # (used by twisted related tests)
+            if name != "zope.interface":
+                del sys.modules[name]
+
+    def make_hook_recorder(self, pluginmanager):
+        """Create a new :py:class:`HookRecorder` for a PluginManager."""
+        assert not hasattr(pluginmanager, "reprec")
+        pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
+        # stop recording automatically at test teardown
+        self.request.addfinalizer(reprec.finish_recording)
+        return reprec
+
+    def chdir(self):
+        """Cd into the temporary directory.
+
+        This is done automatically upon instantiation.
+
+        """
+        old = self.tmpdir.chdir()
+        # remember only the very first cwd so finalize() restores it
+        if not hasattr(self, '_olddir'):
+            self._olddir = old
+
+    def _makefile(self, ext, args, kwargs):
+        """Write files under tmpdir and return the first one created.
+
+        ``args`` become the contents of a file named after the current
+        test function; each ``kwargs`` item is a (basename, contents)
+        pair.  ``ext`` is the file extension including the dot.
+        """
+        items = list(kwargs.items())
+        if args:
+            source = py.builtin._totext("\n").join(
+                map(py.builtin._totext, args)) + py.builtin._totext("\n")
+            basename = self.request.function.__name__
+            items.insert(0, (basename, source))
+        ret = None
+        for name, value in items:
+            p = self.tmpdir.join(name).new(ext=ext)
+            p.dirpath().ensure_dir()
+            # Source() dedents and normalizes the literal's indentation
+            source = Source(value)
+
+            def my_totext(s, encoding="utf-8"):
+                if py.builtin._isbytes(s):
+                    s = py.builtin._totext(s, encoding=encoding)
+                return s
+
+            source_unicode = "\n".join([my_totext(line) for line in source.lines])
+            source = py.builtin._totext(source_unicode)
+            content = source.strip().encode("utf-8") # + "\n"
+            #content = content.rstrip() + "\n"
+            p.write(content, "wb")
+            if ret is None:
+                ret = p
+        return ret
+
+    def makefile(self, ext, *args, **kwargs):
+        """Create a new file in the testdir.
+
+        ext: The extension the file should use, including the dot.
+           E.g. ".py".
+
+        args: All args will be treated as strings and joined using
+           newlines.  The result will be written as contents to the
+           file.  The name of the file will be based on the test
+           function requesting this fixture.
+           E.g. "testdir.makefile('.txt', 'line1', 'line2')"
+
+        kwargs: Each keyword is the name of a file, while the value of
+           it will be written as contents of the file.
+           E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
+
+        :returns: the py.path.local of the first file written.
+        """
+        return self._makefile(ext, args, kwargs)
+
+    def makeconftest(self, source):
+        """Write a conftest.py file with 'source' as contents."""
+        return self.makepyfile(conftest=source)
+
+    def makeini(self, source):
+        """Write a tox.ini file with 'source' as contents."""
+        return self.makefile('.ini', tox=source)
+
+    def getinicfg(self, source):
+        """Return the pytest section from the tox.ini config file."""
+        p = self.makeini(source)
+        return py.iniconfig.IniConfig(p)['pytest']
+
+    def makepyfile(self, *args, **kwargs):
+        """Shortcut for .makefile() with a .py extension."""
+        return self._makefile('.py', args, kwargs)
+
+    def maketxtfile(self, *args, **kwargs):
+        """Shortcut for .makefile() with a .txt extension."""
+        return self._makefile('.txt', args, kwargs)
+
+    def syspathinsert(self, path=None):
+        """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
+
+        This is undone automatically after the test.
+        """
+        if path is None:
+            path = self.tmpdir
+        sys.path.insert(0, str(path))
+        # a call to syspathinsert() usually means that the caller
+        # wants to import some dynamically created files.
+        # with python3 we thus invalidate import caches.
+        self._possibly_invalidate_import_caches()
+
+    def _possibly_invalidate_import_caches(self):
+        """Best-effort importlib cache invalidation (no-op before py33)."""
+        # invalidate caches if we can (py33 and above)
+        try:
+            import importlib
+        except ImportError:
+            pass
+        else:
+            if hasattr(importlib, "invalidate_caches"):
+                importlib.invalidate_caches()
+
+    def mkdir(self, name):
+        """Create a new (sub)directory."""
+        return self.tmpdir.mkdir(name)
+
+    def mkpydir(self, name):
+        """Create a new python package.
+
+        This creates a (sub)directory with an empty ``__init__.py``
+        file so that is recognised as a python package.
+
+        """
+        p = self.mkdir(name)
+        p.ensure("__init__.py")
+        return p
+
+    # expose Session as a class attribute for tests that subclass/replace it
+    Session = Session
+    def getnode(self, config, arg):
+        """Return the collection node of a file.
+
+        :param config: :py:class:`_pytest.config.Config` instance, see
+           :py:meth:`parseconfig` and :py:meth:`parseconfigure` to
+           create the configuration.
+
+        :param arg: A :py:class:`py.path.local` instance of the file.
+
+        """
+        session = Session(config)
+        assert '::' not in str(arg)
+        p = py.path.local(arg)
+        # wrap collection in session start/finish hooks as pytest expects
+        config.hook.pytest_sessionstart(session=session)
+        res = session.perform_collect([str(p)], genitems=False)[0]
+        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+        return res
+
+    def getpathnode(self, path):
+        """Return the collection node of a file.
+
+        This is like :py:meth:`getnode` but uses
+        :py:meth:`parseconfigure` to create the (configured) pytest
+        Config instance.
+
+        :param path: A :py:class:`py.path.local` instance of the file.
+
+        """
+        config = self.parseconfigure(path)
+        session = Session(config)
+        # collect using a path relative to the session root
+        x = session.fspath.bestrelpath(path)
+        config.hook.pytest_sessionstart(session=session)
+        res = session.perform_collect([x], genitems=False)[0]
+        config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+        return res
+
+    def genitems(self, colitems):
+        """Generate all test items from a collection node.
+
+        This recurses into the collection node and returns a list of
+        all the test items contained within.
+
+        """
+        session = colitems[0].session
+        result = []
+        for colitem in colitems:
+            result.extend(session.genitems(colitem))
+        return result
+
+    def runitem(self, source):
+        """Run the "test_func" Item.
+
+        The calling test instance (the class which contains the test
+        method) must provide a ``.getrunner()`` method which should
+        return a runner which can run the test protocol for a single
+        item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
+
+        """
+        # used from runner functional tests
+        item = self.getitem(source)
+        # the test class where we are called from wants to provide the runner
+        testclassinstance = self.request.instance
+        runner = testclassinstance.getrunner()
+        return runner(item)
+
+    def inline_runsource(self, source, *cmdlineargs):
+        """Run a test module in process using ``pytest.main()``.
+
+        This run writes "source" into a temporary file and runs
+        ``pytest.main()`` on it, returning a :py:class:`HookRecorder`
+        instance for the result.
+
+        :param source: The source code of the test module.
+
+        :param cmdlineargs: Any extra command line arguments to use.
+
+        :return: :py:class:`HookRecorder` instance of the result.
+
+        """
+        p = self.makepyfile(source)
+        # the generated file path becomes the last command line argument
+        l = list(cmdlineargs) + [p]
+        return self.inline_run(*l)
+
+    def inline_genitems(self, *args):
+        """Run ``pytest.main(['--collectonly'])`` in-process.
+
+        Returns a tuple of the collected items and a
+        :py:class:`HookRecorder` instance.
+
+        This runs the :py:func:`pytest.main` function to run all of
+        pytest inside the test process itself like
+        :py:meth:`inline_run`.  However the return value is a tuple of
+        the collection items and a :py:class:`HookRecorder` instance.
+
+        """
+        rec = self.inline_run("--collect-only", *args)
+        items = [x.item for x in rec.getcalls("pytest_itemcollected")]
+        return items, rec
+
+    def inline_run(self, *args, **kwargs):
+        """Run ``pytest.main()`` in-process, returning a HookRecorder.
+
+        This runs the :py:func:`pytest.main` function to run all of
+        pytest inside the test process itself.  This means it can
+        return a :py:class:`HookRecorder` instance which gives more
+        detailed results from then run then can be done by matching
+        stdout/stderr from :py:meth:`runpytest`.
+
+        :param args: Any command line arguments to pass to
+           :py:func:`pytest.main`.
+
+        :param plugin: (keyword-only) Extra plugin instances the
+           ``pytest.main()`` instance should use.
+
+        :return: A :py:class:`HookRecorder` instance.
+        """
+        # When running py.test inline any plugins active in the main
+        # test process are already imported.  So this disables the
+        # warning which will trigger to say they can no longer be
+        # re-written, which is fine as they are already re-written.
+        orig_warn = AssertionRewritingHook._warn_already_imported
+
+        def revert():
+            AssertionRewritingHook._warn_already_imported = orig_warn
+
+        self.request.addfinalizer(revert)
+        AssertionRewritingHook._warn_already_imported = lambda *a: None
+
+        rec = []
+
+        class Collect:
+            # NOTE: first arg is deliberately 'x' (not 'self') so that
+            # 'self' inside the body still refers to the Testdir instance
+            def pytest_configure(x, config):
+                rec.append(self.make_hook_recorder(config.pluginmanager))
+
+        plugins = kwargs.get("plugins") or []
+        plugins.append(Collect())
+        ret = pytest.main(list(args), plugins=plugins)
+        self.delete_loaded_modules()
+        if len(rec) == 1:
+            reprec = rec.pop()
+        else:
+            # pytest_configure never ran (e.g. usage error): fall back
+            # to a bare namespace that only carries .ret
+            class reprec:
+                pass
+        reprec.ret = ret
+
+        # typically we reraise keyboard interrupts from the child run
+        # because it's our user requesting interruption of the testing
+        if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
+            calls = reprec.getcalls("pytest_keyboard_interrupt")
+            if calls and calls[-1].excinfo.type == KeyboardInterrupt:
+                raise KeyboardInterrupt()
+        return reprec
+
+    def runpytest_inprocess(self, *args, **kwargs):
+        """ Return result of running pytest in-process, providing a similar
+        interface to what self.runpytest() provides. """
+        if kwargs.get("syspathinsert"):
+            self.syspathinsert()
+        now = time.time()
+        # capture stdout/stderr during the inline run and replay them after
+        capture = py.io.StdCapture()
+        try:
+            try:
+                reprec = self.inline_run(*args, **kwargs)
+            except SystemExit as e:
+
+                # mimic the HookRecorder interface with just a .ret attribute
+                class reprec:
+                    ret = e.args[0]
+
+            except Exception:
+                traceback.print_exc()
+
+                class reprec:
+                    ret = 3
+        finally:
+            out, err = capture.reset()
+            sys.stdout.write(out)
+            sys.stderr.write(err)
+
+        res = RunResult(reprec.ret,
+                        out.split("\n"), err.split("\n"),
+                        time.time()-now)
+        res.reprec = reprec
+        return res
+
+    def runpytest(self, *args, **kwargs):
+        """ Run pytest inline or in a subprocess, depending on the command line
+        option "--runpytest" and return a :py:class:`RunResult`.
+
+        """
+        args = self._ensure_basetemp(args)
+        # dispatch to runpytest_inprocess or runpytest_subprocess (set in __init__)
+        return self._runpytest_method(*args, **kwargs)
+
+    def _ensure_basetemp(self, args):
+        """Return args as strings, appending a --basetemp option
+        pointing next to tmpdir unless the caller already passed one.
+        """
+        args = [str(x) for x in args]
+        for x in args:
+            if str(x).startswith('--basetemp'):
+                #print ("basedtemp exists: %s" %(args,))
+                break
+        else:
+            args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
+            #print ("added basetemp: %s" %(args,))
+        return args
+
+    def parseconfig(self, *args):
+        """Return a new pytest Config instance from given commandline args.
+
+        This invokes the pytest bootstrapping code in _pytest.config
+        to create a new :py:class:`_pytest.core.PluginManager` and
+        call the pytest_cmdline_parse hook to create new
+        :py:class:`_pytest.config.Config` instance.
+
+        If :py:attr:`plugins` has been populated they should be plugin
+        modules which will be registered with the PluginManager.
+
+        """
+        args = self._ensure_basetemp(args)
+
+        import _pytest.config
+        config = _pytest.config._prepareconfig(args, self.plugins)
+        # we don't know what the test will do with this half-setup config
+        # object and thus we make sure it gets unconfigured properly in any
+        # case (otherwise capturing could still be active, for example)
+        self.request.addfinalizer(config._ensure_unconfigure)
+        return config
+
+    def parseconfigure(self, *args):
+        """Return a new pytest configured Config instance.
+
+        This returns a new :py:class:`_pytest.config.Config` instance
+        like :py:meth:`parseconfig`, but also calls the
+        pytest_configure hook.
+
+        """
+        config = self.parseconfig(*args)
+        config._do_configure()
+        self.request.addfinalizer(config._ensure_unconfigure)
+        return config
+
+    def getitem(self,  source, funcname="test_func"):
+        """Return the test item for a test function.
+
+        This writes the source to a python file and runs pytest's
+        collection on the resulting module, returning the test item
+        for the requested function name.
+
+        :param source: The module source.
+
+        :param funcname: The name of the test function for which the
+           Item must be returned.
+
+        """
+        items = self.getitems(source)
+        for item in items:
+            if item.name == funcname:
+                return item
+        assert 0, "%r item not found in module:\n%s\nitems: %s" %(
+                  funcname, source, items)
+
+    def getitems(self,  source):
+        """Return all test items collected from the module.
+
+        This writes the source to a python file and runs pytest's
+        collection on the resulting module, returning all test items
+        contained within.
+
+        """
+        modcol = self.getmodulecol(source)
+        return self.genitems([modcol])
+
+    def getmodulecol(self,  source, configargs=(), withinit=False):
+        """Return the module collection node for ``source``.
+
+        This writes ``source`` to a file using :py:meth:`makepyfile`
+        and then runs the pytest collection on it, returning the
+        collection node for the test module.
+
+        :param source: The source code of the module to collect.
+
+        :param configargs: Any extra arguments to pass to
+           :py:meth:`parseconfigure`.
+
+        :param withinit: Whether to also write a ``__init__.py`` file
+           to the temporary directory to ensure it is a package.
+
+        """
+        # file is named after the requesting test function
+        kw = {self.request.function.__name__: Source(source).strip()}
+        path = self.makepyfile(**kw)
+        if withinit:
+            self.makepyfile(__init__ = "#")
+        self.config = config = self.parseconfigure(path, *configargs)
+        node = self.getnode(config, path)
+        return node
+
+    def collect_by_name(self, modcol, name):
+        """Return the collection node for name from the module collection.
+
+        This will search a module collection node for a collection
+        node matching the given name.  Returns None when no child
+        matches.
+
+        :param modcol: A module collection node, see
+           :py:meth:`getmodulecol`.
+
+        :param name: The name of the node to return.
+
+        """
+        for colitem in modcol._memocollect():
+            if colitem.name == name:
+                return colitem
+
+    def popen(self, cmdargs, stdout, stderr, **kw):
+        """Invoke subprocess.Popen.
+
+        This calls subprocess.Popen making sure the current working
+        directory is on the PYTHONPATH of the child process.
+
+        You probably want to use :py:meth:`run` instead.
+
+        """
+        env = os.environ.copy()
+        # prepend cwd so modules created in tmpdir are importable by the child
+        env['PYTHONPATH'] = os.pathsep.join(filter(None, [
+            str(os.getcwd()), env.get('PYTHONPATH', '')]))
+        kw['env'] = env
+        return subprocess.Popen(cmdargs,
+                                stdout=stdout, stderr=stderr, **kw)
+
+    def run(self, *cmdargs):
+        """Run a command with arguments.
+
+        Run a process using subprocess.Popen saving the stdout and
+        stderr.
+
+        Returns a :py:class:`RunResult`.
+
+        """
+        return self._run(*cmdargs)
+
+    def _run(self, *cmdargs):
+        cmdargs = [str(x) for x in cmdargs]
+        p1 = self.tmpdir.join("stdout")
+        p2 = self.tmpdir.join("stderr")
+        print_("running:", ' '.join(cmdargs))
+        print_("     in:", str(py.path.local()))
+        f1 = codecs.open(str(p1), "w", encoding="utf8")
+        f2 = codecs.open(str(p2), "w", encoding="utf8")
+        try:
+            now = time.time()
+            popen = self.popen(cmdargs, stdout=f1, stderr=f2,
+                close_fds=(sys.platform != "win32"))
+            ret = popen.wait()
+        finally:
+            f1.close()
+            f2.close()
+        f1 = codecs.open(str(p1), "r", encoding="utf8")
+        f2 = codecs.open(str(p2), "r", encoding="utf8")
+        try:
+            out = f1.read().splitlines()
+            err = f2.read().splitlines()
+        finally:
+            f1.close()
+            f2.close()
+        self._dump_lines(out, sys.stdout)
+        self._dump_lines(err, sys.stderr)
+        return RunResult(ret, out, err, time.time()-now)
+
+    def _dump_lines(self, lines, fp):
+        try:
+            for line in lines:
+                py.builtin.print_(line, file=fp)
+        except UnicodeEncodeError:
+            print("couldn't print to %s because of encoding" % (fp,))
+
+    def _getpytestargs(self):
+        # we cannot use "(sys.executable,script)"
+        # because on windows the script is e.g. a pytest.exe
+        return (sys.executable, _pytest_fullpath,) # noqa
+
+    def runpython(self, script):
+        """Run a python script using sys.executable as interpreter.
+
+        Returns a :py:class:`RunResult`.
+        """
+        return self.run(sys.executable, script)
+
+    def runpython_c(self, command):
+        """Run python -c "command", return a :py:class:`RunResult`."""
+        return self.run(sys.executable, "-c", command)
+
+    def runpytest_subprocess(self, *args, **kwargs):
+        """Run pytest as a subprocess with given arguments.
+
+        Any plugins added to the :py:attr:`plugins` list will be added
+        using the ``-p`` command line option.  Additionally
+        ``--basetemp`` is used to put any temporary files and directories
+        in a numbered directory prefixed with "runpytest-" so they do
+        not conflict with the normal numbered pytest location for
+        temporary files and directories.
+
+        Returns a :py:class:`RunResult`.
+
+        """
+        p = py.path.local.make_numbered_dir(prefix="runpytest-",
+            keep=None, rootdir=self.tmpdir)
+        args = ('--basetemp=%s' % p, ) + args
+        #for x in args:
+        #    if '--confcutdir' in str(x):
+        #        break
+        #else:
+        #    pass
+        #    args = ('--confcutdir=.',) + args
+        plugins = [x for x in self.plugins if isinstance(x, str)]
+        if plugins:
+            args = ('-p', plugins[0]) + args
+        args = self._getpytestargs() + args
+        return self.run(*args)
+
+    def spawn_pytest(self, string, expect_timeout=10.0):
+        """Run pytest using pexpect.
+
+        This makes sure to use the right pytest and sets up the
+        temporary directory locations.
+
+        The pexpect child is returned.
+
+        """
+        basetemp = self.tmpdir.mkdir("pexpect")
+        invoke = " ".join(map(str, self._getpytestargs()))
+        cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
+        return self.spawn(cmd, expect_timeout=expect_timeout)
+
+    def spawn(self, cmd, expect_timeout=10.0):
+        """Run a command using pexpect.
+
+        The pexpect child is returned.
+        """
+        pexpect = pytest.importorskip("pexpect", "3.0")
+        if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
+            pytest.skip("pypy-64 bit not supported")
+        if sys.platform.startswith("freebsd"):
+            pytest.xfail("pexpect does not work reliably on freebsd")
+        logfile = self.tmpdir.join("spawn.out").open("wb")
+        child = pexpect.spawn(cmd, logfile=logfile)
+        self.request.addfinalizer(logfile.close)
+        child.timeout = expect_timeout
+        return child
+
+def getdecoded(out):
+        try:
+            return out.decode("utf-8")
+        except UnicodeDecodeError:
+            return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
+                    py.io.saferepr(out),)
+
+
+class LineComp:
+    def __init__(self):
+        self.stringio = py.io.TextIO()
+
+    def assert_contains_lines(self, lines2):
+        """ assert that lines2 are contained (linearly) in lines1.
+            return a list of extralines found.
+        """
+        __tracebackhide__ = True
+        val = self.stringio.getvalue()
+        self.stringio.truncate(0)
+        self.stringio.seek(0)
+        lines1 = val.split("\n")
+        return LineMatcher(lines1).fnmatch_lines(lines2)
+
+
+class LineMatcher:
+    """Flexible matching of text.
+
+    This is a convenience class to test large texts like the output of
+    commands.
+
+    The constructor takes a list of lines without their trailing
+    newlines, i.e. ``text.splitlines()``.
+
+    """
+
+    def __init__(self,  lines):
+        self.lines = lines
+        self._log_output = []
+
+    def str(self):
+        """Return the entire original text."""
+        return "\n".join(self.lines)
+
+    def _getlines(self, lines2):
+        if isinstance(lines2, str):
+            lines2 = Source(lines2)
+        if isinstance(lines2, Source):
+            lines2 = lines2.strip().lines
+        return lines2
+
+    def fnmatch_lines_random(self, lines2):
+        """Check lines exist in the output.
+
+        The argument is a list of lines which have to occur in the
+        output, in any order.  Each line can contain glob wildcards.
+
+        """
+        lines2 = self._getlines(lines2)
+        for line in lines2:
+            for x in self.lines:
+                if line == x or fnmatch(x, line):
+                    self._log("matched: ", repr(line))
+                    break
+            else:
+                self._log("line %r not found in output" % line)
+                raise ValueError(self._log_text)
+
+    def get_lines_after(self, fnline):
+        """Return all lines following the given line in the text.
+
+        The given line can contain glob wildcards.
+        """
+        for i, line in enumerate(self.lines):
+            if fnline == line or fnmatch(line, fnline):
+                return self.lines[i+1:]
+        raise ValueError("line %r not found in output" % fnline)
+
+    def _log(self, *args):
+        self._log_output.append(' '.join((str(x) for x in args)))
+
+    @property
+    def _log_text(self):
+        return '\n'.join(self._log_output)
+
+    def fnmatch_lines(self, lines2):
+        """Search the text for matching lines.
+
+        The argument is a list of lines which have to match and can
+        use glob wildcards.  If they do not match a pytest.fail() is
+        called.  The matches and non-matches are also printed on
+        stdout.
+
+        """
+        lines2 = self._getlines(lines2)
+        lines1 = self.lines[:]
+        nextline = None
+        extralines = []
+        __tracebackhide__ = True
+        for line in lines2:
+            nomatchprinted = False
+            while lines1:
+                nextline = lines1.pop(0)
+                if line == nextline:
+                    self._log("exact match:", repr(line))
+                    break
+                elif fnmatch(nextline, line):
+                    self._log("fnmatch:", repr(line))
+                    self._log("   with:", repr(nextline))
+                    break
+                else:
+                    if not nomatchprinted:
+                        self._log("nomatch:", repr(line))
+                        nomatchprinted = True
+                    self._log("    and:", repr(nextline))
+                extralines.append(nextline)
+            else:
+                self._log("remains unmatched: %r" % (line,))
+                pytest.fail(self._log_text)
diff --git a/lib/spack/external/_pytest/python.py b/lib/spack/external/_pytest/python.py
new file mode 100644
index 0000000000..53815da2f0
--- /dev/null
+++ b/lib/spack/external/_pytest/python.py
@@ -0,0 +1,1578 @@
+""" Python test discovery, setup and run of test functions. """
+
+import fnmatch
+import inspect
+import sys
+import collections
+import math
+from itertools import count
+
+import py
+import pytest
+from _pytest.mark import MarkerError
+
+
+import _pytest
+import _pytest._pluggy as pluggy
+from _pytest import fixtures
+from _pytest.compat import (
+    isclass, isfunction, is_generator, _escape_strings,
+    REGEX_TYPE, STRING_TYPES, NoneType, NOTSET,
+    get_real_func, getfslineno, safe_getattr,
+    getlocation, enum,
+)
+
+cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
+cutdir2 = py.path.local(_pytest.__file__).dirpath()
+cutdir3 = py.path.local(py.__file__).dirpath()
+
+
+def filter_traceback(entry):
+    """Return True if a TracebackEntry instance should be removed from tracebacks:
+    * dynamically generated code (no code to show up for it);
+    * internal traceback from pytest or its internal libraries, py and pluggy.
+    """
+    # entry.path might sometimes return a str object when the entry
+    # points to dynamically generated code
+    # see https://bitbucket.org/pytest-dev/py/issues/71
+    raw_filename = entry.frame.code.raw.co_filename
+    is_generated = '<' in raw_filename and '>' in raw_filename
+    if is_generated:
+        return False
+    # entry.path might point to a non-existing file, in which case it will
+    # also return a str object. see #1133
+    p = py.path.local(entry.path)
+    return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3)
+
+
+
+def pyobj_property(name):
+    def get(self):
+        node = self.getparent(getattr(pytest, name))
+        if node is not None:
+            return node.obj
+    doc = "python %s object this node was collected from (can be None)." % (
+          name.lower(),)
+    return property(get, None, None, doc)
+
+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption('--fixtures', '--funcargs',
+               action="store_true", dest="showfixtures", default=False,
+               help="show available fixtures, sorted by plugin appearance")
+    group.addoption(
+        '--fixtures-per-test',
+        action="store_true",
+        dest="show_fixtures_per_test",
+        default=False,
+        help="show fixtures per test",
+    )
+    parser.addini("usefixtures", type="args", default=[],
+        help="list of default fixtures to be used with this project")
+    parser.addini("python_files", type="args",
+        default=['test_*.py', '*_test.py'],
+        help="glob-style file patterns for Python test module discovery")
+    parser.addini("python_classes", type="args", default=["Test",],
+        help="prefixes or glob names for Python test class discovery")
+    parser.addini("python_functions", type="args", default=["test",],
+        help="prefixes or glob names for Python test function and "
+             "method discovery")
+
+    group.addoption("--import-mode", default="prepend",
+        choices=["prepend", "append"], dest="importmode",
+        help="prepend/append to sys.path when importing test modules, "
+             "default is to prepend.")
+
+
+def pytest_cmdline_main(config):
+    if config.option.showfixtures:
+        showfixtures(config)
+        return 0
+    if config.option.show_fixtures_per_test:
+        show_fixtures_per_test(config)
+        return 0
+
+
+def pytest_generate_tests(metafunc):
+    # those alternative spellings are common - raise a specific error to alert
+    # the user
+    alt_spellings = ['parameterize', 'parametrise', 'parameterise']
+    for attr in alt_spellings:
+        if hasattr(metafunc.function, attr):
+            msg = "{0} has '{1}', spelling should be 'parametrize'"
+            raise MarkerError(msg.format(metafunc.function.__name__, attr))
+    try:
+        markers = metafunc.function.parametrize
+    except AttributeError:
+        return
+    for marker in markers:
+        metafunc.parametrize(*marker.args, **marker.kwargs)
+
+def pytest_configure(config):
+    config.addinivalue_line("markers",
+        "parametrize(argnames, argvalues): call a test function multiple "
+        "times passing in different arguments in turn. argvalues generally "
+        "needs to be a list of values if argnames specifies only one name "
+        "or a list of tuples of values if argnames specifies multiple names. "
+        "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
+        "decorated test function, one with arg1=1 and another with arg1=2."
+        "see http://pytest.org/latest/parametrize.html for more info and "
+        "examples."
+    )
+    config.addinivalue_line("markers",
+        "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
+        "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
+    )
+
+@pytest.hookimpl(trylast=True)
+def pytest_namespace():
+    raises.Exception = pytest.fail.Exception
+    return {
+        'raises': raises,
+        'approx': approx,
+        'collect': {
+            'Module': Module,
+            'Class': Class,
+            'Instance': Instance,
+            'Function': Function,
+            'Generator': Generator,
+        }
+    }
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_pyfunc_call(pyfuncitem):
+    testfunction = pyfuncitem.obj
+    if pyfuncitem._isyieldedfunction():
+        testfunction(*pyfuncitem._args)
+    else:
+        funcargs = pyfuncitem.funcargs
+        testargs = {}
+        for arg in pyfuncitem._fixtureinfo.argnames:
+            testargs[arg] = funcargs[arg]
+        testfunction(**testargs)
+    return True
+
+def pytest_collect_file(path, parent):
+    ext = path.ext
+    if ext == ".py":
+        if not parent.session.isinitpath(path):
+            for pat in parent.config.getini('python_files'):
+                if path.fnmatch(pat):
+                    break
+            else:
+               return
+        ihook = parent.session.gethookproxy(path)
+        return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
+
+def pytest_pycollect_makemodule(path, parent):
+    return Module(path, parent)
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_pycollect_makeitem(collector, name, obj):
+    outcome = yield
+    res = outcome.get_result()
+    if res is not None:
+        raise StopIteration
+    # nothing was collected elsewhere, let's do it here
+    if isclass(obj):
+        if collector.istestclass(obj, name):
+            Class = collector._getcustomclass("Class")
+            outcome.force_result(Class(name, parent=collector))
+    elif collector.istestfunction(obj, name):
+        # mock seems to store unbound methods (issue473), normalize it
+        obj = getattr(obj, "__func__", obj)
+        # We need to try and unwrap the function if it's a functools.partial
+        # or a functools.wrapped.
+        # We mustn't if it's been wrapped with mock.patch (python 2 only)
+        if not (isfunction(obj) or isfunction(get_real_func(obj))):
+            collector.warn(code="C2", message=
+                "cannot collect %r because it is not a function."
+                % name, )
+        elif getattr(obj, "__test__", True):
+            if is_generator(obj):
+                res = Generator(name, parent=collector)
+            else:
+                res = list(collector._genfunctions(name, obj))
+            outcome.force_result(res)
+
+def pytest_make_parametrize_id(config, val):
+    return None
+
+
+
+class PyobjContext(object):
+    module = pyobj_property("Module")
+    cls = pyobj_property("Class")
+    instance = pyobj_property("Instance")
+
+class PyobjMixin(PyobjContext):
+    def obj():
+        def fget(self):
+            obj = getattr(self, '_obj', None)
+            if obj is None:
+                self._obj = obj = self._getobj()
+            return obj
+
+        def fset(self, value):
+            self._obj = value
+
+        return property(fget, fset, None, "underlying python object")
+
+    obj = obj()
+
+    def _getobj(self):
+        return getattr(self.parent.obj, self.name)
+
+    def getmodpath(self, stopatmodule=True, includemodule=False):
+        """ return python path relative to the containing module. """
+        chain = self.listchain()
+        chain.reverse()
+        parts = []
+        for node in chain:
+            if isinstance(node, Instance):
+                continue
+            name = node.name
+            if isinstance(node, Module):
+                assert name.endswith(".py")
+                name = name[:-3]
+                if stopatmodule:
+                    if includemodule:
+                        parts.append(name)
+                    break
+            parts.append(name)
+        parts.reverse()
+        s = ".".join(parts)
+        return s.replace(".[", "[")
+
+    def _getfslineno(self):
+        return getfslineno(self.obj)
+
+    def reportinfo(self):
+        # XXX caching?
+        obj = self.obj
+        compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
+        if isinstance(compat_co_firstlineno, int):
+            # nose compatibility
+            fspath = sys.modules[obj.__module__].__file__
+            if fspath.endswith(".pyc"):
+                fspath = fspath[:-1]
+            lineno = compat_co_firstlineno
+        else:
+            fspath, lineno = getfslineno(obj)
+        modpath = self.getmodpath()
+        assert isinstance(lineno, int)
+        return fspath, lineno, modpath
+
+class PyCollector(PyobjMixin, pytest.Collector):
+
+    def funcnamefilter(self, name):
+        return self._matches_prefix_or_glob_option('python_functions', name)
+
+    def isnosetest(self, obj):
+        """ Look for the __test__ attribute, which is applied by the
+        @nose.tools.istest decorator
+        """
+        # We explicitly check for "is True" here to not mistakenly treat
+        # classes with a custom __getattr__ returning something truthy (like a
+        # function) as test classes.
+        return safe_getattr(obj, '__test__', False) is True
+
+    def classnamefilter(self, name):
+        return self._matches_prefix_or_glob_option('python_classes', name)
+
+    def istestfunction(self, obj, name):
+        return (
+            (self.funcnamefilter(name) or self.isnosetest(obj)) and
+            safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None
+        )
+
+    def istestclass(self, obj, name):
+        return self.classnamefilter(name) or self.isnosetest(obj)
+
+    def _matches_prefix_or_glob_option(self, option_name, name):
+        """
+        checks if the given name matches the prefix or glob-pattern defined
+        in ini configuration.
+        """
+        for option in self.config.getini(option_name):
+            if name.startswith(option):
+                return True
+            # check that name looks like a glob-string before calling fnmatch
+            # because this is called for every name in each collected module,
+            # and fnmatch is somewhat expensive to call
+            elif ('*' in option or '?' in option or '[' in option) and \
+                    fnmatch.fnmatch(name, option):
+                return True
+        return False
+
+    def collect(self):
+        if not getattr(self.obj, "__test__", True):
+            return []
+
+        # NB. we avoid random getattrs and peek in the __dict__ instead
+        # (XXX originally introduced from a PyPy need, still true?)
+        dicts = [getattr(self.obj, '__dict__', {})]
+        for basecls in inspect.getmro(self.obj.__class__):
+            dicts.append(basecls.__dict__)
+        seen = {}
+        l = []
+        for dic in dicts:
+            for name, obj in list(dic.items()):
+                if name in seen:
+                    continue
+                seen[name] = True
+                res = self.makeitem(name, obj)
+                if res is None:
+                    continue
+                if not isinstance(res, list):
+                    res = [res]
+                l.extend(res)
+        l.sort(key=lambda item: item.reportinfo()[:2])
+        return l
+
+    def makeitem(self, name, obj):
+        #assert self.ihook.fspath == self.fspath, self
+        return self.ihook.pytest_pycollect_makeitem(
+            collector=self, name=name, obj=obj)
+
+    def _genfunctions(self, name, funcobj):
+        module = self.getparent(Module).obj
+        clscol = self.getparent(Class)
+        cls = clscol and clscol.obj or None
+        transfer_markers(funcobj, cls, module)
+        fm = self.session._fixturemanager
+        fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
+        metafunc = Metafunc(funcobj, fixtureinfo, self.config,
+                            cls=cls, module=module)
+        methods = []
+        if hasattr(module, "pytest_generate_tests"):
+            methods.append(module.pytest_generate_tests)
+        if hasattr(cls, "pytest_generate_tests"):
+            methods.append(cls().pytest_generate_tests)
+        if methods:
+            self.ihook.pytest_generate_tests.call_extra(methods,
+                                                        dict(metafunc=metafunc))
+        else:
+            self.ihook.pytest_generate_tests(metafunc=metafunc)
+
+        Function = self._getcustomclass("Function")
+        if not metafunc._calls:
+            yield Function(name, parent=self, fixtureinfo=fixtureinfo)
+        else:
+            # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
+            fixtures.add_funcarg_pseudo_fixture_def(self, metafunc, fm)
+
+            for callspec in metafunc._calls:
+                subname = "%s[%s]" % (name, callspec.id)
+                yield Function(name=subname, parent=self,
+                               callspec=callspec, callobj=funcobj,
+                               fixtureinfo=fixtureinfo,
+                               keywords={callspec.id:True},
+                               originalname=name,
+                               )
+
+
+def _marked(func, mark):
+    """ Returns True if :func: is already marked with :mark:, False otherwise.
+    This can happen if marker is applied to class and the test file is
+    invoked more than once.
+    """
+    try:
+        func_mark = getattr(func, mark.name)
+    except AttributeError:
+        return False
+    return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
+
+
+def transfer_markers(funcobj, cls, mod):
+    # XXX this should rather be code in the mark plugin or the mark
+    # plugin should merge with the python plugin.
+    for holder in (cls, mod):
+        try:
+            pytestmark = holder.pytestmark
+        except AttributeError:
+            continue
+        if isinstance(pytestmark, list):
+            for mark in pytestmark:
+                if not _marked(funcobj, mark):
+                    mark(funcobj)
+        else:
+            if not _marked(funcobj, pytestmark):
+                pytestmark(funcobj)
+
+class Module(pytest.File, PyCollector):
+    """ Collector for test classes and functions. """
+    def _getobj(self):
+        return self._memoizedcall('_obj', self._importtestmodule)
+
+    def collect(self):
+        self.session._fixturemanager.parsefactories(self)
+        return super(Module, self).collect()
+
+    def _importtestmodule(self):
+        # we assume we are only called once per module
+        importmode = self.config.getoption("--import-mode")
+        try:
+            mod = self.fspath.pyimport(ensuresyspath=importmode)
+        except SyntaxError:
+            raise self.CollectError(
+                _pytest._code.ExceptionInfo().getrepr(style="short"))
+        except self.fspath.ImportMismatchError:
+            e = sys.exc_info()[1]
+            raise self.CollectError(
+                "import file mismatch:\n"
+                "imported module %r has this __file__ attribute:\n"
+                "  %s\n"
+                "which is not the same as the test file we want to collect:\n"
+                "  %s\n"
+                "HINT: remove __pycache__ / .pyc files and/or use a "
+                "unique basename for your test file modules"
+                 % e.args
+            )
+        except ImportError:
+            from _pytest._code.code import ExceptionInfo
+            exc_info = ExceptionInfo()
+            if self.config.getoption('verbose') < 2:
+                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
+            exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly()
+            formatted_tb = py._builtin._totext(exc_repr)
+            raise self.CollectError(
+                "ImportError while importing test module '{fspath}'.\n"
+                "Hint: make sure your test modules/packages have valid Python names.\n"
+                "Traceback:\n"
+                "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
+            )
+        except _pytest.runner.Skipped as e:
+            if e.allow_module_level:
+                raise
+            raise self.CollectError(
+                "Using pytest.skip outside of a test is not allowed. If you are "
+                "trying to decorate a test function, use the @pytest.mark.skip "
+                "or @pytest.mark.skipif decorators instead."
+            )
+        self.config.pluginmanager.consider_module(mod)
+        return mod
+
+    def setup(self):
+        setup_module = _get_xunit_setup_teardown(self.obj, "setUpModule")
+        if setup_module is None:
+            setup_module = _get_xunit_setup_teardown(self.obj, "setup_module")
+        if setup_module is not None:
+            setup_module()
+
+        teardown_module = _get_xunit_setup_teardown(self.obj, 'tearDownModule')
+        if teardown_module is None:
+            teardown_module = _get_xunit_setup_teardown(self.obj, 'teardown_module')
+        if teardown_module is not None:
+            self.addfinalizer(teardown_module)
+
+
+def _get_xunit_setup_teardown(holder, attr_name, param_obj=None):
+    """
+    Return a callable to perform xunit-style setup or teardown if
+    the function exists in the ``holder`` object.
+    The ``param_obj`` parameter is the parameter which will be passed to the function
+    when the callable is called without arguments, defaults to the ``holder`` object.
+    Return ``None`` if a suitable callable is not found.
+    """
+    param_obj = param_obj if param_obj is not None else holder
+    result = _get_xunit_func(holder, attr_name)
+    if result is not None:
+        arg_count = result.__code__.co_argcount
+        if inspect.ismethod(result):
+            arg_count -= 1
+        if arg_count:
+            return lambda: result(param_obj)
+        else:
+            return result
+
+
+def _get_xunit_func(obj, name):
+    """Return the attribute from the given object to be used as a setup/teardown
+    xunit-style function, but only if not marked as a fixture to
+    avoid calling it twice.
+    """
+    meth = getattr(obj, name, None)
+    if fixtures.getfixturemarker(meth) is None:
+        return meth
+
+
+class Class(PyCollector):
+    """ Collector for test methods. """
+    def collect(self):
+        if hasinit(self.obj):
+            self.warn("C1", "cannot collect test class %r because it has a "
+                "__init__ constructor" % self.obj.__name__)
+            return []
+        elif hasnew(self.obj):
+            self.warn("C1", "cannot collect test class %r because it has a "
+                            "__new__ constructor" % self.obj.__name__)
+            return []
+        return [self._getcustomclass("Instance")(name="()", parent=self)]
+
+    def setup(self):
+        setup_class = _get_xunit_func(self.obj, 'setup_class')
+        if setup_class is not None:
+            setup_class = getattr(setup_class, 'im_func', setup_class)
+            setup_class = getattr(setup_class, '__func__', setup_class)
+            setup_class(self.obj)
+
+        fin_class = getattr(self.obj, 'teardown_class', None)
+        if fin_class is not None:
+            fin_class = getattr(fin_class, 'im_func', fin_class)
+            fin_class = getattr(fin_class, '__func__', fin_class)
+            self.addfinalizer(lambda: fin_class(self.obj))
+
+class Instance(PyCollector):
+    def _getobj(self):
+        return self.parent.obj()
+
+    def collect(self):
+        self.session._fixturemanager.parsefactories(self)
+        return super(Instance, self).collect()
+
+    def newinstance(self):
+        self.obj = self._getobj()
+        return self.obj
+
+class FunctionMixin(PyobjMixin):
+    """ mixin for the code common to Function and Generator.
+    """
+
+    def setup(self):
+        """ perform setup for this test function. """
+        if hasattr(self, '_preservedparent'):
+            obj = self._preservedparent
+        elif isinstance(self.parent, Instance):
+            obj = self.parent.newinstance()
+            self.obj = self._getobj()
+        else:
+            obj = self.parent.obj
+        if inspect.ismethod(self.obj):
+            setup_name = 'setup_method'
+            teardown_name = 'teardown_method'
+        else:
+            setup_name = 'setup_function'
+            teardown_name = 'teardown_function'
+        setup_func_or_method = _get_xunit_setup_teardown(obj, setup_name, param_obj=self.obj)
+        if setup_func_or_method is not None:
+            setup_func_or_method()
+        teardown_func_or_method = _get_xunit_setup_teardown(obj, teardown_name, param_obj=self.obj)
+        if teardown_func_or_method is not None:
+            self.addfinalizer(teardown_func_or_method)
+
+    def _prunetraceback(self, excinfo):
+        if hasattr(self, '_obj') and not self.config.option.fulltrace:
+            code = _pytest._code.Code(get_real_func(self.obj))
+            path, firstlineno = code.path, code.firstlineno
+            traceback = excinfo.traceback
+            ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
+            if ntraceback == traceback:
+                ntraceback = ntraceback.cut(path=path)
+                if ntraceback == traceback:
+                    #ntraceback = ntraceback.cut(excludepath=cutdir2)
+                    ntraceback = ntraceback.filter(filter_traceback)
+                    if not ntraceback:
+                        ntraceback = traceback
+
+            excinfo.traceback = ntraceback.filter()
+            # issue364: mark all but first and last frames to
+            # only show a single-line message for each frame
+            if self.config.option.tbstyle == "auto":
+                if len(excinfo.traceback) > 2:
+                    for entry in excinfo.traceback[1:-1]:
+                        entry.set_repr_style('short')
+
+    def _repr_failure_py(self, excinfo, style="long"):
+        if excinfo.errisinstance(pytest.fail.Exception):
+            if not excinfo.value.pytrace:
+                return py._builtin._totext(excinfo.value)
+        return super(FunctionMixin, self)._repr_failure_py(excinfo,
+            style=style)
+
+    def repr_failure(self, excinfo, outerr=None):
+        assert outerr is None, "XXX outerr usage is deprecated"
+        style = self.config.option.tbstyle
+        if style == "auto":
+            style = "long"
+        return self._repr_failure_py(excinfo, style=style)
+
+
+class Generator(FunctionMixin, PyCollector):
+    """Collector for deprecated yield-based test generators."""
+
+    def collect(self):
+        # test generators are seen as collectors but they also
+        # invoke setup/teardown on popular request
+        # (induced by the common "test_*" naming shared with normal tests)
+        from _pytest import deprecated
+        self.session._setupstate.prepare(self)
+        # see FunctionMixin.setup and test_setupstate_is_preserved_134
+        self._preservedparent = self.parent.obj
+        l = []
+        seen = {}
+        # Each item yielded by the generator becomes one Function item;
+        # names must be unique within this generator.
+        for i, x in enumerate(self.obj()):
+            name, call, args = self.getcallargs(x)
+            if not callable(call):
+                raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
+            if name is None:
+                name = "[%d]" % i
+            else:
+                name = "['%s']" % name
+            if name in seen:
+                raise ValueError("%r generated tests with non-unique name %r" %(self, name))
+            seen[name] = True
+            l.append(self.Function(name, self, args=args, callobj=call))
+            # yield-tests are deprecated; emit one warning per yielded test
+            self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath)
+        return l
+
+    def getcallargs(self, obj):
+        # Split a single yielded item into (name, callable, extra args).
+        if not isinstance(obj, (tuple, list)):
+            obj = (obj,)
+        # explicit naming
+        if isinstance(obj[0], py.builtin._basestring):
+            name = obj[0]
+            obj = obj[1:]
+        else:
+            name = None
+        call, args = obj[0], obj[1:]
+        return name, call, args
+
+
+def hasinit(obj):
+    init = getattr(obj, '__init__', None)
+    if init:
+        return init != object.__init__
+
+
+def hasnew(obj):
+    new = getattr(obj, '__new__', None)
+    if new:
+        return new != object.__new__
+
+
+class CallSpec2(object):
+    def __init__(self, metafunc):
+        self.metafunc = metafunc
+        self.funcargs = {}
+        self._idlist = []
+        self.params = {}
+        self._globalid = NOTSET
+        self._globalid_args = set()
+        self._globalparam = NOTSET
+        self._arg2scopenum = {}  # used for sorting parametrized resources
+        self.keywords = {}
+        self.indices = {}
+
+    def copy(self, metafunc):
+        cs = CallSpec2(self.metafunc)
+        cs.funcargs.update(self.funcargs)
+        cs.params.update(self.params)
+        cs.keywords.update(self.keywords)
+        cs.indices.update(self.indices)
+        cs._arg2scopenum.update(self._arg2scopenum)
+        cs._idlist = list(self._idlist)
+        cs._globalid = self._globalid
+        cs._globalid_args = self._globalid_args
+        cs._globalparam = self._globalparam
+        return cs
+
+    def _checkargnotcontained(self, arg):
+        if arg in self.params or arg in self.funcargs:
+            raise ValueError("duplicate %r" %(arg,))
+
+    def getparam(self, name):
+        try:
+            return self.params[name]
+        except KeyError:
+            if self._globalparam is NOTSET:
+                raise ValueError(name)
+            return self._globalparam
+
+    @property
+    def id(self):
+        return "-".join(map(str, filter(None, self._idlist)))
+
+    def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
+                 param_index):
+        for arg,val in zip(argnames, valset):
+            self._checkargnotcontained(arg)
+            valtype_for_arg = valtypes[arg]
+            getattr(self, valtype_for_arg)[arg] = val
+            self.indices[arg] = param_index
+            self._arg2scopenum[arg] = scopenum
+        self._idlist.append(id)
+        self.keywords.update(keywords)
+
+    def setall(self, funcargs, id, param):
+        for x in funcargs:
+            self._checkargnotcontained(x)
+        self.funcargs.update(funcargs)
+        if id is not NOTSET:
+            self._idlist.append(id)
+        if param is not NOTSET:
+            assert self._globalparam is NOTSET
+            self._globalparam = param
+        for arg in funcargs:
+            self._arg2scopenum[arg] = fixtures.scopenum_function
+
+
+class Metafunc(fixtures.FuncargnamesCompatAttr):
+    """
+    Metafunc objects are passed to the ``pytest_generate_tests`` hook.
+    They help to inspect a test function and to generate tests according to
+    test configuration or values specified in the class or module where a
+    test function is defined.
+    """
+    def __init__(self, function, fixtureinfo, config, cls=None, module=None):
+        #: access to the :class:`_pytest.config.Config` object for the test session
+        self.config = config
+
+        #: the module object where the test function is defined in.
+        self.module = module
+
+        #: underlying python test function
+        self.function = function
+
+        #: set of fixture names required by the test function
+        self.fixturenames = fixtureinfo.names_closure
+
+        #: class object where the test function is defined in or ``None``.
+        self.cls = cls
+
+        # accumulated CallSpec2 objects, one per generated test invocation
+        self._calls = []
+        # ids already used by addcall(), to reject duplicates
+        self._ids = py.builtin.set()
+        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
+
+    def parametrize(self, argnames, argvalues, indirect=False, ids=None,
+        scope=None):
+        """ Add new invocations to the underlying test function using the list
+        of argvalues for the given argnames.  Parametrization is performed
+        during the collection phase.  If you need to setup expensive resources
+        see about setting indirect to do it rather at test setup time.
+
+        :arg argnames: a comma-separated string denoting one or more argument
+                       names, or a list/tuple of argument strings.
+
+        :arg argvalues: The list of argvalues determines how often a
+            test is invoked with different argument values.  If only one
+            argname was specified argvalues is a list of values.  If N
+            argnames were specified, argvalues must be a list of N-tuples,
+            where each tuple-element specifies a value for its respective
+            argname.
+
+        :arg indirect: The list of argnames or boolean. A list of arguments'
+            names (subset of argnames). If True the list contains all names from
+            the argnames. Each argvalue corresponding to an argname in this list will
+            be passed as request.param to its respective argname fixture
+            function so that it can perform more expensive setups during the
+            setup phase of a test rather than at collection time.
+
+        :arg ids: list of string ids, or a callable.
+            If strings, each is corresponding to the argvalues so that they are
+            part of the test id. If None is given as id of specific test, the
+            automatically generated id for that argument will be used.
+            If callable, it should take one argument (a single argvalue) and return
+            a string or return None. If None, the automatically generated id for that
+            argument will be used.
+            If no ids are provided they will be generated automatically from
+            the argvalues.
+
+        :arg scope: if specified it denotes the scope of the parameters.
+            The scope is used for grouping tests by parameter instances.
+            It will also override any fixture-function defined scope, allowing
+            to set a dynamic scope using test context or configuration.
+        """
+        from _pytest.fixtures import scope2index
+        from _pytest.mark import extract_argvalue
+        from py.io import saferepr
+
+        # strip pytest.mark decorations from the argvalues, collecting
+        # the extracted marks as per-callspec keywords
+        unwrapped_argvalues = []
+        newkeywords = []
+        for maybe_marked_args in argvalues:
+            argval, newmarks = extract_argvalue(maybe_marked_args)
+            unwrapped_argvalues.append(argval)
+            newkeywords.append(newmarks)
+        argvalues = unwrapped_argvalues
+
+        # normalize a comma-separated argnames string into a list; with a
+        # single argname each value is wrapped in a 1-tuple
+        if not isinstance(argnames, (tuple, list)):
+            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+            if len(argnames) == 1:
+                argvalues = [(val,) for val in argvalues]
+        if not argvalues:
+            argvalues = [(NOTSET,) * len(argnames)]
+            # we passed an empty list to parametrize, skip that test
+            #
+            fs, lineno = getfslineno(self.function)
+            newmark = pytest.mark.skip(
+                reason="got empty parameter set %r, function %s at %s:%d" % (
+                    argnames, self.function.__name__, fs, lineno))
+            newkeywords = [{newmark.markname: newmark}]
+
+        if scope is None:
+            scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
+
+        scopenum = scope2index(
+            scope, descr='call to {0}'.format(self.parametrize))
+        valtypes = {}
+        # every parametrized argname must be a known fixture/argument name
+        for arg in argnames:
+            if arg not in self.fixturenames:
+                if isinstance(indirect, (tuple, list)):
+                    name = 'fixture' if arg in indirect else 'argument'
+                else:
+                    name = 'fixture' if indirect else 'argument'
+                raise ValueError(
+                    "%r uses no %s %r" % (
+                            self.function, name, arg))
+
+        # valtypes decides whether each value is delivered as a direct
+        # funcarg or as request.param for an indirect fixture
+        if indirect is True:
+            valtypes = dict.fromkeys(argnames, "params")
+        elif indirect is False:
+            valtypes = dict.fromkeys(argnames, "funcargs")
+        elif isinstance(indirect, (tuple, list)):
+            valtypes = dict.fromkeys(argnames, "funcargs")
+            for arg in indirect:
+                if arg not in argnames:
+                    raise ValueError("indirect given to %r: fixture %r doesn't exist" % (
+                                     self.function, arg))
+                valtypes[arg] = "params"
+        idfn = None
+        if callable(ids):
+            idfn = ids
+            ids = None
+        if ids:
+            if len(ids) != len(argvalues):
+                raise ValueError('%d tests specified with %d ids' %(
+                                 len(argvalues), len(ids)))
+            for id_value in ids:
+                if id_value is not None and not isinstance(id_value, py.builtin._basestring):
+                    msg = 'ids must be list of strings, found: %s (type: %s)'
+                    raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
+        ids = idmaker(argnames, argvalues, idfn, ids, self.config)
+        # cross-multiply the new value sets with any previously added calls
+        newcalls = []
+        for callspec in self._calls or [CallSpec2(self)]:
+            elements = zip(ids, argvalues, newkeywords, count())
+            for a_id, valset, keywords, param_index in elements:
+                assert len(valset) == len(argnames)
+                newcallspec = callspec.copy(self)
+                newcallspec.setmulti(valtypes, argnames, valset, a_id,
+                                     keywords, scopenum, param_index)
+                newcalls.append(newcallspec)
+        self._calls = newcalls
+
+    def addcall(self, funcargs=None, id=NOTSET, param=NOTSET):
+        """ (deprecated, use parametrize) Add a new call to the underlying
+        test function during the collection phase of a test run.  Note that
+        request.addcall() is called during the test collection phase prior and
+        independently to actual test execution.  You should only use addcall()
+        if you need to specify multiple arguments of a test function.
+
+        :arg funcargs: argument keyword dictionary used when invoking
+            the test function.
+
+        :arg id: used for reporting and identification purposes.  If you
+            don't supply an `id` an automatic unique id will be generated.
+
+        :arg param: a parameter which will be exposed to a later fixture function
+            invocation through the ``request.param`` attribute.
+        """
+        assert funcargs is None or isinstance(funcargs, dict)
+        if funcargs is not None:
+            for name in funcargs:
+                if name not in self.fixturenames:
+                    pytest.fail("funcarg %r not used in this function." % name)
+        else:
+            funcargs = {}
+        if id is None:
+            raise ValueError("id=None not allowed")
+        if id is NOTSET:
+            # default id is the positional index of this call
+            id = len(self._calls)
+        id = str(id)
+        if id in self._ids:
+            raise ValueError("duplicate id %r" % id)
+        self._ids.add(id)
+
+        cs = CallSpec2(self)
+        cs.setall(funcargs, id, param)
+        self._calls.append(cs)
+
+
+def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
+    """Find the most appropriate scope for a parametrized call based on its arguments.
+
+    When there's at least one direct argument, always use "function" scope.
+
+    When a test function is parametrized and all its arguments are indirect
+    (e.g. fixtures), return the most narrow scope based on the fixtures used.
+
+    Related to issue #1832, based on code posted by @Kingdread.
+    """
+    from _pytest.fixtures import scopes
+    indirect_as_list = isinstance(indirect, (list, tuple))
+    all_arguments_are_fixtures = indirect is True or \
+                                 indirect_as_list and len(indirect) == argnames
+    if all_arguments_are_fixtures:
+        fixturedefs = arg2fixturedefs or {}
+        used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()]
+        if used_scopes:
+            # Takes the most narrow scope from used fixtures
+            for scope in reversed(scopes):
+                if scope in used_scopes:
+                    return scope
+
+    return 'function'
+
+
+def _idval(val, argname, idx, idfn, config=None):
+    if idfn:
+        try:
+            s = idfn(val)
+            if s:
+                return _escape_strings(s)
+        except Exception:
+            pass
+
+    if config:
+        hook_id = config.hook.pytest_make_parametrize_id(config=config, val=val)
+        if hook_id:
+            return hook_id
+
+    if isinstance(val, STRING_TYPES):
+        return _escape_strings(val)
+    elif isinstance(val, (float, int, bool, NoneType)):
+        return str(val)
+    elif isinstance(val, REGEX_TYPE):
+        return _escape_strings(val.pattern)
+    elif enum is not None and isinstance(val, enum.Enum):
+        return str(val)
+    elif isclass(val) and hasattr(val, '__name__'):
+        return val.__name__
+    return str(argname)+str(idx)
+
+def _idvalset(idx, valset, argnames, idfn, ids, config=None):
+    if ids is None or (idx >= len(ids) or ids[idx] is None):
+        this_id = [_idval(val, argname, idx, idfn, config)
+                   for val, argname in zip(valset, argnames)]
+        return "-".join(this_id)
+    else:
+        return _escape_strings(ids[idx])
+
+def idmaker(argnames, argvalues, idfn=None, ids=None, config=None):
+    ids = [_idvalset(valindex, valset, argnames, idfn, ids, config)
+           for valindex, valset in enumerate(argvalues)]
+    if len(set(ids)) != len(ids):
+        # The ids are not unique
+        duplicates = [testid for testid in ids if ids.count(testid) > 1]
+        counters = collections.defaultdict(lambda: 0)
+        for index, testid in enumerate(ids):
+            if testid in duplicates:
+                ids[index] = testid + str(counters[testid])
+                counters[testid] += 1
+    return ids
+
+
+def show_fixtures_per_test(config):
+    """Entry point for ``--fixtures-per-test``: run inside a wrapped session."""
+    from _pytest.main import wrap_session
+    return wrap_session(config, _show_fixtures_per_test)
+
+
+def _show_fixtures_per_test(config, session):
+    """Collect all test items and print, per item, the fixtures it uses."""
+    import _pytest.config
+    session.perform_collect()
+    curdir = py.path.local()
+    tw = _pytest.config.create_terminal_writer(config)
+    verbose = config.getvalue("verbose")
+
+    def get_best_rel(func):
+        # location of *func* relative to the current directory
+        loc = getlocation(func, curdir)
+        return curdir.bestrelpath(loc)
+
+    def write_fixture(fixture_def):
+        # print one fixture name (plus location when verbose) and its docstring
+        argname = fixture_def.argname
+
+        # private fixtures (leading underscore) are hidden unless verbose
+        if verbose <= 0 and argname.startswith("_"):
+            return
+        if verbose > 0:
+            bestrel = get_best_rel(fixture_def.func)
+            funcargspec = "{0} -- {1}".format(argname, bestrel)
+        else:
+            funcargspec = argname
+        tw.line(funcargspec, green=True)
+
+        INDENT = '    {0}'
+        fixture_doc = fixture_def.func.__doc__
+
+        if fixture_doc:
+            for line in fixture_doc.strip().split('\n'):
+                tw.line(INDENT.format(line.strip()))
+        else:
+            tw.line(INDENT.format('no docstring available'), red=True)
+
+    def write_item(item):
+        # print the header for one test item and each fixture it uses
+        name2fixturedefs = item._fixtureinfo.name2fixturedefs
+
+        if not name2fixturedefs:
+            # The given test item does not use any fixtures
+            return
+        bestrel = get_best_rel(item.function)
+
+        tw.line()
+        tw.sep('-', 'fixtures used by {0}'.format(item.name))
+        tw.sep('-', '({0})'.format(bestrel))
+        for argname, fixture_defs in sorted(name2fixturedefs.items()):
+            assert fixture_defs is not None
+            if not fixture_defs:
+                continue
+            # The last fixture def item in the list is expected
+            # to be the one used by the test item
+            write_fixture(fixture_defs[-1])
+
+    for item in session.items:
+        write_item(item)
+
+
+def showfixtures(config):
+    """Entry point for ``--fixtures``: run inside a wrapped session."""
+    from _pytest.main import wrap_session
+    return wrap_session(config, _showfixtures_main)
+
+def _showfixtures_main(config, session):
+    """Collect the session and print every available fixture, grouped by module."""
+    import _pytest.config
+    session.perform_collect()
+    curdir = py.path.local()
+    tw = _pytest.config.create_terminal_writer(config)
+    verbose = config.getvalue("verbose")
+
+    fm = session._fixturemanager
+
+    available = []
+    # (argname, location) pairs already listed, to skip duplicates
+    seen = set()
+
+    for argname, fixturedefs in fm._arg2fixturedefs.items():
+        assert fixturedefs is not None
+        if not fixturedefs:
+            continue
+        for fixturedef in fixturedefs:
+            loc = getlocation(fixturedef.func, curdir)
+            if (fixturedef.argname, loc) in seen:
+                continue
+            seen.add((fixturedef.argname, loc))
+            # tuple layout drives the sort order below: baseid length,
+            # then module, then relative path, then fixture name
+            available.append((len(fixturedef.baseid),
+                              fixturedef.func.__module__,
+                              curdir.bestrelpath(loc),
+                              fixturedef.argname, fixturedef))
+
+    available.sort()
+    currentmodule = None
+    for baseid, module, bestrel, argname, fixturedef in available:
+        # emit a separator line once per (non-internal) defining module
+        if currentmodule != module:
+            if not module.startswith("_pytest."):
+                tw.line()
+                tw.sep("-", "fixtures defined from %s" %(module,))
+                currentmodule = module
+        # private fixtures (leading underscore) are hidden unless verbose
+        if verbose <= 0 and argname[0] == "_":
+            continue
+        if verbose > 0:
+            funcargspec = "%s -- %s" %(argname, bestrel,)
+        else:
+            funcargspec = argname
+        tw.line(funcargspec, green=True)
+        loc = getlocation(fixturedef.func, curdir)
+        doc = fixturedef.func.__doc__ or ""
+        if doc:
+            for line in doc.strip().split("\n"):
+                tw.line("    " + line.strip())
+        else:
+            tw.line("    %s: no docstring available" %(loc,),
+                red=True)
+
+
+# builtin pytest.raises helper
+
+def raises(expected_exception, *args, **kwargs):
+    """
+    Assert that a code block/function call raises ``expected_exception``
+    and raise a failure exception otherwise.
+
+    This helper produces a ``ExceptionInfo()`` object (see below).
+
+    If using Python 2.5 or above, you may use this function as a
+    context manager::
+
+        >>> with raises(ZeroDivisionError):
+        ...    1/0
+
+    .. versionchanged:: 2.10
+
+    In the context manager form you may use the keyword argument
+    ``message`` to specify a custom failure message::
+
+        >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
+        ...    pass
+        Traceback (most recent call last):
+          ...
+        Failed: Expecting ZeroDivisionError
+
+
+    .. note::
+
+       When using ``pytest.raises`` as a context manager, it's worthwhile to
+       note that normal context manager rules apply and that the exception
+       raised *must* be the final line in the scope of the context manager.
+       Lines of code after that, within the scope of the context manager will
+       not be executed. For example::
+
+           >>> value = 15
+           >>> with raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...     assert str(exc_info.value) == "value must be <= 10"  # this will not execute
+
+       Instead, the following approach must be taken (note the difference in
+       scope)::
+
+           >>> with raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...
+           >>> assert str(exc_info.value) == "value must be <= 10"
+
+
+    Or you can specify a callable by passing a to-be-called lambda::
+
+        >>> raises(ZeroDivisionError, lambda: 1/0)
+        <ExceptionInfo ...>
+
+    or you can specify an arbitrary callable with arguments::
+
+        >>> def f(x): return 1/x
+        ...
+        >>> raises(ZeroDivisionError, f, 0)
+        <ExceptionInfo ...>
+        >>> raises(ZeroDivisionError, f, x=0)
+        <ExceptionInfo ...>
+
+    A third possibility is to use a string to be executed::
+
+        >>> raises(ZeroDivisionError, "f(0)")
+        <ExceptionInfo ...>
+
+    .. autoclass:: _pytest._code.ExceptionInfo
+        :members:
+
+    .. note::
+        Similar to caught exception objects in Python, explicitly clearing
+        local references to returned ``ExceptionInfo`` objects can
+        help the Python interpreter speed up its garbage collection.
+
+        Clearing those references breaks a reference cycle
+        (``ExceptionInfo`` --> caught exception --> frame stack raising
+        the exception --> current frame stack --> local variables -->
+        ``ExceptionInfo``) which makes Python keep all objects referenced
+        from that cycle (including all local variables in the current
+        frame) alive until the next cyclic garbage collection run. See the
+        official Python ``try`` statement documentation for more detailed
+        information.
+
+    """
+    __tracebackhide__ = True
+    if expected_exception is AssertionError:
+        # we want to catch a AssertionError
+        # replace our subclass with the builtin one
+        # see https://github.com/pytest-dev/pytest/issues/176
+        from _pytest.assertion.util import BuiltinAssertionError \
+            as expected_exception
+    msg = ("exceptions must be old-style classes or"
+           " derived from BaseException, not %s")
+    # validate that every expected exception is actually a class
+    if isinstance(expected_exception, tuple):
+        for exc in expected_exception:
+            if not isclass(exc):
+                raise TypeError(msg % type(exc))
+    elif not isclass(expected_exception):
+        raise TypeError(msg % type(expected_exception))
+
+    message = "DID NOT RAISE {0}".format(expected_exception)
+
+    # no positional args -> context manager form
+    if not args:
+        if "message" in kwargs:
+            message = kwargs.pop("message")
+        return RaisesContext(expected_exception, message)
+    elif isinstance(args[0], str):
+        # string form: compile and exec the code in the caller's frame
+        code, = args
+        assert isinstance(code, str)
+        frame = sys._getframe(1)
+        loc = frame.f_locals.copy()
+        loc.update(kwargs)
+        #print "raises frame scope: %r" % frame.f_locals
+        try:
+            code = _pytest._code.Source(code).compile()
+            py.builtin.exec_(code, frame.f_globals, loc)
+            # XXX didn'T mean f_globals == f_locals something special?
+            #     this is destroyed here ...
+        except expected_exception:
+            return _pytest._code.ExceptionInfo()
+    else:
+        # callable form: invoke with the remaining args/kwargs
+        func = args[0]
+        try:
+            func(*args[1:], **kwargs)
+        except expected_exception:
+            return _pytest._code.ExceptionInfo()
+    pytest.fail(message)
+
+class RaisesContext(object):
+    """Context manager returned by ``pytest.raises(...)`` (no-args form)."""
+
+    def __init__(self, expected_exception, message):
+        # exception class (or tuple of classes) that must be raised
+        self.expected_exception = expected_exception
+        # failure message used when nothing is raised
+        self.message = message
+        self.excinfo = None
+
+    def __enter__(self):
+        # hand out an uninitialized ExceptionInfo; it is filled in on exit
+        self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
+        return self.excinfo
+
+    def __exit__(self, *tp):
+        __tracebackhide__ = True
+        if tp[0] is None:
+            # the managed block did not raise at all
+            pytest.fail(self.message)
+        if sys.version_info < (2, 7):
+            # py26: on __exit__() exc_value often does not contain the
+            # exception value.
+            # http://bugs.python.org/issue7853
+            if not isinstance(tp[1], BaseException):
+                exc_type, value, traceback = tp
+                tp = exc_type, exc_type(value), traceback
+        self.excinfo.__init__(tp)
+        # suppress (return True) only when the raised type matches
+        suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
+        if sys.version_info[0] == 2 and suppress_exception:
+            sys.exc_clear()
+        return suppress_exception
+
+
+# builtin pytest.approx helper
+
+class approx(object):
+    """
+    Assert that two numbers (or two sets of numbers) are equal to each other
+    within some tolerance.
+
+    Due to the `intricacies of floating-point arithmetic`__, numbers that we
+    would intuitively expect to be equal are not always so::
+
+        >>> 0.1 + 0.2 == 0.3
+        False
+
+    __ https://docs.python.org/3/tutorial/floatingpoint.html
+
+    This problem is commonly encountered when writing tests, e.g. when making
+    sure that floating-point values are what you expect them to be.  One way to
+    deal with this problem is to assert that two floating-point numbers are
+    equal to within some appropriate tolerance::
+
+        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
+        True
+
+    However, comparisons like this are tedious to write and difficult to
+    understand.  Furthermore, absolute comparisons like the one above are
+    usually discouraged because there's no tolerance that works well for all
+    situations.  ``1e-6`` is good for numbers around ``1``, but too small for
+    very big numbers and too big for very small ones.  It's better to express
+    the tolerance as a fraction of the expected value, but relative comparisons
+    like that are even more difficult to write correctly and concisely.
+
+    The ``approx`` class performs floating-point comparisons using a syntax
+    that's as intuitive as possible::
+
+        >>> from pytest import approx
+        >>> 0.1 + 0.2 == approx(0.3)
+        True
+
+    The same syntax also works on sequences of numbers::
+
+        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
+        True
+
+    By default, ``approx`` considers numbers within a relative tolerance of
+    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
+    This treatment would lead to surprising results if the expected value was
+    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
+    To handle this case less surprisingly, ``approx`` also considers numbers
+    within an absolute tolerance of ``1e-12`` of its expected value to be
+    equal.  Infinite numbers are another special case.  They are only
+    considered equal to themselves, regardless of the relative tolerance.  Both
+    the relative and absolute tolerances can be changed by passing arguments to
+    the ``approx`` constructor::
+
+        >>> 1.0001 == approx(1)
+        False
+        >>> 1.0001 == approx(1, rel=1e-3)
+        True
+        >>> 1.0001 == approx(1, abs=1e-3)
+        True
+
+    If you specify ``abs`` but not ``rel``, the comparison will not consider
+    the relative tolerance at all.  In other words, two numbers that are within
+    the default relative tolerance of ``1e-6`` will still be considered unequal
+    if they exceed the specified absolute tolerance.  If you specify both
+    ``abs`` and ``rel``, the numbers will be considered equal if either
+    tolerance is met::
+
+        >>> 1 + 1e-8 == approx(1)
+        True
+        >>> 1 + 1e-8 == approx(1, abs=1e-12)
+        False
+        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
+        True
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers.  All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``:  True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met.  Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e.  neither ``a`` nor
+      ``b`` is a "reference value").  You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default.  Only available in python>=3.5.  `More information...`__
+
+      __ https://docs.python.org/3/library/math.html#math.isclose
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less that the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance.  Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value.  Support for comparing sequences
+      is provided by ``numpy.allclose``.  `More information...`__
+
+      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``.  No relative tolerance is
+      considered and the absolute tolerance cannot be changed, so this function
+      is not appropriate for very large or very small numbers.  Also, it's only
+      available in subclasses of ``unittest.TestCase`` and it's ugly because it
+      doesn't follow PEP8.  `More information...`__
+
+      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value.  In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+    """
+
+    def __init__(self, expected, rel=None, abs=None):
+        # ``expected`` goes through the property setter below
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+
+    def __repr__(self):
+        # ``self.expected`` is always a list of ApproxNonIterable here
+        return ', '.join(repr(x) for x in self.expected)
+
+    def __eq__(self, actual):
+        from collections import Iterable
+        # scalars are compared as one-element sequences
+        if not isinstance(actual, Iterable):
+            actual = [actual]
+        if len(actual) != len(self.expected):
+            return False
+        # delegate each element comparison to ApproxNonIterable.__eq__
+        return all(a == x for a, x in zip(actual, self.expected))
+
+    # approximate equality is not transitive, so approx is unhashable
+    __hash__ = None
+
+    def __ne__(self, actual):
+        return not (actual == self)
+
+    @property
+    def expected(self):
+        # Regardless of whether the user-specified expected value is a number
+        # or a sequence of numbers, return a list of ApproxNotIterable objects
+        # that can be compared against.
+        from collections import Iterable
+        approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs)
+        if isinstance(self._expected, Iterable):
+            return [approx_non_iter(x) for x in self._expected]
+        else:
+            return [approx_non_iter(self._expected)]
+
+    @expected.setter
+    def expected(self, expected):
+        # store the raw value; the getter wraps it on every access
+        self._expected = expected
+
+
class ApproxNonIterable(object):
    """
    Perform approximate comparisons for single numbers only.

    In other words, the ``expected`` attribute for objects of this class must
    be some sort of number.  This is in contrast to the ``approx`` class, where
    the ``expected`` attribute can either be a number or a sequence of numbers.
    This class is responsible for making comparisons, while ``approx`` is
    responsible for abstracting the difference between numbers and sequences of
    numbers.  Although this class can stand on its own, it's only meant to be
    used within ``approx``.
    """

    def __init__(self, expected, rel=None, abs=None):
        # ``rel``/``abs`` mirror the public ``approx`` keyword names and
        # deliberately shadow the builtins.
        self.expected = expected
        self.abs = abs
        self.rel = rel

    def __repr__(self):
        """Show the expected value together with its computed tolerance."""
        if isinstance(self.expected, complex):
            return str(self.expected)

        # Infinities aren't compared using tolerances, so don't show a
        # tolerance.
        if math.isinf(self.expected):
            return str(self.expected)

        # If a sensible tolerance can't be calculated, self.tolerance will
        # raise a ValueError.  In this case, display '???'.
        try:
            vetted_tolerance = '{:.1e}'.format(self.tolerance)
        except ValueError:
            vetted_tolerance = '???'

        # Python 2 terminals may not handle the unicode plus/minus sign.
        if sys.version_info[0] == 2:
            return '{0} +- {1}'.format(self.expected, vetted_tolerance)
        else:
            return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)

    def __eq__(self, actual):
        """True if ``actual`` is within tolerance of the expected value."""
        # Short-circuit exact equality.
        if actual == self.expected:
            return True

        # Infinity shouldn't be approximately equal to anything but itself, but
        # if there's a relative tolerance, it will be infinite and infinity
        # will seem approximately equal to everything.  The equal-to-itself
        # case would have been short circuited above, so here we can just
        # return false if the expected value is infinite.  The abs() call is
        # for compatibility with complex numbers.
        if math.isinf(abs(self.expected)):
            return False

        # Return true if the two numbers are within the tolerance.
        return abs(self.expected - actual) <= self.tolerance

    __hash__ = None

    def __ne__(self, actual):
        return not (actual == self)

    @property
    def tolerance(self):
        """Return the larger of the absolute and relative tolerances.

        Raises ``ValueError`` if either tolerance is negative or NaN.
        """
        set_default = lambda x, default: x if x is not None else default

        # Figure out what the absolute tolerance should be.  ``self.abs`` is
        # either None or a value specified by the user.
        absolute_tolerance = set_default(self.abs, 1e-12)

        if absolute_tolerance < 0:
            raise ValueError("absolute tolerance can't be negative: {0}".format(absolute_tolerance))
        if math.isnan(absolute_tolerance):
            raise ValueError("absolute tolerance can't be NaN.")

        # If the user specified an absolute tolerance but not a relative one,
        # just return the absolute tolerance.
        if self.rel is None:
            if self.abs is not None:
                return absolute_tolerance

        # Figure out what the relative tolerance should be.  ``self.rel`` is
        # either None or a value specified by the user.  This is done after
        # we've made sure the user didn't ask for an absolute tolerance only,
        # because we don't want to raise errors about the relative tolerance if
        # we aren't even going to use it.
        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)

        if relative_tolerance < 0:
            # Bug fix: report the offending *relative* tolerance; the original
            # formatted ``absolute_tolerance`` into this message.
            raise ValueError("relative tolerance can't be negative: {0}".format(relative_tolerance))
        if math.isnan(relative_tolerance):
            raise ValueError("relative tolerance can't be NaN.")

        # Return the larger of the relative and absolute tolerances.
        return max(relative_tolerance, absolute_tolerance)
+
+
+#
+#  the basic pytest Function item
+#
+
class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr):
    """ a Function Item is responsible for setting up and executing a
    Python test function.
    """
    # Id assigned by parametrization (``callspec.id``); None for plain tests.
    _genid = None
    def __init__(self, name, parent, args=None, config=None,
                 callspec=None, callobj=NOTSET, keywords=None, session=None,
                 fixtureinfo=None, originalname=None):
        # ``callobj`` defaults to the NOTSET sentinel so an explicitly passed
        # test callable can be distinguished from "look it up on the parent".
        super(Function, self).__init__(name, parent, config=config,
                                       session=session)
        self._args = args
        if callobj is not NOTSET:
            self.obj = callobj

        # Markers stored on the function object become item keywords.
        self.keywords.update(self.obj.__dict__)
        if callspec:
            self.callspec = callspec
            self.keywords.update(callspec.keywords)
        if keywords:
            self.keywords.update(keywords)

        if fixtureinfo is None:
            # Compute the fixture closure when the caller did not supply one;
            # (deprecated) yielded functions receive funcargs differently.
            fixtureinfo = self.session._fixturemanager.getfixtureinfo(
                self.parent, self.obj, self.cls,
                funcargs=not self._isyieldedfunction())
        self._fixtureinfo = fixtureinfo
        self.fixturenames = fixtureinfo.names_closure
        self._initrequest()

        #: original function name, without any decorations (for example
        #: parametrization adds a ``"[...]"`` suffix to function names).
        #:
        #: .. versionadded:: 3.0
        self.originalname = originalname

    def _initrequest(self):
        # (Re)create the FixtureRequest; called from __init__ and again by
        # the runner when a previously torn-down request must be rebuilt.
        self.funcargs = {}
        if self._isyieldedfunction():
            assert not hasattr(self, "callspec"), (
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if hasattr(self, "callspec"):
                callspec = self.callspec
                assert not callspec.funcargs
                self._genid = callspec.id
                if hasattr(callspec, "param"):
                    self.param = callspec.param
        self._request = fixtures.FixtureRequest(self)

    @property
    def function(self):
        "underlying python 'function' object"
        return getattr(self.obj, 'im_func', self.obj)

    def _getobj(self):
        # Strip the parametrization suffix ("name[param-id]") before looking
        # the callable up on the parent collector's module/class object.
        name = self.name
        i = name.find("[") # parametrization
        if i != -1:
            name = name[:i]
        return getattr(self.parent.obj, name)

    @property
    def _pyfuncitem(self):
        "(compatonly) for code expecting pytest-2.2 style request objects"
        return self

    def _isyieldedfunction(self):
        # ``args`` were only ever passed for (deprecated) yielded tests.
        return getattr(self, "_args", None) is not None

    def runtest(self):
        """ execute the underlying test function. """
        self.ihook.pytest_pyfunc_call(pyfuncitem=self)

    def setup(self):
        # Run the standard item setup, then resolve and inject fixtures.
        super(Function, self).setup()
        fixtures.fillfixtures(self)
diff --git a/lib/spack/external/_pytest/recwarn.py b/lib/spack/external/_pytest/recwarn.py
new file mode 100644
index 0000000000..87823bfbc6
--- /dev/null
+++ b/lib/spack/external/_pytest/recwarn.py
@@ -0,0 +1,226 @@
+""" recording warnings during test function execution. """
+
+import inspect
+
+import _pytest._code
+import py
+import sys
+import warnings
+import pytest
+
+
@pytest.yield_fixture
def recwarn(request):
    """Return a WarningsRecorder instance that provides these methods:

    * ``pop(category=None)``: return last warning matching the category.
    * ``clear()``: clear list of warnings

    See http://docs.python.org/library/warnings.html for information
    on warning categories.
    """
    recorder = WarningsRecorder()
    with recorder:
        # Reset the filters inside the recording context so warnings are
        # visible to the test regardless of command-line configuration.
        warnings.simplefilter('default')
        yield recorder
+
+
def pytest_namespace():
    """Expose ``deprecated_call`` and ``warns`` on the ``pytest`` namespace."""
    return dict(deprecated_call=deprecated_call, warns=warns)
+
+
def deprecated_call(func=None, *args, **kwargs):
    """ assert that calling ``func(*args, **kwargs)`` triggers a
    ``DeprecationWarning`` or ``PendingDeprecationWarning``.

    This function can be used as a context manager::

        >>> import warnings
        >>> def api_call_v2():
        ...     warnings.warn('use v3 of this api', DeprecationWarning)
        ...     return 200

        >>> with deprecated_call():
        ...    assert api_call_v2() == 200

    Note: we cannot use WarningsRecorder here because it is still subject
    to the mechanism that prevents warnings of the same type from being
    triggered twice for the same module. See #1190.
    """
    # Context-manager form: delegate the "did it warn" check to the checker.
    if not func:
        return WarningsChecker(expected_warning=DeprecationWarning)

    categories = []

    # Wrap both entry points of the warnings machinery so *every* emitted
    # category is observed, bypassing the once-per-location filtering.
    def warn_explicit(message, category, *args, **kwargs):
        categories.append(category)
        old_warn_explicit(message, category, *args, **kwargs)

    def warn(message, category=None, *args, **kwargs):
        # ``warnings.warn`` accepts either a message string plus category,
        # or a Warning instance whose class is the category.
        if isinstance(message, Warning):
            categories.append(message.__class__)
        else:
            categories.append(category)
        old_warn(message, category, *args, **kwargs)

    old_warn = warnings.warn
    old_warn_explicit = warnings.warn_explicit
    warnings.warn_explicit = warn_explicit
    warnings.warn = warn
    try:
        ret = func(*args, **kwargs)
    finally:
        # Always restore the global hooks, even if ``func`` raised.
        warnings.warn_explicit = old_warn_explicit
        warnings.warn = old_warn
    deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
    if not any(issubclass(c, deprecation_categories) for c in categories):
        __tracebackhide__ = True
        raise AssertionError("%r did not produce DeprecationWarning" % (func,))
    return ret
+
+
def warns(expected_warning, *args, **kwargs):
    """Assert that code raises a particular class of warning.

    Specifically, the input @expected_warning can be a warning class or
    tuple of warning classes, and the code must return that warning
    (if a single class) or one of those warnings (if a tuple).

    This helper produces a list of ``warnings.WarningMessage`` objects,
    one for each warning raised.

    This function can be used as a context manager, or any of the other ways
    ``pytest.raises`` can be used::

        >>> with warns(RuntimeWarning):
        ...    warnings.warn("my warning", RuntimeWarning)
    """
    wcheck = WarningsChecker(expected_warning)
    if not args:
        # Context-manager form: ``with warns(...):``.
        return wcheck
    elif isinstance(args[0], str):
        # String form: compile and exec the snippet in the caller's frame,
        # mirroring the legacy ``pytest.raises("...")`` interface.
        code, = args
        assert isinstance(code, str)
        frame = sys._getframe(1)
        loc = frame.f_locals.copy()
        loc.update(kwargs)

        with wcheck:
            code = _pytest._code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
    else:
        # Callable form: run ``func(*args[1:], **kwargs)`` under the checker.
        func = args[0]
        with wcheck:
            return func(*args[1:], **kwargs)
+
+
class RecordedWarning(object):
    """Value object describing one warning captured by ``WarningsRecorder``.

    The attributes mirror the arguments of ``warnings.showwarning``.
    """

    def __init__(self, message, category, filename, lineno, file, line):
        (self.message, self.category, self.filename,
         self.lineno, self.file, self.line) = (
            message, category, filename, lineno, file, line)
+
+
class WarningsRecorder(object):
    """A context manager to record raised warnings.

    Adapted from `warnings.catch_warnings`.
    """

    def __init__(self, module=None):
        # Default to the live ``warnings`` module so monkeypatching works.
        self._module = sys.modules['warnings'] if module is None else module
        self._entered = False
        self._list = []

    @property
    def list(self):
        """The list of recorded warnings."""
        return self._list

    def __getitem__(self, i):
        """Get a recorded warning by index."""
        return self._list[i]

    def __iter__(self):
        """Iterate through the recorded warnings."""
        return iter(self._list)

    def __len__(self):
        """The number of recorded warnings."""
        return len(self._list)

    def pop(self, cls=Warning):
        """Pop the first recorded warning, raise exception if not exists."""
        for index, recorded in enumerate(self._list):
            if issubclass(recorded.category, cls):
                return self._list.pop(index)
        __tracebackhide__ = True
        raise AssertionError("%r not found in warning list" % cls)

    def clear(self):
        """Clear the list of recorded warnings."""
        del self._list[:]

    def __enter__(self):
        if self._entered:
            __tracebackhide__ = True
            raise RuntimeError("Cannot enter %r twice" % self)
        self._entered = True
        # Save the module-global filter and handler so __exit__ can restore.
        self._filters = self._module.filters
        self._module.filters = self._filters[:]
        self._showwarning = self._module.showwarning

        def record(message, category, filename, lineno,
                   file=None, line=None):
            self._list.append(RecordedWarning(
                message, category, filename, lineno, file, line))

            # still perform old showwarning functionality
            self._showwarning(
                message, category, filename, lineno, file=file, line=line)

        self._module.showwarning = record

        # allow the same warning to be raised more than once
        self._module.simplefilter('always')
        return self

    def __exit__(self, *exc_info):
        if not self._entered:
            __tracebackhide__ = True
            raise RuntimeError("Cannot exit %r without entering first" % self)
        self._module.filters = self._filters
        self._module.showwarning = self._showwarning
+
+
class WarningsChecker(WarningsRecorder):
    """A ``WarningsRecorder`` that additionally fails, on clean exit, when
    none of the expected warning categories were raised.

    ``expected_warning`` may be a warning class or a tuple of warning
    classes; anything else raises ``TypeError``.
    """

    def __init__(self, expected_warning=None, module=None):
        super(WarningsChecker, self).__init__(module=module)

        msg = ("exceptions must be old-style classes or "
               "derived from Warning, not %s")
        if isinstance(expected_warning, tuple):
            for exc in expected_warning:
                if not inspect.isclass(exc):
                    raise TypeError(msg % type(exc))
        elif inspect.isclass(expected_warning):
            # Normalize a single class to a one-element tuple.
            expected_warning = (expected_warning,)
        elif expected_warning is not None:
            raise TypeError(msg % type(expected_warning))

        self.expected_warning = expected_warning

    def __exit__(self, *exc_info):
        super(WarningsChecker, self).__exit__(*exc_info)

        # only check if we're not currently handling an exception
        if all(a is None for a in exc_info):
            if self.expected_warning is not None:
                # Bug fix: match subclasses too.  A raised subclass of the
                # expected category must satisfy ``warns(...)`` just like
                # ``except`` does; the previous exact-class membership test
                # (``r.category in self.expected_warning``) missed them.
                if not any(issubclass(r.category, self.expected_warning)
                           for r in self):
                    __tracebackhide__ = True
                    pytest.fail("DID NOT WARN")
diff --git a/lib/spack/external/_pytest/resultlog.py b/lib/spack/external/_pytest/resultlog.py
new file mode 100644
index 0000000000..fc00259834
--- /dev/null
+++ b/lib/spack/external/_pytest/resultlog.py
@@ -0,0 +1,107 @@
+""" log machine-parseable test session result information in a plain
+text file.
+"""
+
+import py
+import os
+
def pytest_addoption(parser):
    """Register the deprecated ``--resultlog`` command-line option."""
    group = parser.getgroup("terminal reporting", "resultlog plugin options")
    group.addoption(
        '--resultlog', '--result-log', action="store",
        metavar="path", default=None,
        help="DEPRECATED path for machine-readable result log.")
+
def pytest_configure(config):
    """Open the result log and register the plugin (master process only)."""
    resultlog = config.option.resultlog
    # prevent opening resultlog on slave nodes (xdist)
    if not resultlog or hasattr(config, 'slaveinput'):
        return
    dirname = os.path.dirname(os.path.abspath(resultlog))
    if not os.path.isdir(dirname):
        os.makedirs(dirname)
    logfile = open(resultlog, 'w', 1)  # line buffered
    config._resultlog = ResultLog(config, logfile)
    config.pluginmanager.register(config._resultlog)

    # The option is deprecated; emit the standard warning.
    from _pytest.deprecated import RESULT_LOG
    config.warn('C1', RESULT_LOG)
+
def pytest_unconfigure(config):
    """Close the log file and unregister the plugin, if it was set up."""
    resultlog = getattr(config, '_resultlog', None)
    if not resultlog:
        return
    resultlog.logfile.close()
    del config._resultlog
    config.pluginmanager.unregister(resultlog)
+
def generic_path(item):
    """Build a compact textual path for ``item`` from its collection chain.

    Filesystem-level hops are joined with ``/``; the first in-file hop uses
    ``:`` and subsequent ones use ``.``.  A name starting with ``(`` or ``[``
    (instance or parametrization markers) is fused onto the previous part.
    """
    chain = item.listchain()
    parts = [chain[0].name]
    current_fspath = chain[0].fspath
    fs_boundary = False
    for node in chain[1:]:
        node_fspath = node.fspath
        if node_fspath == current_fspath:
            # Same file as before: ':' right after a file hop, '.' otherwise.
            if fs_boundary:
                parts.append(':')
                fs_boundary = False
            else:
                parts.append('.')
        else:
            parts.append('/')
            fs_boundary = True
        name = node.name
        if name[0] in '([':
            # Fuse instance/param suffixes directly onto the previous name.
            parts.pop()
        parts.append(name)
        current_fspath = node_fspath
    return ''.join(parts)
+
class ResultLog(object):
    # Plugin object that writes one status line per test (plus an indented
    # longrepr) to a machine-parseable log file.
    def __init__(self, config, logfile):
        self.config = config
        self.logfile = logfile # preferably line buffered

    def write_log_entry(self, testpath, lettercode, longrepr):
        # One "<code> <path>" header line, then the longrepr indented by a
        # single space so parsers can distinguish continuation lines.
        py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
        for line in longrepr.splitlines():
            py.builtin.print_(" %s" % line, file=self.logfile)

    def log_outcome(self, report, lettercode, longrepr):
        # Collect reports may lack a nodeid; fall back to the file path.
        testpath = getattr(report, 'nodeid', None)
        if testpath is None:
            testpath = report.fspath
        self.write_log_entry(testpath, lettercode, longrepr)

    def pytest_runtest_logreport(self, report):
        # Only log the "call" phase for passing tests; setup/teardown are
        # logged when they fail.
        if report.when != "call" and report.passed:
            return
        res = self.config.hook.pytest_report_teststatus(report=report)
        code = res[1]
        # NOTE: the xfail codes 'x'/'X' must be checked before the generic
        # passed/failed/skipped flags — an xpassed report is also "passed".
        if code == 'x':
            longrepr = str(report.longrepr)
        elif code == 'X':
            longrepr = ''
        elif report.passed:
            longrepr = ""
        elif report.failed:
            longrepr = str(report.longrepr)
        elif report.skipped:
            longrepr = str(report.longrepr[2])
        self.log_outcome(report, code, longrepr)

    def pytest_collectreport(self, report):
        if not report.passed:
            if report.failed:
                code = "F"
                longrepr = str(report.longrepr)
            else:
                assert report.skipped
                code = "S"
                # skipped collect reports carry a (path, lineno, msg) tuple
                longrepr = "%s:%d: %s" % report.longrepr
            self.log_outcome(report, code, longrepr)

    def pytest_internalerror(self, excrepr):
        # Internal errors get a '!' line keyed by the crash location.
        reprcrash = getattr(excrepr, 'reprcrash', None)
        path = getattr(reprcrash, "path", None)
        if path is None:
            path = "cwd:%s" % py.path.local()
        self.write_log_entry(path, '!', str(excrepr))
diff --git a/lib/spack/external/_pytest/runner.py b/lib/spack/external/_pytest/runner.py
new file mode 100644
index 0000000000..eb29e7370c
--- /dev/null
+++ b/lib/spack/external/_pytest/runner.py
@@ -0,0 +1,578 @@
+""" basic collect and runtest protocol implementations """
+import bdb
+import sys
+from time import time
+
+import py
+import pytest
+from _pytest._code.code import TerminalRepr, ExceptionInfo
+
+
def pytest_namespace():
    """Names re-exported on the ``pytest`` namespace by this plugin."""
    return dict(fail=fail, skip=skip, importorskip=importorskip, exit=exit)
+
+#
+# pytest plugin hooks
+
def pytest_addoption(parser):
    """Register the ``--durations`` reporting option."""
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    group.addoption(
        '--durations', action="store", type=int, default=None, metavar="N",
        help="show N slowest setup/test durations (N=0 for all).")
+
def pytest_terminal_summary(terminalreporter):
    """Print the N slowest test-phase durations when ``--durations`` is set."""
    durations = terminalreporter.config.option.durations
    if durations is None:
        return
    tr = terminalreporter
    # Collect every report that carries timing information.
    timed = [rep for reports in tr.stats.values()
             for rep in reports if hasattr(rep, 'duration')]
    if not timed:
        return
    # Stable ascending sort then reverse, preserving original tie ordering.
    timed.sort(key=lambda rep: rep.duration)
    timed.reverse()
    if not durations:
        tr.write_sep("=", "slowest test durations")
    else:
        tr.write_sep("=", "slowest %s test durations" % durations)
        timed = timed[:durations]

    for rep in timed:
        nodeid = rep.nodeid.replace("::()::", "::")
        tr.write_line("%02.2fs %-8s %s" %
            (rep.duration, rep.when, nodeid))
+
def pytest_sessionstart(session):
    # Create the shared SetupState used to run setup/teardown for all items.
    session._setupstate = SetupState()
def pytest_sessionfinish(session):
    # Tear everything down, including session-scoped finalizers.
    session._setupstate.teardown_all()
+
class NodeInfo:
    """Lightweight holder for a collection node's ``location`` triple."""

    def __init__(self, location):
        self.location = location
+
def pytest_runtest_protocol(item, nextitem):
    # Announce the test, run the setup/call/teardown protocol, and return
    # True to claim this firstresult hook so no other plugin re-runs it.
    item.ihook.pytest_runtest_logstart(
        nodeid=item.nodeid, location=item.location,
    )
    runtestprotocol(item, nextitem=nextitem)
    return True
+
def runtestprotocol(item, log=True, nextitem=None):
    # Run the setup/call/teardown phases for ``item`` and return one report
    # per executed phase.  ``nextitem`` lets teardown keep alive fixtures
    # the next test will reuse.
    hasrequest = hasattr(item, "_request")
    if hasrequest and not item._request:
        # The request was discarded by a previous run; rebuild it.
        item._initrequest()
    rep = call_and_report(item, "setup", log)
    reports = [rep]
    if rep.passed:
        # Only run the test body when setup succeeded (and --setup-only
        # / --setup-show are honoured).
        if item.config.option.setupshow:
            show_test_item(item)
        if not item.config.option.setuponly:
            reports.append(call_and_report(item, "call", log))
    reports.append(call_and_report(item, "teardown", log,
        nextitem=nextitem))
    # after all teardown hooks have been called
    # want funcargs and request info to go away
    if hasrequest:
        item._request = False
        item.funcargs = None
    return reports
+
def show_test_item(item):
    """Show test function, parameters and the fixtures of the test item."""
    writer = item.config.get_terminal_writer()
    writer.line()
    writer.write(' ' * 8)
    writer.write(item._nodeid)
    used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
    if used_fixtures:
        names = ', '.join(used_fixtures)
        writer.write(' (fixtures used: {0})'.format(names))
+
def pytest_runtest_setup(item):
    # Push the item's collector chain and run its setup functions.
    item.session._setupstate.prepare(item)
+
def pytest_runtest_call(item):
    """Run the item's test body, recording exception state for postmortem.

    On failure the exception is stored in ``sys.last_type`` /
    ``sys.last_value`` / ``sys.last_traceback`` (mirroring the interactive
    interpreter) so tools like ``pdb.pm()`` can inspect it; it is then
    re-raised unchanged.
    """
    try:
        item.runtest()
    except Exception:
        # Store trace info to allow postmortem debugging.  Renamed locals so
        # the builtin ``type`` is no longer shadowed.
        exc_type, exc_value, exc_tb = sys.exc_info()
        exc_tb = exc_tb.tb_next  # Skip *this* frame
        sys.last_type = exc_type
        sys.last_value = exc_value
        sys.last_traceback = exc_tb
        del exc_tb  # Get rid of it in this namespace
        raise
+
def pytest_runtest_teardown(item, nextitem):
    # Tear down only what the next item will not reuse.
    item.session._setupstate.teardown_exact(item, nextitem)
+
def pytest_report_teststatus(report):
    """Map setup/teardown reports to a (category, shortletter, word) triple.

    Reports for the "call" phase are left for other plugins (returns None).
    """
    if report.when not in ("setup", "teardown"):
        return None
    if report.failed:
        #      category, shortletter, verbose-word
        return "error", "E", "ERROR"
    if report.skipped:
        return "skipped", "s", "SKIPPED"
    return "", "", ""
+
+
+#
+# Implementation
+
def call_and_report(item, when, log=True, **kwds):
    # Invoke the phase-specific runtest hook, turn the outcome into a
    # report, optionally log it, and give interactive handlers (e.g. --pdb)
    # a chance to react to unexpected exceptions.
    call = call_runtest_hook(item, when, **kwds)
    hook = item.ihook
    report = hook.pytest_runtest_makereport(item=item, call=call)
    if log:
        hook.pytest_runtest_logreport(report=report)
    if check_interactive_exception(call, report):
        hook.pytest_exception_interact(node=item, call=call, report=report)
    return report
+
def check_interactive_exception(call, report):
    # An exception warrants interactive handling (e.g. --pdb) unless it is
    # an expected failure, an explicit skip, or the debugger quitting.
    return call.excinfo and (
        not hasattr(report, "wasxfail") and
        not call.excinfo.errisinstance(skip.Exception) and
        not call.excinfo.errisinstance(bdb.BdbQuit))
+
def call_runtest_hook(item, when, **kwds):
    # Wrap the phase-specific hook invocation in a CallInfo so exceptions
    # are captured for reporting instead of propagating.
    hookname = "pytest_runtest_" + when
    ihook = getattr(item.ihook, hookname)
    return CallInfo(lambda: ihook(item=item, **kwds), when=when)
+
class CallInfo:
    """ Result/Exception info a function invocation. """
    #: None or ExceptionInfo object.
    excinfo = None

    def __init__(self, func, when):
        #: context of invocation: one of "setup", "call",
        #: "teardown", "memocollect"
        self.when = when
        self.start = time()
        try:
            self.result = func()
        except KeyboardInterrupt:
            # Record the stop time before letting the interrupt propagate.
            self.stop = time()
            raise
        except BaseException:
            # Capture everything else (incl. SystemExit) for later reporting.
            self.excinfo = ExceptionInfo()
        self.stop = time()

    def __repr__(self):
        status = ("exception: %s" % str(self.excinfo.value)
                  if self.excinfo else "result: %r" % (self.result,))
        return "<CallInfo when=%r %s>" % (self.when, status)
+
def getslaveinfoline(node):
    """Return (and cache on ``node``) a one-line description of an xdist slave."""
    try:
        # Fast path: the formatted line was already computed for this node.
        return node._slaveinfocache
    except AttributeError:
        info = node.slaveinfo
        version = "%s.%s.%s" % info['version_info'][:3]
        line = "[%s] %s -- Python %s %s" % (
            info['id'], info['sysplatform'], version, info['executable'])
        node._slaveinfocache = line
        return line
+
class BaseReport(object):
    """Behaviour shared by test and collect reports."""

    def __init__(self, **kw):
        # Reports are plain attribute bags; subclasses define the schema.
        self.__dict__.update(kw)

    def toterminal(self, out):
        # Prefix with the slave info line when this report came over xdist.
        if hasattr(self, 'node'):
            out.line(getslaveinfoline(self.node))

        longrepr = self.longrepr
        if longrepr is None:
            return

        if hasattr(longrepr, 'toterminal'):
            longrepr.toterminal(out)
        else:
            try:
                out.line(longrepr)
            except UnicodeEncodeError:
                out.line("<unprintable longrepr>")

    def get_sections(self, prefix):
        # NOTE: yields the *prefix* (not the full section name) paired with
        # the content, mirroring the historical behaviour.
        for name, content in self.sections:
            if name.startswith(prefix):
                yield prefix, content

    @property
    def longreprtext(self):
        """
        Read-only property that returns the full string representation
        of ``longrepr``.

        .. versionadded:: 3.0
        """
        tw = py.io.TerminalWriter(stringio=True)
        tw.hasmarkup = False
        self.toterminal(tw)
        exc = tw.stringio.getvalue()
        return exc.strip()

    @property
    def capstdout(self):
        """Return captured text from stdout, if capturing is enabled

        .. versionadded:: 3.0
        """
        return ''.join(text for (_, text)
                       in self.get_sections('Captured stdout'))

    @property
    def capstderr(self):
        """Return captured text from stderr, if capturing is enabled

        .. versionadded:: 3.0
        """
        return ''.join(text for (_, text)
                       in self.get_sections('Captured stderr'))

    @property
    def passed(self):
        return self.outcome == "passed"

    @property
    def failed(self):
        return self.outcome == "failed"

    @property
    def skipped(self):
        return self.outcome == "skipped"

    @property
    def fspath(self):
        # The node id always starts with the file path.
        return self.nodeid.split("::")[0]
+
def pytest_runtest_makereport(item, call):
    # Translate a CallInfo for one phase (setup/call/teardown) into a
    # TestReport, classifying the outcome and building a failure repr.
    when = call.when
    duration = call.stop-call.start
    keywords = dict([(x,1) for x in item.keywords])
    excinfo = call.excinfo
    sections = []
    if not call.excinfo:
        outcome = "passed"
        longrepr = None
    else:
        if not isinstance(excinfo, ExceptionInfo):
            # Raw (non-ExceptionInfo) failure object: pass it through as-is.
            outcome = "failed"
            longrepr = excinfo
        elif excinfo.errisinstance(pytest.skip.Exception):
            outcome = "skipped"
            r = excinfo._getreprcrash()
            # skipped longrepr is a (path, lineno, message) tuple
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            if call.when == "call":
                longrepr = item.repr_failure(excinfo)
            else: # exception in setup or teardown
                longrepr = item._repr_failure_py(excinfo,
                                            style=item.config.option.tbstyle)
    # Attach captured output sections recorded on the item.
    for rwhen, key, content in item._report_sections:
        sections.append(("Captured %s %s" %(key, rwhen), content))
    return TestReport(item.nodeid, item.location,
                      keywords, outcome, longrepr, when,
                      sections, duration)
+
class TestReport(BaseReport):
    """ Basic test report object (also used for setup and teardown calls if
    they fail).
    """

    def __init__(self, nodeid, location, keywords, outcome,
                 longrepr, when, sections=(), duration=0, **extra):
        #: normalized collection node id
        self.nodeid = nodeid
        #: a (filesystempath, lineno, domaininfo) tuple indicating the
        #: actual location of a test item - it might be different from the
        #: collected one e.g. if a method is inherited from a different module.
        self.location = location
        #: a name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords = keywords
        #: test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome
        #: None or a failure representation.
        self.longrepr = longrepr
        #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when = when
        #: list of pairs ``(str, str)`` of extra information which needs to
        #: marshallable. Used by pytest to add captured text
        #: from ``stdout`` and ``stderr``, but may be used by other plugins
        #: to add arbitrary information to reports.
        self.sections = list(sections)
        #: time it took to run just the test
        self.duration = duration
        # Any extra keyword arguments become plain attributes.
        self.__dict__.update(extra)

    def __repr__(self):
        return "<TestReport %r when=%r outcome=%r>" % (
            self.nodeid, self.when, self.outcome)
+
class TeardownErrorReport(BaseReport):
    """Synthetic report for exceptions raised during final session teardown."""
    # Fixed classification: such failures always belong to teardown.
    outcome = "failed"
    when = "teardown"

    def __init__(self, longrepr, **extra):
        self.longrepr = longrepr
        self.sections = []
        self.__dict__.update(extra)
+
def pytest_make_collect_report(collector):
    # Run the collector's (memoized) collect step and convert the result
    # into a CollectReport, downgrading skip-type exceptions to "skipped".
    call = CallInfo(collector._memocollect, "memocollect")
    longrepr = None
    if not call.excinfo:
        outcome = "passed"
    else:
        from _pytest import nose
        # NOTE(review): ``Skipped`` is expected to be defined elsewhere in
        # this module (outside the visible region) — confirm before editing.
        skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
        if call.excinfo.errisinstance(skip_exceptions):
            outcome = "skipped"
            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
            longrepr = (str(r.path), r.lineno, r.message)
        else:
            outcome = "failed"
            errorinfo = collector.repr_failure(call.excinfo)
            if not hasattr(errorinfo, "toterminal"):
                # Wrap plain strings so reporting can render them uniformly.
                errorinfo = CollectErrorRepr(errorinfo)
            longrepr = errorinfo
    rep = CollectReport(collector.nodeid, outcome, longrepr,
        getattr(call, 'result', None))
    rep.call = call  # see collect_one_node
    return rep
+
+
class CollectReport(BaseReport):
    """Report produced for a single collector node."""

    def __init__(self, nodeid, outcome, longrepr, result,
                 sections=(), **extra):
        self.nodeid = nodeid
        self.outcome = outcome
        self.longrepr = longrepr
        # Normalize a missing result to an empty list of collected items.
        self.result = result or []
        self.sections = list(sections)
        self.__dict__.update(extra)

    @property
    def location(self):
        # Collect errors carry no line/domain information; reuse the path.
        return (self.fspath, None, self.fspath)

    def __repr__(self):
        return "<CollectReport %r lenresult=%s outcome=%r>" % (
                self.nodeid, len(self.result), self.outcome)
+
class CollectErrorRepr(TerminalRepr):
    """Terminal representation for a plain-text collection error."""

    def __init__(self, msg):
        self.longrepr = msg

    def toterminal(self, out):
        # Collection errors are always rendered in red.
        out.line(self.longrepr, red=True)
+
+class SetupState(object):
+    """ shared state for setting up/tearing down test items or collectors. """
+    def __init__(self):
+        # stack of collectors whose setup() has run, outermost first
+        self.stack = []
+        # colitem -> list of finalizer callables to run at its teardown
+        self._finalizers = {}
+
+    def addfinalizer(self, finalizer, colitem):
+        """ attach a finalizer to the given colitem.
+        if colitem is None, this will add a finalizer that
+        is called at the end of teardown_all().
+        """
+        assert colitem and not isinstance(colitem, tuple)
+        assert py.builtin.callable(finalizer)
+        #assert colitem in self.stack  # some unit tests don't setup stack :/
+        self._finalizers.setdefault(colitem, []).append(finalizer)
+
+    def _pop_and_teardown(self):
+        # tear down the innermost collector on the stack
+        colitem = self.stack.pop()
+        self._teardown_with_finalization(colitem)
+
+    def _callfinalizers(self, colitem):
+        # run all finalizers registered for colitem (LIFO order)
+        finalizers = self._finalizers.pop(colitem, None)
+        exc = None
+        while finalizers:
+            fin = finalizers.pop()
+            try:
+                fin()
+            except Exception:
+                # XXX Only first exception will be seen by user,
+                #     ideally all should be reported.
+                if exc is None:
+                    exc = sys.exc_info()
+        if exc:
+            py.builtin._reraise(*exc)
+
+    def _teardown_with_finalization(self, colitem):
+        # finalizers first, then the collector's own teardown()
+        self._callfinalizers(colitem)
+        if hasattr(colitem, "teardown"):
+            colitem.teardown()
+        # sanity check: only stack members, None, or tuple keys may remain
+        for colitem in self._finalizers:
+            assert colitem is None or colitem in self.stack \
+             or isinstance(colitem, tuple)
+
+    def teardown_all(self):
+        # unwind the whole stack, then flush any remaining finalizers
+        while self.stack:
+            self._pop_and_teardown()
+        for key in list(self._finalizers):
+            self._teardown_with_finalization(key)
+        assert not self._finalizers
+
+    def teardown_exact(self, item, nextitem):
+        # tear down only what the next item does not share with the current one
+        needed_collectors = nextitem and nextitem.listchain() or []
+        self._teardown_towards(needed_collectors)
+
+    def _teardown_towards(self, needed_collectors):
+        # pop until the stack is a prefix of the needed collector chain
+        while self.stack:
+            if self.stack == needed_collectors[:len(self.stack)]:
+                break
+            self._pop_and_teardown()
+
+    def prepare(self, colitem):
+        """ setup objects along the collector chain to the test-method
+            and teardown previously setup objects."""
+        needed_collectors = colitem.listchain()
+        self._teardown_towards(needed_collectors)
+
+        # check if the last collection node has raised an error
+        for col in self.stack:
+            if hasattr(col, '_prepare_exc'):
+                py.builtin._reraise(*col._prepare_exc)
+        for col in needed_collectors[len(self.stack):]:
+            self.stack.append(col)
+            try:
+                col.setup()
+            except Exception:
+                # remember the failure so later prepare() calls re-raise it
+                col._prepare_exc = sys.exc_info()
+                raise
+
+def collect_one_node(collector):
+    """Collect a single node via the hook machinery and return its report.
+
+    Fires pytest_collectstart, obtains the CollectReport, and notifies
+    pytest_exception_interact if the collection raised an exception that
+    warrants interactive handling (e.g. --pdb).
+    """
+    ihook = collector.ihook
+    ihook.pytest_collectstart(collector=collector)
+    rep = ihook.pytest_make_collect_report(collector=collector)
+    # "call" is attached by pytest_make_collect_report; pop it so it does
+    # not travel with the (serializable) report object.
+    call = rep.__dict__.pop("call", None)
+    if call and check_interactive_exception(call, rep):
+        ihook.pytest_exception_interact(node=collector, call=call, report=rep)
+    return rep
+
+
+# =============================================================
+# Test OutcomeExceptions and helpers for creating them.
+
+
+class OutcomeException(Exception):
+    """ OutcomeException and its subclass instances indicate and
+        contain info about test and collection outcomes.
+    """
+    def __init__(self, msg=None, pytrace=True):
+        Exception.__init__(self, msg)
+        self.msg = msg
+        # when False, reporting shows only msg, no python traceback
+        self.pytrace = pytrace
+
+    def __repr__(self):
+        if self.msg:
+            val = self.msg
+            if isinstance(val, bytes):
+                # decode bytes messages leniently for display
+                val = py._builtin._totext(val, errors='replace')
+            return val
+        return "<%s instance>" %(self.__class__.__name__,)
+    __str__ = __repr__
+
+class Skipped(OutcomeException):
+    """ raised (via pytest.skip) to mark a test as skipped. """
+    # XXX hackish: on 3k we fake to live in the builtins
+    # in order to have Skipped exception printing shorter/nicer
+    __module__ = 'builtins'
+
+    def __init__(self, msg=None, pytrace=True, allow_module_level=False):
+        OutcomeException.__init__(self, msg=msg, pytrace=pytrace)
+        # when True, raising at module level skips the whole module
+        self.allow_module_level = allow_module_level
+
+
+class Failed(OutcomeException):
+    """ raised from an explicit call to pytest.fail() """
+    # fake builtins module for shorter/nicer exception printing (see Skipped)
+    __module__ = 'builtins'
+
+
+class Exit(KeyboardInterrupt):
+    """ raised for immediate program exits (no tracebacks/summaries)"""
+    # subclasses KeyboardInterrupt so it propagates through generic
+    # `except Exception` handlers
+    def __init__(self, msg="unknown reason"):
+        self.msg = msg
+        KeyboardInterrupt.__init__(self, msg)
+
+# exposed helper methods
+
+def exit(msg):
+    """ exit testing process as if KeyboardInterrupt was triggered. """
+    __tracebackhide__ = True
+    raise Exit(msg)
+
+
+# expose the exception class so callers can write `except exit.Exception`
+exit.Exception = Exit
+
+
+def skip(msg=""):
+    """ skip an executing test with the given message.  Note: it's usually
+    better to use the pytest.mark.skipif marker to declare a test to be
+    skipped under certain conditions like mismatching platforms or
+    dependencies.  See the pytest_skipping plugin for details.
+    """
+    __tracebackhide__ = True
+    raise Skipped(msg=msg)
+
+
+# expose the exception class so callers can write `except skip.Exception`
+skip.Exception = Skipped
+
+
+def fail(msg="", pytrace=True):
+    """ explicitly fail an currently-executing test with the given Message.
+
+    :arg pytrace: if false the msg represents the full failure information
+                  and no python traceback will be reported.
+    """
+    __tracebackhide__ = True
+    raise Failed(msg=msg, pytrace=pytrace)
+
+
+# expose the exception class so callers can write `except fail.Exception`
+fail.Exception = Failed
+
+
+def importorskip(modname, minversion=None):
+    """ return imported module if it has at least "minversion" as its
+    __version__ attribute.  If no minversion is specified the a skip
+    is only triggered if the module can not be imported.
+    """
+    __tracebackhide__ = True
+    compile(modname, '', 'eval') # to catch syntaxerrors
+    should_skip = False
+    try:
+        __import__(modname)
+    except ImportError:
+        # Do not raise chained exception here(#1485)
+        should_skip = True
+    if should_skip:
+        # allow_module_level so importorskip works at module import time
+        raise Skipped("could not import %r" %(modname,), allow_module_level=True)
+    mod = sys.modules[modname]
+    if minversion is None:
+        return mod
+    verattr = getattr(mod, '__version__', None)
+    if minversion is not None:
+        try:
+            # pkg_resources handles PEP 440 version comparison
+            from pkg_resources import parse_version as pv
+        except ImportError:
+            raise Skipped("we have a required version for %r but can not import "
+                          "pkg_resources to parse version strings." % (modname,),
+                          allow_module_level=True)
+        if verattr is None or pv(verattr) < pv(minversion):
+            raise Skipped("module %r has __version__ %r, required is: %r" %(
+                          modname, verattr, minversion), allow_module_level=True)
+    return mod
+
diff --git a/lib/spack/external/_pytest/setuponly.py b/lib/spack/external/_pytest/setuponly.py
new file mode 100644
index 0000000000..1752c575f5
--- /dev/null
+++ b/lib/spack/external/_pytest/setuponly.py
@@ -0,0 +1,72 @@
+import pytest
+import sys
+
+
+def pytest_addoption(parser):
+    """ register --setup-only / --setup-show command line options. """
+    group = parser.getgroup("debugconfig")
+    group.addoption('--setuponly', '--setup-only', action="store_true",
+                    help="only setup fixtures, do not execute tests.")
+    group.addoption('--setupshow', '--setup-show', action="store_true",
+                    help="show setup of fixtures while executing tests.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_fixture_setup(fixturedef, request):
+    """ after the fixture is set up, print a SETUP line under --setup-show. """
+    yield
+    config = request.config
+    if config.option.setupshow:
+        if hasattr(request, 'param'):
+            # Save the fixture parameter so ._show_fixture_action() can
+            # display it now and during the teardown (in .finish()).
+            if fixturedef.ids:
+                if callable(fixturedef.ids):
+                    fixturedef.cached_param = fixturedef.ids(request.param)
+                else:
+                    fixturedef.cached_param = fixturedef.ids[
+                        request.param_index]
+            else:
+                fixturedef.cached_param = request.param
+        _show_fixture_action(fixturedef, 'SETUP')
+
+
+def pytest_fixture_post_finalizer(fixturedef):
+    """ print a TEARDOWN line under --setup-show once a fixture finalizes. """
+    # cached_result only exists if the fixture actually ran its setup
+    if hasattr(fixturedef, "cached_result"):
+        config = fixturedef._fixturemanager.config
+        if config.option.setupshow:
+            _show_fixture_action(fixturedef, 'TEARDOWN')
+            if hasattr(fixturedef, "cached_param"):
+                del fixturedef.cached_param
+
+
+def _show_fixture_action(fixturedef, msg):
+    """ write one SETUP/TEARDOWN line for fixturedef to the terminal,
+    temporarily suspending output capture so the line is visible. """
+    config = fixturedef._fixturemanager.config
+    capman = config.pluginmanager.getplugin('capturemanager')
+    if capman:
+        out, err = capman.suspendcapture()
+
+    tw = config.get_terminal_writer()
+    tw.line()
+    # indent by fixture scope depth
+    tw.write(' ' * 2 * fixturedef.scopenum)
+    tw.write('{step} {scope} {fixture}'.format(
+        step=msg.ljust(8),  # align the output to TEARDOWN
+        scope=fixturedef.scope[0].upper(),
+        fixture=fixturedef.argname))
+
+    if msg == 'SETUP':
+        deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
+        if deps:
+            tw.write(' (fixtures used: {0})'.format(', '.join(deps)))
+
+    if hasattr(fixturedef, 'cached_param'):
+        tw.write('[{0}]'.format(fixturedef.cached_param))
+
+    if capman:
+        # restore capture and replay anything captured while suspended
+        capman.resumecapture()
+        sys.stdout.write(out)
+        sys.stderr.write(err)
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_cmdline_main(config):
+    """ --setup-only implies --setup-show. """
+    if config.option.setuponly:
+        config.option.setupshow = True
diff --git a/lib/spack/external/_pytest/setupplan.py b/lib/spack/external/_pytest/setupplan.py
new file mode 100644
index 0000000000..f0853dee54
--- /dev/null
+++ b/lib/spack/external/_pytest/setupplan.py
@@ -0,0 +1,23 @@
+import pytest
+
+
+def pytest_addoption(parser):
+    """ register the --setup-plan command line option. """
+    group = parser.getgroup("debugconfig")
+    group.addoption('--setupplan', '--setup-plan', action="store_true",
+                    help="show what fixtures and tests would be executed but "
+                    "don't execute anything.")
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_fixture_setup(fixturedef, request):
+    # Will return a dummy fixture if the setuponly option is provided.
+    # Returning non-None short-circuits the real fixture setup.
+    if request.config.option.setupplan:
+        fixturedef.cached_result = (None, None, None)
+        return fixturedef.cached_result
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_cmdline_main(config):
+    """ --setup-plan implies both --setup-only and --setup-show. """
+    if config.option.setupplan:
+        config.option.setuponly = True
+        config.option.setupshow = True
diff --git a/lib/spack/external/_pytest/skipping.py b/lib/spack/external/_pytest/skipping.py
new file mode 100644
index 0000000000..a8eaea98aa
--- /dev/null
+++ b/lib/spack/external/_pytest/skipping.py
@@ -0,0 +1,375 @@
+""" support for skip/xfail functions and markers. """
+import os
+import sys
+import traceback
+
+import py
+import pytest
+from _pytest.mark import MarkInfo, MarkDecorator
+
+
+def pytest_addoption(parser):
+    """ register --runxfail and the xfail_strict ini option. """
+    group = parser.getgroup("general")
+    group.addoption('--runxfail',
+           action="store_true", dest="runxfail", default=False,
+           help="run tests even if they are marked xfail")
+
+    parser.addini("xfail_strict", "default for the strict parameter of xfail "
+                                  "markers when not given explicitly (default: "
+                                  "False)",
+                                  default=False,
+                                  type="bool")
+
+
+def pytest_configure(config):
+    """ neutralize pytest.xfail under --runxfail and register the
+    skip/skipif/xfail marker documentation. """
+    if config.option.runxfail:
+        # replace pytest.xfail with a no-op for this session and restore
+        # the original at unconfigure time
+        old = pytest.xfail
+        config._cleanup.append(lambda: setattr(pytest, "xfail", old))
+
+        def nop(*args, **kwargs):
+            pass
+
+        nop.Exception = XFailed
+        setattr(pytest, "xfail", nop)
+
+    config.addinivalue_line("markers",
+        "skip(reason=None): skip the given test function with an optional reason. "
+        "Example: skip(reason=\"no way of currently testing this\") skips the "
+        "test."
+    )
+    config.addinivalue_line("markers",
+        "skipif(condition): skip the given test function if eval(condition) "
+        "results in a True value.  Evaluation happens within the "
+        "module global context. Example: skipif('sys.platform == \"win32\"') "
+        "skips the test if we are on the win32 platform. see "
+        "http://pytest.org/latest/skipping.html"
+    )
+    config.addinivalue_line("markers",
+        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+        "mark the the test function as an expected failure if eval(condition) "
+        "has a True value. Optionally specify a reason for better reporting "
+        "and run=False if you don't even want to execute the test function. "
+        "If only specific exception(s) are expected, you can list them in "
+        "raises, and if the test fails in other ways, it will be reported as "
+        "a true failure. See http://pytest.org/latest/skipping.html"
+    )
+
+
+def pytest_namespace():
+    """ expose xfail as pytest.xfail. """
+    return dict(xfail=xfail)
+
+
+class XFailed(pytest.fail.Exception):
+    """ raised from an explicit call to pytest.xfail() """
+
+
+def xfail(reason=""):
+    """ xfail an executing test or setup functions with the given reason."""
+    __tracebackhide__ = True
+    raise XFailed(reason)
+
+
+# expose the exception class so callers can write `except xfail.Exception`
+xfail.Exception = XFailed
+
+
+class MarkEvaluator:
+    """ evaluate a skipif/xfail marker's condition(s) for one test item. """
+
+    def __init__(self, item, name):
+        self.item = item
+        # marker name to look up, e.g. 'skipif' or 'xfail'
+        self.name = name
+
+    @property
+    def holder(self):
+        # the MarkInfo/MarkDecorator stored in the item's keywords, if any
+        return self.item.keywords.get(self.name)
+
+    def __bool__(self):
+        return bool(self.holder)
+    __nonzero__ = __bool__
+
+    def wasvalid(self):
+        # True unless istrue() recorded an evaluation error in self.exc
+        return not hasattr(self, 'exc')
+
+    def invalidraise(self, exc):
+        # True if a `raises=` constraint exists and exc does not match it
+        raises = self.get('raises')
+        if not raises:
+            return
+        return not isinstance(exc, raises)
+
+    def istrue(self):
+        """ evaluate the condition, turning evaluation errors into a
+        test failure with a readable message. """
+        try:
+            return self._istrue()
+        except Exception:
+            self.exc = sys.exc_info()
+            if isinstance(self.exc[1], SyntaxError):
+                # point a caret at the offending column of the expression
+                msg = [" " * (self.exc[1].offset + 4) + "^",]
+                msg.append("SyntaxError: invalid syntax")
+            else:
+                msg = traceback.format_exception_only(*self.exc[:2])
+            pytest.fail("Error evaluating %r expression\n"
+                        "    %s\n"
+                        "%s"
+                        %(self.name, self.expr, "\n".join(msg)),
+                        pytrace=False)
+
+    def _getglobals(self):
+        # namespace for eval(): os, sys, config plus the test module globals
+        d = {'os': os, 'sys': sys, 'config': self.item.config}
+        d.update(self.item.obj.__globals__)
+        return d
+
+    def _istrue(self):
+        if hasattr(self, 'result'):
+            # already evaluated; result is cached
+            return self.result
+        if self.holder:
+            d = self._getglobals()
+            if self.holder.args or 'condition' in self.holder.kwargs:
+                self.result = False
+                # "holder" might be a MarkInfo or a MarkDecorator; only
+                # MarkInfo keeps track of all parameters it received in an
+                # _arglist attribute
+                if hasattr(self.holder, '_arglist'):
+                    arglist = self.holder._arglist
+                else:
+                    arglist = [(self.holder.args, self.holder.kwargs)]
+                for args, kwargs in arglist:
+                    if 'condition' in kwargs:
+                        args = (kwargs['condition'],)
+                    for expr in args:
+                        self.expr = expr
+                        if isinstance(expr, py.builtin._basestring):
+                            # string conditions are eval()ed (and cached)
+                            result = cached_eval(self.item.config, expr, d)
+                        else:
+                            if "reason" not in kwargs:
+                                # XXX better be checked at collection time
+                                msg = "you need to specify reason=STRING " \
+                                      "when using booleans as conditions."
+                                pytest.fail(msg)
+                            result = bool(expr)
+                        if result:
+                            # first true condition wins
+                            self.result = True
+                            self.reason = kwargs.get('reason', None)
+                            self.expr = expr
+                            return self.result
+            else:
+                # marker present without any condition: unconditionally true
+                self.result = True
+        return getattr(self, 'result', False)
+
+    def get(self, attr, default=None):
+        # fetch a keyword argument given to the marker
+        return self.holder.kwargs.get(attr, default)
+
+    def getexplanation(self):
+        # prefer an explicit reason, else describe the matched condition
+        expl = getattr(self, 'reason', None) or self.get('reason', None)
+        if not expl:
+            if not hasattr(self, 'expr'):
+                return ""
+            else:
+                return "condition: " + str(self.expr)
+        return expl
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+    # Check if skip or skipif are specified as pytest marks
+
+    skipif_info = item.keywords.get('skipif')
+    if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
+        eval_skipif = MarkEvaluator(item, 'skipif')
+        if eval_skipif.istrue():
+            # remember the evaluator so makereport can relocate the skip
+            item._evalskip = eval_skipif
+            pytest.skip(eval_skipif.getexplanation())
+
+    skip_info = item.keywords.get('skip')
+    if isinstance(skip_info, (MarkInfo, MarkDecorator)):
+        item._evalskip = True
+        # reason may come as a kwarg, a positional arg, or be absent
+        if 'reason' in skip_info.kwargs:
+            pytest.skip(skip_info.kwargs['reason'])
+        elif skip_info.args:
+            pytest.skip(skip_info.args[0])
+        else:
+            pytest.skip("unconditional skip")
+
+    item._evalxfail = MarkEvaluator(item, 'xfail')
+    check_xfail_no_run(item)
+
+
+@pytest.mark.hookwrapper
+def pytest_pyfunc_call(pyfuncitem):
+    """ around the test call: honor xfail(run=False) before, and turn an
+    unexpectedly passing strict xfail into a failure after. """
+    check_xfail_no_run(pyfuncitem)
+    outcome = yield
+    passed = outcome.excinfo is None
+    if passed:
+        check_strict_xfail(pyfuncitem)
+
+
+def check_xfail_no_run(item):
+    """check xfail(run=False)"""
+    # --runxfail overrides run=False and executes the test anyway
+    if not item.config.option.runxfail:
+        evalxfail = item._evalxfail
+        if evalxfail.istrue():
+            if not evalxfail.get('run', True):
+                pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
+
+
+def check_strict_xfail(pyfuncitem):
+    """check xfail(strict=True) for the given PASSING test"""
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue():
+        # marker-level strict= wins over the xfail_strict ini default
+        strict_default = pyfuncitem.config.getini('xfail_strict')
+        is_strict_xfail = evalxfail.get('strict', strict_default)
+        if is_strict_xfail:
+            # drop the evaluator so makereport does not re-process it
+            del pyfuncitem._evalxfail
+            explanation = evalxfail.getexplanation()
+            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+    """ post-process the test report to account for skip/xfail semantics. """
+    outcome = yield
+    rep = outcome.get_result()
+    evalxfail = getattr(item, '_evalxfail', None)
+    evalskip = getattr(item, '_evalskip', None)
+    # unitttest special case, see setting of _unexpectedsuccess
+    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
+        from _pytest.compat import _is_unittest_unexpected_success_a_failure
+        if item._unexpectedsuccess:
+            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+        else:
+            rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+            rep.wasxfail = rep.longrepr
+    elif item.config.option.runxfail:
+        pass   # don't interfere
+    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
+        # explicit pytest.xfail() call: report as skipped-with-xfail
+        rep.wasxfail = "reason: " + call.excinfo.value.msg
+        rep.outcome = "skipped"
+    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
+        evalxfail.istrue():
+        if call.excinfo:
+            if evalxfail.invalidraise(call.excinfo.value):
+                # failed with an exception not listed in raises=: real failure
+                rep.outcome = "failed"
+            else:
+                rep.outcome = "skipped"
+                rep.wasxfail = evalxfail.getexplanation()
+        elif call.when == "call":
+            # the xfail-marked test passed: XPASS (strict => failure)
+            strict_default = item.config.getini('xfail_strict')
+            is_strict_xfail = evalxfail.get('strict', strict_default)
+            explanation = evalxfail.getexplanation()
+            if is_strict_xfail:
+                rep.outcome = "failed"
+                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
+            else:
+                rep.outcome = "passed"
+                rep.wasxfail = explanation
+    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
+        # skipped by mark.skipif; change the location of the failure
+        # to point to the item definition, otherwise it will display
+        # the location of where the skip exception was raised within pytest
+        filename, line, reason = rep.longrepr
+        filename, line = item.location[:2]
+        rep.longrepr = filename, line, reason
+
+# called by terminalreporter progress reporting
+def pytest_report_teststatus(report):
+    """ map xfail outcomes to their category/shortletter/verbose word. """
+    if hasattr(report, "wasxfail"):
+        if report.skipped:
+            return "xfailed", "x", "xfail"
+        elif report.passed:
+            return "xpassed", "X", ("XPASS", {'yellow': True})
+
+# called by the terminalreporter instance/plugin
+def pytest_terminal_summary(terminalreporter):
+    """ emit the "short test summary info" section for the -r report chars. """
+    tr = terminalreporter
+    if not tr.reportchars:
+        #for name in "xfailed skipped failed xpassed":
+        #    if not tr.stats.get(name, 0):
+        #        tr.write_line("HINT: use '-r' option to see extra "
+        #              "summary info about tests")
+        #        break
+        return
+
+    # each -r char selects one kind of summary line
+    lines = []
+    for char in tr.reportchars:
+        if char == "x":
+            show_xfailed(terminalreporter, lines)
+        elif char == "X":
+            show_xpassed(terminalreporter, lines)
+        elif char in "fF":
+            show_simple(terminalreporter, lines, 'failed', "FAIL %s")
+        elif char in "sS":
+            show_skipped(terminalreporter, lines)
+        elif char == "E":
+            show_simple(terminalreporter, lines, 'error', "ERROR %s")
+        elif char == 'p':
+            show_simple(terminalreporter, lines, 'passed', "PASSED %s")
+
+    if lines:
+        tr._tw.sep("=", "short test summary info")
+        for line in lines:
+            tr._tw.line(line)
+
+def show_simple(terminalreporter, lines, stat, format):
+    """ append one formatted summary line per report in stats[stat]. """
+    failed = terminalreporter.stats.get(stat)
+    if failed:
+        for rep in failed:
+            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+            lines.append(format %(pos,))
+
+def show_xfailed(terminalreporter, lines):
+    """ append XFAIL summary lines (with reason, when available). """
+    xfailed = terminalreporter.stats.get("xfailed")
+    if xfailed:
+        for rep in xfailed:
+            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+            reason = rep.wasxfail
+            lines.append("XFAIL %s" % (pos,))
+            if reason:
+                lines.append("  " + str(reason))
+
+def show_xpassed(terminalreporter, lines):
+    """ append XPASS summary lines with their xfail reason. """
+    xpassed = terminalreporter.stats.get("xpassed")
+    if xpassed:
+        for rep in xpassed:
+            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+            reason = rep.wasxfail
+            lines.append("XPASS %s %s" %(pos, reason))
+
+def cached_eval(config, expr, d):
+    """ eval expr in namespace d, caching compiled results per-config
+    keyed by the expression string. """
+    if not hasattr(config, '_evalcache'):
+        config._evalcache = {}
+    try:
+        return config._evalcache[expr]
+    except KeyError:
+        import _pytest._code
+        exprcode = _pytest._code.compile(expr, mode="eval")
+        config._evalcache[expr] = x = eval(exprcode, d)
+        return x
+
+
+def folded_skips(skipped):
+    """ group skip reports by their (path, lineno, reason) longrepr and
+    return a list of (count, path, lineno, reason) tuples. """
+    d = {}
+    for event in skipped:
+        key = event.longrepr
+        assert len(key) == 3, (event, key)
+        d.setdefault(key, []).append(event)
+    l = []
+    for key, events in d.items():
+        l.append((len(events),) + key)
+    return l
+
+def show_skipped(terminalreporter, lines):
+    """ append folded SKIP summary lines for all skipped reports. """
+    tr = terminalreporter
+    skipped = tr.stats.get('skipped', [])
+    if skipped:
+        #if not tr.hasopt('skipped'):
+        #    tr.write_line(
+        #        "%d skipped tests, specify -rs for more info" %
+        #        len(skipped))
+        #    return
+        fskips = folded_skips(skipped)
+        if fskips:
+            #tr.write_sep("_", "skipped test summary")
+            for num, fspath, lineno, reason in fskips:
+                # strip the redundant "Skipped: " prefix from the reason
+                if reason.startswith("Skipped: "):
+                    reason = reason[9:]
+                lines.append("SKIP [%d] %s:%d: %s" %
+                    (num, fspath, lineno, reason))
diff --git a/lib/spack/external/_pytest/terminal.py b/lib/spack/external/_pytest/terminal.py
new file mode 100644
index 0000000000..16bf757338
--- /dev/null
+++ b/lib/spack/external/_pytest/terminal.py
@@ -0,0 +1,593 @@
+""" terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
+    EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
+import pytest
+import py
+import sys
+import time
+import platform
+
+import _pytest._pluggy as pluggy
+
+
+def pytest_addoption(parser):
+    """ register the terminal-reporting command line options (-v, -q, -r,
+    --tb, --color, ...). """
+    group = parser.getgroup("terminal reporting", "reporting", after="general")
+    group._addoption('-v', '--verbose', action="count",
+               dest="verbose", default=0, help="increase verbosity."),
+    group._addoption('-q', '--quiet', action="count",
+               dest="quiet", default=0, help="decrease verbosity."),
+    group._addoption('-r',
+         action="store", dest="reportchars", default='', metavar="chars",
+         help="show extra test summary info as specified by chars (f)ailed, "
+              "(E)error, (s)skipped, (x)failed, (X)passed, "
+              "(p)passed, (P)passed with output, (a)all except pP. "
+              "The pytest warnings are displayed at all times except when "
+              "--disable-pytest-warnings is set")
+    group._addoption('--disable-pytest-warnings', default=False,
+                     dest='disablepytestwarnings', action='store_true',
+                     help='disable warnings summary, overrides -r w flag')
+    group._addoption('-l', '--showlocals',
+         action="store_true", dest="showlocals", default=False,
+         help="show locals in tracebacks (disabled by default).")
+    group._addoption('--tb', metavar="style",
+               action="store", dest="tbstyle", default='auto',
+               choices=['auto', 'long', 'short', 'no', 'line', 'native'],
+               help="traceback print mode (auto/long/short/line/native/no).")
+    group._addoption('--fulltrace', '--full-trace',
+               action="store_true", default=False,
+               help="don't cut any tracebacks (default is to cut).")
+    group._addoption('--color', metavar="color",
+               action="store", dest="color", default='auto',
+               choices=['yes', 'no', 'auto'],
+               help="color terminal output (yes/no/auto).")
+
+def pytest_configure(config):
+    """ create and register the TerminalReporter plugin instance. """
+    # net verbosity: -v increments, -q decrements
+    config.option.verbose -= config.option.quiet
+    reporter = TerminalReporter(config, sys.stdout)
+    config.pluginmanager.register(reporter, 'terminalreporter')
+    if config.option.debug or config.option.traceconfig:
+        # echo internal trace messages through the reporter
+        def mywriter(tags, args):
+            msg = " ".join(map(str, args))
+            reporter.write_line("[traceconfig] " + msg)
+        config.trace.root.setprocessor("pytest:config", mywriter)
+
+def getreportopt(config):
+    """ normalize the -r report chars: dedupe, expand 'a', and toggle the
+    warnings char 'w' per --disable-pytest-warnings. """
+    reportopts = ""
+    reportchars = config.option.reportchars
+    if not config.option.disablepytestwarnings and 'w' not in reportchars:
+        reportchars += 'w'
+    elif config.option.disablepytestwarnings and 'w' in reportchars:
+        reportchars = reportchars.replace('w', '')
+    if reportchars:
+        for char in reportchars:
+            if char not in reportopts and char != 'a':
+                reportopts += char
+            elif char == 'a':
+                # 'a' means "all except pP"
+                reportopts = 'fEsxXw'
+    return reportopts
+
+def pytest_report_teststatus(report):
+    """ default mapping of a report to (category, shortletter, word). """
+    if report.passed:
+        letter = "."
+    elif report.skipped:
+        letter = "s"
+    elif report.failed:
+        letter = "F"
+        # lowercase 'f' marks setup/teardown failures (errors)
+        if report.when != "call":
+            letter = "f"
+    return report.outcome, letter, report.outcome.upper()
+
+class WarningReport:
+    """ simple container for a pytest_logwarning event. """
+    def __init__(self, code, message, nodeid=None, fslocation=None):
+        self.code = code
+        self.message = message
+        self.nodeid = nodeid
+        # file location string or (path, lineno) tuple, if known
+        self.fslocation = fslocation
+
+
+class TerminalReporter:
+    def __init__(self, config, file=None):
+        import _pytest.config
+        self.config = config
+        self.verbosity = self.config.option.verbose
+        self.showheader = self.verbosity >= 0
+        self.showfspath = self.verbosity >= 0
+        self.showlongtestinfo = self.verbosity > 0
+        self._numcollected = 0
+
+        self.stats = {}
+        self.startdir = py.path.local()
+        if file is None:
+            file = sys.stdout
+        self._tw = self.writer = _pytest.config.create_terminal_writer(config,
+                                                                       file)
+        self.currentfspath = None
+        self.reportchars = getreportopt(config)
+        self.hasmarkup = self._tw.hasmarkup
+        self.isatty = file.isatty()
+
+    def hasopt(self, char):
+        char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
+        return char in self.reportchars
+
+    def write_fspath_result(self, nodeid, res):
+        fspath = self.config.rootdir.join(nodeid.split("::")[0])
+        if fspath != self.currentfspath:
+            self.currentfspath = fspath
+            fspath = self.startdir.bestrelpath(fspath)
+            self._tw.line()
+            self._tw.write(fspath + " ")
+        self._tw.write(res)
+
+    def write_ensure_prefix(self, prefix, extra="", **kwargs):
+        if self.currentfspath != prefix:
+            self._tw.line()
+            self.currentfspath = prefix
+            self._tw.write(prefix)
+        if extra:
+            self._tw.write(extra, **kwargs)
+            self.currentfspath = -2
+
+    def ensure_newline(self):
+        if self.currentfspath:
+            self._tw.line()
+            self.currentfspath = None
+
+    def write(self, content, **markup):
+        self._tw.write(content, **markup)
+
+    def write_line(self, line, **markup):
+        if not py.builtin._istext(line):
+            line = py.builtin.text(line, errors="replace")
+        self.ensure_newline()
+        self._tw.line(line, **markup)
+
+    def rewrite(self, line, **markup):
+        line = str(line)
+        self._tw.write("\r" + line, **markup)
+
+    def write_sep(self, sep, title=None, **markup):
+        self.ensure_newline()
+        self._tw.sep(sep, title, **markup)
+
+    def section(self, title, sep="=", **kw):
+        self._tw.sep(sep, title, **kw)
+
+    def line(self, msg, **kw):
+        self._tw.line(msg, **kw)
+
+    def pytest_internalerror(self, excrepr):
+        for line in py.builtin.text(excrepr).split("\n"):
+            self.write_line("INTERNALERROR> " + line)
+        return 1
+
+    def pytest_logwarning(self, code, fslocation, message, nodeid):
+        warnings = self.stats.setdefault("warnings", [])
+        if isinstance(fslocation, tuple):
+            fslocation = "%s:%d" % fslocation
+        warning = WarningReport(code=code, fslocation=fslocation,
+                                message=message, nodeid=nodeid)
+        warnings.append(warning)
+
+    def pytest_plugin_registered(self, plugin):
+        """Under --traceconfig, announce each plugin as it registers."""
+        if self.config.option.traceconfig:
+            msg = "PLUGIN registered: %s" % (plugin,)
+            # XXX this event may happen during setup/teardown time
+            #     which unfortunately captures our output here
+            #     which garbles our output if we use self.write_line
+            self.write_line(msg)
+
+    def pytest_deselected(self, items):
+        """Record deselected items so summary_deselected() can count them."""
+        self.stats.setdefault('deselected', []).extend(items)
+
+    def pytest_runtest_logstart(self, nodeid, location):
+        """Print the test location (verbose) or its file path (normal) at start."""
+        # ensure that the path is printed before the
+        # 1st test of a module starts running
+        if self.showlongtestinfo:
+            line = self._locationline(nodeid, *location)
+            self.write_ensure_prefix(line, "")
+        elif self.showfspath:
+            # only the file part of the nodeid, before the first "::"
+            fsid = nodeid.split("::")[0]
+            self.write_fspath_result(fsid, "")
+
+    def pytest_runtest_logreport(self, report):
+        """Record a test report in self.stats and print its progress marker.
+
+        In quiet/normal mode a single status letter is written; in verbose
+        mode the full location line plus status word is written.
+        """
+        rep = report
+        # ask plugins how to categorize this report: (stat key, letter, word)
+        res = self.config.hook.pytest_report_teststatus(report=rep)
+        cat, letter, word = res
+        self.stats.setdefault(cat, []).append(rep)
+        self._tests_ran = True
+        if not letter and not word:
+            # probably passed setup/teardown
+            return
+        if self.verbosity <= 0:
+            if not hasattr(rep, 'node') and self.showfspath:
+                self.write_fspath_result(rep.nodeid, letter)
+            else:
+                self._tw.write(letter)
+        else:
+            if isinstance(word, tuple):
+                # plugins may supply (word, markup) directly
+                word, markup = word
+            else:
+                # NOTE(review): if rep is neither passed/failed/skipped,
+                # `markup` stays unbound and the write below would raise;
+                # upstream relies on those being the only outcomes here.
+                if rep.passed:
+                    markup = {'green':True}
+                elif rep.failed:
+                    markup = {'red':True}
+                elif rep.skipped:
+                    markup = {'yellow':True}
+            line = self._locationline(rep.nodeid, *rep.location)
+            if not hasattr(rep, 'node'):
+                self.write_ensure_prefix(line, word, **markup)
+                #self._tw.write(word, **markup)
+            else:
+                # distributed testing (xdist): prefix with the worker id
+                self.ensure_newline()
+                if hasattr(rep, 'node'):
+                    self._tw.write("[%s] " % rep.node.gateway.id)
+                self._tw.write(word, **markup)
+                self._tw.write(" " + line)
+                self.currentfspath = -2
+
+    def pytest_collection(self):
+        """Announce collection start on non-tty verbose runs (tty uses rewrite())."""
+        if not self.isatty and self.config.option.verbose >= 1:
+            self.write("collecting ... ", bold=True)
+
+    def pytest_collectreport(self, report):
+        """Tally collection errors/skips and update the live collect counter."""
+        if report.failed:
+            self.stats.setdefault("error", []).append(report)
+        elif report.skipped:
+            self.stats.setdefault("skipped", []).append(report)
+        # only count actual test items, not intermediate collectors
+        items = [x for x in report.result if isinstance(x, pytest.Item)]
+        self._numcollected += len(items)
+        if self.isatty:
+            #self.write_fspath_result(report.nodeid, 'E')
+            self.report_collect()
+
+    def report_collect(self, final=False):
+        """Print the "collecting/collected N items" progress line.
+
+        On a tty the line is rewritten in place; otherwise a new line is
+        written.  *final* switches the wording and terminates the line.
+        """
+        if self.config.option.verbose < 0:
+            return
+
+        errors = len(self.stats.get('error', []))
+        skipped = len(self.stats.get('skipped', []))
+        if final:
+            line = "collected "
+        else:
+            line = "collecting "
+        line += str(self._numcollected) + " items"
+        if errors:
+            line += " / %d errors" % errors
+        if skipped:
+            line += " / %d skipped" % skipped
+        if self.isatty:
+            if final:
+                line += " \n"
+            self.rewrite(line, bold=True)
+        else:
+            self.write_line(line)
+
+    def pytest_collection_modifyitems(self):
+        """Collection is complete: emit the final 'collected N items' line."""
+        self.report_collect(True)
+
+    @pytest.hookimpl(trylast=True)
+    def pytest_sessionstart(self, session):
+        """Print the session header: platform, interpreter and tool versions,
+        followed by any extra lines contributed via pytest_report_header."""
+        self._sessionstarttime = time.time()
+        if not self.showheader:
+            return
+        self.write_sep("=", "test session starts", bold=True)
+        verinfo = platform.python_version()
+        msg = "platform %s -- Python %s" % (sys.platform, verinfo)
+        if hasattr(sys, 'pypy_version_info'):
+            verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
+            msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
+        msg += ", pytest-%s, py-%s, pluggy-%s" % (
+               pytest.__version__, py.__version__, pluggy.__version__)
+        if self.verbosity > 0 or self.config.option.debug or \
+           getattr(self.config.option, 'pastebin', None):
+            msg += " -- " + str(sys.executable)
+        self.write_line(msg)
+        lines = self.config.hook.pytest_report_header(
+            config=self.config, startdir=self.startdir)
+        # hook results come last-registered-first; reverse so core lines print first
+        lines.reverse()
+        for line in flatten(lines):
+            self.write_line(line)
+
+    def pytest_report_header(self, config):
+        """Contribute the 'rootdir/inifile' and 'plugins' header lines."""
+        inifile = ""
+        if config.inifile:
+            inifile = config.rootdir.bestrelpath(config.inifile)
+        lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
+
+        plugininfo = config.pluginmanager.list_plugin_distinfo()
+        if plugininfo:
+
+            lines.append(
+                "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
+        return lines
+
+    def pytest_collection_finish(self, session):
+        """Under --collect-only, print the collected tree and any failures.
+
+        Returns 1 if collection failures occurred, 0 otherwise (collect-only);
+        returns None on a normal run.
+        """
+        if self.config.option.collectonly:
+            self._printcollecteditems(session.items)
+            if self.stats.get('failed'):
+                self._tw.sep("!", "collection failures")
+                for rep in self.stats.get('failed'):
+                    rep.toterminal(self._tw)
+                return 1
+            return 0
+        if not self.showheader:
+            return
+        #for i, testarg in enumerate(self.config.args):
+        #    self.write_line("test path %d: %s" %(i+1, testarg))
+
+    def _printcollecteditems(self, items):
+        """Print collected *items* for --collect-only.
+
+        Output depends on verbosity: at -vv (and below) only per-file counts,
+        at -v flat nodeids, otherwise an indented collector tree.
+        """
+        # to print out items and their parent collectors
+        # we take care to leave out Instances aka ()
+        # because later versions are going to get rid of them anyway
+        if self.config.option.verbose < 0:
+            if self.config.option.verbose < -1:
+                # -qq: only show "<file>: <count>" summaries
+                counts = {}
+                for item in items:
+                    name = item.nodeid.split('::', 1)[0]
+                    counts[name] = counts.get(name, 0) + 1
+                for name, count in sorted(counts.items()):
+                    self._tw.line("%s: %d" % (name, count))
+            else:
+                # -q: flat nodeids with Instance "()" markers stripped
+                for item in items:
+                    nodeid = item.nodeid
+                    nodeid = nodeid.replace("::()::", "::")
+                    self._tw.line(nodeid)
+            return
+        # default: walk each item's collector chain, printing collectors
+        # only when they differ from the previous item's chain
+        stack = []
+        indent = ""
+        for item in items:
+            needed_collectors = item.listchain()[1:] # strip root node
+            while stack:
+                if stack == needed_collectors[:len(stack)]:
+                    break
+                stack.pop()
+            for col in needed_collectors[len(stack):]:
+                stack.append(col)
+                #if col.name == "()":
+                #    continue
+                indent = (len(stack) - 1) * "  "
+                self._tw.line("%s%s" % (indent, col))
+
+    @pytest.hookimpl(hookwrapper=True)
+    def pytest_sessionfinish(self, exitstatus):
+        """After other finish hooks run, print the end-of-session summaries."""
+        outcome = yield
+        # re-raise any exception from inner hook implementations
+        outcome.get_result()
+        self._tw.line("")
+        summary_exit_codes = (
+            EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
+            EXIT_NOTESTSCOLLECTED)
+        if exitstatus in summary_exit_codes:
+            self.config.hook.pytest_terminal_summary(terminalreporter=self,
+                                                     exitstatus=exitstatus)
+            self.summary_errors()
+            self.summary_failures()
+            self.summary_warnings()
+            self.summary_passes()
+        if exitstatus == EXIT_INTERRUPTED:
+            self._report_keyboardinterrupt()
+            # drop the memo so pytest_unconfigure does not report it twice
+            del self._keyboardinterrupt_memo
+        self.summary_deselected()
+        self.summary_stats()
+
+    def pytest_keyboard_interrupt(self, excinfo):
+        """Memoize the interrupt's repr for later reporting at session end."""
+        self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
+
+    def pytest_unconfigure(self):
+        """Report a pending KeyboardInterrupt not yet shown by sessionfinish."""
+        if hasattr(self, '_keyboardinterrupt_memo'):
+            self._report_keyboardinterrupt()
+
+    def _report_keyboardinterrupt(self):
+        """Print the memoized KeyboardInterrupt: crash line, or full traceback
+        when --fulltrace is given."""
+        excrepr = self._keyboardinterrupt_memo
+        msg = excrepr.reprcrash.message
+        self.write_sep("!", msg)
+        if "KeyboardInterrupt" in msg:
+            if self.config.option.fulltrace:
+                excrepr.toterminal(self._tw)
+            else:
+                self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
+                excrepr.reprcrash.toterminal(self._tw)
+
+    def _locationline(self, nodeid, fspath, lineno, domain):
+        """Build the display line for a test location, e.g.
+        'path/to/test_x.py::TestY::test_z ' (trailing space included).
+        """
+        def mkrel(nodeid):
+            line = self.config.cwd_relative_nodeid(nodeid)
+            if domain and line.endswith(domain):
+                # re-render the domain part with '::' separators,
+                # but keep '.' inside parametrization brackets intact
+                line = line[:-len(domain)]
+                l = domain.split("[")
+                l[0] = l[0].replace('.', '::')  # don't replace '.' in params
+                line += "[".join(l)
+            return line
+        # collect_fspath comes from testid which has a "/"-normalized path
+
+        if fspath:
+            res = mkrel(nodeid).replace("::()", "")  # parens-normalization
+            if nodeid.split("::")[0] != fspath.replace("\\", "/"):
+                # test was collected from a different file than its nodeid
+                res += " <- " + self.startdir.bestrelpath(fspath)
+        else:
+            res = "[location]"
+        return res + " "
+
+    def _getfailureheadline(self, rep):
+        """Return the headline for a failure section: the test's domain part,
+        or 'test session' for reports without a location."""
+        if hasattr(rep, 'location'):
+            fspath, lineno, domain = rep.location
+            return domain
+        else:
+            return "test session" # XXX?
+
+    def _getcrashline(self, rep):
+        """Return a one-line crash description for --tb=line output.
+
+        Falls back to a 50-char prefix of longrepr, then to "".
+        """
+        try:
+            return str(rep.longrepr.reprcrash)
+        except AttributeError:
+            try:
+                return str(rep.longrepr)[:50]
+            except AttributeError:
+                return ""
+
+    #
+    # summaries for sessionfinish
+    #
+    def getreports(self, name):
+        """Return stats[name] reports, skipping ones already shown by pdb."""
+        l = []
+        for x in self.stats.get(name, []):
+            if not hasattr(x, '_pdbshown'):
+                l.append(x)
+        return l
+
+    def summary_warnings(self):
+        """Print the pytest-warning summary section (enabled by -r w)."""
+        if self.hasopt("w"):
+            warnings = self.stats.get("warnings")
+            if not warnings:
+                return
+            self.write_sep("=", "pytest-warning summary")
+            for w in warnings:
+                self._tw.line("W%s %s %s" % (w.code,
+                              w.fslocation, w.message))
+
+    def summary_passes(self):
+        """Print the PASSES section with full output (enabled by -r P)."""
+        if self.config.option.tbstyle != "no":
+            if self.hasopt("P"):
+                reports = self.getreports('passed')
+                if not reports:
+                    return
+                self.write_sep("=", "PASSES")
+                for rep in reports:
+                    msg = self._getfailureheadline(rep)
+                    self.write_sep("_", msg)
+                    self._outrep_summary(rep)
+
+    def print_teardown_sections(self, rep):
+        """Print the captured sections of *rep* whose name mentions 'teardown'."""
+        for secname, content in rep.sections:
+            if 'teardown' in secname:
+                self._tw.sep('-', secname)
+                # strip a single trailing newline to avoid a blank line
+                if content[-1:] == "\n":
+                    content = content[:-1]
+                self._tw.line(content)
+
+
+    def summary_failures(self):
+        """Print the FAILURES section, honoring --tb style.
+
+        For each failed test also prints captured teardown output from the
+        matching teardown report.
+        """
+        if self.config.option.tbstyle != "no":
+            reports = self.getreports('failed')
+            if not reports:
+                return
+            self.write_sep("=", "FAILURES")
+            for rep in reports:
+                if self.config.option.tbstyle == "line":
+                    line = self._getcrashline(rep)
+                    self.write_line(line)
+                else:
+                    msg = self._getfailureheadline(rep)
+                    markup = {'red': True, 'bold': True}
+                    self.write_sep("_", msg, **markup)
+                    self._outrep_summary(rep)
+                    # getreports('') returns setup/teardown reports (empty key)
+                    for report in self.getreports(''):
+                        if report.nodeid == rep.nodeid and report.when == 'teardown':
+                            self.print_teardown_sections(report)
+
+    def summary_errors(self):
+        """Print the ERRORS section, labeling each report by its phase
+        (collecting / setup / teardown)."""
+        if self.config.option.tbstyle != "no":
+            reports = self.getreports('error')
+            if not reports:
+                return
+            self.write_sep("=", "ERRORS")
+            # NOTE(review): iterates stats['error'] (incl. pdb-shown reports)
+            # although the emptiness check above used the filtered list
+            for rep in self.stats['error']:
+                msg = self._getfailureheadline(rep)
+                if not hasattr(rep, 'when'):
+                    # collect
+                    msg = "ERROR collecting " + msg
+                elif rep.when == "setup":
+                    msg = "ERROR at setup of " + msg
+                elif rep.when == "teardown":
+                    msg = "ERROR at teardown of " + msg
+                self.write_sep("_", msg)
+                self._outrep_summary(rep)
+
+    def _outrep_summary(self, rep):
+        """Write a report's longrepr plus all its captured output sections."""
+        rep.toterminal(self._tw)
+        for secname, content in rep.sections:
+            self._tw.sep("-", secname)
+            if content[-1:] == "\n":
+                content = content[:-1]
+            self._tw.line(content)
+
+    def summary_stats(self):
+        """Print the final colored '<counts> in <seconds> seconds' line."""
+        session_duration = time.time() - self._sessionstarttime
+        (line, color) = build_summary_stats_line(self.stats)
+        msg = "%s in %.2f seconds" % (line, session_duration)
+        markup = {color: True, 'bold': True}
+
+        if self.verbosity >= 0:
+            self.write_sep("=", msg, **markup)
+        if self.verbosity == -1:
+            # quiet mode: plain line, no separator
+            self.write_line(msg, **markup)
+
+    def summary_deselected(self):
+        """Print how many tests were deselected (by -k/-m etc.), if any."""
+        if 'deselected' in self.stats:
+            self.write_sep("=", "%d tests deselected" % (
+                len(self.stats['deselected'])), bold=True)
+
+def repr_pythonversion(v=None):
+    """Format a sys.version_info-like 5-tuple as 'major.minor.micro-level-serial'.
+
+    Defaults to the running interpreter's version; falls back to str(v)
+    when *v* is not a 5-tuple.
+    """
+    if v is None:
+        v = sys.version_info
+    try:
+        return "%s.%s.%s-%s-%s" % v
+    except (TypeError, ValueError):
+        return str(v)
+
+def flatten(l):
+    """Recursively yield leaf elements of arbitrarily nested lists/tuples."""
+    for x in l:
+        if isinstance(x, (list, tuple)):
+            for y in flatten(x):
+                yield y
+        else:
+            yield x
+
+def build_summary_stats_line(stats):
+    """Build the final summary text and its color from the stats mapping.
+
+    Returns ``(line, color)`` where *line* is e.g. '1 failed, 2 passed'
+    (or 'no tests ran') and *color* is 'red', 'yellow' or 'green'.
+    """
+    # known keys define the display order; unknown non-empty keys are
+    # appended and force a yellow line
+    keys = ("failed passed skipped deselected "
+           "xfailed xpassed warnings error").split()
+    key_translation = {'warnings': 'pytest-warnings'}
+    unknown_key_seen = False
+    for key in stats.keys():
+        if key not in keys:
+            if key: # setup/teardown reports have an empty key, ignore them
+                keys.append(key)
+                unknown_key_seen = True
+    parts = []
+    for key in keys:
+        val = stats.get(key, None)
+        if val:
+            key_name = key_translation.get(key, key)
+            parts.append("%d %s" % (len(val), key_name))
+
+    if parts:
+        line = ", ".join(parts)
+    else:
+        line = "no tests ran"
+
+    # color priority: red (failures/errors) > yellow (warnings/unknown)
+    # > green (passes) > yellow (nothing ran)
+    if 'failed' in stats or 'error' in stats:
+        color = 'red'
+    elif 'warnings' in stats or unknown_key_seen:
+        color = 'yellow'
+    elif 'passed' in stats:
+        color = 'green'
+    else:
+        color = 'yellow'
+
+    return (line, color)
+
+
+def _plugin_nameversions(plugininfo):
+    """Return unique '<name>-<version>' strings for installed plugin dists,
+    with the leading 'pytest-' prefix stripped for brevity."""
+    l = []
+    for plugin, dist in plugininfo:
+        # gets us name and version!
+        name = '{dist.project_name}-{dist.version}'.format(dist=dist)
+        # questionable convenience, but it keeps things short
+        if name.startswith("pytest-"):
+            name = name[7:]
+        # we decided to print python package names
+        # they can have more than one plugin
+        if name not in l:
+            l.append(name)
+    return l
diff --git a/lib/spack/external/_pytest/tmpdir.py b/lib/spack/external/_pytest/tmpdir.py
new file mode 100644
index 0000000000..28a6b06366
--- /dev/null
+++ b/lib/spack/external/_pytest/tmpdir.py
@@ -0,0 +1,124 @@
+""" support for providing temporary directories to test functions.  """
+import re
+
+import pytest
+import py
+from _pytest.monkeypatch import MonkeyPatch
+
+
+class TempdirFactory:
+    """Factory for temporary directories under the common base temp directory.
+
+    The base directory can be configured using the ``--basetemp`` option.
+    """
+
+    def __init__(self, config):
+        # config: the pytest Config object; trace: tmpdir-scoped trace logger
+        self.config = config
+        self.trace = config.trace.get("tmpdir")
+
+    def ensuretemp(self, string, dir=1):
+        """ (deprecated) return temporary directory path with
+            the given string as the trailing part.  It is usually
+            better to use the 'tmpdir' function argument which
+            provides an empty unique-per-test-invocation directory
+            and is guaranteed to be empty.
+        """
+        #py.log._apiwarn(">1.1", "use tmpdir function argument")
+        return self.getbasetemp().ensure(string, dir=dir)
+
+    def mktemp(self, basename, numbered=True):
+        """Create a subdirectory of the base temporary directory and return it.
+        If ``numbered``, ensure the directory is unique by adding a number
+        prefix greater than any existing one.
+        """
+        basetemp = self.getbasetemp()
+        if not numbered:
+            p = basetemp.mkdir(basename)
+        else:
+            p = py.path.local.make_numbered_dir(prefix=basename,
+                keep=0, rootdir=basetemp, lock_timeout=None)
+        self.trace("mktemp", p)
+        return p
+
+    def getbasetemp(self):
+        """ return base temporary directory. """
+        try:
+            # cached after first computation
+            return self._basetemp
+        except AttributeError:
+            basetemp = self.config.option.basetemp
+            if basetemp:
+                # explicit --basetemp: wipe and recreate it
+                basetemp = py.path.local(basetemp)
+                if basetemp.check():
+                    basetemp.remove()
+                basetemp.mkdir()
+            else:
+                temproot = py.path.local.get_temproot()
+                user = get_user()
+                if user:
+                    # use a sub-directory in the temproot to speed-up
+                    # make_numbered_dir() call
+                    rootdir = temproot.join('pytest-of-%s' % user)
+                else:
+                    rootdir = temproot
+                rootdir.ensure(dir=1)
+                basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
+                                                           rootdir=rootdir)
+            self._basetemp = t = basetemp.realpath()
+            self.trace("new basetemp", t)
+            return t
+
+    def finish(self):
+        """Session-end cleanup hook; currently only traces the event."""
+        self.trace("finish")
+
+
+def get_user():
+    """Return the current user name, or None if getuser() does not work
+    in the current environment (see #1010).
+    """
+    import getpass
+    try:
+        return getpass.getuser()
+    except (ImportError, KeyError):
+        return None
+
+
+# backward compatibility
+TempdirHandler = TempdirFactory
+
+
+def pytest_configure(config):
+    """Create a TempdirFactory and attach it to the config object.
+
+    This is to comply with existing plugins which expect the handler to be
+    available at pytest_configure time, but ideally should be moved entirely
+    to the tmpdir_factory session fixture.
+    """
+    mp = MonkeyPatch()
+    t = TempdirFactory(config)
+    # registered cleanups undo the monkeypatching and finalize the factory
+    config._cleanup.extend([mp.undo, t.finish])
+    mp.setattr(config, '_tmpdirhandler', t, raising=False)
+    # legacy module-level API: pytest.ensuretemp
+    mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
+
+
+@pytest.fixture(scope='session')
+def tmpdir_factory(request):
+    """Return a TempdirFactory instance for the test session.
+    """
+    # the factory was attached to config in pytest_configure above
+    return request.config._tmpdirhandler
+
+
+@pytest.fixture
+def tmpdir(request, tmpdir_factory):
+    """Return a temporary directory path object
+    which is unique to each test function invocation,
+    created as a sub directory of the base temporary
+    directory.  The returned object is a `py.path.local`_
+    path object.
+    """
+    name = request.node.name
+    # sanitize the test name into a filesystem-safe directory name
+    name = re.sub("[\W]", "_", name)
+    MAXVAL = 30
+    if len(name) > MAXVAL:
+        name = name[:MAXVAL]
+    x = tmpdir_factory.mktemp(name, numbered=True)
+    return x
diff --git a/lib/spack/external/_pytest/unittest.py b/lib/spack/external/_pytest/unittest.py
new file mode 100644
index 0000000000..73224010b2
--- /dev/null
+++ b/lib/spack/external/_pytest/unittest.py
@@ -0,0 +1,217 @@
+""" discovery and running of std-library "unittest" style tests. """
+from __future__ import absolute_import
+
+import sys
+import traceback
+
+import pytest
+# for transfering markers
+import _pytest._code
+from _pytest.python import transfer_markers
+from _pytest.skipping import MarkEvaluator
+
+
+def pytest_pycollect_makeitem(collector, name, obj):
+    """Collect unittest.TestCase subclasses as UnitTestCase collectors."""
+    # has unittest been imported and is obj a subclass of its TestCase?
+    try:
+        if not issubclass(obj, sys.modules["unittest"].TestCase):
+            return
+    except Exception:
+        # obj not a class, or unittest not imported: not ours
+        return
+    # yes, so let's collect it
+    return UnitTestCase(name, parent=collector)
+
+
+class UnitTestCase(pytest.Class):
+    """Collector for a unittest.TestCase subclass."""
+    # marker for fixturemanger.getfixtureinfo()
+    # to declare that our children do not support funcargs
+    nofuncargs = True
+                                              
+    def setup(self):
+        """Run setUpClass and register tearDownClass, unless class is skipped."""
+        cls = self.obj
+        if getattr(cls, '__unittest_skip__', False):
+            return  # skipped
+        setup = getattr(cls, 'setUpClass', None)
+        if setup is not None:
+            setup()
+        teardown = getattr(cls, 'tearDownClass', None)
+        if teardown is not None:
+            self.addfinalizer(teardown)
+        super(UnitTestCase, self).setup()
+
+    def collect(self):
+        """Yield a TestCaseFunction for each test method found by TestLoader."""
+        from unittest import TestLoader
+        cls = self.obj
+        if not getattr(cls, "__test__", True):
+            return
+        self.session._fixturemanager.parsefactories(self, unittest=True)
+        loader = TestLoader()
+        module = self.getparent(pytest.Module).obj
+        foundsomething = False
+        for name in loader.getTestCaseNames(self.obj):
+            x = getattr(self.obj, name)
+            if not getattr(x, '__test__', True):
+                continue
+            # im_func: unwrap py2 unbound methods so markers land on the function
+            funcobj = getattr(x, 'im_func', x)
+            transfer_markers(funcobj, cls, module)
+            yield TestCaseFunction(name, parent=self)
+            foundsomething = True
+
+        if not foundsomething:
+            # fall back to a runTest method, except twisted trial's default one
+            runtest = getattr(self.obj, 'runTest', None)
+            if runtest is not None:
+                ut = sys.modules.get("twisted.trial.unittest", None)
+                if ut is None or runtest != ut.TestCase.runTest:
+                    yield TestCaseFunction('runTest', parent=self)
+
+
+
+class TestCaseFunction(pytest.Function):
+    """A pytest item wrapping a single unittest test method.
+
+    Also acts as the unittest *result* object: the TestCase calls the
+    startTest/addError/addFailure/... methods below on it during runtest().
+    """
+    _excinfo = None
+
+    def setup(self):
+        """Instantiate the TestCase for this method and run setup_method."""
+        self._testcase = self.parent.obj(self.name)
+        self._fix_unittest_skip_decorator()
+        self._obj = getattr(self._testcase, self.name)
+        if hasattr(self._testcase, 'setup_method'):
+            self._testcase.setup_method(self._obj)
+        if hasattr(self, "_request"):
+            self._request._fillfixtures()
+
+    def _fix_unittest_skip_decorator(self):
+        """
+        The @unittest.skip decorator calls functools.wraps(self._testcase)
+        The call to functools.wraps() fails unless self._testcase
+        has a __name__ attribute. This is usually automatically supplied
+        if the test is a function or method, but we need to add manually
+        here.
+
+        See issue #1169
+        """
+        if sys.version_info[0] == 2:
+            setattr(self._testcase, "__name__", self.name)
+
+    def teardown(self):
+        """Run teardown_method and drop references to the TestCase instance."""
+        if hasattr(self._testcase, 'teardown_method'):
+            self._testcase.teardown_method(self._obj)
+        # Allow garbage collection on TestCase instance attributes.
+        self._testcase = None
+        self._obj = None
+
+    def startTest(self, testcase):
+        # part of the unittest result-object protocol; nothing to do
+        pass
+
+    def _addexcinfo(self, rawexcinfo):
+        """Store exception info from the TestCase run for later reporting."""
+        # unwrap potential exception info (see twisted trial support below)
+        rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
+        try:
+            excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
+        except TypeError:
+            # rawexcinfo is not a standard (type, value, tb) triple; render
+            # it via traceback and report through pytest.fail instead
+            try:
+                try:
+                    l = traceback.format_exception(*rawexcinfo)
+                    l.insert(0, "NOTE: Incompatible Exception Representation, "
+                        "displaying natively:\n\n")
+                    pytest.fail("".join(l), pytrace=False)
+                except (pytest.fail.Exception, KeyboardInterrupt):
+                    raise
+                except:
+                    pytest.fail("ERROR: Unknown Incompatible Exception "
+                        "representation:\n%r" %(rawexcinfo,), pytrace=False)
+            except KeyboardInterrupt:
+                raise
+            except pytest.fail.Exception:
+                excinfo = _pytest._code.ExceptionInfo()
+        self.__dict__.setdefault('_excinfo', []).append(excinfo)
+
+    def addError(self, testcase, rawexcinfo):
+        self._addexcinfo(rawexcinfo)
+    def addFailure(self, testcase, rawexcinfo):
+        self._addexcinfo(rawexcinfo)
+
+    def addSkip(self, testcase, reason):
+        """Translate a unittest skip into a pytest skip report."""
+        try:
+            pytest.skip(reason)
+        except pytest.skip.Exception:
+            self._evalskip = MarkEvaluator(self, 'SkipTest')
+            self._evalskip.result = True
+            self._addexcinfo(sys.exc_info())
+
+    def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
+        """Translate a unittest expected failure into a pytest xfail."""
+        try:
+            pytest.xfail(str(reason))
+        except pytest.xfail.Exception:
+            self._addexcinfo(sys.exc_info())
+
+    def addUnexpectedSuccess(self, testcase, reason=""):
+        # remembered so the skipping plugin can report XPASS(strict)
+        self._unexpectedsuccess = reason
+
+    def addSuccess(self, testcase):
+        pass
+
+    def stopTest(self, testcase):
+        pass
+
+    def runtest(self):
+        """Run the TestCase with self as the result object (or debug() under pdb)."""
+        if self.config.pluginmanager.get_plugin("pdbinvoke") is None:
+            self._testcase(result=self)
+        else:
+            # disables tearDown and cleanups for post mortem debugging (see #1890)
+            self._testcase.debug()
+
+
+    def _prunetraceback(self, excinfo):
+        """Hide unittest-internal frames (marked by __unittest globals)."""
+        pytest.Function._prunetraceback(self, excinfo)
+        traceback = excinfo.traceback.filter(
+            lambda x:not x.frame.f_globals.get('__unittest'))
+        if traceback:
+            excinfo.traceback = traceback
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_makereport(item, call):
+    """Attach an exception captured via the unittest result protocol to *call*
+    so report generation sees it as the test outcome."""
+    if isinstance(item, TestCaseFunction):
+        if item._excinfo:
+            call.excinfo = item._excinfo.pop(0)
+            try:
+                # the call "succeeded" from pytest's view; drop the result
+                del call.result
+            except AttributeError:
+                pass
+
+# twisted trial support
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_protocol(item):
+    """For twisted trial tests, temporarily patch twisted's Failure.__init__
+    so the raw exc_info is stashed on the Failure for _addexcinfo to unwrap.
+    The patch is reverted after the test runs."""
+    if isinstance(item, TestCaseFunction) and \
+       'twisted.trial.unittest' in sys.modules:
+        ut = sys.modules['twisted.python.failure']
+        Failure__init__ = ut.Failure.__init__
+        check_testcase_implements_trial_reporter()
+
+        def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
+            captureVars=None):
+            if exc_value is None:
+                self._rawexcinfo = sys.exc_info()
+            else:
+                if exc_type is None:
+                    exc_type = type(exc_value)
+                self._rawexcinfo = (exc_type, exc_value, exc_tb)
+            try:
+                Failure__init__(self, exc_value, exc_type, exc_tb,
+                    captureVars=captureVars)
+            except TypeError:
+                # older twisted versions lack the captureVars parameter
+                Failure__init__(self, exc_value, exc_type, exc_tb)
+
+        ut.Failure.__init__ = excstore
+        yield
+        ut.Failure.__init__ = Failure__init__
+    else:
+        yield
+
+
+def check_testcase_implements_trial_reporter(done=[]):
+    """Declare (once per process) that TestCaseFunction implements twisted's
+    IReporter interface; the mutable default *done* acts as the run-once flag."""
+    if done:
+        return
+    from zope.interface import classImplements
+    from twisted.trial.itrial import IReporter
+    classImplements(TestCaseFunction, IReporter)
+    done.append(1)
diff --git a/lib/spack/external/_pytest/vendored_packages/README.md b/lib/spack/external/_pytest/vendored_packages/README.md
new file mode 100644
index 0000000000..b5fe6febb0
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/README.md
@@ -0,0 +1,13 @@
+This directory vendors the `pluggy` module.
+
+For a more detailed discussion of the reasons for vendoring this
+package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).
+
+To update the current version, execute:
+
+```
+$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
+```
+
+And commit the modified files. The `pluggy-<version>.dist-info` directory 
+created by `pip` should be added as well.
diff --git a/lib/spack/external/_pytest/vendored_packages/__init__.py b/lib/spack/external/_pytest/vendored_packages/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
new file mode 100644
index 0000000000..da0e7a6ed7
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/DESCRIPTION.rst
@@ -0,0 +1,11 @@
+
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
new file mode 100644
index 0000000000..a1b589e38a
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000..121017d086
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/LICENSE.txt
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42) 
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
new file mode 100644
index 0000000000..bd88517c94
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/METADATA
@@ -0,0 +1,40 @@
+Metadata-Version: 2.0
+Name: pluggy
+Version: 0.4.0
+Summary: plugin and hook calling mechanisms for python
+Home-page: https://github.com/pytest-dev/pluggy
+Author: Holger Krekel
+Author-email: holger at merlinux.eu
+License: MIT license
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: win32
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+
+
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
new file mode 100644
index 0000000000..3003a3bf2b
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/RECORD
@@ -0,0 +1,9 @@
+pluggy.py,sha256=u0oG9cv-oLOkNvEBlwnnu8pp1AyxpoERgUO00S3rvpQ,31543
+pluggy-0.4.0.dist-info/DESCRIPTION.rst,sha256=ltvjkFd40LW_xShthp6RRVM6OB_uACYDFR3kTpKw7o4,307
+pluggy-0.4.0.dist-info/LICENSE.txt,sha256=ruwhUOyV1HgE9F35JVL9BCZ9vMSALx369I4xq9rhpkM,1134
+pluggy-0.4.0.dist-info/METADATA,sha256=pe2hbsqKFaLHC6wAQPpFPn0KlpcPfLBe_BnS4O70bfk,1364
+pluggy-0.4.0.dist-info/RECORD,,
+pluggy-0.4.0.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116
+pluggy-0.4.0.dist-info/metadata.json,sha256=T3go5L2qOa_-H-HpCZi3EoVKb8sZ3R-fOssbkWo2nvM,1119
+pluggy-0.4.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
+pluggy-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
new file mode 100644
index 0000000000..8b6dd1b5a8
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.29.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
new file mode 100644
index 0000000000..cde22aff02
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/metadata.json
@@ -0,0 +1 @@
+{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": "holger at merlinux.eu", "name": "Holger Krekel", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/pytest-dev/pluggy"}}}, "generator": "bdist_wheel (0.29.0)", "license": "MIT license", "metadata_version": "2.0", "name": "pluggy", "platform": "unix", "summary": "plugin and hook calling mechanisms for python", "version": "0.4.0"}
\ No newline at end of file
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
new file mode 100644
index 0000000000..11bdb5c1f5
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy-0.4.0.dist-info/top_level.txt
@@ -0,0 +1 @@
+pluggy
diff --git a/lib/spack/external/_pytest/vendored_packages/pluggy.py b/lib/spack/external/_pytest/vendored_packages/pluggy.py
new file mode 100644
index 0000000000..9c13932b36
--- /dev/null
+++ b/lib/spack/external/_pytest/vendored_packages/pluggy.py
@@ -0,0 +1,802 @@
+"""
+PluginManager, basic initialization and tracing.
+
+pluggy is the cristallized core of plugin management as used
+by some 150 plugins for pytest.
+
+Pluggy uses semantic versioning. Breaking changes are only foreseen for
+Major releases (incremented X in "X.Y.Z").  If you want to use pluggy in
+your project you should thus use a dependency restriction like
+"pluggy>=0.1.0,<1.0" to avoid surprises.
+
+pluggy is concerned with hook specification, hook implementations and hook
+calling.  For any given hook specification a hook call invokes up to N implementations.
+A hook implementation can influence its position and type of execution:
+if attributed "tryfirst" or "trylast" it will be tried to execute
+first or last.  However, if attributed "hookwrapper" an implementation
+can wrap all calls to non-hookwrapper implementations.  A hookwrapper
+can thus execute some code ahead and after the execution of other hooks.
+
+Hook specification is done by way of a regular python function where
+both the function name and the names of all its arguments are significant.
+Each hook implementation function is verified against the original specification
+function, including the names of all its arguments.  To allow for hook specifications
+to evolve over the livetime of a project, hook implementations can
+accept less arguments.  One can thus add new arguments and semantics to
+a hook specification by adding another argument typically without breaking
+existing hook implementations.
+
+The chosen approach is meant to let a hook designer think carefuly about
+which objects are needed by an extension writer.  By contrast, subclass-based
+extension mechanisms often expose a lot more state and behaviour than needed,
+thus restricting future developments.
+
+Pluggy currently consists of functionality for:
+
+- a way to register new hook specifications.  Without a hook
+  specification no hook calling can be performed.
+
+- a registry of plugins which contain hook implementation functions.  It
+  is possible to register plugins for which a hook specification is not yet
+  known and validate all hooks when the system is in a more referentially
+  consistent state.  Setting an "optionalhook" attribution to a hook
+  implementation will avoid PluginValidationError's if a specification
+  is missing.  This allows to have optional integration between plugins.
+
+- a "hook" relay object from which you can launch 1:N calls to
+  registered hook implementation functions
+
+- a mechanism for ordering hook implementation functions
+
+- mechanisms for two different type of 1:N calls: "firstresult" for when
+  the call should stop when the first implementation returns a non-None result.
+  And the other (default) way of guaranteeing that all hook implementations
+  will be called and their non-None result collected.
+
+- mechanisms for "historic" extension points such that all newly
+  registered functions will receive all hook calls that happened
+  before their registration.
+
+- a mechanism for discovering plugin objects which are based on
+  setuptools based entry points.
+
+- a simple tracing mechanism, including tracing of plugin calls and
+  their arguments.
+
+"""
+import sys
+import inspect
+
+__version__ = '0.4.0'
+
+__all__ = ["PluginManager", "PluginValidationError", "HookCallError",
+           "HookspecMarker", "HookimplMarker"]
+
+_py3 = sys.version_info > (3, 0)
+
+
+class HookspecMarker:
+    """ Decorator helper class for marking functions as hook specifications.
+
+    You can instantiate it with a project_name to get a decorator.
+    Calling PluginManager.add_hookspecs later will discover all marked functions
+    if the PluginManager uses the same project_name.
+    """
+
+    def __init__(self, project_name):
+        self.project_name = project_name
+
+    def __call__(self, function=None, firstresult=False, historic=False):
+        """ if passed a function, directly sets attributes on the function
+        which will make it discoverable to add_hookspecs().  If passed no
+        function, returns a decorator which can be applied to a function
+        later using the attributes supplied.
+
+        If firstresult is True the 1:N hook call (N being the number of registered
+        hook implementation functions) will stop at I<=N when the I'th function
+        returns a non-None result.
+
+        If historic is True calls to a hook will be memorized and replayed
+        on later registered plugins.
+
+        """
+        def setattr_hookspec_opts(func):
+            if historic and firstresult:
+                raise ValueError("cannot have a historic firstresult hook")
+            setattr(func, self.project_name + "_spec",
+                   dict(firstresult=firstresult, historic=historic))
+            return func
+
+        if function is not None:
+            return setattr_hookspec_opts(function)
+        else:
+            return setattr_hookspec_opts
+
+
+class HookimplMarker:
+    """ Decorator helper class for marking functions as hook implementations.
+
+    You can instantiate with a project_name to get a decorator.
+    Calling PluginManager.register later will discover all marked functions
+    if the PluginManager uses the same project_name.
+    """
+    def __init__(self, project_name):
+        self.project_name = project_name
+
+    def __call__(self, function=None, hookwrapper=False, optionalhook=False,
+                 tryfirst=False, trylast=False):
+
+        """ if passed a function, directly sets attributes on the function
+        which will make it discoverable to register().  If passed no function,
+        returns a decorator which can be applied to a function later using
+        the attributes supplied.
+
+        If optionalhook is True a missing matching hook specification will not result
+        in an error (by default it is an error if no matching spec is found).
+
+        If tryfirst is True this hook implementation will run as early as possible
+        in the chain of N hook implementations for a specfication.
+
+        If trylast is True this hook implementation will run as late as possible
+        in the chain of N hook implementations.
+
+        If hookwrapper is True the hook implementations needs to execute exactly
+        one "yield".  The code before the yield is run early before any non-hookwrapper
+        function is run.  The code after the yield is run after all non-hookwrapper
+        function have run.  The yield receives an ``_CallOutcome`` object representing
+        the exception or result outcome of the inner calls (including other hookwrapper
+        calls).
+
+        """
+        def setattr_hookimpl_opts(func):
+            setattr(func, self.project_name + "_impl",
+                   dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
+                        tryfirst=tryfirst, trylast=trylast))
+            return func
+
+        if function is None:
+            return setattr_hookimpl_opts
+        else:
+            return setattr_hookimpl_opts(function)
+
+
+def normalize_hookimpl_opts(opts):
+    opts.setdefault("tryfirst", False)
+    opts.setdefault("trylast", False)
+    opts.setdefault("hookwrapper", False)
+    opts.setdefault("optionalhook", False)
+
+
+class _TagTracer:
+    def __init__(self):
+        self._tag2proc = {}
+        self.writer = None
+        self.indent = 0
+
+    def get(self, name):
+        return _TagTracerSub(self, (name,))
+
+    def format_message(self, tags, args):
+        if isinstance(args[-1], dict):
+            extra = args[-1]
+            args = args[:-1]
+        else:
+            extra = {}
+
+        content = " ".join(map(str, args))
+        indent = "  " * self.indent
+
+        lines = [
+            "%s%s [%s]\n" % (indent, content, ":".join(tags))
+        ]
+
+        for name, value in extra.items():
+            lines.append("%s    %s: %s\n" % (indent, name, value))
+        return lines
+
+    def processmessage(self, tags, args):
+        if self.writer is not None and args:
+            lines = self.format_message(tags, args)
+            self.writer(''.join(lines))
+        try:
+            self._tag2proc[tags](tags, args)
+        except KeyError:
+            pass
+
+    def setwriter(self, writer):
+        self.writer = writer
+
+    def setprocessor(self, tags, processor):
+        if isinstance(tags, str):
+            tags = tuple(tags.split(":"))
+        else:
+            assert isinstance(tags, tuple)
+        self._tag2proc[tags] = processor
+
+
+class _TagTracerSub:
+    def __init__(self, root, tags):
+        self.root = root
+        self.tags = tags
+
+    def __call__(self, *args):
+        self.root.processmessage(self.tags, args)
+
+    def setmyprocessor(self, processor):
+        self.root.setprocessor(self.tags, processor)
+
+    def get(self, name):
+        return self.__class__(self.root, self.tags + (name,))
+
+
+def _raise_wrapfail(wrap_controller, msg):
+    co = wrap_controller.gi_code
+    raise RuntimeError("wrap_controller at %r %s:%d %s" %
+                   (co.co_name, co.co_filename, co.co_firstlineno, msg))
+
+
+def _wrapped_call(wrap_controller, func):
+    """ Wrap calling to a function with a generator which needs to yield
+    exactly once.  The yield point will trigger calling the wrapped function
+    and return its _CallOutcome to the yield point.  The generator then needs
+    to finish (raise StopIteration) in order for the wrapped call to complete.
+    """
+    try:
+        next(wrap_controller)   # first yield
+    except StopIteration:
+        _raise_wrapfail(wrap_controller, "did not yield")
+    call_outcome = _CallOutcome(func)
+    try:
+        wrap_controller.send(call_outcome)
+        _raise_wrapfail(wrap_controller, "has second yield")
+    except StopIteration:
+        pass
+    return call_outcome.get_result()
+
+
+class _CallOutcome:
+    """ Outcome of a function call, either an exception or a proper result.
+    Calling the ``get_result`` method will return the result or reraise
+    the exception raised when the function was called. """
+    excinfo = None
+
+    def __init__(self, func):
+        try:
+            self.result = func()
+        except BaseException:
+            self.excinfo = sys.exc_info()
+
+    def force_result(self, result):
+        self.result = result
+        self.excinfo = None
+
+    def get_result(self):
+        if self.excinfo is None:
+            return self.result
+        else:
+            ex = self.excinfo
+            if _py3:
+                raise ex[1].with_traceback(ex[2])
+            _reraise(*ex)  # noqa
+
+if not _py3:
+    exec("""
+def _reraise(cls, val, tb):
+    raise cls, val, tb
+""")
+
+
+class _TracedHookExecution:
+    def __init__(self, pluginmanager, before, after):
+        self.pluginmanager = pluginmanager
+        self.before = before
+        self.after = after
+        self.oldcall = pluginmanager._inner_hookexec
+        assert not isinstance(self.oldcall, _TracedHookExecution)
+        self.pluginmanager._inner_hookexec = self
+
+    def __call__(self, hook, hook_impls, kwargs):
+        self.before(hook.name, hook_impls, kwargs)
+        outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs))
+        self.after(outcome, hook.name, hook_impls, kwargs)
+        return outcome.get_result()
+
+    def undo(self):
+        self.pluginmanager._inner_hookexec = self.oldcall
+
+
+class PluginManager(object):
+    """ Core Pluginmanager class which manages registration
+    of plugin objects and 1:N hook calling.
+
+    You can register new hooks by calling ``add_hookspec(module_or_class)``.
+    You can register plugin objects (which contain hooks) by calling
+    ``register(plugin)``.  The Pluginmanager is initialized with a
+    prefix that is searched for in the names of the dict of registered
+    plugin objects.  An optional excludefunc allows to blacklist names which
+    are not considered as hooks despite a matching prefix.
+
+    For debugging purposes you can call ``enable_tracing()``
+    which will subsequently send debug information to the trace helper.
+    """
+
+    def __init__(self, project_name, implprefix=None):
+        """ if implprefix is given implementation functions
+        will be recognized if their name matches the implprefix. """
+        self.project_name = project_name
+        self._name2plugin = {}
+        self._plugin2hookcallers = {}
+        self._plugin_distinfo = []
+        self.trace = _TagTracer().get("pluginmanage")
+        self.hook = _HookRelay(self.trace.root.get("hook"))
+        self._implprefix = implprefix
+        self._inner_hookexec = lambda hook, methods, kwargs: \
+            _MultiCall(methods, kwargs, hook.spec_opts).execute()
+
+    def _hookexec(self, hook, methods, kwargs):
+        # called from all hookcaller instances.
+        # enable_tracing will set its own wrapping function at self._inner_hookexec
+        return self._inner_hookexec(hook, methods, kwargs)
+
+    def register(self, plugin, name=None):
+        """ Register a plugin and return its canonical name or None if the name
+        is blocked from registering.  Raise a ValueError if the plugin is already
+        registered. """
+        plugin_name = name or self.get_canonical_name(plugin)
+
+        if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
+            if self._name2plugin.get(plugin_name, -1) is None:
+                return  # blocked plugin, return None to indicate no registration
+            raise ValueError("Plugin already registered: %s=%s\n%s" %
+                            (plugin_name, plugin, self._name2plugin))
+
+        # XXX if an error happens we should make sure no state has been
+        # changed at point of return
+        self._name2plugin[plugin_name] = plugin
+
+        # register matching hook implementations of the plugin
+        self._plugin2hookcallers[plugin] = hookcallers = []
+        for name in dir(plugin):
+            hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
+            if hookimpl_opts is not None:
+                normalize_hookimpl_opts(hookimpl_opts)
+                method = getattr(plugin, name)
+                hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
+                hook = getattr(self.hook, name, None)
+                if hook is None:
+                    hook = _HookCaller(name, self._hookexec)
+                    setattr(self.hook, name, hook)
+                elif hook.has_spec():
+                    self._verify_hook(hook, hookimpl)
+                    hook._maybe_apply_history(hookimpl)
+                hook._add_hookimpl(hookimpl)
+                hookcallers.append(hook)
+        return plugin_name
+
+    def parse_hookimpl_opts(self, plugin, name):
+        method = getattr(plugin, name)
+        try:
+            res = getattr(method, self.project_name + "_impl", None)
+        except Exception:
+            res = {}
+        if res is not None and not isinstance(res, dict):
+            # false positive
+            res = None
+        elif res is None and self._implprefix and name.startswith(self._implprefix):
+            res = {}
+        return res
+
+    def unregister(self, plugin=None, name=None):
+        """ unregister a plugin object and all its contained hook implementations
+        from internal data structures. """
+        if name is None:
+            assert plugin is not None, "one of name or plugin needs to be specified"
+            name = self.get_name(plugin)
+
+        if plugin is None:
+            plugin = self.get_plugin(name)
+
+        # if self._name2plugin[name] == None registration was blocked: ignore
+        if self._name2plugin.get(name):
+            del self._name2plugin[name]
+
+        for hookcaller in self._plugin2hookcallers.pop(plugin, []):
+            hookcaller._remove_plugin(plugin)
+
+        return plugin
+
+    def set_blocked(self, name):
+        """ block registrations of the given name, unregister if already registered. """
+        self.unregister(name=name)
+        self._name2plugin[name] = None
+
+    def is_blocked(self, name):
+        """ return True if the name blogs registering plugins of that name. """
+        return name in self._name2plugin and self._name2plugin[name] is None
+
+    def add_hookspecs(self, module_or_class):
+        """ add new hook specifications defined in the given module_or_class.
+        Functions are recognized if they have been decorated accordingly. """
+        names = []
+        for name in dir(module_or_class):
+            spec_opts = self.parse_hookspec_opts(module_or_class, name)
+            if spec_opts is not None:
+                hc = getattr(self.hook, name, None)
+                if hc is None:
+                    hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
+                    setattr(self.hook, name, hc)
+                else:
+                    # plugins registered this hook without knowing the spec
+                    hc.set_specification(module_or_class, spec_opts)
+                    for hookfunction in (hc._wrappers + hc._nonwrappers):
+                        self._verify_hook(hc, hookfunction)
+                names.append(name)
+
+        if not names:
+            raise ValueError("did not find any %r hooks in %r" %
+                             (self.project_name, module_or_class))
+
+    def parse_hookspec_opts(self, module_or_class, name):
+        method = getattr(module_or_class, name)
+        return getattr(method, self.project_name + "_spec", None)
+
+    def get_plugins(self):
+        """ return the set of registered plugins. """
+        return set(self._plugin2hookcallers)
+
+    def is_registered(self, plugin):
+        """ Return True if the plugin is already registered. """
+        return plugin in self._plugin2hookcallers
+
+    def get_canonical_name(self, plugin):
+        """ Return canonical name for a plugin object. Note that a plugin
+        may be registered under a different name which was specified
+        by the caller of register(plugin, name). To obtain the name
+        of an registered plugin use ``get_name(plugin)`` instead."""
+        return getattr(plugin, "__name__", None) or str(id(plugin))
+
+    def get_plugin(self, name):
+        """ Return a plugin or None for the given name. """
+        return self._name2plugin.get(name)
+
+    def has_plugin(self, name):
+        """ Return True if a plugin with the given name is registered. """
+        return self.get_plugin(name) is not None
+
+    def get_name(self, plugin):
+        """ Return name for registered plugin or None if not registered. """
+        for name, val in self._name2plugin.items():
+            if plugin == val:
+                return name
+
+    def _verify_hook(self, hook, hookimpl):
+        if hook.is_historic() and hookimpl.hookwrapper:
+            raise PluginValidationError(
+                "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
+                (hookimpl.plugin_name, hook.name))
+
+        for arg in hookimpl.argnames:
+            if arg not in hook.argnames:
+                raise PluginValidationError(
+                    "Plugin %r\nhook %r\nargument %r not available\n"
+                    "plugin definition: %s\n"
+                    "available hookargs: %s" %
+                    (hookimpl.plugin_name, hook.name, arg,
+                    _formatdef(hookimpl.function), ", ".join(hook.argnames)))
+
+    def check_pending(self):
+        """ Verify that all hooks which have not been verified against
+        a hook specification are optional, otherwise raise PluginValidationError"""
+        for name in self.hook.__dict__:
+            if name[0] != "_":
+                hook = getattr(self.hook, name)
+                if not hook.has_spec():
+                    for hookimpl in (hook._wrappers + hook._nonwrappers):
+                        if not hookimpl.optionalhook:
+                            raise PluginValidationError(
+                                "unknown hook %r in plugin %r" %
+                                (name, hookimpl.plugin))
+
+    def load_setuptools_entrypoints(self, entrypoint_name):
+        """ Load modules from querying the specified setuptools entrypoint name.
+        Return the number of loaded plugins. """
+        from pkg_resources import (iter_entry_points, DistributionNotFound,
+                                   VersionConflict)
+        for ep in iter_entry_points(entrypoint_name):
+            # is the plugin registered or blocked?
+            if self.get_plugin(ep.name) or self.is_blocked(ep.name):
+                continue
+            try:
+                plugin = ep.load()
+            except DistributionNotFound:
+                continue
+            except VersionConflict as e:
+                raise PluginValidationError(
+                    "Plugin %r could not be loaded: %s!" % (ep.name, e))
+            self.register(plugin, name=ep.name)
+            self._plugin_distinfo.append((plugin, ep.dist))
+        return len(self._plugin_distinfo)
+
+    def list_plugin_distinfo(self):
+        """ return list of distinfo/plugin tuples for all setuptools registered
+        plugins. """
+        return list(self._plugin_distinfo)
+
+    def list_name_plugin(self):
+        """ return list of name/plugin pairs. """
+        return list(self._name2plugin.items())
+
+    def get_hookcallers(self, plugin):
+        """ get all hook callers for the specified plugin. """
+        return self._plugin2hookcallers.get(plugin)
+
+    def add_hookcall_monitoring(self, before, after):
+        """ add before/after tracing functions for all hooks
+        and return an undo function which, when called,
+        will remove the added tracers.
+
+        ``before(hook_name, hook_impls, kwargs)`` will be called ahead
+        of all hook calls and receive a hookcaller instance, a list
+        of HookImpl instances and the keyword arguments for the hook call.
+
+        ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
+        same arguments as ``before`` but also a :py:class:`_CallOutcome`` object
+        which represents the result of the overall hook call.
+        """
+        return _TracedHookExecution(self, before, after).undo
+
+    def enable_tracing(self):
+        """ enable tracing of hook calls and return an undo function. """
+        hooktrace = self.hook._trace
+
+        def before(hook_name, methods, kwargs):
+            hooktrace.root.indent += 1
+            hooktrace(hook_name, kwargs)
+
+        def after(outcome, hook_name, methods, kwargs):
+            if outcome.excinfo is None:
+                hooktrace("finish", hook_name, "-->", outcome.result)
+            hooktrace.root.indent -= 1
+
+        return self.add_hookcall_monitoring(before, after)
+
+    def subset_hook_caller(self, name, remove_plugins):
+        """ Return a new _HookCaller instance for the named method
+        which manages calls to all registered plugins except the
+        ones from remove_plugins. """
+        orig = getattr(self.hook, name)
+        plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
+        if plugins_to_remove:
+            hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
+                             orig.spec_opts)
+            for hookimpl in (orig._wrappers + orig._nonwrappers):
+                plugin = hookimpl.plugin
+                if plugin not in plugins_to_remove:
+                    hc._add_hookimpl(hookimpl)
+                    # we also keep track of this hook caller so it
+                    # gets properly removed on plugin unregistration
+                    self._plugin2hookcallers.setdefault(plugin, []).append(hc)
+            return hc
+        return orig
+
+
+class _MultiCall:
+    """ execute a call into multiple python functions/methods. """
+
+    # XXX note that the __multicall__ argument is supported only
+    # for pytest compatibility reasons.  It was never officially
+    # supported there and is explicitely deprecated since 2.8
+    # so we can remove it soon, allowing to avoid the below recursion
+    # in execute() and simplify/speed up the execute loop.
+
+    def __init__(self, hook_impls, kwargs, specopts={}):
+        self.hook_impls = hook_impls
+        self.kwargs = kwargs
+        self.kwargs["__multicall__"] = self
+        self.specopts = specopts
+
+    def execute(self):
+        all_kwargs = self.kwargs
+        self.results = results = []
+        firstresult = self.specopts.get("firstresult")
+
+        while self.hook_impls:
+            hook_impl = self.hook_impls.pop()
+            try:
+                args = [all_kwargs[argname] for argname in hook_impl.argnames]
+            except KeyError:
+                for argname in hook_impl.argnames:
+                    if argname not in all_kwargs:
+                        raise HookCallError(
+                            "hook call must provide argument %r" % (argname,))
+            if hook_impl.hookwrapper:
+                return _wrapped_call(hook_impl.function(*args), self.execute)
+            res = hook_impl.function(*args)
+            if res is not None:
+                if firstresult:
+                    return res
+                results.append(res)
+
+        if not firstresult:
+            return results
+
+    def __repr__(self):
+        status = "%d meths" % (len(self.hook_impls),)
+        if hasattr(self, "results"):
+            status = ("%d results, " % len(self.results)) + status
+        return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs)
+
+
def varnames(func, startindex=None):
    """Return the tuple of positional argument names for *func*.

    Accepts functions, methods, classes and arbitrary callables.  For a
    class its ``__init__`` is inspected (skipping the implicit first
    parameter); for other non-function callables the ``__call__`` method
    is used.  Bound-method ``self`` is excluded, and parameters carrying
    default values are dropped.  The result is memoized under
    ``func.__dict__['_varnames']`` whenever that mapping is writable.
    """
    cache = getattr(func, "__dict__", {})
    if "_varnames" in cache:
        return cache["_varnames"]

    if inspect.isclass(func):
        try:
            func = func.__init__
        except AttributeError:
            return ()
        startindex = 1  # skip the implicit first parameter of __init__
    else:
        if not (inspect.isfunction(func) or inspect.ismethod(func)):
            # fall back to the callable protocol for other objects
            try:
                func = getattr(func, '__call__', func)
            except Exception:
                return ()
        if startindex is None:
            # bound methods carry an implicit first argument
            startindex = 1 if inspect.ismethod(func) else 0

    code = getattr(func, "__code__", None)
    if code is None:
        return ()
    try:
        names = code.co_varnames[startindex:code.co_argcount]
    except AttributeError:
        names = ()
    else:
        defaults = func.__defaults__
        if defaults:
            names = names[:-len(defaults)]
    try:
        cache["_varnames"] = names
    except TypeError:
        # read-only __dict__ (e.g. a class mappingproxy) - skip memoization
        pass
    return names
+
+
+class _HookRelay:
+    """ hook holder object for performing 1:N hook calls where N is the number
+    of registered plugins.
+
+    """
+
+    def __init__(self, trace):
+        self._trace = trace
+
+
+class _HookCaller(object):
+    def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
+        self.name = name
+        self._wrappers = []
+        self._nonwrappers = []
+        self._hookexec = hook_execute
+        if specmodule_or_class is not None:
+            assert spec_opts is not None
+            self.set_specification(specmodule_or_class, spec_opts)
+
+    def has_spec(self):
+        return hasattr(self, "_specmodule_or_class")
+
+    def set_specification(self, specmodule_or_class, spec_opts):
+        assert not self.has_spec()
+        self._specmodule_or_class = specmodule_or_class
+        specfunc = getattr(specmodule_or_class, self.name)
+        argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class))
+        assert "self" not in argnames  # sanity check
+        self.argnames = ["__multicall__"] + list(argnames)
+        self.spec_opts = spec_opts
+        if spec_opts.get("historic"):
+            self._call_history = []
+
+    def is_historic(self):
+        return hasattr(self, "_call_history")
+
+    def _remove_plugin(self, plugin):
+        def remove(wrappers):
+            for i, method in enumerate(wrappers):
+                if method.plugin == plugin:
+                    del wrappers[i]
+                    return True
+        if remove(self._wrappers) is None:
+            if remove(self._nonwrappers) is None:
+                raise ValueError("plugin %r not found" % (plugin,))
+
+    def _add_hookimpl(self, hookimpl):
+        if hookimpl.hookwrapper:
+            methods = self._wrappers
+        else:
+            methods = self._nonwrappers
+
+        if hookimpl.trylast:
+            methods.insert(0, hookimpl)
+        elif hookimpl.tryfirst:
+            methods.append(hookimpl)
+        else:
+            # find last non-tryfirst method
+            i = len(methods) - 1
+            while i >= 0 and methods[i].tryfirst:
+                i -= 1
+            methods.insert(i + 1, hookimpl)
+
+    def __repr__(self):
+        return "<_HookCaller %r>" % (self.name,)
+
+    def __call__(self, **kwargs):
+        assert not self.is_historic()
+        return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
+
+    def call_historic(self, proc=None, kwargs=None):
+        self._call_history.append((kwargs or {}, proc))
+        # historizing hooks don't return results
+        self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
+
+    def call_extra(self, methods, kwargs):
+        """ Call the hook with some additional temporarily participating
+        methods using the specified kwargs as call parameters. """
+        old = list(self._nonwrappers), list(self._wrappers)
+        for method in methods:
+            opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
+            hookimpl = HookImpl(None, "<temp>", method, opts)
+            self._add_hookimpl(hookimpl)
+        try:
+            return self(**kwargs)
+        finally:
+            self._nonwrappers, self._wrappers = old
+
+    def _maybe_apply_history(self, method):
+        if self.is_historic():
+            for kwargs, proc in self._call_history:
+                res = self._hookexec(self, [method], kwargs)
+                if res and proc is not None:
+                    proc(res[0])
+
+
class HookImpl:
    """Record of one registered hook implementation and its call options."""

    def __init__(self, plugin, plugin_name, function, hook_impl_opts):
        self.function = function
        # argument names the implementation accepts, via introspection
        self.argnames = varnames(function)
        self.plugin = plugin
        self.plugin_name = plugin_name
        self.opts = hook_impl_opts
        # expose the options (hookwrapper/tryfirst/trylast/...) as attributes
        self.__dict__.update(hook_impl_opts)
+
+
class PluginValidationError(Exception):
    """Raised when a plugin fails validation against its hook specifications."""
+
+
class HookCallError(Exception):
    """Raised when a hook is invoked with missing or invalid arguments."""
+
+
+if hasattr(inspect, 'signature'):
+    def _formatdef(func):
+        return "%s%s" % (
+            func.__name__,
+            str(inspect.signature(func))
+        )
+else:
+    def _formatdef(func):
+        return "%s%s" % (
+            func.__name__,
+            inspect.formatargspec(*inspect.getargspec(func))
+        )
diff --git a/lib/spack/external/nose/LICENSE b/lib/spack/external/nose/LICENSE
deleted file mode 100644
index 9f6e791624..0000000000
--- a/lib/spack/external/nose/LICENSE
+++ /dev/null
@@ -1,502 +0,0 @@
-		  GNU LESSER GENERAL PUBLIC LICENSE
-		       Version 2.1, February 1999
-
- Copyright (C) 1991, 1999 Free Software Foundation, Inc.
-     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-[This is the first released version of the Lesser GPL.  It also counts
- as the successor of the GNU Library Public License, version 2, hence
- the version number 2.1.]
-
-			    Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-Licenses are intended to guarantee your freedom to share and change
-free software--to make sure the software is free for all its users.
-
-  This license, the Lesser General Public License, applies to some
-specially designated software packages--typically libraries--of the
-Free Software Foundation and other authors who decide to use it.  You
-can use it too, but we suggest you first think carefully about whether
-this license or the ordinary General Public License is the better
-strategy to use in any particular case, based on the explanations below.
-
-  When we speak of free software, we are referring to freedom of use,
-not price.  Our General Public Licenses are designed to make sure that
-you have the freedom to distribute copies of free software (and charge
-for this service if you wish); that you receive source code or can get
-it if you want it; that you can change the software and use pieces of
-it in new free programs; and that you are informed that you can do
-these things.
-
-  To protect your rights, we need to make restrictions that forbid
-distributors to deny you these rights or to ask you to surrender these
-rights.  These restrictions translate to certain responsibilities for
-you if you distribute copies of the library or if you modify it.
-
-  For example, if you distribute copies of the library, whether gratis
-or for a fee, you must give the recipients all the rights that we gave
-you.  You must make sure that they, too, receive or can get the source
-code.  If you link other code with the library, you must provide
-complete object files to the recipients, so that they can relink them
-with the library after making changes to the library and recompiling
-it.  And you must show them these terms so they know their rights.
-
-  We protect your rights with a two-step method: (1) we copyright the
-library, and (2) we offer you this license, which gives you legal
-permission to copy, distribute and/or modify the library.
-
-  To protect each distributor, we want to make it very clear that
-there is no warranty for the free library.  Also, if the library is
-modified by someone else and passed on, the recipients should know
-that what they have is not the original version, so that the original
-author's reputation will not be affected by problems that might be
-introduced by others.
-
-  Finally, software patents pose a constant threat to the existence of
-any free program.  We wish to make sure that a company cannot
-effectively restrict the users of a free program by obtaining a
-restrictive license from a patent holder.  Therefore, we insist that
-any patent license obtained for a version of the library must be
-consistent with the full freedom of use specified in this license.
-
-  Most GNU software, including some libraries, is covered by the
-ordinary GNU General Public License.  This license, the GNU Lesser
-General Public License, applies to certain designated libraries, and
-is quite different from the ordinary General Public License.  We use
-this license for certain libraries in order to permit linking those
-libraries into non-free programs.
-
-  When a program is linked with a library, whether statically or using
-a shared library, the combination of the two is legally speaking a
-combined work, a derivative of the original library.  The ordinary
-General Public License therefore permits such linking only if the
-entire combination fits its criteria of freedom.  The Lesser General
-Public License permits more lax criteria for linking other code with
-the library.
-
-  We call this license the "Lesser" General Public License because it
-does Less to protect the user's freedom than the ordinary General
-Public License.  It also provides other free software developers Less
-of an advantage over competing non-free programs.  These disadvantages
-are the reason we use the ordinary General Public License for many
-libraries.  However, the Lesser license provides advantages in certain
-special circumstances.
-
-  For example, on rare occasions, there may be a special need to
-encourage the widest possible use of a certain library, so that it becomes
-a de-facto standard.  To achieve this, non-free programs must be
-allowed to use the library.  A more frequent case is that a free
-library does the same job as widely used non-free libraries.  In this
-case, there is little to gain by limiting the free library to free
-software only, so we use the Lesser General Public License.
-
-  In other cases, permission to use a particular library in non-free
-programs enables a greater number of people to use a large body of
-free software.  For example, permission to use the GNU C Library in
-non-free programs enables many more people to use the whole GNU
-operating system, as well as its variant, the GNU/Linux operating
-system.
-
-  Although the Lesser General Public License is Less protective of the
-users' freedom, it does ensure that the user of a program that is
-linked with the Library has the freedom and the wherewithal to run
-that program using a modified version of the Library.
-
-  The precise terms and conditions for copying, distribution and
-modification follow.  Pay close attention to the difference between a
-"work based on the library" and a "work that uses the library".  The
-former contains code derived from the library, whereas the latter must
-be combined with the library in order to run.
-
-		  GNU LESSER GENERAL PUBLIC LICENSE
-   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
-
-  0. This License Agreement applies to any software library or other
-program which contains a notice placed by the copyright holder or
-other authorized party saying it may be distributed under the terms of
-this Lesser General Public License (also called "this License").
-Each licensee is addressed as "you".
-
-  A "library" means a collection of software functions and/or data
-prepared so as to be conveniently linked with application programs
-(which use some of those functions and data) to form executables.
-
-  The "Library", below, refers to any such software library or work
-which has been distributed under these terms.  A "work based on the
-Library" means either the Library or any derivative work under
-copyright law: that is to say, a work containing the Library or a
-portion of it, either verbatim or with modifications and/or translated
-straightforwardly into another language.  (Hereinafter, translation is
-included without limitation in the term "modification".)
-
-  "Source code" for a work means the preferred form of the work for
-making modifications to it.  For a library, complete source code means
-all the source code for all modules it contains, plus any associated
-interface definition files, plus the scripts used to control compilation
-and installation of the library.
-
-  Activities other than copying, distribution and modification are not
-covered by this License; they are outside its scope.  The act of
-running a program using the Library is not restricted, and output from
-such a program is covered only if its contents constitute a work based
-on the Library (independent of the use of the Library in a tool for
-writing it).  Whether that is true depends on what the Library does
-and what the program that uses the Library does.
-
-  1. You may copy and distribute verbatim copies of the Library's
-complete source code as you receive it, in any medium, provided that
-you conspicuously and appropriately publish on each copy an
-appropriate copyright notice and disclaimer of warranty; keep intact
-all the notices that refer to this License and to the absence of any
-warranty; and distribute a copy of this License along with the
-Library.
-
-  You may charge a fee for the physical act of transferring a copy,
-and you may at your option offer warranty protection in exchange for a
-fee.
-
-  2. You may modify your copy or copies of the Library or any portion
-of it, thus forming a work based on the Library, and copy and
-distribute such modifications or work under the terms of Section 1
-above, provided that you also meet all of these conditions:
-
-    a) The modified work must itself be a software library.
-
-    b) You must cause the files modified to carry prominent notices
-    stating that you changed the files and the date of any change.
-
-    c) You must cause the whole of the work to be licensed at no
-    charge to all third parties under the terms of this License.
-
-    d) If a facility in the modified Library refers to a function or a
-    table of data to be supplied by an application program that uses
-    the facility, other than as an argument passed when the facility
-    is invoked, then you must make a good faith effort to ensure that,
-    in the event an application does not supply such function or
-    table, the facility still operates, and performs whatever part of
-    its purpose remains meaningful.
-
-    (For example, a function in a library to compute square roots has
-    a purpose that is entirely well-defined independent of the
-    application.  Therefore, Subsection 2d requires that any
-    application-supplied function or table used by this function must
-    be optional: if the application does not supply it, the square
-    root function must still compute square roots.)
-
-These requirements apply to the modified work as a whole.  If
-identifiable sections of that work are not derived from the Library,
-and can be reasonably considered independent and separate works in
-themselves, then this License, and its terms, do not apply to those
-sections when you distribute them as separate works.  But when you
-distribute the same sections as part of a whole which is a work based
-on the Library, the distribution of the whole must be on the terms of
-this License, whose permissions for other licensees extend to the
-entire whole, and thus to each and every part regardless of who wrote
-it.
-
-Thus, it is not the intent of this section to claim rights or contest
-your rights to work written entirely by you; rather, the intent is to
-exercise the right to control the distribution of derivative or
-collective works based on the Library.
-
-In addition, mere aggregation of another work not based on the Library
-with the Library (or with a work based on the Library) on a volume of
-a storage or distribution medium does not bring the other work under
-the scope of this License.
-
-  3. You may opt to apply the terms of the ordinary GNU General Public
-License instead of this License to a given copy of the Library.  To do
-this, you must alter all the notices that refer to this License, so
-that they refer to the ordinary GNU General Public License, version 2,
-instead of to this License.  (If a newer version than version 2 of the
-ordinary GNU General Public License has appeared, then you can specify
-that version instead if you wish.)  Do not make any other change in
-these notices.
-
-  Once this change is made in a given copy, it is irreversible for
-that copy, so the ordinary GNU General Public License applies to all
-subsequent copies and derivative works made from that copy.
-
-  This option is useful when you wish to copy part of the code of
-the Library into a program that is not a library.
-
-  4. You may copy and distribute the Library (or a portion or
-derivative of it, under Section 2) in object code or executable form
-under the terms of Sections 1 and 2 above provided that you accompany
-it with the complete corresponding machine-readable source code, which
-must be distributed under the terms of Sections 1 and 2 above on a
-medium customarily used for software interchange.
-
-  If distribution of object code is made by offering access to copy
-from a designated place, then offering equivalent access to copy the
-source code from the same place satisfies the requirement to
-distribute the source code, even though third parties are not
-compelled to copy the source along with the object code.
-
-  5. A program that contains no derivative of any portion of the
-Library, but is designed to work with the Library by being compiled or
-linked with it, is called a "work that uses the Library".  Such a
-work, in isolation, is not a derivative work of the Library, and
-therefore falls outside the scope of this License.
-
-  However, linking a "work that uses the Library" with the Library
-creates an executable that is a derivative of the Library (because it
-contains portions of the Library), rather than a "work that uses the
-library".  The executable is therefore covered by this License.
-Section 6 states terms for distribution of such executables.
-
-  When a "work that uses the Library" uses material from a header file
-that is part of the Library, the object code for the work may be a
-derivative work of the Library even though the source code is not.
-Whether this is true is especially significant if the work can be
-linked without the Library, or if the work is itself a library.  The
-threshold for this to be true is not precisely defined by law.
-
-  If such an object file uses only numerical parameters, data
-structure layouts and accessors, and small macros and small inline
-functions (ten lines or less in length), then the use of the object
-file is unrestricted, regardless of whether it is legally a derivative
-work.  (Executables containing this object code plus portions of the
-Library will still fall under Section 6.)
-
-  Otherwise, if the work is a derivative of the Library, you may
-distribute the object code for the work under the terms of Section 6.
-Any executables containing that work also fall under Section 6,
-whether or not they are linked directly with the Library itself.
-
-  6. As an exception to the Sections above, you may also combine or
-link a "work that uses the Library" with the Library to produce a
-work containing portions of the Library, and distribute that work
-under terms of your choice, provided that the terms permit
-modification of the work for the customer's own use and reverse
-engineering for debugging such modifications.
-
-  You must give prominent notice with each copy of the work that the
-Library is used in it and that the Library and its use are covered by
-this License.  You must supply a copy of this License.  If the work
-during execution displays copyright notices, you must include the
-copyright notice for the Library among them, as well as a reference
-directing the user to the copy of this License.  Also, you must do one
-of these things:
-
-    a) Accompany the work with the complete corresponding
-    machine-readable source code for the Library including whatever
-    changes were used in the work (which must be distributed under
-    Sections 1 and 2 above); and, if the work is an executable linked
-    with the Library, with the complete machine-readable "work that
-    uses the Library", as object code and/or source code, so that the
-    user can modify the Library and then relink to produce a modified
-    executable containing the modified Library.  (It is understood
-    that the user who changes the contents of definitions files in the
-    Library will not necessarily be able to recompile the application
-    to use the modified definitions.)
-
-    b) Use a suitable shared library mechanism for linking with the
-    Library.  A suitable mechanism is one that (1) uses at run time a
-    copy of the library already present on the user's computer system,
-    rather than copying library functions into the executable, and (2)
-    will operate properly with a modified version of the library, if
-    the user installs one, as long as the modified version is
-    interface-compatible with the version that the work was made with.
-
-    c) Accompany the work with a written offer, valid for at
-    least three years, to give the same user the materials
-    specified in Subsection 6a, above, for a charge no more
-    than the cost of performing this distribution.
-
-    d) If distribution of the work is made by offering access to copy
-    from a designated place, offer equivalent access to copy the above
-    specified materials from the same place.
-
-    e) Verify that the user has already received a copy of these
-    materials or that you have already sent this user a copy.
-
-  For an executable, the required form of the "work that uses the
-Library" must include any data and utility programs needed for
-reproducing the executable from it.  However, as a special exception,
-the materials to be distributed need not include anything that is
-normally distributed (in either source or binary form) with the major
-components (compiler, kernel, and so on) of the operating system on
-which the executable runs, unless that component itself accompanies
-the executable.
-
-  It may happen that this requirement contradicts the license
-restrictions of other proprietary libraries that do not normally
-accompany the operating system.  Such a contradiction means you cannot
-use both them and the Library together in an executable that you
-distribute.
-
-  7. You may place library facilities that are a work based on the
-Library side-by-side in a single library together with other library
-facilities not covered by this License, and distribute such a combined
-library, provided that the separate distribution of the work based on
-the Library and of the other library facilities is otherwise
-permitted, and provided that you do these two things:
-
-    a) Accompany the combined library with a copy of the same work
-    based on the Library, uncombined with any other library
-    facilities.  This must be distributed under the terms of the
-    Sections above.
-
-    b) Give prominent notice with the combined library of the fact
-    that part of it is a work based on the Library, and explaining
-    where to find the accompanying uncombined form of the same work.
-
-  8. You may not copy, modify, sublicense, link with, or distribute
-the Library except as expressly provided under this License.  Any
-attempt otherwise to copy, modify, sublicense, link with, or
-distribute the Library is void, and will automatically terminate your
-rights under this License.  However, parties who have received copies,
-or rights, from you under this License will not have their licenses
-terminated so long as such parties remain in full compliance.
-
-  9. You are not required to accept this License, since you have not
-signed it.  However, nothing else grants you permission to modify or
-distribute the Library or its derivative works.  These actions are
-prohibited by law if you do not accept this License.  Therefore, by
-modifying or distributing the Library (or any work based on the
-Library), you indicate your acceptance of this License to do so, and
-all its terms and conditions for copying, distributing or modifying
-the Library or works based on it.
-
-  10. Each time you redistribute the Library (or any work based on the
-Library), the recipient automatically receives a license from the
-original licensor to copy, distribute, link with or modify the Library
-subject to these terms and conditions.  You may not impose any further
-restrictions on the recipients' exercise of the rights granted herein.
-You are not responsible for enforcing compliance by third parties with
-this License.
-
-  11. If, as a consequence of a court judgment or allegation of patent
-infringement or for any other reason (not limited to patent issues),
-conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License.  If you cannot
-distribute so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you
-may not distribute the Library at all.  For example, if a patent
-license would not permit royalty-free redistribution of the Library by
-all those who receive copies directly or indirectly through you, then
-the only way you could satisfy both it and this License would be to
-refrain entirely from distribution of the Library.
-
-If any portion of this section is held invalid or unenforceable under any
-particular circumstance, the balance of the section is intended to apply,
-and the section as a whole is intended to apply in other circumstances.
-
-It is not the purpose of this section to induce you to infringe any
-patents or other property right claims or to contest validity of any
-such claims; this section has the sole purpose of protecting the
-integrity of the free software distribution system which is
-implemented by public license practices.  Many people have made
-generous contributions to the wide range of software distributed
-through that system in reliance on consistent application of that
-system; it is up to the author/donor to decide if he or she is willing
-to distribute software through any other system and a licensee cannot
-impose that choice.
-
-This section is intended to make thoroughly clear what is believed to
-be a consequence of the rest of this License.
-
-  12. If the distribution and/or use of the Library is restricted in
-certain countries either by patents or by copyrighted interfaces, the
-original copyright holder who places the Library under this License may add
-an explicit geographical distribution limitation excluding those countries,
-so that distribution is permitted only in or among countries not thus
-excluded.  In such case, this License incorporates the limitation as if
-written in the body of this License.
-
-  13. The Free Software Foundation may publish revised and/or new
-versions of the Lesser General Public License from time to time.
-Such new versions will be similar in spirit to the present version,
-but may differ in detail to address new problems or concerns.
-
-Each version is given a distinguishing version number.  If the Library
-specifies a version number of this License which applies to it and
-"any later version", you have the option of following the terms and
-conditions either of that version or of any later version published by
-the Free Software Foundation.  If the Library does not specify a
-license version number, you may choose any version ever published by
-the Free Software Foundation.
-
-  14. If you wish to incorporate parts of the Library into other free
-programs whose distribution conditions are incompatible with these,
-write to the author to ask for permission.  For software which is
-copyrighted by the Free Software Foundation, write to the Free
-Software Foundation; we sometimes make exceptions for this.  Our
-decision will be guided by the two goals of preserving the free status
-of all derivatives of our free software and of promoting the sharing
-and reuse of software generally.
-
-			    NO WARRANTY
-
-  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
-WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
-EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
-OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
-KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
-LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
-THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
-  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
-WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
-AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
-FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
-CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
-LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
-RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
-FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
-SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
-DAMAGES.
-
-		     END OF TERMS AND CONDITIONS
-
-           How to Apply These Terms to Your New Libraries
-
-  If you develop a new library, and you want it to be of the greatest
-possible use to the public, we recommend making it free software that
-everyone can redistribute and change.  You can do so by permitting
-redistribution under these terms (or, alternatively, under the terms of the
-ordinary General Public License).
-
-  To apply these terms, attach the following notices to the library.  It is
-safest to attach them to the start of each source file to most effectively
-convey the exclusion of warranty; and each file should have at least the
-"copyright" line and a pointer to where the full notice is found.
-
-    <one line to give the library's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This library is free software; you can redistribute it and/or
-    modify it under the terms of the GNU Lesser General Public
-    License as published by the Free Software Foundation; either
-    version 2.1 of the License, or (at your option) any later version.
-
-    This library is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-    Lesser General Public License for more details.
-
-    You should have received a copy of the GNU Lesser General Public
-    License along with this library; if not, write to the Free Software
-    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
-
-Also add information on how to contact you by electronic and paper mail.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the library, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the
-  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
-
-  <signature of Ty Coon>, 1 April 1990
-  Ty Coon, President of Vice
-
-That's all there is to it!
diff --git a/lib/spack/external/nose/__init__.py b/lib/spack/external/nose/__init__.py
deleted file mode 100644
index 1ae1362b7a..0000000000
--- a/lib/spack/external/nose/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-from nose.core import collector, main, run, run_exit, runmodule
-# backwards compatibility
-from nose.exc import SkipTest, DeprecatedTest
-from nose.tools import with_setup
-
-__author__ = 'Jason Pellerin'
-__versioninfo__ = (1, 3, 7)
-__version__ = '.'.join(map(str, __versioninfo__))
-
-__all__ = [
-    'main', 'run', 'run_exit', 'runmodule', 'with_setup',
-    'SkipTest', 'DeprecatedTest', 'collector'
-    ]
-
-
diff --git a/lib/spack/external/nose/__main__.py b/lib/spack/external/nose/__main__.py
deleted file mode 100644
index b402d9df12..0000000000
--- a/lib/spack/external/nose/__main__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-import sys
-
-from nose.core import run_exit
-
-if sys.argv[0].endswith('__main__.py'):
-    sys.argv[0] = '%s -m nose' % sys.executable
-
-run_exit()
diff --git a/lib/spack/external/nose/case.py b/lib/spack/external/nose/case.py
deleted file mode 100644
index cffa4ab4c9..0000000000
--- a/lib/spack/external/nose/case.py
+++ /dev/null
@@ -1,397 +0,0 @@
-"""nose unittest.TestCase subclasses. It is not necessary to subclass these
-classes when writing tests; they are used internally by nose.loader.TestLoader
-to create test cases from test functions and methods in test classes.
-"""
-import logging
-import sys
-import unittest
-from inspect import isfunction
-from nose.config import Config
-from nose.failure import Failure # for backwards compatibility
-from nose.util import resolve_name, test_address, try_run
-
-log = logging.getLogger(__name__)
-
-
-__all__ = ['Test']
-
-
-class Test(unittest.TestCase):
-    """The universal test case wrapper.
-
-    When a plugin sees a test, it will always see an instance of this
-    class. To access the actual test case that will be run, access the
-    test property of the nose.case.Test instance.
-    """
-    __test__ = False # do not collect
-    def __init__(self, test, config=None, resultProxy=None):
-        # sanity check
-        if not callable(test):
-            raise TypeError("nose.case.Test called with argument %r that "
-                            "is not callable. A callable is required."
-                            % test)
-        self.test = test
-        if config is None:
-            config = Config()
-        self.config = config
-        self.tbinfo = None
-        self.capturedOutput = None
-        self.resultProxy = resultProxy
-        self.plugins = config.plugins
-        self.passed = None
-        unittest.TestCase.__init__(self)
-
-    def __call__(self, *arg, **kwarg):
-        return self.run(*arg, **kwarg)
-
-    def __str__(self):
-        name = self.plugins.testName(self)
-        if name is not None:
-            return name
-        return str(self.test)
-
-    def __repr__(self):
-        return "Test(%r)" % self.test
-
-    def afterTest(self, result):
-        """Called after test is complete (after result.stopTest)
-        """
-        try:
-            afterTest = result.afterTest
-        except AttributeError:
-            pass
-        else:
-            afterTest(self.test)
-
-    def beforeTest(self, result):
-        """Called before test is run (before result.startTest)
-        """
-        try:
-            beforeTest = result.beforeTest
-        except AttributeError:
-            pass
-        else:
-            beforeTest(self.test)
-
-    def exc_info(self):
-        """Extract exception info.
-        """
-        exc, exv, tb = sys.exc_info()
-        return (exc, exv, tb)
-
-    def id(self):
-        """Get a short(er) description of the test
-        """
-        return self.test.id()
-
-    def address(self):
-        """Return a round-trip name for this test, a name that can be
-        fed back as input to loadTestByName and (assuming the same
-        plugin configuration) result in the loading of this test.
-        """
-        if hasattr(self.test, 'address'):
-            return self.test.address()
-        else:
-            # not a nose case
-            return test_address(self.test)
-
-    def _context(self):
-        try:
-            return self.test.context
-        except AttributeError:
-            pass
-        try:
-            return self.test.__class__
-        except AttributeError:
-            pass
-        try:
-            return resolve_name(self.test.__module__)
-        except AttributeError:
-            pass
-        return None
-    context = property(_context, None, None,
-                      """Get the context object of this test (if any).""")
-
-    def run(self, result):
-        """Modified run for the test wrapper.
-
-        From here we don't call result.startTest or stopTest or
-        addSuccess.  The wrapper calls addError/addFailure only if its
-        own setup or teardown fails, or running the wrapped test fails
-        (eg, if the wrapped "test" is not callable).
-
-        Two additional methods are called, beforeTest and
-        afterTest. These give plugins a chance to modify the wrapped
-        test before it is called and do cleanup after it is
-        called. They are called unconditionally.
-        """
-        if self.resultProxy:
-            result = self.resultProxy(result, self)
-        try:
-            try:
-                self.beforeTest(result)
-                self.runTest(result)
-            except KeyboardInterrupt:
-                raise
-            except:
-                err = sys.exc_info()
-                result.addError(self, err)
-        finally:
-            self.afterTest(result)
-
-    def runTest(self, result):
-        """Run the test. Plugins may alter the test by returning a
-        value from prepareTestCase. The value must be callable and
-        must accept one argument, the result instance.
-        """
-        test = self.test
-        plug_test = self.config.plugins.prepareTestCase(self)
-        if plug_test is not None:
-            test = plug_test
-        test(result)
-
-    def shortDescription(self):
-        desc = self.plugins.describeTest(self)
-        if desc is not None:
-            return desc
-        # work around bug in unittest.TestCase.shortDescription
-        # with multiline docstrings.
-        test = self.test
-        try:
-            test._testMethodDoc = test._testMethodDoc.strip()# 2.5
-        except AttributeError:
-            try:
-                # 2.4 and earlier
-                test._TestCase__testMethodDoc = \
-                    test._TestCase__testMethodDoc.strip()
-            except AttributeError:
-                pass
-        # 2.7 compat: shortDescription() always returns something
-        # which is a change from 2.6 and below, and breaks the
-        # testName plugin call.
-        try:
-            desc = self.test.shortDescription()
-        except Exception:
-            # this is probably caused by a problem in test.__str__() and is
-            # only triggered by python 3.1's unittest!
-            pass
-        try:
-            if desc == str(self.test):
-                return
-        except Exception:
-            # If str() triggers an exception then ignore it.
-            # see issue 422
-            pass
-        return desc
-
-
-class TestBase(unittest.TestCase):
-    """Common functionality for FunctionTestCase and MethodTestCase.
-    """
-    __test__ = False # do not collect
-
-    def id(self):
-        return str(self)
-
-    def runTest(self):
-        self.test(*self.arg)
-
-    def shortDescription(self):
-        if hasattr(self.test, 'description'):
-            return self.test.description
-        func, arg = self._descriptors()
-        doc = getattr(func, '__doc__', None)
-        if not doc:
-            doc = str(self)
-        return doc.strip().split("\n")[0].strip()
-
-
-class FunctionTestCase(TestBase):
-    """TestCase wrapper for test functions.
-
-    Don't use this class directly; it is used internally in nose to
-    create test cases for test functions.
-    """
-    __test__ = False # do not collect
-
-    def __init__(self, test, setUp=None, tearDown=None, arg=tuple(),
-                 descriptor=None):
-        """Initialize the MethodTestCase.
-
-        Required argument:
-
-        * test -- the test function to call.
-
-        Optional arguments:
-
-        * setUp -- function to run at setup.
-
-        * tearDown -- function to run at teardown.
-
-        * arg -- arguments to pass to the test function. This is to support
-          generator functions that yield arguments.
-
-        * descriptor -- the function, other than the test, that should be used
-          to construct the test name. This is to support generator functions.
-        """
-
-        self.test = test
-        self.setUpFunc = setUp
-        self.tearDownFunc = tearDown
-        self.arg = arg
-        self.descriptor = descriptor
-        TestBase.__init__(self)
-
-    def address(self):
-        """Return a round-trip name for this test, a name that can be
-        fed back as input to loadTestByName and (assuming the same
-        plugin configuration) result in the loading of this test.
-        """
-        if self.descriptor is not None:
-            return test_address(self.descriptor)
-        else:
-            return test_address(self.test)
-
-    def _context(self):
-        return resolve_name(self.test.__module__)
-    context = property(_context, None, None,
-                      """Get context (module) of this test""")
-
-    def setUp(self):
-        """Run any setup function attached to the test function
-        """
-        if self.setUpFunc:
-            self.setUpFunc()
-        else:
-            names = ('setup', 'setUp', 'setUpFunc')
-            try_run(self.test, names)
-
-    def tearDown(self):
-        """Run any teardown function attached to the test function
-        """
-        if self.tearDownFunc:
-            self.tearDownFunc()
-        else:
-            names = ('teardown', 'tearDown', 'tearDownFunc')
-            try_run(self.test, names)
-
-    def __str__(self):
-        func, arg = self._descriptors()
-        if hasattr(func, 'compat_func_name'):
-            name = func.compat_func_name
-        else:
-            name = func.__name__
-        name = "%s.%s" % (func.__module__, name)
-        if arg:
-            name = "%s%s" % (name, arg)
-        # FIXME need to include the full dir path to disambiguate
-        # in cases where test module of the same name was seen in
-        # another directory (old fromDirectory)
-        return name
-    __repr__ = __str__
-
-    def _descriptors(self):
-        """Get the descriptors of the test function: the function and
-        arguments that will be used to construct the test name. In
-        most cases, this is the function itself and no arguments. For
-        tests generated by generator functions, the original
-        (generator) function and args passed to the generated function
-        are returned.
-        """
-        if self.descriptor:
-            return self.descriptor, self.arg
-        else:
-            return self.test, self.arg
-
-
-class MethodTestCase(TestBase):
-    """Test case wrapper for test methods.
-
-    Don't use this class directly; it is used internally in nose to
-    create test cases for test methods.
-    """
-    __test__ = False # do not collect
-
-    def __init__(self, method, test=None, arg=tuple(), descriptor=None):
-        """Initialize the MethodTestCase.
-
-        Required argument:
-
-        * method -- the method to call, may be bound or unbound. In either
-          case, a new instance of the method's class will be instantiated to
-	  make the call.  Note: In Python 3.x, if using an unbound method, you
-	  must wrap it using pyversion.unbound_method.
-
-        Optional arguments:
-
-        * test -- the test function to call. If this is passed, it will be
-          called instead of getting a new bound method of the same name as the
-          desired method from the test instance. This is to support generator
-          methods that yield inline functions.
-
-        * arg -- arguments to pass to the test function. This is to support
-          generator methods that yield arguments.
-
-        * descriptor -- the function, other than the test, that should be used
-          to construct the test name. This is to support generator methods.
-        """
-        self.method = method
-        self.test = test
-        self.arg = arg
-        self.descriptor = descriptor
-        if isfunction(method):
-            raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase")
-        self.cls = method.im_class
-        self.inst = self.cls()
-        if self.test is None:
-            method_name = self.method.__name__
-            self.test = getattr(self.inst, method_name)
-        TestBase.__init__(self)
-
-    def __str__(self):
-        func, arg = self._descriptors()
-        if hasattr(func, 'compat_func_name'):
-            name = func.compat_func_name
-        else:
-            name = func.__name__
-        name = "%s.%s.%s" % (self.cls.__module__,
-                             self.cls.__name__,
-                             name)
-        if arg:
-            name = "%s%s" % (name, arg)
-        return name
-    __repr__ = __str__
-
-    def address(self):
-        """Return a round-trip name for this test, a name that can be
-        fed back as input to loadTestByName and (assuming the same
-        plugin configuration) result in the loading of this test.
-        """
-        if self.descriptor is not None:
-            return test_address(self.descriptor)
-        else:
-            return test_address(self.method)
-
-    def _context(self):
-        return self.cls
-    context = property(_context, None, None,
-                      """Get context (class) of this test""")
-
-    def setUp(self):
-        try_run(self.inst, ('setup', 'setUp'))
-
-    def tearDown(self):
-        try_run(self.inst, ('teardown', 'tearDown'))
-
-    def _descriptors(self):
-        """Get the descriptors of the test method: the method and
-        arguments that will be used to construct the test name. In
-        most cases, this is the method itself and no arguments. For
-        tests generated by generator methods, the original
-        (generator) method and args passed to the generated method 
-        or function are returned.
-        """
-        if self.descriptor:
-            return self.descriptor, self.arg
-        else:
-            return self.method, self.arg
diff --git a/lib/spack/external/nose/commands.py b/lib/spack/external/nose/commands.py
deleted file mode 100644
index ef0e9caed4..0000000000
--- a/lib/spack/external/nose/commands.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""
-nosetests setuptools command
-----------------------------
-
-The easiest way to run tests with nose is to use the `nosetests` setuptools
-command::
-
-  python setup.py nosetests
-
-This command has one *major* benefit over the standard `test` command: *all
-nose plugins are supported*.
-
-To configure the `nosetests` command, add a [nosetests] section to your
-setup.cfg. The [nosetests] section can contain any command line arguments that
-nosetests supports. The differences between issuing an option on the command
-line and adding it to setup.cfg are:
-
-* In setup.cfg, the -- prefix must be excluded
-* In setup.cfg, command line flags that take no arguments must be given an
-  argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
-
-Here's an example [nosetests] setup.cfg section::
-
-  [nosetests]
-  verbosity=1
-  detailed-errors=1
-  with-coverage=1
-  cover-package=nose
-  debug=nose.loader
-  pdb=1
-  pdb-failures=1
-
-If you commonly run nosetests with a large number of options, using
-the nosetests setuptools command and configuring with setup.cfg can
-make running your tests much less tedious. (Note that the same options
-and format supported in setup.cfg are supported in all other config
-files, and the nosetests script will also load config files.)
-
-Another reason to run tests with the command is that the command will
-install packages listed in your `tests_require`, as well as doing a
-complete build of your package before running tests. For packages with
-dependencies or that build C extensions, using the setuptools command
-can be more convenient than building by hand and running the nosetests
-script.
-
-Bootstrapping
--------------
-
-If you are distributing your project and want users to be able to run tests
-without having to install nose themselves, add nose to the setup_requires
-section of your setup()::
-
-  setup(
-      # ...
-      setup_requires=['nose>=1.0']
-      )
-
-This will direct setuptools to download and activate nose during the setup
-process, making the ``nosetests`` command available.
-
-"""
-try:
-    from setuptools import Command
-except ImportError:
-    Command = nosetests = None
-else:
-    from nose.config import Config, option_blacklist, user_config_files, \
-        flag, _bool
-    from nose.core import TestProgram
-    from nose.plugins import DefaultPluginManager
-
-
-    def get_user_options(parser):
-        """convert a optparse option list into a distutils option tuple list"""
-        opt_list = []
-        for opt in parser.option_list:
-            if opt._long_opts[0][2:] in option_blacklist: 
-                continue
-            long_name = opt._long_opts[0][2:]
-            if opt.action not in ('store_true', 'store_false'):
-                long_name = long_name + "="
-            short_name = None
-            if opt._short_opts:
-                short_name =  opt._short_opts[0][1:]
-            opt_list.append((long_name, short_name, opt.help or ""))
-        return opt_list
-
-
-    class nosetests(Command):
-        description = "Run unit tests using nosetests"
-        __config = Config(files=user_config_files(),
-                          plugins=DefaultPluginManager())
-        __parser = __config.getParser()
-        user_options = get_user_options(__parser)
-
-        def initialize_options(self):
-            """create the member variables, but change hyphens to
-            underscores
-            """
-
-            self.option_to_cmds = {}
-            for opt in self.__parser.option_list:
-                cmd_name = opt._long_opts[0][2:]
-                option_name = cmd_name.replace('-', '_')
-                self.option_to_cmds[option_name] = cmd_name
-                setattr(self, option_name, None)
-            self.attr  = None
-
-        def finalize_options(self):
-            """nothing to do here"""
-            pass
-
-        def run(self):
-            """ensure tests are capable of being run, then
-            run nose.main with a reconstructed argument list"""
-            if getattr(self.distribution, 'use_2to3', False):
-                # If we run 2to3 we can not do this inplace:
-
-                # Ensure metadata is up-to-date
-                build_py = self.get_finalized_command('build_py')
-                build_py.inplace = 0
-                build_py.run()
-                bpy_cmd = self.get_finalized_command("build_py")
-                build_path = bpy_cmd.build_lib
-
-                # Build extensions
-                egg_info = self.get_finalized_command('egg_info')
-                egg_info.egg_base = build_path
-                egg_info.run()
-
-                build_ext = self.get_finalized_command('build_ext')
-                build_ext.inplace = 0
-                build_ext.run()
-            else:
-                self.run_command('egg_info')
-
-                # Build extensions in-place
-                build_ext = self.get_finalized_command('build_ext')
-                build_ext.inplace = 1
-                build_ext.run()
-
-            if self.distribution.install_requires:
-                self.distribution.fetch_build_eggs(
-                    self.distribution.install_requires)
-            if self.distribution.tests_require:
-                self.distribution.fetch_build_eggs(
-                    self.distribution.tests_require)
-
-            ei_cmd = self.get_finalized_command("egg_info")
-            argv = ['nosetests', '--where', ei_cmd.egg_base] 
-            for (option_name, cmd_name) in self.option_to_cmds.items():
-                if option_name in option_blacklist:
-                    continue
-                value = getattr(self, option_name)
-                if value is not None:
-                    argv.extend(
-                        self.cfgToArg(option_name.replace('_', '-'), value))
-            TestProgram(argv=argv, config=self.__config)
-
-        def cfgToArg(self, optname, value):
-            argv = []
-            long_optname = '--' + optname
-            opt = self.__parser.get_option(long_optname)
-            if opt.action in ('store_true', 'store_false'):
-                if not flag(value):
-                    raise ValueError("Invalid value '%s' for '%s'" % (
-                        value, optname))
-                if _bool(value):
-                    argv.append(long_optname)
-            else:
-                argv.extend([long_optname, value])
-            return argv
diff --git a/lib/spack/external/nose/config.py b/lib/spack/external/nose/config.py
deleted file mode 100644
index 125eb5579d..0000000000
--- a/lib/spack/external/nose/config.py
+++ /dev/null
@@ -1,661 +0,0 @@
-import logging
-import optparse
-import os
-import re
-import sys
-import ConfigParser
-from optparse import OptionParser
-from nose.util import absdir, tolist
-from nose.plugins.manager import NoPlugins
-from warnings import warn, filterwarnings
-
-log = logging.getLogger(__name__)
-
-# not allowed in config files
-option_blacklist = ['help', 'verbose']
-
-config_files = [
-    # Linux users will prefer this
-    "~/.noserc",
-    # Windows users will prefer this
-    "~/nose.cfg"
-    ]
-
-# plaforms on which the exe check defaults to off
-# Windows and IronPython
-exe_allowed_platforms = ('win32', 'cli')
-
-filterwarnings("always", category=DeprecationWarning,
-               module=r'(.*\.)?nose\.config')
-
-class NoSuchOptionError(Exception):
-    def __init__(self, name):
-        Exception.__init__(self, name)
-        self.name = name
-
-
-class ConfigError(Exception):
-    pass
-
-
-class ConfiguredDefaultsOptionParser(object):
-    """
-    Handler for options from commandline and config files.
-    """
-    def __init__(self, parser, config_section, error=None, file_error=None):
-        self._parser = parser
-        self._config_section = config_section
-        if error is None:
-            error = self._parser.error
-        self._error = error
-        if file_error is None:
-            file_error = lambda msg, **kw: error(msg)
-        self._file_error = file_error
-
-    def _configTuples(self, cfg, filename):
-        config = []
-        if self._config_section in cfg.sections():
-            for name, value in cfg.items(self._config_section):
-                config.append((name, value, filename))
-        return config
-
-    def _readFromFilenames(self, filenames):
-        config = []
-        for filename in filenames:
-            cfg = ConfigParser.RawConfigParser()
-            try:
-                cfg.read(filename)
-            except ConfigParser.Error, exc:
-                raise ConfigError("Error reading config file %r: %s" %
-                                  (filename, str(exc)))
-            config.extend(self._configTuples(cfg, filename))
-        return config
-
-    def _readFromFileObject(self, fh):
-        cfg = ConfigParser.RawConfigParser()
-        try:
-            filename = fh.name
-        except AttributeError:
-            filename = '<???>'
-        try:
-            cfg.readfp(fh)
-        except ConfigParser.Error, exc:
-            raise ConfigError("Error reading config file %r: %s" %
-                              (filename, str(exc)))
-        return self._configTuples(cfg, filename)
-
-    def _readConfiguration(self, config_files):
-        try:
-            config_files.readline
-        except AttributeError:
-            filename_or_filenames = config_files
-            if isinstance(filename_or_filenames, basestring):
-                filenames = [filename_or_filenames]
-            else:
-                filenames = filename_or_filenames
-            config = self._readFromFilenames(filenames)
-        else:
-            fh = config_files
-            config = self._readFromFileObject(fh)
-        return config
-
-    def _processConfigValue(self, name, value, values, parser):
-        opt_str = '--' + name
-        option = parser.get_option(opt_str)
-        if option is None:
-            raise NoSuchOptionError(name)
-        else:
-            option.process(opt_str, value, values, parser)
-
-    def _applyConfigurationToValues(self, parser, config, values):
-        for name, value, filename in config:
-            if name in option_blacklist:
-                continue
-            try:
-                self._processConfigValue(name, value, values, parser)
-            except NoSuchOptionError, exc:
-                self._file_error(
-                    "Error reading config file %r: "
-                    "no such option %r" % (filename, exc.name),
-                    name=name, filename=filename)
-            except optparse.OptionValueError, exc:
-                msg = str(exc).replace('--' + name, repr(name), 1)
-                self._file_error("Error reading config file %r: "
-                                 "%s" % (filename, msg),
-                                 name=name, filename=filename)
-
-    def parseArgsAndConfigFiles(self, args, config_files):
-        values = self._parser.get_default_values()
-        try:
-            config = self._readConfiguration(config_files)
-        except ConfigError, exc:
-            self._error(str(exc))
-        else:
-            try:
-                self._applyConfigurationToValues(self._parser, config, values)
-            except ConfigError, exc:
-                self._error(str(exc))
-        return self._parser.parse_args(args, values)
-
-
-class Config(object):
-    """nose configuration.
-
-    Instances of Config are used throughout nose to configure
-    behavior, including plugin lists. Here are the default values for
-    all config keys::
-
-      self.env = env = kw.pop('env', {})
-      self.args = ()
-      self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
-      self.addPaths = not env.get('NOSE_NOPATH', False)
-      self.configSection = 'nosetests'
-      self.debug = env.get('NOSE_DEBUG')
-      self.debugLog = env.get('NOSE_DEBUG_LOG')
-      self.exclude = None
-      self.getTestCaseNamesCompat = False
-      self.includeExe = env.get('NOSE_INCLUDE_EXE',
-                                sys.platform in exe_allowed_platforms)
-      self.ignoreFiles = (re.compile(r'^\.'),
-                          re.compile(r'^_'),
-                          re.compile(r'^setup\.py$')
-                          )
-      self.include = None
-      self.loggingConfig = None
-      self.logStream = sys.stderr
-      self.options = NoOptions()
-      self.parser = None
-      self.plugins = NoPlugins()
-      self.srcDirs = ('lib', 'src')
-      self.runOnInit = True
-      self.stopOnError = env.get('NOSE_STOP', False)
-      self.stream = sys.stderr
-      self.testNames = ()
-      self.verbosity = int(env.get('NOSE_VERBOSE', 1))
-      self.where = ()
-      self.py3where = ()
-      self.workingDir = None
-    """
-
-    def __init__(self, **kw):
-        self.env = env = kw.pop('env', {})
-        self.args = ()
-        self.testMatchPat = env.get('NOSE_TESTMATCH',
-                                    r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
-        self.testMatch = re.compile(self.testMatchPat)
-        self.addPaths = not env.get('NOSE_NOPATH', False)
-        self.configSection = 'nosetests'
-        self.debug = env.get('NOSE_DEBUG')
-        self.debugLog = env.get('NOSE_DEBUG_LOG')
-        self.exclude = None
-        self.getTestCaseNamesCompat = False
-        self.includeExe = env.get('NOSE_INCLUDE_EXE',
-                                  sys.platform in exe_allowed_platforms)
-        self.ignoreFilesDefaultStrings = [r'^\.',
-                                          r'^_',
-                                          r'^setup\.py$',
-                                          ]
-        self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
-        self.include = None
-        self.loggingConfig = None
-        self.logStream = sys.stderr
-        self.options = NoOptions()
-        self.parser = None
-        self.plugins = NoPlugins()
-        self.srcDirs = ('lib', 'src')
-        self.runOnInit = True
-        self.stopOnError = env.get('NOSE_STOP', False)
-        self.stream = sys.stderr
-        self.testNames = []
-        self.verbosity = int(env.get('NOSE_VERBOSE', 1))
-        self.where = ()
-        self.py3where = ()
-        self.workingDir = os.getcwd()
-        self.traverseNamespace = False
-        self.firstPackageWins = False
-        self.parserClass = OptionParser
-        self.worker = False
-
-        self._default = self.__dict__.copy()
-        self.update(kw)
-        self._orig = self.__dict__.copy()
-
-    def __getstate__(self):
-        state = self.__dict__.copy()
-        del state['stream']
-        del state['_orig']
-        del state['_default']
-        del state['env']
-        del state['logStream']
-        # FIXME remove plugins, have only plugin manager class
-        state['plugins'] = self.plugins.__class__
-        return state
-
-    def __setstate__(self, state):
-        plugincls = state.pop('plugins')
-        self.update(state)
-        self.worker = True
-        # FIXME won't work for static plugin lists
-        self.plugins = plugincls()
-        self.plugins.loadPlugins()
-        # needed so .can_configure gets set appropriately
-        dummy_parser = self.parserClass()
-        self.plugins.addOptions(dummy_parser, {})
-        self.plugins.configure(self.options, self)
-
-    def __repr__(self):
-        d = self.__dict__.copy()
-        # don't expose env, could include sensitive info
-        d['env'] = {}
-        keys = [ k for k in d.keys()
-                 if not k.startswith('_') ]
-        keys.sort()
-        return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
-                                          for k in keys ])
-    __str__ = __repr__
-
-    def _parseArgs(self, argv, cfg_files):
-        def warn_sometimes(msg, name=None, filename=None):
-            if (hasattr(self.plugins, 'excludedOption') and
-                self.plugins.excludedOption(name)):
-                msg = ("Option %r in config file %r ignored: "
-                       "excluded by runtime environment" %
-                       (name, filename))
-                warn(msg, RuntimeWarning)
-            else:
-                raise ConfigError(msg)
-        parser = ConfiguredDefaultsOptionParser(
-            self.getParser(), self.configSection, file_error=warn_sometimes)
-        return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
-
-    def configure(self, argv=None, doc=None):
-        """Configure the nose running environment. Execute configure before
-        collecting tests with nose.TestCollector to enable output capture and
-        other features.
-        """
-        env = self.env
-        if argv is None:
-            argv = sys.argv
-
-        cfg_files = getattr(self, 'files', [])
-        options, args = self._parseArgs(argv, cfg_files)
-        # If -c --config has been specified on command line,
-        # load those config files and reparse
-        if getattr(options, 'files', []):
-            options, args = self._parseArgs(argv, options.files)
-
-        self.options = options
-        if args:
-            self.testNames = args
-        if options.testNames is not None:
-            self.testNames.extend(tolist(options.testNames))
-
-        if options.py3where is not None:
-            if sys.version_info >= (3,):
-                options.where = options.py3where
-
-        # `where` is an append action, so it can't have a default value
-        # in the parser, or that default will always be in the list
-        if not options.where:
-            options.where = env.get('NOSE_WHERE', None)
-
-        # include and exclude also
-        if not options.ignoreFiles:
-            options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
-        if not options.include:
-            options.include = env.get('NOSE_INCLUDE', [])
-        if not options.exclude:
-            options.exclude = env.get('NOSE_EXCLUDE', [])
-
-        self.addPaths = options.addPaths
-        self.stopOnError = options.stopOnError
-        self.verbosity = options.verbosity
-        self.includeExe = options.includeExe
-        self.traverseNamespace = options.traverseNamespace
-        self.debug = options.debug
-        self.debugLog = options.debugLog
-        self.loggingConfig = options.loggingConfig
-        self.firstPackageWins = options.firstPackageWins
-        self.configureLogging()
-
-        if not options.byteCompile:
-            sys.dont_write_bytecode = True
-
-        if options.where is not None:
-            self.configureWhere(options.where)
-
-        if options.testMatch:
-            self.testMatch = re.compile(options.testMatch)
-
-        if options.ignoreFiles:
-            self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
-            log.info("Ignoring files matching %s", options.ignoreFiles)
-        else:
-            log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
-
-        if options.include:
-            self.include = map(re.compile, tolist(options.include))
-            log.info("Including tests matching %s", options.include)
-
-        if options.exclude:
-            self.exclude = map(re.compile, tolist(options.exclude))
-            log.info("Excluding tests matching %s", options.exclude)
-
-        # When listing plugins we don't want to run them
-        if not options.showPlugins:
-            self.plugins.configure(options, self)
-            self.plugins.begin()
-
-    def configureLogging(self):
-        """Configure logging for nose, or optionally other packages. Any logger
-        name may be set with the debug option, and that logger will be set to
-        debug level and be assigned the same handler as the nose loggers, unless
-        it already has a handler.
-        """
-        if self.loggingConfig:
-            from logging.config import fileConfig
-            fileConfig(self.loggingConfig)
-            return
-
-        format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
-        if self.debugLog:
-            handler = logging.FileHandler(self.debugLog)
-        else:
-            handler = logging.StreamHandler(self.logStream)
-        handler.setFormatter(format)
-
-        logger = logging.getLogger('nose')
-        logger.propagate = 0
-
-        # only add our default handler if there isn't already one there
-        # this avoids annoying duplicate log messages.
-        found = False
-        if self.debugLog:
-            debugLogAbsPath = os.path.abspath(self.debugLog)
-            for h in logger.handlers:
-                if type(h) == logging.FileHandler and \
-                        h.baseFilename == debugLogAbsPath:
-                    found = True
-        else:
-            for h in logger.handlers:
-                if type(h) == logging.StreamHandler and \
-                        h.stream == self.logStream:
-                    found = True
-        if not found:
-            logger.addHandler(handler)
-
-        # default level
-        lvl = logging.WARNING
-        if self.verbosity >= 5:
-            lvl = 0
-        elif self.verbosity >= 4:
-            lvl = logging.DEBUG
-        elif self.verbosity >= 3:
-            lvl = logging.INFO
-        logger.setLevel(lvl)
-
-        # individual overrides
-        if self.debug:
-            # no blanks
-            debug_loggers = [ name for name in self.debug.split(',')
-                              if name ]
-            for logger_name in debug_loggers:
-                l = logging.getLogger(logger_name)
-                l.setLevel(logging.DEBUG)
-                if not l.handlers and not logger_name.startswith('nose'):
-                    l.addHandler(handler)
-
-    def configureWhere(self, where):
-        """Configure the working directory or directories for the test run.
-        """
-        from nose.importer import add_path
-        self.workingDir = None
-        where = tolist(where)
-        warned = False
-        for path in where:
-            if not self.workingDir:
-                abs_path = absdir(path)
-                if abs_path is None:
-                    raise ValueError("Working directory '%s' not found, or "
-                                     "not a directory" % path)
-                log.info("Set working dir to %s", abs_path)
-                self.workingDir = abs_path
-                if self.addPaths and \
-                       os.path.exists(os.path.join(abs_path, '__init__.py')):
-                    log.info("Working directory %s is a package; "
-                             "adding to sys.path" % abs_path)
-                    add_path(abs_path)
-                continue
-            if not warned:
-                warn("Use of multiple -w arguments is deprecated and "
-                     "support may be removed in a future release. You can "
-                     "get the same behavior by passing directories without "
-                     "the -w argument on the command line, or by using the "
-                     "--tests argument in a configuration file.",
-                     DeprecationWarning)
-                warned = True
-            self.testNames.append(path)
-
-    def default(self):
-        """Reset all config values to defaults.
-        """
-        self.__dict__.update(self._default)
-
-    def getParser(self, doc=None):
-        """Get the command line option parser.
-        """
-        if self.parser:
-            return self.parser
-        env = self.env
-        parser = self.parserClass(doc)
-        parser.add_option(
-            "-V","--version", action="store_true",
-            dest="version", default=False,
-            help="Output nose version and exit")
-        parser.add_option(
-            "-p", "--plugins", action="store_true",
-            dest="showPlugins", default=False,
-            help="Output list of available plugins and exit. Combine with "
-            "higher verbosity for greater detail")
-        parser.add_option(
-            "-v", "--verbose",
-            action="count", dest="verbosity",
-            default=self.verbosity,
-            help="Be more verbose. [NOSE_VERBOSE]")
-        parser.add_option(
-            "--verbosity", action="store", dest="verbosity",
-            metavar='VERBOSITY',
-            type="int", help="Set verbosity; --verbosity=2 is "
-            "the same as -v")
-        parser.add_option(
-            "-q", "--quiet", action="store_const", const=0, dest="verbosity",
-            help="Be less verbose")
-        parser.add_option(
-            "-c", "--config", action="append", dest="files",
-            metavar="FILES",
-            help="Load configuration from config file(s). May be specified "
-            "multiple times; in that case, all config files will be "
-            "loaded and combined")
-        parser.add_option(
-            "-w", "--where", action="append", dest="where",
-            metavar="WHERE",
-            help="Look for tests in this directory. "
-            "May be specified multiple times. The first directory passed "
-            "will be used as the working directory, in place of the current "
-            "working directory, which is the default. Others will be added "
-            "to the list of tests to execute. [NOSE_WHERE]"
-            )
-        parser.add_option(
-            "--py3where", action="append", dest="py3where",
-            metavar="PY3WHERE",
-            help="Look for tests in this directory under Python 3.x. "
-            "Functions the same as 'where', but only applies if running under "
-            "Python 3.x or above.  Note that, if present under 3.x, this "
-            "option completely replaces any directories specified with "
-            "'where', so the 'where' option becomes ineffective. "
-            "[NOSE_PY3WHERE]"
-            )
-        parser.add_option(
-            "-m", "--match", "--testmatch", action="store",
-            dest="testMatch", metavar="REGEX",
-            help="Files, directories, function names, and class names "
-            "that match this regular expression are considered tests.  "
-            "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
-            default=self.testMatchPat)
-        parser.add_option(
-            "--tests", action="store", dest="testNames", default=None,
-            metavar='NAMES',
-            help="Run these tests (comma-separated list). This argument is "
-            "useful mainly from configuration files; on the command line, "
-            "just pass the tests to run as additional arguments with no "
-            "switch.")
-        parser.add_option(
-            "-l", "--debug", action="store",
-            dest="debug", default=self.debug,
-            help="Activate debug logging for one or more systems. "
-            "Available debug loggers: nose, nose.importer, "
-            "nose.inspector, nose.plugins, nose.result and "
-            "nose.selector. Separate multiple names with a comma.")
-        parser.add_option(
-            "--debug-log", dest="debugLog", action="store",
-            default=self.debugLog, metavar="FILE",
-            help="Log debug messages to this file "
-            "(default: sys.stderr)")
-        parser.add_option(
-            "--logging-config", "--log-config",
-            dest="loggingConfig", action="store",
-            default=self.loggingConfig, metavar="FILE",
-            help="Load logging config from this file -- bypasses all other"
-            " logging config settings.")
-        parser.add_option(
-            "-I", "--ignore-files", action="append", dest="ignoreFiles",
-            metavar="REGEX",
-            help="Completely ignore any file that matches this regular "
-            "expression. Takes precedence over any other settings or "
-            "plugins. "
-            "Specifying this option will replace the default setting. "
-            "Specify this option multiple times "
-            "to add more regular expressions [NOSE_IGNORE_FILES]")
-        parser.add_option(
-            "-e", "--exclude", action="append", dest="exclude",
-            metavar="REGEX",
-            help="Don't run tests that match regular "
-            "expression [NOSE_EXCLUDE]")
-        parser.add_option(
-            "-i", "--include", action="append", dest="include",
-            metavar="REGEX",
-            help="This regular expression will be applied to files, "
-            "directories, function names, and class names for a chance "
-            "to include additional tests that do not match TESTMATCH.  "
-            "Specify this option multiple times "
-            "to add more regular expressions [NOSE_INCLUDE]")
-        parser.add_option(
-            "-x", "--stop", action="store_true", dest="stopOnError",
-            default=self.stopOnError,
-            help="Stop running tests after the first error or failure")
-        parser.add_option(
-            "-P", "--no-path-adjustment", action="store_false",
-            dest="addPaths",
-            default=self.addPaths,
-            help="Don't make any changes to sys.path when "
-            "loading tests [NOSE_NOPATH]")
-        parser.add_option(
-            "--exe", action="store_true", dest="includeExe",
-            default=self.includeExe,
-            help="Look for tests in python modules that are "
-            "executable. Normal behavior is to exclude executable "
-            "modules, since they may not be import-safe "
-            "[NOSE_INCLUDE_EXE]")
-        parser.add_option(
-            "--noexe", action="store_false", dest="includeExe",
-            help="DO NOT look for tests in python modules that are "
-            "executable. (The default on the windows platform is to "
-            "do so.)")
-        parser.add_option(
-            "--traverse-namespace", action="store_true",
-            default=self.traverseNamespace, dest="traverseNamespace",
-            help="Traverse through all path entries of a namespace package")
-        parser.add_option(
-            "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
-            action="store_true", default=False, dest="firstPackageWins",
-            help="nose's importer will normally evict a package from sys."
-            "modules if it sees a package with the same name in a different "
-            "location. Set this option to disable that behavior.")
-        parser.add_option(
-            "--no-byte-compile",
-            action="store_false", default=True, dest="byteCompile",
-            help="Prevent nose from byte-compiling the source into .pyc files "
-            "while nose is scanning for and running tests.")
-
-        self.plugins.loadPlugins()
-        self.pluginOpts(parser)
-
-        self.parser = parser
-        return parser
-
-    def help(self, doc=None):
-        """Return the generated help message
-        """
-        return self.getParser(doc).format_help()
-
-    def pluginOpts(self, parser):
-        self.plugins.addOptions(parser, self.env)
-
-    def reset(self):
-        self.__dict__.update(self._orig)
-
-    def todict(self):
-        return self.__dict__.copy()
-
-    def update(self, d):
-        self.__dict__.update(d)
-
-
-class NoOptions(object):
-    """Options container that returns None for all options.
-    """
-    def __getstate__(self):
-        return {}
-
-    def __setstate__(self, state):
-        pass
-
-    def __getnewargs__(self):
-        return ()
-
-    def __nonzero__(self):
-        return False
-
-
-def user_config_files():
-    """Return path to any existing user config files
-    """
-    return filter(os.path.exists,
-                  map(os.path.expanduser, config_files))
-
-
-def all_config_files():
-    """Return path to any existing user config files, plus any setup.cfg
-    in the current working directory.
-    """
-    user = user_config_files()
-    if os.path.exists('setup.cfg'):
-        return user + ['setup.cfg']
-    return user
-
-
-# used when parsing config files
-def flag(val):
-    """Does the value look like an on/off flag?"""
-    if val == 1:
-        return True
-    elif val == 0:
-        return False
-    val = str(val)
-    if len(val) > 5:
-        return False
-    return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
-
-
-def _bool(val):
-    return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
diff --git a/lib/spack/external/nose/core.py b/lib/spack/external/nose/core.py
deleted file mode 100644
index 49e7939b98..0000000000
--- a/lib/spack/external/nose/core.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""Implements nose test program and collector.
-"""
-from __future__ import generators
-
-import logging
-import os
-import sys
-import time
-import unittest
-
-from nose.config import Config, all_config_files
-from nose.loader import defaultTestLoader
-from nose.plugins.manager import PluginManager, DefaultPluginManager, \
-     RestrictedPluginManager
-from nose.result import TextTestResult
-from nose.suite import FinalizingSuiteWrapper
-from nose.util import isclass, tolist
-
-
-log = logging.getLogger('nose.core')
-compat_24 = sys.version_info >= (2, 4)
-
-__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
-           'TextTestRunner']
-
-
-class TextTestRunner(unittest.TextTestRunner):
-    """Test runner that uses nose's TextTestResult to enable errorClasses,
-    as well as providing hooks for plugins to override or replace the test
-    output stream, results, and the test case itself.
-    """
-    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
-                 config=None):
-        if config is None:
-            config = Config()
-        self.config = config
-        unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
-
-
-    def _makeResult(self):
-        return TextTestResult(self.stream,
-                              self.descriptions,
-                              self.verbosity,
-                              self.config)
-
-    def run(self, test):
-        """Overrides to provide plugin hooks and defer all output to
-        the test result class.
-        """
-        wrapper = self.config.plugins.prepareTest(test)
-        if wrapper is not None:
-            test = wrapper
-
-        # plugins can decorate or capture the output stream
-        wrapped = self.config.plugins.setOutputStream(self.stream)
-        if wrapped is not None:
-            self.stream = wrapped
-
-        result = self._makeResult()
-        start = time.time()
-        try:
-            test(result)
-        except KeyboardInterrupt:
-            pass
-        stop = time.time()
-        result.printErrors()
-        result.printSummary(start, stop)
-        self.config.plugins.finalize(result)
-        return result
-
-
-class TestProgram(unittest.TestProgram):
-    """Collect and run tests, returning success or failure.
-
-    The arguments to TestProgram() are the same as to
-    :func:`main()` and :func:`run()`:
-
-    * module: All tests are in this module (default: None)
-    * defaultTest: Tests to load (default: '.')
-    * argv: Command line arguments (default: None; sys.argv is read)
-    * testRunner: Test runner instance (default: None)
-    * testLoader: Test loader instance (default: None)
-    * env: Environment; ignored if config is provided (default: None;
-      os.environ is read)
-    * config: :class:`nose.config.Config` instance (default: None)
-    * suite: Suite or list of tests to run (default: None). Passing a
-      suite or lists of tests will bypass all test discovery and
-      loading. *ALSO NOTE* that if you pass a unittest.TestSuite
-      instance as the suite, context fixtures at the class, module and
-      package level will not be used, and many plugin hooks will not
-      be called. If you want normal nose behavior, either pass a list
-      of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
-    * exit: Exit after running tests and printing report (default: True)
-    * plugins: List of plugins to use; ignored if config is provided
-      (default: load plugins with DefaultPluginManager)
-    * addplugins: List of **extra** plugins to use. Pass a list of plugin
-      instances in this argument to make custom plugins available while
-      still using the DefaultPluginManager.
-    """
-    verbosity = 1
-
-    def __init__(self, module=None, defaultTest='.', argv=None,
-                 testRunner=None, testLoader=None, env=None, config=None,
-                 suite=None, exit=True, plugins=None, addplugins=None):
-        if env is None:
-            env = os.environ
-        if config is None:
-            config = self.makeConfig(env, plugins)
-        if addplugins:
-            config.plugins.addPlugins(extraplugins=addplugins)
-        self.config = config
-        self.suite = suite
-        self.exit = exit
-        extra_args = {}
-        version = sys.version_info[0:2]
-        if version >= (2,7) and version != (3,0):
-            extra_args['exit'] = exit
-        unittest.TestProgram.__init__(
-            self, module=module, defaultTest=defaultTest,
-            argv=argv, testRunner=testRunner, testLoader=testLoader,
-            **extra_args)
-
-    def getAllConfigFiles(self, env=None):
-        env = env or {}
-        if env.get('NOSE_IGNORE_CONFIG_FILES', False):
-            return []
-        else:
-            return all_config_files()
-
-    def makeConfig(self, env, plugins=None):
-        """Load a Config, pre-filled with user config files if any are
-        found.
-        """
-        cfg_files = self.getAllConfigFiles(env)
-        if plugins:
-            manager = PluginManager(plugins=plugins)
-        else:
-            manager = DefaultPluginManager()
-        return Config(
-            env=env, files=cfg_files, plugins=manager)
-
-    def parseArgs(self, argv):
-        """Parse argv and env and configure running environment.
-        """
-        self.config.configure(argv, doc=self.usage())
-        log.debug("configured %s", self.config)
-
-        # quick outs: version, plugins (optparse would have already
-        # caught and exited on help)
-        if self.config.options.version:
-            from nose import __version__
-            sys.stdout = sys.__stdout__
-            print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
-            sys.exit(0)
-
-        if self.config.options.showPlugins:
-            self.showPlugins()
-            sys.exit(0)
-
-        if self.testLoader is None:
-            self.testLoader = defaultTestLoader(config=self.config)
-        elif isclass(self.testLoader):
-            self.testLoader = self.testLoader(config=self.config)
-        plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
-        if plug_loader is not None:
-            self.testLoader = plug_loader
-        log.debug("test loader is %s", self.testLoader)
-
-        # FIXME if self.module is a string, add it to self.testNames? not sure
-
-        if self.config.testNames:
-            self.testNames = self.config.testNames
-        else:
-            self.testNames = tolist(self.defaultTest)
-        log.debug('defaultTest %s', self.defaultTest)
-        log.debug('Test names are %s', self.testNames)
-        if self.config.workingDir is not None:
-            os.chdir(self.config.workingDir)
-        self.createTests()
-
-    def createTests(self):
-        """Create the tests to run. If a self.suite
-        is set, then that suite will be used. Otherwise, tests will be
-        loaded from the given test names (self.testNames) using the
-        test loader.
-        """
-        log.debug("createTests called with %s", self.suite)
-        if self.suite is not None:
-            # We were given an explicit suite to run. Make sure it's
-            # loaded and wrapped correctly.
-            self.test = self.testLoader.suiteClass(self.suite)
-        else:
-            self.test = self.testLoader.loadTestsFromNames(self.testNames)
-
-    def runTests(self):
-        """Run Tests. Returns true on success, false on failure, and sets
-        self.success to the same value.
-        """
-        log.debug("runTests called")
-        if self.testRunner is None:
-            self.testRunner = TextTestRunner(stream=self.config.stream,
-                                             verbosity=self.config.verbosity,
-                                             config=self.config)
-        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
-        if plug_runner is not None:
-            self.testRunner = plug_runner
-        result = self.testRunner.run(self.test)
-        self.success = result.wasSuccessful()
-        if self.exit:
-            sys.exit(not self.success)
-        return self.success
-
-    def showPlugins(self):
-        """Print list of available plugins.
-        """
-        import textwrap
-
-        class DummyParser:
-            def __init__(self):
-                self.options = []
-            def add_option(self, *arg, **kw):
-                self.options.append((arg, kw.pop('help', '')))
-
-        v = self.config.verbosity
-        self.config.plugins.sort()
-        for p in self.config.plugins:
-            print "Plugin %s" % p.name
-            if v >= 2:
-                print "  score: %s" % p.score
-                print '\n'.join(textwrap.wrap(p.help().strip(),
-                                              initial_indent='  ',
-                                              subsequent_indent='  '))
-                if v >= 3:
-                    parser = DummyParser()
-                    p.addOptions(parser)
-                    if len(parser.options):
-                        print
-                        print "  Options:"
-                        for opts, help in parser.options:
-                            print '  %s' % (', '.join(opts))
-                            if help:
-                                print '\n'.join(
-                                    textwrap.wrap(help.strip(),
-                                                  initial_indent='    ',
-                                                  subsequent_indent='    '))
-                print
-
-    def usage(cls):
-        import nose
-        try:
-            ld = nose.__loader__
-            text = ld.get_data(os.path.join(
-                os.path.dirname(__file__), 'usage.txt'))
-        except AttributeError:
-            f = open(os.path.join(
-                os.path.dirname(__file__), 'usage.txt'), 'r')
-            try:
-                text = f.read()
-            finally:
-                f.close()
-        # Ensure that we return str, not bytes.
-        if not isinstance(text, str):
-            text = text.decode('utf-8')
-        return text
-    usage = classmethod(usage)
-
-# backwards compatibility
-run_exit = main = TestProgram
-
-
-def run(*arg, **kw):
-    """Collect and run tests, returning success or failure.
-
-    The arguments to `run()` are the same as to `main()`:
-
-    * module: All tests are in this module (default: None)
-    * defaultTest: Tests to load (default: '.')
-    * argv: Command line arguments (default: None; sys.argv is read)
-    * testRunner: Test runner instance (default: None)
-    * testLoader: Test loader instance (default: None)
-    * env: Environment; ignored if config is provided (default: None;
-      os.environ is read)
-    * config: :class:`nose.config.Config` instance (default: None)
-    * suite: Suite or list of tests to run (default: None). Passing a
-      suite or lists of tests will bypass all test discovery and
-      loading. *ALSO NOTE* that if you pass a unittest.TestSuite
-      instance as the suite, context fixtures at the class, module and
-      package level will not be used, and many plugin hooks will not
-      be called. If you want normal nose behavior, either pass a list
-      of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
-    * plugins: List of plugins to use; ignored if config is provided
-      (default: load plugins with DefaultPluginManager)
-    * addplugins: List of **extra** plugins to use. Pass a list of plugin
-      instances in this argument to make custom plugins available while
-      still using the DefaultPluginManager.
-
-    With the exception that the ``exit`` argument is always set
-    to False.
-    """
-    kw['exit'] = False
-    return TestProgram(*arg, **kw).success
-
-
-def runmodule(name='__main__', **kw):
-    """Collect and run tests in a single module only. Defaults to running
-    tests in __main__. Additional arguments to TestProgram may be passed
-    as keyword arguments.
-    """
-    main(defaultTest=name, **kw)
-
-
-def collector():
-    """TestSuite replacement entry point. Use anywhere you might use a
-    unittest.TestSuite. The collector will, by default, load options from
-    all config files and execute loader.loadTestsFromNames() on the
-    configured testNames, or '.' if no testNames are configured.
-    """
-    # plugins that implement any of these methods are disabled, since
-    # we don't control the test runner and won't be able to run them
-    # finalize() is also not called, but plugins that use it aren't disabled,
-    # because capture needs it.
-    setuptools_incompat = ('report', 'prepareTest',
-                           'prepareTestLoader', 'prepareTestRunner',
-                           'setOutputStream')
-
-    plugins = RestrictedPluginManager(exclude=setuptools_incompat)
-    conf = Config(files=all_config_files(),
-                  plugins=plugins)
-    conf.configure(argv=['collector'])
-    loader = defaultTestLoader(conf)
-
-    if conf.testNames:
-        suite = loader.loadTestsFromNames(conf.testNames)
-    else:
-        suite = loader.loadTestsFromNames(('.',))
-    return FinalizingSuiteWrapper(suite, plugins.finalize)
-
-
-
-if __name__ == '__main__':
-    main()
diff --git a/lib/spack/external/nose/exc.py b/lib/spack/external/nose/exc.py
deleted file mode 100644
index 8b780db0d4..0000000000
--- a/lib/spack/external/nose/exc.py
+++ /dev/null
@@ -1,9 +0,0 @@
-"""Exceptions for marking tests as skipped or deprecated.
-
-This module exists to provide backwards compatibility with previous
-versions of nose where skipped and deprecated tests were core
-functionality, rather than being provided by plugins. It may be
-removed in a future release.
-"""
-from nose.plugins.skip import SkipTest
-from nose.plugins.deprecated import DeprecatedTest
diff --git a/lib/spack/external/nose/ext/__init__.py b/lib/spack/external/nose/ext/__init__.py
deleted file mode 100644
index 5fd1516a09..0000000000
--- a/lib/spack/external/nose/ext/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-"""
-External or vendor files
-"""
diff --git a/lib/spack/external/nose/ext/dtcompat.py b/lib/spack/external/nose/ext/dtcompat.py
deleted file mode 100644
index 332cf08c12..0000000000
--- a/lib/spack/external/nose/ext/dtcompat.py
+++ /dev/null
@@ -1,2272 +0,0 @@
-# Module doctest.
-# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
-# Major enhancements and refactoring by:
-#     Jim Fulton
-#     Edward Loper
-
-# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
-#
-# Modified for inclusion in nose to provide support for DocFileTest in
-# python 2.3:
-#
-# - all doctests removed from module (they fail under 2.3 and 2.5) 
-# - now handles the $py.class extension when ran under Jython
-
-r"""Module doctest -- a framework for running examples in docstrings.
-
-In simplest use, end each module M to be tested with:
-
-def _test():
-    import doctest
-    doctest.testmod()
-
-if __name__ == "__main__":
-    _test()
-
-Then running the module as a script will cause the examples in the
-docstrings to get executed and verified:
-
-python M.py
-
-This won't display anything unless an example fails, in which case the
-failing example(s) and the cause(s) of the failure(s) are printed to stdout
-(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
-line of output is "Test failed.".
-
-Run it with the -v switch instead:
-
-python M.py -v
-
-and a detailed report of all examples tried is printed to stdout, along
-with assorted summaries at the end.
-
-You can force verbose mode by passing "verbose=True" to testmod, or prohibit
-it by passing "verbose=False".  In either of those cases, sys.argv is not
-examined by testmod.
-
-There are a variety of other ways to run doctests, including integration
-with the unittest framework, and support for running non-Python text
-files containing doctests.  There are also many ways to override parts
-of doctest's default behaviors.  See the Library Reference Manual for
-details.
-"""
-
-__docformat__ = 'reStructuredText en'
-
-__all__ = [
-    # 0, Option Flags
-    'register_optionflag',
-    'DONT_ACCEPT_TRUE_FOR_1',
-    'DONT_ACCEPT_BLANKLINE',
-    'NORMALIZE_WHITESPACE',
-    'ELLIPSIS',
-    'IGNORE_EXCEPTION_DETAIL',
-    'COMPARISON_FLAGS',
-    'REPORT_UDIFF',
-    'REPORT_CDIFF',
-    'REPORT_NDIFF',
-    'REPORT_ONLY_FIRST_FAILURE',
-    'REPORTING_FLAGS',
-    # 1. Utility Functions
-    'is_private',
-    # 2. Example & DocTest
-    'Example',
-    'DocTest',
-    # 3. Doctest Parser
-    'DocTestParser',
-    # 4. Doctest Finder
-    'DocTestFinder',
-    # 5. Doctest Runner
-    'DocTestRunner',
-    'OutputChecker',
-    'DocTestFailure',
-    'UnexpectedException',
-    'DebugRunner',
-    # 6. Test Functions
-    'testmod',
-    'testfile',
-    'run_docstring_examples',
-    # 7. Tester
-    'Tester',
-    # 8. Unittest Support
-    'DocTestSuite',
-    'DocFileSuite',
-    'set_unittest_reportflags',
-    # 9. Debugging Support
-    'script_from_examples',
-    'testsource',
-    'debug_src',
-    'debug',
-]
-
-import __future__
-
-import sys, traceback, inspect, linecache, os, re
-import unittest, difflib, pdb, tempfile
-import warnings
-from StringIO import StringIO
-
-# Don't whine about the deprecated is_private function in this
-# module's tests.
-warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
-                        __name__, 0)
-
-# There are 4 basic classes:
-#  - Example: a <source, want> pair, plus an intra-docstring line number.
-#  - DocTest: a collection of examples, parsed from a docstring, plus
-#    info about where the docstring came from (name, filename, lineno).
-#  - DocTestFinder: extracts DocTests from a given object's docstring and
-#    its contained objects' docstrings.
-#  - DocTestRunner: runs DocTest cases, and accumulates statistics.
-#
-# So the basic picture is:
-#
-#                             list of:
-# +------+                   +---------+                   +-------+
-# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
-# +------+                   +---------+                   +-------+
-#                            | Example |
-#                            |   ...   |
-#                            | Example |
-#                            +---------+
-
-# Option constants.
-
-OPTIONFLAGS_BY_NAME = {}
-def register_optionflag(name):
-    # Create a new flag unless `name` is already known.
-    return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
-
-DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
-DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
-NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
-ELLIPSIS = register_optionflag('ELLIPSIS')
-IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
-
-COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
-                    DONT_ACCEPT_BLANKLINE |
-                    NORMALIZE_WHITESPACE |
-                    ELLIPSIS |
-                    IGNORE_EXCEPTION_DETAIL)
-
-REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
-REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
-REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
-REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
-
-REPORTING_FLAGS = (REPORT_UDIFF |
-                   REPORT_CDIFF |
-                   REPORT_NDIFF |
-                   REPORT_ONLY_FIRST_FAILURE)
-
-# Special string markers for use in `want` strings:
-BLANKLINE_MARKER = '<BLANKLINE>'
-ELLIPSIS_MARKER = '...'
-
-######################################################################
-## Table of Contents
-######################################################################
-#  1. Utility Functions
-#  2. Example & DocTest -- store test cases
-#  3. DocTest Parser -- extracts examples from strings
-#  4. DocTest Finder -- extracts test cases from objects
-#  5. DocTest Runner -- runs test cases
-#  6. Test Functions -- convenient wrappers for testing
-#  7. Tester Class -- for backwards compatibility
-#  8. Unittest Support
-#  9. Debugging Support
-# 10. Example Usage
-
-######################################################################
-## 1. Utility Functions
-######################################################################
-
-def is_private(prefix, base):
-    """prefix, base -> true iff name prefix + "." + base is "private".
-
-    Prefix may be an empty string, and base does not contain a period.
-    Prefix is ignored (although functions you write conforming to this
-    protocol may make use of it).
-    Return true iff base begins with an (at least one) underscore, but
-    does not both begin and end with (at least) two underscores.
-    """
-    warnings.warn("is_private is deprecated; it wasn't useful; "
-                  "examine DocTestFinder.find() lists instead",
-                  DeprecationWarning, stacklevel=2)
-    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
-
-def _extract_future_flags(globs):
-    """
-    Return the compiler-flags associated with the future features that
-    have been imported into the given namespace (globs).
-    """
-    flags = 0
-    for fname in __future__.all_feature_names:
-        feature = globs.get(fname, None)
-        if feature is getattr(__future__, fname):
-            flags |= feature.compiler_flag
-    return flags
-
-def _normalize_module(module, depth=2):
-    """
-    Return the module specified by `module`.  In particular:
-      - If `module` is a module, then return module.
-      - If `module` is a string, then import and return the
-        module with that name.
-      - If `module` is None, then return the calling module.
-        The calling module is assumed to be the module of
-        the stack frame at the given depth in the call stack.
-    """
-    if inspect.ismodule(module):
-        return module
-    elif isinstance(module, (str, unicode)):
-        return __import__(module, globals(), locals(), ["*"])
-    elif module is None:
-        return sys.modules[sys._getframe(depth).f_globals['__name__']]
-    else:
-        raise TypeError("Expected a module, string, or None")
-
-def _indent(s, indent=4):
-    """
-    Add the given number of space characters to the beginning every
-    non-blank line in `s`, and return the result.
-    """
-    # This regexp matches the start of non-blank lines:
-    return re.sub('(?m)^(?!$)', indent*' ', s)
-
-def _exception_traceback(exc_info):
-    """
-    Return a string containing a traceback message for the given
-    exc_info tuple (as returned by sys.exc_info()).
-    """
-    # Get a traceback message.
-    excout = StringIO()
-    exc_type, exc_val, exc_tb = exc_info
-    traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
-    return excout.getvalue()
-
-# Override some StringIO methods.
-class _SpoofOut(StringIO):
-    def getvalue(self):
-        result = StringIO.getvalue(self)
-        # If anything at all was written, make sure there's a trailing
-        # newline.  There's no way for the expected output to indicate
-        # that a trailing newline is missing.
-        if result and not result.endswith("\n"):
-            result += "\n"
-        # Prevent softspace from screwing up the next test case, in
-        # case they used print with a trailing comma in an example.
-        if hasattr(self, "softspace"):
-            del self.softspace
-        return result
-
-    def truncate(self,   size=None):
-        StringIO.truncate(self, size)
-        if hasattr(self, "softspace"):
-            del self.softspace
-
-# Worst-case linear-time ellipsis matching.
-def _ellipsis_match(want, got):
-    if ELLIPSIS_MARKER not in want:
-        return want == got
-
-    # Find "the real" strings.
-    ws = want.split(ELLIPSIS_MARKER)
-    assert len(ws) >= 2
-
-    # Deal with exact matches possibly needed at one or both ends.
-    startpos, endpos = 0, len(got)
-    w = ws[0]
-    if w:   # starts with exact match
-        if got.startswith(w):
-            startpos = len(w)
-            del ws[0]
-        else:
-            return False
-    w = ws[-1]
-    if w:   # ends with exact match
-        if got.endswith(w):
-            endpos -= len(w)
-            del ws[-1]
-        else:
-            return False
-
-    if startpos > endpos:
-        # Exact end matches required more characters than we have, as in
-        # _ellipsis_match('aa...aa', 'aaa')
-        return False
-
-    # For the rest, we only need to find the leftmost non-overlapping
-    # match for each piece.  If there's no overall match that way alone,
-    # there's no overall match period.
-    for w in ws:
-        # w may be '' at times, if there are consecutive ellipses, or
-        # due to an ellipsis at the start or end of `want`.  That's OK.
-        # Search for an empty string succeeds, and doesn't change startpos.
-        startpos = got.find(w, startpos, endpos)
-        if startpos < 0:
-            return False
-        startpos += len(w)
-
-    return True
-
-def _comment_line(line):
-    "Return a commented form of the given line"
-    line = line.rstrip()
-    if line:
-        return '# '+line
-    else:
-        return '#'
-
-class _OutputRedirectingPdb(pdb.Pdb):
-    """
-    A specialized version of the python debugger that redirects stdout
-    to a given stream when interacting with the user.  Stdout is *not*
-    redirected when traced code is executed.
-    """
-    def __init__(self, out):
-        self.__out = out
-        pdb.Pdb.__init__(self)
-
-    def trace_dispatch(self, *args):
-        # Redirect stdout to the given stream.
-        save_stdout = sys.stdout
-        sys.stdout = self.__out
-        # Call Pdb's trace dispatch method.
-        try:
-            return pdb.Pdb.trace_dispatch(self, *args)
-        finally:
-            sys.stdout = save_stdout
-
-# [XX] Normalize with respect to os.path.pardir?
-def _module_relative_path(module, path):
-    if not inspect.ismodule(module):
-        raise TypeError, 'Expected a module: %r' % module
-    if path.startswith('/'):
-        raise ValueError, 'Module-relative files may not have absolute paths'
-
-    # Find the base directory for the path.
-    if hasattr(module, '__file__'):
-        # A normal module/package
-        basedir = os.path.split(module.__file__)[0]
-    elif module.__name__ == '__main__':
-        # An interactive session.
-        if len(sys.argv)>0 and sys.argv[0] != '':
-            basedir = os.path.split(sys.argv[0])[0]
-        else:
-            basedir = os.curdir
-    else:
-        # A module w/o __file__ (this includes builtins)
-        raise ValueError("Can't resolve paths relative to the module " +
-                         module + " (it has no __file__)")
-
-    # Combine the base directory and the path.
-    return os.path.join(basedir, *(path.split('/')))
-
-######################################################################
-## 2. Example & DocTest
-######################################################################
-## - An "example" is a <source, want> pair, where "source" is a
-##   fragment of source code, and "want" is the expected output for
-##   "source."  The Example class also includes information about
-##   where the example was extracted from.
-##
-## - A "doctest" is a collection of examples, typically extracted from
-##   a string (such as an object's docstring).  The DocTest class also
-##   includes information about where the string was extracted from.
-
-class Example:
-    """
-    A single doctest example, consisting of source code and expected
-    output.  `Example` defines the following attributes:
-
-      - source: A single Python statement, always ending with a newline.
-        The constructor adds a newline if needed.
-
-      - want: The expected output from running the source code (either
-        from stdout, or a traceback in case of exception).  `want` ends
-        with a newline unless it's empty, in which case it's an empty
-        string.  The constructor adds a newline if needed.
-
-      - exc_msg: The exception message generated by the example, if
-        the example is expected to generate an exception; or `None` if
-        it is not expected to generate an exception.  This exception
-        message is compared against the return value of
-        `traceback.format_exception_only()`.  `exc_msg` ends with a
-        newline unless it's `None`.  The constructor adds a newline
-        if needed.
-
-      - lineno: The line number within the DocTest string containing
-        this Example where the Example begins.  This line number is
-        zero-based, with respect to the beginning of the DocTest.
-
-      - indent: The example's indentation in the DocTest string.
-        I.e., the number of space characters that preceed the
-        example's first prompt.
-
-      - options: A dictionary mapping from option flags to True or
-        False, which is used to override default options for this
-        example.  Any option flags not contained in this dictionary
-        are left at their default value (as specified by the
-        DocTestRunner's optionflags).  By default, no options are set.
-    """
-    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
-                 options=None):
-        # Normalize inputs.
-        if not source.endswith('\n'):
-            source += '\n'
-        if want and not want.endswith('\n'):
-            want += '\n'
-        if exc_msg is not None and not exc_msg.endswith('\n'):
-            exc_msg += '\n'
-        # Store properties.
-        self.source = source
-        self.want = want
-        self.lineno = lineno
-        self.indent = indent
-        if options is None: options = {}
-        self.options = options
-        self.exc_msg = exc_msg
-
-class DocTest:
-    """
-    A collection of doctest examples that should be run in a single
-    namespace.  Each `DocTest` defines the following attributes:
-
-      - examples: the list of examples.
-
-      - globs: The namespace (aka globals) that the examples should
-        be run in.
-
-      - name: A name identifying the DocTest (typically, the name of
-        the object whose docstring this DocTest was extracted from).
-
-      - filename: The name of the file that this DocTest was extracted
-        from, or `None` if the filename is unknown.
-
-      - lineno: The line number within filename where this DocTest
-        begins, or `None` if the line number is unavailable.  This
-        line number is zero-based, with respect to the beginning of
-        the file.
-
-      - docstring: The string that the examples were extracted from,
-        or `None` if the string is unavailable.
-    """
-    def __init__(self, examples, globs, name, filename, lineno, docstring):
-        """
-        Create a new DocTest containing the given examples.  The
-        DocTest's globals are initialized with a copy of `globs`.
-        """
-        assert not isinstance(examples, basestring), \
-               "DocTest no longer accepts str; use DocTestParser instead"
-        self.examples = examples
-        self.docstring = docstring
-        self.globs = globs.copy()
-        self.name = name
-        self.filename = filename
-        self.lineno = lineno
-
-    def __repr__(self):
-        if len(self.examples) == 0:
-            examples = 'no examples'
-        elif len(self.examples) == 1:
-            examples = '1 example'
-        else:
-            examples = '%d examples' % len(self.examples)
-        return ('<DocTest %s from %s:%s (%s)>' %
-                (self.name, self.filename, self.lineno, examples))
-
-
-    # This lets us sort tests by name:
-    def __cmp__(self, other):
-        if not isinstance(other, DocTest):
-            return -1
-        return cmp((self.name, self.filename, self.lineno, id(self)),
-                   (other.name, other.filename, other.lineno, id(other)))
-
-######################################################################
-## 3. DocTestParser
-######################################################################
-
-class DocTestParser:
-    """
-    A class used to parse strings containing doctest examples.
-    """
-    # This regular expression is used to find doctest examples in a
-    # string.  It defines three groups: `source` is the source code
-    # (including leading indentation and prompts); `indent` is the
-    # indentation of the first (PS1) line of the source code; and
-    # `want` is the expected output (including leading indentation).
-    _EXAMPLE_RE = re.compile(r'''
-        # Source consists of a PS1 line followed by zero or more PS2 lines.
-        (?P<source>
-            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
-            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
-        \n?
-        # Want consists of any non-blank lines that do not start with PS1.
-        (?P<want> (?:(?![ ]*$)    # Not a blank line
-                     (?![ ]*>>>)  # Not a line starting with PS1
-                     .*$\n?       # But any other line
-                  )*)
-        ''', re.MULTILINE | re.VERBOSE)
-
-    # A regular expression for handling `want` strings that contain
-    # expected exceptions.  It divides `want` into three pieces:
-    #    - the traceback header line (`hdr`)
-    #    - the traceback stack (`stack`)
-    #    - the exception message (`msg`), as generated by
-    #      traceback.format_exception_only()
-    # `msg` may have multiple lines.  We assume/require that the
-    # exception message is the first non-indented line starting with a word
-    # character following the traceback header line.
-    _EXCEPTION_RE = re.compile(r"""
-        # Grab the traceback header.  Different versions of Python have
-        # said different things on the first traceback line.
-        ^(?P<hdr> Traceback\ \(
-            (?: most\ recent\ call\ last
-            |   innermost\ last
-            ) \) :
-        )
-        \s* $                # toss trailing whitespace on the header.
-        (?P<stack> .*?)      # don't blink: absorb stuff until...
-        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
-        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
-
-    # A callable returning a true value iff its argument is a blank line
-    # or contains a single comment.
-    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
-
-    def parse(self, string, name='<string>'):
-        """
-        Divide the given string into examples and intervening text,
-        and return them as a list of alternating Examples and strings.
-        Line numbers for the Examples are 0-based.  The optional
-        argument `name` is a name identifying this string, and is only
-        used for error messages.
-        """
-        string = string.expandtabs()
-        # If all lines begin with the same indentation, then strip it.
-        min_indent = self._min_indent(string)
-        if min_indent > 0:
-            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
-
-        output = []
-        charno, lineno = 0, 0
-        # Find all doctest examples in the string:
-        for m in self._EXAMPLE_RE.finditer(string):
-            # Add the pre-example text to `output`.
-            output.append(string[charno:m.start()])
-            # Update lineno (lines before this example)
-            lineno += string.count('\n', charno, m.start())
-            # Extract info from the regexp match.
-            (source, options, want, exc_msg) = \
-                     self._parse_example(m, name, lineno)
-            # Create an Example, and add it to the list.
-            if not self._IS_BLANK_OR_COMMENT(source):
-                output.append( Example(source, want, exc_msg,
-                                    lineno=lineno,
-                                    indent=min_indent+len(m.group('indent')),
-                                    options=options) )
-            # Update lineno (lines inside this example)
-            lineno += string.count('\n', m.start(), m.end())
-            # Update charno.
-            charno = m.end()
-        # Add any remaining post-example text to `output`.
-        output.append(string[charno:])
-        return output
-
-    def get_doctest(self, string, globs, name, filename, lineno):
-        """
-        Extract all doctest examples from the given string, and
-        collect them into a `DocTest` object.
-
-        `globs`, `name`, `filename`, and `lineno` are attributes for
-        the new `DocTest` object.  See the documentation for `DocTest`
-        for more information.
-        """
-        return DocTest(self.get_examples(string, name), globs,
-                       name, filename, lineno, string)
-
-    def get_examples(self, string, name='<string>'):
-        """
-        Extract all doctest examples from the given string, and return
-        them as a list of `Example` objects.  Line numbers are
-        0-based, because it's most common in doctests that nothing
-        interesting appears on the same line as opening triple-quote,
-        and so the first interesting line is called \"line 1\" then.
-
-        The optional argument `name` is a name identifying this
-        string, and is only used for error messages.
-        """
-        return [x for x in self.parse(string, name)
-                if isinstance(x, Example)]
-
-    def _parse_example(self, m, name, lineno):
-        """
-        Given a regular expression match from `_EXAMPLE_RE` (`m`),
-        return a pair `(source, want)`, where `source` is the matched
-        example's source code (with prompts and indentation stripped);
-        and `want` is the example's expected output (with indentation
-        stripped).
-
-        `name` is the string's name, and `lineno` is the line number
-        where the example starts; both are used for error messages.
-        """
-        # Get the example's indentation level.
-        indent = len(m.group('indent'))
-
-        # Divide source into lines; check that they're properly
-        # indented; and then strip their indentation & prompts.
-        source_lines = m.group('source').split('\n')
-        self._check_prompt_blank(source_lines, indent, name, lineno)
-        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
-        source = '\n'.join([sl[indent+4:] for sl in source_lines])
-
-        # Divide want into lines; check that it's properly indented; and
-        # then strip the indentation.  Spaces before the last newline should
-        # be preserved, so plain rstrip() isn't good enough.
-        want = m.group('want')
-        want_lines = want.split('\n')
-        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
-            del want_lines[-1]  # forget final newline & spaces after it
-        self._check_prefix(want_lines, ' '*indent, name,
-                           lineno + len(source_lines))
-        want = '\n'.join([wl[indent:] for wl in want_lines])
-
-        # If `want` contains a traceback message, then extract it.
-        m = self._EXCEPTION_RE.match(want)
-        if m:
-            exc_msg = m.group('msg')
-        else:
-            exc_msg = None
-
-        # Extract options from the source.
-        options = self._find_options(source, name, lineno)
-
-        return source, options, want, exc_msg
-
-    # This regular expression looks for option directives in the
-    # source code of an example.  Option directives are comments
-    # starting with "doctest:".  Warning: this may give false
-    # positives for string-literals that contain the string
-    # "#doctest:".  Eliminating these false positives would require
-    # actually parsing the string; but we limit them by ignoring any
-    # line containing "#doctest:" that is *followed* by a quote mark.
-    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
-                                      re.MULTILINE)
-
-    def _find_options(self, source, name, lineno):
-        """
-        Return a dictionary containing option overrides extracted from
-        option directives in the given source string.
-
-        `name` is the string's name, and `lineno` is the line number
-        where the example starts; both are used for error messages.
-        """
-        options = {}
-        # (note: with the current regexp, this will match at most once:)
-        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
-            option_strings = m.group(1).replace(',', ' ').split()
-            for option in option_strings:
-                if (option[0] not in '+-' or
-                    option[1:] not in OPTIONFLAGS_BY_NAME):
-                    raise ValueError('line %r of the doctest for %s '
-                                     'has an invalid option: %r' %
-                                     (lineno+1, name, option))
-                flag = OPTIONFLAGS_BY_NAME[option[1:]]
-                options[flag] = (option[0] == '+')
-        if options and self._IS_BLANK_OR_COMMENT(source):
-            raise ValueError('line %r of the doctest for %s has an option '
-                             'directive on a line with no example: %r' %
-                             (lineno, name, source))
-        return options
-
-    # This regular expression finds the indentation of every non-blank
-    # line in a string.
-    _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
-
-    def _min_indent(self, s):
-        "Return the minimum indentation of any non-blank line in `s`"
-        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
-        if len(indents) > 0:
-            return min(indents)
-        else:
-            return 0
-
-    def _check_prompt_blank(self, lines, indent, name, lineno):
-        """
-        Given the lines of a source string (including prompts and
-        leading indentation), check to make sure that every prompt is
-        followed by a space character.  If any line is not followed by
-        a space character, then raise ValueError.
-        """
-        for i, line in enumerate(lines):
-            if len(line) >= indent+4 and line[indent+3] != ' ':
-                raise ValueError('line %r of the docstring for %s '
-                                 'lacks blank after %s: %r' %
-                                 (lineno+i+1, name,
-                                  line[indent:indent+3], line))
-
-    def _check_prefix(self, lines, prefix, name, lineno):
-        """
-        Check that every line in the given list starts with the given
-        prefix; if any line does not, then raise a ValueError.
-        """
-        for i, line in enumerate(lines):
-            if line and not line.startswith(prefix):
-                raise ValueError('line %r of the docstring for %s has '
-                                 'inconsistent leading whitespace: %r' %
-                                 (lineno+i+1, name, line))
-
-
-######################################################################
-## 4. DocTest Finder
-######################################################################
-
-class DocTestFinder:
-    """
-    A class used to extract the DocTests that are relevant to a given
-    object, from its docstring and the docstrings of its contained
-    objects.  Doctests can currently be extracted from the following
-    object types: modules, functions, classes, methods, staticmethods,
-    classmethods, and properties.
-    """
-
-    def __init__(self, verbose=False, parser=DocTestParser(),
-                 recurse=True, _namefilter=None, exclude_empty=True):
-        """
-        Create a new doctest finder.
-
-        The optional argument `parser` specifies a class or
-        function that should be used to create new DocTest objects (or
-        objects that implement the same interface as DocTest).  The
-        signature for this factory function should match the signature
-        of the DocTest constructor.
-
-        If the optional argument `recurse` is false, then `find` will
-        only examine the given object, and not any contained objects.
-
-        If the optional argument `exclude_empty` is false, then `find`
-        will include tests for objects with empty docstrings.
-        """
-        self._parser = parser
-        self._verbose = verbose
-        self._recurse = recurse
-        self._exclude_empty = exclude_empty
-        # _namefilter is undocumented, and exists only for temporary backward-
-        # compatibility support of testmod's deprecated isprivate mess.
-        self._namefilter = _namefilter
-
-    def find(self, obj, name=None, module=None, globs=None,
-             extraglobs=None):
-        """
-        Return a list of the DocTests that are defined by the given
-        object's docstring, or by any of its contained objects'
-        docstrings.
-
-        The optional parameter `module` is the module that contains
-        the given object.  If the module is not specified or is None, then
-        the test finder will attempt to automatically determine the
-        correct module.  The object's module is used:
-
-            - As a default namespace, if `globs` is not specified.
-            - To prevent the DocTestFinder from extracting DocTests
-              from objects that are imported from other modules.
-            - To find the name of the file containing the object.
-            - To help find the line number of the object within its
-              file.
-
-        Contained objects whose module does not match `module` are ignored.
-
-        If `module` is False, no attempt to find the module will be made.
-        This is obscure, of use mostly in tests:  if `module` is False, or
-        is None but cannot be found automatically, then all objects are
-        considered to belong to the (non-existent) module, so all contained
-        objects will (recursively) be searched for doctests.
-
-        The globals for each DocTest is formed by combining `globs`
-        and `extraglobs` (bindings in `extraglobs` override bindings
-        in `globs`).  A new copy of the globals dictionary is created
-        for each DocTest.  If `globs` is not specified, then it
-        defaults to the module's `__dict__`, if specified, or {}
-        otherwise.  If `extraglobs` is not specified, then it defaults
-        to {}.
-
-        """
-        # If name was not specified, then extract it from the object.
-        if name is None:
-            name = getattr(obj, '__name__', None)
-            if name is None:
-                raise ValueError("DocTestFinder.find: name must be given "
-                        "when obj.__name__ doesn't exist: %r" %
-                                 (type(obj),))
-
-        # Find the module that contains the given object (if obj is
-        # a module, then module=obj.).  Note: this may fail, in which
-        # case module will be None.
-        if module is False:
-            module = None
-        elif module is None:
-            module = inspect.getmodule(obj)
-
-        # Read the module's source code.  This is used by
-        # DocTestFinder._find_lineno to find the line number for a
-        # given object's docstring.
-        try:
-            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
-            source_lines = linecache.getlines(file)
-            if not source_lines:
-                source_lines = None
-        except TypeError:
-            source_lines = None
-
-        # Initialize globals, and merge in extraglobs.
-        if globs is None:
-            if module is None:
-                globs = {}
-            else:
-                globs = module.__dict__.copy()
-        else:
-            globs = globs.copy()
-        if extraglobs is not None:
-            globs.update(extraglobs)
-
-        # Recursively expore `obj`, extracting DocTests.
-        tests = []
-        self._find(tests, obj, name, module, source_lines, globs, {})
-        # Sort the tests by alpha order of names, for consistency in
-        # verbose-mode output.  This was a feature of doctest in Pythons
-        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
-        # 2.4.4 and 2.5.
-        tests.sort()
-        return tests
-
-    def _filter(self, obj, prefix, base):
-        """
-        Return true if the given object should not be examined.
-        """
-        return (self._namefilter is not None and
-                self._namefilter(prefix, base))
-
-    def _from_module(self, module, object):
-        """
-        Return true if the given object is defined in the given
-        module.
-        """
-        if module is None:
-            return True
-        elif inspect.isfunction(object):
-            return module.__dict__ is object.func_globals
-        elif inspect.isclass(object):
-            # Some jython classes don't set __module__
-            return module.__name__ == getattr(object, '__module__', None)
-        elif inspect.getmodule(object) is not None:
-            return module is inspect.getmodule(object)
-        elif hasattr(object, '__module__'):
-            return module.__name__ == object.__module__
-        elif isinstance(object, property):
-            return True # [XX] no way not be sure.
-        else:
-            raise ValueError("object must be a class or function")
-
-    def _find(self, tests, obj, name, module, source_lines, globs, seen):
-        """
-        Find tests for the given object and any contained objects, and
-        add them to `tests`.
-        """
-        if self._verbose:
-            print 'Finding tests in %s' % name
-
-        # If we've already processed this object, then ignore it.
-        if id(obj) in seen:
-            return
-        seen[id(obj)] = 1
-
-        # Find a test for this object, and add it to the list of tests.
-        test = self._get_test(obj, name, module, globs, source_lines)
-        if test is not None:
-            tests.append(test)
-
-        # Look for tests in a module's contained objects.
-        if inspect.ismodule(obj) and self._recurse:
-            for valname, val in obj.__dict__.items():
-                # Check if this contained object should be ignored.
-                if self._filter(val, name, valname):
-                    continue
-                valname = '%s.%s' % (name, valname)
-                # Recurse to functions & classes.
-                if ((inspect.isfunction(val) or inspect.isclass(val)) and
-                    self._from_module(module, val)):
-                    self._find(tests, val, valname, module, source_lines,
-                               globs, seen)
-
-        # Look for tests in a module's __test__ dictionary.
-        if inspect.ismodule(obj) and self._recurse:
-            for valname, val in getattr(obj, '__test__', {}).items():
-                if not isinstance(valname, basestring):
-                    raise ValueError("DocTestFinder.find: __test__ keys "
-                                     "must be strings: %r" %
-                                     (type(valname),))
-                if not (inspect.isfunction(val) or inspect.isclass(val) or
-                        inspect.ismethod(val) or inspect.ismodule(val) or
-                        isinstance(val, basestring)):
-                    raise ValueError("DocTestFinder.find: __test__ values "
-                                     "must be strings, functions, methods, "
-                                     "classes, or modules: %r" %
-                                     (type(val),))
-                valname = '%s.__test__.%s' % (name, valname)
-                self._find(tests, val, valname, module, source_lines,
-                           globs, seen)
-
-        # Look for tests in a class's contained objects.
-        if inspect.isclass(obj) and self._recurse:
-            for valname, val in obj.__dict__.items():
-                # Check if this contained object should be ignored.
-                if self._filter(val, name, valname):
-                    continue
-                # Special handling for staticmethod/classmethod.
-                if isinstance(val, staticmethod):
-                    val = getattr(obj, valname)
-                if isinstance(val, classmethod):
-                    val = getattr(obj, valname).im_func
-
-                # Recurse to methods, properties, and nested classes.
-                if ((inspect.isfunction(val) or inspect.isclass(val) or
-                      isinstance(val, property)) and
-                      self._from_module(module, val)):
-                    valname = '%s.%s' % (name, valname)
-                    self._find(tests, val, valname, module, source_lines,
-                               globs, seen)
-
-    def _get_test(self, obj, name, module, globs, source_lines):
-        """
-        Return a DocTest for the given object, if it defines a docstring;
-        otherwise, return None.
-        """
-        # Extract the object's docstring.  If it doesn't have one,
-        # then return None (no test for this object).
-        if isinstance(obj, basestring):
-            docstring = obj
-        else:
-            try:
-                if obj.__doc__ is None:
-                    docstring = ''
-                else:
-                    docstring = obj.__doc__
-                    if not isinstance(docstring, basestring):
-                        docstring = str(docstring)
-            except (TypeError, AttributeError):
-                docstring = ''
-
-        # Find the docstring's location in the file.
-        lineno = self._find_lineno(obj, source_lines)
-
-        # Don't bother if the docstring is empty.
-        if self._exclude_empty and not docstring:
-            return None
-
-        # Return a DocTest for this object.
-        if module is None:
-            filename = None
-        else:
-            filename = getattr(module, '__file__', module.__name__)
-            if filename[-4:] in (".pyc", ".pyo"):
-                filename = filename[:-1]
-            elif sys.platform.startswith('java') and \
-                    filename.endswith('$py.class'):
-                filename = '%s.py' % filename[:-9]
-        return self._parser.get_doctest(docstring, globs, name,
-                                        filename, lineno)
-
-    def _find_lineno(self, obj, source_lines):
-        """
-        Return a line number of the given object's docstring.  Note:
-        this method assumes that the object has a docstring.
-        """
-        lineno = None
-
-        # Find the line number for modules.
-        if inspect.ismodule(obj):
-            lineno = 0
-
-        # Find the line number for classes.
-        # Note: this could be fooled if a class is defined multiple
-        # times in a single file.
-        if inspect.isclass(obj):
-            if source_lines is None:
-                return None
-            pat = re.compile(r'^\s*class\s*%s\b' %
-                             getattr(obj, '__name__', '-'))
-            for i, line in enumerate(source_lines):
-                if pat.match(line):
-                    lineno = i
-                    break
-
-        # Find the line number for functions & methods.
-        if inspect.ismethod(obj): obj = obj.im_func
-        if inspect.isfunction(obj): obj = obj.func_code
-        if inspect.istraceback(obj): obj = obj.tb_frame
-        if inspect.isframe(obj): obj = obj.f_code
-        if inspect.iscode(obj):
-            lineno = getattr(obj, 'co_firstlineno', None)-1
-
-        # Find the line number where the docstring starts.  Assume
-        # that it's the first line that begins with a quote mark.
-        # Note: this could be fooled by a multiline function
-        # signature, where a continuation line begins with a quote
-        # mark.
-        if lineno is not None:
-            if source_lines is None:
-                return lineno+1
-            pat = re.compile('(^|.*:)\s*\w*("|\')')
-            for lineno in range(lineno, len(source_lines)):
-                if pat.match(source_lines[lineno]):
-                    return lineno
-
-        # We couldn't find the line number.
-        return None
-
-######################################################################
-## 5. DocTest Runner
-######################################################################
-
-class DocTestRunner:
-    # This divider string is used to separate failure messages, and to
-    # separate sections of the summary.
-    DIVIDER = "*" * 70
-
-    def __init__(self, checker=None, verbose=None, optionflags=0):
-        """
-        Create a new test runner.
-
-        Optional keyword arg `checker` is the `OutputChecker` that
-        should be used to compare the expected outputs and actual
-        outputs of doctest examples.
-
-        Optional keyword arg 'verbose' prints lots of stuff if true,
-        only failures if false; by default, it's true iff '-v' is in
-        sys.argv.
-
-        Optional argument `optionflags` can be used to control how the
-        test runner compares expected output to actual output, and how
-        it displays failures.  See the documentation for `testmod` for
-        more information.
-        """
-        self._checker = checker or OutputChecker()
-        if verbose is None:
-            verbose = '-v' in sys.argv
-        self._verbose = verbose
-        self.optionflags = optionflags
-        self.original_optionflags = optionflags
-
-        # Keep track of the examples we've run.
-        self.tries = 0
-        self.failures = 0
-        self._name2ft = {}
-
-        # Create a fake output target for capturing doctest output.
-        self._fakeout = _SpoofOut()
-
-    #/////////////////////////////////////////////////////////////////
-    # Reporting methods
-    #/////////////////////////////////////////////////////////////////
-
-    def report_start(self, out, test, example):
-        """
-        Report that the test runner is about to process the given
-        example.  (Only displays a message if verbose=True)
-        """
-        if self._verbose:
-            if example.want:
-                out('Trying:\n' + _indent(example.source) +
-                    'Expecting:\n' + _indent(example.want))
-            else:
-                out('Trying:\n' + _indent(example.source) +
-                    'Expecting nothing\n')
-
-    def report_success(self, out, test, example, got):
-        """
-        Report that the given example ran successfully.  (Only
-        displays a message if verbose=True)
-        """
-        if self._verbose:
-            out("ok\n")
-
-    def report_failure(self, out, test, example, got):
-        """
-        Report that the given example failed.
-        """
-        out(self._failure_header(test, example) +
-            self._checker.output_difference(example, got, self.optionflags))
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        """
-        Report that the given example raised an unexpected exception.
-        """
-        out(self._failure_header(test, example) +
-            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
-
-    def _failure_header(self, test, example):
-        out = [self.DIVIDER]
-        if test.filename:
-            if test.lineno is not None and example.lineno is not None:
-                lineno = test.lineno + example.lineno + 1
-            else:
-                lineno = '?'
-            out.append('File "%s", line %s, in %s' %
-                       (test.filename, lineno, test.name))
-        else:
-            out.append('Line %s, in %s' % (example.lineno+1, test.name))
-        out.append('Failed example:')
-        source = example.source
-        out.append(_indent(source))
-        return '\n'.join(out)
-
-    #/////////////////////////////////////////////////////////////////
-    # DocTest Running
-    #/////////////////////////////////////////////////////////////////
-
-    def __run(self, test, compileflags, out):
-        """
-        Run the examples in `test`.  Write the outcome of each example
-        with one of the `DocTestRunner.report_*` methods, using the
-        writer function `out`.  `compileflags` is the set of compiler
-        flags that should be used to execute examples.  Return a tuple
-        `(f, t)`, where `t` is the number of examples tried, and `f`
-        is the number of examples that failed.  The examples are run
-        in the namespace `test.globs`.
-        """
-        # Keep track of the number of failures and tries.
-        failures = tries = 0
-
-        # Save the option flags (since option directives can be used
-        # to modify them).
-        original_optionflags = self.optionflags
-
-        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
-
-        check = self._checker.check_output
-
-        # Process each example.
-        for examplenum, example in enumerate(test.examples):
-
-            # If REPORT_ONLY_FIRST_FAILURE is set, then supress
-            # reporting after the first failure.
-            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
-                     failures > 0)
-
-            # Merge in the example's options.
-            self.optionflags = original_optionflags
-            if example.options:
-                for (optionflag, val) in example.options.items():
-                    if val:
-                        self.optionflags |= optionflag
-                    else:
-                        self.optionflags &= ~optionflag
-
-            # Record that we started this example.
-            tries += 1
-            if not quiet:
-                self.report_start(out, test, example)
-
-            # Use a special filename for compile(), so we can retrieve
-            # the source code during interactive debugging (see
-            # __patched_linecache_getlines).
-            filename = '<doctest %s[%d]>' % (test.name, examplenum)
-
-            # Run the example in the given context (globs), and record
-            # any exception that gets raised.  (But don't intercept
-            # keyboard interrupts.)
-            try:
-                # Don't blink!  This is where the user's code gets run.
-                exec compile(example.source, filename, "single",
-                             compileflags, 1) in test.globs
-                self.debugger.set_continue() # ==== Example Finished ====
-                exception = None
-            except KeyboardInterrupt:
-                raise
-            except:
-                exception = sys.exc_info()
-                self.debugger.set_continue() # ==== Example Finished ====
-
-            got = self._fakeout.getvalue()  # the actual output
-            self._fakeout.truncate(0)
-            outcome = FAILURE   # guilty until proved innocent or insane
-
-            # If the example executed without raising any exceptions,
-            # verify its output.
-            if exception is None:
-                if check(example.want, got, self.optionflags):
-                    outcome = SUCCESS
-
-            # The example raised an exception:  check if it was expected.
-            else:
-                exc_info = sys.exc_info()
-                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
-                if not quiet:
-                    got += _exception_traceback(exc_info)
-
-                # If `example.exc_msg` is None, then we weren't expecting
-                # an exception.
-                if example.exc_msg is None:
-                    outcome = BOOM
-
-                # We expected an exception:  see whether it matches.
-                elif check(example.exc_msg, exc_msg, self.optionflags):
-                    outcome = SUCCESS
-
-                # Another chance if they didn't care about the detail.
-                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
-                    m1 = re.match(r'[^:]*:', example.exc_msg)
-                    m2 = re.match(r'[^:]*:', exc_msg)
-                    if m1 and m2 and check(m1.group(0), m2.group(0),
-                                           self.optionflags):
-                        outcome = SUCCESS
-
-            # Report the outcome.
-            if outcome is SUCCESS:
-                if not quiet:
-                    self.report_success(out, test, example, got)
-            elif outcome is FAILURE:
-                if not quiet:
-                    self.report_failure(out, test, example, got)
-                failures += 1
-            elif outcome is BOOM:
-                if not quiet:
-                    self.report_unexpected_exception(out, test, example,
-                                                     exc_info)
-                failures += 1
-            else:
-                assert False, ("unknown outcome", outcome)
-
-        # Restore the option flags (in case they were modified)
-        self.optionflags = original_optionflags
-
-        # Record and return the number of failures and tries.
-        self.__record_outcome(test, failures, tries)
-        return failures, tries
-
-    def __record_outcome(self, test, f, t):
-        """
-        Record the fact that the given DocTest (`test`) generated `f`
-        failures out of `t` tried examples.
-        """
-        f2, t2 = self._name2ft.get(test.name, (0,0))
-        self._name2ft[test.name] = (f+f2, t+t2)
-        self.failures += f
-        self.tries += t
-
-    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
-                                         r'(?P<name>[\w\.]+)'
-                                         r'\[(?P<examplenum>\d+)\]>$')
-    def __patched_linecache_getlines(self, filename):
-        m = self.__LINECACHE_FILENAME_RE.match(filename)
-        if m and m.group('name') == self.test.name:
-            example = self.test.examples[int(m.group('examplenum'))]
-            return example.source.splitlines(True)
-        else:
-            return self.save_linecache_getlines(filename)
-
-    def run(self, test, compileflags=None, out=None, clear_globs=True):
-        """
-        Run the examples in `test`, and display the results using the
-        writer function `out`.
-
-        The examples are run in the namespace `test.globs`.  If
-        `clear_globs` is true (the default), then this namespace will
-        be cleared after the test runs, to help with garbage
-        collection.  If you would like to examine the namespace after
-        the test completes, then use `clear_globs=False`.
-
-        `compileflags` gives the set of flags that should be used by
-        the Python compiler when running the examples.  If not
-        specified, then it will default to the set of future-import
-        flags that apply to `globs`.
-
-        The output of each example is checked using
-        `DocTestRunner.check_output`, and the results are formatted by
-        the `DocTestRunner.report_*` methods.
-        """
-        self.test = test
-
-        if compileflags is None:
-            compileflags = _extract_future_flags(test.globs)
-
-        save_stdout = sys.stdout
-        if out is None:
-            out = save_stdout.write
-        sys.stdout = self._fakeout
-
-        # Patch pdb.set_trace to restore sys.stdout during interactive
-        # debugging (so it's not still redirected to self._fakeout).
-        # Note that the interactive output will go to *our*
-        # save_stdout, even if that's not the real sys.stdout; this
-        # allows us to write test cases for the set_trace behavior.
-        save_set_trace = pdb.set_trace
-        self.debugger = _OutputRedirectingPdb(save_stdout)
-        self.debugger.reset()
-        pdb.set_trace = self.debugger.set_trace
-
-        # Patch linecache.getlines, so we can see the example's source
-        # when we're inside the debugger.
-        self.save_linecache_getlines = linecache.getlines
-        linecache.getlines = self.__patched_linecache_getlines
-
-        try:
-            return self.__run(test, compileflags, out)
-        finally:
-            sys.stdout = save_stdout
-            pdb.set_trace = save_set_trace
-            linecache.getlines = self.save_linecache_getlines
-            if clear_globs:
-                test.globs.clear()
-
-    #/////////////////////////////////////////////////////////////////
-    # Summarization
-    #/////////////////////////////////////////////////////////////////
-    def summarize(self, verbose=None):
-        """
-        Print a summary of all the test cases that have been run by
-        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
-        the total number of failed examples, and `t` is the total
-        number of tried examples.
-
-        The optional `verbose` argument controls how detailed the
-        summary is.  If the verbosity is not specified, then the
-        DocTestRunner's verbosity is used.
-        """
-        if verbose is None:
-            verbose = self._verbose
-        notests = []
-        passed = []
-        failed = []
-        totalt = totalf = 0
-        for x in self._name2ft.items():
-            name, (f, t) = x
-            assert f <= t
-            totalt += t
-            totalf += f
-            if t == 0:
-                notests.append(name)
-            elif f == 0:
-                passed.append( (name, t) )
-            else:
-                failed.append(x)
-        if verbose:
-            if notests:
-                print len(notests), "items had no tests:"
-                notests.sort()
-                for thing in notests:
-                    print "   ", thing
-            if passed:
-                print len(passed), "items passed all tests:"
-                passed.sort()
-                for thing, count in passed:
-                    print " %3d tests in %s" % (count, thing)
-        if failed:
-            print self.DIVIDER
-            print len(failed), "items had failures:"
-            failed.sort()
-            for thing, (f, t) in failed:
-                print " %3d of %3d in %s" % (f, t, thing)
-        if verbose:
-            print totalt, "tests in", len(self._name2ft), "items."
-            print totalt - totalf, "passed and", totalf, "failed."
-        if totalf:
-            print "***Test Failed***", totalf, "failures."
-        elif verbose:
-            print "Test passed."
-        return totalf, totalt
-
-    #/////////////////////////////////////////////////////////////////
-    # Backward compatibility cruft to maintain doctest.master.
-    #/////////////////////////////////////////////////////////////////
-    def merge(self, other):
-        d = self._name2ft
-        for name, (f, t) in other._name2ft.items():
-            if name in d:
-                print "*** DocTestRunner.merge: '" + name + "' in both" \
-                    " testers; summing outcomes."
-                f2, t2 = d[name]
-                f = f + f2
-                t = t + t2
-            d[name] = f, t
-
-class OutputChecker:
-    """
-    A class used to check the whether the actual output from a doctest
-    example matches the expected output.  `OutputChecker` defines two
-    methods: `check_output`, which compares a given pair of outputs,
-    and returns true if they match; and `output_difference`, which
-    returns a string describing the differences between two outputs.
-    """
-    def check_output(self, want, got, optionflags):
-        """
-        Return True iff the actual output from an example (`got`)
-        matches the expected output (`want`).  These strings are
-        always considered to match if they are identical; but
-        depending on what option flags the test runner is using,
-        several non-exact match types are also possible.  See the
-        documentation for `TestRunner` for more information about
-        option flags.
-        """
-        # Handle the common case first, for efficiency:
-        # if they're string-identical, always return true.
-        if got == want:
-            return True
-
-        # The values True and False replaced 1 and 0 as the return
-        # value for boolean comparisons in Python 2.3.
-        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
-            if (got,want) == ("True\n", "1\n"):
-                return True
-            if (got,want) == ("False\n", "0\n"):
-                return True
-
-        # <BLANKLINE> can be used as a special sequence to signify a
-        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
-        if not (optionflags & DONT_ACCEPT_BLANKLINE):
-            # Replace <BLANKLINE> in want with a blank line.
-            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
-                          '', want)
-            # If a line in got contains only spaces, then remove the
-            # spaces.
-            got = re.sub('(?m)^\s*?$', '', got)
-            if got == want:
-                return True
-
-        # This flag causes doctest to ignore any differences in the
-        # contents of whitespace strings.  Note that this can be used
-        # in conjunction with the ELLIPSIS flag.
-        if optionflags & NORMALIZE_WHITESPACE:
-            got = ' '.join(got.split())
-            want = ' '.join(want.split())
-            if got == want:
-                return True
-
-        # The ELLIPSIS flag says to let the sequence "..." in `want`
-        # match any substring in `got`.
-        if optionflags & ELLIPSIS:
-            if _ellipsis_match(want, got):
-                return True
-
-        # We didn't find any match; return false.
-        return False
-
-    # Should we do a fancy diff?
-    def _do_a_fancy_diff(self, want, got, optionflags):
-        # Not unless they asked for a fancy diff.
-        if not optionflags & (REPORT_UDIFF |
-                              REPORT_CDIFF |
-                              REPORT_NDIFF):
-            return False
-
-        # If expected output uses ellipsis, a meaningful fancy diff is
-        # too hard ... or maybe not.  In two real-life failures Tim saw,
-        # a diff was a major help anyway, so this is commented out.
-        # [todo] _ellipsis_match() knows which pieces do and don't match,
-        # and could be the basis for a kick-ass diff in this case.
-        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
-        ##    return False
-
-        # ndiff does intraline difference marking, so can be useful even
-        # for 1-line differences.
-        if optionflags & REPORT_NDIFF:
-            return True
-
-        # The other diff types need at least a few lines to be helpful.
-        return want.count('\n') > 2 and got.count('\n') > 2
-
-    def output_difference(self, example, got, optionflags):
-        """
-        Return a string describing the differences between the
-        expected output for a given example (`example`) and the actual
-        output (`got`).  `optionflags` is the set of option flags used
-        to compare `want` and `got`.
-        """
-        want = example.want
-        # If <BLANKLINE>s are being used, then replace blank lines
-        # with <BLANKLINE> in the actual output string.
-        if not (optionflags & DONT_ACCEPT_BLANKLINE):
-            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
-
-        # Check if we should use diff.
-        if self._do_a_fancy_diff(want, got, optionflags):
-            # Split want & got into lines.
-            want_lines = want.splitlines(True)  # True == keep line ends
-            got_lines = got.splitlines(True)
-            # Use difflib to find their differences.
-            if optionflags & REPORT_UDIFF:
-                diff = difflib.unified_diff(want_lines, got_lines, n=2)
-                diff = list(diff)[2:] # strip the diff header
-                kind = 'unified diff with -expected +actual'
-            elif optionflags & REPORT_CDIFF:
-                diff = difflib.context_diff(want_lines, got_lines, n=2)
-                diff = list(diff)[2:] # strip the diff header
-                kind = 'context diff with expected followed by actual'
-            elif optionflags & REPORT_NDIFF:
-                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
-                diff = list(engine.compare(want_lines, got_lines))
-                kind = 'ndiff with -expected +actual'
-            else:
-                assert 0, 'Bad diff option'
-            # Remove trailing whitespace on diff output.
-            diff = [line.rstrip() + '\n' for line in diff]
-            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
-
-        # If we're not using diff, then simply list the expected
-        # output followed by the actual output.
-        if want and got:
-            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
-        elif want:
-            return 'Expected:\n%sGot nothing\n' % _indent(want)
-        elif got:
-            return 'Expected nothing\nGot:\n%s' % _indent(got)
-        else:
-            return 'Expected nothing\nGot nothing\n'
-
-class DocTestFailure(Exception):
-    """A DocTest example has failed in debugging mode.
-
-    The exception instance has variables:
-
-    - test: the DocTest object being run
-
-    - excample: the Example object that failed
-
-    - got: the actual output
-    """
-    def __init__(self, test, example, got):
-        self.test = test
-        self.example = example
-        self.got = got
-
-    def __str__(self):
-        return str(self.test)
-
-class UnexpectedException(Exception):
-    """A DocTest example has encountered an unexpected exception
-
-    The exception instance has variables:
-
-    - test: the DocTest object being run
-
-    - excample: the Example object that failed
-
-    - exc_info: the exception info
-    """
-    def __init__(self, test, example, exc_info):
-        self.test = test
-        self.example = example
-        self.exc_info = exc_info
-
-    def __str__(self):
-        return str(self.test)
-
-class DebugRunner(DocTestRunner):
-
-    def run(self, test, compileflags=None, out=None, clear_globs=True):
-        r = DocTestRunner.run(self, test, compileflags, out, False)
-        if clear_globs:
-            test.globs.clear()
-        return r
-
-    def report_unexpected_exception(self, out, test, example, exc_info):
-        raise UnexpectedException(test, example, exc_info)
-
-    def report_failure(self, out, test, example, got):
-        raise DocTestFailure(test, example, got)
-
-######################################################################
-## 6. Test Functions
-######################################################################
-# These should be backwards compatible.
-
-# For backward compatibility, a global instance of a DocTestRunner
-# class, updated by testmod.
-master = None
-
-def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
-            report=True, optionflags=0, extraglobs=None,
-            raise_on_error=False, exclude_empty=False):
-    """m=None, name=None, globs=None, verbose=None, isprivate=None,
-       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
-       exclude_empty=False
-
-    Test examples in docstrings in functions and classes reachable
-    from module m (or the current module if m is not supplied), starting
-    with m.__doc__.  Unless isprivate is specified, private names
-    are not skipped.
-
-    Also test examples reachable from dict m.__test__ if it exists and is
-    not None.  m.__test__ maps names to functions, classes and strings;
-    function and class docstrings are tested even if the name is private;
-    strings are tested directly, as if they were docstrings.
-
-    Return (#failures, #tests).
-
-    See doctest.__doc__ for an overview.
-
-    Optional keyword arg "name" gives the name of the module; by default
-    use m.__name__.
-
-    Optional keyword arg "globs" gives a dict to be used as the globals
-    when executing examples; by default, use m.__dict__.  A copy of this
-    dict is actually used for each docstring, so that each docstring's
-    examples start with a clean slate.
-
-    Optional keyword arg "extraglobs" gives a dictionary that should be
-    merged into the globals that are used to execute examples.  By
-    default, no extra globals are used.  This is new in 2.4.
-
-    Optional keyword arg "verbose" prints lots of stuff if true, prints
-    only failures if false; by default, it's true iff "-v" is in sys.argv.
-
-    Optional keyword arg "report" prints a summary at the end when true,
-    else prints nothing at the end.  In verbose mode, the summary is
-    detailed, else very brief (in fact, empty if all tests passed).
-
-    Optional keyword arg "optionflags" or's together module constants,
-    and defaults to 0.  This is new in 2.3.  Possible values (see the
-    docs for details):
-
-        DONT_ACCEPT_TRUE_FOR_1
-        DONT_ACCEPT_BLANKLINE
-        NORMALIZE_WHITESPACE
-        ELLIPSIS
-        IGNORE_EXCEPTION_DETAIL
-        REPORT_UDIFF
-        REPORT_CDIFF
-        REPORT_NDIFF
-        REPORT_ONLY_FIRST_FAILURE
-
-    Optional keyword arg "raise_on_error" raises an exception on the
-    first unexpected exception or failure. This allows failures to be
-    post-mortem debugged.
-
-    Deprecated in Python 2.4:
-    Optional keyword arg "isprivate" specifies a function used to
-    determine whether a name is private.  The default function is
-    treat all functions as public.  Optionally, "isprivate" can be
-    set to doctest.is_private to skip over functions marked as private
-    using the underscore naming convention; see its docs for details.
-
-    Advanced tomfoolery:  testmod runs methods of a local instance of
-    class doctest.Tester, then merges the results into (or creates)
-    global Tester instance doctest.master.  Methods of doctest.master
-    can be called directly too, if you want to do something unusual.
-    Passing report=0 to testmod is especially useful then, to delay
-    displaying a summary.  Invoke doctest.master.summarize(verbose)
-    when you're done fiddling.
-    """
-    global master
-
-    if isprivate is not None:
-        warnings.warn("the isprivate argument is deprecated; "
-                      "examine DocTestFinder.find() lists instead",
-                      DeprecationWarning)
-
-    # If no module was given, then use __main__.
-    if m is None:
-        # DWA - m will still be None if this wasn't invoked from the command
-        # line, in which case the following TypeError is about as good an error
-        # as we should expect
-        m = sys.modules.get('__main__')
-
-    # Check that we were actually given a module.
-    if not inspect.ismodule(m):
-        raise TypeError("testmod: module required; %r" % (m,))
-
-    # If no name was given, then use the module's name.
-    if name is None:
-        name = m.__name__
-
-    # Find, parse, and run all tests in the given module.
-    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
-
-    if raise_on_error:
-        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
-    else:
-        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
-    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
-        runner.run(test)
-
-    if report:
-        runner.summarize()
-
-    if master is None:
-        master = runner
-    else:
-        master.merge(runner)
-
-    return runner.failures, runner.tries
-
-def testfile(filename, module_relative=True, name=None, package=None,
-             globs=None, verbose=None, report=True, optionflags=0,
-             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
-    """
-    Test examples in the given file.  Return (#failures, #tests).
-
-    Optional keyword arg "module_relative" specifies how filenames
-    should be interpreted:
-
-      - If "module_relative" is True (the default), then "filename"
-         specifies a module-relative path.  By default, this path is
-         relative to the calling module's directory; but if the
-         "package" argument is specified, then it is relative to that
-         package.  To ensure os-independence, "filename" should use
-         "/" characters to separate path segments, and should not
-         be an absolute path (i.e., it may not begin with "/").
-
-      - If "module_relative" is False, then "filename" specifies an
-        os-specific path.  The path may be absolute or relative (to
-        the current working directory).
-
-    Optional keyword arg "name" gives the name of the test; by default
-    use the file's basename.
-
-    Optional keyword argument "package" is a Python package or the
-    name of a Python package whose directory should be used as the
-    base directory for a module relative filename.  If no package is
-    specified, then the calling module's directory is used as the base
-    directory for module relative filenames.  It is an error to
-    specify "package" if "module_relative" is False.
-
-    Optional keyword arg "globs" gives a dict to be used as the globals
-    when executing examples; by default, use {}.  A copy of this dict
-    is actually used for each docstring, so that each docstring's
-    examples start with a clean slate.
-
-    Optional keyword arg "extraglobs" gives a dictionary that should be
-    merged into the globals that are used to execute examples.  By
-    default, no extra globals are used.
-
-    Optional keyword arg "verbose" prints lots of stuff if true, prints
-    only failures if false; by default, it's true iff "-v" is in sys.argv.
-
-    Optional keyword arg "report" prints a summary at the end when true,
-    else prints nothing at the end.  In verbose mode, the summary is
-    detailed, else very brief (in fact, empty if all tests passed).
-
-    Optional keyword arg "optionflags" or's together module constants,
-    and defaults to 0.  Possible values (see the docs for details):
-
-        DONT_ACCEPT_TRUE_FOR_1
-        DONT_ACCEPT_BLANKLINE
-        NORMALIZE_WHITESPACE
-        ELLIPSIS
-        IGNORE_EXCEPTION_DETAIL
-        REPORT_UDIFF
-        REPORT_CDIFF
-        REPORT_NDIFF
-        REPORT_ONLY_FIRST_FAILURE
-
-    Optional keyword arg "raise_on_error" raises an exception on the
-    first unexpected exception or failure. This allows failures to be
-    post-mortem debugged.
-
-    Optional keyword arg "parser" specifies a DocTestParser (or
-    subclass) that should be used to extract tests from the files.
-
-    Advanced tomfoolery:  testmod runs methods of a local instance of
-    class doctest.Tester, then merges the results into (or creates)
-    global Tester instance doctest.master.  Methods of doctest.master
-    can be called directly too, if you want to do something unusual.
-    Passing report=0 to testmod is especially useful then, to delay
-    displaying a summary.  Invoke doctest.master.summarize(verbose)
-    when you're done fiddling.
-    """
-    global master
-
-    if package and not module_relative:
-        raise ValueError("Package may only be specified for module-"
-                         "relative paths.")
-
-    # Relativize the path
-    if module_relative:
-        package = _normalize_module(package)
-        filename = _module_relative_path(package, filename)
-
-    # If no name was given, then use the file's name.
-    if name is None:
-        name = os.path.basename(filename)
-
-    # Assemble the globals.
-    if globs is None:
-        globs = {}
-    else:
-        globs = globs.copy()
-    if extraglobs is not None:
-        globs.update(extraglobs)
-
-    if raise_on_error:
-        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
-    else:
-        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-
-    # Read the file, convert it to a test, and run it.
-    s = open(filename).read()
-    test = parser.get_doctest(s, globs, name, filename, 0)
-    runner.run(test)
-
-    if report:
-        runner.summarize()
-
-    if master is None:
-        master = runner
-    else:
-        master.merge(runner)
-
-    return runner.failures, runner.tries
-
-def run_docstring_examples(f, globs, verbose=False, name="NoName",
-                           compileflags=None, optionflags=0):
-    """
-    Test examples in the given object's docstring (`f`), using `globs`
-    as globals.  Optional argument `name` is used in failure messages.
-    If the optional argument `verbose` is true, then generate output
-    even if there are no failures.
-
-    `compileflags` gives the set of flags that should be used by the
-    Python compiler when running the examples.  If not specified, then
-    it will default to the set of future-import flags that apply to
-    `globs`.
-
-    Optional keyword arg `optionflags` specifies options for the
-    testing and output.  See the documentation for `testmod` for more
-    information.
-    """
-    # Find, parse, and run all tests in the given module.
-    finder = DocTestFinder(verbose=verbose, recurse=False)
-    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
-    for test in finder.find(f, name, globs=globs):
-        runner.run(test, compileflags=compileflags)
-
-######################################################################
-## 7. Tester
-######################################################################
-# This is provided only for backwards compatibility.  It's not
-# actually used in any way.
-
-class Tester:
-    def __init__(self, mod=None, globs=None, verbose=None,
-                 isprivate=None, optionflags=0):
-
-        warnings.warn("class Tester is deprecated; "
-                      "use class doctest.DocTestRunner instead",
-                      DeprecationWarning, stacklevel=2)
-        if mod is None and globs is None:
-            raise TypeError("Tester.__init__: must specify mod or globs")
-        if mod is not None and not inspect.ismodule(mod):
-            raise TypeError("Tester.__init__: mod must be a module; %r" %
-                            (mod,))
-        if globs is None:
-            globs = mod.__dict__
-        self.globs = globs
-
-        self.verbose = verbose
-        self.isprivate = isprivate
-        self.optionflags = optionflags
-        self.testfinder = DocTestFinder(_namefilter=isprivate)
-        self.testrunner = DocTestRunner(verbose=verbose,
-                                        optionflags=optionflags)
-
-    def runstring(self, s, name):
-        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
-        if self.verbose:
-            print "Running string", name
-        (f,t) = self.testrunner.run(test)
-        if self.verbose:
-            print f, "of", t, "examples failed in string", name
-        return (f,t)
-
-    def rundoc(self, object, name=None, module=None):
-        f = t = 0
-        tests = self.testfinder.find(object, name, module=module,
-                                     globs=self.globs)
-        for test in tests:
-            (f2, t2) = self.testrunner.run(test)
-            (f,t) = (f+f2, t+t2)
-        return (f,t)
-
-    def rundict(self, d, name, module=None):
-        import new
-        m = new.module(name)
-        m.__dict__.update(d)
-        if module is None:
-            module = False
-        return self.rundoc(m, name, module)
-
-    def run__test__(self, d, name):
-        import new
-        m = new.module(name)
-        m.__test__ = d
-        return self.rundoc(m, name)
-
-    def summarize(self, verbose=None):
-        return self.testrunner.summarize(verbose)
-
-    def merge(self, other):
-        self.testrunner.merge(other.testrunner)
-
-######################################################################
-## 8. Unittest Support
-######################################################################
-
-_unittest_reportflags = 0
-
-def set_unittest_reportflags(flags):
-    global _unittest_reportflags
-
-    if (flags & REPORTING_FLAGS) != flags:
-        raise ValueError("Only reporting flags allowed", flags)
-    old = _unittest_reportflags
-    _unittest_reportflags = flags
-    return old
-
-
-class DocTestCase(unittest.TestCase):
-
-    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
-                 checker=None):
-
-        unittest.TestCase.__init__(self)
-        self._dt_optionflags = optionflags
-        self._dt_checker = checker
-        self._dt_test = test
-        self._dt_setUp = setUp
-        self._dt_tearDown = tearDown
-
-    def setUp(self):
-        test = self._dt_test
-
-        if self._dt_setUp is not None:
-            self._dt_setUp(test)
-
-    def tearDown(self):
-        test = self._dt_test
-
-        if self._dt_tearDown is not None:
-            self._dt_tearDown(test)
-
-        test.globs.clear()
-
-    def runTest(self):
-        test = self._dt_test
-        old = sys.stdout
-        new = StringIO()
-        optionflags = self._dt_optionflags
-
-        if not (optionflags & REPORTING_FLAGS):
-            # The option flags don't include any reporting flags,
-            # so add the default reporting flags
-            optionflags |= _unittest_reportflags
-
-        runner = DocTestRunner(optionflags=optionflags,
-                               checker=self._dt_checker, verbose=False)
-
-        try:
-            runner.DIVIDER = "-"*70
-            failures, tries = runner.run(
-                test, out=new.write, clear_globs=False)
-        finally:
-            sys.stdout = old
-
-        if failures:
-            raise self.failureException(self.format_failure(new.getvalue()))
-
-    def format_failure(self, err):
-        test = self._dt_test
-        if test.lineno is None:
-            lineno = 'unknown line number'
-        else:
-            lineno = '%s' % test.lineno
-        lname = '.'.join(test.name.split('.')[-1:])
-        return ('Failed doctest test for %s\n'
-                '  File "%s", line %s, in %s\n\n%s'
-                % (test.name, test.filename, lineno, lname, err)
-                )
-
-    def debug(self):
-        self.setUp()
-        runner = DebugRunner(optionflags=self._dt_optionflags,
-                             checker=self._dt_checker, verbose=False)
-        runner.run(self._dt_test)
-        self.tearDown()
-
-    def id(self):
-        return self._dt_test.name
-
-    def __repr__(self):
-        name = self._dt_test.name.split('.')
-        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
-
-    __str__ = __repr__
-
-    def shortDescription(self):
-        return "Doctest: " + self._dt_test.name
-
-def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
-                 **options):
-    """
-    Convert doctest tests for a module to a unittest test suite.
-
-    This converts each documentation string in a module that
-    contains doctest tests to a unittest test case.  If any of the
-    tests in a doc string fail, then the test case fails.  An exception
-    is raised showing the name of the file containing the test and a
-    (sometimes approximate) line number.
-
-    The `module` argument provides the module to be tested.  The argument
-    can be either a module or a module name.
-
-    If no argument is given, the calling module is used.
-
-    A number of options may be provided as keyword arguments:
-
-    setUp
-      A set-up function.  This is called before running the
-      tests in each file. The setUp function will be passed a DocTest
-      object.  The setUp function can access the test globals as the
-      globs attribute of the test passed.
-
-    tearDown
-      A tear-down function.  This is called after running the
-      tests in each file.  The tearDown function will be passed a DocTest
-      object.  The tearDown function can access the test globals as the
-      globs attribute of the test passed.
-
-    globs
-      A dictionary containing initial global variables for the tests.
-
-    optionflags
-       A set of doctest option flags expressed as an integer.
-    """
-
-    if test_finder is None:
-        test_finder = DocTestFinder()
-
-    module = _normalize_module(module)
-    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
-    if globs is None:
-        globs = module.__dict__
-    if not tests:
-        # Why do we want to do this? Because it reveals a bug that might
-        # otherwise be hidden.
-        raise ValueError(module, "has no tests")
-
-    tests.sort()
-    suite = unittest.TestSuite()
-    for test in tests:
-        if len(test.examples) == 0:
-            continue
-        if not test.filename:
-            filename = module.__file__
-            if filename[-4:] in (".pyc", ".pyo"):
-                filename = filename[:-1]
-            elif sys.platform.startswith('java') and \
-                    filename.endswith('$py.class'):
-                filename = '%s.py' % filename[:-9]
-            test.filename = filename
-        suite.addTest(DocTestCase(test, **options))
-
-    return suite
-
-class DocFileCase(DocTestCase):
-
-    def id(self):
-        return '_'.join(self._dt_test.name.split('.'))
-
-    def __repr__(self):
-        return self._dt_test.filename
-    __str__ = __repr__
-
-    def format_failure(self, err):
-        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
-                % (self._dt_test.name, self._dt_test.filename, err)
-                )
-
-def DocFileTest(path, module_relative=True, package=None,
-                globs=None, parser=DocTestParser(), **options):
-    if globs is None:
-        globs = {}
-
-    if package and not module_relative:
-        raise ValueError("Package may only be specified for module-"
-                         "relative paths.")
-
-    # Relativize the path.
-    if module_relative:
-        package = _normalize_module(package)
-        path = _module_relative_path(package, path)
-
-    # Find the file and read it.
-    name = os.path.basename(path)
-    doc = open(path).read()
-
-    # Convert it to a test, and wrap it in a DocFileCase.
-    test = parser.get_doctest(doc, globs, name, path, 0)
-    return DocFileCase(test, **options)
-
-def DocFileSuite(*paths, **kw):
-    """A unittest suite for one or more doctest files.
-
-    The path to each doctest file is given as a string; the
-    interpretation of that string depends on the keyword argument
-    "module_relative".
-
-    A number of options may be provided as keyword arguments:
-
-    module_relative
-      If "module_relative" is True, then the given file paths are
-      interpreted as os-independent module-relative paths.  By
-      default, these paths are relative to the calling module's
-      directory; but if the "package" argument is specified, then
-      they are relative to that package.  To ensure os-independence,
-      "filename" should use "/" characters to separate path
-      segments, and may not be an absolute path (i.e., it may not
-      begin with "/").
-
-      If "module_relative" is False, then the given file paths are
-      interpreted as os-specific paths.  These paths may be absolute
-      or relative (to the current working directory).
-
-    package
-      A Python package or the name of a Python package whose directory
-      should be used as the base directory for module relative paths.
-      If "package" is not specified, then the calling module's
-      directory is used as the base directory for module relative
-      filenames.  It is an error to specify "package" if
-      "module_relative" is False.
-
-    setUp
-      A set-up function.  This is called before running the
-      tests in each file. The setUp function will be passed a DocTest
-      object.  The setUp function can access the test globals as the
-      globs attribute of the test passed.
-
-    tearDown
-      A tear-down function.  This is called after running the
-      tests in each file.  The tearDown function will be passed a DocTest
-      object.  The tearDown function can access the test globals as the
-      globs attribute of the test passed.
-
-    globs
-      A dictionary containing initial global variables for the tests.
-
-    optionflags
-      A set of doctest option flags expressed as an integer.
-
-    parser
-      A DocTestParser (or subclass) that should be used to extract
-      tests from the files.
-    """
-    suite = unittest.TestSuite()
-
-    # We do this here so that _normalize_module is called at the right
-    # level.  If it were called in DocFileTest, then this function
-    # would be the caller and we might guess the package incorrectly.
-    if kw.get('module_relative', True):
-        kw['package'] = _normalize_module(kw.get('package'))
-
-    for path in paths:
-        suite.addTest(DocFileTest(path, **kw))
-
-    return suite
-
-######################################################################
-## 9. Debugging Support
-######################################################################
-
-def script_from_examples(s):
-    output = []
-    for piece in DocTestParser().parse(s):
-        if isinstance(piece, Example):
-            # Add the example's source code (strip trailing NL)
-            output.append(piece.source[:-1])
-            # Add the expected output:
-            want = piece.want
-            if want:
-                output.append('# Expected:')
-                output += ['## '+l for l in want.split('\n')[:-1]]
-        else:
-            # Add non-example text.
-            output += [_comment_line(l)
-                       for l in piece.split('\n')[:-1]]
-
-    # Trim junk on both ends.
-    while output and output[-1] == '#':
-        output.pop()
-    while output and output[0] == '#':
-        output.pop(0)
-    # Combine the output, and return it.
-    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
-    return '\n'.join(output) + '\n'
-
-def testsource(module, name):
-    """Extract the test sources from a doctest docstring as a script.
-
-    Provide the module (or dotted name of the module) containing the
-    test to be debugged and the name (within the module) of the object
-    with the doc string with tests to be debugged.
-    """
-    module = _normalize_module(module)
-    tests = DocTestFinder().find(module)
-    test = [t for t in tests if t.name == name]
-    if not test:
-        raise ValueError(name, "not found in tests")
-    test = test[0]
-    testsrc = script_from_examples(test.docstring)
-    return testsrc
-
-def debug_src(src, pm=False, globs=None):
-    """Debug a single doctest docstring, in argument `src`'"""
-    testsrc = script_from_examples(src)
-    debug_script(testsrc, pm, globs)
-
-def debug_script(src, pm=False, globs=None):
-    "Debug a test script.  `src` is the script, as a string."
-    import pdb
-
-    # Note that tempfile.NameTemporaryFile() cannot be used.  As the
-    # docs say, a file so created cannot be opened by name a second time
-    # on modern Windows boxes, and execfile() needs to open it.
-    srcfilename = tempfile.mktemp(".py", "doctestdebug")
-    f = open(srcfilename, 'w')
-    f.write(src)
-    f.close()
-
-    try:
-        if globs:
-            globs = globs.copy()
-        else:
-            globs = {}
-
-        if pm:
-            try:
-                execfile(srcfilename, globs, globs)
-            except:
-                print sys.exc_info()[1]
-                pdb.post_mortem(sys.exc_info()[2])
-        else:
-            # Note that %r is vital here.  '%s' instead can, e.g., cause
-            # backslashes to get treated as metacharacters on Windows.
-            pdb.run("execfile(%r)" % srcfilename, globs, globs)
-
-    finally:
-        os.remove(srcfilename)
-
-def debug(module, name, pm=False):
-    """Debug a single doctest docstring.
-
-    Provide the module (or dotted name of the module) containing the
-    test to be debugged and the name (within the module) of the object
-    with the docstring with tests to be debugged.
-    """
-    module = _normalize_module(module)
-    testsrc = testsource(module, name)
-    debug_script(testsrc, pm, module.__dict__)
-
-
-__test__ = {}
diff --git a/lib/spack/external/nose/failure.py b/lib/spack/external/nose/failure.py
deleted file mode 100644
index c5fabfda5e..0000000000
--- a/lib/spack/external/nose/failure.py
+++ /dev/null
@@ -1,42 +0,0 @@
-import logging
-import unittest
-from traceback import format_tb
-from nose.pyversion import is_base_exception
-
-log = logging.getLogger(__name__)
-
-
-__all__ = ['Failure']
-
-
-class Failure(unittest.TestCase):
-    """Unloadable or unexecutable test.
-
-    A Failure case is placed in a test suite to indicate the presence of a
-    test that could not be loaded or executed. A common example is a test
-    module that fails to import.
-    
-    """
-    __test__ = False # do not collect
-    def __init__(self, exc_class, exc_val, tb=None, address=None):
-        log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb))
-        self.exc_class = exc_class
-        self.exc_val = exc_val
-        self.tb = tb
-        self._address = address
-        unittest.TestCase.__init__(self)
-
-    def __str__(self):
-        return "Failure: %s (%s)" % (
-            getattr(self.exc_class, '__name__', self.exc_class), self.exc_val)
-
-    def address(self):
-        return self._address
-    
-    def runTest(self):
-        if self.tb is not None:
-            if is_base_exception(self.exc_val):
-                raise self.exc_val, None, self.tb
-            raise self.exc_class, self.exc_val, self.tb
-        else:
-            raise self.exc_class(self.exc_val)
diff --git a/lib/spack/external/nose/importer.py b/lib/spack/external/nose/importer.py
deleted file mode 100644
index e677658ce6..0000000000
--- a/lib/spack/external/nose/importer.py
+++ /dev/null
@@ -1,167 +0,0 @@
-"""Implements an importer that looks only in specific path (ignoring
-sys.path), and uses a per-path cache in addition to sys.modules. This is
-necessary because test modules in different directories frequently have the
-same names, which means that the first loaded would mask the rest when using
-the builtin importer.
-"""
-import logging
-import os
-import sys
-from nose.config import Config
-
-from imp import find_module, load_module, acquire_lock, release_lock
-
-log = logging.getLogger(__name__)
-
-try:
-    _samefile = os.path.samefile
-except AttributeError:
-    def _samefile(src, dst):
-        return (os.path.normcase(os.path.realpath(src)) ==
-                os.path.normcase(os.path.realpath(dst)))
-
-
-class Importer(object):
-    """An importer class that does only path-specific imports. That
-    is, the given module is not searched for on sys.path, but only at
-    the path or in the directory specified.
-    """
-    def __init__(self, config=None):
-        if config is None:
-            config = Config()
-        self.config = config
-
-    def importFromPath(self, path, fqname):
-        """Import a dotted-name package whose tail is at path. In other words,
-        given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then
-        bar from path/to/foo/bar, returning bar.
-        """
-        # find the base dir of the package
-        path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep)
-        name_parts = fqname.split('.')
-        if path_parts[-1] == '__init__.py':
-            path_parts.pop()
-        path_parts = path_parts[:-(len(name_parts))]
-        dir_path = os.sep.join(path_parts)
-        # then import fqname starting from that dir
-        return self.importFromDir(dir_path, fqname)
-
-    def importFromDir(self, dir, fqname):
-        """Import a module *only* from path, ignoring sys.path and
-        reloading if the version in sys.modules is not the one we want.
-        """
-        dir = os.path.normpath(os.path.abspath(dir))
-        log.debug("Import %s from %s", fqname, dir)
-
-        # FIXME reimplement local per-dir cache?
-
-        # special case for __main__
-        if fqname == '__main__':
-            return sys.modules[fqname]
-
-        if self.config.addPaths:
-            add_path(dir, self.config)
-
-        path = [dir]
-        parts = fqname.split('.')
-        part_fqname = ''
-        mod = parent = fh = None
-
-        for part in parts:
-            if part_fqname == '':
-                part_fqname = part
-            else:
-                part_fqname = "%s.%s" % (part_fqname, part)
-            try:
-                acquire_lock()
-                log.debug("find module part %s (%s) in %s",
-                          part, part_fqname, path)
-                fh, filename, desc = find_module(part, path)
-                old = sys.modules.get(part_fqname)
-                if old is not None:
-                    # test modules frequently have name overlap; make sure
-                    # we get a fresh copy of anything we are trying to load
-                    # from a new path
-                    log.debug("sys.modules has %s as %s", part_fqname, old)
-                    if (self.sameModule(old, filename)
-                        or (self.config.firstPackageWins and
-                            getattr(old, '__path__', None))):
-                        mod = old
-                    else:
-                        del sys.modules[part_fqname]
-                        mod = load_module(part_fqname, fh, filename, desc)
-                else:
-                    mod = load_module(part_fqname, fh, filename, desc)
-            finally:
-                if fh:
-                    fh.close()
-                release_lock()
-            if parent:
-                setattr(parent, part, mod)
-            if hasattr(mod, '__path__'):
-                path = mod.__path__
-            parent = mod
-        return mod
-
-    def _dirname_if_file(self, filename):
-        # We only take the dirname if we have a path to a non-dir,
-        # because taking the dirname of a symlink to a directory does not
-        # give the actual directory parent.
-        if os.path.isdir(filename):
-            return filename
-        else:
-            return os.path.dirname(filename)
-
-    def sameModule(self, mod, filename):
-        mod_paths = []
-        if hasattr(mod, '__path__'):
-            for path in mod.__path__:
-                mod_paths.append(self._dirname_if_file(path))
-        elif hasattr(mod, '__file__'):
-            mod_paths.append(self._dirname_if_file(mod.__file__))
-        else:
-            # builtin or other module-like object that
-            # doesn't have __file__; must be new
-            return False
-        new_path = self._dirname_if_file(filename)
-        for mod_path in mod_paths:
-            log.debug(
-                "module already loaded? mod: %s new: %s",
-                mod_path, new_path)
-            if _samefile(mod_path, new_path):
-                return True
-        return False
-
-
-def add_path(path, config=None):
-    """Ensure that the path, or the root of the current package (if
-    path is in a package), is in sys.path.
-    """
-
-    # FIXME add any src-looking dirs seen too... need to get config for that
-
-    log.debug('Add path %s' % path)
-    if not path:
-        return []
-    added = []
-    parent = os.path.dirname(path)
-    if (parent
-        and os.path.exists(os.path.join(path, '__init__.py'))):
-        added.extend(add_path(parent, config))
-    elif not path in sys.path:
-        log.debug("insert %s into sys.path", path)
-        sys.path.insert(0, path)
-        added.append(path)
-    if config and config.srcDirs:
-        for dirname in config.srcDirs:
-            dirpath = os.path.join(path, dirname)
-            if os.path.isdir(dirpath):
-                sys.path.insert(0, dirpath)
-                added.append(dirpath)
-    return added
-
-
-def remove_path(path):
-    log.debug('Remove path %s' % path)
-    if path in sys.path:
-        sys.path.remove(path)
diff --git a/lib/spack/external/nose/inspector.py b/lib/spack/external/nose/inspector.py
deleted file mode 100644
index a6c4a3e3b6..0000000000
--- a/lib/spack/external/nose/inspector.py
+++ /dev/null
@@ -1,207 +0,0 @@
-"""Simple traceback introspection. Used to add additional information to
-AssertionErrors in tests, so that failure messages may be more informative.
-"""
-import inspect
-import logging
-import re
-import sys
-import textwrap
-import tokenize
-
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
-
-log = logging.getLogger(__name__)
-
-def inspect_traceback(tb):
-    """Inspect a traceback and its frame, returning source for the expression
-    where the exception was raised, with simple variable replacement performed
-    and the line on which the exception was raised marked with '>>'
-    """
-    log.debug('inspect traceback %s', tb)
-
-    # we only want the innermost frame, where the exception was raised
-    while tb.tb_next:
-        tb = tb.tb_next
-        
-    frame = tb.tb_frame
-    lines, exc_line = tbsource(tb)
-        
-    # figure out the set of lines to grab.
-    inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
-    src = StringIO(textwrap.dedent(''.join(inspect_lines)))
-    exp = Expander(frame.f_locals, frame.f_globals)
-
-    while inspect_lines:
-        try:
-            for tok in tokenize.generate_tokens(src.readline):
-                exp(*tok)
-        except tokenize.TokenError, e:
-            # this can happen if our inspectable region happens to butt up
-            # against the end of a construct like a docstring with the closing
-            # """ on separate line
-            log.debug("Tokenizer error: %s", e)
-            inspect_lines.pop(0)
-            mark_line -= 1
-            src = StringIO(textwrap.dedent(''.join(inspect_lines)))
-            exp = Expander(frame.f_locals, frame.f_globals)
-            continue
-        break
-    padded = []
-    if exp.expanded_source:
-        exp_lines = exp.expanded_source.split('\n')
-        ep = 0
-        for line in exp_lines:
-            if ep == mark_line:
-                padded.append('>>  ' + line)
-            else:
-                padded.append('    ' + line)
-            ep += 1
-    return '\n'.join(padded)
-
-
-def tbsource(tb, context=6):
-    """Get source from  a traceback object.
-
-    A tuple of two things is returned: a list of lines of context from
-    the source code, and the index of the current line within that list.
-    The optional second argument specifies the number of lines of context
-    to return, which are centered around the current line.
-
-    .. Note ::
-       This is adapted from inspect.py in the python 2.4 standard library, 
-       since a bug in the 2.3 version of inspect prevents it from correctly
-       locating source lines in a traceback frame.
-    """
-    
-    lineno = tb.tb_lineno
-    frame = tb.tb_frame
-
-    if context > 0:
-        start = lineno - 1 - context//2
-        log.debug("lineno: %s start: %s", lineno, start)
-        
-        try:
-            lines, dummy = inspect.findsource(frame)
-        except IOError:
-            lines, index = [''], 0
-        else:
-            all_lines = lines
-            start = max(start, 1)
-            start = max(0, min(start, len(lines) - context))
-            lines = lines[start:start+context]
-            index = lineno - 1 - start
-            
-            # python 2.5 compat: if previous line ends in a continuation,
-            # decrement start by 1 to match 2.4 behavior                
-            if sys.version_info >= (2, 5) and index > 0:
-                while lines[index-1].strip().endswith('\\'):
-                    start -= 1
-                    lines = all_lines[start:start+context]
-    else:
-        lines, index = [''], 0
-    log.debug("tbsource lines '''%s''' around index %s", lines, index)
-    return (lines, index)    
-
-    
-def find_inspectable_lines(lines, pos):
-    """Find lines in home that are inspectable.
-    
-    Walk back from the err line up to 3 lines, but don't walk back over
-    changes in indent level.
-
-    Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk
-    over changes in indent level (unless part of an extended line)
-    """
-    cnt = re.compile(r'\\[\s\n]*$')
-    df = re.compile(r':[\s\n]*$')
-    ind = re.compile(r'^(\s*)')
-    toinspect = []
-    home = lines[pos]
-    home_indent = ind.match(home).groups()[0]
-    
-    before = lines[max(pos-3, 0):pos]
-    before.reverse()
-    after = lines[pos+1:min(pos+4, len(lines))]
-
-    for line in before:
-        if ind.match(line).groups()[0] == home_indent:
-            toinspect.append(line)
-        else:
-            break
-    toinspect.reverse()
-    toinspect.append(home)
-    home_pos = len(toinspect)-1
-    continued = cnt.search(home)
-    for line in after:
-        if ((continued or ind.match(line).groups()[0] == home_indent)
-            and not df.search(line)):
-            toinspect.append(line)
-            continued = cnt.search(line)
-        else:
-            break
-    log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
-    return toinspect, home_pos
-
-
-class Expander:
-    """Simple expression expander. Uses tokenize to find the names and
-    expands any that can be looked up in the frame.
-    """
-    def __init__(self, locals, globals):
-        self.locals = locals
-        self.globals = globals
-        self.lpos = None
-        self.expanded_source = ''
-         
-    def __call__(self, ttype, tok, start, end, line):
-        # TODO
-        # deal with unicode properly
-        
-        # TODO
-        # Dealing with instance members
-        #   always keep the last thing seen  
-        #   if the current token is a dot,
-        #      get ready to getattr(lastthing, this thing) on the
-        #      next call.
-        
-        if self.lpos is not None:
-            if start[1] >= self.lpos:
-                self.expanded_source += ' ' * (start[1]-self.lpos)
-            elif start[1] < self.lpos:
-                # newline, indent correctly
-                self.expanded_source += ' ' * start[1]
-        self.lpos = end[1]
-      
-        if ttype == tokenize.INDENT:
-            pass
-        elif ttype == tokenize.NAME:
-            # Clean this junk up
-            try:
-                val = self.locals[tok]
-                if callable(val):
-                    val = tok
-                else:
-                    val = repr(val)
-            except KeyError:
-                try:
-                    val = self.globals[tok]
-                    if callable(val):
-                        val = tok
-                    else:
-                        val = repr(val)
-
-                except KeyError:
-                    val = tok
-            # FIXME... not sure how to handle things like funcs, classes
-            # FIXME this is broken for some unicode strings
-            self.expanded_source += val
-        else:
-            self.expanded_source += tok
-        # if this is the end of the line and the line ends with
-        # \, then tack a \ and newline onto the output
-        # print line[end[1]:]
-        if re.match(r'\s+\\\n', line[end[1]:]):
-            self.expanded_source += ' \\\n'
diff --git a/lib/spack/external/nose/loader.py b/lib/spack/external/nose/loader.py
deleted file mode 100644
index 3744e54ff6..0000000000
--- a/lib/spack/external/nose/loader.py
+++ /dev/null
@@ -1,623 +0,0 @@
-"""
-Test Loader
------------
-
-nose's test loader implements the same basic functionality as its
-superclass, unittest.TestLoader, but extends it by more liberal
-interpretations of what may be a test and how a test may be named.
-"""
-from __future__ import generators
-
-import logging
-import os
-import sys
-import unittest
-import types
-from inspect import isfunction
-from nose.pyversion import unbound_method, ismethod
-from nose.case import FunctionTestCase, MethodTestCase
-from nose.failure import Failure
-from nose.config import Config
-from nose.importer import Importer, add_path, remove_path
-from nose.selector import defaultSelector, TestAddress
-from nose.util import func_lineno, getpackage, isclass, isgenerator, \
-    ispackage, regex_last_key, resolve_name, transplant_func, \
-    transplant_class, test_address
-from nose.suite import ContextSuiteFactory, ContextList, LazySuite
-from nose.pyversion import sort_list, cmp_to_key
-
-
-log = logging.getLogger(__name__)
-#log.setLevel(logging.DEBUG)
-
-# for efficiency and easier mocking
-op_normpath = os.path.normpath
-op_abspath = os.path.abspath
-op_join = os.path.join
-op_isdir = os.path.isdir
-op_isfile = os.path.isfile
-
-
-__all__ = ['TestLoader', 'defaultTestLoader']
-
-
-class TestLoader(unittest.TestLoader):
-    """Test loader that extends unittest.TestLoader to:
-
-    * Load tests from test-like functions and classes that are not
-      unittest.TestCase subclasses
-    * Find and load test modules in a directory
-    * Support tests that are generators
-    * Support easy extensions of or changes to that behavior through plugins
-    """
-    config = None
-    importer = None
-    workingDir = None
-    selector = None
-    suiteClass = None
-
-    def __init__(self, config=None, importer=None, workingDir=None,
-                 selector=None):
-        """Initialize a test loader.
-
-        Parameters (all optional):
-
-        * config: provide a `nose.config.Config`_ or other config class
-          instance; if not provided a `nose.config.Config`_ with
-          default values is used.
-        * importer: provide an importer instance that implements
-          `importFromPath`. If not provided, a
-          `nose.importer.Importer`_ is used.
-        * workingDir: the directory to which file and module names are
-          relative. If not provided, assumed to be the current working
-          directory.
-        * selector: a selector class or instance. If a class is
-          provided, it will be instantiated with one argument, the
-          current config. If not provided, a `nose.selector.Selector`_
-          is used.
-        """
-        if config is None:
-            config = Config()
-        if importer is None:
-            importer = Importer(config=config)
-        if workingDir is None:
-            workingDir = config.workingDir
-        if selector is None:
-            selector = defaultSelector(config)
-        elif isclass(selector):
-            selector = selector(config)
-        self.config = config
-        self.importer = importer
-        self.workingDir = op_normpath(op_abspath(workingDir))
-        self.selector = selector
-        if config.addPaths:
-            add_path(workingDir, config)
-        self.suiteClass = ContextSuiteFactory(config=config)
-
-        self._visitedPaths = set([])
-
-        unittest.TestLoader.__init__(self)
-
-    def getTestCaseNames(self, testCaseClass):
-        """Override to select with selector, unless
-        config.getTestCaseNamesCompat is True
-        """
-        if self.config.getTestCaseNamesCompat:
-            return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
-
-        def wanted(attr, cls=testCaseClass, sel=self.selector):
-            item = getattr(cls, attr, None)
-            if isfunction(item):
-                item = unbound_method(cls, item)
-            elif not ismethod(item):
-                return False
-            return sel.wantMethod(item)
-
-        cases = filter(wanted, dir(testCaseClass))
-
-        # add runTest if nothing else picked
-        if not cases and hasattr(testCaseClass, 'runTest'):
-            cases = ['runTest']
-        if self.sortTestMethodsUsing:
-            sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
-        return cases
-
-    def _haveVisited(self, path):
-        # For cases where path is None, we always pretend we haven't visited
-        # them.
-        if path is None:
-            return False
-
-        return path in self._visitedPaths
-
-    def _addVisitedPath(self, path):
-        if path is not None:
-            self._visitedPaths.add(path)
-
-    def loadTestsFromDir(self, path):
-        """Load tests from the directory at path. This is a generator
-        -- each suite of tests from a module or other file is yielded
-        and is expected to be executed before the next file is
-        examined.
-        """
-        log.debug("load from dir %s", path)
-        plugins = self.config.plugins
-        plugins.beforeDirectory(path)
-        if self.config.addPaths:
-            paths_added = add_path(path, self.config)
-
-        entries = os.listdir(path)
-        sort_list(entries, regex_last_key(self.config.testMatch))
-        for entry in entries:
-            # this hard-coded initial-dot test will be removed:
-            # http://code.google.com/p/python-nose/issues/detail?id=82
-            if entry.startswith('.'):
-                continue
-            entry_path = op_abspath(op_join(path, entry))
-            is_file = op_isfile(entry_path)
-            wanted = False
-            if is_file:
-                is_dir = False
-                wanted = self.selector.wantFile(entry_path)
-            else:
-                is_dir = op_isdir(entry_path)
-                if is_dir:
-                    # this hard-coded initial-underscore test will be removed:
-                    # http://code.google.com/p/python-nose/issues/detail?id=82
-                    if entry.startswith('_'):
-                        continue
-                    wanted = self.selector.wantDirectory(entry_path)
-            is_package = ispackage(entry_path)
-
-            # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
-            # As a result, it's now possible that parent paths that have a
-            # segment with the same basename as our package ends up
-            # in module.__path__.  So we have to keep track of what we've
-            # visited, and not-revisit them again.
-            if wanted and not self._haveVisited(entry_path):
-                self._addVisitedPath(entry_path)
-                if is_file:
-                    plugins.beforeContext()
-                    if entry.endswith('.py'):
-                        yield self.loadTestsFromName(
-                            entry_path, discovered=True)
-                    else:
-                        yield self.loadTestsFromFile(entry_path)
-                    plugins.afterContext()
-                elif is_package:
-                    # Load the entry as a package: given the full path,
-                    # loadTestsFromName() will figure it out
-                    yield self.loadTestsFromName(
-                        entry_path, discovered=True)
-                else:
-                    # Another test dir in this one: recurse lazily
-                    yield self.suiteClass(
-                        lambda: self.loadTestsFromDir(entry_path))
-        tests = []
-        for test in plugins.loadTestsFromDir(path):
-            tests.append(test)
-        # TODO: is this try/except needed?
-        try:
-            if tests:
-                yield self.suiteClass(tests)
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except:
-            yield self.suiteClass([Failure(*sys.exc_info())])
-
-        # pop paths
-        if self.config.addPaths:
-            for p in paths_added:
-              remove_path(p)
-        plugins.afterDirectory(path)
-
-    def loadTestsFromFile(self, filename):
-        """Load tests from a non-module file. Default is to raise a
-        ValueError; plugins may implement `loadTestsFromFile` to
-        provide a list of tests loaded from the file.
-        """
-        log.debug("Load from non-module file %s", filename)
-        try:
-            tests = [test for test in
-                     self.config.plugins.loadTestsFromFile(filename)]
-            if tests:
-                # Plugins can yield False to indicate that they were
-                # unable to load tests from a file, but it was not an
-                # error -- the file just had no tests to load.
-                tests = filter(None, tests)
-                return self.suiteClass(tests)
-            else:
-                # Nothing was able to even try to load from this file
-                open(filename, 'r').close() # trigger os error
-                raise ValueError("Unable to load tests from file %s"
-                                 % filename)
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except:
-            exc = sys.exc_info()
-            return self.suiteClass(
-                [Failure(exc[0], exc[1], exc[2],
-                         address=(filename, None, None))])
-
-    def loadTestsFromGenerator(self, generator, module):
-        """Lazy-load tests from a generator function. The generator function
-        may yield either:
-
-        * a callable, or
-        * a function name resolvable within the same module
-        """
-        def generate(g=generator, m=module):
-            try:
-                for test in g():
-                    test_func, arg = self.parseGeneratedTest(test)
-                    if not callable(test_func):
-                        test_func = getattr(m, test_func)
-                    yield FunctionTestCase(test_func, arg=arg, descriptor=g)
-            except KeyboardInterrupt:
-                raise
-            except:
-                exc = sys.exc_info()
-                yield Failure(exc[0], exc[1], exc[2],
-                              address=test_address(generator))
-        return self.suiteClass(generate, context=generator, can_split=False)
-
-    def loadTestsFromGeneratorMethod(self, generator, cls):
-        """Lazy-load tests from a generator method.
-
-        This is more complicated than loading from a generator function,
-        since a generator method may yield:
-
-        * a function
-        * a bound or unbound method, or
-        * a method name
-        """
-        # convert the unbound generator method
-        # into a bound method so it can be called below
-        if hasattr(generator, 'im_class'):
-            cls = generator.im_class
-        inst = cls()
-        method = generator.__name__
-        generator = getattr(inst, method)
-
-        def generate(g=generator, c=cls):
-            try:
-                for test in g():
-                    test_func, arg = self.parseGeneratedTest(test)
-                    if not callable(test_func):
-                        test_func = unbound_method(c, getattr(c, test_func))
-                    if ismethod(test_func):
-                        yield MethodTestCase(test_func, arg=arg, descriptor=g)
-                    elif callable(test_func):
-                        # In this case we're forcing the 'MethodTestCase'
-                        # to run the inline function as its test call,
-                        # but using the generator method as the 'method of
-                        # record' (so no need to pass it as the descriptor)
-                        yield MethodTestCase(g, test=test_func, arg=arg)
-                    else:
-                        yield Failure(
-                            TypeError,
-                            "%s is not a callable or method" % test_func)
-            except KeyboardInterrupt:
-                raise
-            except:
-                exc = sys.exc_info()
-                yield Failure(exc[0], exc[1], exc[2],
-                              address=test_address(generator))
-        return self.suiteClass(generate, context=generator, can_split=False)
-
-    def loadTestsFromModule(self, module, path=None, discovered=False):
-        """Load all tests from module and return a suite containing
-        them. If the module has been discovered and is not test-like,
-        the suite will be empty by default, though plugins may add
-        their own tests.
-        """
-        log.debug("Load from module %s", module)
-        tests = []
-        test_classes = []
-        test_funcs = []
-        # For *discovered* modules, we only load tests when the module looks
-        # testlike. For modules we've been directed to load, we always
-        # look for tests. (discovered is set to True by loadTestsFromDir)
-        if not discovered or self.selector.wantModule(module):
-            for item in dir(module):
-                test = getattr(module, item, None)
-                # print "Check %s (%s) in %s" % (item, test, module.__name__)
-                if isclass(test):
-                    if self.selector.wantClass(test):
-                        test_classes.append(test)
-                elif isfunction(test) and self.selector.wantFunction(test):
-                    test_funcs.append(test)
-            sort_list(test_classes, lambda x: x.__name__)
-            sort_list(test_funcs, func_lineno)
-            tests = map(lambda t: self.makeTest(t, parent=module),
-                        test_classes + test_funcs)
-
-        # Now, descend into packages
-        # FIXME can or should this be lazy?
-        # is this syntax 2.2 compatible?
-        module_paths = getattr(module, '__path__', [])
-
-        if path:
-            path = os.path.normcase(os.path.realpath(path))
-
-        for module_path in module_paths:
-            log.debug("Load tests from module path %s?", module_path)
-            log.debug("path: %s os.path.realpath(%s): %s",
-                      path, os.path.normcase(module_path),
-                      os.path.realpath(os.path.normcase(module_path)))
-            if (self.config.traverseNamespace or not path) or \
-                    os.path.realpath(
-                        os.path.normcase(module_path)).startswith(path):
-                # Egg files can be on sys.path, so make sure the path is a
-                # directory before trying to load from it.
-                if os.path.isdir(module_path):
-                    tests.extend(self.loadTestsFromDir(module_path))
-
-        for test in self.config.plugins.loadTestsFromModule(module, path):
-            tests.append(test)
-
-        return self.suiteClass(ContextList(tests, context=module))
-
-    def loadTestsFromName(self, name, module=None, discovered=False):
-        """Load tests from the entity with the given name.
-
-        The name may indicate a file, directory, module, or any object
-        within a module. See `nose.util.split_test_name` for details on
-        test name parsing.
-        """
-        # FIXME refactor this method into little bites?
-        log.debug("load from %s (%s)", name, module)
-
-        suite = self.suiteClass
-
-        # give plugins first crack
-        plug_tests = self.config.plugins.loadTestsFromName(name, module)
-        if plug_tests:
-            return suite(plug_tests)
-
-        addr = TestAddress(name, workingDir=self.workingDir)
-        if module:
-            # Two cases:
-            #  name is class.foo
-            #    The addr will be incorrect, since it thinks class.foo is
-            #    a dotted module name. It's actually a dotted attribute
-            #    name. In this case we want to use the full submitted
-            #    name as the name to load from the module.
-            #  name is module:class.foo
-            #    The addr will be correct. The part we want is the part after
-            #    the :, which is in addr.call.
-            if addr.call:
-                name = addr.call
-            parent, obj = self.resolve(name, module)
-            if (isclass(parent)
-                and getattr(parent, '__module__', None) != module.__name__
-                and not isinstance(obj, Failure)):
-                parent = transplant_class(parent, module.__name__)
-                obj = getattr(parent, obj.__name__)
-            log.debug("parent %s obj %s module %s", parent, obj, module)
-            if isinstance(obj, Failure):
-                return suite([obj])
-            else:
-                return suite(ContextList([self.makeTest(obj, parent)],
-                                         context=parent))
-        else:
-            if addr.module:
-                try:
-                    if addr.filename is None:
-                        module = resolve_name(addr.module)
-                    else:
-                        self.config.plugins.beforeImport(
-                            addr.filename, addr.module)
-                        # FIXME: to support module.name names,
-                        # do what resolve-name does and keep trying to
-                        # import, popping tail of module into addr.call,
-                        # until we either get an import or run out of
-                        # module parts
-                        try:
-                            module = self.importer.importFromPath(
-                                addr.filename, addr.module)
-                        finally:
-                            self.config.plugins.afterImport(
-                                addr.filename, addr.module)
-                except (KeyboardInterrupt, SystemExit):
-                    raise
-                except:
-                    exc = sys.exc_info()
-                    return suite([Failure(exc[0], exc[1], exc[2],
-                                          address=addr.totuple())])
-                if addr.call:
-                    return self.loadTestsFromName(addr.call, module)
-                else:
-                    return self.loadTestsFromModule(
-                        module, addr.filename,
-                        discovered=discovered)
-            elif addr.filename:
-                path = addr.filename
-                if addr.call:
-                    package = getpackage(path)
-                    if package is None:
-                        return suite([
-                            Failure(ValueError,
-                                    "Can't find callable %s in file %s: "
-                                    "file is not a python module" %
-                                    (addr.call, path),
-                                    address=addr.totuple())])
-                    return self.loadTestsFromName(addr.call, module=package)
-                else:
-                    if op_isdir(path):
-                        # In this case we *can* be lazy since we know
-                        # that each module in the dir will be fully
-                        # loaded before its tests are executed; we
-                        # also know that we're not going to be asked
-                        # to load from . and ./some_module.py *as part
-                        # of this named test load*
-                        return LazySuite(
-                            lambda: self.loadTestsFromDir(path))
-                    elif op_isfile(path):
-                        return self.loadTestsFromFile(path)
-                    else:
-                        return suite([
-                                Failure(OSError, "No such file %s" % path,
-                                        address=addr.totuple())])
-            else:
-                # just a function? what to do? I think it can only be
-                # handled when module is not None
-                return suite([
-                    Failure(ValueError, "Unresolvable test name %s" % name,
-                            address=addr.totuple())])
-
-    def loadTestsFromNames(self, names, module=None):
-        """Load tests from all names, returning a suite containing all
-        tests.
-        """
-        plug_res = self.config.plugins.loadTestsFromNames(names, module)
-        if plug_res:
-            suite, names = plug_res
-            if suite:
-                return self.suiteClass([
-                    self.suiteClass(suite),
-                    unittest.TestLoader.loadTestsFromNames(self, names, module)
-                    ])
-        return unittest.TestLoader.loadTestsFromNames(self, names, module)
-
-    def loadTestsFromTestCase(self, testCaseClass):
-        """Load tests from a unittest.TestCase subclass.
-        """
-        cases = []
-        plugins = self.config.plugins
-        for case in plugins.loadTestsFromTestCase(testCaseClass):
-            cases.append(case)
-        # For efficiency in the most common case, just call and return from
-        # super. This avoids having to extract cases and rebuild a context
-        # suite when there are no plugin-contributed cases.
-        if not cases:
-            return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
-        cases.extend(
-            [case for case in
-             super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
-        return self.suiteClass(cases)
-
-    def loadTestsFromTestClass(self, cls):
-        """Load tests from a test class that is *not* a unittest.TestCase
-        subclass.
-
-        In this case, we can't depend on the class's `__init__` taking method
-        name arguments, so we have to compose a MethodTestCase for each
-        method in the class that looks testlike.
-        """
-        def wanted(attr, cls=cls, sel=self.selector):
-            item = getattr(cls, attr, None)
-            if isfunction(item):
-                item = unbound_method(cls, item)
-            elif not ismethod(item):
-                return False
-            return sel.wantMethod(item)
-        cases = [self.makeTest(getattr(cls, case), cls)
-                 for case in filter(wanted, dir(cls))]
-        for test in self.config.plugins.loadTestsFromTestClass(cls):
-            cases.append(test)
-        return self.suiteClass(ContextList(cases, context=cls))
-
-    def makeTest(self, obj, parent=None):
-        try:
-            return self._makeTest(obj, parent)
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except:
-            exc = sys.exc_info()
-            try:
-                addr = test_address(obj)
-            except KeyboardInterrupt:
-                raise
-            except:
-                addr = None
-            return Failure(exc[0], exc[1], exc[2], address=addr)
-
-    def _makeTest(self, obj, parent=None):
-        """Given a test object and its parent, return a test case
-        or test suite.
-        """
-        plug_tests = []
-        try:
-            addr = test_address(obj)
-        except KeyboardInterrupt:
-            raise
-        except:
-            addr = None
-        for test in self.config.plugins.makeTest(obj, parent):
-            plug_tests.append(test)
-        # TODO: is this try/except needed?
-        try:
-            if plug_tests:
-                return self.suiteClass(plug_tests)
-        except (KeyboardInterrupt, SystemExit):
-            raise
-        except:
-            exc = sys.exc_info()
-            return Failure(exc[0], exc[1], exc[2], address=addr)
-
-        if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
-	    # This is a Python 3.x 'unbound method'.  Wrap it with its
-	    # associated class..
-            obj = unbound_method(parent, obj)
-
-        if isinstance(obj, unittest.TestCase):
-            return obj
-        elif isclass(obj):
-            if parent and obj.__module__ != parent.__name__:
-                obj = transplant_class(obj, parent.__name__)
-            if issubclass(obj, unittest.TestCase):
-                return self.loadTestsFromTestCase(obj)
-            else:
-                return self.loadTestsFromTestClass(obj)
-        elif ismethod(obj):
-            if parent is None:
-                parent = obj.__class__
-            if issubclass(parent, unittest.TestCase):
-                return parent(obj.__name__)
-            else:
-                if isgenerator(obj):
-                    return self.loadTestsFromGeneratorMethod(obj, parent)
-                else:
-                    return MethodTestCase(obj)
-        elif isfunction(obj):
-            if parent and obj.__module__ != parent.__name__:
-                obj = transplant_func(obj, parent.__name__)
-            if isgenerator(obj):
-                return self.loadTestsFromGenerator(obj, parent)
-            else:
-                return FunctionTestCase(obj)
-        else:
-            return Failure(TypeError,
-                           "Can't make a test from %s" % obj,
-                           address=addr)
-
-    def resolve(self, name, module):
-        """Resolve name within module
-        """
-        obj = module
-        parts = name.split('.')
-        for part in parts:
-            parent, obj = obj, getattr(obj, part, None)
-        if obj is None:
-            # no such test
-            obj = Failure(ValueError, "No such test %s" % name)
-        return parent, obj
-
-    def parseGeneratedTest(self, test):
-        """Given the yield value of a test generator, return a func and args.
-
-        This is used in the two loadTestsFromGenerator* methods.
-
-        """
-        if not isinstance(test, tuple):         # yield test
-            test_func, arg = (test, tuple())
-        elif len(test) == 1:                    # yield (test,)
-            test_func, arg = (test[0], tuple())
-        else:                                   # yield test, foo, bar, ...
-            assert len(test) > 1 # sanity check
-            test_func, arg = (test[0], test[1:])
-        return test_func, arg
-
-defaultTestLoader = TestLoader
-
diff --git a/lib/spack/external/nose/plugins/__init__.py b/lib/spack/external/nose/plugins/__init__.py
deleted file mode 100644
index 08ee8f3230..0000000000
--- a/lib/spack/external/nose/plugins/__init__.py
+++ /dev/null
@@ -1,190 +0,0 @@
-"""
-Writing Plugins
----------------
-
-nose supports plugins for test collection, selection, observation and
-reporting. There are two basic rules for plugins:
-
-* Plugin classes should subclass :class:`nose.plugins.Plugin`.
-
-* Plugins may implement any of the methods described in the class
-  :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
-  this class is for documentary purposes only; plugins may not subclass
-  IPluginInterface.
-
-Hello World
-===========
-
-Here's a basic plugin.  It doesn't do much so read on for more ideas or dive
-into the :doc:`IPluginInterface <interface>` to see all available hooks.
-
-.. code-block:: python
-
-    import logging
-    import os
-
-    from nose.plugins import Plugin
-
-    log = logging.getLogger('nose.plugins.helloworld')
-
-    class HelloWorld(Plugin):
-        name = 'helloworld'
-
-        def options(self, parser, env=os.environ):
-            super(HelloWorld, self).options(parser, env=env)
-
-        def configure(self, options, conf):
-            super(HelloWorld, self).configure(options, conf)
-            if not self.enabled:
-                return
-
-        def finalize(self, result):
-            log.info('Hello pluginized world!')
-
-Registering
-===========
-
-.. Note::
-  Important note: the following applies only to the default
-  plugin manager. Other plugin managers may use different means to
-  locate and load plugins.
-
-For nose to find a plugin, it must be part of a package that uses
-setuptools_, and the plugin must be included in the entry points defined
-in the setup.py for the package:
-
-.. code-block:: python
-
-    setup(name='Some plugin',
-        # ...
-        entry_points = {
-            'nose.plugins.0.10': [
-                'someplugin = someplugin:SomePlugin'
-                ]
-            },
-        # ...
-        )
-
-Once the package is installed with install or develop, nose will be able
-to load the plugin.
-
-.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
-
-Registering a plugin without setuptools
-=======================================
-
-It is currently possible to register a plugin programmatically by
-creating a custom nose runner like this :
-
-.. code-block:: python
-
-    import nose
-    from yourplugin import YourPlugin
-
-    if __name__ == '__main__':
-        nose.main(addplugins=[YourPlugin()])
-
-Defining options
-================
-
-All plugins must implement the methods ``options(self, parser, env)``
-and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
-that want the standard options should call the superclass methods.
-
-nose uses optparse.OptionParser from the standard library to parse
-arguments. A plugin's ``options()`` method receives a parser
-instance. It's good form for a plugin to use that instance only to add
-additional arguments that take only long arguments (--like-this). Most
-of nose's built-in arguments get their default value from an environment
-variable.
-
-A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
-object, as well as the current config object. Plugins should configure their
-behavior based on the user-selected settings, and may raise exceptions
-if the configured behavior is nonsensical.
-
-Logging
-=======
-
-nose uses the logging classes from the standard library. To enable users
-to view debug messages easily, plugins should use ``logging.getLogger()`` to
-acquire a logger in the ``nose.plugins`` namespace.
-
-Recipes
-=======
-
-* Writing a plugin that monitors or controls test result output
-
-  Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
-  results. If you also want to monitor output, implement
-  ``setOutputStream`` and keep a reference to the output stream. If you
-  want to prevent the builtin ``TextTestResult`` output, implement
-  ``setOutputSteam`` and *return a dummy stream*. The default output will go
-  to the dummy stream, while you send your desired output to the real stream.
-
-  Example: `examples/html_plugin/htmlplug.py`_
-
-* Writing a plugin that handles exceptions
-
-  Subclass :doc:`ErrorClassPlugin <errorclasses>`.
-
-  Examples: :doc:`nose.plugins.deprecated <deprecated>`,
-  :doc:`nose.plugins.skip <skip>`
-
-* Writing a plugin that adds detail to error reports
-
-  Implement ``formatError`` and/or ``formatFailure``. The error tuple
-  you return (error class, error message, traceback) will replace the
-  original error tuple.
-
-  Examples: :doc:`nose.plugins.capture <capture>`,
-  :doc:`nose.plugins.failuredetail <failuredetail>`
-
-* Writing a plugin that loads tests from files other than python modules
-
-  Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
-  return True for files that you want to examine for tests. In
-  ``loadTestsFromFile``, for those files, return an iterable
-  containing TestCases (or yield them as you find them;
-  ``loadTestsFromFile`` may also be a generator).
-
-  Example: :doc:`nose.plugins.doctests <doctests>`
-
-* Writing a plugin that prints a report
-
-  Implement ``begin`` if you need to perform setup before testing
-  begins. Implement ``report`` and output your report to the provided stream.
-
-  Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
-
-* Writing a plugin that selects or rejects tests
-
-  Implement any or all ``want*``  methods. Return False to reject the test
-  candidate, True to accept it -- which  means that the test candidate
-  will pass through the rest of the system, so you must be prepared to
-  load tests from it if tests can't be loaded by the core loader or
-  another plugin -- and None if you don't care.
-
-  Examples: :doc:`nose.plugins.attrib <attrib>`,
-  :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
-
-
-More Examples
-=============
-
-See any builtin plugin or example plugin in the examples_ directory in
-the nose source distribution. There is a list of third-party plugins
-`on jottit`_.
-
-.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
-.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
-.. _on jottit: http://nose-plugins.jottit.com/
-
-"""
-from nose.plugins.base import Plugin
-from nose.plugins.manager import *
-from nose.plugins.plugintest import PluginTester
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/allmodules.py b/lib/spack/external/nose/plugins/allmodules.py
deleted file mode 100644
index 1ccd7773a7..0000000000
--- a/lib/spack/external/nose/plugins/allmodules.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""Use the AllModules plugin by passing ``--all-modules`` or setting the
-NOSE_ALL_MODULES environment variable to enable collection and execution of
-tests in all python modules. Normal nose behavior is to look for tests only in
-modules that match testMatch.
-
-More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
-
-.. warning ::
-
-   This plugin can have surprising interactions with plugins that load tests
-   from what nose normally considers non-test modules, such as
-   the :doc:`doctest plugin <doctests>`. This is because any given
-   object in a module can't be loaded both by a plugin and the normal nose
-   :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
-   or classes in non-test modules that look like tests but aren't, you will
-   likely see errors as nose attempts to run them as tests.
-
-"""
-
-import os
-from nose.plugins.base import Plugin
-
-class AllModules(Plugin):
-    """Collect tests from all python modules.
-    """
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        env_opt = 'NOSE_ALL_MODULES'
-        parser.add_option('--all-modules',
-                          action="store_true",
-                          dest=self.enableOpt,
-                          default=env.get(env_opt),
-                          help="Enable plugin %s: %s [%s]" %
-                          (self.__class__.__name__, self.help(), env_opt))
-
-    def wantFile(self, file):
-        """Override to return True for all files ending with .py"""
-        # always want .py files
-        if file.endswith('.py'):
-            return True
-
-    def wantModule(self, module):
-        """Override return True for all modules"""
-        return True
diff --git a/lib/spack/external/nose/plugins/attrib.py b/lib/spack/external/nose/plugins/attrib.py
deleted file mode 100644
index 3d4422a23a..0000000000
--- a/lib/spack/external/nose/plugins/attrib.py
+++ /dev/null
@@ -1,286 +0,0 @@
-"""Attribute selector plugin.
-
-Oftentimes when testing you will want to select tests based on
-criteria rather then simply by filename. For example, you might want
-to run all tests except for the slow ones. You can do this with the
-Attribute selector plugin by setting attributes on your test methods.
-Here is an example:
-
-.. code-block:: python
-
-    def test_big_download():
-        import urllib
-        # commence slowness...
-
-    test_big_download.slow = 1
-
-Once you've assigned an attribute ``slow = 1`` you can exclude that
-test and all other tests having the slow attribute by running ::
-
-    $ nosetests -a '!slow'
-
-There is also a decorator available for you that will set attributes.
-Here's how to set ``slow=1`` like above with the decorator:
-
-.. code-block:: python
-
-    from nose.plugins.attrib import attr
-    @attr('slow')
-    def test_big_download():
-        import urllib
-        # commence slowness...
-
-And here's how to set an attribute with a specific value:
-
-.. code-block:: python
-
-    from nose.plugins.attrib import attr
-    @attr(speed='slow')
-    def test_big_download():
-        import urllib
-        # commence slowness...
-
-This test could be run with ::
-
-    $ nosetests -a speed=slow
-
-In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
-on all its test methods at once.  For example:
-
-.. code-block:: python
-
-    from nose.plugins.attrib import attr
-    @attr(speed='slow')
-    class MyTestCase:
-        def test_long_integration(self):
-            pass
-        def test_end_to_end_something(self):
-            pass
-
-Below is a reference to the different syntaxes available.
-
-Simple syntax
--------------
-
-Examples of using the ``-a`` and ``--attr`` options:
-
-* ``nosetests -a status=stable``
-   Only runs tests with attribute "status" having value "stable"
-
-* ``nosetests -a priority=2,status=stable``
-   Runs tests having both attributes and values
-
-* ``nosetests -a priority=2 -a slow``
-   Runs tests that match either attribute
-
-* ``nosetests -a tags=http``
-   If a test's ``tags`` attribute was a list and it contained the value
-   ``http`` then it would be run
-
-* ``nosetests -a slow``
-   Runs tests with the attribute ``slow`` if its value does not equal False
-   (False, [], "", etc...)
-
-* ``nosetests -a '!slow'``
-   Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
-   attribute that is equal to False
-   **NOTE**:
-   if your shell (like bash) interprets '!' as a special character make sure to
-   put single quotes around it.
-
-Expression Evaluation
----------------------
-
-Examples using the ``-A`` and ``--eval-attr`` options:
-
-* ``nosetests -A "not slow"``
-  Evaluates the Python expression "not slow" and runs the test if True
-
-* ``nosetests -A "(priority > 5) and not slow"``
-  Evaluates a complex Python expression and runs the test if True
-
-"""
-import inspect
-import logging
-import os
-import sys
-from inspect import isfunction
-from nose.plugins.base import Plugin
-from nose.util import tolist
-
-log = logging.getLogger('nose.plugins.attrib')
-compat_24 = sys.version_info >= (2, 4)
-
-def attr(*args, **kwargs):
-    """Decorator that adds attributes to classes or functions
-    for use with the Attribute (-a) plugin.
-    """
-    def wrap_ob(ob):
-        for name in args:
-            setattr(ob, name, True)
-        for name, value in kwargs.iteritems():
-            setattr(ob, name, value)
-        return ob
-    return wrap_ob
-
-def get_method_attr(method, cls, attr_name, default = False):
-    """Look up an attribute on a method/ function. 
-    If the attribute isn't found there, looking it up in the
-    method's class, if any.
-    """
-    Missing = object()
-    value = getattr(method, attr_name, Missing)
-    if value is Missing and cls is not None:
-        value = getattr(cls, attr_name, Missing)
-    if value is Missing:
-        return default
-    return value
-
-
-class ContextHelper:
-    """Object that can act as context dictionary for eval and looks up
-    names as attributes on a method/ function and its class. 
-    """
-    def __init__(self, method, cls):
-        self.method = method
-        self.cls = cls
-
-    def __getitem__(self, name):
-        return get_method_attr(self.method, self.cls, name)
-
-
-class AttributeSelector(Plugin):
-    """Selects test cases to be run based on their attributes.
-    """
-
-    def __init__(self):
-        Plugin.__init__(self)
-        self.attribs = []
-
-    def options(self, parser, env):
-        """Register command line options"""
-        parser.add_option("-a", "--attr",
-                          dest="attr", action="append",
-                          default=env.get('NOSE_ATTR'),
-                          metavar="ATTR",
-                          help="Run only tests that have attributes "
-                          "specified by ATTR [NOSE_ATTR]")
-        # disable in < 2.4: eval can't take needed args
-        if compat_24:
-            parser.add_option("-A", "--eval-attr",
-                              dest="eval_attr", metavar="EXPR", action="append",
-                              default=env.get('NOSE_EVAL_ATTR'),
-                              help="Run only tests for whose attributes "
-                              "the Python expression EXPR evaluates "
-                              "to True [NOSE_EVAL_ATTR]")
-
-    def configure(self, options, config):
-        """Configure the plugin and system, based on selected options.
-
-        attr and eval_attr may each be lists.
-
-        self.attribs will be a list of lists of tuples. In that list, each
-        list is a group of attributes, all of which must match for the rule to
-        match.
-        """
-        self.attribs = []
-
-        # handle python eval-expression parameter
-        if compat_24 and options.eval_attr:
-            eval_attr = tolist(options.eval_attr)
-            for attr in eval_attr:
-                # "<python expression>"
-                # -> eval(expr) in attribute context must be True
-                def eval_in_context(expr, obj, cls):
-                    return eval(expr, None, ContextHelper(obj, cls))
-                self.attribs.append([(attr, eval_in_context)])
-
-        # attribute requirements are a comma separated list of
-        # 'key=value' pairs
-        if options.attr:
-            std_attr = tolist(options.attr)
-            for attr in std_attr:
-                # all attributes within an attribute group must match
-                attr_group = []
-                for attrib in attr.strip().split(","):
-                    # don't die on trailing comma
-                    if not attrib:
-                        continue
-                    items = attrib.split("=", 1)
-                    if len(items) > 1:
-                        # "name=value"
-                        # -> 'str(obj.name) == value' must be True
-                        key, value = items
-                    else:
-                        key = items[0]
-                        if key[0] == "!":
-                            # "!name"
-                            # 'bool(obj.name)' must be False
-                            key = key[1:]
-                            value = False
-                        else:
-                            # "name"
-                            # -> 'bool(obj.name)' must be True
-                            value = True
-                    attr_group.append((key, value))
-                self.attribs.append(attr_group)
-        if self.attribs:
-            self.enabled = True
-
-    def validateAttrib(self, method, cls = None):
-        """Verify whether a method has the required attributes
-        The method is considered a match if it matches all attributes
-        for any attribute group.
-        ."""
-        # TODO: is there a need for case-sensitive value comparison?
-        any = False
-        for group in self.attribs:
-            match = True
-            for key, value in group:
-                attr = get_method_attr(method, cls, key)
-                if callable(value):
-                    if not value(key, method, cls):
-                        match = False
-                        break
-                elif value is True:
-                    # value must exist and be True
-                    if not bool(attr):
-                        match = False
-                        break
-                elif value is False:
-                    # value must not exist or be False
-                    if bool(attr):
-                        match = False
-                        break
-                elif type(attr) in (list, tuple):
-                    # value must be found in the list attribute
-                    if not str(value).lower() in [str(x).lower()
-                                                  for x in attr]:
-                        match = False
-                        break
-                else:
-                    # value must match, convert to string and compare
-                    if (value != attr
-                        and str(value).lower() != str(attr).lower()):
-                        match = False
-                        break
-            any = any or match
-        if any:
-            # not True because we don't want to FORCE the selection of the
-            # item, only say that it is acceptable
-            return None
-        return False
-
-    def wantFunction(self, function):
-        """Accept the function if its attributes match.
-        """
-        return self.validateAttrib(function)
-
-    def wantMethod(self, method):
-        """Accept the method if its attributes match.
-        """
-        try:
-            cls = method.im_class
-        except AttributeError:
-            return False
-        return self.validateAttrib(method, cls)
diff --git a/lib/spack/external/nose/plugins/base.py b/lib/spack/external/nose/plugins/base.py
deleted file mode 100644
index f09beb696f..0000000000
--- a/lib/spack/external/nose/plugins/base.py
+++ /dev/null
@@ -1,725 +0,0 @@
-import os
-import textwrap
-from optparse import OptionConflictError
-from warnings import warn
-from nose.util import tolist
-
-class Plugin(object):
-    """Base class for nose plugins. It's recommended but not *necessary* to
-    subclass this class to create a plugin, but all plugins *must* implement
-    `options(self, parser, env)` and `configure(self, options, conf)`, and
-    must have the attributes `enabled`, `name` and `score`.  The `name`
-    attribute may contain hyphens ('-').
-
-    Plugins should not be enabled by default.
-
-    Subclassing Plugin (and calling the superclass methods in
-    __init__, configure, and options, if you override them) will give
-    your plugin some friendly default behavior:
-
-    * A --with-$name option will be added to the command line interface
-      to enable the plugin, and a corresponding environment variable
-      will be used as the default value. The plugin class's docstring
-      will be used as the help for this option.
-    * The plugin will not be enabled unless this option is selected by
-      the user.
-    """
-    can_configure = False
-    enabled = False
-    enableOpt = None
-    name = None
-    score = 100
-
-    def __init__(self):
-        if self.name is None:
-            self.name = self.__class__.__name__.lower()
-        if self.enableOpt is None:
-            self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
-
-    def addOptions(self, parser, env=None):
-        """Add command-line options for this plugin.
-
-        The base plugin class adds --with-$name by default, used to enable the
-        plugin.
-
-        .. warning :: Don't implement addOptions unless you want to override
-                      all default option handling behavior, including
-                      warnings for conflicting options. Implement
-                      :meth:`options
-                      <nose.plugins.base.IPluginInterface.options>`
-                      instead.
-        """
-        self.add_options(parser, env)
-
-    def add_options(self, parser, env=None):
-        """Non-camel-case version of func name for backwards compatibility.
-
-        .. warning ::
-
-           DEPRECATED: Do not use this method,
-           use :meth:`options <nose.plugins.base.IPluginInterface.options>`
-           instead.
-
-        """
-        # FIXME raise deprecation warning if wasn't called by wrapper
-        if env is None:
-            env = os.environ
-        try:
-            self.options(parser, env)
-            self.can_configure = True
-        except OptionConflictError, e:
-            warn("Plugin %s has conflicting option string: %s and will "
-                 "be disabled" % (self, e), RuntimeWarning)
-            self.enabled = False
-            self.can_configure = False
-
-    def options(self, parser, env):
-        """Register commandline options.
-
-        Implement this method for normal options behavior with protection from
-        OptionConflictErrors. If you override this method and want the default
-        --with-$name option to be registered, be sure to call super().
-        """
-        env_opt = 'NOSE_WITH_%s' % self.name.upper()
-        env_opt = env_opt.replace('-', '_')
-        parser.add_option("--with-%s" % self.name,
-                          action="store_true",
-                          dest=self.enableOpt,
-                          default=env.get(env_opt),
-                          help="Enable plugin %s: %s [%s]" %
-                          (self.__class__.__name__, self.help(), env_opt))
-
-    def configure(self, options, conf):
-        """Configure the plugin and system, based on selected options.
-
-        The base plugin class sets the plugin to enabled if the enable option
-        for the plugin (self.enableOpt) is true.
-        """
-        if not self.can_configure:
-            return
-        self.conf = conf
-        if hasattr(options, self.enableOpt):
-            self.enabled = getattr(options, self.enableOpt)
-
-    def help(self):
-        """Return help for this plugin. This will be output as the help
-        section of the --with-$name option that enables the plugin.
-        """
-        if self.__class__.__doc__:
-            # doc sections are often indented; compress the spaces
-            return textwrap.dedent(self.__class__.__doc__)
-        return "(no help available)"
-
-    # Compatiblity shim
-    def tolist(self, val):
-        warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
-             DeprecationWarning)
-        return tolist(val)
-
-
-class IPluginInterface(object):
-    """
-    IPluginInterface describes the plugin API. Do not subclass or use this
-    class directly.
-    """
-    def __new__(cls, *arg, **kw):
-        raise TypeError("IPluginInterface class is for documentation only")
-
-    def addOptions(self, parser, env):
-        """Called to allow plugin to register command-line options with the
-        parser. DO NOT return a value from this method unless you want to stop
-        all other plugins from setting their options.
-
-        .. warning ::
-
-           DEPRECATED -- implement
-           :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
-        """
-        pass
-    add_options = addOptions
-    add_options.deprecated = True
-
-    def addDeprecated(self, test):
-        """Called when a deprecated test is seen. DO NOT return a value
-        unless you want to stop other plugins from seeing the deprecated
-        test.
-
-        .. warning :: DEPRECATED -- check error class in addError instead
-        """
-        pass
-    addDeprecated.deprecated = True
-
-    def addError(self, test, err):
-        """Called when a test raises an uncaught exception. DO NOT return a
-        value unless you want to stop other plugins from seeing that the
-        test has raised an error.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`            
-        :param err: sys.exc_info() tuple
-        :type err: 3-tuple
-        """
-        pass
-    addError.changed = True
-
-    def addFailure(self, test, err):
-        """Called when a test fails. DO NOT return a value unless you
-        want to stop other plugins from seeing that the test has failed.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        :param err: 3-tuple
-        :type err: sys.exc_info() tuple
-        """
-        pass
-    addFailure.changed = True
-
-    def addSkip(self, test):
-        """Called when a test is skipped. DO NOT return a value unless
-        you want to stop other plugins from seeing the skipped test.
-
-        .. warning:: DEPRECATED -- check error class in addError instead
-        """
-        pass
-    addSkip.deprecated = True
-
-    def addSuccess(self, test):
-        """Called when a test passes. DO NOT return a value unless you
-        want to stop other plugins from seeing the passing test.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-    addSuccess.changed = True
-
-    def afterContext(self):
-        """Called after a context (generally a module) has been
-        lazy-loaded, imported, setup, had its tests loaded and
-        executed, and torn down.
-        """
-        pass
-    afterContext._new = True
-
-    def afterDirectory(self, path):
-        """Called after all tests have been loaded from directory at path
-        and run.
-
-        :param path: the directory that has finished processing
-        :type path: string
-        """
-        pass
-    afterDirectory._new = True
-
-    def afterImport(self, filename, module):
-        """Called after module is imported from filename. afterImport
-        is called even if the import failed.
-
-        :param filename: The file that was loaded
-        :type filename: string
-        :param module: The name of the module
-        :type module: string
-        """
-        pass
-    afterImport._new = True
-
-    def afterTest(self, test):
-        """Called after the test has been run and the result recorded
-        (after stopTest).
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-    afterTest._new = True
-
-    def beforeContext(self):
-        """Called before a context (generally a module) is
-        examined. Because the context is not yet loaded, plugins don't
-        get to know what the context is; so any context operations
-        should use a stack that is pushed in `beforeContext` and popped
-        in `afterContext` to ensure they operate symmetrically.
-
-        `beforeContext` and `afterContext` are mainly useful for tracking
-        and restoring global state around possible changes from within a
-        context, whatever the context may be. If you need to operate on
-        contexts themselves, see `startContext` and `stopContext`, which
-        are passed the context in question, but are called after
-        it has been loaded (imported in the module case).
-        """
-        pass
-    beforeContext._new = True
-
-    def beforeDirectory(self, path):
-        """Called before tests are loaded from directory at path.
-
-        :param path: the directory that is about to be processed
-        """
-        pass
-    beforeDirectory._new = True
-
-    def beforeImport(self, filename, module):
-        """Called before module is imported from filename.
-
-        :param filename: The file that will be loaded
-        :param module: The name of the module found in file
-        :type module: string
-        """
-    beforeImport._new = True
-
-    def beforeTest(self, test):
-        """Called before the test is run (before startTest).
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-    beforeTest._new = True
- 
-    def begin(self):
-        """Called before any tests are collected or run. Use this to
-        perform any setup needed before testing begins.
-        """
-        pass
-
-    def configure(self, options, conf):
-        """Called after the command line has been parsed, with the
-        parsed options and the config container. Here, implement any
-        config storage or changes to state or operation that are set
-        by command line options.
-
-        DO NOT return a value from this method unless you want to
-        stop all other plugins from being configured.
-        """
-        pass
-
-    def finalize(self, result):
-        """Called after all report output, including output from all
-        plugins, has been sent to the stream. Use this to print final
-        test results or perform final cleanup. Return None to allow
-        other plugins to continue printing, or any other value to stop
-        them.
-
-        :param result: test result object
-        
-        .. Note:: When tests are run under a test runner other than
-           :class:`nose.core.TextTestRunner`, such as
-           via ``python setup.py test``, this method may be called
-           **before** the default report output is sent.
-        """
-        pass
-
-    def describeTest(self, test):
-        """Return a test description.
-
-        Called by :meth:`nose.case.Test.shortDescription`.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-    describeTest._new = True
-
-    def formatError(self, test, err):
-        """Called in result.addError, before plugin.addError. If you
-        want to replace or modify the error tuple, return a new error
-        tuple, otherwise return err, the original error tuple.
-        
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        :param err: sys.exc_info() tuple
-        :type err: 3-tuple
-        """
-        pass
-    formatError._new = True
-    formatError.chainable = True
-    # test arg is not chainable
-    formatError.static_args = (True, False)
-
-    def formatFailure(self, test, err):
-        """Called in result.addFailure, before plugin.addFailure. If you
-        want to replace or modify the error tuple, return a new error
-        tuple, otherwise return err, the original error tuple.
-        
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        :param err: sys.exc_info() tuple
-        :type err: 3-tuple
-        """
-        pass
-    formatFailure._new = True
-    formatFailure.chainable = True
-    # test arg is not chainable
-    formatFailure.static_args = (True, False)
-
-    def handleError(self, test, err):
-        """Called on addError. To handle the error yourself and prevent normal
-        error processing, return a true value.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        :param err: sys.exc_info() tuple
-        :type err: 3-tuple
-        """
-        pass
-    handleError._new = True
-
-    def handleFailure(self, test, err):
-        """Called on addFailure. To handle the failure yourself and
-        prevent normal failure processing, return a true value.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        :param err: sys.exc_info() tuple
-        :type err: 3-tuple
-        """
-        pass
-    handleFailure._new = True
-
-    def loadTestsFromDir(self, path):
-        """Return iterable of tests from a directory. May be a
-        generator.  Each item returned must be a runnable
-        unittest.TestCase (or subclass) instance or suite instance.
-        Return None if your plugin cannot collect any tests from
-        directory.
-
-        :param  path: The path to the directory.
-        """
-        pass
-    loadTestsFromDir.generative = True
-    loadTestsFromDir._new = True
-    
-    def loadTestsFromModule(self, module, path=None):
-        """Return iterable of tests in a module. May be a
-        generator. Each item returned must be a runnable
-        unittest.TestCase (or subclass) instance.
-        Return None if your plugin cannot
-        collect any tests from module.
-
-        :param module: The module object
-        :type module: python module
-        :param path: the path of the module to search, to distinguish from
-            namespace package modules
-
-            .. note::
-
-               NEW. The ``path`` parameter will only be passed by nose 0.11
-               or above.
-        """
-        pass
-    loadTestsFromModule.generative = True
-
-    def loadTestsFromName(self, name, module=None, importPath=None):
-        """Return tests in this file or module. Return None if you are not able
-        to load any tests, or an iterable if you are. May be a
-        generator.
-
-        :param name: The test name. May be a file or module name plus a test
-            callable. Use split_test_name to split into parts. Or it might
-            be some crazy name of your own devising, in which case, do
-            whatever you want.
-        :param module: Module from which the name is to be loaded
-        :param importPath: Path from which file (must be a python module) was
-            found
-
-            .. warning:: DEPRECATED: this argument will NOT be passed.
-        """
-        pass
-    loadTestsFromName.generative = True
-
-    def loadTestsFromNames(self, names, module=None):
-        """Return a tuple of (tests loaded, remaining names). Return
-        None if you are not able to load any tests. Multiple plugins
-        may implement loadTestsFromNames; the remaining name list from
-        each will be passed to the next as input.
-
-        :param names: List of test names.
-        :type names: iterable
-        :param module: Module from which the names are to be loaded
-        """
-        pass
-    loadTestsFromNames._new = True
-    loadTestsFromNames.chainable = True
-
-    def loadTestsFromFile(self, filename):
-        """Return tests in this file. Return None if you are not
-        interested in loading any tests, or an iterable if you are and
-        can load some. May be a generator. *If you are interested in
-        loading tests from the file and encounter no errors, but find
-        no tests, yield False or return [False].*
-
-        .. Note:: This method replaces loadTestsFromPath from the 0.9
-                  API.
-
-        :param filename: The full path to the file or directory.
-        """
-        pass
-    loadTestsFromFile.generative = True
-    loadTestsFromFile._new = True
-
-    def loadTestsFromPath(self, path):
-        """
-        .. warning:: DEPRECATED -- use loadTestsFromFile instead
-        """
-        pass
-    loadTestsFromPath.deprecated = True
-
-    def loadTestsFromTestCase(self, cls):
-        """Return tests in this test case class. Return None if you are
-        not able to load any tests, or an iterable if you are. May be a
-        generator.
-
-        :param cls: The test case class. Must be subclass of
-           :class:`unittest.TestCase`.
-        """
-        pass
-    loadTestsFromTestCase.generative = True
-
-    def loadTestsFromTestClass(self, cls):
-        """Return tests in this test class. Class will *not* be a
-        unittest.TestCase subclass. Return None if you are not able to
-        load any tests, an iterable if you are. May be a generator.
-
-        :param cls: The test case class. Must be **not** be subclass of
-           :class:`unittest.TestCase`.
-        """
-        pass
-    loadTestsFromTestClass._new = True
-    loadTestsFromTestClass.generative = True
-
-    def makeTest(self, obj, parent):
-        """Given an object and its parent, return or yield one or more
-        test cases. Each test must be a unittest.TestCase (or subclass)
-        instance. This is called before default test loading to allow
-        plugins to load an alternate test case or cases for an
-        object. May be a generator.
-
-        :param obj: The object to be made into a test
-        :param parent: The parent of obj (eg, for a method, the class)
-        """
-        pass
-    makeTest._new = True
-    makeTest.generative = True
-
-    def options(self, parser, env):
-        """Called to allow plugin to register command line
-        options with the parser.
-
-        DO NOT return a value from this method unless you want to stop
-        all other plugins from setting their options.
-
-        :param parser: options parser instance
-        :type parser: :class:`ConfigParser.ConfigParser`
-        :param env: environment, default is os.environ
-        """
-        pass
-    options._new = True
-
-    def prepareTest(self, test):
-        """Called before the test is run by the test runner. Please
-        note the article *the* in the previous sentence: prepareTest
-        is called *only once*, and is passed the test case or test
-        suite that the test runner will execute. It is *not* called
-        for each individual test case. If you return a non-None value,
-        that return value will be run as the test. Use this hook to
-        wrap or decorate the test with another function. If you need
-        to modify or wrap individual test cases, use `prepareTestCase`
-        instead.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-
-    def prepareTestCase(self, test):
-        """Prepare or wrap an individual test case. Called before
-        execution of the test. The test passed here is a
-        nose.case.Test instance; the case to be executed is in the
-        test attribute of the passed case. To modify the test to be
-        run, you should return a callable that takes one argument (the
-        test result object) -- it is recommended that you *do not*
-        side-effect the nose.case.Test instance you have been passed.
-
-        Keep in mind that when you replace the test callable you are
-        replacing the run() method of the test case -- including the
-        exception handling and result calls, etc.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-    prepareTestCase._new = True
-    
-    def prepareTestLoader(self, loader):
-        """Called before tests are loaded. To replace the test loader,
-        return a test loader. To allow other plugins to process the
-        test loader, return None. Only one plugin may replace the test
-        loader. Only valid when using nose.TestProgram.
-
-        :param loader: :class:`nose.loader.TestLoader` 
-             (or other loader) instance
-        """
-        pass
-    prepareTestLoader._new = True
-
-    def prepareTestResult(self, result):
-        """Called before the first test is run. To use a different
-        test result handler for all tests than the given result,
-        return a test result handler. NOTE however that this handler
-        will only be seen by tests, that is, inside of the result
-        proxy system. The TestRunner and TestProgram -- whether nose's
-        or other -- will continue to see the original result
-        handler. For this reason, it is usually better to monkeypatch
-        the result (for instance, if you want to handle some
-        exceptions in a unique way). Only one plugin may replace the
-        result, but many may monkeypatch it. If you want to
-        monkeypatch and stop other plugins from doing so, monkeypatch
-        and return the patched result.
-
-        :param result: :class:`nose.result.TextTestResult` 
-             (or other result) instance
-        """
-        pass
-    prepareTestResult._new = True
-
-    def prepareTestRunner(self, runner):
-        """Called before tests are run. To replace the test runner,
-        return a test runner. To allow other plugins to process the
-        test runner, return None. Only valid when using nose.TestProgram.
-
-        :param runner: :class:`nose.core.TextTestRunner` 
-             (or other runner) instance
-        """
-        pass
-    prepareTestRunner._new = True
-        
-    def report(self, stream):
-        """Called after all error output has been printed. Print your
-        plugin's report to the provided stream. Return None to allow
-        other plugins to print reports, any other value to stop them.
-
-        :param stream: stream object; send your output here
-        :type stream: file-like object
-        """
-        pass
-
-    def setOutputStream(self, stream):
-        """Called before test output begins. To direct test output to a
-        new stream, return a stream object, which must implement a
-        `write(msg)` method. If you only want to note the stream, not
-        capture or redirect it, then return None.
-
-        :param stream: stream object; send your output here
-        :type stream: file-like object
-        """
-
-    def startContext(self, context):
-        """Called before context setup and the running of tests in the
-        context. Note that tests have already been *loaded* from the
-        context before this call.
-
-        :param context: the context about to be setup. May be a module or
-             class, or any other object that contains tests.
-        """
-        pass
-    startContext._new = True
-    
-    def startTest(self, test):
-        """Called before each test is run. DO NOT return a value unless
-        you want to stop other plugins from seeing the test start.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-
-    def stopContext(self, context):
-        """Called after the tests in a context have run and the
-        context has been torn down.
-
-        :param context: the context that has been torn down. May be a module or
-             class, or any other object that contains tests.
-        """
-        pass
-    stopContext._new = True
-    
-    def stopTest(self, test):
-        """Called after each test is run. DO NOT return a value unless
-        you want to stop other plugins from seeing that the test has stopped.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-
-    def testName(self, test):
-        """Return a short test name. Called by `nose.case.Test.__str__`.
-
-        :param test: the test case
-        :type test: :class:`nose.case.Test`
-        """
-        pass
-    testName._new = True
-
-    def wantClass(self, cls):
-        """Return true if you want the main test selector to collect
-        tests from this class, false if you don't, and None if you don't
-        care.
-
-        :param cls: The class being examined by the selector
-        """
-        pass
-    
-    def wantDirectory(self, dirname):
-        """Return true if you want test collection to descend into this
-        directory, false if you do not, and None if you don't care.
-
-        :param dirname: Full path to directory being examined by the selector
-        """
-        pass
-    
-    def wantFile(self, file):
-        """Return true if you want to collect tests from this file,
-        false if you do not and None if you don't care.
-
-        Change from 0.9: The optional package parameter is no longer passed.
-
-        :param file: Full path to file being examined by the selector
-        """
-        pass
-    
-    def wantFunction(self, function):
-        """Return true to collect this function as a test, false to
-        prevent it from being collected, and None if you don't care.
-
-        :param function: The function object being examined by the selector
-        """
-        pass
-    
-    def wantMethod(self, method):
-        """Return true to collect this method as a test, false to
-        prevent it from being collected, and None if you don't care.
-        
-        :param method: The method object being examined by the selector
-        :type method: unbound method
-        """    
-        pass
-    
-    def wantModule(self, module):
-        """Return true if you want to collection to descend into this
-        module, false to prevent the collector from descending into the
-        module, and None if you don't care.
-
-        :param module: The module object being examined by the selector
-        :type module: python module
-        """
-        pass
-    
-    def wantModuleTests(self, module):
-        """
-        .. warning:: DEPRECATED -- this method will not be called, it has
-                     been folded into wantModule.
-        """
-        pass
-    wantModuleTests.deprecated = True
-    
diff --git a/lib/spack/external/nose/plugins/builtin.py b/lib/spack/external/nose/plugins/builtin.py
deleted file mode 100644
index 4fcc0018ad..0000000000
--- a/lib/spack/external/nose/plugins/builtin.py
+++ /dev/null
@@ -1,34 +0,0 @@
-"""
-Lists builtin plugins.
-"""
-plugins = []
-builtins = (
-    ('nose.plugins.attrib', 'AttributeSelector'),
-    ('nose.plugins.capture', 'Capture'),
-    ('nose.plugins.logcapture', 'LogCapture'),
-    ('nose.plugins.cover', 'Coverage'),
-    ('nose.plugins.debug', 'Pdb'),
-    ('nose.plugins.deprecated', 'Deprecated'),
-    ('nose.plugins.doctests', 'Doctest'),
-    ('nose.plugins.isolate', 'IsolationPlugin'),
-    ('nose.plugins.failuredetail', 'FailureDetail'),
-    ('nose.plugins.prof', 'Profile'),
-    ('nose.plugins.skip', 'Skip'),
-    ('nose.plugins.testid', 'TestId'),
-    ('nose.plugins.multiprocess', 'MultiProcess'),
-    ('nose.plugins.xunit', 'Xunit'),
-    ('nose.plugins.allmodules', 'AllModules'),
-    ('nose.plugins.collect', 'CollectOnly'),
-    )
-
-for module, cls in builtins:
-    try:
-        plugmod = __import__(module, globals(), locals(), [cls])
-    except KeyboardInterrupt:
-        raise
-    except:
-        continue
-    plug = getattr(plugmod, cls)
-    plugins.append(plug)
-    globals()[cls] = plug
-
diff --git a/lib/spack/external/nose/plugins/capture.py b/lib/spack/external/nose/plugins/capture.py
deleted file mode 100644
index fa4e5dcaaf..0000000000
--- a/lib/spack/external/nose/plugins/capture.py
+++ /dev/null
@@ -1,115 +0,0 @@
-"""
-This plugin captures stdout during test execution. If the test fails
-or raises an error, the captured output will be appended to the error
-or failure output. It is enabled by default but can be disabled with
-the options ``-s`` or ``--nocapture``.
-
-:Options:
-  ``--nocapture``
-    Don't capture stdout (any stdout output will be printed immediately)
-
-"""
-import logging
-import os
-import sys
-from nose.plugins.base import Plugin
-from nose.pyversion import exc_to_unicode, force_unicode
-from nose.util import ln
-from StringIO import StringIO
-
-
-log = logging.getLogger(__name__)
-
-class Capture(Plugin):
-    """
-    Output capture plugin. Enabled by default. Disable with ``-s`` or
-    ``--nocapture``. This plugin captures stdout during test execution,
-    appending any output captured to the error or failure output,
-    should the test fail or raise an error.
-    """
-    enabled = True
-    env_opt = 'NOSE_NOCAPTURE'
-    name = 'capture'
-    score = 1600
-
-    def __init__(self):
-        self.stdout = []
-        self._buf = None
-
-    def options(self, parser, env):
-        """Register commandline options
-        """
-        parser.add_option(
-            "-s", "--nocapture", action="store_false",
-            default=not env.get(self.env_opt), dest="capture",
-            help="Don't capture stdout (any stdout output "
-            "will be printed immediately) [NOSE_NOCAPTURE]")
-
-    def configure(self, options, conf):
-        """Configure plugin. Plugin is enabled by default.
-        """
-        self.conf = conf
-        if not options.capture:
-            self.enabled = False
-
-    def afterTest(self, test):
-        """Clear capture buffer.
-        """
-        self.end()
-        self._buf = None
-
-    def begin(self):
-        """Replace sys.stdout with capture buffer.
-        """
-        self.start() # get an early handle on sys.stdout
-
-    def beforeTest(self, test):
-        """Flush capture buffer.
-        """
-        self.start()
-
-    def formatError(self, test, err):
-        """Add captured output to error report.
-        """
-        test.capturedOutput = output = self.buffer
-        self._buf = None
-        if not output:
-            # Don't return None as that will prevent other
-            # formatters from formatting and remove earlier formatters
-            # formats, instead return the err we got
-            return err
-        ec, ev, tb = err
-        return (ec, self.addCaptureToErr(ev, output), tb)
-
-    def formatFailure(self, test, err):
-        """Add captured output to failure report.
-        """
-        return self.formatError(test, err)
-
-    def addCaptureToErr(self, ev, output):
-        ev = exc_to_unicode(ev)
-        output = force_unicode(output)
-        return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
-                           output, ln(u'>> end captured stdout <<')])
-
-    def start(self):
-        self.stdout.append(sys.stdout)
-        self._buf = StringIO()
-        sys.stdout = self._buf
-
-    def end(self):
-        if self.stdout:
-            sys.stdout = self.stdout.pop()
-
-    def finalize(self, result):
-        """Restore stdout.
-        """
-        while self.stdout:
-            self.end()
-
-    def _get_buffer(self):
-        if self._buf is not None:
-            return self._buf.getvalue()
-
-    buffer = property(_get_buffer, None, None,
-                      """Captured stdout output.""")
diff --git a/lib/spack/external/nose/plugins/collect.py b/lib/spack/external/nose/plugins/collect.py
deleted file mode 100644
index 6f9f0faa77..0000000000
--- a/lib/spack/external/nose/plugins/collect.py
+++ /dev/null
@@ -1,94 +0,0 @@
-"""
-This plugin bypasses the actual execution of tests, and instead just collects
-test names. Fixtures are also bypassed, so running nosetests with the 
-collection plugin enabled should be very quick.
-
-This plugin is useful in combination with the testid plugin (``--with-id``).
-Run both together to get an indexed list of all tests, which will enable you to
-run individual tests by index number.
-
-This plugin is also useful for counting tests in a test suite, and making
-people watching your demo think all of your tests pass.
-"""
-from nose.plugins.base import Plugin
-from nose.case import Test
-import logging
-import unittest
-
-log = logging.getLogger(__name__)
-
-
-class CollectOnly(Plugin):
-    """
-    Collect and output test names only, don't run any tests.
-    """
-    name = "collect-only"
-    enableOpt = 'collect_only'
-
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        parser.add_option('--collect-only',
-                          action='store_true',
-                          dest=self.enableOpt,
-                          default=env.get('NOSE_COLLECT_ONLY'),
-                          help="Enable collect-only: %s [COLLECT_ONLY]" %
-                          (self.help()))
-
-    def prepareTestLoader(self, loader):
-        """Install collect-only suite class in TestLoader.
-        """
-        # Disable context awareness
-        log.debug("Preparing test loader")
-        loader.suiteClass = TestSuiteFactory(self.conf)
-
-    def prepareTestCase(self, test):
-        """Replace actual test with dummy that always passes.
-        """
-        # Return something that always passes
-        log.debug("Preparing test case %s", test)
-        if not isinstance(test, Test):
-            return
-        def run(result):
-            # We need to make these plugin calls because there won't be
-            # a result proxy, due to using a stripped-down test suite
-            self.conf.plugins.startTest(test)
-            result.startTest(test)
-            self.conf.plugins.addSuccess(test)
-            result.addSuccess(test)
-            self.conf.plugins.stopTest(test)
-            result.stopTest(test)
-        return run
-
-
-class TestSuiteFactory:
-    """
-    Factory for producing configured test suites.
-    """
-    def __init__(self, conf):
-        self.conf = conf
-
-    def __call__(self, tests=(), **kw):
-        return TestSuite(tests, conf=self.conf)
-
-
-class TestSuite(unittest.TestSuite):
-    """
-    Basic test suite that bypasses most proxy and plugin calls, but does
-    wrap tests in a nose.case.Test so prepareTestCase will be called.
-    """
-    def __init__(self, tests=(), conf=None):
-        self.conf = conf
-        # Exec lazy suites: makes discovery depth-first
-        if callable(tests):
-            tests = tests()
-        log.debug("TestSuite(%r)", tests)
-        unittest.TestSuite.__init__(self, tests)
-
-    def addTest(self, test):
-        log.debug("Add test %s", test)
-        if isinstance(test, unittest.TestSuite):
-            self._tests.append(test)
-        else:
-            self._tests.append(Test(test, config=self.conf))
-
diff --git a/lib/spack/external/nose/plugins/cover.py b/lib/spack/external/nose/plugins/cover.py
deleted file mode 100644
index fbe2e30dcd..0000000000
--- a/lib/spack/external/nose/plugins/cover.py
+++ /dev/null
@@ -1,271 +0,0 @@
-"""If you have Ned Batchelder's coverage_ module installed, you may activate a
-coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
-environment variable. The coverage report will cover any python source module
-imported after the start of the test run, excluding modules that match
-testMatch. If you want to include those modules too, use the ``--cover-tests``
-switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
-restrict the coverage report to modules from a particular package or packages,
-use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
-variable.
-
-.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
-"""
-import logging
-import re
-import sys
-import StringIO
-from nose.plugins.base import Plugin
-from nose.util import src, tolist
-
-log = logging.getLogger(__name__)
-
-
-class Coverage(Plugin):
-    """
-    Activate a coverage report using Ned Batchelder's coverage module.
-    """
-    coverTests = False
-    coverPackages = None
-    coverInstance = None
-    coverErase = False
-    coverMinPercentage = None
-    score = 200
-    status = {}
-
-    def options(self, parser, env):
-        """
-        Add options to command line.
-        """
-        super(Coverage, self).options(parser, env)
-        parser.add_option("--cover-package", action="append",
-                          default=env.get('NOSE_COVER_PACKAGE'),
-                          metavar="PACKAGE",
-                          dest="cover_packages",
-                          help="Restrict coverage output to selected packages "
-                          "[NOSE_COVER_PACKAGE]")
-        parser.add_option("--cover-erase", action="store_true",
-                          default=env.get('NOSE_COVER_ERASE'),
-                          dest="cover_erase",
-                          help="Erase previously collected coverage "
-                          "statistics before run")
-        parser.add_option("--cover-tests", action="store_true",
-                          dest="cover_tests",
-                          default=env.get('NOSE_COVER_TESTS'),
-                          help="Include test modules in coverage report "
-                          "[NOSE_COVER_TESTS]")
-        parser.add_option("--cover-min-percentage", action="store",
-                          dest="cover_min_percentage",
-                          default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
-                          help="Minimum percentage of coverage for tests "
-                          "to pass [NOSE_COVER_MIN_PERCENTAGE]")
-        parser.add_option("--cover-inclusive", action="store_true",
-                          dest="cover_inclusive",
-                          default=env.get('NOSE_COVER_INCLUSIVE'),
-                          help="Include all python files under working "
-                          "directory in coverage report.  Useful for "
-                          "discovering holes in test coverage if not all "
-                          "files are imported by the test suite. "
-                          "[NOSE_COVER_INCLUSIVE]")
-        parser.add_option("--cover-html", action="store_true",
-                          default=env.get('NOSE_COVER_HTML'),
-                          dest='cover_html',
-                          help="Produce HTML coverage information")
-        parser.add_option('--cover-html-dir', action='store',
-                          default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
-                          dest='cover_html_dir',
-                          metavar='DIR',
-                          help='Produce HTML coverage information in dir')
-        parser.add_option("--cover-branches", action="store_true",
-                          default=env.get('NOSE_COVER_BRANCHES'),
-                          dest="cover_branches",
-                          help="Include branch coverage in coverage report "
-                          "[NOSE_COVER_BRANCHES]")
-        parser.add_option("--cover-xml", action="store_true",
-                          default=env.get('NOSE_COVER_XML'),
-                          dest="cover_xml",
-                          help="Produce XML coverage information")
-        parser.add_option("--cover-xml-file", action="store",
-                          default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
-                          dest="cover_xml_file",
-                          metavar="FILE",
-                          help="Produce XML coverage information in file")
-
-    def configure(self, options, conf):
-        """
-        Configure plugin.
-        """
-        try:
-            self.status.pop('active')
-        except KeyError:
-            pass
-        super(Coverage, self).configure(options, conf)
-        if self.enabled:
-            try:
-                import coverage
-                if not hasattr(coverage, 'coverage'):
-                    raise ImportError("Unable to import coverage module")
-            except ImportError:
-                log.error("Coverage not available: "
-                          "unable to import coverage module")
-                self.enabled = False
-                return
-        self.conf = conf
-        self.coverErase = options.cover_erase
-        self.coverTests = options.cover_tests
-        self.coverPackages = []
-        if options.cover_packages:
-            if isinstance(options.cover_packages, (list, tuple)):
-                cover_packages = options.cover_packages
-            else:
-                cover_packages = [options.cover_packages]
-            for pkgs in [tolist(x) for x in cover_packages]:
-                self.coverPackages.extend(pkgs)
-        self.coverInclusive = options.cover_inclusive
-        if self.coverPackages:
-            log.info("Coverage report will include only packages: %s",
-                     self.coverPackages)
-        self.coverHtmlDir = None
-        if options.cover_html:
-            self.coverHtmlDir = options.cover_html_dir
-            log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
-        self.coverBranches = options.cover_branches
-        self.coverXmlFile = None
-        if options.cover_min_percentage:
-            self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
-        if options.cover_xml:
-            self.coverXmlFile = options.cover_xml_file
-            log.debug('Will put XML coverage report in %s', self.coverXmlFile)
-        if self.enabled:
-            self.status['active'] = True
-            self.coverInstance = coverage.coverage(auto_data=False,
-                branch=self.coverBranches, data_suffix=conf.worker,
-                source=self.coverPackages)
-            self.coverInstance._warn_no_data = False
-            self.coverInstance.is_worker = conf.worker
-            self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
-
-            log.debug("Coverage begin")
-            self.skipModules = sys.modules.keys()[:]
-            if self.coverErase:
-                log.debug("Clearing previously collected coverage statistics")
-                self.coverInstance.combine()
-                self.coverInstance.erase()
-
-            if not self.coverInstance.is_worker:
-                self.coverInstance.load()
-                self.coverInstance.start()
-
-
-    def beforeTest(self, *args, **kwargs):
-        """
-        Begin recording coverage information.
-        """
-
-        if self.coverInstance.is_worker:
-            self.coverInstance.load()
-            self.coverInstance.start()
-
-    def afterTest(self, *args, **kwargs):
-        """
-        Stop recording coverage information.
-        """
-
-        if self.coverInstance.is_worker:
-            self.coverInstance.stop()
-            self.coverInstance.save()
-
-
-    def report(self, stream):
-        """
-        Output code coverage report.
-        """
-        log.debug("Coverage report")
-        self.coverInstance.stop()
-        self.coverInstance.combine()
-        self.coverInstance.save()
-        modules = [module
-                    for name, module in sys.modules.items()
-                    if self.wantModuleCoverage(name, module)]
-        log.debug("Coverage report will cover modules: %s", modules)
-        self.coverInstance.report(modules, file=stream)
-
-        import coverage
-        if self.coverHtmlDir:
-            log.debug("Generating HTML coverage report")
-            try:
-                self.coverInstance.html_report(modules, self.coverHtmlDir)
-            except coverage.misc.CoverageException, e:
-                log.warning("Failed to generate HTML report: %s" % str(e))
-
-        if self.coverXmlFile:
-            log.debug("Generating XML coverage report")
-            try:
-                self.coverInstance.xml_report(modules, self.coverXmlFile)
-            except coverage.misc.CoverageException, e:
-                log.warning("Failed to generate XML report: %s" % str(e))
-
-        # make sure we have minimum required coverage
-        if self.coverMinPercentage:
-            f = StringIO.StringIO()
-            self.coverInstance.report(modules, file=f)
-
-            multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
-                              r'\s+(\d+)%\s+\d*\s{0,1}$')
-            singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
-                               r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
-
-            m = re.search(multiPackageRe, f.getvalue())
-            if m is None:
-                m = re.search(singlePackageRe, f.getvalue())
-
-            if m:
-                percentage = int(m.groups()[0])
-                if percentage < self.coverMinPercentage:
-                    log.error('TOTAL Coverage did not reach minimum '
-                              'required: %d%%' % self.coverMinPercentage)
-                    sys.exit(1)
-            else:
-                log.error("No total percentage was found in coverage output, "
-                          "something went wrong.")
-
-
-    def wantModuleCoverage(self, name, module):
-        if not hasattr(module, '__file__'):
-            log.debug("no coverage of %s: no __file__", name)
-            return False
-        module_file = src(module.__file__)
-        if not module_file or not module_file.endswith('.py'):
-            log.debug("no coverage of %s: not a python file", name)
-            return False
-        if self.coverPackages:
-            for package in self.coverPackages:
-                if (re.findall(r'^%s\b' % re.escape(package), name)
-                    and (self.coverTests
-                         or not self.conf.testMatch.search(name))):
-                    log.debug("coverage for %s", name)
-                    return True
-        if name in self.skipModules:
-            log.debug("no coverage for %s: loaded before coverage start",
-                      name)
-            return False
-        if self.conf.testMatch.search(name) and not self.coverTests:
-            log.debug("no coverage for %s: is a test", name)
-            return False
-        # accept any package that passed the previous tests, unless
-        # coverPackages is on -- in that case, if we wanted this
-        # module, we would have already returned True
-        return not self.coverPackages
-
-    def wantFile(self, file, package=None):
-        """If inclusive coverage enabled, return true for all source files
-        in wanted packages.
-        """
-        if self.coverInclusive:
-            if file.endswith(".py"):
-                if package and self.coverPackages:
-                    for want in self.coverPackages:
-                        if package.startswith(want):
-                            return True
-                else:
-                    return True
-        return None
diff --git a/lib/spack/external/nose/plugins/debug.py b/lib/spack/external/nose/plugins/debug.py
deleted file mode 100644
index 78243e60d0..0000000000
--- a/lib/spack/external/nose/plugins/debug.py
+++ /dev/null
@@ -1,67 +0,0 @@
-"""
-This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
-option will drop the test runner into pdb when it encounters an error. To
-drop into pdb on failure, use ``--pdb-failures``.
-"""
-
-import pdb
-from nose.plugins.base import Plugin
-
-class Pdb(Plugin):
-    """
-    Provides --pdb and --pdb-failures options that cause the test runner to
-    drop into pdb if it encounters an error or failure, respectively.
-    """
-    enabled_for_errors = False
-    enabled_for_failures = False
-    score = 5 # run last, among builtins
-    
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        parser.add_option(
-            "--pdb", action="store_true", dest="debugBoth",
-            default=env.get('NOSE_PDB', False),
-            help="Drop into debugger on failures or errors")
-        parser.add_option(
-            "--pdb-failures", action="store_true",
-            dest="debugFailures",
-            default=env.get('NOSE_PDB_FAILURES', False),
-            help="Drop into debugger on failures")
-        parser.add_option(
-            "--pdb-errors", action="store_true",
-            dest="debugErrors",
-            default=env.get('NOSE_PDB_ERRORS', False),
-            help="Drop into debugger on errors")
-
-    def configure(self, options, conf):
-        """Configure which kinds of exceptions trigger plugin.
-        """
-        self.conf = conf
-        self.enabled_for_errors = options.debugErrors or options.debugBoth
-        self.enabled_for_failures = options.debugFailures or options.debugBoth
-        self.enabled = self.enabled_for_failures or self.enabled_for_errors
-
-    def addError(self, test, err):
-        """Enter pdb if configured to debug errors.
-        """
-        if not self.enabled_for_errors:
-            return
-        self.debug(err)
-
-    def addFailure(self, test, err):
-        """Enter pdb if configured to debug failures.
-        """
-        if not self.enabled_for_failures:
-            return
-        self.debug(err)
-
-    def debug(self, err):
-        import sys # FIXME why is this import here?
-        ec, ev, tb = err
-        stdout = sys.stdout
-        sys.stdout = sys.__stdout__
-        try:
-            pdb.post_mortem(tb)
-        finally:
-            sys.stdout = stdout
diff --git a/lib/spack/external/nose/plugins/deprecated.py b/lib/spack/external/nose/plugins/deprecated.py
deleted file mode 100644
index 461a26be63..0000000000
--- a/lib/spack/external/nose/plugins/deprecated.py
+++ /dev/null
@@ -1,45 +0,0 @@
-"""
-This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
-exception. When :class:`DeprecatedTest` is raised, the exception will be logged
-in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
-will be output, and the exception will not be counted as an error or failure.
-It is enabled by default, but can be turned off by using ``--no-deprecated``.
-"""
-
-from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
-
-
-class DeprecatedTest(Exception):
-    """Raise this exception to mark a test as deprecated.
-    """
-    pass
-
-
-class Deprecated(ErrorClassPlugin):
-    """
-    Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
-    by default.
-    """
-    enabled = True
-    deprecated = ErrorClass(DeprecatedTest,
-                            label='DEPRECATED',
-                            isfailure=False)
-
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        env_opt = 'NOSE_WITHOUT_DEPRECATED'
-        parser.add_option('--no-deprecated', action='store_true',
-                          dest='noDeprecated', default=env.get(env_opt, False),
-                          help="Disable special handling of DeprecatedTest "
-                          "exceptions.")
-
-    def configure(self, options, conf):
-        """Configure plugin.
-        """
-        if not self.can_configure:
-            return
-        self.conf = conf
-        disable = getattr(options, 'noDeprecated', False)
-        if disable:
-            self.enabled = False
diff --git a/lib/spack/external/nose/plugins/doctests.py b/lib/spack/external/nose/plugins/doctests.py
deleted file mode 100644
index 5ef65799f3..0000000000
--- a/lib/spack/external/nose/plugins/doctests.py
+++ /dev/null
@@ -1,455 +0,0 @@
-"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
-environment variable to enable collection and execution of :mod:`doctests
-<doctest>`.  Because doctests are usually included in the tested package
-(instead of being grouped into packages or modules of their own), nose only
-looks for them in the non-test packages it discovers in the working directory.
-
-Doctests may also be placed into files other than python modules, in which
-case they can be collected and executed by using the ``--doctest-extension``
-switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
-extension(s) to load.
-
-When loading doctests from non-module files, use the ``--doctest-fixtures``
-switch to specify how to find modules containing fixtures for the tests. A
-module name will be produced by appending the value of that switch to the base
-name of each doctest file loaded. For example, a doctest file "widgets.rst"
-with the switch ``--doctest_fixtures=_fixt`` will load fixtures from the module
-``widgets_fixt.py``.
-
-A fixtures module may define any or all of the following functions:
-
-* setup([module]) or setup_module([module])
-   
-  Called before the test runs. You may raise SkipTest to skip all tests.
-  
-* teardown([module]) or teardown_module([module])
-
-  Called after the test runs, if setup/setup_module did not raise an
-  unhandled exception.
-
-* setup_test(test)
-
-  Called before the test. NOTE: the argument passed is a
-  doctest.DocTest instance, *not* a unittest.TestCase.
-  
-* teardown_test(test)
- 
-  Called after the test, if setup_test did not raise an exception. NOTE: the
-  argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
-  
-Doctests are run like any other test, with the exception that output
-capture does not work; doctest does its own output capture while running a
-test.
-
-.. note ::
-
-   See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
-   additional documentation and examples.
-
-"""
-from __future__ import generators
-
-import logging
-import os
-import sys
-import unittest
-from inspect import getmodule
-from nose.plugins.base import Plugin
-from nose.suite import ContextList
-from nose.util import anyp, getpackage, test_address, resolve_name, \
-     src, tolist, isproperty
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
-import sys
-import __builtin__ as builtin_mod
-
-log = logging.getLogger(__name__)
-
-try:
-    import doctest
-    doctest.DocTestCase
-    # system version of doctest is acceptable, but needs a monkeypatch
-except (ImportError, AttributeError):
-    # system version is too old
-    import nose.ext.dtcompat as doctest
-
-
-#
-# Doctest and coverage don't get along, so we need to create
-# a monkeypatch that will replace the part of doctest that
-# interferes with coverage reports.
-#
-# The monkeypatch is based on this zope patch:
-# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
-#
-_orp = doctest._OutputRedirectingPdb
-
-class NoseOutputRedirectingPdb(_orp):
-    def __init__(self, out):
-        self.__debugger_used = False
-        _orp.__init__(self, out)
-
-    def set_trace(self):
-        self.__debugger_used = True
-        _orp.set_trace(self, sys._getframe().f_back)
-
-    def set_continue(self):
-        # Calling set_continue unconditionally would break unit test 
-        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
-        if self.__debugger_used:
-            _orp.set_continue(self)
-doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb    
-
-
-class DoctestSuite(unittest.TestSuite):
-    """
-    Doctest suites are parallelizable at the module or file level only,
-    since they may be attached to objects that are not individually
-    addressable (like properties). This suite subclass is used when
-    loading doctests from a module to ensure that behavior.
-
-    This class is used only if the plugin is not fully prepared;
-    in normal use, the loader's suiteClass is used.
-    
-    """
-    can_split = False
-    
-    def __init__(self, tests=(), context=None, can_split=False):
-        self.context = context
-        self.can_split = can_split
-        unittest.TestSuite.__init__(self, tests=tests)
-
-    def address(self):
-        return test_address(self.context)
-
-    def __iter__(self):
-        # 2.3 compat
-        return iter(self._tests)
-
-    def __str__(self):
-        return str(self._tests)
-
-        
-class Doctest(Plugin):
-    """
-    Activate doctest plugin to find and run doctests in non-test modules.
-    """
-    extension = None
-    suiteClass = DoctestSuite
-    
-    def options(self, parser, env):
-        """Register commmandline options.
-        """
-        Plugin.options(self, parser, env)
-        parser.add_option('--doctest-tests', action='store_true',
-                          dest='doctest_tests',
-                          default=env.get('NOSE_DOCTEST_TESTS'),
-                          help="Also look for doctests in test modules. "
-                          "Note that classes, methods and functions should "
-                          "have either doctests or non-doctest tests, "
-                          "not both. [NOSE_DOCTEST_TESTS]")
-        parser.add_option('--doctest-extension', action="append",
-                          dest="doctestExtension",
-                          metavar="EXT",
-                          help="Also look for doctests in files with "
-                          "this extension [NOSE_DOCTEST_EXTENSION]")
-        parser.add_option('--doctest-result-variable',
-                          dest='doctest_result_var',
-                          default=env.get('NOSE_DOCTEST_RESULT_VAR'),
-                          metavar="VAR",
-                          help="Change the variable name set to the result of "
-                          "the last interpreter command from the default '_'. "
-                          "Can be used to avoid conflicts with the _() "
-                          "function used for text translation. "
-                          "[NOSE_DOCTEST_RESULT_VAR]")
-        parser.add_option('--doctest-fixtures', action="store",
-                          dest="doctestFixtures",
-                          metavar="SUFFIX",
-                          help="Find fixtures for a doctest file in module "
-                          "with this name appended to the base name "
-                          "of the doctest file")
-        parser.add_option('--doctest-options', action="append",
-                          dest="doctestOptions",
-                          metavar="OPTIONS",
-                          help="Specify options to pass to doctest. " +
-                          "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
-        # Set the default as a list, if given in env; otherwise
-        # an additional value set on the command line will cause
-        # an error.
-        env_setting = env.get('NOSE_DOCTEST_EXTENSION')
-        if env_setting is not None:
-            parser.set_defaults(doctestExtension=tolist(env_setting))
-
-    def configure(self, options, config):
-        """Configure plugin.
-        """
-        Plugin.configure(self, options, config)
-        self.doctest_result_var = options.doctest_result_var
-        self.doctest_tests = options.doctest_tests
-        self.extension = tolist(options.doctestExtension)
-        self.fixtures = options.doctestFixtures
-        self.finder = doctest.DocTestFinder()
-        self.optionflags = 0
-        if options.doctestOptions:
-            flags = ",".join(options.doctestOptions).split(',')
-            for flag in flags:
-                if not flag or flag[0] not in '+-':
-                    raise ValueError(
-                        "Must specify doctest options with starting " +
-                        "'+' or '-'.  Got %s" % (flag,))
-                mode, option_name = flag[0], flag[1:]
-                option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name)
-                if not option_flag:
-                    raise ValueError("Unknown doctest option %s" %
-                                     (option_name,))
-                if mode == '+':
-                    self.optionflags |= option_flag
-                elif mode == '-':
-                    self.optionflags &= ~option_flag
-
-    def prepareTestLoader(self, loader):
-        """Capture loader's suiteClass.
-
-        This is used to create test suites from doctest files.
-        
-        """
-        self.suiteClass = loader.suiteClass
-
-    def loadTestsFromModule(self, module):
-        """Load doctests from the module.
-        """
-        log.debug("loading from %s", module)
-        if not self.matches(module.__name__):
-            log.debug("Doctest doesn't want module %s", module)
-            return
-        try:
-            tests = self.finder.find(module)
-        except AttributeError:
-            log.exception("Attribute error loading from %s", module)
-            # nose allows module.__test__ = False; doctest does not and throws
-            # AttributeError
-            return
-        if not tests:
-            log.debug("No tests found in %s", module)
-            return
-        tests.sort()
-        module_file = src(module.__file__)
-        # FIXME this breaks the id plugin somehow (tests probably don't
-        # get wrapped in result proxy or something)
-        cases = []
-        for test in tests:
-            if not test.examples:
-                continue
-            if not test.filename:
-                test.filename = module_file
-            cases.append(DocTestCase(test,
-                                     optionflags=self.optionflags,
-                                     result_var=self.doctest_result_var))
-        if cases:
-            yield self.suiteClass(cases, context=module, can_split=False)
-            
-    def loadTestsFromFile(self, filename):
-        """Load doctests from the file.
-
-        Tests are loaded only if filename's extension matches
-        configured doctest extension.
-
-        """
-        if self.extension and anyp(filename.endswith, self.extension):
-            name = os.path.basename(filename)
-            dh = open(filename)
-            try:
-                doc = dh.read()
-            finally:
-                dh.close()
-
-            fixture_context = None
-            globs = {'__file__': filename}
-            if self.fixtures:
-                base, ext = os.path.splitext(name)
-                dirname = os.path.dirname(filename)
-                sys.path.append(dirname)
-                fixt_mod = base + self.fixtures
-                try:
-                    fixture_context = __import__(
-                        fixt_mod, globals(), locals(), ["nop"])
-                except ImportError, e:
-                    log.debug(
-                        "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
-                log.debug("Fixture module %s resolved to %s",
-                          fixt_mod, fixture_context)
-                if hasattr(fixture_context, 'globs'):
-                    globs = fixture_context.globs(globs)                    
-            parser = doctest.DocTestParser()
-            test = parser.get_doctest(
-                doc, globs=globs, name=name,
-                filename=filename, lineno=0)
-            if test.examples:
-                case = DocFileCase(
-                    test,
-                    optionflags=self.optionflags,
-                    setUp=getattr(fixture_context, 'setup_test', None),
-                    tearDown=getattr(fixture_context, 'teardown_test', None),
-                    result_var=self.doctest_result_var)
-                if fixture_context:
-                    yield ContextList((case,), context=fixture_context)
-                else:
-                    yield case
-            else:
-                yield False # no tests to load
-            
-    def makeTest(self, obj, parent):
-        """Look for doctests in the given object, which will be a
-        function, method or class.
-        """
-        name = getattr(obj, '__name__', 'Unnammed %s' % type(obj))
-        doctests = self.finder.find(obj, module=getmodule(parent), name=name)
-        if doctests:
-            for test in doctests:
-                if len(test.examples) == 0:
-                    continue
-                yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
-                                  result_var=self.doctest_result_var)
-    
-    def matches(self, name):
-        # FIXME this seems wrong -- nothing is ever going to
-        # fail this test, since we're given a module NAME not FILE
-        if name == '__init__.py':
-            return False
-        # FIXME don't think we need include/exclude checks here?
-        return ((self.doctest_tests or not self.conf.testMatch.search(name)
-                 or (self.conf.include 
-                     and filter(None,
-                                [inc.search(name)
-                                 for inc in self.conf.include])))
-                and (not self.conf.exclude 
-                     or not filter(None,
-                                   [exc.search(name)
-                                    for exc in self.conf.exclude])))
-    
-    def wantFile(self, file):
-        """Override to select all modules and any file ending with
-        configured doctest extension.
-        """
-        # always want .py files
-        if file.endswith('.py'):
-            return True
-        # also want files that match my extension
-        if (self.extension
-            and anyp(file.endswith, self.extension)
-            and (not self.conf.exclude
-                 or not filter(None, 
-                               [exc.search(file)
-                                for exc in self.conf.exclude]))):
-            return True
-        return None
-
-
-class DocTestCase(doctest.DocTestCase):
-    """Overrides DocTestCase to
-    provide an address() method that returns the correct address for
-    the doctest case. To provide hints for address(), an obj may also
-    be passed -- this will be used as the test object for purposes of
-    determining the test address, if it is provided.
-    """
-    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
-                 checker=None, obj=None, result_var='_'):
-        self._result_var = result_var
-        self._nose_obj = obj
-        super(DocTestCase, self).__init__(
-            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
-            checker=checker)
-    
-    def address(self):
-        if self._nose_obj is not None:
-            return test_address(self._nose_obj)
-        obj = resolve_name(self._dt_test.name)
-
-        if isproperty(obj):
-            # properties have no connection to the class they are in
-            # so we can't just look 'em up, we have to first look up
-            # the class, then stick the prop on the end
-            parts = self._dt_test.name.split('.')
-            class_name = '.'.join(parts[:-1])
-            cls = resolve_name(class_name)
-            base_addr = test_address(cls)
-            return (base_addr[0], base_addr[1],
-                    '.'.join([base_addr[2], parts[-1]]))
-        else:
-            return test_address(obj)
-    
-    # doctests loaded via find(obj) omit the module name
-    # so we need to override id, __repr__ and shortDescription
-    # bonus: this will squash a 2.3 vs 2.4 incompatiblity
-    def id(self):
-        name = self._dt_test.name
-        filename = self._dt_test.filename
-        if filename is not None:
-            pk = getpackage(filename)
-            if pk is None:
-                return name
-            if not name.startswith(pk):
-                name = "%s.%s" % (pk, name)
-        return name
-    
-    def __repr__(self):
-        name = self.id()
-        name = name.split('.')
-        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
-    __str__ = __repr__
-
-    def shortDescription(self):
-        return 'Doctest: %s' % self.id()
-
-    def setUp(self):
-        if self._result_var is not None:
-            self._old_displayhook = sys.displayhook
-            sys.displayhook = self._displayhook
-        super(DocTestCase, self).setUp()
-
-    def _displayhook(self, value):
-        if value is None:
-            return
-        setattr(builtin_mod, self._result_var,  value)
-        print repr(value)
-
-    def tearDown(self):
-        super(DocTestCase, self).tearDown()
-        if self._result_var is not None:
-            sys.displayhook = self._old_displayhook
-            delattr(builtin_mod, self._result_var)
-
-
-class DocFileCase(doctest.DocFileCase):
-    """Overrides to provide address() method that returns the correct
-    address for the doc file case.
-    """
-    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
-                 checker=None, result_var='_'):
-        self._result_var = result_var
-        super(DocFileCase, self).__init__(
-            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
-            checker=None)
-
-    def address(self):
-        return (self._dt_test.filename, None, None)
-
-    def setUp(self):
-        if self._result_var is not None:
-            self._old_displayhook = sys.displayhook
-            sys.displayhook = self._displayhook
-        super(DocFileCase, self).setUp()
-
-    def _displayhook(self, value):
-        if value is None:
-            return
-        setattr(builtin_mod, self._result_var, value)
-        print repr(value)
-
-    def tearDown(self):
-        super(DocFileCase, self).tearDown()
-        if self._result_var is not None:
-            sys.displayhook = self._old_displayhook
-            delattr(builtin_mod, self._result_var)
diff --git a/lib/spack/external/nose/plugins/errorclass.py b/lib/spack/external/nose/plugins/errorclass.py
deleted file mode 100644
index d1540e0070..0000000000
--- a/lib/spack/external/nose/plugins/errorclass.py
+++ /dev/null
@@ -1,210 +0,0 @@
-"""
-ErrorClass Plugins
-------------------
-
-ErrorClass plugins provide an easy way to add support for custom
-handling of particular classes of exceptions.
-
-An ErrorClass plugin defines one or more ErrorClasses and how each is
-handled and reported on. Each error class is stored in a different
-attribute on the result, and reported separately. Each error class must
-indicate the exceptions that fall under that class, the label to use
-for reporting, and whether exceptions of the class should be
-considered as failures for the whole test run.
-
-ErrorClasses use a declarative syntax. Assign an ErrorClass to the
-attribute you wish to add to the result object, defining the
-exceptions, label and isfailure attributes. For example, to declare an
-ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
-as an error class with the label 'TODO' that is considered a failure,
-do this:
-
-    >>> class Todo(Exception):
-    ...     pass
-    >>> class TodoError(ErrorClassPlugin):
-    ...     todo = ErrorClass(Todo, label='TODO', isfailure=True)
-
-The MetaErrorClass metaclass translates the ErrorClass declarations
-into the tuples used by the error handling and reporting functions in
-the result. This is an internal format and subject to change; you
-should always use the declarative syntax for attaching ErrorClasses to
-an ErrorClass plugin.
-
-    >>> TodoError.errorClasses # doctest: +ELLIPSIS
-    ((<class ...Todo...>, ('todo', 'TODO', True)),)
-
-Let's see the plugin in action. First some boilerplate.
-
-    >>> import sys
-    >>> import unittest
-    >>> try:
-    ...     # 2.7+
-    ...     from unittest.runner import _WritelnDecorator
-    ... except ImportError:
-    ...     from unittest import _WritelnDecorator
-    ...
-    >>> buf = _WritelnDecorator(sys.stdout)
-
-Now define a test case that raises a Todo.
-
-    >>> class TestTodo(unittest.TestCase):
-    ...     def runTest(self):
-    ...         raise Todo("I need to test something")
-    >>> case = TestTodo()
-
-Prepare the result using our plugin. Normally this happens during the
-course of test execution within nose -- you won't be doing this
-yourself. For the purposes of this testing document, I'm stepping
-through the internal process of nose so you can see what happens at
-each step.
-
-    >>> plugin = TodoError()
-    >>> from nose.result import _TextTestResult
-    >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
-    >>> plugin.prepareTestResult(result)
-
-Now run the test. TODO is printed.
-
-    >>> _ = case(result) # doctest: +ELLIPSIS
-    runTest (....TestTodo) ... TODO: I need to test something
-
-Errors and failures are empty, but todo has our test:
-
-    >>> result.errors
-    []
-    >>> result.failures
-    []
-    >>> result.todo # doctest: +ELLIPSIS
-    [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
-    >>> result.printErrors() # doctest: +ELLIPSIS
-    <BLANKLINE>
-    ======================================================================
-    TODO: runTest (....TestTodo)
-    ----------------------------------------------------------------------
-    Traceback (most recent call last):
-    ...
-    ...Todo: I need to test something
-    <BLANKLINE>
-
-Since we defined a Todo as a failure, the run was not successful.
-
-    >>> result.wasSuccessful()
-    False
-"""
-
-from nose.pyversion import make_instancemethod
-from nose.plugins.base import Plugin
-from nose.result import TextTestResult
-from nose.util import isclass
-
-class MetaErrorClass(type):
-    """Metaclass for ErrorClassPlugins that allows error classes to be
-    set up in a declarative manner.
-    """
-    def __init__(self, name, bases, attr):
-        errorClasses = []
-        for name, detail in attr.items():
-            if isinstance(detail, ErrorClass):
-                attr.pop(name)
-                for cls in detail:
-                    errorClasses.append(
-                        (cls, (name, detail.label, detail.isfailure)))
-        super(MetaErrorClass, self).__init__(name, bases, attr)
-        self.errorClasses = tuple(errorClasses)
-
-
-class ErrorClass(object):
-    def __init__(self, *errorClasses, **kw):
-        self.errorClasses = errorClasses
-        try:
-            for key in ('label', 'isfailure'):
-                setattr(self, key, kw.pop(key))
-        except KeyError:
-            raise TypeError("%r is a required named argument for ErrorClass"
-                            % key)
-
-    def __iter__(self):
-        return iter(self.errorClasses)
-
-
-class ErrorClassPlugin(Plugin):
-    """
-    Base class for ErrorClass plugins. Subclass this class and declare the
-    exceptions that you wish to handle as attributes of the subclass.
-    """
-    __metaclass__ = MetaErrorClass
-    score = 1000
-    errorClasses = ()
-
-    def addError(self, test, err):
-        err_cls, a, b = err
-        if not isclass(err_cls):
-            return
-        classes = [e[0] for e in self.errorClasses]
-        if filter(lambda c: issubclass(err_cls, c), classes):
-            return True
-
-    def prepareTestResult(self, result):
-        if not hasattr(result, 'errorClasses'):
-            self.patchResult(result)
-        for cls, (storage_attr, label, isfail) in self.errorClasses:
-            if cls not in result.errorClasses:
-                storage = getattr(result, storage_attr, [])
-                setattr(result, storage_attr, storage)
-                result.errorClasses[cls] = (storage, label, isfail)
-
-    def patchResult(self, result):
-        result.printLabel = print_label_patch(result)
-        result._orig_addError, result.addError = \
-            result.addError, add_error_patch(result)
-        result._orig_wasSuccessful, result.wasSuccessful = \
-            result.wasSuccessful, wassuccessful_patch(result)
-        if hasattr(result, 'printErrors'):
-            result._orig_printErrors, result.printErrors = \
-                result.printErrors, print_errors_patch(result)
-        if hasattr(result, 'addSkip'):
-            result._orig_addSkip, result.addSkip = \
-                result.addSkip, add_skip_patch(result)
-        result.errorClasses = {}
-
-
-def add_error_patch(result):
-    """Create a new addError method to patch into a result instance
-    that recognizes the errorClasses attribute and deals with
-    errorclasses correctly.
-    """
-    return make_instancemethod(TextTestResult.addError, result)
-
-
-def print_errors_patch(result):
-    """Create a new printErrors method that prints errorClasses items
-    as well.
-    """
-    return make_instancemethod(TextTestResult.printErrors, result)
-
-
-def print_label_patch(result):
-    """Create a new printLabel method that prints errorClasses items
-    as well.
-    """
-    return make_instancemethod(TextTestResult.printLabel, result)
-
-
-def wassuccessful_patch(result):
-    """Create a new wasSuccessful method that checks errorClasses for
-    exceptions that were put into other slots than error or failure
-    but that still count as not success.
-    """
-    return make_instancemethod(TextTestResult.wasSuccessful, result)
-
-
-def add_skip_patch(result):
-    """Create a new addSkip method to patch into a result instance
-    that delegates to addError.
-    """
-    return make_instancemethod(TextTestResult.addSkip, result)
-
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/failuredetail.py b/lib/spack/external/nose/plugins/failuredetail.py
deleted file mode 100644
index 6462865dd0..0000000000
--- a/lib/spack/external/nose/plugins/failuredetail.py
+++ /dev/null
@@ -1,49 +0,0 @@
-"""
-This plugin provides assert introspection. When the plugin is enabled
-and a test failure occurs, the traceback is displayed with extra context
-around the line in which the exception was raised. Simple variable 
-substitution is also performed in the context output to provide more
-debugging information.
-"""
-    
-from nose.plugins import Plugin
-from nose.pyversion import exc_to_unicode, force_unicode
-from nose.inspector import inspect_traceback
-
-class FailureDetail(Plugin):
-    """
-    Plugin that provides extra information in tracebacks of test failures.
-    """
-    score = 1600 # before capture
-    
-    def options(self, parser, env):
-        """Register commmandline options.
-        """
-        parser.add_option(
-            "-d", "--detailed-errors", "--failure-detail",
-            action="store_true",
-            default=env.get('NOSE_DETAILED_ERRORS'),
-            dest="detailedErrors", help="Add detail to error"
-            " output by attempting to evaluate failed"
-            " asserts [NOSE_DETAILED_ERRORS]")
-
-    def configure(self, options, conf):
-        """Configure plugin.
-        """
-        if not self.can_configure:
-            return
-        self.enabled = options.detailedErrors
-        self.conf = conf
-
-    def formatFailure(self, test, err):
-        """Add detail from traceback inspection to error message of a failure.
-        """
-        ec, ev, tb = err
-        tbinfo, str_ev = None, exc_to_unicode(ev)
-
-        if tb:
-            tbinfo = force_unicode(inspect_traceback(tb))
-            str_ev = '\n'.join([str_ev, tbinfo])
-        test.tbinfo = tbinfo
-        return (ec, str_ev, tb)
-
diff --git a/lib/spack/external/nose/plugins/isolate.py b/lib/spack/external/nose/plugins/isolate.py
deleted file mode 100644
index 13235dfbd1..0000000000
--- a/lib/spack/external/nose/plugins/isolate.py
+++ /dev/null
@@ -1,103 +0,0 @@
-"""The isolation plugin resets the contents of sys.modules after running
-each test module or package. Use it by setting ``--with-isolation`` or the
-NOSE_WITH_ISOLATION environment variable.
-
-The effects are similar to wrapping the following functions around the
-import and execution of each test module::
-
-    def setup(module):
-        module._mods = sys.modules.copy()
-    
-    def teardown(module):
-        to_del = [ m for m in sys.modules.keys() if m not in
-                   module._mods ]
-        for mod in to_del:
-            del sys.modules[mod]
-        sys.modules.update(module._mods)
-
-Isolation works only during lazy loading. In normal use, this is only
-during discovery of modules within a directory, where the process of
-importing, loading tests and running tests from each module is
-encapsulated in a single loadTestsFromName call. This plugin
-implements loadTestsFromNames to force the same lazy-loading there,
-which allows isolation to work in directed mode as well as discovery,
-at the cost of some efficiency: lazy-loading names forces full context
-setup and teardown to run for each name, defeating the grouping that
-is normally used to ensure that context setup and teardown are run the
-fewest possible times for a given set of names.
-
-.. warning ::
-
-    This plugin should not be used in conjunction with other plugins
-    that assume that modules, once imported, will stay imported; for
-    instance, it may cause very odd results when used with the coverage
-    plugin.
-
-"""
-
-import logging
-import sys
-
-from nose.plugins import Plugin
-
-
-log = logging.getLogger('nose.plugins.isolation')
-
-class IsolationPlugin(Plugin):
-    """
-    Activate the isolation plugin to isolate changes to external
-    modules to a single test module or package. The isolation plugin
-    resets the contents of sys.modules after each test module or
-    package runs to its state before the test. PLEASE NOTE that this
-    plugin should not be used with the coverage plugin, or in any other case
-    where module reloading may produce undesirable side-effects.
-    """
-    score = 10 # I want to be last
-    name = 'isolation'
-
-    def configure(self, options, conf):
-        """Configure plugin.
-        """        
-        Plugin.configure(self, options, conf)
-        self._mod_stack = []
-
-    def beforeContext(self):
-        """Copy sys.modules onto my mod stack
-        """
-        mods = sys.modules.copy()
-        self._mod_stack.append(mods)
-
-    def afterContext(self):
-        """Pop my mod stack and restore sys.modules to the state
-        it was in when mod stack was pushed.
-        """
-        mods = self._mod_stack.pop()
-        to_del = [ m for m in sys.modules.keys() if m not in mods ]
-        if to_del:
-            log.debug('removing sys modules entries: %s', to_del)
-            for mod in to_del:
-                del sys.modules[mod]
-        sys.modules.update(mods)
-
-    def loadTestsFromNames(self, names, module=None):
-        """Create a lazy suite that calls beforeContext and afterContext
-        around each name. The side-effect of this is that full context
-        fixtures will be set up and torn down around each test named.
-        """
-        # Fast path for when we don't care
-        if not names or len(names) == 1:
-            return 
-        loader = self.loader
-        plugins = self.conf.plugins
-        def lazy():
-            for name in names:
-                plugins.beforeContext()
-                yield loader.loadTestsFromName(name, module=module)
-                plugins.afterContext()
-        return (loader.suiteClass(lazy), [])
-
-    def prepareTestLoader(self, loader):
-        """Get handle on test loader so we can use it in loadTestsFromNames.
-        """
-        self.loader = loader
-
diff --git a/lib/spack/external/nose/plugins/logcapture.py b/lib/spack/external/nose/plugins/logcapture.py
deleted file mode 100644
index 4c9a79f6fd..0000000000
--- a/lib/spack/external/nose/plugins/logcapture.py
+++ /dev/null
@@ -1,245 +0,0 @@
-"""
-This plugin captures logging statements issued during test execution. When an
-error or failure occurs, the captured log messages are attached to the running
-test in the test.capturedLogging attribute, and displayed with the error failure
-output. It is enabled by default but can be turned off with the option
-``--nologcapture``.
-
-You can filter captured logging statements with the ``--logging-filter`` option. 
-If set, it specifies which logger(s) will be captured; loggers that do not match
-will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
-will ensure that only statements logged via sqlalchemy.engine, myapp
-or myapp.foo.bar logger will be logged.
-
-You can remove other installed logging handlers with the
-``--logging-clear-handlers`` option.
-"""
-
-import logging
-from logging import Handler
-import threading
-
-from nose.plugins.base import Plugin
-from nose.util import anyp, ln, safe_str
-
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
-
-log = logging.getLogger(__name__)
-
-class FilterSet(object):
-    def __init__(self, filter_components):
-        self.inclusive, self.exclusive = self._partition(filter_components)
-
-    # @staticmethod
-    def _partition(components):
-        inclusive, exclusive = [], []
-        for component in components:
-            if component.startswith('-'):
-                exclusive.append(component[1:])
-            else:
-                inclusive.append(component)
-        return inclusive, exclusive
-    _partition = staticmethod(_partition)
-
-    def allow(self, record):
-        """returns whether this record should be printed"""
-        if not self:
-            # nothing to filter
-            return True
-        return self._allow(record) and not self._deny(record)
-
-    # @staticmethod
-    def _any_match(matchers, record):
-        """return the bool of whether `record` starts with
-        any item in `matchers`"""
-        def record_matches_key(key):
-            return record == key or record.startswith(key + '.')
-        return anyp(bool, map(record_matches_key, matchers))
-    _any_match = staticmethod(_any_match)
-
-    def _allow(self, record):
-        if not self.inclusive:
-            return True
-        return self._any_match(self.inclusive, record)
-
-    def _deny(self, record):
-        if not self.exclusive:
-            return False
-        return self._any_match(self.exclusive, record)
-
-
-class MyMemoryHandler(Handler):
-    def __init__(self, logformat, logdatefmt, filters):
-        Handler.__init__(self)
-        fmt = logging.Formatter(logformat, logdatefmt)
-        self.setFormatter(fmt)
-        self.filterset = FilterSet(filters)
-        self.buffer = []
-    def emit(self, record):
-        self.buffer.append(self.format(record))
-    def flush(self):
-        pass # do nothing
-    def truncate(self):
-        self.buffer = []
-    def filter(self, record):
-        if self.filterset.allow(record.name):
-            return Handler.filter(self, record)
-    def __getstate__(self):
-        state = self.__dict__.copy()
-        del state['lock']
-        return state
-    def __setstate__(self, state):
-        self.__dict__.update(state)
-        self.lock = threading.RLock()
-
-
-class LogCapture(Plugin):
-    """
-    Log capture plugin. Enabled by default. Disable with --nologcapture.
-    This plugin captures logging statements issued during test execution,
-    appending any output captured to the error or failure output,
-    should the test fail or raise an error.
-    """
-    enabled = True
-    env_opt = 'NOSE_NOLOGCAPTURE'
-    name = 'logcapture'
-    score = 500
-    logformat = '%(name)s: %(levelname)s: %(message)s'
-    logdatefmt = None
-    clear = False
-    filters = ['-nose']
-
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        parser.add_option(
-            "--nologcapture", action="store_false",
-            default=not env.get(self.env_opt), dest="logcapture",
-            help="Disable logging capture plugin. "
-                 "Logging configuration will be left intact."
-                 " [NOSE_NOLOGCAPTURE]")
-        parser.add_option(
-            "--logging-format", action="store", dest="logcapture_format",
-            default=env.get('NOSE_LOGFORMAT') or self.logformat,
-            metavar="FORMAT",
-            help="Specify custom format to print statements. "
-                 "Uses the same format as used by standard logging handlers."
-                 " [NOSE_LOGFORMAT]")
-        parser.add_option(
-            "--logging-datefmt", action="store", dest="logcapture_datefmt",
-            default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
-            metavar="FORMAT",
-            help="Specify custom date/time format to print statements. "
-                 "Uses the same format as used by standard logging handlers."
-                 " [NOSE_LOGDATEFMT]")
-        parser.add_option(
-            "--logging-filter", action="store", dest="logcapture_filters",
-            default=env.get('NOSE_LOGFILTER'),
-            metavar="FILTER",
-            help="Specify which statements to filter in/out. "
-                 "By default, everything is captured. If the output is too"
-                 " verbose,\nuse this option to filter out needless output.\n"
-                 "Example: filter=foo will capture statements issued ONLY to\n"
-                 " foo or foo.what.ever.sub but not foobar or other logger.\n"
-                 "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
-                 "If any logger name is prefixed with a minus, eg filter=-foo,\n"
-                 "it will be excluded rather than included. Default: "
-                 "exclude logging messages from nose itself (-nose)."
-                 " [NOSE_LOGFILTER]\n")
-        parser.add_option(
-            "--logging-clear-handlers", action="store_true",
-            default=False, dest="logcapture_clear",
-            help="Clear all other logging handlers")
-        parser.add_option(
-            "--logging-level", action="store",
-            default='NOTSET', dest="logcapture_level",
-            help="Set the log level to capture")
-
-    def configure(self, options, conf):
-        """Configure plugin.
-        """
-        self.conf = conf
-        # Disable if explicitly disabled, or if logging is
-        # configured via logging config file
-        if not options.logcapture or conf.loggingConfig:
-            self.enabled = False
-        self.logformat = options.logcapture_format
-        self.logdatefmt = options.logcapture_datefmt
-        self.clear = options.logcapture_clear
-        self.loglevel = options.logcapture_level
-        if options.logcapture_filters:
-            self.filters = options.logcapture_filters.split(',')
-
-    def setupLoghandler(self):
-        # setup our handler with root logger
-        root_logger = logging.getLogger()
-        if self.clear:
-            if hasattr(root_logger, "handlers"):
-                for handler in root_logger.handlers:
-                    root_logger.removeHandler(handler)
-            for logger in logging.Logger.manager.loggerDict.values():
-                if hasattr(logger, "handlers"):
-                    for handler in logger.handlers:
-                        logger.removeHandler(handler)
-        # make sure there isn't one already
-        # you can't simply use "if self.handler not in root_logger.handlers"
-        # since at least in unit tests this doesn't work --
-        # LogCapture() is instantiated for each test case while root_logger
-        # is module global
-        # so we always add new MyMemoryHandler instance
-        for handler in root_logger.handlers[:]:
-            if isinstance(handler, MyMemoryHandler):
-                root_logger.handlers.remove(handler)
-        root_logger.addHandler(self.handler)
-        # to make sure everything gets captured
-        loglevel = getattr(self, "loglevel", "NOTSET")
-        root_logger.setLevel(getattr(logging, loglevel))
-
-    def begin(self):
-        """Set up logging handler before test run begins.
-        """
-        self.start()
-
-    def start(self):
-        self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
-                                       self.filters)
-        self.setupLoghandler()
-
-    def end(self):
-        pass
-
-    def beforeTest(self, test):
-        """Clear buffers and handlers before test.
-        """
-        self.setupLoghandler()
-
-    def afterTest(self, test):
-        """Clear buffers after test.
-        """
-        self.handler.truncate()
-
-    def formatFailure(self, test, err):
-        """Add captured log messages to failure output.
-        """
-        return self.formatError(test, err)
-
-    def formatError(self, test, err):
-        """Add captured log messages to error output.
-        """
-        # logic flow copied from Capture.formatError
-        test.capturedLogging = records = self.formatLogRecords()
-        if not records:
-            return err
-        ec, ev, tb = err
-        return (ec, self.addCaptureToErr(ev, records), tb)
-
-    def formatLogRecords(self):
-        return map(safe_str, self.handler.buffer)
-
-    def addCaptureToErr(self, ev, records):
-        return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
-                          records + \
-                          [ln('>> end captured logging <<')])
diff --git a/lib/spack/external/nose/plugins/manager.py b/lib/spack/external/nose/plugins/manager.py
deleted file mode 100644
index 4d2ed22b6f..0000000000
--- a/lib/spack/external/nose/plugins/manager.py
+++ /dev/null
@@ -1,460 +0,0 @@
-"""
-Plugin Manager
---------------
-
-A plugin manager class is used to load plugins, manage the list of
-loaded plugins, and proxy calls to those plugins.
-
-The plugin managers provided with nose are:
-
-:class:`PluginManager`
-    This manager doesn't implement loadPlugins, so it can only work
-    with a static list of plugins.
-
-:class:`BuiltinPluginManager`
-    This manager loads plugins referenced in ``nose.plugins.builtin``.
-
-:class:`EntryPointPluginManager`
-    This manager uses setuptools entrypoints to load plugins.
-
-:class:`ExtraPluginsPluginManager`
-    This manager loads extra plugins specified with the keyword
-    `addplugins`.
-
-:class:`DefaultPluginMananger`
-    This is the manager class that will be used by default. If
-    setuptools is installed, it is a subclass of
-    :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
-    otherwise, an alias to :class:`BuiltinPluginManager`.
-
-:class:`RestrictedPluginManager`
-    This manager is for use in test runs where some plugin calls are
-    not available, such as runs started with ``python setup.py test``,
-    where the test runner is the default unittest :class:`TextTestRunner`. It
-    is a subclass of :class:`DefaultPluginManager`.
-
-Writing a plugin manager
-========================
-
-If you want to load plugins via some other means, you can write a
-plugin manager and pass an instance of your plugin manager class when
-instantiating the :class:`nose.config.Config` instance that you pass to
-:class:`TestProgram` (or :func:`main` or :func:`run`).
-
-To implement your plugin loading scheme, implement ``loadPlugins()``,
-and in that method, call ``addPlugin()`` with an instance of each plugin
-you wish to make available. Make sure to call
-``super(self).loadPlugins()`` as well if have subclassed a manager
-other than ``PluginManager``.
-
-"""
-import inspect
-import logging
-import os
-import sys
-from itertools import chain as iterchain
-from warnings import warn
-import nose.config
-from nose.failure import Failure
-from nose.plugins.base import IPluginInterface
-from nose.pyversion import sort_list
-
-try:
-    import cPickle as pickle
-except:
-    import pickle
-try:
-    from cStringIO import StringIO
-except:
-    from StringIO import StringIO
-
-
-__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
-           'BuiltinPluginManager', 'RestrictedPluginManager']
-
-log = logging.getLogger(__name__)
-
-
-class PluginProxy(object):
-    """Proxy for plugin calls. Essentially a closure bound to the
-    given call and plugin list.
-
-    The plugin proxy also must be bound to a particular plugin
-    interface specification, so that it knows what calls are available
-    and any special handling that is required for each call.
-    """
-    interface = IPluginInterface
-    def __init__(self, call, plugins):
-        try:
-            self.method = getattr(self.interface, call)
-        except AttributeError:
-            raise AttributeError("%s is not a valid %s method"
-                                 % (call, self.interface.__name__))
-        self.call = self.makeCall(call)
-        self.plugins = []
-        for p in plugins:
-            self.addPlugin(p, call)
-
-    def __call__(self, *arg, **kw):
-        return self.call(*arg, **kw)
-
-    def addPlugin(self, plugin, call):
-        """Add plugin to my list of plugins to call, if it has the attribute
-        I'm bound to.
-        """
-        meth = getattr(plugin, call, None)
-        if meth is not None:
-            if call == 'loadTestsFromModule' and \
-                    len(inspect.getargspec(meth)[0]) == 2:
-                orig_meth = meth
-                meth = lambda module, path, **kwargs: orig_meth(module)
-            self.plugins.append((plugin, meth))
-
-    def makeCall(self, call):
-        if call == 'loadTestsFromNames':
-            # special case -- load tests from names behaves somewhat differently
-            # from other chainable calls, because plugins return a tuple, only
-            # part of which can be chained to the next plugin.
-            return self._loadTestsFromNames
-
-        meth = self.method
-        if getattr(meth, 'generative', False):
-            # call all plugins and yield a flattened iterator of their results
-            return lambda *arg, **kw: list(self.generate(*arg, **kw))
-        elif getattr(meth, 'chainable', False):
-            return self.chain
-        else:
-            # return a value from the first plugin that returns non-None
-            return self.simple
-
-    def chain(self, *arg, **kw):
-        """Call plugins in a chain, where the result of each plugin call is
-        sent to the next plugin as input. The final output result is returned.
-        """
-        result = None
-        # extract the static arguments (if any) from arg so they can
-        # be passed to each plugin call in the chain
-        static = [a for (static, a)
-                  in zip(getattr(self.method, 'static_args', []), arg)
-                  if static]
-        for p, meth in self.plugins:
-            result = meth(*arg, **kw)
-            arg = static[:]
-            arg.append(result)
-        return result
-
-    def generate(self, *arg, **kw):
-        """Call all plugins, yielding each item in each non-None result.
-        """
-        for p, meth in self.plugins:
-            result = None
-            try:
-                result = meth(*arg, **kw)
-                if result is not None:
-                    for r in result:
-                        yield r
-            except (KeyboardInterrupt, SystemExit):
-                raise
-            except:
-                exc = sys.exc_info()
-                yield Failure(*exc)
-                continue
-
-    def simple(self, *arg, **kw):
-        """Call all plugins, returning the first non-None result.
-        """
-        for p, meth in self.plugins:
-            result = meth(*arg, **kw)
-            if result is not None:
-                return result
-
-    def _loadTestsFromNames(self, names, module=None):
-        """Chainable but not quite normal. Plugins return a tuple of
-        (tests, names) after processing the names. The tests are added
-        to a suite that is accumulated throughout the full call, while
-        names are input for the next plugin in the chain.
-        """
-        suite = []
-        for p, meth in self.plugins:
-            result = meth(names, module=module)
-            if result is not None:
-                suite_part, names = result
-                if suite_part:
-                    suite.extend(suite_part)
-        return suite, names
-
-
-class NoPlugins(object):
-    """Null Plugin manager that has no plugins."""
-    interface = IPluginInterface
-    def __init__(self):
-        self._plugins = self.plugins = ()
-
-    def __iter__(self):
-        return ()
-
-    def _doNothing(self, *args, **kwds):
-        pass
-
-    def _emptyIterator(self, *args, **kwds):
-        return ()
-
-    def __getattr__(self, call):
-        method = getattr(self.interface, call)
-        if getattr(method, "generative", False):
-            return self._emptyIterator
-        else:
-            return self._doNothing
-
-    def addPlugin(self, plug):
-        raise NotImplementedError()
-
-    def addPlugins(self, plugins):
-        raise NotImplementedError()
-
-    def configure(self, options, config):
-        pass
-
-    def loadPlugins(self):
-        pass
-
-    def sort(self):
-        pass
-
-
-class PluginManager(object):
-    """Base class for plugin managers. PluginManager is intended to be
-    used only with a static list of plugins. The loadPlugins() implementation
-    only reloads plugins from _extraplugins to prevent those from being
-    overridden by a subclass.
-
-    The basic functionality of a plugin manager is to proxy all unknown
-    attributes through a ``PluginProxy`` to a list of plugins.
-
-    Note that the list of plugins *may not* be changed after the first plugin
-    call.
-    """
-    proxyClass = PluginProxy
-
-    def __init__(self, plugins=(), proxyClass=None):
-        self._plugins = []
-        self._extraplugins = ()
-        self._proxies = {}
-        if plugins:
-            self.addPlugins(plugins)
-        if proxyClass is not None:
-            self.proxyClass = proxyClass
-
-    def __getattr__(self, call):
-        try:
-            return self._proxies[call]
-        except KeyError:
-            proxy = self.proxyClass(call, self._plugins)
-            self._proxies[call] = proxy
-        return proxy
-
-    def __iter__(self):
-        return iter(self.plugins)
-
-    def addPlugin(self, plug):
-        # allow, for instance, plugins loaded via entry points to
-        # supplant builtin plugins.
-        new_name = getattr(plug, 'name', object())
-        self._plugins[:] = [p for p in self._plugins
-                            if getattr(p, 'name', None) != new_name]
-        self._plugins.append(plug)
-
-    def addPlugins(self, plugins=(), extraplugins=()):
-        """extraplugins are maintained in a separate list and
-        re-added by loadPlugins() to prevent their being overwritten
-        by plugins added by a subclass of PluginManager
-        """
-        self._extraplugins = extraplugins
-        for plug in iterchain(plugins, extraplugins):
-            self.addPlugin(plug)
-
-    def configure(self, options, config):
-        """Configure the set of plugins with the given options
-        and config instance. After configuration, disabled plugins
-        are removed from the plugins list.
-        """
-        log.debug("Configuring plugins")
-        self.config = config
-        cfg = PluginProxy('configure', self._plugins)
-        cfg(options, config)
-        enabled = [plug for plug in self._plugins if plug.enabled]
-        self.plugins = enabled
-        self.sort()
-        log.debug("Plugins enabled: %s", enabled)
-
-    def loadPlugins(self):
-        for plug in self._extraplugins:
-            self.addPlugin(plug)
-
-    def sort(self):
-        return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
-
-    def _get_plugins(self):
-        return self._plugins
-
-    def _set_plugins(self, plugins):
-        self._plugins = []
-        self.addPlugins(plugins)
-
-    plugins = property(_get_plugins, _set_plugins, None,
-                       """Access the list of plugins managed by
-                       this plugin manager""")
-
-
-class ZeroNinePlugin:
-    """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
-    """
-    def __init__(self, plugin):
-        self.plugin = plugin
-
-    def options(self, parser, env=os.environ):
-        self.plugin.add_options(parser, env)
-
-    def addError(self, test, err):
-        if not hasattr(self.plugin, 'addError'):
-            return
-        # switch off to addSkip, addDeprecated if those types
-        from nose.exc import SkipTest, DeprecatedTest
-        ec, ev, tb = err
-        if issubclass(ec, SkipTest):
-            if not hasattr(self.plugin, 'addSkip'):
-                return
-            return self.plugin.addSkip(test.test)
-        elif issubclass(ec, DeprecatedTest):
-            if not hasattr(self.plugin, 'addDeprecated'):
-                return
-            return self.plugin.addDeprecated(test.test)
-        # add capt
-        capt = test.capturedOutput
-        return self.plugin.addError(test.test, err, capt)
-
-    def loadTestsFromFile(self, filename):
-        if hasattr(self.plugin, 'loadTestsFromPath'):
-            return self.plugin.loadTestsFromPath(filename)
-
-    def addFailure(self, test, err):
-        if not hasattr(self.plugin, 'addFailure'):
-            return
-        # add capt and tbinfo
-        capt = test.capturedOutput
-        tbinfo = test.tbinfo
-        return self.plugin.addFailure(test.test, err, capt, tbinfo)
-
-    def addSuccess(self, test):
-        if not hasattr(self.plugin, 'addSuccess'):
-            return
-        capt = test.capturedOutput
-        self.plugin.addSuccess(test.test, capt)
-
-    def startTest(self, test):
-        if not hasattr(self.plugin, 'startTest'):
-            return
-        return self.plugin.startTest(test.test)
-
-    def stopTest(self, test):
-        if not hasattr(self.plugin, 'stopTest'):
-            return
-        return self.plugin.stopTest(test.test)
-
-    def __getattr__(self, val):
-        return getattr(self.plugin, val)
-
-
-class EntryPointPluginManager(PluginManager):
-    """Plugin manager that loads plugins from the `nose.plugins` and
-    `nose.plugins.0.10` entry points.
-    """
-    entry_points = (('nose.plugins.0.10', None),
-                    ('nose.plugins', ZeroNinePlugin))
-
-    def loadPlugins(self):
-        """Load plugins by iterating the `nose.plugins` entry point.
-        """
-        from pkg_resources import iter_entry_points
-        loaded = {}
-        for entry_point, adapt in self.entry_points:
-            for ep in iter_entry_points(entry_point):
-                if ep.name in loaded:
-                    continue
-                loaded[ep.name] = True
-                log.debug('%s load plugin %s', self.__class__.__name__, ep)
-                try:
-                    plugcls = ep.load()
-                except KeyboardInterrupt:
-                    raise
-                except Exception, e:
-                    # never want a plugin load to kill the test run
-                    # but we can't log here because the logger is not yet
-                    # configured
-                    warn("Unable to load plugin %s: %s" % (ep, e),
-                         RuntimeWarning)
-                    continue
-                if adapt:
-                    plug = adapt(plugcls())
-                else:
-                    plug = plugcls()
-                self.addPlugin(plug)
-        super(EntryPointPluginManager, self).loadPlugins()
-
-
-class BuiltinPluginManager(PluginManager):
-    """Plugin manager that loads plugins from the list in
-    `nose.plugins.builtin`.
-    """
-    def loadPlugins(self):
-        """Load plugins in nose.plugins.builtin
-        """
-        from nose.plugins import builtin
-        for plug in builtin.plugins:
-            self.addPlugin(plug())
-        super(BuiltinPluginManager, self).loadPlugins()
-
-try:
-    import pkg_resources
-    class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
-        pass
-
-except ImportError:
-    class DefaultPluginManager(BuiltinPluginManager):
-        pass
-
-class RestrictedPluginManager(DefaultPluginManager):
-    """Plugin manager that restricts the plugin list to those not
-    excluded by a list of exclude methods. Any plugin that implements
-    an excluded method will be removed from the manager's plugin list
-    after plugins are loaded.
-    """
-    def __init__(self, plugins=(), exclude=(), load=True):
-        DefaultPluginManager.__init__(self, plugins)
-        self.load = load
-        self.exclude = exclude
-        self.excluded = []
-        self._excludedOpts = None
-
-    def excludedOption(self, name):
-        if self._excludedOpts is None:
-            from optparse import OptionParser
-            self._excludedOpts = OptionParser(add_help_option=False)
-            for plugin in self.excluded:
-                plugin.options(self._excludedOpts, env={})
-        return self._excludedOpts.get_option('--' + name)
-
-    def loadPlugins(self):
-        if self.load:
-            DefaultPluginManager.loadPlugins(self)
-        allow = []
-        for plugin in self.plugins:
-            ok = True
-            for method in self.exclude:
-                if hasattr(plugin, method):
-                    ok = False
-                    self.excluded.append(plugin)
-                    break
-            if ok:
-                allow.append(plugin)
-        self.plugins = allow
diff --git a/lib/spack/external/nose/plugins/multiprocess.py b/lib/spack/external/nose/plugins/multiprocess.py
deleted file mode 100644
index 2cae744a11..0000000000
--- a/lib/spack/external/nose/plugins/multiprocess.py
+++ /dev/null
@@ -1,835 +0,0 @@
-"""
-Overview
-========
-
-The multiprocess plugin enables you to distribute your test run among a set of
-worker processes that run tests in parallel. This can speed up CPU-bound test
-runs (as long as the number of work processeses is around the number of
-processors or cores available), but is mainly useful for IO-bound tests that
-spend most of their time waiting for data to arrive from someplace else.
-
-.. note ::
-
-   See :doc:`../doc_tests/test_multiprocess/multiprocess` for
-   additional documentation and examples. Use of this plugin on python
-   2.5 or earlier requires the multiprocessing_ module, also available
-   from PyPI.
-
-.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
-
-How tests are distributed
-=========================
-
-The ideal case would be to dispatch each test to a worker process
-separately. This ideal is not attainable in all cases, however, because many
-test suites depend on context (class, module or package) fixtures.
-
-The plugin can't know (unless you tell it -- see below!) if a context fixture
-can be called many times concurrently (is re-entrant), or if it can be shared
-among tests running in different processes. Therefore, if a context has
-fixtures, the default behavior is to dispatch the entire suite to a worker as
-a unit.
-
-Controlling distribution
-^^^^^^^^^^^^^^^^^^^^^^^^
-
-There are two context-level variables that you can use to control this default
-behavior.
-
-If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
-in the context, and the plugin will dispatch tests in suites bound to that
-context as if the context had no fixtures. This means that the fixtures will
-execute concurrently and multiple times, typically once per test.
-
-If a context's fixtures can be shared by tests running in different processes
--- such as a package-level fixture that starts an external http server or
-initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
-the context. These fixtures will then execute in the primary nose process, and
-tests in those contexts will be individually dispatched to run in parallel.
-
-How results are collected and reported
-======================================
-
-As each test or suite executes in a worker process, results (failures, errors,
-and specially handled exceptions like SkipTest) are collected in that
-process. When the worker process finishes, it returns results to the main
-nose process. There, any progress output is printed (dots!), and the
-results from the test run are combined into a consolidated result
-set. When results have been received for all dispatched tests, or all
-workers have died, the result summary is output as normal.
-
-Beware!
-=======
-
-Not all test suites will benefit from, or even operate correctly using, this
-plugin. For example, CPU-bound tests will run more slowly if you don't have
-multiple processors. There are also some differences in plugin
-interactions and behaviors due to the way in which tests are dispatched and
-loaded. In general, test loading under this plugin operates as if it were
-always in directed mode instead of discovered mode. For instance, doctests
-in test modules will always be found when using this plugin with the doctest
-plugin.
-
-But the biggest issue you will face is probably concurrency. Unless you
-have kept your tests as religiously pure unit tests, with no side-effects, no
-ordering issues, and no external dependencies, chances are you will experience
-odd, intermittent and unexplainable failures and errors when using this
-plugin. This doesn't necessarily mean the plugin is broken; it may mean that
-your test suite is not safe for concurrency.
-
-New Features in 1.1.0
-=====================
-
-* functions generated by test generators are now added to the worker queue
-  making them multi-threaded.
-* fixed timeout functionality, now functions will be terminated with a
-  TimedOutException exception when they exceed their execution time. The
-  worker processes are not terminated.
-* added ``--process-restartworker`` option to restart workers once they are
-  done, this helps control memory usage. Sometimes memory leaks can accumulate
-  making long runs very difficult.
-* added global _instantiate_plugins to configure which plugins are started
-  on the worker processes.
-
-"""
-
-import logging
-import os
-import sys
-import time
-import traceback
-import unittest
-import pickle
-import signal
-import nose.case
-from nose.core import TextTestRunner
-from nose import failure
-from nose import loader
-from nose.plugins.base import Plugin
-from nose.pyversion import bytes_
-from nose.result import TextTestResult
-from nose.suite import ContextSuite
-from nose.util import test_address
-try:
-    # 2.7+
-    from unittest.runner import _WritelnDecorator
-except ImportError:
-    from unittest import _WritelnDecorator
-from Queue import Empty
-from warnings import warn
-try:
-    from cStringIO import StringIO
-except ImportError:
-    import StringIO
-
-# this is a list of plugin classes that will be checked for and created inside 
-# each worker process
-_instantiate_plugins = None
-
-log = logging.getLogger(__name__)
-
-Process = Queue = Pool = Event = Value = Array = None
-
-# have to inherit KeyboardInterrupt to it will interrupt process properly
-class TimedOutException(KeyboardInterrupt):
-    def __init__(self, value = "Timed Out"):
-        self.value = value
-    def __str__(self):
-        return repr(self.value)
-
-def _import_mp():
-    global Process, Queue, Pool, Event, Value, Array
-    try:
-        from multiprocessing import Manager, Process
-        #prevent the server process created in the manager which holds Python 
-        #objects and allows other processes to manipulate them using proxies
-        #to interrupt on SIGINT (keyboardinterrupt) so that the communication
-        #channel between subprocesses and main process is still usable after
-        #ctrl+C is received in the main process.
-        old=signal.signal(signal.SIGINT, signal.SIG_IGN)
-        m = Manager()
-        #reset it back so main process will receive a KeyboardInterrupt
-        #exception on ctrl+c
-        signal.signal(signal.SIGINT, old)
-        Queue, Pool, Event, Value, Array = (
-                m.Queue, m.Pool, m.Event, m.Value, m.Array
-        )
-    except ImportError:
-        warn("multiprocessing module is not available, multiprocess plugin "
-             "cannot be used", RuntimeWarning)
-
-
-class TestLet:
-    def __init__(self, case):
-        try:
-            self._id = case.id()
-        except AttributeError:
-            pass
-        self._short_description = case.shortDescription()
-        self._str = str(case)
-
-    def id(self):
-        return self._id
-
-    def shortDescription(self):
-        return self._short_description
-
-    def __str__(self):
-        return self._str
-
-class MultiProcess(Plugin):
-    """
-    Run tests in multiple processes. Requires processing module.
-    """
-    score = 1000
-    status = {}
-
-    def options(self, parser, env):
-        """
-        Register command-line options.
-        """
-        parser.add_option("--processes", action="store",
-                          default=env.get('NOSE_PROCESSES', 0),
-                          dest="multiprocess_workers",
-                          metavar="NUM",
-                          help="Spread test run among this many processes. "
-                          "Set a number equal to the number of processors "
-                          "or cores in your machine for best results. "
-                          "Pass a negative number to have the number of "
-                          "processes automatically set to the number of "
-                          "cores. Passing 0 means to disable parallel "
-                          "testing. Default is 0 unless NOSE_PROCESSES is "
-                          "set. "
-                          "[NOSE_PROCESSES]")
-        parser.add_option("--process-timeout", action="store",
-                          default=env.get('NOSE_PROCESS_TIMEOUT', 10),
-                          dest="multiprocess_timeout",
-                          metavar="SECONDS",
-                          help="Set timeout for return of results from each "
-                          "test runner process. Default is 10. "
-                          "[NOSE_PROCESS_TIMEOUT]")
-        parser.add_option("--process-restartworker", action="store_true",
-                          default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
-                          dest="multiprocess_restartworker",
-                          help="If set, will restart each worker process once"
-                          " their tests are done, this helps control memory "
-                          "leaks from killing the system. "
-                          "[NOSE_PROCESS_RESTARTWORKER]")
-
-    def configure(self, options, config):
-        """
-        Configure plugin.
-        """
-        try:
-            self.status.pop('active')
-        except KeyError:
-            pass
-        if not hasattr(options, 'multiprocess_workers'):
-            self.enabled = False
-            return
-        # don't start inside of a worker process
-        if config.worker:
-            return
-        self.config = config
-        try:
-            workers = int(options.multiprocess_workers)
-        except (TypeError, ValueError):
-            workers = 0
-        if workers:
-            _import_mp()
-            if Process is None:
-                self.enabled = False
-                return
-            # Negative number of workers will cause multiprocessing to hang.
-            # Set the number of workers to the CPU count to avoid this.
-            if workers < 0:
-                try:
-                    import multiprocessing
-                    workers = multiprocessing.cpu_count()
-                except NotImplementedError:
-                    self.enabled = False
-                    return
-            self.enabled = True
-            self.config.multiprocess_workers = workers
-            t = float(options.multiprocess_timeout)
-            self.config.multiprocess_timeout = t
-            r = int(options.multiprocess_restartworker)
-            self.config.multiprocess_restartworker = r
-            self.status['active'] = True
-
-    def prepareTestLoader(self, loader):
-        """Remember loader class so MultiProcessTestRunner can instantiate
-        the right loader.
-        """
-        self.loaderClass = loader.__class__
-
-    def prepareTestRunner(self, runner):
-        """Replace test runner with MultiProcessTestRunner.
-        """
-        # replace with our runner class
-        return MultiProcessTestRunner(stream=runner.stream,
-                                      verbosity=self.config.verbosity,
-                                      config=self.config,
-                                      loaderClass=self.loaderClass)
-
-def signalhandler(sig, frame):
-    raise TimedOutException()
-
-class MultiProcessTestRunner(TextTestRunner):
-    waitkilltime = 5.0 # max time to wait to terminate a process that does not
-                       # respond to SIGILL
-    def __init__(self, **kw):
-        self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
-        super(MultiProcessTestRunner, self).__init__(**kw)
-
-    def collect(self, test, testQueue, tasks, to_teardown, result):
-        # dispatch and collect results
-        # put indexes only on queue because tests aren't picklable
-        for case in self.nextBatch(test):
-            log.debug("Next batch %s (%s)", case, type(case))
-            if (isinstance(case, nose.case.Test) and
-                isinstance(case.test, failure.Failure)):
-                log.debug("Case is a Failure")
-                case(result) # run here to capture the failure
-                continue
-            # handle shared fixtures
-            if isinstance(case, ContextSuite) and case.context is failure.Failure:
-                log.debug("Case is a Failure")
-                case(result) # run here to capture the failure
-                continue
-            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
-                log.debug("%s has shared fixtures", case)
-                try:
-                    case.setUp()
-                except (KeyboardInterrupt, SystemExit):
-                    raise
-                except:
-                    log.debug("%s setup failed", sys.exc_info())
-                    result.addError(case, sys.exc_info())
-                else:
-                    to_teardown.append(case)
-                    if case.factory:
-                        ancestors=case.factory.context.get(case, [])
-                        for an in ancestors[:2]:
-                            #log.debug('reset ancestor %s', an)
-                            if getattr(an, '_multiprocess_shared_', False):
-                                an._multiprocess_can_split_=True
-                            #an._multiprocess_shared_=False
-                    self.collect(case, testQueue, tasks, to_teardown, result)
-
-            else:
-                test_addr = self.addtask(testQueue,tasks,case)
-                log.debug("Queued test %s (%s) to %s",
-                          len(tasks), test_addr, testQueue)
-
-    def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
-        currentaddr = Value('c',bytes_(''))
-        currentstart = Value('d',time.time())
-        keyboardCaught = Event()
-        p = Process(target=runner,
-                   args=(iworker, testQueue,
-                         resultQueue,
-                         currentaddr,
-                         currentstart,
-                         keyboardCaught,
-                         shouldStop,
-                         self.loaderClass,
-                         result.__class__,
-                         pickle.dumps(self.config)))
-        p.currentaddr = currentaddr
-        p.currentstart = currentstart
-        p.keyboardCaught = keyboardCaught
-        old = signal.signal(signal.SIGILL, signalhandler)
-        p.start()
-        signal.signal(signal.SIGILL, old)
-        return p
-
-    def run(self, test):
-        """
-        Execute the test (which may be a test suite). If the test is a suite,
-        distribute it out among as many processes as have been configured, at
-        as fine a level as is possible given the context fixtures defined in
-        the suite or any sub-suites.
-
-        """
-        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
-        wrapper = self.config.plugins.prepareTest(test)
-        if wrapper is not None:
-            test = wrapper
-
-        # plugins can decorate or capture the output stream
-        wrapped = self.config.plugins.setOutputStream(self.stream)
-        if wrapped is not None:
-            self.stream = wrapped
-
-        testQueue = Queue()
-        resultQueue = Queue()
-        tasks = []
-        completed = []
-        workers = []
-        to_teardown = []
-        shouldStop = Event()
-
-        result = self._makeResult()
-        start = time.time()
-
-        self.collect(test, testQueue, tasks, to_teardown, result)
-
-        log.debug("Starting %s workers", self.config.multiprocess_workers)
-        for i in range(self.config.multiprocess_workers):
-            p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
-            workers.append(p)
-            log.debug("Started worker process %s", i+1)
-
-        total_tasks = len(tasks)
-        # need to keep track of the next time to check for timeouts in case
-        # more than one process times out at the same time.
-        nexttimeout=self.config.multiprocess_timeout
-        thrownError = None
-
-        try:
-            while tasks:
-                log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
-                          len(completed), total_tasks,nexttimeout)
-                try:
-                    iworker, addr, newtask_addrs, batch_result = resultQueue.get(
-                                                            timeout=nexttimeout)
-                    log.debug('Results received for worker %d, %s, new tasks: %d',
-                              iworker,addr,len(newtask_addrs))
-                    try:
-                        try:
-                            tasks.remove(addr)
-                        except ValueError:
-                            log.warn('worker %s failed to remove from tasks: %s',
-                                     iworker,addr)
-                        total_tasks += len(newtask_addrs)
-                        tasks.extend(newtask_addrs)
-                    except KeyError:
-                        log.debug("Got result for unknown task? %s", addr)
-                        log.debug("current: %s",str(list(tasks)[0]))
-                    else:
-                        completed.append([addr,batch_result])
-                    self.consolidate(result, batch_result)
-                    if (self.config.stopOnError
-                        and not result.wasSuccessful()):
-                        # set the stop condition
-                        shouldStop.set()
-                        break
-                    if self.config.multiprocess_restartworker:
-                        log.debug('joining worker %s',iworker)
-                        # wait for working, but not that important if worker
-                        # cannot be joined in fact, for workers that add to
-                        # testQueue, they will not terminate until all their
-                        # items are read
-                        workers[iworker].join(timeout=1)
-                        if not shouldStop.is_set() and not testQueue.empty():
-                            log.debug('starting new process on worker %s',iworker)
-                            workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
-                except Empty:
-                    log.debug("Timed out with %s tasks pending "
-                              "(empty testQueue=%r): %s",
-                              len(tasks),testQueue.empty(),str(tasks))
-                    any_alive = False
-                    for iworker, w in enumerate(workers):
-                        if w.is_alive():
-                            worker_addr = bytes_(w.currentaddr.value,'ascii')
-                            timeprocessing = time.time() - w.currentstart.value
-                            if ( len(worker_addr) == 0
-                                    and timeprocessing > self.config.multiprocess_timeout-0.1):
-                                log.debug('worker %d has finished its work item, '
-                                          'but is not exiting? do we wait for it?',
-                                          iworker)
-                            else:
-                                any_alive = True
-                            if (len(worker_addr) > 0
-                                and timeprocessing > self.config.multiprocess_timeout-0.1):
-                                log.debug('timed out worker %s: %s',
-                                          iworker,worker_addr)
-                                w.currentaddr.value = bytes_('')
-                                # If the process is in C++ code, sending a SIGILL
-                                # might not send a python KeybordInterrupt exception
-                                # therefore, send multiple signals until an
-                                # exception is caught. If this takes too long, then
-                                # terminate the process
-                                w.keyboardCaught.clear()
-                                startkilltime = time.time()
-                                while not w.keyboardCaught.is_set() and w.is_alive():
-                                    if time.time()-startkilltime > self.waitkilltime:
-                                        # have to terminate...
-                                        log.error("terminating worker %s",iworker)
-                                        w.terminate()
-                                        # there is a small probability that the
-                                        # terminated process might send a result,
-                                        # which has to be specially handled or
-                                        # else processes might get orphaned.
-                                        workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
-                                        break
-                                    os.kill(w.pid, signal.SIGILL)
-                                    time.sleep(0.1)
-                    if not any_alive and testQueue.empty():
-                        log.debug("All workers dead")
-                        break
-                nexttimeout=self.config.multiprocess_timeout
-                for w in workers:
-                    if w.is_alive() and len(w.currentaddr.value) > 0:
-                        timeprocessing = time.time()-w.currentstart.value
-                        if timeprocessing <= self.config.multiprocess_timeout:
-                            nexttimeout = min(nexttimeout,
-                                self.config.multiprocess_timeout-timeprocessing)
-            log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))
-
-        except (KeyboardInterrupt, SystemExit), e:
-            log.info('parent received ctrl-c when waiting for test results')
-            thrownError = e
-            #resultQueue.get(False)
-                
-            result.addError(test, sys.exc_info())
-
-        try:
-            for case in to_teardown:
-                log.debug("Tearing down shared fixtures for %s", case)
-                try:
-                    case.tearDown()
-                except (KeyboardInterrupt, SystemExit):
-                    raise
-                except:
-                    result.addError(case, sys.exc_info())
-
-            stop = time.time()
-
-            # first write since can freeze on shutting down processes
-            result.printErrors()
-            result.printSummary(start, stop)
-            self.config.plugins.finalize(result)
-
-            if thrownError is None:
-                log.debug("Tell all workers to stop")
-                for w in workers:
-                    if w.is_alive():
-                        testQueue.put('STOP', block=False)
-
-            # wait for the workers to end
-            for iworker,worker in enumerate(workers):
-                if worker.is_alive():
-                    log.debug('joining worker %s',iworker)
-                    worker.join()
-                    if worker.is_alive():
-                        log.debug('failed to join worker %s',iworker)
-        except (KeyboardInterrupt, SystemExit):
-            log.info('parent received ctrl-c when shutting down: stop all processes')
-            for worker in workers:
-                if worker.is_alive():
-                    worker.terminate()
-
-            if thrownError: raise thrownError
-            else: raise
-
-        return result
-
-    def addtask(testQueue,tasks,case):
-        arg = None
-        if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
-            # this removes the top level descriptor and allows real function
-            # name to be returned
-            case.test.descriptor = None
-            arg = case.test.arg
-        test_addr = MultiProcessTestRunner.address(case)
-        testQueue.put((test_addr,arg), block=False)
-        if arg is not None:
-            test_addr += str(arg)
-        if tasks is not None:
-            tasks.append(test_addr)
-        return test_addr
-    addtask = staticmethod(addtask)
-
-    def address(case):
-        if hasattr(case, 'address'):
-            file, mod, call = case.address()
-        elif hasattr(case, 'context'):
-            file, mod, call = test_address(case.context)
-        else:
-            raise Exception("Unable to convert %s to address" % case)
-        parts = []
-        if file is None:
-            if mod is None:
-                raise Exception("Unaddressable case %s" % case)
-            else:
-                parts.append(mod)
-        else:
-            # strip __init__.py(c) from end of file part
-            # if present, having it there confuses loader
-            dirname, basename = os.path.split(file)
-            if basename.startswith('__init__'):
-                file = dirname
-            parts.append(file)
-        if call is not None:
-            parts.append(call)
-        return ':'.join(map(str, parts))
-    address = staticmethod(address)
-
-    def nextBatch(self, test):
-        # allows tests or suites to mark themselves as not safe
-        # for multiprocess execution
-        if hasattr(test, 'context'):
-            if not getattr(test.context, '_multiprocess_', True):
-                return
-
-        if ((isinstance(test, ContextSuite)
-             and test.hasFixtures(self.checkCanSplit))
-            or not getattr(test, 'can_split', True)
-            or not isinstance(test, unittest.TestSuite)):
-            # regular test case, or a suite with context fixtures
-
-            # special case: when run like nosetests path/to/module.py
-            # the top-level suite has only one item, and it shares
-            # the same context as that item. In that case, we want the
-            # item, not the top-level suite
-            if isinstance(test, ContextSuite):
-                contained = list(test)
-                if (len(contained) == 1
-                    and getattr(contained[0],
-                                'context', None) == test.context):
-                    test = contained[0]
-            yield test
-        else:
-            # Suite is without fixtures at this level; but it may have
-            # fixtures at any deeper level, so we need to examine it all
-            # the way down to the case level
-            for case in test:
-                for batch in self.nextBatch(case):
-                    yield batch
-
-    def checkCanSplit(context, fixt):
-        """
-        Callback that we use to check whether the fixtures found in a
-        context or ancestor are ones we care about.
-
-        Contexts can tell us that their fixtures are reentrant by setting
-        _multiprocess_can_split_. So if we see that, we return False to
-        disregard those fixtures.
-        """
-        if not fixt:
-            return False
-        if getattr(context, '_multiprocess_can_split_', False):
-            return False
-        return True
-    checkCanSplit = staticmethod(checkCanSplit)
-
-    def sharedFixtures(self, case):
-        context = getattr(case, 'context', None)
-        if not context:
-            return False
-        return getattr(context, '_multiprocess_shared_', False)
-
-    def consolidate(self, result, batch_result):
-        log.debug("batch result is %s" , batch_result)
-        try:
-            output, testsRun, failures, errors, errorClasses = batch_result
-        except ValueError:
-            log.debug("result in unexpected format %s", batch_result)
-            failure.Failure(*sys.exc_info())(result)
-            return
-        self.stream.write(output)
-        result.testsRun += testsRun
-        result.failures.extend(failures)
-        result.errors.extend(errors)
-        for key, (storage, label, isfail) in errorClasses.items():
-            if key not in result.errorClasses:
-                # Ordinarily storage is result attribute
-                # but it's only processed through the errorClasses
-                # dict, so it's ok to fake it here
-                result.errorClasses[key] = ([], label, isfail)
-            mystorage, _junk, _junk = result.errorClasses[key]
-            mystorage.extend(storage)
-        log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
-
-
-def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
-           keyboardCaught, shouldStop, loaderClass, resultClass, config):
-    try:
-        try:
-            return __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
-                    keyboardCaught, shouldStop, loaderClass, resultClass, config)
-        except KeyboardInterrupt:
-            log.debug('Worker %s keyboard interrupt, stopping',ix)
-    except Empty:
-        log.debug("Worker %s timed out waiting for tasks", ix)
-
-def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
-           keyboardCaught, shouldStop, loaderClass, resultClass, config):
-
-    config = pickle.loads(config)
-    dummy_parser = config.parserClass()
-    if _instantiate_plugins is not None:
-        for pluginclass in _instantiate_plugins:
-            plugin = pluginclass()
-            plugin.addOptions(dummy_parser,{})
-            config.plugins.addPlugin(plugin)
-    config.plugins.configure(config.options,config)
-    config.plugins.begin()
-    log.debug("Worker %s executing, pid=%d", ix,os.getpid())
-    loader = loaderClass(config=config)
-    loader.suiteClass.suiteClass = NoSharedFixtureContextSuite
-
-    def get():
-        return testQueue.get(timeout=config.multiprocess_timeout)
-
-    def makeResult():
-        stream = _WritelnDecorator(StringIO())
-        result = resultClass(stream, descriptions=1,
-                             verbosity=config.verbosity,
-                             config=config)
-        plug_result = config.plugins.prepareTestResult(result)
-        if plug_result:
-            return plug_result
-        return result
-
-    def batch(result):
-        failures = [(TestLet(c), err) for c, err in result.failures]
-        errors = [(TestLet(c), err) for c, err in result.errors]
-        errorClasses = {}
-        for key, (storage, label, isfail) in result.errorClasses.items():
-            errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
-                                 label, isfail)
-        return (
-            result.stream.getvalue(),
-            result.testsRun,
-            failures,
-            errors,
-            errorClasses)
-    for test_addr, arg in iter(get, 'STOP'):
-        if shouldStop.is_set():
-            log.exception('Worker %d STOPPED',ix)
-            break
-        result = makeResult()
-        test = loader.loadTestsFromNames([test_addr])
-        test.testQueue = testQueue
-        test.tasks = []
-        test.arg = arg
-        log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
-        try:
-            if arg is not None:
-                test_addr = test_addr + str(arg)
-            currentaddr.value = bytes_(test_addr)
-            currentstart.value = time.time()
-            test(result)
-            currentaddr.value = bytes_('')
-            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
-        except KeyboardInterrupt, e: #TimedOutException:
-            timeout = isinstance(e, TimedOutException)
-            if timeout:
-                keyboardCaught.set()
-            if len(currentaddr.value):
-                if timeout:
-                    msg = 'Worker %s timed out, failing current test %s'
-                else:
-                    msg = 'Worker %s keyboard interrupt, failing current test %s'
-                log.exception(msg,ix,test_addr)
-                currentaddr.value = bytes_('')
-                failure.Failure(*sys.exc_info())(result)
-                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
-            else:
-                if timeout:
-                    msg = 'Worker %s test %s timed out'
-                else:
-                    msg = 'Worker %s test %s keyboard interrupt'
-                log.debug(msg,ix,test_addr)
-                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
-            if not timeout:
-                raise
-        except SystemExit:
-            currentaddr.value = bytes_('')
-            log.exception('Worker %s system exit',ix)
-            raise
-        except:
-            currentaddr.value = bytes_('')
-            log.exception("Worker %s error running test or returning "
-                            "results",ix)
-            failure.Failure(*sys.exc_info())(result)
-            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
-        if config.multiprocess_restartworker:
-            break
-    log.debug("Worker %s ending", ix)
-
-
-class NoSharedFixtureContextSuite(ContextSuite):
-    """
-    Context suite that never fires shared fixtures.
-
-    When a context sets _multiprocess_shared_, fixtures in that context
-    are executed by the main process. Using this suite class prevents them
-    from executing in the runner process as well.
-
-    """
-    testQueue = None
-    tasks = None
-    arg = None
-    def setupContext(self, context):
-        if getattr(context, '_multiprocess_shared_', False):
-            return
-        super(NoSharedFixtureContextSuite, self).setupContext(context)
-
-    def teardownContext(self, context):
-        if getattr(context, '_multiprocess_shared_', False):
-            return
-        super(NoSharedFixtureContextSuite, self).teardownContext(context)
-    def run(self, result):
-        """Run tests in suite inside of suite fixtures.
-        """
-        # proxy the result for myself
-        log.debug("suite %s (%s) run called, tests: %s",
-                  id(self), self, self._tests)
-        if self.resultProxy:
-            result, orig = self.resultProxy(result, self), result
-        else:
-            result, orig = result, result
-        try:
-            #log.debug('setUp for %s', id(self));
-            self.setUp()
-        except KeyboardInterrupt:
-            raise
-        except:
-            self.error_context = 'setup'
-            result.addError(self, self._exc_info())
-            return
-        try:
-            for test in self._tests:
-                if (isinstance(test,nose.case.Test)
-                    and self.arg is not None):
-                    test.test.arg = self.arg
-                else:
-                    test.arg = self.arg
-                test.testQueue = self.testQueue
-                test.tasks = self.tasks
-                if result.shouldStop:
-                    log.debug("stopping")
-                    break
-                # each nose.case.Test will create its own result proxy
-                # so the cases need the original result, to avoid proxy
-                # chains
-                #log.debug('running test %s in suite %s', test, self);
-                try:
-                    test(orig)
-                except KeyboardInterrupt, e:
-                    timeout = isinstance(e, TimedOutException)
-                    if timeout:
-                        msg = 'Timeout when running test %s in suite %s'
-                    else:
-                        msg = 'KeyboardInterrupt when running test %s in suite %s'
-                    log.debug(msg, test, self)
-                    err = (TimedOutException,TimedOutException(str(test)),
-                           sys.exc_info()[2])
-                    test.config.plugins.addError(test,err)
-                    orig.addError(test,err)
-                    if not timeout:
-                        raise
-        finally:
-            self.has_run = True
-            try:
-                #log.debug('tearDown for %s', id(self));
-                self.tearDown()
-            except KeyboardInterrupt:
-                raise
-            except:
-                self.error_context = 'teardown'
-                result.addError(self, self._exc_info())
diff --git a/lib/spack/external/nose/plugins/plugintest.py b/lib/spack/external/nose/plugins/plugintest.py
deleted file mode 100644
index 76d0d2c48c..0000000000
--- a/lib/spack/external/nose/plugins/plugintest.py
+++ /dev/null
@@ -1,416 +0,0 @@
-"""
-Testing Plugins
-===============
-
-The plugin interface is well-tested enough to safely unit test your
-use of its hooks with some level of confidence. However, there is also
-a mixin for unittest.TestCase called PluginTester that's designed to
-test plugins in their native runtime environment.
-
-Here's a simple example with a do-nothing plugin and a composed suite.
-
-    >>> import unittest
-    >>> from nose.plugins import Plugin, PluginTester
-    >>> class FooPlugin(Plugin):
-    ...     pass
-    >>> class TestPluginFoo(PluginTester, unittest.TestCase):
-    ...     activate = '--with-foo'
-    ...     plugins = [FooPlugin()]
-    ...     def test_foo(self):
-    ...         for line in self.output:
-    ...             # i.e. check for patterns
-    ...             pass
-    ...
-    ...         # or check for a line containing ...
-    ...         assert "ValueError" in self.output
-    ...     def makeSuite(self):
-    ...         class TC(unittest.TestCase):
-    ...             def runTest(self):
-    ...                 raise ValueError("I hate foo")
-    ...         return [TC('runTest')]
-    ...
-    >>> res = unittest.TestResult()
-    >>> case = TestPluginFoo('test_foo')
-    >>> _ = case(res)
-    >>> res.errors
-    []
-    >>> res.failures
-    []
-    >>> res.wasSuccessful()
-    True
-    >>> res.testsRun
-    1
-
-And here is a more complex example of testing a plugin that has extra
-arguments and reads environment variables.
-
-    >>> import unittest, os
-    >>> from nose.plugins import Plugin, PluginTester
-    >>> class FancyOutputter(Plugin):
-    ...     name = "fancy"
-    ...     def configure(self, options, conf):
-    ...         Plugin.configure(self, options, conf)
-    ...         if not self.enabled:
-    ...             return
-    ...         self.fanciness = 1
-    ...         if options.more_fancy:
-    ...             self.fanciness = 2
-    ...         if 'EVEN_FANCIER' in self.env:
-    ...             self.fanciness = 3
-    ...
-    ...     def options(self, parser, env=os.environ):
-    ...         self.env = env
-    ...         parser.add_option('--more-fancy', action='store_true')
-    ...         Plugin.options(self, parser, env=env)
-    ...
-    ...     def report(self, stream):
-    ...         stream.write("FANCY " * self.fanciness)
-    ...
-    >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
-    ...     activate = '--with-fancy' # enables the plugin
-    ...     plugins = [FancyOutputter()]
-    ...     args = ['--more-fancy']
-    ...     env = {'EVEN_FANCIER': '1'}
-    ...
-    ...     def test_fancy_output(self):
-    ...         assert "FANCY FANCY FANCY" in self.output, (
-    ...                                         "got: %s" % self.output)
-    ...     def makeSuite(self):
-    ...         class TC(unittest.TestCase):
-    ...             def runTest(self):
-    ...                 raise ValueError("I hate fancy stuff")
-    ...         return [TC('runTest')]
-    ...
-    >>> res = unittest.TestResult()
-    >>> case = TestFancyOutputter('test_fancy_output')
-    >>> _ = case(res)
-    >>> res.errors
-    []
-    >>> res.failures
-    []
-    >>> res.wasSuccessful()
-    True
-    >>> res.testsRun
-    1
-
-"""
-
-import re
-import sys
-from warnings import warn
-
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from StringIO import StringIO
-
-__all__ = ['PluginTester', 'run']
-
-from os import getpid
-class MultiProcessFile(object):
-    """
-    helper for testing multiprocessing
-
-    multiprocessing poses a problem for doctests, since the strategy
-    of replacing sys.stdout/stderr with file-like objects then
-    inspecting the results won't work: the child processes will
-    write to the objects, but the data will not be reflected
-    in the parent doctest-ing process.
-
-    The solution is to create file-like objects which will interact with
-    multiprocessing in a more desirable way.
-
-    All processes can write to this object, but only the creator can read.
-    This allows the testing system to see a unified picture of I/O.
-    """
-    def __init__(self):
-        # per advice at:
-        #    http://docs.python.org/library/multiprocessing.html#all-platforms
-        self.__master = getpid()
-        self.__queue = Manager().Queue()
-        self.__buffer = StringIO()
-        self.softspace = 0
-
-    def buffer(self):
-        if getpid() != self.__master:
-            return
-
-        from Queue import Empty
-        from collections import defaultdict
-        cache = defaultdict(str)
-        while True:
-            try:
-                pid, data = self.__queue.get_nowait()
-            except Empty:
-                break
-            if pid == ():
-                #show parent output after children
-                #this is what users see, usually
-                pid = ( 1e100, ) # googol!
-            cache[pid] += data
-        for pid in sorted(cache):
-            #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
-            self.__buffer.write( cache[pid] )
-    def write(self, data):
-        # note that these pids are in the form of current_process()._identity
-        # rather than OS pids
-        from multiprocessing import current_process
-        pid = current_process()._identity
-        self.__queue.put((pid, data))
-    def __iter__(self):
-        "getattr doesn't work for iter()"
-        self.buffer()
-        return self.__buffer
-    def seek(self, offset, whence=0):
-        self.buffer()
-        return self.__buffer.seek(offset, whence)
-    def getvalue(self):
-        self.buffer()
-        return self.__buffer.getvalue()
-    def __getattr__(self, attr):
-        return getattr(self.__buffer, attr)
-
-try:
-    from multiprocessing import Manager
-    Buffer = MultiProcessFile
-except ImportError:
-    Buffer = StringIO
-
-class PluginTester(object):
-    """A mixin for testing nose plugins in their runtime environment.
-
-    Subclass this and mix in unittest.TestCase to run integration/functional
-    tests on your plugin.  When setUp() is called, the stub test suite is
-    executed with your plugin so that during an actual test you can inspect the
-    artifacts of how your plugin interacted with the stub test suite.
-
-    - activate
-
-      - the argument to send nosetests to activate the plugin
-
-    - suitepath
-
-      - if set, this is the path of the suite to test. Otherwise, you
-        will need to use the hook, makeSuite()
-
-    - plugins
-
-      - the list of plugins to make available during the run. Note
-        that this does not mean these plugins will be *enabled* during
-        the run -- only the plugins enabled by the activate argument
-        or other settings in argv or env will be enabled.
-
-    - args
-
-      - a list of arguments to add to the nosetests command, in addition to
-        the activate argument
-
-    - env
-
-      - optional dict of environment variables to send nosetests
-
-    """
-    activate = None
-    suitepath = None
-    args = None
-    env = {}
-    argv = None
-    plugins = []
-    ignoreFiles = None
-
-    def makeSuite(self):
-        """returns a suite object of tests to run (unittest.TestSuite())
-
-        If self.suitepath is None, this must be implemented. The returned suite
-        object will be executed with all plugins activated.  It may return
-        None.
-
-        Here is an example of a basic suite object you can return ::
-
-            >>> import unittest
-            >>> class SomeTest(unittest.TestCase):
-            ...     def runTest(self):
-            ...         raise ValueError("Now do something, plugin!")
-            ...
-            >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
-            <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>
-
-        """
-        raise NotImplementedError
-
-    def _execPlugin(self):
-        """execute the plugin on the internal test suite.
-        """
-        from nose.config import Config
-        from nose.core import TestProgram
-        from nose.plugins.manager import PluginManager
-
-        suite = None
-        stream = Buffer()
-        conf = Config(env=self.env,
-                      stream=stream,
-                      plugins=PluginManager(plugins=self.plugins))
-        if self.ignoreFiles is not None:
-            conf.ignoreFiles = self.ignoreFiles
-        if not self.suitepath:
-            suite = self.makeSuite()
-
-        self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
-                                exit=False)
-        self.output = AccessDecorator(stream)
-
-    def setUp(self):
-        """runs nosetests with the specified test suite, all plugins
-        activated.
-        """
-        self.argv = ['nosetests', self.activate]
-        if self.args:
-            self.argv.extend(self.args)
-        if self.suitepath:
-            self.argv.append(self.suitepath)
-
-        self._execPlugin()
-
-
-class AccessDecorator(object):
-    stream = None
-    _buf = None
-    def __init__(self, stream):
-        self.stream = stream
-        stream.seek(0)
-        self._buf = stream.read()
-        stream.seek(0)
-    def __contains__(self, val):
-        return val in self._buf
-    def __iter__(self):
-        return iter(self.stream)
-    def __str__(self):
-        return self._buf
-
-
-def blankline_separated_blocks(text):
-    "a bunch of === characters is also considered a blank line"
-    block = []
-    for line in text.splitlines(True):
-        block.append(line)
-        line = line.strip()
-        if not line or line.startswith('===') and not line.strip('='):
-            yield "".join(block)
-            block = []
-    if block:
-        yield "".join(block)
-
-
-def remove_stack_traces(out):
-    # this regexp taken from Python 2.5's doctest
-    traceback_re = re.compile(r"""
-        # Grab the traceback header.  Different versions of Python have
-        # said different things on the first traceback line.
-        ^(?P<hdr> Traceback\ \(
-            (?: most\ recent\ call\ last
-            |   innermost\ last
-            ) \) :
-        )
-        \s* $                   # toss trailing whitespace on the header.
-        (?P<stack> .*?)         # don't blink: absorb stuff until...
-        ^(?=\w)                 #     a line *starts* with alphanum.
-        .*?(?P<exception> \w+ ) # exception name
-        (?P<msg> [:\n] .*)      # the rest
-        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
-    blocks = []
-    for block in blankline_separated_blocks(out):
-        blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block))
-    return "".join(blocks)
-
-
-def simplify_warnings(out):
-    warn_re = re.compile(r"""
-        # Cut the file and line no, up to the warning name
-        ^.*:\d+:\s
-        (?P<category>\w+): \s+        # warning category
-        (?P<detail>.+) $ \n?          # warning message
-        ^ .* $                        # stack frame
-        """, re.VERBOSE | re.MULTILINE)
-    return warn_re.sub(r"\g<category>: \g<detail>", out)
-
-
-def remove_timings(out):
-    return re.sub(
-        r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out)
-
-
-def munge_nose_output_for_doctest(out):
-    """Modify nose output to make it easy to use in doctests."""
-    out = remove_stack_traces(out)
-    out = simplify_warnings(out)
-    out = remove_timings(out)
-    return out.strip()
-
-
-def run(*arg, **kw):
-    """
-    Specialized version of nose.run for use inside of doctests that
-    test test runs.
-
-    This version of run() prints the result output to stdout.  Before
-    printing, the output is processed by replacing the timing
-    information with an ellipsis (...), removing traceback stacks, and
-    removing trailing whitespace.
-
-    Use this version of run wherever you are writing a doctest that
-    tests nose (or unittest) test result output.
-
-    Note: do not use doctest: +ELLIPSIS when testing nose output,
-    since ellipses ("test_foo ... ok") in your expected test runner
-    output may match multiple lines of output, causing spurious test
-    passes!
-    """
-    from nose import run
-    from nose.config import Config
-    from nose.plugins.manager import PluginManager
-
-    buffer = Buffer()
-    if 'config' not in kw:
-        plugins = kw.pop('plugins', [])
-        if isinstance(plugins, list):
-            plugins = PluginManager(plugins=plugins)
-        env = kw.pop('env', {})
-        kw['config'] = Config(env=env, plugins=plugins)
-    if 'argv' not in kw:
-        kw['argv'] = ['nosetests', '-v']
-    kw['config'].stream = buffer
-
-    # Set up buffering so that all output goes to our buffer,
-    # or warn user if deprecated behavior is active. If this is not
-    # done, prints and warnings will either be out of place or
-    # disappear.
-    stderr = sys.stderr
-    stdout = sys.stdout
-    if kw.pop('buffer_all', False):
-        sys.stdout = sys.stderr = buffer
-        restore = True
-    else:
-        restore = False
-        warn("The behavior of nose.plugins.plugintest.run() will change in "
-             "the next release of nose. The current behavior does not "
-             "correctly account for output to stdout and stderr. To enable "
-             "correct behavior, use run_buffered() instead, or pass "
-             "the keyword argument buffer_all=True to run().",
-             DeprecationWarning, stacklevel=2)
-    try:
-        run(*arg, **kw)
-    finally:
-        if restore:
-            sys.stderr = stderr
-            sys.stdout = stdout
-    out = buffer.getvalue()
-    print munge_nose_output_for_doctest(out)
-
-
-def run_buffered(*arg, **kw):
-    kw['buffer_all'] = True
-    run(*arg, **kw)
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/prof.py b/lib/spack/external/nose/plugins/prof.py
deleted file mode 100644
index 4d304a934b..0000000000
--- a/lib/spack/external/nose/plugins/prof.py
+++ /dev/null
@@ -1,154 +0,0 @@
-"""This plugin will run tests using the hotshot profiler, which is part
-of the standard library. To turn it on, use the ``--with-profile`` option
-or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
-controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
-and the profiler output file may be changed with ``--profile-stats-file``.
-
-See the `hotshot documentation`_ in the standard library documentation for
-more details on the various output options.
-
-.. _hotshot documentation: http://docs.python.org/library/hotshot.html
-"""
-
-try:
-    import hotshot
-    from hotshot import stats
-except ImportError:
-    hotshot, stats = None, None
-import logging
-import os
-import sys
-import tempfile
-from nose.plugins.base import Plugin
-from nose.util import tolist
-
-log = logging.getLogger('nose.plugins')
-
-class Profile(Plugin):
-    """
-    Use this plugin to run tests using the hotshot profiler. 
-    """
-    pfile = None
-    clean_stats_file = False
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        if not self.available():
-            return
-        Plugin.options(self, parser, env)
-        parser.add_option('--profile-sort', action='store', dest='profile_sort',
-                          default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
-                          metavar="SORT",
-                          help="Set sort order for profiler output")
-        parser.add_option('--profile-stats-file', action='store',
-                          dest='profile_stats_file',
-                          metavar="FILE",
-                          default=env.get('NOSE_PROFILE_STATS_FILE'),
-                          help='Profiler stats file; default is a new '
-                          'temp file on each run')
-        parser.add_option('--profile-restrict', action='append',
-                          dest='profile_restrict',
-                          metavar="RESTRICT",
-                          default=env.get('NOSE_PROFILE_RESTRICT'),
-                          help="Restrict profiler output. See help for "
-                          "pstats.Stats for details")
-
-    def available(cls):
-        return hotshot is not None
-    available = classmethod(available)
-
-    def begin(self):
-        """Create profile stats file and load profiler.
-        """
-        if not self.available():
-            return
-        self._create_pfile()
-        self.prof = hotshot.Profile(self.pfile)
-
-    def configure(self, options, conf):
-        """Configure plugin.
-        """
-        if not self.available():
-            self.enabled = False
-            return
-        Plugin.configure(self, options, conf)
-        self.conf = conf
-        if options.profile_stats_file:
-            self.pfile = options.profile_stats_file
-            self.clean_stats_file = False
-        else:
-            self.pfile = None
-            self.clean_stats_file = True
-        self.fileno = None
-        self.sort = options.profile_sort
-        self.restrict = tolist(options.profile_restrict)
-
-    def prepareTest(self, test):
-        """Wrap entire test run in :func:`prof.runcall`.
-        """
-        if not self.available():
-            return
-        log.debug('preparing test %s' % test)
-        def run_and_profile(result, prof=self.prof, test=test):
-            self._create_pfile()
-            prof.runcall(test, result)
-        return run_and_profile
-
-    def report(self, stream):
-        """Output profiler report.
-        """
-        log.debug('printing profiler report')
-        self.prof.close()
-        prof_stats = stats.load(self.pfile)
-        prof_stats.sort_stats(self.sort)
-
-        # 2.5 has completely different stream handling from 2.4 and earlier.
-        # Before 2.5, stats objects have no stream attribute; in 2.5 and later
-        # a reference sys.stdout is stored before we can tweak it.
-        compat_25 = hasattr(prof_stats, 'stream')
-        if compat_25:
-            tmp = prof_stats.stream
-            prof_stats.stream = stream
-        else:
-            tmp = sys.stdout
-            sys.stdout = stream
-        try:
-            if self.restrict:
-                log.debug('setting profiler restriction to %s', self.restrict)
-                prof_stats.print_stats(*self.restrict)
-            else:
-                prof_stats.print_stats()
-        finally:
-            if compat_25:
-                prof_stats.stream = tmp
-            else:
-                sys.stdout = tmp
-
-    def finalize(self, result):
-        """Clean up stats file, if configured to do so.
-        """
-        if not self.available():
-            return
-        try:
-            self.prof.close()
-        except AttributeError:
-            # TODO: is this trying to catch just the case where not
-            # hasattr(self.prof, "close")?  If so, the function call should be
-            # moved out of the try: suite.
-            pass
-        if self.clean_stats_file:
-            if self.fileno:
-                try:
-                    os.close(self.fileno)
-                except OSError:
-                    pass
-            try:
-                os.unlink(self.pfile)
-            except OSError:
-                pass
-        return None
-
-    def _create_pfile(self):
-        if not self.pfile:
-            self.fileno, self.pfile = tempfile.mkstemp()
-            self.clean_stats_file = True
diff --git a/lib/spack/external/nose/plugins/skip.py b/lib/spack/external/nose/plugins/skip.py
deleted file mode 100644
index 9d1ac8f604..0000000000
--- a/lib/spack/external/nose/plugins/skip.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-This plugin installs a SKIP error class for the SkipTest exception.
-When SkipTest is raised, the exception will be logged in the skipped
-attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
-the exception will not be counted as an error or failure. This plugin
-is enabled by default but may be disabled with the ``--no-skip`` option.
-"""
-
-from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
-
-
-# on SkipTest:
-#  - unittest SkipTest is first preference, but it's only available
-#    for >= 2.7
-#  - unittest2 SkipTest is second preference for older pythons.  This
-#    mirrors logic for choosing SkipTest exception in testtools
-#  - if none of the above, provide custom class
-try:
-    from unittest.case import SkipTest
-except ImportError:
-    try:
-        from unittest2.case import SkipTest
-    except ImportError:
-        class SkipTest(Exception):
-            """Raise this exception to mark a test as skipped.
-            """
-            pass
-
-
-class Skip(ErrorClassPlugin):
-    """
-    Plugin that installs a SKIP error class for the SkipTest
-    exception.  When SkipTest is raised, the exception will be logged
-    in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
-    will be output, and the exception will not be counted as an error
-    or failure.
-    """
-    enabled = True
-    skipped = ErrorClass(SkipTest,
-                         label='SKIP',
-                         isfailure=False)
-
-    def options(self, parser, env):
-        """
-        Add my options to command line.
-        """
-        env_opt = 'NOSE_WITHOUT_SKIP'
-        parser.add_option('--no-skip', action='store_true',
-                          dest='noSkip', default=env.get(env_opt, False),
-                          help="Disable special handling of SkipTest "
-                          "exceptions.")
-
-    def configure(self, options, conf):
-        """
-        Configure plugin. Skip plugin is enabled by default.
-        """
-        if not self.can_configure:
-            return
-        self.conf = conf
-        disable = getattr(options, 'noSkip', False)
-        if disable:
-            self.enabled = False
-
diff --git a/lib/spack/external/nose/plugins/testid.py b/lib/spack/external/nose/plugins/testid.py
deleted file mode 100644
index ae8119bd01..0000000000
--- a/lib/spack/external/nose/plugins/testid.py
+++ /dev/null
@@ -1,311 +0,0 @@
-"""
-This plugin adds a test id (like #1) to each test name output. After
-you've run once to generate test ids, you can re-run individual
-tests by activating the plugin and passing the ids (with or
-without the # prefix) instead of test names.
-
-For example, if your normal test run looks like::
-
-  % nosetests -v
-  tests.test_a ... ok
-  tests.test_b ... ok
-  tests.test_c ... ok
-
-When adding ``--with-id`` you'll see::
-
-  % nosetests -v --with-id
-  #1 tests.test_a ... ok
-  #2 tests.test_b ... ok
-  #3 tests.test_c ... ok
-
-Then you can re-run individual tests by supplying just an id number::
-
-  % nosetests -v --with-id 2
-  #2 tests.test_b ... ok
-
-You can also pass multiple id numbers::
-
-  % nosetests -v --with-id 2 3
-  #2 tests.test_b ... ok
-  #3 tests.test_c ... ok
-  
-Since most shells consider '#' a special character, you can leave it out when
-specifying a test id.
-
-Note that when run without the -v switch, no special output is displayed, but
-the ids file is still written.
-
-Looping over failed tests
--------------------------
-
-This plugin also adds a mode that will direct the test runner to record
-failed tests. Subsequent test runs will then run only the tests that failed
-last time. Activate this mode with the ``--failed`` switch::
-
- % nosetests -v --failed
- #1 test.test_a ... ok
- #2 test.test_b ... ERROR
- #3 test.test_c ... FAILED
- #4 test.test_d ... ok
- 
-On the second run, only tests #2 and #3 will run::
-
- % nosetests -v --failed
- #2 test.test_b ... ERROR
- #3 test.test_c ... FAILED
-
-As you correct errors and tests pass, they'll drop out of subsequent runs.
-
-First::
-
- % nosetests -v --failed
- #2 test.test_b ... ok
- #3 test.test_c ... FAILED
-
-Second::
-
- % nosetests -v --failed
- #3 test.test_c ... FAILED
-
-When all tests pass, the full set will run on the next invocation.
-
-First::
-
- % nosetests -v --failed
- #3 test.test_c ... ok
-
-Second::
- 
- % nosetests -v --failed
- #1 test.test_a ... ok
- #2 test.test_b ... ok
- #3 test.test_c ... ok
- #4 test.test_d ... ok
-
-.. note ::
-
-  If you expect to use ``--failed`` regularly, it's a good idea to always run
-  using the ``--with-id`` option. This will ensure that an id file is always
-  created, allowing you to add ``--failed`` to the command line as soon as
-  you have failing tests. Otherwise, your first run using ``--failed`` will
-  (perhaps surprisingly) run *all* tests, because there won't be an id file
-  containing the record of failed tests from your previous run.
-  
-"""
-__test__ = False
-
-import logging
-import os
-from nose.plugins import Plugin
-from nose.util import src, set
-
-try:
-    from cPickle import dump, load
-except ImportError:
-    from pickle import dump, load
-
-log = logging.getLogger(__name__)
-
-
-class TestId(Plugin):
-    """
-    Activate to add a test id (like #1) to each test name output. Activate
-    with --failed to rerun failing tests only.
-    """
-    name = 'id'
-    idfile = None
-    collecting = True
-    loopOnFailed = False
-
-    def options(self, parser, env):
-        """Register commandline options.
-        """
-        Plugin.options(self, parser, env)
-        parser.add_option('--id-file', action='store', dest='testIdFile',
-                          default='.noseids', metavar="FILE",
-                          help="Store test ids found in test runs in this "
-                          "file. Default is the file .noseids in the "
-                          "working directory.")
-        parser.add_option('--failed', action='store_true',
-                          dest='failed', default=False,
-                          help="Run the tests that failed in the last "
-                          "test run.")
-
-    def configure(self, options, conf):
-        """Configure plugin.
-        """
-        Plugin.configure(self, options, conf)
-        if options.failed:
-            self.enabled = True
-            self.loopOnFailed = True
-            log.debug("Looping on failed tests")
-        self.idfile = os.path.expanduser(options.testIdFile)
-        if not os.path.isabs(self.idfile):
-            self.idfile = os.path.join(conf.workingDir, self.idfile)
-        self.id = 1
-        # Ids and tests are mirror images: ids are {id: test address} and
-        # tests are {test address: id}
-        self.ids = {}
-        self.tests = {}
-        self.failed = []
-        self.source_names = []
-        # used to track ids seen when tests is filled from
-        # loaded ids file
-        self._seen = {}
-        self._write_hashes = conf.verbosity >= 2
-
-    def finalize(self, result):
-        """Save new ids file, if needed.
-        """
-        if result.wasSuccessful():
-            self.failed = []
-        if self.collecting:
-            ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
-        else:
-            ids = self.ids
-        fh = open(self.idfile, 'wb')
-        dump({'ids': ids,
-              'failed': self.failed,
-              'source_names': self.source_names}, fh)
-        fh.close()
-        log.debug('Saved test ids: %s, failed %s to %s',
-                  ids, self.failed, self.idfile)
-
-    def loadTestsFromNames(self, names, module=None):
-        """Translate ids in the list of requested names into their
-        test addresses, if they are found in my dict of tests.
-        """
-        log.debug('ltfn %s %s', names, module)
-        try:
-            fh = open(self.idfile, 'rb')
-            data = load(fh)
-            if 'ids' in data:
-                self.ids = data['ids']
-                self.failed = data['failed']
-                self.source_names = data['source_names']
-            else:
-                # old ids field
-                self.ids = data
-                self.failed = []
-                self.source_names = names
-            if self.ids:
-                self.id = max(self.ids) + 1
-                self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
-            else:
-                self.id = 1
-            log.debug(
-                'Loaded test ids %s tests %s failed %s sources %s from %s',
-                self.ids, self.tests, self.failed, self.source_names,
-                self.idfile)
-            fh.close()
-        except ValueError, e:
-            # load() may throw a ValueError when reading the ids file, if it
-            # was generated with a newer version of Python than we are currently
-            # running.
-            log.debug('Error loading %s : %s', self.idfile, str(e))
-        except IOError:
-            log.debug('IO error reading %s', self.idfile)
-
-        if self.loopOnFailed and self.failed:
-            self.collecting = False
-            names = self.failed
-            self.failed = []
-        # I don't load any tests myself, only translate names like '#2'
-        # into the associated test addresses
-        translated = []
-        new_source = []
-        really_new = []
-        for name in names:
-            trans = self.tr(name)
-            if trans != name:
-                translated.append(trans)
-            else:
-                new_source.append(name)
-        # names that are not ids and that are not in the current
-        # list of source names go into the list for next time
-        if new_source:
-            new_set = set(new_source)
-            old_set = set(self.source_names)
-            log.debug("old: %s new: %s", old_set, new_set)
-            really_new = [s for s in new_source
-                          if not s in old_set]
-            if really_new:
-                # remember new sources
-                self.source_names.extend(really_new)
-            if not translated:
-                # new set of source names, no translations
-                # means "run the requested tests"
-                names = new_source
-        else:
-            # no new names to translate and add to id set
-            self.collecting = False
-        log.debug("translated: %s new sources %s names %s",
-                  translated, really_new, names)
-        return (None, translated + really_new or names)
-
-    def makeName(self, addr):
-        log.debug("Make name %s", addr)
-        filename, module, call = addr
-        if filename is not None:
-            head = src(filename)
-        else:
-            head = module
-        if call is not None:
-            return "%s:%s" % (head, call)
-        return head
-
-    def setOutputStream(self, stream):
-        """Get handle on output stream so the plugin can print id #s
-        """
-        self.stream = stream
-
-    def startTest(self, test):
-        """Maybe output an id # before the test name.
-
-        Example output::
-
-          #1 test.test ... ok
-          #2 test.test_two ... ok
-
-        """
-        adr = test.address()
-        log.debug('start test %s (%s)', adr, adr in self.tests)
-        if adr in self.tests:
-            if adr in self._seen:
-                self.write('   ')
-            else:
-                self.write('#%s ' % self.tests[adr])
-                self._seen[adr] = 1
-            return
-        self.tests[adr] = self.id
-        self.write('#%s ' % self.id)
-        self.id += 1
-
-    def afterTest(self, test):
-        # None means test never ran, False means failed/err
-        if test.passed is False:
-            try:
-                key = str(self.tests[test.address()])
-            except KeyError:
-                # never saw this test -- startTest didn't run
-                pass
-            else:
-                if key not in self.failed:
-                    self.failed.append(key)
-
-    def tr(self, name):
-        log.debug("tr '%s'", name)
-        try:
-            key = int(name.replace('#', ''))
-        except ValueError:
-            return name
-        log.debug("Got key %s", key)
-        # I'm running tests mapped from the ids file,
-        # not collecting new ones
-        if key in self.ids:
-            return self.makeName(self.ids[key])
-        return name
-
-    def write(self, output):
-        if self._write_hashes:
-            self.stream.write(output)
diff --git a/lib/spack/external/nose/plugins/xunit.py b/lib/spack/external/nose/plugins/xunit.py
deleted file mode 100644
index 90b52f5f61..0000000000
--- a/lib/spack/external/nose/plugins/xunit.py
+++ /dev/null
@@ -1,341 +0,0 @@
-"""This plugin provides test results in the standard XUnit XML format.
-
-It's designed for the `Jenkins`_ (previously Hudson) continuous build
-system, but will probably work for anything else that understands an
-XUnit-formatted XML representation of test results.
-
-Add this shell command to your builder ::
-
-    nosetests --with-xunit
-
-And by default a file named nosetests.xml will be written to the
-working directory.
-
-In a Jenkins builder, tick the box named "Publish JUnit test result report"
-under the Post-build Actions and enter this value for Test report XMLs::
-
-    **/nosetests.xml
-
-If you need to change the name or location of the file, you can set the
-``--xunit-file`` option.
-
-If you need to change the name of the test suite, you can set the
-``--xunit-testsuite-name`` option.
-
-Here is an abbreviated version of what an XML test report might look like::
-
-    <?xml version="1.0" encoding="UTF-8"?>
-    <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
-        <testcase classname="path_to_test_suite.TestSomething"
-                  name="test_it" time="0">
-            <error type="exceptions.TypeError" message="oops, wrong type">
-            Traceback (most recent call last):
-            ...
-            TypeError: oops, wrong type
-            </error>
-        </testcase>
-    </testsuite>
-
-.. _Jenkins: http://jenkins-ci.org/
-
-"""
-import codecs
-import doctest
-import os
-import sys
-import traceback
-import re
-import inspect
-from StringIO import StringIO
-from time import time
-from xml.sax import saxutils
-
-from nose.plugins.base import Plugin
-from nose.exc import SkipTest
-from nose.pyversion import force_unicode, format_exception
-
-# Invalid XML characters, control characters 0-31 sans \t, \n and \r
-CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
-
-TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
-
-def xml_safe(value):
-    """Replaces invalid XML characters with '?'."""
-    return CONTROL_CHARACTERS.sub('?', value)
-
-def escape_cdata(cdata):
-    """Escape a string for an XML CDATA section."""
-    return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
-
-def id_split(idval):
-    m = TEST_ID.match(idval)
-    if m:
-        name, fargs = m.groups()
-        head, tail = name.rsplit(".", 1)
-        return [head, tail+fargs]
-    else:
-        return idval.rsplit(".", 1)
-
-def nice_classname(obj):
-    """Returns a nice name for class object or class instance.
-
-        >>> nice_classname(Exception()) # doctest: +ELLIPSIS
-        '...Exception'
-        >>> nice_classname(Exception) # doctest: +ELLIPSIS
-        '...Exception'
-
-    """
-    if inspect.isclass(obj):
-        cls_name = obj.__name__
-    else:
-        cls_name = obj.__class__.__name__
-    mod = inspect.getmodule(obj)
-    if mod:
-        name = mod.__name__
-        # jython
-        if name.startswith('org.python.core.'):
-            name = name[len('org.python.core.'):]
-        return "%s.%s" % (name, cls_name)
-    else:
-        return cls_name
-
-def exc_message(exc_info):
-    """Return the exception's message."""
-    exc = exc_info[1]
-    if exc is None:
-        # str exception
-        result = exc_info[0]
-    else:
-        try:
-            result = str(exc)
-        except UnicodeEncodeError:
-            try:
-                result = unicode(exc)
-            except UnicodeError:
-                # Fallback to args as neither str nor
-                # unicode(Exception(u'\xe6')) work in Python < 2.6
-                result = exc.args[0]
-    result = force_unicode(result, 'UTF-8')
-    return xml_safe(result)
-
-class Tee(object):
-    def __init__(self, encoding, *args):
-        self._encoding = encoding
-        self._streams = args
-
-    def write(self, data):
-        data = force_unicode(data, self._encoding)
-        for s in self._streams:
-            s.write(data)
-
-    def writelines(self, lines):
-        for line in lines:
-            self.write(line)
-
-    def flush(self):
-        for s in self._streams:
-            s.flush()
-
-    def isatty(self):
-        return False
-
-
-class Xunit(Plugin):
-    """This plugin provides test results in the standard XUnit XML format."""
-    name = 'xunit'
-    score = 1500
-    encoding = 'UTF-8'
-    error_report_file = None
-
-    def __init__(self):
-        super(Xunit, self).__init__()
-        self._capture_stack = []
-        self._currentStdout = None
-        self._currentStderr = None
-
-    def _timeTaken(self):
-        if hasattr(self, '_timer'):
-            taken = time() - self._timer
-        else:
-            # test died before it ran (probably error in setup())
-            # or success/failure added before test started probably
-            # due to custom TestResult munging
-            taken = 0.0
-        return taken
-
-    def _quoteattr(self, attr):
-        """Escape an XML attribute. Value can be unicode."""
-        attr = xml_safe(attr)
-        return saxutils.quoteattr(attr)
-
-    def options(self, parser, env):
-        """Sets additional command line options."""
-        Plugin.options(self, parser, env)
-        parser.add_option(
-            '--xunit-file', action='store',
-            dest='xunit_file', metavar="FILE",
-            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
-            help=("Path to xml file to store the xunit report in. "
-                  "Default is nosetests.xml in the working directory "
-                  "[NOSE_XUNIT_FILE]"))
-
-        parser.add_option(
-            '--xunit-testsuite-name', action='store',
-            dest='xunit_testsuite_name', metavar="PACKAGE",
-            default=env.get('NOSE_XUNIT_TESTSUITE_NAME', 'nosetests'),
-            help=("Name of the testsuite in the xunit xml, generated by plugin. "
-                  "Default test suite name is nosetests."))
-
-    def configure(self, options, config):
-        """Configures the xunit plugin."""
-        Plugin.configure(self, options, config)
-        self.config = config
-        if self.enabled:
-            self.stats = {'errors': 0,
-                          'failures': 0,
-                          'passes': 0,
-                          'skipped': 0
-                          }
-            self.errorlist = []
-            self.error_report_file_name = os.path.realpath(options.xunit_file)
-            self.xunit_testsuite_name = options.xunit_testsuite_name
-
-    def report(self, stream):
-        """Writes an Xunit-formatted XML file
-
-        The file includes a report of test errors and failures.
-
-        """
-        self.error_report_file = codecs.open(self.error_report_file_name, 'w',
-                                             self.encoding, 'replace')
-        self.stats['encoding'] = self.encoding
-        self.stats['testsuite_name'] = self.xunit_testsuite_name
-        self.stats['total'] = (self.stats['errors'] + self.stats['failures']
-                               + self.stats['passes'] + self.stats['skipped'])
-        self.error_report_file.write(
-            u'<?xml version="1.0" encoding="%(encoding)s"?>'
-            u'<testsuite name="%(testsuite_name)s" tests="%(total)d" '
-            u'errors="%(errors)d" failures="%(failures)d" '
-            u'skip="%(skipped)d">' % self.stats)
-        self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
-                                               for e in self.errorlist]))
-        self.error_report_file.write(u'</testsuite>')
-        self.error_report_file.close()
-        if self.config.verbosity > 1:
-            stream.writeln("-" * 70)
-            stream.writeln("XML: %s" % self.error_report_file.name)
-
-    def _startCapture(self):
-        self._capture_stack.append((sys.stdout, sys.stderr))
-        self._currentStdout = StringIO()
-        self._currentStderr = StringIO()
-        sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
-        sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)
-
-    def startContext(self, context):
-        self._startCapture()
-
-    def stopContext(self, context):
-        self._endCapture()
-
-    def beforeTest(self, test):
-        """Initializes a timer before starting a test."""
-        self._timer = time()
-        self._startCapture()
-
-    def _endCapture(self):
-        if self._capture_stack:
-            sys.stdout, sys.stderr = self._capture_stack.pop()
-
-    def afterTest(self, test):
-        self._endCapture()
-        self._currentStdout = None
-        self._currentStderr = None
-
-    def finalize(self, test):
-        while self._capture_stack:
-            self._endCapture()
-
-    def _getCapturedStdout(self):
-        if self._currentStdout:
-            value = self._currentStdout.getvalue()
-            if value:
-                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
-                        value)
-        return ''
-
-    def _getCapturedStderr(self):
-        if self._currentStderr:
-            value = self._currentStderr.getvalue()
-            if value:
-                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
-                        value)
-        return ''
-
-    def addError(self, test, err, capt=None):
-        """Add error output to Xunit report.
-        """
-        taken = self._timeTaken()
-
-        if issubclass(err[0], SkipTest):
-            type = 'skipped'
-            self.stats['skipped'] += 1
-        else:
-            type = 'error'
-            self.stats['errors'] += 1
-
-        tb = format_exception(err, self.encoding)
-        id = test.id()
-
-        self.errorlist.append(
-            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
-            u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
-            u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
-            {'cls': self._quoteattr(id_split(id)[0]),
-             'name': self._quoteattr(id_split(id)[-1]),
-             'taken': taken,
-             'type': type,
-             'errtype': self._quoteattr(nice_classname(err[0])),
-             'message': self._quoteattr(exc_message(err)),
-             'tb': escape_cdata(tb),
-             'systemout': self._getCapturedStdout(),
-             'systemerr': self._getCapturedStderr(),
-             })
-
-    def addFailure(self, test, err, capt=None, tb_info=None):
-        """Add failure output to Xunit report.
-        """
-        taken = self._timeTaken()
-        tb = format_exception(err, self.encoding)
-        self.stats['failures'] += 1
-        id = test.id()
-
-        self.errorlist.append(
-            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
-            u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
-            u'</failure>%(systemout)s%(systemerr)s</testcase>' %
-            {'cls': self._quoteattr(id_split(id)[0]),
-             'name': self._quoteattr(id_split(id)[-1]),
-             'taken': taken,
-             'errtype': self._quoteattr(nice_classname(err[0])),
-             'message': self._quoteattr(exc_message(err)),
-             'tb': escape_cdata(tb),
-             'systemout': self._getCapturedStdout(),
-             'systemerr': self._getCapturedStderr(),
-             })
-
-    def addSuccess(self, test, capt=None):
-        """Add success output to Xunit report.
-        """
-        taken = self._timeTaken()
-        self.stats['passes'] += 1
-        id = test.id()
-        self.errorlist.append(
-            '<testcase classname=%(cls)s name=%(name)s '
-            'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
-            {'cls': self._quoteattr(id_split(id)[0]),
-             'name': self._quoteattr(id_split(id)[-1]),
-             'taken': taken,
-             'systemout': self._getCapturedStdout(),
-             'systemerr': self._getCapturedStderr(),
-             })
diff --git a/lib/spack/external/nose/proxy.py b/lib/spack/external/nose/proxy.py
deleted file mode 100644
index c2676cb195..0000000000
--- a/lib/spack/external/nose/proxy.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-Result Proxy
-------------
-
-The result proxy wraps the result instance given to each test. It
-performs two functions: enabling extended error/failure reporting
-and calling plugins.
-
-As each result event is fired, plugins are called with the same event;
-however, plugins are called with the nose.case.Test instance that
-wraps the actual test. So when a test fails and calls
-result.addFailure(self, err), the result proxy calls
-addFailure(self.test, err) for each plugin. This allows plugins to
-have a single stable interface for all test types, and also to
-manipulate the test object itself by setting the `test` attribute of
-the nose.case.Test that they receive.
-"""
-import logging
-from nose.config import Config
-
-
-log = logging.getLogger(__name__)
-
-
-def proxied_attribute(local_attr, proxied_attr, doc):
-    """Create a property that proxies attribute ``proxied_attr`` through
-    the local attribute ``local_attr``.
-    """
-    def fget(self):
-        return getattr(getattr(self, local_attr), proxied_attr)
-    def fset(self, value):
-        setattr(getattr(self, local_attr), proxied_attr, value)
-    def fdel(self):
-        delattr(getattr(self, local_attr), proxied_attr)
-    return property(fget, fset, fdel, doc)
-
-
-class ResultProxyFactory(object):
-    """Factory for result proxies. Generates a ResultProxy bound to each test
-    and the result passed to the test.
-    """
-    def __init__(self, config=None):
-        if config is None:
-            config = Config()
-        self.config = config
-        self.__prepared = False
-        self.__result = None
-
-    def __call__(self, result, test):
-        """Return a ResultProxy for the current test.
-
-        On first call, plugins are given a chance to replace the
-        result used for the remaining tests. If a plugin returns a
-        value from prepareTestResult, that object will be used as the
-        result for all tests.
-        """
-        if not self.__prepared:
-            self.__prepared = True
-            plug_result = self.config.plugins.prepareTestResult(result)
-            if plug_result is not None:
-                self.__result = result = plug_result
-        if self.__result is not None:
-            result = self.__result
-        return ResultProxy(result, test, config=self.config)
-
-
-class ResultProxy(object):
-    """Proxy to TestResults (or other results handler).
-
-    One ResultProxy is created for each nose.case.Test. The result
-    proxy calls plugins with the nose.case.Test instance (instead of
-    the wrapped test case) as each result call is made. Finally, the
-    real result method is called, also with the nose.case.Test
-    instance as the test parameter.
-
-    """
-    def __init__(self, result, test, config=None):
-        if config is None:
-            config = Config()
-        self.config = config
-        self.plugins = config.plugins
-        self.result = result
-        self.test = test
-
-    def __repr__(self):
-        return repr(self.result)
-
-    def _prepareErr(self, err):
-        if not isinstance(err[1], Exception) and isinstance(err[0], type):
-            # Turn value back into an Exception (required in Python 3.x).
-            # Plugins do all sorts of crazy things with exception values.
-            # Convert it to a custom subclass of Exception with the same
-            # name as the actual exception to make it print correctly.
-            value = type(err[0].__name__, (Exception,), {})(err[1])
-            err = (err[0], value, err[2])
-        return err
-
-    def assertMyTest(self, test):
-        # The test I was called with must be my .test or my
-        # .test's .test. or my .test.test's .case
-
-        case = getattr(self.test, 'test', None)
-        assert (test is self.test
-                or test is case
-                or test is getattr(case, '_nose_case', None)), (
-                "ResultProxy for %r (%s) was called with test %r (%s)"
-                % (self.test, id(self.test), test, id(test)))
-
-    def afterTest(self, test):
-        self.assertMyTest(test)
-        self.plugins.afterTest(self.test)
-        if hasattr(self.result, "afterTest"):
-            self.result.afterTest(self.test)
-
-    def beforeTest(self, test):
-        self.assertMyTest(test)
-        self.plugins.beforeTest(self.test)
-        if hasattr(self.result, "beforeTest"):
-            self.result.beforeTest(self.test)
-
-    def addError(self, test, err):
-        self.assertMyTest(test)
-        plugins = self.plugins
-        plugin_handled = plugins.handleError(self.test, err)
-        if plugin_handled:
-            return
-        # test.passed is set in result, to account for error classes
-        formatted = plugins.formatError(self.test, err)
-        if formatted is not None:
-            err = formatted
-        plugins.addError(self.test, err)
-        self.result.addError(self.test, self._prepareErr(err))
-        if not self.result.wasSuccessful() and self.config.stopOnError:
-            self.shouldStop = True
-
-    def addFailure(self, test, err):
-        self.assertMyTest(test)
-        plugins = self.plugins
-        plugin_handled = plugins.handleFailure(self.test, err)
-        if plugin_handled:
-            return
-        self.test.passed = False
-        formatted = plugins.formatFailure(self.test, err)
-        if formatted is not None:
-            err = formatted
-        plugins.addFailure(self.test, err)
-        self.result.addFailure(self.test, self._prepareErr(err))
-        if self.config.stopOnError:
-            self.shouldStop = True
-
-    def addSkip(self, test, reason):
-        # 2.7 compat shim
-        from nose.plugins.skip import SkipTest
-        self.assertMyTest(test)
-        plugins = self.plugins
-        if not isinstance(reason, Exception):
-            # for Python 3.2+
-            reason = Exception(reason)
-        plugins.addError(self.test, (SkipTest, reason, None))
-        self.result.addSkip(self.test, reason)
-
-    def addSuccess(self, test):
-        self.assertMyTest(test)
-        self.plugins.addSuccess(self.test)
-        self.result.addSuccess(self.test)
-
-    def startTest(self, test):
-        self.assertMyTest(test)
-        self.plugins.startTest(self.test)
-        self.result.startTest(self.test)
-
-    def stop(self):
-        self.result.stop()
-
-    def stopTest(self, test):
-        self.assertMyTest(test)
-        self.plugins.stopTest(self.test)
-        self.result.stopTest(self.test)
-
-    # proxied attributes
-    shouldStop = proxied_attribute('result', 'shouldStop',
-                                    """Should the test run stop?""")
-    errors = proxied_attribute('result', 'errors',
-                               """Tests that raised an exception""")
-    failures = proxied_attribute('result', 'failures',
-                                 """Tests that failed""")
-    testsRun = proxied_attribute('result', 'testsRun',
-                                 """Number of tests run""")
diff --git a/lib/spack/external/nose/pyversion.py b/lib/spack/external/nose/pyversion.py
deleted file mode 100644
index 091238da75..0000000000
--- a/lib/spack/external/nose/pyversion.py
+++ /dev/null
@@ -1,215 +0,0 @@
-"""
-This module contains fixups for using nose under different versions of Python.
-"""
-import sys
-import os
-import traceback
-import types
-import inspect
-import nose.util
-
-__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType',
-           'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod',
-           'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode',
-           'format_exception']
-
-# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x
-# source will be replaced with 'str()' when running 2to3, so this test will
-# then become true)
-UNICODE_STRINGS = (type(unicode()) == type(str()))
-
-if sys.version_info[:2] < (3, 0):
-    def force_unicode(s, encoding='UTF-8'):
-        try:
-            s = unicode(s)
-        except UnicodeDecodeError:
-            s = str(s).decode(encoding, 'replace')
-
-        return s
-else:
-    def force_unicode(s, encoding='UTF-8'):
-        return str(s)
-
-# new.instancemethod() is obsolete for new-style classes (Python 3.x)
-# We need to use descriptor methods instead.
-try:
-    import new
-    def make_instancemethod(function, instance):
-        return new.instancemethod(function.im_func, instance,
-                                  instance.__class__)
-except ImportError:
-    def make_instancemethod(function, instance):
-        return function.__get__(instance, instance.__class__)
-
-# To be forward-compatible, we do all list sorts using keys instead of cmp
-# functions.  However, part of the unittest.TestLoader API involves a
-# user-provideable cmp function, so we need some way to convert that.
-def cmp_to_key(mycmp):
-    'Convert a cmp= function into a key= function'
-    class Key(object):
-        def __init__(self, obj):
-            self.obj = obj
-        def __lt__(self, other):
-            return mycmp(self.obj, other.obj) < 0
-        def __gt__(self, other):
-            return mycmp(self.obj, other.obj) > 0
-        def __eq__(self, other):
-            return mycmp(self.obj, other.obj) == 0
-    return Key
-
-# Python 2.3 also does not support list-sorting by key, so we need to convert
-# keys to cmp functions if we're running on old Python..
-if sys.version_info < (2, 4):
-    def sort_list(l, key, reverse=False):
-        if reverse:
-            return l.sort(lambda a, b: cmp(key(b), key(a)))
-        else:
-            return l.sort(lambda a, b: cmp(key(a), key(b)))
-else:
-    def sort_list(l, key, reverse=False):
-        return l.sort(key=key, reverse=reverse)
-
-# In Python 3.x, all objects are "new style" objects descended from 'type', and
-# thus types.ClassType and types.TypeType don't exist anymore.  For
-# compatibility, we make sure they still work.
-if hasattr(types, 'ClassType'):
-    ClassType = types.ClassType
-    TypeType = types.TypeType
-else:
-    ClassType = type
-    TypeType = type
-
-# The following emulates the behavior (we need) of an 'unbound method' under
-# Python 3.x (namely, the ability to have a class associated with a function
-# definition so that things can do stuff based on its associated class)
-class UnboundMethod:
-    def __init__(self, cls, func):
-        # Make sure we have all the same attributes as the original function,
-        # so that the AttributeSelector plugin will work correctly...
-        self.__dict__ = func.__dict__.copy()
-        self._func = func
-        self.__self__ = UnboundSelf(cls)
-        if sys.version_info < (3, 0):
-            self.im_class = cls
-        self.__doc__ = getattr(func, '__doc__', None)
-
-    def address(self):
-        cls = self.__self__.cls
-        modname = cls.__module__
-        module = sys.modules[modname]
-        filename = getattr(module, '__file__', None)
-        if filename is not None:
-            filename = os.path.abspath(filename)
-        return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__,
-                                                        self._func.__name__))
-
-    def __call__(self, *args, **kwargs):
-        return self._func(*args, **kwargs)
-
-    def __getattr__(self, attr):
-        return getattr(self._func, attr)
-
-    def __repr__(self):
-        return '<unbound method %s.%s>' % (self.__self__.cls.__name__,
-                                           self._func.__name__)
-
-class UnboundSelf:
-    def __init__(self, cls):
-        self.cls = cls
-
-    # We have to do this hackery because Python won't let us override the
-    # __class__ attribute...
-    def __getattribute__(self, attr):
-        if attr == '__class__':
-            return self.cls
-        else:
-            return object.__getattribute__(self, attr)
-
-def unbound_method(cls, func):
-    if inspect.ismethod(func):
-        return func
-    if not inspect.isfunction(func):
-        raise TypeError('%s is not a function' % (repr(func),))
-    return UnboundMethod(cls, func)
-
-def ismethod(obj):
-    return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
-
-
-# Make a pseudo-bytes function that can be called without the encoding arg:
-if sys.version_info >= (3, 0):
-    def bytes_(s, encoding='utf8'):
-        if isinstance(s, bytes):
-            return s
-        return bytes(s, encoding)
-else:
-    def bytes_(s, encoding=None):
-        return str(s)
-
-
-if sys.version_info[:2] >= (2, 6):
-    def isgenerator(o):
-        if isinstance(o, UnboundMethod):
-            o = o._func
-        return inspect.isgeneratorfunction(o) or inspect.isgenerator(o)
-else:
-    try:
-        from compiler.consts import CO_GENERATOR
-    except ImportError:
-        # IronPython doesn't have a complier module
-        CO_GENERATOR=0x20
-
-    def isgenerator(func):
-        try:
-            return func.func_code.co_flags & CO_GENERATOR != 0
-        except AttributeError:
-            return False
-
-# Make a function to help check if an exception is derived from BaseException.
-# In Python 2.4, we just use Exception instead.
-if sys.version_info[:2] < (2, 5):
-    def is_base_exception(exc):
-        return isinstance(exc, Exception)
-else:
-    def is_base_exception(exc):
-        return isinstance(exc, BaseException)
-
-if sys.version_info[:2] < (3, 0):
-    def exc_to_unicode(ev, encoding='utf-8'):
-        if is_base_exception(ev):
-            if not hasattr(ev, '__unicode__'):
-                # 2.5-
-                if not hasattr(ev, 'message'):
-                    # 2.4
-                    msg = len(ev.args) and ev.args[0] or ''
-                else:
-                    msg = ev.message
-                msg = force_unicode(msg, encoding=encoding)
-                clsname = force_unicode(ev.__class__.__name__,
-                        encoding=encoding)
-                ev = u'%s: %s' % (clsname, msg)
-        elif not isinstance(ev, unicode):
-            ev = repr(ev)
-
-        return force_unicode(ev, encoding=encoding)
-else:
-    def exc_to_unicode(ev, encoding='utf-8'):
-        return str(ev)
-
-def format_exception(exc_info, encoding='UTF-8'):
-    ec, ev, tb = exc_info
-
-    # Our exception object may have been turned into a string, and Python 3's
-    # traceback.format_exception() doesn't take kindly to that (it expects an
-    # actual exception object).  So we work around it, by doing the work
-    # ourselves if ev is not an exception object.
-    if not is_base_exception(ev):
-        tb_data = force_unicode(
-                ''.join(traceback.format_tb(tb)),
-                encoding)
-        ev = exc_to_unicode(ev)
-        return tb_data + ev
-    else:
-        return force_unicode(
-                ''.join(traceback.format_exception(*exc_info)),
-                encoding)
diff --git a/lib/spack/external/nose/result.py b/lib/spack/external/nose/result.py
deleted file mode 100644
index f974a14ae2..0000000000
--- a/lib/spack/external/nose/result.py
+++ /dev/null
@@ -1,200 +0,0 @@
-"""
-Test Result
------------
-
-Provides a TextTestResult that extends unittest's _TextTestResult to
-provide support for error classes (such as the builtin skip and
-deprecated classes), and hooks for plugins to take over or extend
-reporting.
-"""
-
-import logging
-try:
-    # 2.7+
-    from unittest.runner import _TextTestResult
-except ImportError:
-    from unittest import _TextTestResult
-from nose.config import Config
-from nose.util import isclass, ln as _ln # backwards compat
-
-log = logging.getLogger('nose.result')
-
-
-def _exception_detail(exc):
-    # this is what stdlib module traceback does
-    try:
-        return str(exc)
-    except:
-        return '<unprintable %s object>' % type(exc).__name__
-
-
-class TextTestResult(_TextTestResult):
-    """Text test result that extends unittest's default test result
-    support for a configurable set of errorClasses (eg, Skip,
-    Deprecated, TODO) that extend the errors/failures/success triad.
-    """
-    def __init__(self, stream, descriptions, verbosity, config=None,
-                 errorClasses=None):
-        if errorClasses is None:
-            errorClasses = {}
-        self.errorClasses = errorClasses
-        if config is None:
-            config = Config()
-        self.config = config
-        _TextTestResult.__init__(self, stream, descriptions, verbosity)
-
-    def addSkip(self, test, reason):
-        # 2.7 skip compat
-        from nose.plugins.skip import SkipTest
-        if SkipTest in self.errorClasses:
-            storage, label, isfail = self.errorClasses[SkipTest]
-            storage.append((test, reason))
-            self.printLabel(label, (SkipTest, reason, None))
-
-    def addError(self, test, err):
-        """Overrides normal addError to add support for
-        errorClasses. If the exception is a registered class, the
-        error will be added to the list for that class, not errors.
-        """
-        ec, ev, tb = err
-        try:
-            exc_info = self._exc_info_to_string(err, test)
-        except TypeError:
-            # 2.3 compat
-            exc_info = self._exc_info_to_string(err)
-        for cls, (storage, label, isfail) in self.errorClasses.items():
-            #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
-            #    from nose.tools import set_trace
-            #    set_trace()
-            if isclass(ec) and issubclass(ec, cls):
-                if isfail:
-                    test.passed = False
-                storage.append((test, exc_info))
-                self.printLabel(label, err)
-                return
-        self.errors.append((test, exc_info))
-        test.passed = False
-        self.printLabel('ERROR')
-
-    # override to bypass changes in 2.7
-    def getDescription(self, test):
-        if self.descriptions:
-            return test.shortDescription() or str(test)
-        else:
-            return str(test)
-
-    def printLabel(self, label, err=None):
-        # Might get patched into a streamless result
-        stream = getattr(self, 'stream', None)
-        if stream is not None:
-            if self.showAll:
-                message = [label]
-                if err:
-                    detail = _exception_detail(err[1])
-                    if detail:
-                        message.append(detail)
-                stream.writeln(": ".join(message))
-            elif self.dots:
-                stream.write(label[:1])
-
-    def printErrors(self):
-        """Overrides to print all errorClasses errors as well.
-        """
-        _TextTestResult.printErrors(self)
-        for cls in self.errorClasses.keys():
-            storage, label, isfail = self.errorClasses[cls]
-            if isfail:
-                self.printErrorList(label, storage)
-        # Might get patched into a result with no config
-        if hasattr(self, 'config'):
-            self.config.plugins.report(self.stream)
-
-    def printSummary(self, start, stop):
-        """Called by the test runner to print the final summary of test
-        run results.
-        """
-        write = self.stream.write
-        writeln = self.stream.writeln
-        taken = float(stop - start)
-        run = self.testsRun
-        plural = run != 1 and "s" or ""
-
-        writeln(self.separator2)
-        writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
-        writeln()
-
-        summary = {}
-        eckeys = self.errorClasses.keys()
-        for cls in eckeys:
-            storage, label, isfail = self.errorClasses[cls]
-            count = len(storage)
-            if not count:
-                continue
-            summary[label] = count
-        if len(self.failures):
-            summary['failures'] = len(self.failures)
-        if len(self.errors):
-            summary['errors'] = len(self.errors)
-
-        if not self.wasSuccessful():
-            write("FAILED")
-        else:
-            write("OK")
-        items = summary.items()
-        if items:
-            items.sort()
-            write(" (")
-            write(", ".join(["%s=%s" % (label, count) for
-                             label, count in items]))
-            writeln(")")
-        else:
-            writeln()
-
-    def wasSuccessful(self):
-        """Overrides to check that there are no errors in errorClasses
-        lists that are marked as errors and should cause a run to
-        fail.
-        """
-        if self.errors or self.failures:
-            return False
-        for cls in self.errorClasses.keys():
-            storage, label, isfail = self.errorClasses[cls]
-            if not isfail:
-                continue
-            if storage:
-                return False
-        return True
-
-    def _addError(self, test, err):
-        try:
-            exc_info = self._exc_info_to_string(err, test)
-        except TypeError:
-            # 2.3: does not take test arg
-            exc_info = self._exc_info_to_string(err)
-        self.errors.append((test, exc_info))
-        if self.showAll:
-            self.stream.write('ERROR')
-        elif self.dots:
-            self.stream.write('E')
-
-    def _exc_info_to_string(self, err, test=None):
-        # 2.7 skip compat
-        from nose.plugins.skip import SkipTest
-        if isclass(err[0]) and issubclass(err[0], SkipTest):
-            return str(err[1])
-        # 2.3/2.4 -- 2.4 passes test, 2.3 does not
-        try:
-            return _TextTestResult._exc_info_to_string(self, err, test)
-        except TypeError:
-            # 2.3: does not take test arg
-            return _TextTestResult._exc_info_to_string(self, err)
-
-
-def ln(*arg, **kw):
-    from warnings import warn
-    warn("ln() has moved to nose.util from nose.result and will be removed "
-         "from nose.result in a future release. Please update your imports ",
-         DeprecationWarning)
-    return _ln(*arg, **kw)
-
-
diff --git a/lib/spack/external/nose/selector.py b/lib/spack/external/nose/selector.py
deleted file mode 100644
index b63f7af0b1..0000000000
--- a/lib/spack/external/nose/selector.py
+++ /dev/null
@@ -1,251 +0,0 @@
-"""
-Test Selection
---------------
-
-Test selection is handled by a Selector. The test loader calls the
-appropriate selector method for each object it encounters that it
-thinks may be a test.
-"""
-import logging
-import os
-import unittest
-from nose.config import Config
-from nose.util import split_test_name, src, getfilename, getpackage, ispackage, is_executable
-
-log = logging.getLogger(__name__)
-
-__all__ = ['Selector', 'defaultSelector', 'TestAddress']
-
-
-# for efficiency and easier mocking
-op_join = os.path.join
-op_basename = os.path.basename
-op_exists = os.path.exists
-op_splitext = os.path.splitext
-op_isabs = os.path.isabs
-op_abspath = os.path.abspath
-
-
-class Selector(object):
-    """Core test selector. Examines test candidates and determines whether,
-    given the specified configuration, the test candidate should be selected
-    as a test.
-    """
-    def __init__(self, config):
-        if config is None:
-            config = Config()
-        self.configure(config)
-
-    def configure(self, config):
-        self.config = config
-        self.exclude = config.exclude
-        self.ignoreFiles = config.ignoreFiles
-        self.include = config.include
-        self.plugins = config.plugins
-        self.match = config.testMatch
-        
-    def matches(self, name):
-        """Does the name match my requirements?
-
-        To match, a name must match config.testMatch OR config.include
-        and it must not match config.exclude
-        """
-        return ((self.match.search(name)
-                 or (self.include and
-                     filter(None,
-                            [inc.search(name) for inc in self.include])))
-                and ((not self.exclude)
-                     or not filter(None,
-                                   [exc.search(name) for exc in self.exclude])
-                 ))
-    
-    def wantClass(self, cls):
-        """Is the class a wanted test class?
-
-        A class must be a unittest.TestCase subclass, or match test name
-        requirements. Classes that start with _ are always excluded.
-        """
-        declared = getattr(cls, '__test__', None)
-        if declared is not None:
-            wanted = declared
-        else:
-            wanted = (not cls.__name__.startswith('_')
-                      and (issubclass(cls, unittest.TestCase)
-                           or self.matches(cls.__name__)))
-        
-        plug_wants = self.plugins.wantClass(cls)        
-        if plug_wants is not None:
-            log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
-            wanted = plug_wants
-        log.debug("wantClass %s? %s", cls, wanted)
-        return wanted
-
-    def wantDirectory(self, dirname):
-        """Is the directory a wanted test directory?
-
-        All package directories match, so long as they do not match exclude. 
-        All other directories must match test requirements.
-        """
-        tail = op_basename(dirname)
-        if ispackage(dirname):
-            wanted = (not self.exclude
-                      or not filter(None,
-                                    [exc.search(tail) for exc in self.exclude]
-                                    ))
-        else:
-            wanted = (self.matches(tail)
-                      or (self.config.srcDirs
-                          and tail in self.config.srcDirs))
-        plug_wants = self.plugins.wantDirectory(dirname)
-        if plug_wants is not None:
-            log.debug("Plugin setting selection of %s to %s",
-                      dirname, plug_wants)
-            wanted = plug_wants
-        log.debug("wantDirectory %s? %s", dirname, wanted)
-        return wanted
-    
-    def wantFile(self, file):
-        """Is the file a wanted test file?
-
-        The file must be a python source file and match testMatch or
-        include, and not match exclude. Files that match ignore are *never*
-        wanted, regardless of plugin, testMatch, include or exclude settings.
-        """
-        # never, ever load files that match anything in ignore
-        # (.* _* and *setup*.py by default)
-        base = op_basename(file)
-        ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
-                           if ignore_this.search(base) ]
-        if ignore_matches:
-            log.debug('%s matches ignoreFiles pattern; skipped',
-                      base) 
-            return False
-        if not self.config.includeExe and is_executable(file):
-            log.info('%s is executable; skipped', file)
-            return False
-        dummy, ext = op_splitext(base)
-        pysrc = ext == '.py'
-
-        wanted = pysrc and self.matches(base) 
-        plug_wants = self.plugins.wantFile(file)
-        if plug_wants is not None:
-            log.debug("plugin setting want %s to %s", file, plug_wants)
-            wanted = plug_wants
-        log.debug("wantFile %s? %s", file, wanted)
-        return wanted
-
-    def wantFunction(self, function):
-        """Is the function a test function?
-        """
-        try:
-            if hasattr(function, 'compat_func_name'):
-                funcname = function.compat_func_name
-            else:
-                funcname = function.__name__
-        except AttributeError:
-            # not a function
-            return False
-        declared = getattr(function, '__test__', None)
-        if declared is not None:
-            wanted = declared
-        else:
-            wanted = not funcname.startswith('_') and self.matches(funcname)
-        plug_wants = self.plugins.wantFunction(function)
-        if plug_wants is not None:
-            wanted = plug_wants
-        log.debug("wantFunction %s? %s", function, wanted)
-        return wanted
-
-    def wantMethod(self, method):
-        """Is the method a test method?
-        """
-        try:
-            method_name = method.__name__
-        except AttributeError:
-            # not a method
-            return False
-        if method_name.startswith('_'):
-            # never collect 'private' methods
-            return False
-        declared = getattr(method, '__test__', None)
-        if declared is not None:
-            wanted = declared
-        else:
-            wanted = self.matches(method_name)
-        plug_wants = self.plugins.wantMethod(method)
-        if plug_wants is not None:
-            wanted = plug_wants
-        log.debug("wantMethod %s? %s", method, wanted)
-        return wanted
-    
-    def wantModule(self, module):
-        """Is the module a test module?
-
-        The tail of the module name must match test requirements. One exception:
-        we always want __main__.
-        """
-        declared = getattr(module, '__test__', None)
-        if declared is not None:
-            wanted = declared
-        else:
-            wanted = self.matches(module.__name__.split('.')[-1]) \
-                     or module.__name__ == '__main__'
-        plug_wants = self.plugins.wantModule(module)
-        if plug_wants is not None:
-            wanted = plug_wants
-        log.debug("wantModule %s? %s", module, wanted)
-        return wanted
-        
-defaultSelector = Selector        
-
-
-class TestAddress(object):
-    """A test address represents a user's request to run a particular
-    test. The user may specify a filename or module (or neither),
-    and/or a callable (a class, function, or method). The naming
-    format for test addresses is:
-
-    filename_or_module:callable
-
-    Filenames that are not absolute will be made absolute relative to
-    the working dir.
-
-    The filename or module part will be considered a module name if it
-    doesn't look like a file, that is, if it doesn't exist on the file
-    system and it doesn't contain any directory separators and it
-    doesn't end in .py.
-
-    Callables may be a class name, function name, method name, or
-    class.method specification.
-    """
-    def __init__(self, name, workingDir=None):
-        if workingDir is None:
-            workingDir = os.getcwd()
-        self.name = name
-        self.workingDir = workingDir
-        self.filename, self.module, self.call = split_test_name(name)
-        log.debug('Test name %s resolved to file %s, module %s, call %s',
-                  name, self.filename, self.module, self.call)
-        if self.filename is None:
-            if self.module is not None:
-                self.filename = getfilename(self.module, self.workingDir)
-        if self.filename:
-            self.filename = src(self.filename)
-            if not op_isabs(self.filename):
-                self.filename = op_abspath(op_join(workingDir,
-                                                   self.filename))
-            if self.module is None:
-                self.module = getpackage(self.filename)
-        log.debug(
-            'Final resolution of test name %s: file %s module %s call %s',
-            name, self.filename, self.module, self.call)
-
-    def totuple(self):
-        return (self.filename, self.module, self.call)
-        
-    def __str__(self):
-        return self.name
-
-    def __repr__(self):
-        return "%s: (%s, %s, %s)" % (self.name, self.filename,
-                                     self.module, self.call)
diff --git a/lib/spack/external/nose/sphinx/__init__.py b/lib/spack/external/nose/sphinx/__init__.py
deleted file mode 100644
index 2ae28399f5..0000000000
--- a/lib/spack/external/nose/sphinx/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-pass
diff --git a/lib/spack/external/nose/sphinx/pluginopts.py b/lib/spack/external/nose/sphinx/pluginopts.py
deleted file mode 100644
index d2b284ab27..0000000000
--- a/lib/spack/external/nose/sphinx/pluginopts.py
+++ /dev/null
@@ -1,189 +0,0 @@
-"""
-Adds a sphinx directive that can be used to automatically document a plugin.
-
-this::
-
- .. autoplugin :: nose.plugins.foo
-    :plugin: Pluggy
-    
-produces::
-
-  .. automodule :: nose.plugins.foo
-  
-  Options
-  -------
-
-  .. cmdoption :: --foo=BAR, --fooble=BAR
-
-    Do the foo thing to the new thing.
-
-  Plugin
-  ------
-
-  .. autoclass :: nose.plugins.foo.Pluggy
-     :members:
-
-  Source
-  ------
-
-  .. include :: path/to/nose/plugins/foo.py
-     :literal:
-
-"""
-import os
-try:
-    from docutils import nodes, utils
-    from docutils.statemachine import ViewList
-    from docutils.parsers.rst import directives
-except ImportError:
-    pass # won't run anyway
-
-from nose.util import resolve_name
-from nose.plugins.base import Plugin
-from nose.plugins.manager import BuiltinPluginManager
-from nose.config import Config
-from nose.core import TestProgram
-from inspect import isclass
-
-
-def autoplugin_directive(dirname, arguments, options, content, lineno,
-                         content_offset, block_text, state, state_machine):
-    mod_name = arguments[0]
-    mod = resolve_name(mod_name)
-    plug_name = options.get('plugin', None)
-    if plug_name:
-        obj = getattr(mod, plug_name)
-    else:
-        for entry in dir(mod):
-            obj = getattr(mod, entry)
-            if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin:
-                plug_name = '%s.%s' % (mod_name, entry)
-                break
-    
-    # mod docstring
-    rst = ViewList()
-    rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
-    rst.append('', '<autodoc>')
-    
-    # options
-    rst.append('Options', '<autodoc>')
-    rst.append('-------', '<autodoc>')
-    rst.append('', '<autodoc>')
-
-    plug = obj()
-    opts = OptBucket()
-    plug.options(opts, {})
-    for opt in opts:
-        rst.append(opt.options(), '<autodoc>')
-        rst.append('   \n', '<autodoc>')
-        rst.append('   ' + opt.help + '\n', '<autodoc>')
-        rst.append('\n', '<autodoc>')
-        
-    # plugin class
-    rst.append('Plugin', '<autodoc>')
-    rst.append('------', '<autodoc>')
-    rst.append('', '<autodoc>')
-    
-    rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
-    rst.append('   :members:\n', '<autodoc>')
-    rst.append('   :show-inheritance:\n', '<autodoc>')
-    rst.append('', '<autodoc>')
-    
-    # source
-    rst.append('Source', '<autodoc>')
-    rst.append('------', '<autodoc>')
-    rst.append(
-            '.. include :: %s\n' % utils.relative_path(
-                state_machine.document['source'],
-                os.path.abspath(mod.__file__.replace('.pyc', '.py'))),
-            '<autodoc>')
-    rst.append('   :literal:\n', '<autodoc>')
-    rst.append('', '<autodoc>')
-    
-    node = nodes.section()
-    node.document = state.document
-    surrounding_title_styles = state.memo.title_styles
-    surrounding_section_level = state.memo.section_level
-    state.memo.title_styles = []
-    state.memo.section_level = 0
-    state.nested_parse(rst, 0, node, match_titles=1)
-    state.memo.title_styles = surrounding_title_styles
-    state.memo.section_level = surrounding_section_level
-
-    return node.children
-
-
-def autohelp_directive(dirname, arguments, options, content, lineno,
-                       content_offset, block_text, state, state_machine):
-    """produces rst from nose help"""
-    config = Config(parserClass=OptBucket,
-                    plugins=BuiltinPluginManager())
-    parser = config.getParser(TestProgram.usage())
-    rst = ViewList()
-    for line in parser.format_help().split('\n'):
-        rst.append(line, '<autodoc>')
-
-    rst.append('Options', '<autodoc>')
-    rst.append('-------', '<autodoc>')
-    rst.append('', '<autodoc>')
-    for opt in parser:
-        rst.append(opt.options(), '<autodoc>')
-        rst.append('   \n', '<autodoc>')
-        rst.append('   ' + opt.help + '\n', '<autodoc>')
-        rst.append('\n', '<autodoc>')    
-    node = nodes.section()
-    node.document = state.document
-    surrounding_title_styles = state.memo.title_styles
-    surrounding_section_level = state.memo.section_level
-    state.memo.title_styles = []
-    state.memo.section_level = 0
-    state.nested_parse(rst, 0, node, match_titles=1)
-    state.memo.title_styles = surrounding_title_styles
-    state.memo.section_level = surrounding_section_level
-
-    return node.children
-
-    
-class OptBucket(object):
-    def __init__(self, doc=None, prog='nosetests'):
-        self.opts = []
-        self.doc = doc
-        self.prog = prog
-
-    def __iter__(self):
-        return iter(self.opts)
-
-    def format_help(self):
-        return self.doc.replace('%prog', self.prog).replace(':\n', '::\n')
-    
-    def add_option(self, *arg, **kw):
-        self.opts.append(Opt(*arg, **kw))
-
-
-class Opt(object):
-    def __init__(self, *arg, **kw):
-        self.opts = arg
-        self.action = kw.pop('action', None)
-        self.default = kw.pop('default', None)
-        self.metavar = kw.pop('metavar', None)
-        self.help = kw.pop('help', None)
-
-    def options(self):
-        buf = []
-        for optstring in self.opts:
-            desc = optstring
-            if self.action not in ('store_true', 'store_false'):
-                desc += '=%s' % self.meta(optstring)
-            buf.append(desc)
-        return '.. cmdoption :: ' + ', '.join(buf)
-
-    def meta(self, optstring):
-        # FIXME optparser default metavar?
-        return self.metavar or 'DEFAULT'
-
-    
-def setup(app):
-    app.add_directive('autoplugin',
-                      autoplugin_directive, 1, (1, 0, 1),
-                      plugin=directives.unchanged)
-    app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
diff --git a/lib/spack/external/nose/suite.py b/lib/spack/external/nose/suite.py
deleted file mode 100644
index a831105e34..0000000000
--- a/lib/spack/external/nose/suite.py
+++ /dev/null
@@ -1,609 +0,0 @@
-"""
-Test Suites
------------
-
-Provides a LazySuite, which is a suite whose test list is a generator
-function, and ContextSuite,which can run fixtures (setup/teardown
-functions or methods) for the context that contains its tests.
-
-"""
-from __future__ import generators
-
-import logging
-import sys
-import unittest
-from nose.case import Test
-from nose.config import Config
-from nose.proxy import ResultProxyFactory
-from nose.util import isclass, resolve_name, try_run
-
-if sys.platform == 'cli':
-    if sys.version_info[:2] < (2, 6):
-        import clr
-        clr.AddReference("IronPython")
-        from IronPython.Runtime.Exceptions import StringException
-    else:
-        class StringException(Exception):
-            pass
-
-log = logging.getLogger(__name__)
-#log.setLevel(logging.DEBUG)
-
-# Singleton for default value -- see ContextSuite.__init__ below
-_def = object()
-
-
-def _strclass(cls):
-    return "%s.%s" % (cls.__module__, cls.__name__)
-
-class MixedContextError(Exception):
-    """Error raised when a context suite sees tests from more than
-    one context.
-    """
-    pass
-
-
-class LazySuite(unittest.TestSuite):
-    """A suite that may use a generator as its list of tests
-    """
-    def __init__(self, tests=()):
-        """Initialize the suite. tests may be an iterable or a generator
-        """
-        super(LazySuite, self).__init__()
-        self._set_tests(tests)
-
-    def __iter__(self):
-        return iter(self._tests)
-
-    def __repr__(self):
-        return "<%s tests=generator (%s)>" % (
-            _strclass(self.__class__), id(self))
-
-    def __hash__(self):
-        return object.__hash__(self)
-
-    __str__ = __repr__
-
-    def addTest(self, test):
-        self._precache.append(test)
-
-    # added to bypass run changes in 2.7's unittest
-    def run(self, result):
-        for test in self._tests:
-            if result.shouldStop:
-                break
-            test(result)
-        return result
-
-    def __nonzero__(self):
-        log.debug("tests in %s?", id(self))
-        if self._precache:
-            return True
-        if self.test_generator is None:
-            return False
-        try:
-            test = self.test_generator.next()
-            if test is not None:
-                self._precache.append(test)
-                return True
-        except StopIteration:
-            pass
-        return False
-
-    def _get_tests(self):
-        log.debug("precache is %s", self._precache)
-        for test in self._precache:
-            yield test
-        if self.test_generator is None:
-            return
-        for test in self.test_generator:
-            yield test
-
-    def _set_tests(self, tests):
-        self._precache = []
-        is_suite = isinstance(tests, unittest.TestSuite)
-        if callable(tests) and not is_suite:
-            self.test_generator = tests()
-        elif is_suite:
-            # Suites need special treatment: they must be called like
-            # tests for their setup/teardown to run (if any)
-            self.addTests([tests])
-            self.test_generator = None
-        else:
-            self.addTests(tests)
-            self.test_generator = None
-
-    _tests = property(_get_tests, _set_tests, None,
-                      "Access the tests in this suite. Access is through a "
-                      "generator, so iteration may not be repeatable.")
-
-
-class ContextSuite(LazySuite):
-    """A suite with context.
-
-    A ContextSuite executes fixtures (setup and teardown functions or
-    methods) for the context containing its tests.
-
-    The context may be explicitly passed. If it is not, a context (or
-    nested set of contexts) will be constructed by examining the tests
-    in the suite.
-    """
-    failureException = unittest.TestCase.failureException
-    was_setup = False
-    was_torndown = False
-    classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
-                     'setUpClass', 'setUpAll')
-    classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
-                     'teardownAll', 'tearDownClass', 'tearDownAll')
-    moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
-                   'setUp')
-    moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
-                      'teardown', 'tearDown')
-    packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
-    packageTeardown = ('teardown_package', 'teardownPackage',
-                       'tearDownPackage')
-
-    def __init__(self, tests=(), context=None, factory=None,
-                 config=None, resultProxy=None, can_split=True):
-        log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
-        self.context = context
-        self.factory = factory
-        if config is None:
-            config = Config()
-        self.config = config
-        self.resultProxy = resultProxy
-        self.has_run = False
-        self.can_split = can_split
-        self.error_context = None
-        super(ContextSuite, self).__init__(tests)
-
-    def __repr__(self):
-        return "<%s context=%s>" % (
-            _strclass(self.__class__),
-            getattr(self.context, '__name__', self.context))
-    __str__ = __repr__
-
-    def id(self):
-        if self.error_context:
-            return '%s:%s' % (repr(self), self.error_context)
-        else:
-            return repr(self)
-
-    def __hash__(self):
-        return object.__hash__(self)
-
-    # 2.3 compat -- force 2.4 call sequence
-    def __call__(self, *arg, **kw):
-        return self.run(*arg, **kw)
-
-    def exc_info(self):
-        """Hook for replacing error tuple output
-        """
-        return sys.exc_info()
-
-    def _exc_info(self):
-        """Bottleneck to fix up IronPython string exceptions
-        """
-        e = self.exc_info()
-        if sys.platform == 'cli':
-            if isinstance(e[0], StringException):
-                # IronPython throws these StringExceptions, but
-                # traceback checks type(etype) == str. Make a real
-                # string here.
-                e = (str(e[0]), e[1], e[2])
-
-        return e
-
-    def run(self, result):
-        """Run tests in suite inside of suite fixtures.
-        """
-        # proxy the result for myself
-        log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
-        #import pdb
-        #pdb.set_trace()
-        if self.resultProxy:
-            result, orig = self.resultProxy(result, self), result
-        else:
-            result, orig = result, result
-        try:
-            self.setUp()
-        except KeyboardInterrupt:
-            raise
-        except:
-            self.error_context = 'setup'
-            result.addError(self, self._exc_info())
-            return
-        try:
-            for test in self._tests:
-                if result.shouldStop:
-                    log.debug("stopping")
-                    break
-                # each nose.case.Test will create its own result proxy
-                # so the cases need the original result, to avoid proxy
-                # chains
-                test(orig)
-        finally:
-            self.has_run = True
-            try:
-                self.tearDown()
-            except KeyboardInterrupt:
-                raise
-            except:
-                self.error_context = 'teardown'
-                result.addError(self, self._exc_info())
-
-    def hasFixtures(self, ctx_callback=None):
-        context = self.context
-        if context is None:
-            return False
-        if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
-            return True
-        # My context doesn't have any, but its ancestors might
-        factory = self.factory
-        if factory:
-            ancestors = factory.context.get(self, [])
-            for ancestor in ancestors:
-                if self.implementsAnyFixture(
-                    ancestor, ctx_callback=ctx_callback):
-                    return True
-        return False
-
-    def implementsAnyFixture(self, context, ctx_callback):
-        if isclass(context):
-            names = self.classSetup + self.classTeardown
-        else:
-            names = self.moduleSetup + self.moduleTeardown
-            if hasattr(context, '__path__'):
-                names += self.packageSetup + self.packageTeardown
-        # If my context has any fixture attribute, I have fixtures
-        fixt = False
-        for m in names:
-            if hasattr(context, m):
-                fixt = True
-                break
-        if ctx_callback is None:
-            return fixt
-        return ctx_callback(context, fixt)
-
-    def setUp(self):
-        log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
-        if not self:
-            # I have no tests
-            log.debug("suite %s has no tests", id(self))
-            return
-        if self.was_setup:
-            log.debug("suite %s already set up", id(self))
-            return
-        context = self.context
-        if context is None:
-            return
-        # before running my own context's setup, I need to
-        # ask the factory if my context's contexts' setups have been run
-        factory = self.factory
-        if factory:
-            # get a copy, since we'll be destroying it as we go
-            ancestors = factory.context.get(self, [])[:]
-            while ancestors:
-                ancestor = ancestors.pop()
-                log.debug("ancestor %s may need setup", ancestor)
-                if ancestor in factory.was_setup:
-                    continue
-                log.debug("ancestor %s does need setup", ancestor)
-                self.setupContext(ancestor)
-            if not context in factory.was_setup:
-                self.setupContext(context)
-        else:
-            self.setupContext(context)
-        self.was_setup = True
-        log.debug("completed suite setup")
-
-    def setupContext(self, context):
-        self.config.plugins.startContext(context)
-        log.debug("%s setup context %s", self, context)
-        if self.factory:
-            if context in self.factory.was_setup:
-                return
-            # note that I ran the setup for this context, so that I'll run
-            # the teardown in my teardown
-            self.factory.was_setup[context] = self
-        if isclass(context):
-            names = self.classSetup
-        else:
-            names = self.moduleSetup
-            if hasattr(context, '__path__'):
-                names = self.packageSetup + names
-        try_run(context, names)
-
-    def shortDescription(self):
-        if self.context is None:
-            return "test suite"
-        return "test suite for %s" % self.context
-
-    def tearDown(self):
-        log.debug('context teardown')
-        if not self.was_setup or self.was_torndown:
-            log.debug(
-                "No reason to teardown (was_setup? %s was_torndown? %s)"
-                % (self.was_setup, self.was_torndown))
-            return
-        self.was_torndown = True
-        context = self.context
-        if context is None:
-            log.debug("No context to tear down")
-            return
-
-        # for each ancestor... if the ancestor was setup
-        # and I did the setup, I can do teardown
-        factory = self.factory
-        if factory:
-            ancestors = factory.context.get(self, []) + [context]
-            for ancestor in ancestors:
-                log.debug('ancestor %s may need teardown', ancestor)
-                if not ancestor in factory.was_setup:
-                    log.debug('ancestor %s was not setup', ancestor)
-                    continue
-                if ancestor in factory.was_torndown:
-                    log.debug('ancestor %s already torn down', ancestor)
-                    continue
-                setup = factory.was_setup[ancestor]
-                log.debug("%s setup ancestor %s", setup, ancestor)
-                if setup is self:
-                    self.teardownContext(ancestor)
-        else:
-            self.teardownContext(context)
-
-    def teardownContext(self, context):
-        log.debug("%s teardown context %s", self, context)
-        if self.factory:
-            if context in self.factory.was_torndown:
-                return
-            self.factory.was_torndown[context] = self
-        if isclass(context):
-            names = self.classTeardown
-        else:
-            names = self.moduleTeardown
-            if hasattr(context, '__path__'):
-                names = self.packageTeardown + names
-        try_run(context, names)
-        self.config.plugins.stopContext(context)
-
-    # FIXME the wrapping has to move to the factory?
-    def _get_wrapped_tests(self):
-        for test in self._get_tests():
-            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
-                yield test
-            else:
-                yield Test(test,
-                           config=self.config,
-                           resultProxy=self.resultProxy)
-
-    _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
-                      "Access the tests in this suite. Tests are returned "
-                      "inside of a context wrapper.")
-
-
-class ContextSuiteFactory(object):
-    """Factory for ContextSuites. Called with a collection of tests,
-    the factory decides on a hierarchy of contexts by introspecting
-    the collection or the tests themselves to find the objects
-    containing the test objects. It always returns one suite, but that
-    suite may consist of a hierarchy of nested suites.
-    """
-    suiteClass = ContextSuite
-    def __init__(self, config=None, suiteClass=None, resultProxy=_def):
-        if config is None:
-            config = Config()
-        self.config = config
-        if suiteClass is not None:
-            self.suiteClass = suiteClass
-        # Using a singleton to represent default instead of None allows
-        # passing resultProxy=None to turn proxying off.
-        if resultProxy is _def:
-            resultProxy = ResultProxyFactory(config=config)
-        self.resultProxy = resultProxy
-        self.suites = {}
-        self.context = {}
-        self.was_setup = {}
-        self.was_torndown = {}
-
-    def __call__(self, tests, **kw):
-        """Return ``ContextSuite`` for tests. ``tests`` may either
-        be a callable (in which case the resulting ContextSuite will
-        have no parent context and be evaluated lazily) or an
-        iterable. In that case the tests will wrapped in
-        nose.case.Test, be examined and the context of each found and a
-        suite of suites returned, organized into a stack with the
-        outermost suites belonging to the outermost contexts.
-        """
-        log.debug("Create suite for %s", tests)
-        context = kw.pop('context', getattr(tests, 'context', None))
-        log.debug("tests %s context %s", tests, context)
-        if context is None:
-            tests = self.wrapTests(tests)
-            try:
-                context = self.findContext(tests)
-            except MixedContextError:
-                return self.makeSuite(self.mixedSuites(tests), None, **kw)
-        return self.makeSuite(tests, context, **kw)
-
-    def ancestry(self, context):
-        """Return the ancestry of the context (that is, all of the
-        packages and modules containing the context), in order of
-        descent with the outermost ancestor last.
-        This method is a generator.
-        """
-        log.debug("get ancestry %s", context)
-        if context is None:
-            return
-        # Methods include reference to module they are defined in, we
-        # don't want that, instead want the module the class is in now
-        # (classes are re-ancestored elsewhere).
-        if hasattr(context, 'im_class'):
-            context = context.im_class
-        elif hasattr(context, '__self__'):
-            context = context.__self__.__class__
-        if hasattr(context, '__module__'):
-            ancestors = context.__module__.split('.')
-        elif hasattr(context, '__name__'):
-            ancestors = context.__name__.split('.')[:-1]
-        else:
-            raise TypeError("%s has no ancestors?" % context)
-        while ancestors:
-            log.debug(" %s ancestors %s", context, ancestors)
-            yield resolve_name('.'.join(ancestors))
-            ancestors.pop()
-
-    def findContext(self, tests):
-        if callable(tests) or isinstance(tests, unittest.TestSuite):
-            return None
-        context = None
-        for test in tests:
-            # Don't look at suites for contexts, only tests
-            ctx = getattr(test, 'context', None)
-            if ctx is None:
-                continue
-            if context is None:
-                context = ctx
-            elif context != ctx:
-                raise MixedContextError(
-                    "Tests with different contexts in same suite! %s != %s"
-                    % (context, ctx))
-        return context
-
-    def makeSuite(self, tests, context, **kw):
-        suite = self.suiteClass(
-            tests, context=context, config=self.config, factory=self,
-            resultProxy=self.resultProxy, **kw)
-        if context is not None:
-            self.suites.setdefault(context, []).append(suite)
-            self.context.setdefault(suite, []).append(context)
-            log.debug("suite %s has context %s", suite,
-                      getattr(context, '__name__', None))
-            for ancestor in self.ancestry(context):
-                self.suites.setdefault(ancestor, []).append(suite)
-                self.context[suite].append(ancestor)
-                log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
-        return suite
-
-    def mixedSuites(self, tests):
-        """The complex case where there are tests that don't all share
-        the same context. Groups tests into suites with common ancestors,
-        according to the following (essentially tail-recursive) procedure:
-
-        Starting with the context of the first test, if it is not
-        None, look for tests in the remaining tests that share that
-        ancestor. If any are found, group into a suite with that
-        ancestor as the context, and replace the current suite with
-        that suite. Continue this process for each ancestor of the
-        first test, until all ancestors have been processed. At this
-        point if any tests remain, recurse with those tests as the
-        input, returning a list of the common suite (which may be the
-        suite or test we started with, if no common tests were found)
-        plus the results of recursion.
-        """
-        if not tests:
-            return []
-        head = tests.pop(0)
-        if not tests:
-            return [head] # short circuit when none are left to combine
-        suite = head # the common ancestry suite, so far
-        tail = tests[:]
-        context = getattr(head, 'context', None)
-        if context is not None:
-            ancestors = [context] + [a for a in self.ancestry(context)]
-            for ancestor in ancestors:
-                common = [suite] # tests with ancestor in common, so far
-                remain = [] # tests that remain to be processed
-                for test in tail:
-                    found_common = False
-                    test_ctx = getattr(test, 'context', None)
-                    if test_ctx is None:
-                        remain.append(test)
-                        continue
-                    if test_ctx is ancestor:
-                        common.append(test)
-                        continue
-                    for test_ancestor in self.ancestry(test_ctx):
-                        if test_ancestor is ancestor:
-                            common.append(test)
-                            found_common = True
-                            break
-                    if not found_common:
-                        remain.append(test)
-                if common:
-                    suite = self.makeSuite(common, ancestor)
-                tail = self.mixedSuites(remain)
-        return [suite] + tail
-
-    def wrapTests(self, tests):
-        log.debug("wrap %s", tests)
-        if callable(tests) or isinstance(tests, unittest.TestSuite):
-            log.debug("I won't wrap")
-            return tests
-        wrapped = []
-        for test in tests:
-            log.debug("wrapping %s", test)
-            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
-                wrapped.append(test)
-            elif isinstance(test, ContextList):
-                wrapped.append(self.makeSuite(test, context=test.context))
-            else:
-                wrapped.append(
-                    Test(test, config=self.config, resultProxy=self.resultProxy)
-                    )
-        return wrapped
-
-
-class ContextList(object):
-    """Not quite a suite -- a group of tests in a context. This is used
-    to hint the ContextSuiteFactory about what context the tests
-    belong to, in cases where it may be ambiguous or missing.
-    """
-    def __init__(self, tests, context=None):
-        self.tests = tests
-        self.context = context
-
-    def __iter__(self):
-        return iter(self.tests)
-
-
-class FinalizingSuiteWrapper(unittest.TestSuite):
-    """Wraps suite and calls final function after suite has
-    executed. Used to call final functions in cases (like running in
-    the standard test runner) where test running is not under nose's
-    control.
-    """
-    def __init__(self, suite, finalize):
-        super(FinalizingSuiteWrapper, self).__init__()
-        self.suite = suite
-        self.finalize = finalize
-
-    def __call__(self, *arg, **kw):
-        return self.run(*arg, **kw)
-
-    # 2.7 compat
-    def __iter__(self):
-        return iter(self.suite)
-
-    def run(self, *arg, **kw):
-        try:
-            return self.suite(*arg, **kw)
-        finally:
-            self.finalize(*arg, **kw)
-
-
-# backwards compat -- sort of
-class TestDir:
-    def __init__(*arg, **kw):
-        raise NotImplementedError(
-            "TestDir is not usable with nose 0.10. The class is present "
-            "in nose.suite for backwards compatibility purposes but it "
-            "may not be used.")
-
-
-class TestModule:
-    def __init__(*arg, **kw):
-        raise NotImplementedError(
-            "TestModule is not usable with nose 0.10. The class is present "
-            "in nose.suite for backwards compatibility purposes but it "
-            "may not be used.")
diff --git a/lib/spack/external/nose/tools/__init__.py b/lib/spack/external/nose/tools/__init__.py
deleted file mode 100644
index 74dab16a74..0000000000
--- a/lib/spack/external/nose/tools/__init__.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-Tools for testing
------------------
-
-nose.tools provides a few convenience functions to make writing tests
-easier. You don't have to use them; nothing in the rest of nose depends
-on any of these methods.
-
-"""
-from nose.tools.nontrivial import *
-from nose.tools.nontrivial import __all__ as nontrivial_all
-from nose.tools.trivial import *
-from nose.tools.trivial import __all__ as trivial_all
-
-__all__ = trivial_all + nontrivial_all
diff --git a/lib/spack/external/nose/tools/nontrivial.py b/lib/spack/external/nose/tools/nontrivial.py
deleted file mode 100644
index 283973245b..0000000000
--- a/lib/spack/external/nose/tools/nontrivial.py
+++ /dev/null
@@ -1,151 +0,0 @@
-"""Tools not exempt from being descended into in tracebacks"""
-
-import time
-
-
-__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
-           'TimeExpired', 'istest', 'nottest']
-
-
-class TimeExpired(AssertionError):
-    pass
-
-
-def make_decorator(func):
-    """
-    Wraps a test decorator so as to properly replicate metadata
-    of the decorated function, including nose's additional stuff
-    (namely, setup and teardown).
-    """
-    def decorate(newfunc):
-        if hasattr(func, 'compat_func_name'):
-            name = func.compat_func_name
-        else:
-            name = func.__name__
-        newfunc.__dict__ = func.__dict__
-        newfunc.__doc__ = func.__doc__
-        newfunc.__module__ = func.__module__
-        if not hasattr(newfunc, 'compat_co_firstlineno'):
-            newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
-        try:
-            newfunc.__name__ = name
-        except TypeError:
-            # can't set func name in 2.3
-            newfunc.compat_func_name = name
-        return newfunc
-    return decorate
-
-
-def raises(*exceptions):
-    """Test must raise one of expected exceptions to pass.
-
-    Example use::
-
-      @raises(TypeError, ValueError)
-      def test_raises_type_error():
-          raise TypeError("This test passes")
-
-      @raises(Exception)
-      def test_that_fails_by_passing():
-          pass
-
-    If you want to test many assertions about exceptions in a single test,
-    you may want to use `assert_raises` instead.
-    """
-    valid = ' or '.join([e.__name__ for e in exceptions])
-    def decorate(func):
-        name = func.__name__
-        def newfunc(*arg, **kw):
-            try:
-                func(*arg, **kw)
-            except exceptions:
-                pass
-            except:
-                raise
-            else:
-                message = "%s() did not raise %s" % (name, valid)
-                raise AssertionError(message)
-        newfunc = make_decorator(func)(newfunc)
-        return newfunc
-    return decorate
-
-
-def set_trace():
-    """Call pdb.set_trace in the calling frame, first restoring
-    sys.stdout to the real output stream. Note that sys.stdout is NOT
-    reset to whatever it was before the call once pdb is done!
-    """
-    import pdb
-    import sys
-    stdout = sys.stdout
-    sys.stdout = sys.__stdout__
-    pdb.Pdb().set_trace(sys._getframe().f_back)
-
-
-def timed(limit):
-    """Test must finish within specified time limit to pass.
-
-    Example use::
-
-      @timed(.1)
-      def test_that_fails():
-          time.sleep(.2)
-    """
-    def decorate(func):
-        def newfunc(*arg, **kw):
-            start = time.time()
-            result = func(*arg, **kw)
-            end = time.time()
-            if end - start > limit:
-                raise TimeExpired("Time limit (%s) exceeded" % limit)
-            return result
-        newfunc = make_decorator(func)(newfunc)
-        return newfunc
-    return decorate
-
-
-def with_setup(setup=None, teardown=None):
-    """Decorator to add setup and/or teardown methods to a test function::
-
-      @with_setup(setup, teardown)
-      def test_something():
-          " ... "
-
-    Note that `with_setup` is useful *only* for test functions, not for test
-    methods or inside of TestCase subclasses.
-    """
-    def decorate(func, setup=setup, teardown=teardown):
-        if setup:
-            if hasattr(func, 'setup'):
-                _old_s = func.setup
-                def _s():
-                    setup()
-                    _old_s()
-                func.setup = _s
-            else:
-                func.setup = setup
-        if teardown:
-            if hasattr(func, 'teardown'):
-                _old_t = func.teardown
-                def _t():
-                    _old_t()
-                    teardown()
-                func.teardown = _t
-            else:
-                func.teardown = teardown
-        return func
-    return decorate
-
-
-def istest(func):
-    """Decorator to mark a function or method as a test
-    """
-    func.__test__ = True
-    return func
-
-
-def nottest(func):
-    """Decorator to mark a function or method as *not* a test
-    """
-    func.__test__ = False
-    return func
diff --git a/lib/spack/external/nose/tools/trivial.py b/lib/spack/external/nose/tools/trivial.py
deleted file mode 100644
index cf83efeda5..0000000000
--- a/lib/spack/external/nose/tools/trivial.py
+++ /dev/null
@@ -1,54 +0,0 @@
-"""Tools so trivial that tracebacks should not descend into them
-
-We define the ``__unittest`` symbol in their module namespace so unittest will
-skip them when printing tracebacks, just as it does for their corresponding
-methods in ``unittest`` proper.
-
-"""
-import re
-import unittest
-
-
-__all__ = ['ok_', 'eq_']
-
-# Use the same flag as unittest itself to prevent descent into these functions:
-__unittest = 1
-
-
-def ok_(expr, msg=None):
-    """Shorthand for assert. Saves 3 whole characters!
-    """
-    if not expr:
-        raise AssertionError(msg)
-
-
-def eq_(a, b, msg=None):
-    """Shorthand for 'assert a == b, "%r != %r" % (a, b)
-    """
-    if not a == b:
-        raise AssertionError(msg or "%r != %r" % (a, b))
-
-
-#
-# Expose assert* from unittest.TestCase
-# - give them pep8 style names
-#
-caps = re.compile('([A-Z])')
-
-def pep8(name):
-    return caps.sub(lambda m: '_' + m.groups()[0].lower(), name)
-
-class Dummy(unittest.TestCase):
-    def nop():
-        pass
-_t = Dummy('nop')
-
-for at in [ at for at in dir(_t)
-            if at.startswith('assert') and not '_' in at ]:
-    pepd = pep8(at)
-    vars()[pepd] = getattr(_t, at)
-    __all__.append(pepd)
-
-del Dummy
-del _t
-del pep8
diff --git a/lib/spack/external/nose/twistedtools.py b/lib/spack/external/nose/twistedtools.py
deleted file mode 100644
index 8d9c6ffe9b..0000000000
--- a/lib/spack/external/nose/twistedtools.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""
-Twisted integration
--------------------
-
-This module provides a very simple way to integrate your tests with the
-Twisted_ event loop.
-
-You must import this module *before* importing anything from Twisted itself!
-
-Example::
-
-  from nose.twistedtools import reactor, deferred
-  
-  @deferred()
-  def test_resolve():
-      return reactor.resolve("www.python.org")
-
-Or, more realistically::
-
-  @deferred(timeout=5.0)
-  def test_resolve():
-      d = reactor.resolve("www.python.org")
-      def check_ip(ip):
-          assert ip == "67.15.36.43"
-      d.addCallback(check_ip)
-      return d
-
-.. _Twisted: http://twistedmatrix.com/trac/
-"""
-
-import sys
-from Queue import Queue, Empty
-from nose.tools import make_decorator, TimeExpired
-
-__all__ = [
-    'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
-    'stop_reactor'
-]
-
-_twisted_thread = None
-
-def threaded_reactor():
-    """
-    Start the Twisted reactor in a separate thread, if not already done.
-    Returns the reactor.
-    The thread will automatically be destroyed when all the tests are done.
-    """
-    global _twisted_thread
-    try:
-        from twisted.internet import reactor
-    except ImportError:
-        return None, None
-    if not _twisted_thread:
-        from twisted.python import threadable
-        from threading import Thread
-        _twisted_thread = Thread(target=lambda: reactor.run( \
-                installSignalHandlers=False))
-        _twisted_thread.setDaemon(True)
-        _twisted_thread.start()
-    return reactor, _twisted_thread
-
-# Export global reactor variable, as Twisted does
-reactor, reactor_thread = threaded_reactor()
-
-
-def stop_reactor():
-    """Stop the reactor and join the reactor thread until it stops.
-    Call this function in teardown at the module or package level to
-    reset the twisted system after your tests. You *must* do this if
-    you mix tests using these tools and tests using twisted.trial.
-    """
-    global _twisted_thread
-
-    def stop_reactor():
-        '''Helper for calling stop from withing the thread.'''
-        reactor.stop()
-
-    reactor.callFromThread(stop_reactor)
-    reactor_thread.join()
-    for p in reactor.getDelayedCalls():
-        if p.active():
-            p.cancel()
-    _twisted_thread = None
-
-
-def deferred(timeout=None):
-    """
-    By wrapping a test function with this decorator, you can return a
-    twisted Deferred and the test will wait for the deferred to be triggered.
-    The whole test function will run inside the Twisted event loop.
-
-    The optional timeout parameter specifies the maximum duration of the test.
-    The difference with timed() is that timed() will still wait for the test
-    to end, while deferred() will stop the test when its timeout has expired.
-    The latter is more desireable when dealing with network tests, because
-    the result may actually never arrive.
-
-    If the callback is triggered, the test has passed.
-    If the errback is triggered or the timeout expires, the test has failed.
-
-    Example::
-    
-        @deferred(timeout=5.0)
-        def test_resolve():
-            return reactor.resolve("www.python.org")
-
-    Attention! If you combine this decorator with other decorators (like
-    "raises"), deferred() must be called *first*!
-
-    In other words, this is good::
-        
-        @raises(DNSLookupError)
-        @deferred()
-        def test_error():
-            return reactor.resolve("xxxjhjhj.biz")
-
-    and this is bad::
-        
-        @deferred()
-        @raises(DNSLookupError)
-        def test_error():
-            return reactor.resolve("xxxjhjhj.biz")
-    """
-    reactor, reactor_thread = threaded_reactor()
-    if reactor is None:
-        raise ImportError("twisted is not available or could not be imported")
-    # Check for common syntax mistake
-    # (otherwise, tests can be silently ignored
-    # if one writes "@deferred" instead of "@deferred()")
-    try:
-        timeout is None or timeout + 0
-    except TypeError:
-        raise TypeError("'timeout' argument must be a number or None")
-
-    def decorate(func):
-        def wrapper(*args, **kargs):
-            q = Queue()
-            def callback(value):
-                q.put(None)
-            def errback(failure):
-                # Retrieve and save full exception info
-                try:
-                    failure.raiseException()
-                except:
-                    q.put(sys.exc_info())
-            def g():
-                try:
-                    d = func(*args, **kargs)
-                    try:
-                        d.addCallbacks(callback, errback)
-                    # Check for a common mistake and display a nice error
-                    # message
-                    except AttributeError:
-                        raise TypeError("you must return a twisted Deferred "
-                                        "from your test case!")
-                # Catch exceptions raised in the test body (from the
-                # Twisted thread)
-                except:
-                    q.put(sys.exc_info())
-            reactor.callFromThread(g)
-            try:
-                error = q.get(timeout=timeout)
-            except Empty:
-                raise TimeExpired("timeout expired before end of test (%f s.)"
-                                  % timeout)
-            # Re-raise all exceptions
-            if error is not None:
-                exc_type, exc_value, tb = error
-                raise exc_type, exc_value, tb
-        wrapper = make_decorator(func)(wrapper)
-        return wrapper
-    return decorate
-
diff --git a/lib/spack/external/nose/usage.txt b/lib/spack/external/nose/usage.txt
deleted file mode 100644
index bc96894ab7..0000000000
--- a/lib/spack/external/nose/usage.txt
+++ /dev/null
@@ -1,115 +0,0 @@
-nose collects tests automatically from python source files,
-directories and packages found in its working directory (which
-defaults to the current working directory). Any python source file,
-directory or package that matches the testMatch regular expression
-(by default: `(?:^|[\b_\.-])[Tt]est)` will be collected as a test (or
-source for collection of tests). In addition, all other packages
-found in the working directory will be examined for python source files
-or directories that match testMatch. Package discovery descends all
-the way down the tree, so package.tests and package.sub.tests and
-package.sub.sub2.tests will all be collected.
-
-Within a test directory or package, any python source file matching
-testMatch will be examined for test cases. Within a test module,
-functions and classes whose names match testMatch and TestCase
-subclasses with any name will be loaded and executed as tests. Tests
-may use the assert keyword or raise AssertionErrors to indicate test
-failure. TestCase subclasses may do the same or use the various
-TestCase methods available.
-
-**It is important to note that the default behavior of nose is to
-not include tests from files which are executable.**  To include
-tests from such files, remove their executable bit or use
-the --exe flag (see 'Options' section below).
-
-Selecting Tests
----------------
-
-To specify which tests to run, pass test names on the command line:
-
-  %prog only_test_this.py
-  
-Test names specified may be file or module names, and may optionally
-indicate the test case to run by separating the module or file name
-from the test case name with a colon. Filenames may be relative or
-absolute. Examples:
-
-  %prog test.module
-  %prog another.test:TestCase.test_method
-  %prog a.test:TestCase
-  %prog /path/to/test/file.py:test_function
-  
-You may also change the working directory where nose looks for tests
-by using the -w switch:
-
-  %prog -w /path/to/tests
-
-Note, however, that support for multiple -w arguments is now deprecated
-and will be removed in a future release. As of nose 0.10, you can get
-the same behavior by specifying the target directories *without*
-the -w switch:
-
-  %prog /path/to/tests /another/path/to/tests
-
-Further customization of test selection and loading is possible
-through the use of plugins.
-
-Test result output is identical to that of unittest, except for
-the additional features (error classes, and plugin-supplied
-features such as output capture and assert introspection) detailed
-in the options below.
-
-Configuration
--------------
-
-In addition to passing command-line options, you may also put
-configuration options in your project's *setup.cfg* file, or a .noserc
-or nose.cfg file in your home directory. In any of these standard
-ini-style config files, you put your nosetests configuration in a
-``[nosetests]`` section. Options are the same as on the command line,
-with the -- prefix removed. For options that are simple switches, you
-must supply a value:
-
-  [nosetests]
-  verbosity=3
-  with-doctest=1
-
-All configuration files that are found will be loaded and their
-options combined. You can override the standard config file loading
-with the ``-c`` option.
-
-Using Plugins
--------------
-
-There are numerous nose plugins available via easy_install and
-elsewhere. To use a plugin, just install it. The plugin will add
-command line options to nosetests. To verify that the plugin is installed,
-run:
-
-  nosetests --plugins
-
-You can add -v or -vv to that command to show more information
-about each plugin.
-
-If you are running nose.main() or nose.run() from a script, you
-can specify a list of plugins to use by passing a list of plugins
-with the plugins keyword argument.
-
-0.9 plugins
------------
-
-nose 1.0 can use SOME plugins that were written for nose 0.9. The
-default plugin manager inserts a compatibility wrapper around 0.9
-plugins that adapts the changed plugin api calls. However, plugins
-that access nose internals are likely to fail, especially if they
-attempt to access test case or test suite classes. For example,
-plugins that try to determine if a test passed to startTest is an
-individual test or a suite will fail, partly because suites are no
-longer passed to startTest and partly because it's likely that the
-plugin is trying to find out if the test is an instance of a class
-that no longer exists.
-
-0.10 and 0.11 plugins
----------------------
-
-All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
diff --git a/lib/spack/external/nose/util.py b/lib/spack/external/nose/util.py
deleted file mode 100644
index bfe16589ea..0000000000
--- a/lib/spack/external/nose/util.py
+++ /dev/null
@@ -1,668 +0,0 @@
-"""Utility functions and classes used by nose internally.
-"""
-import inspect
-import itertools
-import logging
-import stat
-import os
-import re
-import sys
-import types
-import unittest
-from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
-
-
-log = logging.getLogger('nose')
-
-ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
-class_types = (ClassType, TypeType)
-skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
-
-try:
-    set()
-    set = set # make from nose.util import set happy
-except NameError:
-    try:
-        from sets import Set as set
-    except ImportError:
-        pass
-
-
-def ls_tree(dir_path="",
-            skip_pattern=skip_pattern,
-            indent="|-- ", branch_indent="|   ",
-            last_indent="`-- ", last_branch_indent="    "):
-    # TODO: empty directories look like non-directory files
-    return "\n".join(_ls_tree_lines(dir_path, skip_pattern,
-                                    indent, branch_indent,
-                                    last_indent, last_branch_indent))
-
-
-def _ls_tree_lines(dir_path, skip_pattern,
-                   indent, branch_indent, last_indent, last_branch_indent):
-    if dir_path == "":
-        dir_path = os.getcwd()
-
-    lines = []
-
-    names = os.listdir(dir_path)
-    names.sort()
-    dirs, nondirs = [], []
-    for name in names:
-        if re.match(skip_pattern, name):
-            continue
-        if os.path.isdir(os.path.join(dir_path, name)):
-            dirs.append(name)
-        else:
-            nondirs.append(name)
-
-    # list non-directories first
-    entries = list(itertools.chain([(name, False) for name in nondirs],
-                                   [(name, True) for name in dirs]))
-    def ls_entry(name, is_dir, ind, branch_ind):
-        if not is_dir:
-            yield ind + name
-        else:
-            path = os.path.join(dir_path, name)
-            if not os.path.islink(path):
-                yield ind + name
-                subtree = _ls_tree_lines(path, skip_pattern,
-                                         indent, branch_indent,
-                                         last_indent, last_branch_indent)
-                for x in subtree:
-                    yield branch_ind + x
-    for name, is_dir in entries[:-1]:
-        for line in ls_entry(name, is_dir, indent, branch_indent):
-            yield line
-    if entries:
-        name, is_dir = entries[-1]
-        for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
-            yield line
-
-
-def absdir(path):
-    """Return absolute, normalized path to directory, if it exists; None
-    otherwise.
-    """
-    if not os.path.isabs(path):
-        path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
-                                                             path)))
-    if path is None or not os.path.isdir(path):
-        return None
-    return path
-
-
-def absfile(path, where=None):
-    """Return absolute, normalized path to file (optionally in directory
-    where), or None if the file can't be found either in where or the current
-    working directory.
-    """
-    orig = path
-    if where is None:
-        where = os.getcwd()
-    if isinstance(where, list) or isinstance(where, tuple):
-        for maybe_path in where:
-            maybe_abs = absfile(path, maybe_path)
-            if maybe_abs is not None:
-                return maybe_abs
-        return None
-    if not os.path.isabs(path):
-        path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
-    if path is None or not os.path.exists(path):
-        if where != os.getcwd():
-            # try the cwd instead
-            path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
-                                                                 orig)))
-    if path is None or not os.path.exists(path):
-        return None
-    if os.path.isdir(path):
-        # might want an __init__.py from pacakge
-        init = os.path.join(path,'__init__.py')
-        if os.path.isfile(init):
-            return init
-    elif os.path.isfile(path):
-        return path
-    return None
-
-
-def anyp(predicate, iterable):
-    for item in iterable:
-        if predicate(item):
-            return True
-    return False
-
-
-def file_like(name):
-    """A name is file-like if it is a path that exists, or it has a
-    directory part, or it ends in .py, or it isn't a legal python
-    identifier.
-    """
-    return (os.path.exists(name)
-            or os.path.dirname(name)
-            or name.endswith('.py')
-            or not ident_re.match(os.path.splitext(name)[0]))
-
-
-def func_lineno(func):
-    """Get the line number of a function. First looks for
-    compat_co_firstlineno, then func_code.co_first_lineno.
-    """
-    try:
-        return func.compat_co_firstlineno
-    except AttributeError:
-        try:
-            return func.func_code.co_firstlineno
-        except AttributeError:
-            return -1
-
-
-def isclass(obj):
-    """Is obj a class? Inspect's isclass is too liberal and returns True
-    for objects that can't be subclasses of anything.
-    """
-    obj_type = type(obj)
-    return obj_type in class_types or issubclass(obj_type, type)
-
-
-# backwards compat (issue #64)
-is_generator = isgenerator
-
-
-def ispackage(path):
-    """
-    Is this path a package directory?
-
-    >>> ispackage('nose')
-    True
-    >>> ispackage('unit_tests')
-    False
-    >>> ispackage('nose/plugins')
-    True
-    >>> ispackage('nose/loader.py')
-    False
-    """
-    if os.path.isdir(path):
-        # at least the end of the path must be a legal python identifier
-        # and __init__.py[co] must exist
-        end = os.path.basename(path)
-        if ident_re.match(end):
-            for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
-                if os.path.isfile(os.path.join(path, init)):
-                    return True
-            if sys.platform.startswith('java') and \
-                    os.path.isfile(os.path.join(path, '__init__$py.class')):
-                return True
-    return False
-
-
-def isproperty(obj):
-    """
-    Is this a property?
-
-    >>> class Foo:
-    ...     def got(self):
-    ...         return 2
-    ...     def get(self):
-    ...         return 1
-    ...     get = property(get)
-
-    >>> isproperty(Foo.got)
-    False
-    >>> isproperty(Foo.get)
-    True
-    """
-    return type(obj) == property
-
-
-def getfilename(package, relativeTo=None):
-    """Find the python source file for a package, relative to a
-    particular directory (defaults to current working directory if not
-    given).
-    """
-    if relativeTo is None:
-        relativeTo = os.getcwd()
-    path = os.path.join(relativeTo, os.sep.join(package.split('.')))
-    if os.path.exists(path + '/__init__.py'):
-        return path
-    filename = path + '.py'
-    if os.path.exists(filename):
-        return filename
-    return None
-
-
-def getpackage(filename):
-    """
-    Find the full dotted package name for a given python source file
-    name. Returns None if the file is not a python source file.
-
-    >>> getpackage('foo.py')
-    'foo'
-    >>> getpackage('biff/baf.py')
-    'baf'
-    >>> getpackage('nose/util.py')
-    'nose.util'
-
-    Works for directories too.
-
-    >>> getpackage('nose')
-    'nose'
-    >>> getpackage('nose/plugins')
-    'nose.plugins'
-
-    And __init__ files stuck onto directories
-
-    >>> getpackage('nose/plugins/__init__.py')
-    'nose.plugins'
-
-    Absolute paths also work.
-
-    >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
-    >>> getpackage(path)
-    'nose.plugins'
-    """
-    src_file = src(filename)
-    if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
-        return None
-    base, ext = os.path.splitext(os.path.basename(src_file))
-    if base == '__init__':
-        mod_parts = []
-    else:
-        mod_parts = [base]
-    path, part = os.path.split(os.path.split(src_file)[0])
-    while part:
-        if ispackage(os.path.join(path, part)):
-            mod_parts.append(part)
-        else:
-            break
-        path, part = os.path.split(path)
-    mod_parts.reverse()
-    return '.'.join(mod_parts)
-
-
-def ln(label):
-    """Draw a 70-char-wide divider, with label in the middle.
-
-    >>> ln('hello there')
-    '---------------------------- hello there -----------------------------'
-    """
-    label_len = len(label) + 2
-    chunk = (70 - label_len) // 2
-    out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
-    pad = 70 - len(out)
-    if pad > 0:
-        out = out + ('-' * pad)
-    return out
-
-
-def resolve_name(name, module=None):
-    """Resolve a dotted name to a module and its parts. This is stolen
-    wholesale from unittest.TestLoader.loadTestByName.
-
-    >>> resolve_name('nose.util') #doctest: +ELLIPSIS
-    <module 'nose.util' from...>
-    >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
-    <function resolve_name at...>
-    """
-    parts = name.split('.')
-    parts_copy = parts[:]
-    if module is None:
-        while parts_copy:
-            try:
-                log.debug("__import__ %s", name)
-                module = __import__('.'.join(parts_copy))
-                break
-            except ImportError:
-                del parts_copy[-1]
-                if not parts_copy:
-                    raise
-        parts = parts[1:]
-    obj = module
-    log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
-    for part in parts:
-        obj = getattr(obj, part)
-    return obj
-
-
-def split_test_name(test):
-    """Split a test name into a 3-tuple containing file, module, and callable
-    names, any of which (but not all) may be blank.
-
-    Test names are in the form:
-
-    file_or_module:callable
-
-    Either side of the : may be dotted. To change the splitting behavior, you
-    can alter nose.util.split_test_re.
-    """
-    norm = os.path.normpath
-    file_or_mod = test
-    fn = None
-    if not ':' in test:
-        # only a file or mod part
-        if file_like(test):
-            return (norm(test), None, None)
-        else:
-            return (None, test, None)
-
-    # could be path|mod:callable, or a : in the file path someplace
-    head, tail = os.path.split(test)
-    if not head:
-        # this is a case like 'foo:bar' -- generally a module
-        # name followed by a callable, but also may be a windows
-        # drive letter followed by a path
-        try:
-            file_or_mod, fn = test.split(':')
-            if file_like(fn):
-                # must be a funny path
-                file_or_mod, fn = test, None
-        except ValueError:
-            # more than one : in the test
-            # this is a case like c:\some\path.py:a_test
-            parts = test.split(':')
-            if len(parts[0]) == 1:
-                file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
-            else:
-                # nonsense like foo:bar:baz
-                raise ValueError("Test name '%s' could not be parsed. Please "
-                                 "format test names as path:callable or "
-                                 "module:callable." % (test,))
-    elif not tail:
-        # this is a case like 'foo:bar/'
-        # : must be part of the file path, so ignore it
-        file_or_mod = test
-    else:
-        if ':' in tail:
-            file_part, fn = tail.split(':')
-        else:
-            file_part = tail
-        file_or_mod = os.sep.join([head, file_part])
-    if file_or_mod:
-        if file_like(file_or_mod):
-            return (norm(file_or_mod), None, fn)
-        else:
-            return (None, file_or_mod, fn)
-    else:
-        return (None, None, fn)
-split_test_name.__test__ = False # do not collect
-
-
-def test_address(test):
-    """Find the test address for a test, which may be a module, filename,
-    class, method or function.
-    """
-    if hasattr(test, "address"):
-        return test.address()
-    # type-based polymorphism sucks in general, but I believe is
-    # appropriate here
-    t = type(test)
-    file = module = call = None
-    if t == types.ModuleType:
-        file = getattr(test, '__file__', None)
-        module = getattr(test, '__name__', None)
-        return (src(file), module, call)
-    if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
-        module = getattr(test, '__module__', None)
-        if module is not None:
-            m = sys.modules[module]
-            file = getattr(m, '__file__', None)
-            if file is not None:
-                file = os.path.abspath(file)
-        call = getattr(test, '__name__', None)
-        return (src(file), module, call)
-    if t == types.MethodType:
-        cls_adr = test_address(test.im_class)
-        return (src(cls_adr[0]), cls_adr[1],
-                "%s.%s" % (cls_adr[2], test.__name__))
-    # handle unittest.TestCase instances
-    if isinstance(test, unittest.TestCase):
-        if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
-            or hasattr(test, '_testFunc')):              # 2.7
-            # unittest FunctionTestCase
-            try:
-                return test_address(test._FunctionTestCase__testFunc)
-            except AttributeError:
-                return test_address(test._testFunc)
-        # regular unittest.TestCase
-        cls_adr = test_address(test.__class__)
-        # 2.5 compat: __testMethodName changed to _testMethodName
-        try:
-            method_name = test._TestCase__testMethodName
-        except AttributeError:
-            method_name = test._testMethodName
-        return (src(cls_adr[0]), cls_adr[1],
-                "%s.%s" % (cls_adr[2], method_name))
-    if (hasattr(test, '__class__') and
-            test.__class__.__module__ not in ('__builtin__', 'builtins')):
-        return test_address(test.__class__)
-    raise TypeError("I don't know what %s is (%s)" % (test, t))
-test_address.__test__ = False # do not collect
-
-
-def try_run(obj, names):
-    """Given a list of possible method names, try to run them with the
-    provided object. Keep going until something works. Used to run
-    setup/teardown methods for module, package, and function tests.
-    """
-    for name in names:
-        func = getattr(obj, name, None)
-        if func is not None:
-            if type(obj) == types.ModuleType:
-                # py.test compatibility
-                if isinstance(func, types.FunctionType):
-                    args, varargs, varkw, defaults = \
-                        inspect.getargspec(func)
-                else:
-                    # Not a function. If it's callable, call it anyway
-                    if hasattr(func, '__call__') and not inspect.ismethod(func):
-                        func = func.__call__
-                    try:
-                        args, varargs, varkw, defaults = \
-                            inspect.getargspec(func)
-                        args.pop(0) # pop the self off
-                    except TypeError:
-                        raise TypeError("Attribute %s of %r is not a python "
-                                        "function. Only functions or callables"
-                                        " may be used as fixtures." %
-                                        (name, obj))
-                if len(args):
-                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
-                    return func(obj)
-            log.debug("call fixture %s.%s", obj, name)
-            return func()
-
-
-def src(filename):
-    """Find the python source file for a .pyc, .pyo or $py.class file on
-    jython. Returns the filename provided if it is not a python source
-    file.
-    """
-    if filename is None:
-        return filename
-    if sys.platform.startswith('java') and filename.endswith('$py.class'):
-        return '.'.join((filename[:-9], 'py'))
-    base, ext = os.path.splitext(filename)
-    if ext in ('.pyc', '.pyo', '.py'):
-        return '.'.join((base, 'py'))
-    return filename
-
-
-def regex_last_key(regex):
-    """Sort key function factory that puts items that match a
-    regular expression last.
-
-    >>> from nose.config import Config
-    >>> from nose.pyversion import sort_list
-    >>> c = Config()
-    >>> regex = c.testMatch
-    >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
-    >>> sort_list(entries, regex_last_key(regex))
-    >>> entries
-    ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
-    """
-    def k(obj):
-        if regex.search(obj):
-            return (1, obj)
-        return (0, obj)
-    return k
-
-
-def tolist(val):
-    """Convert a value that may be a list or a (possibly comma-separated)
-    string into a list. The exception: None is returned as None, not [None].
-
-    >>> tolist(["one", "two"])
-    ['one', 'two']
-    >>> tolist("hello")
-    ['hello']
-    >>> tolist("separate,values, with, commas,  spaces , are    ,ok")
-    ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
-    """
-    if val is None:
-        return None
-    try:
-        # might already be a list
-        val.extend([])
-        return val
-    except AttributeError:
-        pass
-    # might be a string
-    try:
-        return re.split(r'\s*,\s*', val)
-    except TypeError:
-        # who knows...
-        return list(val)
-
-
-class odict(dict):
-    """Simple ordered dict implementation, based on:
-
-    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
-    """
-    def __init__(self, *arg, **kw):
-        self._keys = []
-        super(odict, self).__init__(*arg, **kw)
-
-    def __delitem__(self, key):
-        super(odict, self).__delitem__(key)
-        self._keys.remove(key)
-
-    def __setitem__(self, key, item):
-        super(odict, self).__setitem__(key, item)
-        if key not in self._keys:
-            self._keys.append(key)
-
-    def __str__(self):
-        return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
-
-    def clear(self):
-        super(odict, self).clear()
-        self._keys = []
-
-    def copy(self):
-        d = super(odict, self).copy()
-        d._keys = self._keys[:]
-        return d
-
-    def items(self):
-        return zip(self._keys, self.values())
-
-    def keys(self):
-        return self._keys[:]
-
-    def setdefault(self, key, failobj=None):
-        item = super(odict, self).setdefault(key, failobj)
-        if key not in self._keys:
-            self._keys.append(key)
-        return item
-
-    def update(self, dict):
-        super(odict, self).update(dict)
-        for key in dict.keys():
-            if key not in self._keys:
-                self._keys.append(key)
-
-    def values(self):
-        return map(self.get, self._keys)
-
-
-def transplant_func(func, module):
-    """
-    Make a function imported from module A appear as if it is located
-    in module B.
-
-    >>> from pprint import pprint
-    >>> pprint.__module__
-    'pprint'
-    >>> pp = transplant_func(pprint, __name__)
-    >>> pp.__module__
-    'nose.util'
-
-    The original function is not modified.
-
-    >>> pprint.__module__
-    'pprint'
-
-    Calling the transplanted function calls the original.
-
-    >>> pp([1, 2])
-    [1, 2]
-    >>> pprint([1,2])
-    [1, 2]
-
-    """
-    from nose.tools import make_decorator
-    if isgenerator(func):
-        def newfunc(*arg, **kw):
-            for v in func(*arg, **kw):
-                yield v
-    else:
-        def newfunc(*arg, **kw):
-            return func(*arg, **kw)
-
-    newfunc = make_decorator(func)(newfunc)
-    newfunc.__module__ = module
-    return newfunc
-
-
-def transplant_class(cls, module):
-    """
-    Make a class appear to reside in `module`, rather than the module in which
-    it is actually defined.
-
-    >>> from nose.failure import Failure
-    >>> Failure.__module__
-    'nose.failure'
-    >>> Nf = transplant_class(Failure, __name__)
-    >>> Nf.__module__
-    'nose.util'
-    >>> Nf.__name__
-    'Failure'
-
-    """
-    class C(cls):
-        pass
-    C.__module__ = module
-    C.__name__ = cls.__name__
-    return C
-
-
-def safe_str(val, encoding='utf-8'):
-    try:
-        return str(val)
-    except UnicodeEncodeError:
-        if isinstance(val, Exception):
-            return ' '.join([safe_str(arg, encoding)
-                             for arg in val])
-        return unicode(val).encode(encoding)
-
-
-def is_executable(file):
-    if not os.path.exists(file):
-        return False
-    st = os.stat(file)
-    return bool(st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
-
-
-if __name__ == '__main__':
-    import doctest
-    doctest.testmod()
diff --git a/lib/spack/external/pyqver2.py b/lib/spack/external/pyqver2.py
index 4690239748..571e005524 100755
--- a/lib/spack/external/pyqver2.py
+++ b/lib/spack/external/pyqver2.py
@@ -57,7 +57,11 @@
     "hmac":             (2, 2),
     "hotshot":          (2, 2),
     "HTMLParser":       (2, 2),
-    "importlib":        (2, 7),
+# skip importlib until we can conditionally skip for pytest.
+# pytest tries to import this and catches the exception, but
+# the test will still fail.
+# TODO: can we exclude this with a comment like '# flake8: noqa'?
+#    "importlib":        (2, 7),
     "inspect":          (2, 1),
     "io":               (2, 6),
     "itertools":        (2, 3),
diff --git a/lib/spack/external/pytest.py b/lib/spack/external/pytest.py
new file mode 100644
index 0000000000..e376e417e8
--- /dev/null
+++ b/lib/spack/external/pytest.py
@@ -0,0 +1,28 @@
+# PYTHON_ARGCOMPLETE_OK
+"""
+pytest: unit and functional testing with Python.
+"""
+__all__ = [
+    'main',
+    'UsageError',
+    'cmdline',
+    'hookspec',
+    'hookimpl',
+    '__version__',
+]
+
+if __name__ == '__main__': # if run as a script or by 'python -m pytest'
+    # we trigger the below "else" condition by the following import
+    import pytest
+    raise SystemExit(pytest.main())
+
+# else we are imported
+
+from _pytest.config import (
+    main, UsageError, _preloadplugins, cmdline,
+    hookspec, hookimpl
+)
+from _pytest import __version__
+
+_preloadplugins() # to populate pytest.* namespace so help(pytest) works
+
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index 637d18cd63..331cf2b3c5 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -362,3 +362,15 @@ class RequiredAttributeError(ValueError):
 
     def __init__(self, message):
         super(RequiredAttributeError, self).__init__(message)
+
+
+def duplicate_stream(original):
+    """Duplicates a stream  at the os level.
+
+    :param stream original: original stream to be duplicated. Must have a
+        `fileno` callable attribute.
+
+    :return: duplicate of the original stream
+    :rtype: file like object
+    """
+    return os.fdopen(os.dup(original.fileno()))
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index 3d4972b3ae..b1d45214ab 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -30,6 +30,7 @@
 import select
 import sys
 
+import llnl.util.lang as lang
 import llnl.util.tty as tty
 import llnl.util.tty.color as color
 
@@ -147,9 +148,7 @@ def __init__(
     def __enter__(self):
         # Sets a daemon that writes to file what it reads from a pipe
         try:
-            fwd_input_stream = os.fdopen(
-                os.dup(self.input_stream.fileno())
-            )
+            fwd_input_stream = lang.duplicate_stream(self.input_stream)
             self.p = multiprocessing.Process(
                 target=self._spawn_writing_daemon,
                 args=(self.read, fwd_input_stream),
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index 0646f5cb32..fcf140617e 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -41,6 +41,7 @@
 
 # spack directory hierarchy
 lib_path       = join_path(spack_root, "lib", "spack")
+external_path  = join_path(lib_path, "external")
 build_env_path = join_path(lib_path, "env")
 module_path    = join_path(lib_path, "spack")
 platform_path  = join_path(module_path, 'platforms')
@@ -196,3 +197,8 @@
 __all__ += [
     'install_dependency_symlinks', 'flatten_dependencies',
     'DependencyConflictError', 'InstallError', 'ExternalPackageError']
+
+# Add default values for attributes that would otherwise be modified from
+# Spack main script
+debug = True
+spack_working_dir = None
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 501ace29b1..d83288860b 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -51,13 +51,14 @@
 Skimming this module is a nice way to get acquainted with the types of
 calls you can make from within the install() function.
 """
+import inspect
+import multiprocessing
 import os
+import shutil
 import sys
-import multiprocessing
 import traceback
-import inspect
-import shutil
 
+import llnl.util.lang as lang
 import llnl.util.tty as tty
 import spack
 import spack.store
@@ -579,7 +580,7 @@ def child_execution(child_connection, input_stream):
     try:
         # Forward sys.stdin to be able to activate / deactivate
         # verbosity pressing a key at run-time
-        input_stream = os.fdopen(os.dup(sys.stdin.fileno()))
+        input_stream = lang.duplicate_stream(sys.stdin)
         p = multiprocessing.Process(
             target=child_execution,
             args=(child_connection, input_stream)
diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py
index e712ba8e1d..bcc4524b4f 100644
--- a/lib/spack/spack/cmd/__init__.py
+++ b/lib/spack/spack/cmd/__init__.py
@@ -61,9 +61,19 @@
     if file.endswith(".py") and not re.search(ignore_files, file):
         cmd = re.sub(r'.py$', '', file)
         commands.append(cmd)
+commands.append('test')
 commands.sort()
 
 
+def remove_options(parser, *options):
+    """Remove some options from a parser."""
+    for option in options:
+        for action in parser._actions:
+            if vars(action)['option_strings'][0] == option:
+                parser._handle_conflict_resolve(None, [(option, action)])
+                break
+
+
 def get_cmd_function_name(name):
     return name.replace("-", "_")
 
diff --git a/lib/spack/spack/cmd/flake8.py b/lib/spack/spack/cmd/flake8.py
index a4c607a640..b8e28b0860 100644
--- a/lib/spack/spack/cmd/flake8.py
+++ b/lib/spack/spack/cmd/flake8.py
@@ -35,17 +35,18 @@
 from spack.util.executable import *
 
 description = "Runs source code style checks on Spack. Requires flake8."
-
-changed_files_path = os.path.join(spack.share_path, 'qa', 'changed_files')
-changed_files = Executable(changed_files_path)
 flake8 = None
+include_untracked = True
 
-#
-# This is a dict that maps:
-# filename pattern ->
-#    a flake8 exemption code ->
-#       list of patterns, for which matching lines should have codes applied.
-#
+"""List of directories to exclude from checks."""
+exclude_directories = [spack.external_path]
+
+"""
+This is a dict that maps:
+ filename pattern ->
+    a flake8 exemption code ->
+       list of patterns, for which matching lines should have codes applied.
+"""
 exemptions = {
     # exemptions applied only to package.py files.
     r'package.py$': {
@@ -77,6 +78,37 @@
                   for file_pattern, error_dict in exemptions.items())
 
 
+def changed_files():
+    """Get list of changed files in the Spack repository."""
+
+    git = which('git', required=True)
+
+    git_args = [
+        # Add changed files committed since branching off of develop
+        ['diff', '--name-only', '--diff-filter=ACMR', 'develop'],
+        # Add changed files that have been staged but not yet committed
+        ['diff', '--name-only', '--diff-filter=ACMR', '--cached'],
+        # Add changed files that are unstaged
+        ['diff', '--name-only', '--diff-filter=ACMR']]
+
+    # Add new files that are untracked
+    if include_untracked:
+        git_args.append(['ls-files', '--exclude-standard', '--other'])
+
+    excludes = [os.path.realpath(f) for f in exclude_directories]
+    changed = set()
+    for git_arg_list in git_args:
+        arg_list = git_arg_list + ['--', '*.py']
+
+        files = [f for f in git(*arg_list, output=str).split('\n') if f]
+        for f in files:
+            # don't look at files that are in the exclude locations
+            if any(os.path.realpath(f).startswith(e) for e in excludes):
+                continue
+            changed.add(f)
+    return sorted(changed)
+
+
 def filter_file(source, dest, output=False):
     """Filter a single file through all the patterns in exemptions."""
     with open(source) as infile:
@@ -114,14 +146,18 @@ def setup_parser(subparser):
     subparser.add_argument(
         '-r', '--root-relative', action='store_true', default=False,
         help="print root-relative paths (default is cwd-relative)")
+    subparser.add_argument(
+        '-U', '--no-untracked', dest='untracked', action='store_false',
+        default=True, help="Exclude untracked files from checks.")
     subparser.add_argument(
         'files', nargs=argparse.REMAINDER, help="specific files to check")
 
 
 def flake8(parser, args):
     # Just use this to check for flake8 -- we actually execute it with Popen.
-    global flake8
+    global flake8, include_untracked
     flake8 = which('flake8', required=True)
+    include_untracked = args.untracked
 
     temp = tempfile.mkdtemp()
     try:
@@ -135,9 +171,7 @@ def prefix_relative(path):
 
         with working_dir(spack.prefix):
             if not file_list:
-                file_list = changed_files('*.py', output=str)
-                file_list = [x for x in file_list.split('\n') if x]
-
+                file_list = changed_files()
             shutil.copy('.flake8', os.path.join(temp, '.flake8'))
 
         print '======================================================='
diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py
index 52c2a06778..2e0ab8b49e 100644
--- a/lib/spack/spack/cmd/test.py
+++ b/lib/spack/spack/cmd/test.py
@@ -22,71 +22,86 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import sys
 import os
+import re
+import argparse
+import pytest
+from StringIO import StringIO
 
-from llnl.util.filesystem import join_path, mkdirp
+from llnl.util.filesystem import *
 from llnl.util.tty.colify import colify
 
 import spack
-import spack.test
-from spack.fetch_strategy import FetchError
 
-description = "Run unit tests"
+description = "A thin wrapper around the pytest command."
 
 
 def setup_parser(subparser):
     subparser.add_argument(
-        'names', nargs='*', help="Names of tests to run.")
+        '-H', '--pytest-help', action='store_true', default=False,
+        help="print full pytest help message, showing advanced options.")
+
+    list_group = subparser.add_mutually_exclusive_group()
+    list_group.add_argument(
+        '-l', '--list', action='store_true', default=False,
+        help="list basic test names.")
+    list_group.add_argument(
+        '-L', '--long-list', action='store_true', default=False,
+        help="list the entire hierarchy of tests.")
     subparser.add_argument(
-        '-l', '--list', action='store_true', dest='list',
-        help="Show available tests")
-    subparser.add_argument(
-        '--createXmlOutput', action='store_true', dest='createXmlOutput',
-        help="Create JUnit XML from test results")
-    subparser.add_argument(
-        '--xmlOutputDir', dest='xmlOutputDir',
-        help="Nose creates XML files in this directory")
-    subparser.add_argument(
-        '-v', '--verbose', action='store_true', dest='verbose',
-        help="verbose output")
-
-
-class MockCache(object):
-
-    def store(self, copyCmd, relativeDst):
-        pass
-
-    def fetcher(self, targetPath, digest, **kwargs):
-        return MockCacheFetcher()
-
-
-class MockCacheFetcher(object):
-
-    def set_stage(self, stage):
-        pass
-
-    def fetch(self):
-        raise FetchError("Mock cache always fails for tests")
-
-    def __str__(self):
-        return "[mock fetcher]"
-
+        'tests', nargs=argparse.REMAINDER,
+        help="list of tests to run (will be passed to pytest -k).")
+
+
+def do_list(args, unknown_args):
+    """Print a lists of tests than what pytest offers."""
+    # Run test collection and get the tree out.
+    old_output = sys.stdout
+    try:
+        sys.stdout = output = StringIO()
+        pytest.main(['--collect-only'])
+    finally:
+        sys.stdout = old_output
+
+    # put the output in a more readable tree format.
+    lines = output.getvalue().split('\n')
+    output_lines = []
+    for line in lines:
+        match = re.match(r"(\s*)<([^ ]*) '([^']*)'", line)
+        if not match:
+            continue
+        indent, nodetype, name = match.groups()
+
+        # only print top-level for short list
+        if args.list:
+            if not indent:
+                output_lines.append(
+                    os.path.basename(name).replace('.py', ''))
+        else:
+            print indent + name
 
-def test(parser, args):
     if args.list:
-        print "Available tests:"
-        colify(spack.test.list_tests(), indent=2)
-
-    else:
-        if not args.createXmlOutput:
-            outputDir = None
+        colify(output_lines)
+
+
+def test(parser, args, unknown_args):
+    if args.pytest_help:
+        # make the pytest.main help output more accurate
+        sys.argv[0] = 'spack test'
+        pytest.main(['-h'])
+        return
+
+    # pytest.ini lives in the root of the spack repository.
+    with working_dir(spack.prefix):
+        # --list and --long-list print the test output better.
+        if args.list or args.long_list:
+            do_list(args, unknown_args)
+            return
+
+        if args.tests and not any(arg.startswith('-') for arg in args.tests):
+            # Allow keyword search without -k if no options are specified
+            return pytest.main(['-k'] + args.tests)
         else:
-            if not args.xmlOutputDir:
-                outputDir = join_path(os.getcwd(), "test-output")
-            else:
-                outputDir = os.path.abspath(args.xmlOutputDir)
-
-            if not os.path.exists(outputDir):
-                mkdirp(outputDir)
-        spack.fetch_cache = MockCache()
-        spack.test.run(args.names, outputDir, args.verbose)
+            # Just run the pytest command.
+            return pytest.main(unknown_args + args.tests)
diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py
index 94b79accdb..d77700c01f 100644
--- a/lib/spack/spack/repository.py
+++ b/lib/spack/spack/repository.py
@@ -133,7 +133,7 @@ def __init__(self, *repo_dirs, **kwargs):
                          "    spack repo rm %s" % root)
 
     def swap(self, other):
-        """Convenience function to make swapping repostiories easier.
+        """Convenience function to make swapping repositories easier.
 
         This is currently used by mock tests.
         TODO: Maybe there is a cleaner way.
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index 79122cc1de..ed1ec23bca 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -22,132 +22,3 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import sys
-import os
-
-import llnl.util.tty as tty
-import nose
-import spack
-import spack.architecture
-from llnl.util.filesystem import join_path
-from llnl.util.tty.colify import colify
-from spack.test.tally_plugin import Tally
-from spack.platforms.test import Test as TestPlatform
-"""Names of tests to be included in Spack's test suite"""
-
-# All the tests Spack knows about.
-# Keep these one per line so that it's easy to see changes in diffs.
-test_names = [
-    'architecture',
-    'build_system_guess',
-    'cc',
-    'cmd.find',
-    'cmd.module',
-    'cmd.install',
-    'cmd.uninstall',
-    'concretize',
-    'concretize_preferences',
-    'config',
-    'database',
-    'directory_layout',
-    'environment',
-    'file_cache',
-    'git_fetch',
-    'hg_fetch',
-    'install',
-    'library_list',
-    'link_tree',
-    'lock',
-    'make_executable',
-    'mirror',
-    'modules',
-    'multimethod',
-    'namespace_trie',
-    'optional_deps',
-    'package_sanity',
-    'packages',
-    'pattern',
-    'python_version',
-    'sbang',
-    'spec_dag',
-    'spec_semantics',
-    'spec_syntax',
-    'spec_yaml',
-    'stage',
-    'svn_fetch',
-    'url_extrapolate',
-    'url_parse',
-    'url_substitution',
-    'versions',
-    'provider_index',
-    'spack_yaml',
-    # This test needs to be last until global compiler cache is fixed.
-    'cmd.test_compiler_cmd',
-]
-
-
-def setup_tests():
-    """Prepare the environment for the Spack tests to be run."""
-    test_platform = TestPlatform()
-    spack.architecture.real_platform = spack.architecture.platform
-    spack.architecture.platform = lambda: test_platform
-
-
-def list_tests():
-    """Return names of all tests that can be run for Spack."""
-    return test_names
-
-
-def run(names, outputDir, verbose=False):
-    """Run tests with the supplied names.  Names should be a list.  If
-       it's empty, run ALL of Spack's tests."""
-    # Print output to stdout if verbose is 1.
-    if verbose:
-        os.environ['NOSE_NOCAPTURE'] = '1'
-
-    if not names:
-        names = test_names
-    else:
-        for test in names:
-            if test not in test_names:
-                tty.error("%s is not a valid spack test name." % test,
-                          "Valid names are:")
-                colify(sorted(test_names), indent=4)
-                sys.exit(1)
-
-    tally = Tally()
-
-    modules = ['spack.test.' + test for test in names]
-    runOpts = ["--with-%s" % spack.test.tally_plugin.Tally.name]
-
-    if outputDir:
-        xmlOutputFname = "unittests-{0}.xml".format(test)
-        xmlOutputPath = join_path(outputDir, xmlOutputFname)
-        runOpts += ["--with-xunit",
-                    "--xunit-file={0}".format(xmlOutputPath)]
-    argv = [""] + runOpts + modules
-
-    setup_tests()
-    nose.run(argv=argv, addplugins=[tally])
-
-    succeeded = not tally.failCount and not tally.errorCount
-    tty.msg(
-        "Tests Complete.",
-        "%5d tests run" % tally.numberOfTestsRun,
-        "%5d failures" % tally.failCount,
-        "%5d errors" % tally.errorCount
-    )
-
-    if tally.fail_list:
-        items = [x for x in tally.fail_list]
-        tty.msg('List of failing tests:', *items)
-
-    if tally.error_list:
-        items = [x for x in tally.error_list]
-        tty.msg('List of tests with errors:', *items)
-
-    if succeeded:
-        tty.info("OK", format='g')
-    else:
-        tty.info("FAIL", format='r')
-        sys.exit(1)
diff --git a/lib/spack/spack/test/architecture.py b/lib/spack/spack/test/architecture.py
index 0ce583c6ea..fb4113361c 100644
--- a/lib/spack/spack/test/architecture.py
+++ b/lib/spack/spack/test/architecture.py
@@ -30,48 +30,33 @@
 import platform as py_platform
 import spack
 import spack.architecture
-from spack.spec import *
+from spack.spec import Spec
 from spack.platforms.cray import Cray
 from spack.platforms.linux import Linux
 from spack.platforms.bgq import Bgq
 from spack.platforms.darwin import Darwin
 
-from spack.test.mock_packages_test import *
 
+def test_dict_functions_for_architecture():
+    arch = spack.architecture.Arch()
+    arch.platform = spack.architecture.platform()
+    arch.platform_os = arch.platform.operating_system('default_os')
+    arch.target = arch.platform.target('default_target')
 
-class ArchitectureTest(MockPackagesTest):
+    new_arch = spack.architecture.Arch.from_dict(arch.to_dict())
 
-    def setUp(self):
-        super(ArchitectureTest, self).setUp()
-        self.platform = spack.architecture.platform()
+    assert arch == new_arch
+    assert isinstance(arch, spack.architecture.Arch)
+    assert isinstance(arch.platform, spack.architecture.Platform)
+    assert isinstance(arch.platform_os, spack.architecture.OperatingSystem)
+    assert isinstance(arch.target, spack.architecture.Target)
+    assert isinstance(new_arch, spack.architecture.Arch)
+    assert isinstance(new_arch.platform, spack.architecture.Platform)
+    assert isinstance(new_arch.platform_os, spack.architecture.OperatingSystem)
+    assert isinstance(new_arch.target, spack.architecture.Target)
 
-    def tearDown(self):
-        super(ArchitectureTest, self).tearDown()
 
-    def test_dict_functions_for_architecture(self):
-        arch = spack.architecture.Arch()
-        arch.platform = spack.architecture.platform()
-        arch.platform_os = arch.platform.operating_system('default_os')
-        arch.target = arch.platform.target('default_target')
-
-        new_arch = spack.architecture.Arch.from_dict(arch.to_dict())
-        self.assertEqual(arch, new_arch)
-
-        self.assertTrue(isinstance(arch, spack.architecture.Arch))
-        self.assertTrue(isinstance(arch.platform, spack.architecture.Platform))
-        self.assertTrue(isinstance(arch.platform_os,
-                                   spack.architecture.OperatingSystem))
-        self.assertTrue(isinstance(arch.target,
-                                   spack.architecture.Target))
-        self.assertTrue(isinstance(new_arch, spack.architecture.Arch))
-        self.assertTrue(isinstance(new_arch.platform,
-                                   spack.architecture.Platform))
-        self.assertTrue(isinstance(new_arch.platform_os,
-                                   spack.architecture.OperatingSystem))
-        self.assertTrue(isinstance(new_arch.target,
-                                   spack.architecture.Target))
-
-    def test_platform(self):
+def test_platform():
         output_platform_class = spack.architecture.real_platform()
         if os.path.exists('/opt/cray/craype'):
             my_platform_class = Cray()
@@ -82,85 +67,95 @@ def test_platform(self):
         elif 'Darwin' in py_platform.system():
             my_platform_class = Darwin()
 
-        self.assertEqual(str(output_platform_class), str(my_platform_class))
-
-    def test_boolness(self):
-        # Make sure architecture reports that it's False when nothing's set.
-        arch = spack.architecture.Arch()
-        self.assertFalse(arch)
-
-        # Dummy architecture parts
-        plat = spack.architecture.platform()
-        plat_os = plat.operating_system('default_os')
-        plat_target = plat.target('default_target')
-
-        # Make sure architecture reports that it's True when anything is set.
-        arch = spack.architecture.Arch()
-        arch.platform = plat
-        self.assertTrue(arch)
-
-        arch = spack.architecture.Arch()
-        arch.platform_os = plat_os
-        self.assertTrue(arch)
-
-        arch = spack.architecture.Arch()
-        arch.target = plat_target
-        self.assertTrue(arch)
-
-    def test_user_front_end_input(self):
-        """Test when user inputs just frontend that both the frontend target
-            and frontend operating system match
-        """
-        frontend_os = str(self.platform.operating_system("frontend"))
-        frontend_target = str(self.platform.target("frontend"))
-
-        frontend_spec = Spec("libelf os=frontend target=frontend")
-        frontend_spec.concretize()
-
-        self.assertEqual(frontend_os, frontend_spec.architecture.platform_os)
-        self.assertEqual(frontend_target, frontend_spec.architecture.target)
-
-    def test_user_back_end_input(self):
-        """Test when user inputs backend that both the backend target and
-            backend operating system match
-        """
-        backend_os = str(self.platform.operating_system("backend"))
-        backend_target = str(self.platform.target("backend"))
-
-        backend_spec = Spec("libelf os=backend target=backend")
-        backend_spec.concretize()
-
-        self.assertEqual(backend_os, backend_spec.architecture.platform_os)
-        self.assertEqual(backend_target, backend_spec.architecture.target)
-
-    def test_user_defaults(self):
-        default_os = str(self.platform.operating_system("default_os"))
-        default_target = str(self.platform.target("default_target"))
-
-        default_spec = Spec("libelf")  # default is no args
-        default_spec.concretize()
-
-        self.assertEqual(default_os, default_spec.architecture.platform_os)
-        self.assertEqual(default_target, default_spec.architecture.target)
-
-    def test_user_input_combination(self):
-        os_list = self.platform.operating_sys.keys()
-        target_list = self.platform.targets.keys()
-        additional = ["fe", "be", "frontend", "backend"]
-
-        os_list.extend(additional)
-        target_list.extend(additional)
-
-        combinations = itertools.product(os_list, target_list)
-        results = []
-        for arch in combinations:
-            o, t = arch
-            spec = Spec("libelf os=%s target=%s" % (o, t))
-            spec.concretize()
-            results.append(spec.architecture.platform_os ==
-                           str(self.platform.operating_system(o)))
-            results.append(spec.architecture.target ==
-                           str(self.platform.target(t)))
-        res = all(results)
-
-        self.assertTrue(res)
+        assert str(output_platform_class) == str(my_platform_class)
+
+
+def test_boolness():
+    # Make sure architecture reports that it's False when nothing's set.
+    arch = spack.architecture.Arch()
+    assert not arch
+
+    # Dummy architecture parts
+    plat = spack.architecture.platform()
+    plat_os = plat.operating_system('default_os')
+    plat_target = plat.target('default_target')
+
+    # Make sure architecture reports that it's True when anything is set.
+    arch = spack.architecture.Arch()
+    arch.platform = plat
+    assert arch
+
+    arch = spack.architecture.Arch()
+    arch.platform_os = plat_os
+    assert arch
+
+    arch = spack.architecture.Arch()
+    arch.target = plat_target
+    assert arch
+
+
+def test_user_front_end_input(config):
+    """Test when user inputs just frontend that both the frontend target
+    and frontend operating system match
+    """
+    platform = spack.architecture.platform()
+    frontend_os = str(platform.operating_system('frontend'))
+    frontend_target = str(platform.target('frontend'))
+
+    frontend_spec = Spec('libelf os=frontend target=frontend')
+    frontend_spec.concretize()
+
+    assert frontend_os == frontend_spec.architecture.platform_os
+    assert frontend_target == frontend_spec.architecture.target
+
+
+def test_user_back_end_input(config):
+    """Test when user inputs backend that both the backend target and
+    backend operating system match
+    """
+    platform = spack.architecture.platform()
+    backend_os = str(platform.operating_system("backend"))
+    backend_target = str(platform.target("backend"))
+
+    backend_spec = Spec("libelf os=backend target=backend")
+    backend_spec.concretize()
+
+    assert backend_os == backend_spec.architecture.platform_os
+    assert backend_target == backend_spec.architecture.target
+
+
+def test_user_defaults(config):
+    platform = spack.architecture.platform()
+    default_os = str(platform.operating_system("default_os"))
+    default_target = str(platform.target("default_target"))
+
+    default_spec = Spec("libelf")  # default is no args
+    default_spec.concretize()
+
+    assert default_os == default_spec.architecture.platform_os
+    assert default_target == default_spec.architecture.target
+
+
+def test_user_input_combination(config):
+    platform = spack.architecture.platform()
+    os_list = platform.operating_sys.keys()
+    target_list = platform.targets.keys()
+    additional = ["fe", "be", "frontend", "backend"]
+
+    os_list.extend(additional)
+    target_list.extend(additional)
+
+    combinations = itertools.product(os_list, target_list)
+    results = []
+    for arch in combinations:
+        o, t = arch
+        spec = Spec("libelf os=%s target=%s" % (o, t))
+        spec.concretize()
+        results.append(
+            spec.architecture.platform_os == str(platform.operating_system(o))
+        )
+        results.append(
+            spec.architecture.target == str(platform.target(t))
+        )
+    res = all(results)
+    assert res
diff --git a/lib/spack/spack/test/build_system_guess.py b/lib/spack/spack/test/build_system_guess.py
index e728a47cf4..97a9d67b47 100644
--- a/lib/spack/spack/test/build_system_guess.py
+++ b/lib/spack/spack/test/build_system_guess.py
@@ -22,60 +22,43 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import os
-import shutil
-import tempfile
-import unittest
 
-from llnl.util.filesystem import *
-from spack.cmd.create import BuildSystemGuesser
-from spack.stage import Stage
-from spack.test.mock_packages_test import *
-from spack.util.executable import which
+import pytest
+import spack.cmd.create
+import spack.util.executable
+import spack.stage
 
 
-class InstallTest(unittest.TestCase):
-    """Tests the build system guesser in spack create"""
+@pytest.fixture(
+    scope='function',
+    params=[
+        ('configure', 'autotools'),
+        ('CMakeLists.txt', 'cmake'),
+        ('SConstruct', 'scons'),
+        ('setup.py', 'python'),
+        ('NAMESPACE', 'R'),
+        ('foobar', 'unknown')
+    ]
+)
+def url_and_build_system(request, tmpdir):
+    """Sets up the resources to be pulled by the stage with
+    the appropriate file name and returns their url along with
+    the correct build-system guess
+    """
+    tar = spack.util.executable.which('tar')
+    orig_dir = tmpdir.chdir()
+    filename, system = request.param
+    tmpdir.ensure('archive', filename)
+    tar('czf', 'archive.tar.gz', 'archive')
+    url = 'file://' + str(tmpdir.join('archive.tar.gz'))
+    yield url, system
+    orig_dir.chdir()
 
-    def setUp(self):
-        self.tar = which('tar')
-        self.tmpdir = tempfile.mkdtemp()
-        self.orig_dir = os.getcwd()
-        os.chdir(self.tmpdir)
-        self.stage = None
 
-    def tearDown(self):
-        shutil.rmtree(self.tmpdir, ignore_errors=True)
-        os.chdir(self.orig_dir)
-
-    def check_archive(self, filename, system):
-        mkdirp('archive')
-        touch(join_path('archive', filename))
-        self.tar('czf', 'archive.tar.gz', 'archive')
-
-        url = 'file://' + join_path(os.getcwd(), 'archive.tar.gz')
-        print url
-        with Stage(url) as stage:
-            stage.fetch()
-
-            guesser = BuildSystemGuesser()
-            guesser(stage, url)
-            self.assertEqual(system, guesser.build_system)
-
-    def test_autotools(self):
-        self.check_archive('configure', 'autotools')
-
-    def test_cmake(self):
-        self.check_archive('CMakeLists.txt', 'cmake')
-
-    def test_scons(self):
-        self.check_archive('SConstruct', 'scons')
-
-    def test_python(self):
-        self.check_archive('setup.py', 'python')
-
-    def test_R(self):
-        self.check_archive('NAMESPACE', 'R')
-
-    def test_unknown(self):
-        self.check_archive('foobar', 'unknown')
+def test_build_systems(url_and_build_system):
+    url, build_system = url_and_build_system
+    with spack.stage.Stage(url) as stage:
+        stage.fetch()
+        guesser = spack.cmd.create.BuildSystemGuesser()
+        guesser(stage, url)
+        assert build_system == guesser.build_system
diff --git a/lib/spack/spack/test/cmd/find.py b/lib/spack/spack/test/cmd/find.py
index 4788da8ec6..dcd123d46e 100644
--- a/lib/spack/spack/test/cmd/find.py
+++ b/lib/spack/spack/test/cmd/find.py
@@ -22,33 +22,32 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-
-
 import spack.cmd.find
-import unittest
-
 from spack.util.pattern import Bunch
 
 
-class FindTest(unittest.TestCase):
-
-    def test_query_arguments(self):
-        query_arguments = spack.cmd.find.query_arguments
-        # Default arguments
-        args = Bunch(only_missing=False, missing=False,
-                     unknown=False, explicit=False, implicit=False)
-        q_args = query_arguments(args)
-        self.assertTrue('installed' in q_args)
-        self.assertTrue('known' in q_args)
-        self.assertTrue('explicit' in q_args)
-        self.assertEqual(q_args['installed'], True)
-        self.assertEqual(q_args['known'], any)
-        self.assertEqual(q_args['explicit'], any)
-        # Check that explicit works correctly
-        args.explicit = True
-        q_args = query_arguments(args)
-        self.assertEqual(q_args['explicit'], True)
-        args.explicit = False
-        args.implicit = True
-        q_args = query_arguments(args)
-        self.assertEqual(q_args['explicit'], False)
+def test_query_arguments():
+    query_arguments = spack.cmd.find.query_arguments
+    # Default arguments
+    args = Bunch(
+        only_missing=False,
+        missing=False,
+        unknown=False,
+        explicit=False,
+        implicit=False
+    )
+    q_args = query_arguments(args)
+    assert 'installed' in q_args
+    assert 'known' in q_args
+    assert 'explicit' in q_args
+    assert q_args['installed'] is True
+    assert q_args['known'] is any
+    assert q_args['explicit'] is any
+    # Check that explicit works correctly
+    args.explicit = True
+    q_args = query_arguments(args)
+    assert q_args['explicit'] is True
+    args.explicit = False
+    args.implicit = True
+    q_args = query_arguments(args)
+    assert q_args['explicit'] is False
diff --git a/lib/spack/spack/test/cmd/module.py b/lib/spack/spack/test/cmd/module.py
index 39f9c5649f..03ce1ef206 100644
--- a/lib/spack/spack/test/cmd/module.py
+++ b/lib/spack/spack/test/cmd/module.py
@@ -25,67 +25,82 @@
 import argparse
 import os.path
 
+import pytest
 import spack.cmd.module as module
 import spack.modules as modules
-import spack.test.mock_database
 
 
-class TestModule(spack.test.mock_database.MockDatabase):
+def _get_module_files(args):
+    return [modules.module_types[args.module_type](spec).file_name
+            for spec in args.specs()]
 
-    def _get_module_files(self, args):
-        return [modules.module_types[args.module_type](spec).file_name
-                for spec in args.specs()]
 
-    def test_module_common_operations(self):
-        parser = argparse.ArgumentParser()
-        module.setup_parser(parser)
+@pytest.fixture(scope='module')
+def parser():
+    """Returns the parser for the module command"""
+    parser = argparse.ArgumentParser()
+    module.setup_parser(parser)
+    return parser
 
-        # Try to remove a non existing module [tcl]
-        args = parser.parse_args(['rm', 'doesnotexist'])
-        self.assertRaises(SystemExit, module.module, parser, args)
 
-        # Remove existing modules [tcl]
-        args = parser.parse_args(['rm', '-y', 'mpileaks'])
-        module_files = self._get_module_files(args)
-        for item in module_files:
-            self.assertTrue(os.path.exists(item))
-        module.module(parser, args)
-        for item in module_files:
-            self.assertFalse(os.path.exists(item))
+@pytest.fixture(
+    params=[
+        ['rm', 'doesnotexist'],  # Try to remove a non existing module [tcl]
+        ['find', 'mpileaks'],  # Try to find a module with multiple matches
+        ['find', 'doesnotexist'],  # Try to find a module with no matches
+    ]
+)
+def failure_args(request):
+    """A list of arguments that will cause a failure"""
+    return request.param
+
+
+# TODO : test the --delete-tree option
+# TODO : this requires having a separate directory for test modules
+# TODO : add tests for loads and find to check the prompt format
 
-        # Add them back [tcl]
-        args = parser.parse_args(['refresh', '-y', 'mpileaks'])
+
+def test_exit_with_failure(database, parser, failure_args):
+    args = parser.parse_args(failure_args)
+    with pytest.raises(SystemExit):
         module.module(parser, args)
-        for item in module_files:
-            self.assertTrue(os.path.exists(item))
 
-        # TODO : test the --delete-tree option
-        # TODO : this requires having a separate directory for test modules
 
-        # Try to find a module with multiple matches
-        args = parser.parse_args(['find', 'mpileaks'])
-        self.assertRaises(SystemExit, module.module, parser, args)
+def test_remove_and_add_tcl(database, parser):
+    # Remove existing modules [tcl]
+    args = parser.parse_args(['rm', '-y', 'mpileaks'])
+    module_files = _get_module_files(args)
+    for item in module_files:
+        assert os.path.exists(item)
+    module.module(parser, args)
+    for item in module_files:
+        assert not os.path.exists(item)
 
-        # Try to find a module with no matches
-        args = parser.parse_args(['find', 'doesnotexist'])
-        self.assertRaises(SystemExit, module.module, parser, args)
+    # Add them back [tcl]
+    args = parser.parse_args(['refresh', '-y', 'mpileaks'])
+    module.module(parser, args)
+    for item in module_files:
+        assert os.path.exists(item)
 
-        # Try to find a module
-        args = parser.parse_args(['find', 'libelf'])
-        module.module(parser, args)
 
-        # Remove existing modules [dotkit]
-        args = parser.parse_args(['rm', '-y', '-m', 'dotkit', 'mpileaks'])
-        module_files = self._get_module_files(args)
-        for item in module_files:
-            self.assertTrue(os.path.exists(item))
-        module.module(parser, args)
-        for item in module_files:
-            self.assertFalse(os.path.exists(item))
+def test_find(database, parser):
+    # Try to find a module
+    args = parser.parse_args(['find', 'libelf'])
+    module.module(parser, args)
 
-        # Add them back [dotkit]
-        args = parser.parse_args(['refresh', '-y', '-m', 'dotkit', 'mpileaks'])
-        module.module(parser, args)
-        for item in module_files:
-            self.assertTrue(os.path.exists(item))
-        # TODO : add tests for loads and find to check the prompt format
+
+def test_remove_and_add_dotkit(database, parser):
+    # Remove existing modules [dotkit]
+    args = parser.parse_args(['rm', '-y', '-m', 'dotkit', 'mpileaks'])
+    module_files = _get_module_files(args)
+    for item in module_files:
+        assert os.path.exists(item)
+    module.module(parser, args)
+    for item in module_files:
+        assert not os.path.exists(item)
+
+    # Add them back [dotkit]
+    args = parser.parse_args(['refresh', '-y', '-m', 'dotkit', 'mpileaks'])
+    module.module(parser, args)
+    for item in module_files:
+        assert os.path.exists(item)
diff --git a/lib/spack/spack/test/cmd/test_compiler_cmd.py b/lib/spack/spack/test/cmd/test_compiler_cmd.py
index f6e7cdeb64..647404e6da 100644
--- a/lib/spack/spack/test/cmd/test_compiler_cmd.py
+++ b/lib/spack/spack/test/cmd/test_compiler_cmd.py
@@ -22,42 +22,30 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import os
-import shutil
-from tempfile import mkdtemp
+import pytest
+import llnl.util.filesystem
 
-from llnl.util.filesystem import set_executable, mkdirp
-
-import spack.spec
 import spack.cmd.compiler
 import spack.compilers
+import spack.spec
+import spack.util.pattern
 from spack.version import Version
-from spack.test.mock_packages_test import *
 
 test_version = '4.5-spacktest'
 
 
-class MockArgs(object):
-
-    def __init__(self, add_paths=[], scope=None, compiler_spec=None, all=None):
-        self.add_paths = add_paths
-        self.scope = scope
-        self.compiler_spec = compiler_spec
-        self.all = all
+@pytest.fixture()
+def mock_compiler_dir(tmpdir):
+    """Return a directory containing a fake, but detectable compiler."""
 
+    tmpdir.ensure('bin', dir=True)
+    bin_dir = tmpdir.join('bin')
 
-def make_mock_compiler():
-    """Make a directory containing a fake, but detectable compiler."""
-    mock_compiler_dir = mkdtemp()
-    bin_dir = os.path.join(mock_compiler_dir, 'bin')
-    mkdirp(bin_dir)
+    gcc_path = bin_dir.join('gcc')
+    gxx_path = bin_dir.join('g++')
+    gfortran_path = bin_dir.join('gfortran')
 
-    gcc_path = os.path.join(bin_dir, 'gcc')
-    gxx_path = os.path.join(bin_dir, 'g++')
-    gfortran_path = os.path.join(bin_dir, 'gfortran')
-
-    with open(gcc_path, 'w') as f:
-        f.write("""\
+    gcc_path.write("""\
 #!/bin/sh
 
 for arg in "$@"; do
@@ -68,39 +56,39 @@ def make_mock_compiler():
 """ % test_version)
 
     # Create some mock compilers in the temporary directory
-    set_executable(gcc_path)
-    shutil.copy(gcc_path, gxx_path)
-    shutil.copy(gcc_path, gfortran_path)
+    llnl.util.filesystem.set_executable(str(gcc_path))
+    gcc_path.copy(gxx_path, mode=True)
+    gcc_path.copy(gfortran_path, mode=True)
 
-    return mock_compiler_dir
+    return str(tmpdir)
 
 
-class CompilerCmdTest(MockPackagesTest):
-    """ Test compiler commands for add and remove """
+@pytest.mark.usefixtures('config', 'builtin_mock')
+class TestCompilerCommand(object):
 
     def test_compiler_remove(self):
-        args = MockArgs(all=True, compiler_spec='gcc@4.5.0')
+        args = spack.util.pattern.Bunch(
+            all=True, compiler_spec='gcc@4.5.0', add_paths=[], scope=None
+        )
         spack.cmd.compiler.compiler_remove(args)
         compilers = spack.compilers.all_compilers()
-        self.assertTrue(spack.spec.CompilerSpec("gcc@4.5.0") not in compilers)
+        assert spack.spec.CompilerSpec("gcc@4.5.0") not in compilers
 
-    def test_compiler_add(self):
-        # compilers available by default.
+    def test_compiler_add(self, mock_compiler_dir):
+        # Compilers available by default.
         old_compilers = set(spack.compilers.all_compilers())
 
-        # add our new compiler and find again.
-        compiler_dir = make_mock_compiler()
-
-        try:
-            args = MockArgs(add_paths=[compiler_dir])
-            spack.cmd.compiler.compiler_find(args)
-
-            # ensure new compiler is in there
-            new_compilers = set(spack.compilers.all_compilers())
-            new_compiler = new_compilers - old_compilers
-            self.assertTrue(new_compiler)
-            self.assertTrue(new_compiler.pop().version ==
-                            Version(test_version))
-
-        finally:
-            shutil.rmtree(compiler_dir, ignore_errors=True)
+        args = spack.util.pattern.Bunch(
+            all=None,
+            compiler_spec=None,
+            add_paths=[mock_compiler_dir],
+            scope=None
+        )
+        spack.cmd.compiler.compiler_find(args)
+
+        # Ensure new compiler is in there
+        new_compilers = set(spack.compilers.all_compilers())
+        new_compiler = new_compilers - old_compilers
+        assert new_compiler
+        c = new_compiler.pop()
+        assert c.version == Version(test_version)
diff --git a/lib/spack/spack/test/cmd/uninstall.py b/lib/spack/spack/test/cmd/uninstall.py
index 6a86a1543f..bfbb9b8148 100644
--- a/lib/spack/spack/test/cmd/uninstall.py
+++ b/lib/spack/spack/test/cmd/uninstall.py
@@ -22,9 +22,9 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import spack.test.mock_database
+import pytest
 import spack.store
-from spack.cmd.uninstall import uninstall
+import spack.cmd.uninstall
 
 
 class MockArgs(object):
@@ -37,27 +37,28 @@ def __init__(self, packages, all=False, force=False, dependents=False):
         self.yes_to_all = True
 
 
-class TestUninstall(spack.test.mock_database.MockDatabase):
-
-    def test_uninstall(self):
-        parser = None
-        # Multiple matches
-        args = MockArgs(['mpileaks'])
-        self.assertRaises(SystemExit, uninstall, parser, args)
-        # Installed dependents
-        args = MockArgs(['libelf'])
-        self.assertRaises(SystemExit, uninstall, parser, args)
-        # Recursive uninstall
-        args = MockArgs(['callpath'], all=True, dependents=True)
+def test_uninstall(database):
+    parser = None
+    uninstall = spack.cmd.uninstall.uninstall
+    # Multiple matches
+    args = MockArgs(['mpileaks'])
+    with pytest.raises(SystemExit):
+        uninstall(parser, args)
+    # Installed dependents
+    args = MockArgs(['libelf'])
+    with pytest.raises(SystemExit):
         uninstall(parser, args)
+    # Recursive uninstall
+    args = MockArgs(['callpath'], all=True, dependents=True)
+    uninstall(parser, args)
 
-        all_specs = spack.store.layout.all_specs()
-        self.assertEqual(len(all_specs), 7)
-        # query specs with multiple configurations
-        mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
-        callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
-        mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
+    all_specs = spack.store.layout.all_specs()
+    assert len(all_specs) == 7
+    # query specs with multiple configurations
+    mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+    callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+    mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
 
-        self.assertEqual(len(mpileaks_specs), 0)
-        self.assertEqual(len(callpath_specs), 0)
-        self.assertEqual(len(mpi_specs),      3)
+    assert len(mpileaks_specs) == 0
+    assert len(callpath_specs) == 0
+    assert len(mpi_specs) == 3
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index 42ae9aa18e..1f8eeaa29e 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -22,160 +22,152 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import pytest
 import spack
 import spack.architecture
+from spack.concretize import find_spec
 from spack.spec import Spec, CompilerSpec
 from spack.version import ver
-from spack.concretize import find_spec
-from spack.test.mock_packages_test import *
-
-
-class ConcretizeTest(MockPackagesTest):
-
-    def check_spec(self, abstract, concrete):
-        if abstract.versions.concrete:
-            self.assertEqual(abstract.versions, concrete.versions)
-
-        if abstract.variants:
-            for name in abstract.variants:
-                avariant = abstract.variants[name]
-                cvariant = concrete.variants[name]
-                self.assertEqual(avariant.value, cvariant.value)
-
-        if abstract.compiler_flags:
-            for flag in abstract.compiler_flags:
-                aflag = abstract.compiler_flags[flag]
-                cflag = concrete.compiler_flags[flag]
-                self.assertTrue(set(aflag) <= set(cflag))
-
-        for name in abstract.package.variants:
-            self.assertTrue(name in concrete.variants)
 
-        for flag in concrete.compiler_flags.valid_compiler_flags():
-            self.assertTrue(flag in concrete.compiler_flags)
 
-        if abstract.compiler and abstract.compiler.concrete:
-            self.assertEqual(abstract.compiler, concrete.compiler)
-
-        if abstract.architecture and abstract.architecture.concrete:
-            self.assertEqual(abstract.architecture, concrete.architecture)
-
-    def check_concretize(self, abstract_spec):
-        abstract = Spec(abstract_spec)
-        concrete = abstract.concretized()
-
-        self.assertFalse(abstract.concrete)
-        self.assertTrue(concrete.concrete)
-        self.check_spec(abstract, concrete)
-
-        return concrete
-
-    def test_concretize_no_deps(self):
-        self.check_concretize('libelf')
-        self.check_concretize('libelf@0.8.13')
-
-    def test_concretize_dag(self):
-        self.check_concretize('callpath')
-        self.check_concretize('mpileaks')
-        self.check_concretize('libelf')
+def check_spec(abstract, concrete):
+    if abstract.versions.concrete:
+        assert abstract.versions == concrete.versions
+
+    if abstract.variants:
+        for name in abstract.variants:
+            avariant = abstract.variants[name]
+            cvariant = concrete.variants[name]
+            assert avariant.value == cvariant.value
+
+    if abstract.compiler_flags:
+        for flag in abstract.compiler_flags:
+            aflag = abstract.compiler_flags[flag]
+            cflag = concrete.compiler_flags[flag]
+            assert set(aflag) <= set(cflag)
+
+    for name in abstract.package.variants:
+        assert name in concrete.variants
+
+    for flag in concrete.compiler_flags.valid_compiler_flags():
+        assert flag in concrete.compiler_flags
+
+    if abstract.compiler and abstract.compiler.concrete:
+        assert abstract.compiler == concrete.compiler
+
+    if abstract.architecture and abstract.architecture.concrete:
+        assert abstract.architecture == concrete.architecture
+
+
+def check_concretize(abstract_spec):
+    abstract = Spec(abstract_spec)
+    concrete = abstract.concretized()
+    assert not abstract.concrete
+    assert concrete.concrete
+    check_spec(abstract, concrete)
+    return concrete
+
+
+@pytest.fixture(
+    params=[
+        # no_deps
+        'libelf', 'libelf@0.8.13',
+        # dag
+        'callpath', 'mpileaks', 'libelf',
+        # variant
+        'mpich+debug', 'mpich~debug', 'mpich debug=2', 'mpich',
+        # compiler flags
+        'mpich cppflags="-O3"',
+        # with virtual
+        'mpileaks ^mpi', 'mpileaks ^mpi@:1.1', 'mpileaks ^mpi@2:',
+        'mpileaks ^mpi@2.1', 'mpileaks ^mpi@2.2', 'mpileaks ^mpi@2.2',
+        'mpileaks ^mpi@:1', 'mpileaks ^mpi@1.2:2'
+    ]
+)
+def spec(request):
+    """Spec to be concretized"""
+    return request.param
+
+
+@pytest.mark.usefixtures('config', 'builtin_mock')
+class TestConcretize(object):
+    def test_concretize(self, spec):
+        check_concretize(spec)
 
     def test_concretize_mention_build_dep(self):
-        spec = self.check_concretize('cmake-client ^cmake@3.4.3')
-
+        spec = check_concretize('cmake-client ^cmake@3.4.3')
         # Check parent's perspective of child
         dependency = spec.dependencies_dict()['cmake']
-        self.assertEqual(set(dependency.deptypes), set(['build']))
-
+        assert set(dependency.deptypes) == set(['build'])
         # Check child's perspective of parent
         cmake = spec['cmake']
         dependent = cmake.dependents_dict()['cmake-client']
-        self.assertEqual(set(dependent.deptypes), set(['build']))
-
-    def test_concretize_variant(self):
-        self.check_concretize('mpich+debug')
-        self.check_concretize('mpich~debug')
-        self.check_concretize('mpich debug=2')
-        self.check_concretize('mpich')
-
-    def test_conretize_compiler_flags(self):
-        self.check_concretize('mpich cppflags="-O3"')
+        assert set(dependent.deptypes) == set(['build'])
 
     def test_concretize_preferred_version(self):
-        spec = self.check_concretize('python')
-        self.assertEqual(spec.versions, ver('2.7.11'))
-
-        spec = self.check_concretize('python@3.5.1')
-        self.assertEqual(spec.versions, ver('3.5.1'))
-
-    def test_concretize_with_virtual(self):
-        self.check_concretize('mpileaks ^mpi')
-        self.check_concretize('mpileaks ^mpi@:1.1')
-        self.check_concretize('mpileaks ^mpi@2:')
-        self.check_concretize('mpileaks ^mpi@2.1')
-        self.check_concretize('mpileaks ^mpi@2.2')
-        self.check_concretize('mpileaks ^mpi@2.2')
-        self.check_concretize('mpileaks ^mpi@:1')
-        self.check_concretize('mpileaks ^mpi@1.2:2')
+        spec = check_concretize('python')
+        assert spec.versions == ver('2.7.11')
+        spec = check_concretize('python@3.5.1')
+        assert spec.versions == ver('3.5.1')
 
     def test_concretize_with_restricted_virtual(self):
-        self.check_concretize('mpileaks ^mpich2')
+        check_concretize('mpileaks ^mpich2')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@1.1')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.1'))
+        concrete = check_concretize('mpileaks   ^mpich2@1.1')
+        assert concrete['mpich2'].satisfies('mpich2@1.1')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@1.2')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.2'))
+        concrete = check_concretize('mpileaks   ^mpich2@1.2')
+        assert concrete['mpich2'].satisfies('mpich2@1.2')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@:1.5')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.5'))
+        concrete = check_concretize('mpileaks   ^mpich2@:1.5')
+        assert concrete['mpich2'].satisfies('mpich2@:1.5')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@:1.3')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.3'))
+        concrete = check_concretize('mpileaks   ^mpich2@:1.3')
+        assert concrete['mpich2'].satisfies('mpich2@:1.3')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@:1.2')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.2'))
+        concrete = check_concretize('mpileaks   ^mpich2@:1.2')
+        assert concrete['mpich2'].satisfies('mpich2@:1.2')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@:1.1')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@:1.1'))
+        concrete = check_concretize('mpileaks   ^mpich2@:1.1')
+        assert concrete['mpich2'].satisfies('mpich2@:1.1')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@1.1:')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.1:'))
+        concrete = check_concretize('mpileaks   ^mpich2@1.1:')
+        assert concrete['mpich2'].satisfies('mpich2@1.1:')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@1.5:')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.5:'))
+        concrete = check_concretize('mpileaks   ^mpich2@1.5:')
+        assert concrete['mpich2'].satisfies('mpich2@1.5:')
 
-        concrete = self.check_concretize('mpileaks   ^mpich2@1.3.1:1.4')
-        self.assertTrue(concrete['mpich2'].satisfies('mpich2@1.3.1:1.4'))
+        concrete = check_concretize('mpileaks   ^mpich2@1.3.1:1.4')
+        assert concrete['mpich2'].satisfies('mpich2@1.3.1:1.4')
 
     def test_concretize_with_provides_when(self):
         """Make sure insufficient versions of MPI are not in providers list when
-           we ask for some advanced version.
+        we ask for some advanced version.
         """
-        self.assertTrue(
-            not any(spec.satisfies('mpich2@:1.0')
-                    for spec in spack.repo.providers_for('mpi@2.1')))
-
-        self.assertTrue(
-            not any(spec.satisfies('mpich2@:1.1')
-                    for spec in spack.repo.providers_for('mpi@2.2')))
-
-        self.assertTrue(
-            not any(spec.satisfies('mpich@:1')
-                    for spec in spack.repo.providers_for('mpi@2')))
-
-        self.assertTrue(
-            not any(spec.satisfies('mpich@:1')
-                    for spec in spack.repo.providers_for('mpi@3')))
-
-        self.assertTrue(
-            not any(spec.satisfies('mpich2')
-                    for spec in spack.repo.providers_for('mpi@3')))
+        repo = spack.repo
+        assert not any(
+            s.satisfies('mpich2@:1.0') for s in repo.providers_for('mpi@2.1')
+        )
+        assert not any(
+            s.satisfies('mpich2@:1.1') for s in repo.providers_for('mpi@2.2')
+        )
+        assert not any(
+            s.satisfies('mpich@:1') for s in repo.providers_for('mpi@2')
+        )
+        assert not any(
+            s.satisfies('mpich@:1') for s in repo.providers_for('mpi@3')
+        )
+        assert not any(
+            s.satisfies('mpich2') for s in repo.providers_for('mpi@3')
+        )
 
     def test_concretize_two_virtuals(self):
         """Test a package with multiple virtual dependencies."""
         Spec('hypre').concretize()
 
-    def test_concretize_two_virtuals_with_one_bound(self):
+    def test_concretize_two_virtuals_with_one_bound(
+            self, refresh_builtin_mock
+    ):
         """Test a package with multiple virtual dependencies and one preset."""
         Spec('hypre ^openblas').concretize()
 
@@ -185,54 +177,48 @@ def test_concretize_two_virtuals_with_two_bound(self):
 
     def test_concretize_two_virtuals_with_dual_provider(self):
         """Test a package with multiple virtual dependencies and force a provider
-           that provides both."""
+        that provides both.
+        """
         Spec('hypre ^openblas-with-lapack').concretize()
 
-    def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(self):
+    def test_concretize_two_virtuals_with_dual_provider_and_a_conflict(
+            self
+    ):
         """Test a package with multiple virtual dependencies and force a
-           provider that provides both, and another conflicting package that
-           provides one.
+        provider that provides both, and another conflicting package that
+        provides one.
         """
         s = Spec('hypre ^openblas-with-lapack ^netlib-lapack')
-        self.assertRaises(spack.spec.MultipleProviderError, s.concretize)
+        with pytest.raises(spack.spec.MultipleProviderError):
+            s.concretize()
 
     def test_virtual_is_fully_expanded_for_callpath(self):
         # force dependence on fake "zmpi" by asking for MPI 10.0
         spec = Spec('callpath ^mpi@10.0')
-        self.assertTrue('mpi' in spec._dependencies)
-        self.assertFalse('fake' in spec)
-
+        assert 'mpi' in spec._dependencies
+        assert 'fake' not in spec
         spec.concretize()
-
-        self.assertTrue('zmpi' in spec._dependencies)
-        self.assertTrue(all('mpi' not in d._dependencies
-                            for d in spec.traverse()))
-        self.assertTrue('zmpi' in spec)
-        self.assertTrue('mpi' in spec)
-
-        self.assertTrue('fake' in spec._dependencies['zmpi'].spec)
-
-    def test_virtual_is_fully_expanded_for_mpileaks(self):
+        assert 'zmpi' in spec._dependencies
+        assert all('mpi' not in d._dependencies for d in spec.traverse())
+        assert 'zmpi' in spec
+        assert 'mpi' in spec
+        assert 'fake' in spec._dependencies['zmpi'].spec
+
+    def test_virtual_is_fully_expanded_for_mpileaks(
+            self
+    ):
         spec = Spec('mpileaks ^mpi@10.0')
-        self.assertTrue('mpi' in spec._dependencies)
-        self.assertFalse('fake' in spec)
-
+        assert 'mpi' in spec._dependencies
+        assert 'fake' not in spec
         spec.concretize()
-
-        self.assertTrue('zmpi' in spec._dependencies)
-        self.assertTrue('callpath' in spec._dependencies)
-        self.assertTrue(
-            'zmpi' in spec._dependencies['callpath']
-            .spec._dependencies)
-        self.assertTrue(
-            'fake' in spec._dependencies['callpath']
-            .spec._dependencies['zmpi']
-            .spec._dependencies)
-
-        self.assertTrue(
-            all('mpi' not in d._dependencies for d in spec.traverse()))
-        self.assertTrue('zmpi' in spec)
-        self.assertTrue('mpi' in spec)
+        assert 'zmpi' in spec._dependencies
+        assert 'callpath' in spec._dependencies
+        assert 'zmpi' in spec._dependencies['callpath'].spec._dependencies
+        assert 'fake' in spec._dependencies['callpath'].spec._dependencies[
+            'zmpi'].spec._dependencies  # NOQA: ignore=E501
+        assert all('mpi' not in d._dependencies for d in spec.traverse())
+        assert 'zmpi' in spec
+        assert 'mpi' in spec
 
     def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
         spec = Spec('indirect_mpich')
@@ -242,36 +228,31 @@ def test_my_dep_depends_on_provider_of_my_virtual_dep(self):
     def test_compiler_inheritance(self):
         spec = Spec('mpileaks')
         spec.normalize()
-
         spec['dyninst'].compiler = CompilerSpec('clang')
         spec.concretize()
-
         # TODO: not exactly the syntax I would like.
-        self.assertTrue(spec['libdwarf'].compiler.satisfies('clang'))
-        self.assertTrue(spec['libelf'].compiler.satisfies('clang'))
+        assert spec['libdwarf'].compiler.satisfies('clang')
+        assert spec['libelf'].compiler.satisfies('clang')
 
     def test_external_package(self):
         spec = Spec('externaltool%gcc')
         spec.concretize()
-
-        self.assertEqual(
-            spec['externaltool'].external, '/path/to/external_tool')
-        self.assertFalse('externalprereq' in spec)
-        self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
+        assert spec['externaltool'].external == '/path/to/external_tool'
+        assert 'externalprereq' not in spec
+        assert spec['externaltool'].compiler.satisfies('gcc')
 
     def test_external_package_module(self):
         # No tcl modules on darwin/linux machines
         # TODO: improved way to check for this.
         platform = spack.architecture.real_platform().name
-        if (platform == 'darwin' or platform == 'linux'):
+        if platform == 'darwin' or platform == 'linux':
             return
 
         spec = Spec('externalmodule')
         spec.concretize()
-        self.assertEqual(
-            spec['externalmodule'].external_module, 'external-module')
-        self.assertFalse('externalprereq' in spec)
-        self.assertTrue(spec['externalmodule'].compiler.satisfies('gcc'))
+        assert spec['externalmodule'].external_module == 'external-module'
+        assert 'externalprereq' not in spec
+        assert spec['externalmodule'].compiler.satisfies('gcc')
 
     def test_nobuild_package(self):
         got_error = False
@@ -280,17 +261,15 @@ def test_nobuild_package(self):
             spec.concretize()
         except spack.concretize.NoBuildError:
             got_error = True
-        self.assertTrue(got_error)
+        assert got_error
 
     def test_external_and_virtual(self):
         spec = Spec('externaltest')
         spec.concretize()
-        self.assertEqual(
-            spec['externaltool'].external, '/path/to/external_tool')
-        self.assertEqual(
-            spec['stuff'].external, '/path/to/external_virtual_gcc')
-        self.assertTrue(spec['externaltool'].compiler.satisfies('gcc'))
-        self.assertTrue(spec['stuff'].compiler.satisfies('gcc'))
+        assert spec['externaltool'].external == '/path/to/external_tool'
+        assert spec['stuff'].external == '/path/to/external_virtual_gcc'
+        assert spec['externaltool'].compiler.satisfies('gcc')
+        assert spec['stuff'].compiler.satisfies('gcc')
 
     def test_find_spec_parents(self):
         """Tests the spec finding logic used by concretization. """
@@ -300,7 +279,7 @@ def test_find_spec_parents(self):
                       Spec('d +foo')),
                  Spec('e +foo'))
 
-        self.assertEqual('a', find_spec(s['b'], lambda s: '+foo' in s).name)
+        assert 'a' == find_spec(s['b'], lambda s: '+foo' in s).name
 
     def test_find_spec_children(self):
         s = Spec('a',
@@ -308,13 +287,13 @@ def test_find_spec_children(self):
                       Spec('c'),
                       Spec('d +foo')),
                  Spec('e +foo'))
-        self.assertEqual('d', find_spec(s['b'], lambda s: '+foo' in s).name)
+        assert 'd' == find_spec(s['b'], lambda s: '+foo' in s).name
         s = Spec('a',
                  Spec('b +foo',
                       Spec('c +foo'),
                       Spec('d')),
                  Spec('e +foo'))
-        self.assertEqual('c', find_spec(s['b'], lambda s: '+foo' in s).name)
+        assert 'c' == find_spec(s['b'], lambda s: '+foo' in s).name
 
     def test_find_spec_sibling(self):
         s = Spec('a',
@@ -322,8 +301,8 @@ def test_find_spec_sibling(self):
                       Spec('c'),
                       Spec('d')),
                  Spec('e +foo'))
-        self.assertEqual('e', find_spec(s['b'], lambda s: '+foo' in s).name)
-        self.assertEqual('b', find_spec(s['e'], lambda s: '+foo' in s).name)
+        assert 'e' == find_spec(s['b'], lambda s: '+foo' in s).name
+        assert 'b' == find_spec(s['e'], lambda s: '+foo' in s).name
 
         s = Spec('a',
                  Spec('b +foo',
@@ -331,7 +310,7 @@ def test_find_spec_sibling(self):
                       Spec('d')),
                  Spec('e',
                       Spec('f +foo')))
-        self.assertEqual('f', find_spec(s['b'], lambda s: '+foo' in s).name)
+        assert 'f' == find_spec(s['b'], lambda s: '+foo' in s).name
 
     def test_find_spec_self(self):
         s = Spec('a',
@@ -339,7 +318,7 @@ def test_find_spec_self(self):
                       Spec('c'),
                       Spec('d')),
                  Spec('e'))
-        self.assertEqual('b', find_spec(s['b'], lambda s: '+foo' in s).name)
+        assert 'b' == find_spec(s['b'], lambda s: '+foo' in s).name
 
     def test_find_spec_none(self):
         s = Spec('a',
@@ -347,10 +326,10 @@ def test_find_spec_none(self):
                       Spec('c'),
                       Spec('d')),
                  Spec('e'))
-        self.assertEqual(None, find_spec(s['b'], lambda s: '+foo' in s))
+        assert find_spec(s['b'], lambda s: '+foo' in s) is None
 
     def test_compiler_child(self):
         s = Spec('mpileaks%clang ^dyninst%gcc')
         s.concretize()
-        self.assertTrue(s['mpileaks'].satisfies('%clang'))
-        self.assertTrue(s['dyninst'].satisfies('%gcc'))
+        assert s['mpileaks'].satisfies('%clang')
+        assert s['dyninst'].satisfies('%gcc')
diff --git a/lib/spack/spack/test/concretize_preferences.py b/lib/spack/spack/test/concretize_preferences.py
index 575e912609..21d457d2e0 100644
--- a/lib/spack/spack/test/concretize_preferences.py
+++ b/lib/spack/spack/test/concretize_preferences.py
@@ -22,92 +22,95 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import pytest
+
 import spack
-import spack.architecture
-from spack.test.mock_packages_test import *
-from tempfile import mkdtemp
+from spack.spec import Spec
 
 
-class ConcretizePreferencesTest(MockPackagesTest):
-    """Test concretization preferences are being applied correctly.
-    """
+@pytest.fixture()
+def concretize_scope(config, tmpdir):
+    """Adds a scope for concretization preferences"""
+    tmpdir.ensure_dir('concretize')
+    spack.config.ConfigScope(
+        'concretize', str(tmpdir.join('concretize'))
+    )
+    yield
+    # This is kind of weird, but that's how config scopes are
+    # set in ConfigScope.__init__
+    spack.config.config_scopes.pop('concretize')
+    spack.pkgsort = spack.PreferredPackages()
+
+
+def concretize(abstract_spec):
+    return Spec(abstract_spec).concretized()
+
+
+def update_packages(pkgname, section, value):
+    """Update config and reread package list"""
+    conf = {pkgname: {section: value}}
+    spack.config.update_config('packages', conf, 'concretize')
+    spack.pkgsort = spack.PreferredPackages()
 
-    def setUp(self):
-        """Create config section to store concretization preferences
-        """
-        super(ConcretizePreferencesTest, self).setUp()
-        self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
-        spack.config.ConfigScope('concretize',
-                                 os.path.join(self.tmp_dir, 'concretize'))
-
-    def tearDown(self):
-        super(ConcretizePreferencesTest, self).tearDown()
-        shutil.rmtree(self.tmp_dir, True)
-        spack.pkgsort = spack.PreferredPackages()
-
-    def concretize(self, abstract_spec):
-        return Spec(abstract_spec).concretized()
-
-    def update_packages(self, pkgname, section, value):
-        """Update config and reread package list"""
-        conf = {pkgname: {section: value}}
-        spack.config.update_config('packages', conf, 'concretize')
-        spack.pkgsort = spack.PreferredPackages()
-
-    def assert_variant_values(self, spec, **variants):
-        concrete = self.concretize(spec)
-        for variant, value in variants.items():
-            self.assertEqual(concrete.variants[variant].value, value)
 
+def assert_variant_values(spec, **variants):
+    concrete = concretize(spec)
+    for variant, value in variants.items():
+        assert concrete.variants[variant].value == value
+
+
+@pytest.mark.usefixtures('concretize_scope', 'builtin_mock')
+class TestConcretizePreferences(object):
     def test_preferred_variants(self):
         """Test preferred variants are applied correctly
         """
-        self.update_packages('mpileaks', 'variants',
-                             '~debug~opt+shared+static')
-        self.assert_variant_values('mpileaks', debug=False, opt=False,
-                                   shared=True, static=True)
-
-        self.update_packages('mpileaks', 'variants',
-                             ['+debug', '+opt', '~shared', '-static'])
-        self.assert_variant_values('mpileaks', debug=True, opt=True,
-                                   shared=False, static=False)
-
-    def test_preferred_compilers(self):
+        update_packages('mpileaks', 'variants', '~debug~opt+shared+static')
+        assert_variant_values(
+            'mpileaks', debug=False, opt=False, shared=True, static=True
+        )
+        update_packages(
+            'mpileaks', 'variants', ['+debug', '+opt', '~shared', '-static']
+        )
+        assert_variant_values(
+            'mpileaks', debug=True, opt=True, shared=False, static=False
+        )
+
+    def test_preferred_compilers(self, refresh_builtin_mock):
         """Test preferred compilers are applied correctly
         """
-        self.update_packages('mpileaks', 'compiler', ['clang@3.3'])
-        spec = self.concretize('mpileaks')
-        self.assertEqual(spec.compiler, spack.spec.CompilerSpec('clang@3.3'))
+        update_packages('mpileaks', 'compiler', ['clang@3.3'])
+        spec = concretize('mpileaks')
+        assert spec.compiler == spack.spec.CompilerSpec('clang@3.3')
 
-        self.update_packages('mpileaks', 'compiler', ['gcc@4.5.0'])
-        spec = self.concretize('mpileaks')
-        self.assertEqual(spec.compiler, spack.spec.CompilerSpec('gcc@4.5.0'))
+        update_packages('mpileaks', 'compiler', ['gcc@4.5.0'])
+        spec = concretize('mpileaks')
+        assert spec.compiler == spack.spec.CompilerSpec('gcc@4.5.0')
 
     def test_preferred_versions(self):
         """Test preferred package versions are applied correctly
         """
-        self.update_packages('mpileaks', 'version', ['2.3'])
-        spec = self.concretize('mpileaks')
-        self.assertEqual(spec.version, spack.spec.Version('2.3'))
+        update_packages('mpileaks', 'version', ['2.3'])
+        spec = concretize('mpileaks')
+        assert spec.version == spack.spec.Version('2.3')
 
-        self.update_packages('mpileaks', 'version', ['2.2'])
-        spec = self.concretize('mpileaks')
-        self.assertEqual(spec.version, spack.spec.Version('2.2'))
+        update_packages('mpileaks', 'version', ['2.2'])
+        spec = concretize('mpileaks')
+        assert spec.version == spack.spec.Version('2.2')
 
     def test_preferred_providers(self):
-        """Test preferred providers of virtual packages are applied correctly
+        """Test preferred providers of virtual packages are
+        applied correctly
         """
-        self.update_packages('all', 'providers', {'mpi': ['mpich']})
-        spec = self.concretize('mpileaks')
-        self.assertTrue('mpich' in spec)
+        update_packages('all', 'providers', {'mpi': ['mpich']})
+        spec = concretize('mpileaks')
+        assert 'mpich' in spec
 
-        self.update_packages('all', 'providers', {'mpi': ['zmpi']})
-        spec = self.concretize('mpileaks')
-        self.assertTrue('zmpi', spec)
+        update_packages('all', 'providers', {'mpi': ['zmpi']})
+        spec = concretize('mpileaks')
+        assert 'zmpi' in spec
 
     def test_develop(self):
-        """Test conretization with develop version
-        """
+        """Test concretization with develop version"""
         spec = Spec('builtin.mock.develop-test')
         spec.concretize()
-        self.assertEqual(spec.version, spack.spec.Version('0.2.15'))
+        assert spec.version == spack.spec.Version('0.2.15')
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index adc0795916..ed8f78ceb4 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -22,17 +22,17 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import os
-import shutil
+import collections
 import getpass
-import yaml
-from tempfile import mkdtemp
+import os
+import tempfile
 
+import ordereddict_backport
+import pytest
 import spack
 import spack.config
+import yaml
 from spack.util.path import canonicalize_path
-from ordereddict_backport import OrderedDict
-from spack.test.mock_packages_test import *
 
 # Some sample compiler config data
 a_comps = {
@@ -167,104 +167,115 @@
         'build_stage:': ['patha', 'pathb']}}
 
 
-class ConfigTest(MockPackagesTest):
-
-    def setUp(self):
-        super(ConfigTest, self).setUp()
-        self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
-        self.a_comp_specs = [
-            ac['compiler']['spec'] for ac in a_comps['compilers']]
-        self.b_comp_specs = [
-            bc['compiler']['spec'] for bc in b_comps['compilers']]
+def check_compiler_config(comps, *compiler_names):
+    """Check that named compilers in comps match Spack's config."""
+    config = spack.config.get_config('compilers')
+    compiler_list = ['cc', 'cxx', 'f77', 'fc']
+    flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags',
+                 'ldflags', 'ldlibs']
+    param_list = ['modules', 'paths', 'spec', 'operating_system']
+    for compiler in config:
+        conf = compiler['compiler']
+        if conf['spec'] in compiler_names:
+            comp = next((c['compiler'] for c in comps if
+                         c['compiler']['spec'] == conf['spec']), None)
+            if not comp:
+                raise ValueError('Bad config spec')
+            for p in param_list:
+                assert conf[p] == comp[p]
+            for f in flag_list:
+                expected = comp.get('flags', {}).get(f, None)
+                actual = conf.get('flags', {}).get(f, None)
+                assert expected == actual
+            for c in compiler_list:
+                expected = comp['paths'][c]
+                actual = conf['paths'][c]
+                assert expected == actual
+
+
+@pytest.fixture()
+def config(tmpdir):
+    """Mocks the configuration scope."""
+    spack.config.clear_config_caches()
+    real_scope = spack.config.config_scopes
+    spack.config.config_scopes = ordereddict_backport.OrderedDict()
+    for priority in ['low', 'high']:
+        spack.config.ConfigScope(priority, str(tmpdir.join(priority)))
+    Config = collections.namedtuple('Config', ['real', 'mock'])
+    yield Config(real=real_scope, mock=spack.config.config_scopes)
+    spack.config.config_scopes = real_scope
+    spack.config.clear_config_caches()
+
+
+@pytest.fixture()
+def write_config_file(tmpdir):
+    """Returns a function that writes a config file."""
+    def _write(config, data, scope):
+        config_yaml = tmpdir.join(scope, config + '.yaml')
+        config_yaml.ensure()
+        with config_yaml.open('w') as f:
+            yaml.dump(data, f)
+    return _write
 
-        spack.config.config_scopes = OrderedDict()
-        for priority in ['low', 'high']:
-            scope_dir = os.path.join(self.tmp_dir, priority)
-            spack.config.ConfigScope(priority, scope_dir)
 
-    def tearDown(self):
-        super(ConfigTest, self).tearDown()
-        shutil.rmtree(self.tmp_dir, True)
+@pytest.fixture()
+def compiler_specs():
+    """Returns a couple of compiler specs needed for the tests"""
+    a = [ac['compiler']['spec'] for ac in a_comps['compilers']]
+    b = [bc['compiler']['spec'] for bc in b_comps['compilers']]
+    CompilerSpecs = collections.namedtuple('CompilerSpecs', ['a', 'b'])
+    return CompilerSpecs(a=a, b=b)
 
-    def write_config_file(self, config, data, scope):
-        scope_dir = os.path.join(self.tmp_dir, scope)
-        mkdirp(scope_dir)
 
-        path = os.path.join(scope_dir, config + '.yaml')
-        with open(path, 'w') as f:
-            print yaml
-            yaml.dump(data, f)
-
-    def check_compiler_config(self, comps, *compiler_names):
-        """Check that named compilers in comps match Spack's config."""
-        config = spack.config.get_config('compilers')
-        compiler_list = ['cc', 'cxx', 'f77', 'fc']
-        flag_list = ['cflags', 'cxxflags', 'fflags', 'cppflags',
-                     'ldflags', 'ldlibs']
-        param_list = ['modules', 'paths', 'spec', 'operating_system']
-        for compiler in config:
-            conf = compiler['compiler']
-            if conf['spec'] in compiler_names:
-                comp = next((c['compiler'] for c in comps if
-                             c['compiler']['spec'] == conf['spec']), None)
-                if not comp:
-                    self.fail('Bad config spec')
-                for p in param_list:
-                    self.assertEqual(conf[p], comp[p])
-                for f in flag_list:
-                    expected = comp.get('flags', {}).get(f, None)
-                    actual = conf.get('flags', {}).get(f, None)
-                    self.assertEqual(expected, actual)
-                for c in compiler_list:
-                    expected = comp['paths'][c]
-                    actual = conf['paths'][c]
-                    self.assertEqual(expected, actual)
+@pytest.mark.usefixtures('config')
+class TestConfig(object):
 
     def test_write_list_in_memory(self):
         spack.config.update_config('repos', repos_low['repos'], scope='low')
         spack.config.update_config('repos', repos_high['repos'], scope='high')
 
         config = spack.config.get_config('repos')
-        self.assertEqual(config, repos_high['repos'] + repos_low['repos'])
+        assert config == repos_high['repos'] + repos_low['repos']
 
-    def test_write_key_in_memory(self):
+    def test_write_key_in_memory(self, compiler_specs):
         # Write b_comps "on top of" a_comps.
         spack.config.update_config(
-            'compilers', a_comps['compilers'], scope='low')
+            'compilers', a_comps['compilers'], scope='low'
+        )
         spack.config.update_config(
-            'compilers', b_comps['compilers'], scope='high')
-
+            'compilers', b_comps['compilers'], scope='high'
+        )
         # Make sure the config looks how we expect.
-        self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs)
-        self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs)
+        check_compiler_config(a_comps['compilers'], *compiler_specs.a)
+        check_compiler_config(b_comps['compilers'], *compiler_specs.b)
 
-    def test_write_key_to_disk(self):
+    def test_write_key_to_disk(self, compiler_specs):
         # Write b_comps "on top of" a_comps.
         spack.config.update_config(
-            'compilers', a_comps['compilers'], scope='low')
+            'compilers', a_comps['compilers'], scope='low'
+        )
         spack.config.update_config(
-            'compilers', b_comps['compilers'], scope='high')
-
+            'compilers', b_comps['compilers'], scope='high'
+        )
         # Clear caches so we're forced to read from disk.
         spack.config.clear_config_caches()
-
         # Same check again, to ensure consistency.
-        self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs)
-        self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs)
+        check_compiler_config(a_comps['compilers'], *compiler_specs.a)
+        check_compiler_config(b_comps['compilers'], *compiler_specs.b)
 
-    def test_write_to_same_priority_file(self):
+    def test_write_to_same_priority_file(self, compiler_specs):
         # Write b_comps in the same file as a_comps.
         spack.config.update_config(
-            'compilers', a_comps['compilers'], scope='low')
+            'compilers', a_comps['compilers'], scope='low'
+        )
         spack.config.update_config(
-            'compilers', b_comps['compilers'], scope='low')
-
+            'compilers', b_comps['compilers'], scope='low'
+        )
         # Clear caches so we're forced to read from disk.
         spack.config.clear_config_caches()
-
         # Same check again, to ensure consistency.
-        self.check_compiler_config(a_comps['compilers'], *self.a_comp_specs)
-        self.check_compiler_config(b_comps['compilers'], *self.b_comp_specs)
+        check_compiler_config(a_comps['compilers'], *compiler_specs.a)
+        check_compiler_config(b_comps['compilers'], *compiler_specs.b)
 
     def check_canonical(self, var, expected):
         """Ensure that <expected> is substituted properly for <var> in strings
@@ -283,72 +294,78 @@ def check_canonical(self, var, expected):
     def test_substitute_config_variables(self):
         prefix = spack.prefix.lstrip('/')
 
-        self.assertEqual(os.path.join('/foo/bar/baz', prefix),
-                         canonicalize_path('/foo/bar/baz/$spack'))
+        assert os.path.join(
+            '/foo/bar/baz', prefix
+        ) == canonicalize_path('/foo/bar/baz/$spack')
 
-        self.assertEqual(os.path.join(spack.prefix, 'foo/bar/baz'),
-                         canonicalize_path('$spack/foo/bar/baz/'))
+        assert os.path.join(
+            spack.prefix, 'foo/bar/baz'
+        ) == canonicalize_path('$spack/foo/bar/baz/')
 
-        self.assertEqual(os.path.join('/foo/bar/baz', prefix, 'foo/bar/baz'),
-                         canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/'))
+        assert os.path.join(
+            '/foo/bar/baz', prefix, 'foo/bar/baz'
+        ) == canonicalize_path('/foo/bar/baz/$spack/foo/bar/baz/')
 
-        self.assertEqual(os.path.join('/foo/bar/baz', prefix),
-                         canonicalize_path('/foo/bar/baz/${spack}'))
+        assert os.path.join(
+            '/foo/bar/baz', prefix
+        ) == canonicalize_path('/foo/bar/baz/${spack}')
 
-        self.assertEqual(os.path.join(spack.prefix, 'foo/bar/baz'),
-                         canonicalize_path('${spack}/foo/bar/baz/'))
+        assert os.path.join(
+            spack.prefix, 'foo/bar/baz'
+        ) == canonicalize_path('${spack}/foo/bar/baz/')
 
-        self.assertEqual(
-            os.path.join('/foo/bar/baz', prefix, 'foo/bar/baz'),
-            canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/'))
+        assert os.path.join(
+            '/foo/bar/baz', prefix, 'foo/bar/baz'
+        ) == canonicalize_path('/foo/bar/baz/${spack}/foo/bar/baz/')
 
-        self.assertNotEqual(
-            os.path.join('/foo/bar/baz', prefix, 'foo/bar/baz'),
-            canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/'))
+        assert os.path.join(
+            '/foo/bar/baz', prefix, 'foo/bar/baz'
+        ) != canonicalize_path('/foo/bar/baz/${spack/foo/bar/baz/')
 
     def test_substitute_user(self):
         user = getpass.getuser()
-        self.assertEqual('/foo/bar/' + user + '/baz',
-                         canonicalize_path('/foo/bar/$user/baz'))
+        assert '/foo/bar/' + user + '/baz' == canonicalize_path(
+            '/foo/bar/$user/baz'
+        )
 
     def test_substitute_tempdir(self):
         tempdir = tempfile.gettempdir()
-        self.assertEqual(tempdir, canonicalize_path('$tempdir'))
-        self.assertEqual(tempdir + '/foo/bar/baz',
-                         canonicalize_path('$tempdir/foo/bar/baz'))
-
-    def test_read_config(self):
-        self.write_config_file('config', config_low, 'low')
-        self.assertEqual(spack.config.get_config('config'),
-                         config_low['config'])
-
-    def test_read_config_override_all(self):
-        self.write_config_file('config', config_low, 'low')
-        self.write_config_file('config', config_override_all, 'high')
-        self.assertEqual(spack.config.get_config('config'), {
+        assert tempdir == canonicalize_path('$tempdir')
+        assert tempdir + '/foo/bar/baz' == canonicalize_path(
+            '$tempdir/foo/bar/baz'
+        )
+
+    def test_read_config(self, write_config_file):
+        write_config_file('config', config_low, 'low')
+        assert spack.config.get_config('config') == config_low['config']
+
+    def test_read_config_override_all(self, write_config_file):
+        write_config_file('config', config_low, 'low')
+        write_config_file('config', config_override_all, 'high')
+        assert spack.config.get_config('config') == {
             'install_tree': 'override_all'
-        })
+        }
 
-    def test_read_config_override_key(self):
-        self.write_config_file('config', config_low, 'low')
-        self.write_config_file('config', config_override_key, 'high')
-        self.assertEqual(spack.config.get_config('config'), {
+    def test_read_config_override_key(self, write_config_file):
+        write_config_file('config', config_low, 'low')
+        write_config_file('config', config_override_key, 'high')
+        assert spack.config.get_config('config') == {
             'install_tree': 'override_key',
             'build_stage': ['path1', 'path2', 'path3']
-        })
+        }
 
-    def test_read_config_merge_list(self):
-        self.write_config_file('config', config_low, 'low')
-        self.write_config_file('config', config_merge_list, 'high')
-        self.assertEqual(spack.config.get_config('config'), {
+    def test_read_config_merge_list(self, write_config_file):
+        write_config_file('config', config_low, 'low')
+        write_config_file('config', config_merge_list, 'high')
+        assert spack.config.get_config('config') == {
             'install_tree': 'install_tree_path',
             'build_stage': ['patha', 'pathb', 'path1', 'path2', 'path3']
-        })
+        }
 
-    def test_read_config_override_list(self):
-        self.write_config_file('config', config_low, 'low')
-        self.write_config_file('config', config_override_list, 'high')
-        self.assertEqual(spack.config.get_config('config'), {
+    def test_read_config_override_list(self, write_config_file):
+        write_config_file('config', config_low, 'low')
+        write_config_file('config', config_override_list, 'high')
+        assert spack.config.get_config('config') == {
             'install_tree': 'install_tree_path',
             'build_stage': ['patha', 'pathb']
-        })
+        }
diff --git a/lib/spack/spack/test/conftest.py b/lib/spack/spack/test/conftest.py
new file mode 100644
index 0000000000..11127d8735
--- /dev/null
+++ b/lib/spack/spack/test/conftest.py
@@ -0,0 +1,515 @@
+##############################################################################
+# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License (as
+# published by the Free Software Foundation) version 2.1, February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import collections
+import copy
+import os
+import re
+import shutil
+
+import cStringIO
+import llnl.util.filesystem
+import llnl.util.lang
+import ordereddict_backport
+import py
+import pytest
+import spack
+import spack.architecture
+import spack.database
+import spack.directory_layout
+import spack.fetch_strategy
+import spack.platforms.test
+import spack.repository
+import spack.stage
+import spack.util.executable
+import spack.util.pattern
+
+
+##########
+# Monkey-patching that is applied to all tests
+##########
+
+
+@pytest.fixture(autouse=True)
+def no_stdin_duplication(monkeypatch):
+    """Duplicating stdin (or any other stream) returns an empty
+    cStringIO object.
+    """
+    monkeypatch.setattr(
+        llnl.util.lang,
+        'duplicate_stream',
+        lambda x: cStringIO.StringIO()
+    )
+
+
+@pytest.fixture(autouse=True)
+def mock_fetch_cache(monkeypatch):
+    """Substitutes spack.fetch_cache with a mock object that does nothing
+    and raises on fetch.
+    """
+    class MockCache(object):
+        def store(self, copyCmd, relativeDst):
+            pass
+
+        def fetcher(self, targetPath, digest, **kwargs):
+            return MockCacheFetcher()
+
+    class MockCacheFetcher(object):
+        def set_stage(self, stage):
+            pass
+
+        def fetch(self):
+            raise spack.fetch_strategy.FetchError(
+                'Mock cache always fails for tests'
+            )
+
+        def __str__(self):
+            return "[mock fetcher]"
+
+    monkeypatch.setattr(spack, 'fetch_cache', MockCache())
+
+
+# FIXME: The lines below should better be added to a fixture with
+# FIXME: session-scope. Anyhow doing it is not easy, as it seems
+# FIXME: there's some weird interaction with compilers during concretization.
+spack.architecture.real_platform = spack.architecture.platform
+spack.architecture.platform = lambda: spack.platforms.test.Test()
+
+##########
+# Test-specific fixtures
+##########
+
+
+@pytest.fixture(scope='session')
+def repo_path():
+    """Session scoped RepoPath object pointing to the mock repository"""
+    return spack.repository.RepoPath(spack.mock_packages_path)
+
+
+@pytest.fixture(scope='module')
+def builtin_mock(repo_path):
+    """Uses the 'builtin.mock' repository instead of 'builtin'"""
+    mock_repo = copy.deepcopy(repo_path)
+    spack.repo.swap(mock_repo)
+    BuiltinMock = collections.namedtuple('BuiltinMock', ['real', 'mock'])
+    # Confusing, but we swapped above
+    yield BuiltinMock(real=mock_repo, mock=spack.repo)
+    spack.repo.swap(mock_repo)
+
+
+@pytest.fixture()
+def refresh_builtin_mock(builtin_mock, repo_path):
+    """Refreshes the state of spack.repo"""
+    # Get back the real repository
+    spack.repo.swap(builtin_mock.real)
+    mock_repo = copy.deepcopy(repo_path)
+    spack.repo.swap(mock_repo)
+    return builtin_mock
+
+
+@pytest.fixture(scope='session')
+def linux_os():
+    """Returns a named tuple with attributes 'name' and 'version'
+    representing the OS.
+    """
+    platform = spack.architecture.platform()
+    name, version = 'debian', '6'
+    if platform.name == 'linux':
+        platform = spack.architecture.platform()
+        current_os = platform.operating_system('default_os')
+        name, version = current_os.name, current_os.version
+    LinuxOS = collections.namedtuple('LinuxOS', ['name', 'version'])
+    return LinuxOS(name=name, version=version)
+
+
+@pytest.fixture(scope='session')
+def configuration_dir(tmpdir_factory, linux_os):
+    """Copies mock configuration files in a temporary directory. Returns the
+    directory path.
+    """
+    tmpdir = tmpdir_factory.mktemp('configurations')
+    # Name of the yaml files in the test/data folder
+    test_path = py.path.local(spack.test_path)
+    compilers_yaml = test_path.join('data', 'compilers.yaml')
+    packages_yaml = test_path.join('data', 'packages.yaml')
+    config_yaml = test_path.join('data', 'config.yaml')
+    # Create temporary 'site' and 'user' folders
+    tmpdir.ensure('site', dir=True)
+    tmpdir.ensure('user', dir=True)
+    # Copy the configurations that don't need further work
+    packages_yaml.copy(tmpdir.join('site', 'packages.yaml'))
+    config_yaml.copy(tmpdir.join('site', 'config.yaml'))
+    # Write the one that needs modifications
+    content = ''.join(compilers_yaml.read()).format(linux_os)
+    t = tmpdir.join('site', 'compilers.yaml')
+    t.write(content)
+    return tmpdir
+
+
+@pytest.fixture(scope='module')
+def config(configuration_dir):
+    """Hooks the mock configuration files into spack.config"""
+    # Set up a mock config scope
+    spack.config.clear_config_caches()
+    real_scope = spack.config.config_scopes
+    spack.config.config_scopes = ordereddict_backport.OrderedDict()
+    spack.config.ConfigScope('site', str(configuration_dir.join('site')))
+    spack.config.ConfigScope('user', str(configuration_dir.join('user')))
+    Config = collections.namedtuple('Config', ['real', 'mock'])
+    yield Config(real=real_scope, mock=spack.config.config_scopes)
+    spack.config.config_scopes = real_scope
+    spack.config.clear_config_caches()
+
+
+@pytest.fixture(scope='module')
+def database(tmpdir_factory, builtin_mock, config):
+    """Creates a mock database with some packages installed note that
+    the ref count for dyninst here will be 3, as it's recycled
+    across each install.
+    """
+
+    # Here is what the mock DB looks like:
+    #
+    # o  mpileaks     o  mpileaks'    o  mpileaks''
+    # |\              |\              |\
+    # | o  callpath   | o  callpath'  | o  callpath''
+    # |/|             |/|             |/|
+    # o |  mpich      o |  mpich2     o |  zmpi
+    #   |               |             o |  fake
+    #   |               |               |
+    #   |               |______________/
+    #   | .____________/
+    #   |/
+    #   o  dyninst
+    #   |\
+    #   | o  libdwarf
+    #   |/
+    #   o  libelf
+
+    # Make a fake install directory
+    install_path = tmpdir_factory.mktemp('install_for_database')
+    spack_install_path = py.path.local(spack.store.root)
+    spack.store.root = str(install_path)
+
+    install_layout = spack.directory_layout.YamlDirectoryLayout(
+        str(install_path)
+    )
+    spack_install_layout = spack.store.layout
+    spack.store.layout = install_layout
+
+    # Make fake database and fake install directory.
+    install_db = spack.database.Database(str(install_path))
+    spack_install_db = spack.store.db
+    spack.store.db = install_db
+
+    Entry = collections.namedtuple('Entry', ['path', 'layout', 'db'])
+    Database = collections.namedtuple(
+        'Database', ['real', 'mock', 'install', 'uninstall', 'refresh']
+    )
+
+    real = Entry(
+        path=spack_install_path,
+        layout=spack_install_layout,
+        db=spack_install_db
+    )
+    mock = Entry(path=install_path, layout=install_layout, db=install_db)
+
+    def _install(spec):
+        s = spack.spec.Spec(spec)
+        s.concretize()
+        pkg = spack.repo.get(s)
+        pkg.do_install(fake=True)
+
+    def _uninstall(spec):
+        spec.package.do_uninstall(spec)
+
+    def _refresh():
+        with spack.store.db.write_transaction():
+            for spec in spack.store.db.query():
+                _uninstall(spec)
+            _install('mpileaks ^mpich')
+            _install('mpileaks ^mpich2')
+            _install('mpileaks ^zmpi')
+
+    t = Database(
+        real=real,
+        mock=mock,
+        install=_install,
+        uninstall=_uninstall,
+        refresh=_refresh
+    )
+    # Transaction used to avoid repeated writes.
+    with spack.store.db.write_transaction():
+        t.install('mpileaks ^mpich')
+        t.install('mpileaks ^mpich2')
+        t.install('mpileaks ^zmpi')
+
+    yield t
+
+    with spack.store.db.write_transaction():
+        for spec in spack.store.db.query():
+            t.uninstall(spec)
+
+    install_path.remove(rec=1)
+    spack.store.root = str(spack_install_path)
+    spack.store.layout = spack_install_layout
+    spack.store.db = spack_install_db
+
+
+@pytest.fixture()
+def refresh_db_on_exit(database):
+    """"Restores the state of the database after a test."""
+    yield
+    database.refresh()
+
+##########
+# Fake archives and repositories
+##########
+
+
+@pytest.fixture(scope='session')
+def mock_archive():
+    """Creates a very simple archive directory with a configure script and a
+    makefile that installs to a prefix. Tars it up into an archive.
+    """
+    tar = spack.util.executable.which('tar', required=True)
+    stage = spack.stage.Stage('mock-archive-stage')
+    tmpdir = py.path.local(stage.path)
+    repo_name = 'mock-archive-repo'
+    tmpdir.ensure(repo_name, dir=True)
+    repodir = tmpdir.join(repo_name)
+    # Create the configure script
+    configure_path = str(tmpdir.join(repo_name, 'configure'))
+    with open(configure_path, 'w') as f:
+        f.write(
+            "#!/bin/sh\n"
+            "prefix=$(echo $1 | sed 's/--prefix=//')\n"
+            "cat > Makefile <<EOF\n"
+            "all:\n"
+            "\techo Building...\n\n"
+            "install:\n"
+            "\tmkdir -p $prefix\n"
+            "\ttouch $prefix/dummy_file\n"
+            "EOF\n"
+        )
+    os.chmod(configure_path, 0755)
+    # Archive it
+    current = tmpdir.chdir()
+    archive_name = '{0}.tar.gz'.format(repo_name)
+    tar('-czf', archive_name, repo_name)
+    current.chdir()
+    Archive = collections.namedtuple('Archive', ['url', 'path'])
+    url = 'file://' + str(tmpdir.join(archive_name))
+    # Return the url
+    yield Archive(url=url, path=str(repodir))
+    stage.destroy()
+
+
+@pytest.fixture(scope='session')
+def mock_git_repository():
+    """Creates a very simple git repository with two branches and
+    two commits.
+    """
+    git = spack.util.executable.which('git', required=True)
+    stage = spack.stage.Stage('mock-git-stage')
+    tmpdir = py.path.local(stage.path)
+    repo_name = 'mock-git-repo'
+    tmpdir.ensure(repo_name, dir=True)
+    repodir = tmpdir.join(repo_name)
+
+    # Initialize the repository
+    current = repodir.chdir()
+    git('init')
+    url = 'file://' + str(repodir)
+
+    # r0 is just the first commit
+    r0_file = 'r0_file'
+    repodir.ensure(r0_file)
+    git('add', r0_file)
+    git('commit', '-m', 'mock-git-repo r0')
+
+    branch = 'test-branch'
+    branch_file = 'branch_file'
+    git('branch', branch)
+
+    tag_branch = 'tag-branch'
+    tag_file = 'tag_file'
+    git('branch', tag_branch)
+
+    # Check out first branch
+    git('checkout', branch)
+    repodir.ensure(branch_file)
+    git('add', branch_file)
+    git('commit', '-m', 'r1 test branch')
+
+    # Check out a second branch and tag it
+    git('checkout', tag_branch)
+    repodir.ensure(tag_file)
+    git('add', tag_file)
+    git('commit', '-m', 'tag test branch')
+
+    tag = 'test-tag'
+    git('tag', tag)
+
+    git('checkout', 'master')
+
+    # R1 test is the same as test for branch
+    rev_hash = lambda x: git('rev-parse', x, output=str).strip()
+    r1 = rev_hash(branch)
+    r1_file = branch_file
+    current.chdir()
+
+    Bunch = spack.util.pattern.Bunch
+
+    checks = {
+        'master': Bunch(
+            revision='master', file=r0_file, args={'git': str(repodir)}
+        ),
+        'branch': Bunch(
+            revision=branch, file=branch_file, args={
+                'git': str(repodir), 'branch': branch
+            }
+        ),
+        'tag': Bunch(
+            revision=tag, file=tag_file, args={'git': str(repodir), 'tag': tag}
+        ),
+        'commit': Bunch(
+            revision=r1, file=r1_file, args={'git': str(repodir), 'commit': r1}
+        )
+    }
+
+    t = Bunch(checks=checks, url=url, hash=rev_hash, path=str(repodir))
+    yield t
+    stage.destroy()
+
+
+@pytest.fixture(scope='session')
+def mock_hg_repository():
+    """Creates a very simple hg repository with two commits."""
+    hg = spack.util.executable.which('hg', required=True)
+    stage = spack.stage.Stage('mock-hg-stage')
+    tmpdir = py.path.local(stage.path)
+    repo_name = 'mock-hg-repo'
+    tmpdir.ensure(repo_name, dir=True)
+    repodir = tmpdir.join(repo_name)
+
+    get_rev = lambda: hg('id', '-i', output=str).strip()
+
+    # Initialize the repository
+    current = repodir.chdir()
+    url = 'file://' + str(repodir)
+    hg('init')
+    # Commit file r0
+    r0_file = 'r0_file'
+    repodir.ensure(r0_file)
+    hg('add', r0_file)
+    hg('commit', '-m', 'revision 0', '-u', 'test')
+    r0 = get_rev()
+    # Commit file r1
+    r1_file = 'r1_file'
+    repodir.ensure(r1_file)
+    hg('add', r1_file)
+    hg('commit', '-m', 'revision 1', '-u', 'test')
+    r1 = get_rev()
+    current.chdir()
+
+    Bunch = spack.util.pattern.Bunch
+
+    checks = {
+        'default': Bunch(
+            revision=r1, file=r1_file, args={'hg': str(repodir)}
+        ),
+        'rev0': Bunch(
+            revision=r0, file=r0_file, args={
+                'hg': str(repodir), 'revision': r0
+            }
+        )
+    }
+    t = Bunch(checks=checks, url=url, hash=get_rev, path=str(repodir))
+    yield t
+    stage.destroy()
+
+
+@pytest.fixture(scope='session')
+def mock_svn_repository():
+    """Creates a very simple svn repository with two commits."""
+    svn = spack.util.executable.which('svn', required=True)
+    svnadmin = spack.util.executable.which('svnadmin', required=True)
+    stage = spack.stage.Stage('mock-svn-stage')
+    tmpdir = py.path.local(stage.path)
+    repo_name = 'mock-svn-repo'
+    tmpdir.ensure(repo_name, dir=True)
+    repodir = tmpdir.join(repo_name)
+    url = 'file://' + str(repodir)
+    # Initialize the repository
+    current = repodir.chdir()
+    svnadmin('create', str(repodir))
+
+    # Import a structure (first commit)
+    r0_file = 'r0_file'
+    tmpdir.ensure('tmp-path', r0_file)
+    svn(
+        'import',
+        str(tmpdir.join('tmp-path')),
+        url,
+        '-m',
+        'Initial import r0'
+    )
+    shutil.rmtree(str(tmpdir.join('tmp-path')))
+    # Second commit
+    r1_file = 'r1_file'
+    svn('checkout', url, str(tmpdir.join('tmp-path')))
+    tmpdir.ensure('tmp-path', r1_file)
+    tmpdir.join('tmp-path').chdir()
+    svn('add', str(tmpdir.ensure('tmp-path', r1_file)))
+    svn('ci', '-m', 'second revision r1')
+    repodir.chdir()
+    shutil.rmtree(str(tmpdir.join('tmp-path')))
+    r0 = '1'
+    r1 = '2'
+
+    Bunch = spack.util.pattern.Bunch
+
+    checks = {
+        'default': Bunch(
+            revision=r1, file=r1_file, args={'svn': url}
+        ),
+        'rev0': Bunch(
+            revision=r0, file=r0_file, args={
+                'svn': url, 'revision': r0
+            }
+        )
+    }
+
+    def get_rev():
+        output = svn('info', output=str)
+        assert "Revision" in output
+        for line in output.split('\n'):
+            match = re.match(r'Revision: (\d+)', line)
+            if match:
+                return match.group(1)
+
+    t = Bunch(checks=checks, url=url, hash=get_rev, path=str(repodir))
+    yield t
+    current.chdir()
\ No newline at end of file
diff --git a/lib/spack/spack/test/data/compilers.yaml b/lib/spack/spack/test/data/compilers.yaml
new file mode 100644
index 0000000000..ebba6a601d
--- /dev/null
+++ b/lib/spack/spack/test/data/compilers.yaml
@@ -0,0 +1,116 @@
+compilers:
+- compiler:
+    spec: clang@3.3
+    operating_system: {0.name}{0.version}
+    paths:
+      cc: /path/to/clang
+      cxx: /path/to/clang++
+      f77: None
+      fc: None
+    modules: 'None'
+- compiler:
+    spec: gcc@4.5.0
+    operating_system: {0.name}{0.version}
+    paths:
+      cc: /path/to/gcc
+      cxx: /path/to/g++
+      f77: None
+      fc: None
+    modules: 'None'
+- compiler:
+    spec: clang@3.3
+    operating_system: CNL
+    paths:
+      cc: /path/to/clang
+      cxx: /path/to/clang++
+      f77: None
+      fc: None
+    modules: 'None'
+- compiler:
+    spec: clang@3.3
+    operating_system: SuSE11
+    paths:
+      cc: /path/to/clang
+      cxx: /path/to/clang++
+      f77: None
+      fc: None
+    modules: 'None'
+- compiler:
+    spec: clang@3.3
+    operating_system: yosemite
+    paths:
+      cc: /path/to/clang
+      cxx: /path/to/clang++
+      f77: None
+      fc: None
+    modules: 'None'
+- compiler:
+    paths:
+      cc: /path/to/gcc
+      cxx: /path/to/g++
+      f77: /path/to/gfortran
+      fc: /path/to/gfortran
+    operating_system: CNL
+    spec: gcc@4.5.0
+    modules: 'None'
+- compiler:
+    paths:
+      cc: /path/to/gcc
+      cxx: /path/to/g++
+      f77: /path/to/gfortran
+      fc: /path/to/gfortran
+    operating_system: SuSE11
+    spec: gcc@4.5.0
+    modules: 'None'
+- compiler:
+    paths:
+      cc: /path/to/gcc
+      cxx: /path/to/g++
+      f77: /path/to/gfortran
+      fc: /path/to/gfortran
+    operating_system: yosemite
+    spec: gcc@4.5.0
+    modules: 'None'
+- compiler:
+    paths:
+      cc: /path/to/gcc
+      cxx: /path/to/g++
+      f77: /path/to/gfortran
+      fc: /path/to/gfortran
+    operating_system: elcapitan
+    spec: gcc@4.5.0
+    modules: 'None'
+- compiler:
+    spec: clang@3.3
+    operating_system: elcapitan
+    paths:
+      cc: /path/to/clang
+      cxx: /path/to/clang++
+      f77: None
+      fc: None
+    modules: 'None'
+- compiler:
+    spec: gcc@4.7.2
+    operating_system: redhat6
+    paths:
+      cc: /path/to/gcc472
+      cxx: /path/to/g++472
+      f77: /path/to/gfortran472
+      fc: /path/to/gfortran472
+    flags:
+      cflags: -O0
+      cxxflags: -O0
+      fflags: -O0
+    modules: 'None'
+- compiler:
+    spec: clang@3.5
+    operating_system: redhat6
+    paths:
+      cc: /path/to/clang35
+      cxx: /path/to/clang++35
+      f77: None
+      fc: None
+    flags:
+      cflags: -O3
+      cxxflags: -O3
+    modules: 'None'
diff --git a/lib/spack/spack/test/data/config.yaml b/lib/spack/spack/test/data/config.yaml
new file mode 100644
index 0000000000..d1758e9c16
--- /dev/null
+++ b/lib/spack/spack/test/data/config.yaml
@@ -0,0 +1,11 @@
+config:
+  install_tree: $spack/opt/spack
+  build_stage:
+  - $tempdir
+  - /nfs/tmp2/$user
+  - $spack/var/spack/stage
+  source_cache: $spack/var/spack/cache
+  misc_cache: ~/.spack/cache
+  verify_ssl: true
+  checksum: true
+  dirty: True
diff --git a/lib/spack/spack/test/data/packages.yaml b/lib/spack/spack/test/data/packages.yaml
new file mode 100644
index 0000000000..923d63173a
--- /dev/null
+++ b/lib/spack/spack/test/data/packages.yaml
@@ -0,0 +1,14 @@
+packages:
+  externaltool:
+    buildable: False
+    paths:
+      externaltool@1.0%gcc@4.5.0: /path/to/external_tool
+  externalvirtual:
+    buildable: False
+    paths:
+      externalvirtual@2.0%clang@3.3: /path/to/external_virtual_clang
+      externalvirtual@1.0%gcc@4.5.0: /path/to/external_virtual_gcc
+  externalmodule:
+    buildable: False
+    modules:
+      externalmodule@1.0%gcc@4.5.0: external-module
diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py
index 55a1d84e20..bbaa88b91d 100644
--- a/lib/spack/spack/test/database.py
+++ b/lib/spack/spack/test/database.py
@@ -29,11 +29,10 @@
 import multiprocessing
 import os.path
 
+import pytest
 import spack
 import spack.store
-from llnl.util.filesystem import join_path
 from llnl.util.tty.colify import colify
-from spack.test.mock_database import MockDatabase
 
 
 def _print_ref_counts():
@@ -71,264 +70,286 @@ def add_rec(spec):
     colify(recs, cols=3)
 
 
-class DatabaseTest(MockDatabase):
-
-    def test_005_db_exists(self):
-        """Make sure db cache file exists after creating."""
-        index_file = join_path(self.install_path, '.spack-db', 'index.json')
-        lock_file = join_path(self.install_path, '.spack-db', 'lock')
-
-        self.assertTrue(os.path.exists(index_file))
-        self.assertTrue(os.path.exists(lock_file))
-
-    def test_010_all_install_sanity(self):
-        """Ensure that the install layout reflects what we think it does."""
-        all_specs = spack.store.layout.all_specs()
-        self.assertEqual(len(all_specs), 13)
-
-        # query specs with multiple configurations
-        mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
-        callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
-        mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
-
-        self.assertEqual(len(mpileaks_specs), 3)
-        self.assertEqual(len(callpath_specs), 3)
-        self.assertEqual(len(mpi_specs),      3)
-
-        # query specs with single configurations
-        dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
-        libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
-        libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
-
-        self.assertEqual(len(dyninst_specs),  1)
-        self.assertEqual(len(libdwarf_specs), 1)
-        self.assertEqual(len(libelf_specs),   1)
-
-        # Query by dependency
-        self.assertEqual(
-            len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]), 1)
-        self.assertEqual(
-            len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]), 1)
-        self.assertEqual(
-            len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]), 1)
-
-    def test_015_write_and_read(self):
-        # write and read DB
-        with spack.store.db.write_transaction():
-            specs = spack.store.db.query()
-            recs = [spack.store.db.get_record(s) for s in specs]
-
-        for spec, rec in zip(specs, recs):
-            new_rec = spack.store.db.get_record(spec)
-            self.assertEqual(new_rec.ref_count, rec.ref_count)
-            self.assertEqual(new_rec.spec,      rec.spec)
-            self.assertEqual(new_rec.path,      rec.path)
-            self.assertEqual(new_rec.installed, rec.installed)
-
-    def _check_merkleiness(self):
-        """Ensure the spack database is a valid merkle graph."""
-        all_specs = spack.store.db.query(installed=any)
-
-        seen = {}
-        for spec in all_specs:
-            for dep in spec.dependencies():
-                hash_key = dep.dag_hash()
-                if hash_key not in seen:
-                    seen[hash_key] = id(dep)
-                else:
-                    self.assertEqual(seen[hash_key], id(dep))
-
-    def _check_db_sanity(self):
-        """Utiilty function to check db against install layout."""
-        expected = sorted(spack.store.layout.all_specs())
-        actual = sorted(self.install_db.query())
-
-        self.assertEqual(len(expected), len(actual))
-        for e, a in zip(expected, actual):
-            self.assertEqual(e, a)
-
-        self._check_merkleiness()
-
-    def test_020_db_sanity(self):
-        """Make sure query() returns what's actually in the db."""
-        self._check_db_sanity()
-
-    def test_025_reindex(self):
-        """Make sure reindex works and ref counts are valid."""
-        spack.store.db.reindex(spack.store.layout)
-        self._check_db_sanity()
-
-    def test_030_db_sanity_from_another_process(self):
-        def read_and_modify():
-            self._check_db_sanity()  # check that other process can read DB
-            with self.install_db.write_transaction():
-                self._mock_remove('mpileaks ^zmpi')
-
-        p = multiprocessing.Process(target=read_and_modify, args=())
-        p.start()
-        p.join()
-
-        # ensure child process change is visible in parent process
-        with self.install_db.read_transaction():
-            self.assertEqual(len(self.install_db.query('mpileaks ^zmpi')), 0)
-
-    def test_040_ref_counts(self):
-        """Ensure that we got ref counts right when we read the DB."""
-        self.install_db._check_ref_counts()
-
-    def test_050_basic_query(self):
-        """Ensure querying database is consistent with what is installed."""
-        # query everything
-        self.assertEqual(len(spack.store.db.query()), 13)
-
-        # query specs with multiple configurations
-        mpileaks_specs = self.install_db.query('mpileaks')
-        callpath_specs = self.install_db.query('callpath')
-        mpi_specs = self.install_db.query('mpi')
-
-        self.assertEqual(len(mpileaks_specs), 3)
-        self.assertEqual(len(callpath_specs), 3)
-        self.assertEqual(len(mpi_specs),      3)
-
-        # query specs with single configurations
-        dyninst_specs = self.install_db.query('dyninst')
-        libdwarf_specs = self.install_db.query('libdwarf')
-        libelf_specs = self.install_db.query('libelf')
-
-        self.assertEqual(len(dyninst_specs),  1)
-        self.assertEqual(len(libdwarf_specs), 1)
-        self.assertEqual(len(libelf_specs),   1)
-
-        # Query by dependency
-        self.assertEqual(len(self.install_db.query('mpileaks ^mpich')),  1)
-        self.assertEqual(len(self.install_db.query('mpileaks ^mpich2')), 1)
-        self.assertEqual(len(self.install_db.query('mpileaks ^zmpi')),   1)
-
-    def _check_remove_and_add_package(self, spec):
-        """Remove a spec from the DB, then add it and make sure everything's
-           still ok once it is added.  This checks that it was
-           removed, that it's back when added again, and that ref
-           counts are consistent.
-        """
-        original = self.install_db.query()
-        self.install_db._check_ref_counts()
-
-        # Remove spec
-        concrete_spec = self.install_db.remove(spec)
-        self.install_db._check_ref_counts()
-        remaining = self.install_db.query()
-
-        # ensure spec we removed is gone
-        self.assertEqual(len(original) - 1, len(remaining))
-        self.assertTrue(all(s in original for s in remaining))
-        self.assertTrue(concrete_spec not in remaining)
-
-        # add it back and make sure everything is ok.
-        self.install_db.add(concrete_spec, spack.store.layout)
-        installed = self.install_db.query()
-        self.assertTrue(concrete_spec in installed)
-        self.assertEqual(installed, original)
-
-        # sanity check against direcory layout and check ref counts.
-        self._check_db_sanity()
-        self.install_db._check_ref_counts()
-
-    def test_060_remove_and_add_root_package(self):
-        self._check_remove_and_add_package('mpileaks ^mpich')
-
-    def test_070_remove_and_add_dependency_package(self):
-        self._check_remove_and_add_package('dyninst')
-
-    def test_080_root_ref_counts(self):
-        rec = self.install_db.get_record('mpileaks ^mpich')
-
-        # Remove a top-level spec from the DB
-        self.install_db.remove('mpileaks ^mpich')
-
-        # record no longer in DB
-        self.assertEqual(
-            self.install_db.query('mpileaks ^mpich', installed=any), [])
-
-        # record's deps have updated ref_counts
-        self.assertEqual(
-            self.install_db.get_record('callpath ^mpich').ref_count, 0)
-        self.assertEqual(self.install_db.get_record('mpich').ref_count, 1)
-
-        # Put the spec back
-        self.install_db.add(rec.spec, spack.store.layout)
-
-        # record is present again
-        self.assertEqual(
-            len(self.install_db.query('mpileaks ^mpich', installed=any)), 1)
-
-        # dependencies have ref counts updated
-        self.assertEqual(
-            self.install_db.get_record('callpath ^mpich').ref_count, 1)
-        self.assertEqual(self.install_db.get_record('mpich').ref_count, 2)
-
-    def test_090_non_root_ref_counts(self):
-        self.install_db.get_record('mpileaks ^mpich')
-        self.install_db.get_record('callpath ^mpich')
-
-        # "force remove" a non-root spec from the DB
-        self.install_db.remove('callpath ^mpich')
-
-        # record still in DB but marked uninstalled
-        self.assertEqual(
-            self.install_db.query('callpath ^mpich', installed=True), [])
-        self.assertEqual(
-            len(self.install_db.query('callpath ^mpich', installed=any)), 1)
-
-        # record and its deps have same ref_counts
-        self.assertEqual(self.install_db.get_record(
-            'callpath ^mpich', installed=any).ref_count, 1)
-        self.assertEqual(self.install_db.get_record('mpich').ref_count, 2)
-
-        # remove only dependent of uninstalled callpath record
-        self.install_db.remove('mpileaks ^mpich')
-
-        # record and parent are completely gone.
-        self.assertEqual(
-            self.install_db.query('mpileaks ^mpich', installed=any), [])
-        self.assertEqual(
-            self.install_db.query('callpath ^mpich', installed=any), [])
-
-        # mpich ref count updated properly.
-        mpich_rec = self.install_db.get_record('mpich')
-        self.assertEqual(mpich_rec.ref_count, 0)
-
-    def test_100_no_write_with_exception_on_remove(self):
-        def fail_while_writing():
-            with self.install_db.write_transaction():
-                self._mock_remove('mpileaks ^zmpi')
-                raise Exception()
-
-        with self.install_db.read_transaction():
-            self.assertEqual(
-                len(self.install_db.query('mpileaks ^zmpi', installed=any)),
-                1)
-
-        self.assertRaises(Exception, fail_while_writing)
-
-        # reload DB and make sure zmpi is still there.
-        with self.install_db.read_transaction():
-            self.assertEqual(
-                len(self.install_db.query('mpileaks ^zmpi', installed=any)),
-                1)
-
-    def test_110_no_write_with_exception_on_install(self):
-        def fail_while_writing():
-            with self.install_db.write_transaction():
-                self._mock_install('cmake')
-                raise Exception()
-
-        with self.install_db.read_transaction():
-            self.assertEqual(
-                self.install_db.query('cmake', installed=any), [])
-
-        self.assertRaises(Exception, fail_while_writing)
-
-        # reload DB and make sure cmake was not written.
-        with self.install_db.read_transaction():
-            self.assertEqual(
-                self.install_db.query('cmake', installed=any), [])
+def _check_merkleiness():
+    """Ensure the spack database is a valid merkle graph."""
+    all_specs = spack.store.db.query(installed=any)
+
+    seen = {}
+    for spec in all_specs:
+        for dep in spec.dependencies():
+            hash_key = dep.dag_hash()
+            if hash_key not in seen:
+                seen[hash_key] = id(dep)
+            else:
+                assert seen[hash_key] == id(dep)
+
+
+def _check_db_sanity(install_db):
+    """Utiilty function to check db against install layout."""
+    expected = sorted(spack.store.layout.all_specs())
+    actual = sorted(install_db.query())
+
+    assert len(expected) == len(actual)
+    for e, a in zip(expected, actual):
+        assert e == a
+
+    _check_merkleiness()
+
+
+def _mock_remove(spec):
+    specs = spack.store.db.query(spec)
+    assert len(specs) == 1
+    spec = specs[0]
+    spec.package.do_uninstall(spec)
+
+
+def test_005_db_exists(database):
+    """Make sure db cache file exists after creating."""
+    install_path = database.mock.path
+    index_file = install_path.join('.spack-db', 'index.json')
+    lock_file = install_path.join('.spack-db', 'lock')
+    assert os.path.exists(str(index_file))
+    assert os.path.exists(str(lock_file))
+
+
+def test_010_all_install_sanity(database):
+    """Ensure that the install layout reflects what we think it does."""
+    all_specs = spack.store.layout.all_specs()
+    assert len(all_specs) == 13
+
+    # Query specs with multiple configurations
+    mpileaks_specs = [s for s in all_specs if s.satisfies('mpileaks')]
+    callpath_specs = [s for s in all_specs if s.satisfies('callpath')]
+    mpi_specs = [s for s in all_specs if s.satisfies('mpi')]
+
+    assert len(mpileaks_specs) == 3
+    assert len(callpath_specs) == 3
+    assert len(mpi_specs) == 3
+
+    # Query specs with single configurations
+    dyninst_specs = [s for s in all_specs if s.satisfies('dyninst')]
+    libdwarf_specs = [s for s in all_specs if s.satisfies('libdwarf')]
+    libelf_specs = [s for s in all_specs if s.satisfies('libelf')]
+
+    assert len(dyninst_specs) == 1
+    assert len(libdwarf_specs) == 1
+    assert len(libelf_specs) == 1
+
+    # Query by dependency
+    assert len([s for s in all_specs if s.satisfies('mpileaks ^mpich')]) == 1
+    assert len([s for s in all_specs if s.satisfies('mpileaks ^mpich2')]) == 1
+    assert len([s for s in all_specs if s.satisfies('mpileaks ^zmpi')]) == 1
+
+
+def test_015_write_and_read(database):
+    # write and read DB
+    with spack.store.db.write_transaction():
+        specs = spack.store.db.query()
+        recs = [spack.store.db.get_record(s) for s in specs]
+
+    for spec, rec in zip(specs, recs):
+        new_rec = spack.store.db.get_record(spec)
+        assert new_rec.ref_count == rec.ref_count
+        assert new_rec.spec == rec.spec
+        assert new_rec.path == rec.path
+        assert new_rec.installed == rec.installed
+
+
+def test_020_db_sanity(database):
+    """Make sure query() returns what's actually in the db."""
+    install_db = database.mock.db
+    _check_db_sanity(install_db)
+
+
+def test_025_reindex(database):
+    """Make sure reindex works and ref counts are valid."""
+    install_db = database.mock.db
+    spack.store.db.reindex(spack.store.layout)
+    _check_db_sanity(install_db)
+
+
+def test_030_db_sanity_from_another_process(database, refresh_db_on_exit):
+    install_db = database.mock.db
+
+    def read_and_modify():
+        _check_db_sanity(install_db)  # check that other process can read DB
+        with install_db.write_transaction():
+            _mock_remove('mpileaks ^zmpi')
+
+    p = multiprocessing.Process(target=read_and_modify, args=())
+    p.start()
+    p.join()
+
+    # ensure child process change is visible in parent process
+    with install_db.read_transaction():
+        assert len(install_db.query('mpileaks ^zmpi')) == 0
+
+
+def test_040_ref_counts(database):
+    """Ensure that we got ref counts right when we read the DB."""
+    install_db = database.mock.db
+    install_db._check_ref_counts()
+
+
+def test_050_basic_query(database):
+    """Ensure querying database is consistent with what is installed."""
+    install_db = database.mock.db
+    # query everything
+    assert len(spack.store.db.query()) == 13
+
+    # query specs with multiple configurations
+    mpileaks_specs = install_db.query('mpileaks')
+    callpath_specs = install_db.query('callpath')
+    mpi_specs = install_db.query('mpi')
+
+    assert len(mpileaks_specs) == 3
+    assert len(callpath_specs) == 3
+    assert len(mpi_specs) == 3
+
+    # query specs with single configurations
+    dyninst_specs = install_db.query('dyninst')
+    libdwarf_specs = install_db.query('libdwarf')
+    libelf_specs = install_db.query('libelf')
+
+    assert len(dyninst_specs) == 1
+    assert len(libdwarf_specs) == 1
+    assert len(libelf_specs) == 1
+
+    # Query by dependency
+    assert len(install_db.query('mpileaks ^mpich')) == 1
+    assert len(install_db.query('mpileaks ^mpich2')) == 1
+    assert len(install_db.query('mpileaks ^zmpi')) == 1
+
+
+def _check_remove_and_add_package(install_db, spec):
+    """Remove a spec from the DB, then add it and make sure everything's
+    still ok once it is added.  This checks that it was
+    removed, that it's back when added again, and that ref
+    counts are consistent.
+    """
+    original = install_db.query()
+    install_db._check_ref_counts()
+
+    # Remove spec
+    concrete_spec = install_db.remove(spec)
+    install_db._check_ref_counts()
+    remaining = install_db.query()
+
+    # ensure spec we removed is gone
+    assert len(original) - 1 == len(remaining)
+    assert all(s in original for s in remaining)
+    assert concrete_spec not in remaining
+
+    # add it back and make sure everything is ok.
+    install_db.add(concrete_spec, spack.store.layout)
+    installed = install_db.query()
+    assert concrete_spec in installed
+    assert installed == original
+
+    # sanity check against directory layout and check ref counts.
+    _check_db_sanity(install_db)
+    install_db._check_ref_counts()
+
+
+def test_060_remove_and_add_root_package(database):
+    install_db = database.mock.db
+    _check_remove_and_add_package(install_db, 'mpileaks ^mpich')
+
+
+def test_070_remove_and_add_dependency_package(database):
+    install_db = database.mock.db
+    _check_remove_and_add_package(install_db, 'dyninst')
+
+
+def test_080_root_ref_counts(database):
+    install_db = database.mock.db
+    rec = install_db.get_record('mpileaks ^mpich')
+
+    # Remove a top-level spec from the DB
+    install_db.remove('mpileaks ^mpich')
+
+    # record no longer in DB
+    assert install_db.query('mpileaks ^mpich', installed=any) == []
+
+    # record's deps have updated ref_counts
+    assert install_db.get_record('callpath ^mpich').ref_count == 0
+    assert install_db.get_record('mpich').ref_count == 1
+
+    # Put the spec back
+    install_db.add(rec.spec, spack.store.layout)
+
+    # record is present again
+    assert len(install_db.query('mpileaks ^mpich', installed=any)) == 1
+
+    # dependencies have ref counts updated
+    assert install_db.get_record('callpath ^mpich').ref_count == 1
+    assert install_db.get_record('mpich').ref_count == 2
+
+
+def test_090_non_root_ref_counts(database):
+    install_db = database.mock.db
+
+    install_db.get_record('mpileaks ^mpich')
+    install_db.get_record('callpath ^mpich')
+
+    # "force remove" a non-root spec from the DB
+    install_db.remove('callpath ^mpich')
+
+    # record still in DB but marked uninstalled
+    assert install_db.query('callpath ^mpich', installed=True) == []
+    assert len(install_db.query('callpath ^mpich', installed=any)) == 1
+
+    # record and its deps have same ref_counts
+    assert install_db.get_record(
+        'callpath ^mpich', installed=any
+    ).ref_count == 1
+    assert install_db.get_record('mpich').ref_count == 2
+
+    # remove only dependent of uninstalled callpath record
+    install_db.remove('mpileaks ^mpich')
+
+    # record and parent are completely gone.
+    assert install_db.query('mpileaks ^mpich', installed=any) == []
+    assert install_db.query('callpath ^mpich', installed=any) == []
+
+    # mpich ref count updated properly.
+    mpich_rec = install_db.get_record('mpich')
+    assert mpich_rec.ref_count == 0
+
+
+def test_100_no_write_with_exception_on_remove(database):
+    install_db = database.mock.db
+
+    def fail_while_writing():
+        with install_db.write_transaction():
+            _mock_remove('mpileaks ^zmpi')
+            raise Exception()
+
+    with install_db.read_transaction():
+        assert len(install_db.query('mpileaks ^zmpi', installed=any)) == 1
+
+    with pytest.raises(Exception):
+        fail_while_writing()
+
+    # reload DB and make sure zmpi is still there.
+    with install_db.read_transaction():
+        assert len(install_db.query('mpileaks ^zmpi', installed=any)) == 1
+
+
+def test_110_no_write_with_exception_on_install(database):
+    install_db = database.mock.db
+
+    def fail_while_writing():
+        with install_db.write_transaction():
+            _mock_install('cmake')
+            raise Exception()
+
+    with install_db.read_transaction():
+        assert install_db.query('cmake', installed=any) == []
+
+    with pytest.raises(Exception):
+        fail_while_writing()
+
+    # reload DB and make sure cmake was not written.
+    with install_db.read_transaction():
+        assert install_db.query('cmake', installed=any) == []
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index fdaa43464b..2caadad0fe 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -22,175 +22,173 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-"""\
+"""
 This test verifies that the Spack directory layout works properly.
 """
 import os
-import shutil
-import tempfile
 
+import pytest
 import spack
-from llnl.util.filesystem import *
 from spack.directory_layout import YamlDirectoryLayout
 from spack.repository import RepoPath
 from spack.spec import Spec
-from spack.test.mock_packages_test import *
 
 # number of packages to test (to reduce test time)
 max_packages = 10
 
 
-class DirectoryLayoutTest(MockPackagesTest):
-    """Tests that a directory layout works correctly and produces a
-       consistent install path."""
-
-    def setUp(self):
-        super(DirectoryLayoutTest, self).setUp()
-        self.tmpdir = tempfile.mkdtemp()
-        self.layout = YamlDirectoryLayout(self.tmpdir)
-
-    def tearDown(self):
-        super(DirectoryLayoutTest, self).tearDown()
-        shutil.rmtree(self.tmpdir, ignore_errors=True)
-        self.layout = None
-
-    def test_read_and_write_spec(self):
-        """This goes through each package in spack and creates a directory for
-           it.  It then ensures that the spec for the directory's
-           installed package can be read back in consistently, and
-           finally that the directory can be removed by the directory
-           layout.
-        """
-        packages = list(spack.repo.all_packages())[:max_packages]
-
-        for pkg in packages:
-            if pkg.name.startswith('external'):
-                # External package tests cannot be installed
-                continue
-            spec = pkg.spec
-
-            # If a spec fails to concretize, just skip it.  If it is a
-            # real error, it will be caught by concretization tests.
-            try:
-                spec.concretize()
-            except:
-                continue
-
-            self.layout.create_install_directory(spec)
-
-            install_dir = self.layout.path_for_spec(spec)
-            spec_path = self.layout.spec_file_path(spec)
-
-            # Ensure directory has been created in right place.
-            self.assertTrue(os.path.isdir(install_dir))
-            self.assertTrue(install_dir.startswith(self.tmpdir))
-
-            # Ensure spec file exists when directory is created
-            self.assertTrue(os.path.isfile(spec_path))
-            self.assertTrue(spec_path.startswith(install_dir))
-
-            # Make sure spec file can be read back in to get the original spec
-            spec_from_file = self.layout.read_spec(spec_path)
-
-            # currently we don't store build dependency information when
-            # we write out specs to the filesystem.
-
-            # TODO: fix this when we can concretize more loosely based on
-            # TODO: what is installed. We currently omit these to
-            # TODO: increase reuse of build dependencies.
-            stored_deptypes = ('link', 'run')
-            expected = spec.copy(deps=stored_deptypes)
-            self.assertEqual(expected, spec_from_file)
-            self.assertTrue(expected.eq_dag, spec_from_file)
-            self.assertTrue(spec_from_file.concrete)
-
-            # Ensure that specs that come out "normal" are really normal.
-            with open(spec_path) as spec_file:
-                read_separately = Spec.from_yaml(spec_file.read())
-
-                # TODO: revise this when build deps are in dag_hash
-                norm = read_separately.normalized().copy(deps=stored_deptypes)
-                self.assertEqual(norm, spec_from_file)
-
-                # TODO: revise this when build deps are in dag_hash
-                conc = read_separately.concretized().copy(deps=stored_deptypes)
-                self.assertEqual(conc, spec_from_file)
-
-            # Make sure the hash of the read-in spec is the same
-            self.assertEqual(expected.dag_hash(), spec_from_file.dag_hash())
-
-            # Ensure directories are properly removed
-            self.layout.remove_install_directory(spec)
-            self.assertFalse(os.path.isdir(install_dir))
-            self.assertFalse(os.path.exists(install_dir))
-
-    def test_handle_unknown_package(self):
-        """This test ensures that spack can at least do *some*
-           operations with packages that are installed but that it
-           does not know about.  This is actually not such an uncommon
-           scenario with spack; it can happen when you switch from a
-           git branch where you're working on a new package.
-
-           This test ensures that the directory layout stores enough
-           information about installed packages' specs to uninstall
-           or query them again if the package goes away.
-        """
-        mock_db = RepoPath(spack.mock_packages_path)
-
-        not_in_mock = set.difference(
-            set(spack.repo.all_package_names()),
-            set(mock_db.all_package_names()))
-        packages = list(not_in_mock)[:max_packages]
-
-        # Create all the packages that are not in mock.
-        installed_specs = {}
-        for pkg_name in packages:
-            spec = spack.repo.get(pkg_name).spec
-
-            # If a spec fails to concretize, just skip it.  If it is a
-            # real error, it will be caught by concretization tests.
-            try:
-                spec.concretize()
-            except:
-                continue
-
-            self.layout.create_install_directory(spec)
-            installed_specs[spec] = self.layout.path_for_spec(spec)
-
-        spack.repo.swap(mock_db)
-
-        # Now check that even without the package files, we know
-        # enough to read a spec from the spec file.
-        for spec, path in installed_specs.items():
-            spec_from_file = self.layout.read_spec(
-                join_path(path, '.spack', 'spec.yaml'))
-
-            # To satisfy these conditions, directory layouts need to
-            # read in concrete specs from their install dirs somehow.
-            self.assertEqual(path, self.layout.path_for_spec(spec_from_file))
-            self.assertEqual(spec, spec_from_file)
-            self.assertTrue(spec.eq_dag(spec_from_file))
-            self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
-
-        spack.repo.swap(mock_db)
-
-    def test_find(self):
-        """Test that finding specs within an install layout works."""
-        packages = list(spack.repo.all_packages())[:max_packages]
-
-        # Create install prefixes for all packages in the list
-        installed_specs = {}
-        for pkg in packages:
-            if pkg.name.startswith('external'):
-                # External package tests cannot be installed
-                continue
-            spec = pkg.spec.concretized()
-            installed_specs[spec.name] = spec
-            self.layout.create_install_directory(spec)
-
-        # Make sure all the installed specs appear in
-        # DirectoryLayout.all_specs()
-        found_specs = dict((s.name, s) for s in self.layout.all_specs())
-        for name, spec in found_specs.items():
-            self.assertTrue(name in found_specs)
-            self.assertTrue(found_specs[name].eq_dag(spec))
@pytest.fixture()
def layout_and_dir(tmpdir):
    """Yields a YamlDirectoryLayout rooted at a fresh temporary
    directory, together with that directory's path as a string."""
    root = str(tmpdir)
    yield YamlDirectoryLayout(root), root
+
+
def test_read_and_write_spec(
        layout_and_dir, config, builtin_mock
):
    """This goes through each package in spack and creates a directory for
    it.  It then ensures that the spec for the directory's
    installed package can be read back in consistently, and
    finally that the directory can be removed by the directory
    layout.
    """
    layout, tmpdir = layout_and_dir
    packages = list(spack.repo.all_packages())[:max_packages]

    for pkg in packages:
        if pkg.name.startswith('external'):
            # External package tests cannot be installed
            continue
        spec = pkg.spec

        # If a spec fails to concretize, just skip it.  If it is a
        # real error, it will be caught by concretization tests.
        try:
            spec.concretize()
        except Exception:
            continue

        layout.create_install_directory(spec)

        install_dir = layout.path_for_spec(spec)
        spec_path = layout.spec_file_path(spec)

        # Ensure directory has been created in right place.
        assert os.path.isdir(install_dir)
        assert install_dir.startswith(str(tmpdir))

        # Ensure spec file exists when directory is created
        assert os.path.isfile(spec_path)
        assert spec_path.startswith(install_dir)

        # Make sure spec file can be read back in to get the original spec
        spec_from_file = layout.read_spec(spec_path)

        # currently we don't store build dependency information when
        # we write out specs to the filesystem.

        # TODO: fix this when we can concretize more loosely based on
        # TODO: what is installed. We currently omit these to
        # TODO: increase reuse of build dependencies.
        stored_deptypes = ('link', 'run')
        expected = spec.copy(deps=stored_deptypes)
        assert expected == spec_from_file
        # BUG FIX: previously this read ``assert expected.eq_dag`` which
        # only checked that the bound method object is truthy (always
        # True).  Actually compare the two DAGs.
        assert expected.eq_dag(spec_from_file)
        assert spec_from_file.concrete

        # Ensure that specs that come out "normal" are really normal.
        with open(spec_path) as spec_file:
            read_separately = Spec.from_yaml(spec_file.read())

            # TODO: revise this when build deps are in dag_hash
            norm = read_separately.normalized().copy(deps=stored_deptypes)
            assert norm == spec_from_file

            # TODO: revise this when build deps are in dag_hash
            conc = read_separately.concretized().copy(deps=stored_deptypes)
            assert conc == spec_from_file

        # Make sure the hash of the read-in spec is the same
        assert expected.dag_hash() == spec_from_file.dag_hash()

        # Ensure directories are properly removed
        layout.remove_install_directory(spec)
        assert not os.path.isdir(install_dir)
        assert not os.path.exists(install_dir)
+
+
def test_handle_unknown_package(
        layout_and_dir, config, builtin_mock
):
    """This test ensures that spack can at least do *some*
    operations with packages that are installed but that it
    does not know about.  This is actually not such an uncommon
    scenario with spack; it can happen when you switch from a
    git branch where you're working on a new package.

    This test ensures that the directory layout stores enough
    information about installed packages' specs to uninstall
    or query them again if the package goes away.
    """
    layout, _ = layout_and_dir
    mock_db = RepoPath(spack.mock_packages_path)

    not_in_mock = set.difference(
        set(spack.repo.all_package_names()),
        set(mock_db.all_package_names()))
    packages = list(not_in_mock)[:max_packages]

    # Create all the packages that are not in mock.
    installed_specs = {}
    for pkg_name in packages:
        spec = spack.repo.get(pkg_name).spec

        # If a spec fails to concretize, just skip it.  If it is a
        # real error, it will be caught by concretization tests.
        try:
            spec.concretize()
        except Exception:
            continue

        layout.create_install_directory(spec)
        installed_specs[spec] = layout.path_for_spec(spec)

    spack.repo.swap(mock_db)
    try:
        # Now check that even without the package files, we know
        # enough to read a spec from the spec file.
        for spec, path in installed_specs.items():
            spec_from_file = layout.read_spec(
                join_path(path, '.spack', 'spec.yaml')
            )
            # To satisfy these conditions, directory layouts need to
            # read in concrete specs from their install dirs somehow.
            assert path == layout.path_for_spec(spec_from_file)
            assert spec == spec_from_file
            assert spec.eq_dag(spec_from_file)
            assert spec.dag_hash() == spec_from_file.dag_hash()
    finally:
        # ROBUSTNESS FIX: swap the original repo back even when an
        # assertion above fails, so later tests don't inherit the
        # mock repo.
        spack.repo.swap(mock_db)
+
+
def test_find(layout_and_dir, config, builtin_mock):
    """Test that finding specs within an install layout works."""
    layout, _ = layout_and_dir
    packages = list(spack.repo.all_packages())[:max_packages]

    # Create install prefixes for all packages in the list
    installed_specs = {}
    for pkg in packages:
        if pkg.name.startswith('external'):
            # External package tests cannot be installed
            continue
        spec = pkg.spec.concretized()
        installed_specs[spec.name] = spec
        layout.create_install_directory(spec)

    # Make sure all the installed specs appear in
    # DirectoryLayout.all_specs()
    found_specs = dict((s.name, s) for s in layout.all_specs())
    # BUG FIX: the old loop iterated over found_specs and compared it
    # against itself (vacuously true).  Compare the specs we actually
    # installed against what the layout reports.
    for name, spec in installed_specs.items():
        assert name in found_specs
        assert found_specs[name].eq_dag(spec)
diff --git a/lib/spack/spack/test/git_fetch.py b/lib/spack/spack/test/git_fetch.py
index 7aff98cc54..3bd998c5c2 100644
--- a/lib/spack/spack/test/git_fetch.py
+++ b/lib/spack/spack/test/git_fetch.py
@@ -24,93 +24,61 @@
 ##############################################################################
 import os
 
+import pytest
 import spack
 from llnl.util.filesystem import *
-from spack.test.mock_packages_test import *
-from spack.test.mock_repo import MockGitRepo
+from spack.spec import Spec
 from spack.version import ver
 
 
-class GitFetchTest(MockPackagesTest):
-    """Tests fetching from a dummy git repository."""
-
-    def setUp(self):
-        """Create a git repository with master and two other branches,
-           and one tag, so that we can experiment on it."""
-        super(GitFetchTest, self).setUp()
-
-        self.repo = MockGitRepo()
-
-        spec = Spec('git-test')
-        spec.concretize()
-        self.pkg = spack.repo.get(spec, new=True)
-
-    def tearDown(self):
-        """Destroy the stage space used by this test."""
-        super(GitFetchTest, self).tearDown()
-        self.repo.destroy()
-
-    def assert_rev(self, rev):
-        """Check that the current git revision is equal to the supplied rev."""
-        self.assertEqual(self.repo.rev_hash('HEAD'), self.repo.rev_hash(rev))
-
-    def try_fetch(self, rev, test_file, args):
-        """Tries to:
-
-        1. Fetch the repo using a fetch strategy constructed with
-           supplied args.
-        2. Check if the test_file is in the checked out repository.
-        3. Assert that the repository is at the revision supplied.
-        4. Add and remove some files, then reset the repo, and
-           ensure it's all there again.
-        """
-        self.pkg.versions[ver('git')] = args
-
-        with self.pkg.stage:
-            self.pkg.do_stage()
-            self.assert_rev(rev)
-
-            file_path = join_path(self.pkg.stage.source_path, test_file)
-            self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
-            self.assertTrue(os.path.isfile(file_path))
-
-            os.unlink(file_path)
-            self.assertFalse(os.path.isfile(file_path))
-
-            untracked_file = 'foobarbaz'
-            touch(untracked_file)
-            self.assertTrue(os.path.isfile(untracked_file))
-            self.pkg.do_restage()
-            self.assertFalse(os.path.isfile(untracked_file))
-
-            self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
-            self.assertTrue(os.path.isfile(file_path))
-
-            self.assert_rev(rev)
-
-    def test_fetch_master(self):
-        """Test a default git checkout with no commit or tag specified."""
-        self.try_fetch('master', self.repo.r0_file, {
-            'git': self.repo.path
-        })
-
-    def test_fetch_branch(self):
-        """Test fetching a branch."""
-        self.try_fetch(self.repo.branch, self.repo.branch_file, {
-            'git': self.repo.path,
-            'branch': self.repo.branch
-        })
-
-    def test_fetch_tag(self):
-        """Test fetching a tag."""
-        self.try_fetch(self.repo.tag, self.repo.tag_file, {
-            'git': self.repo.path,
-            'tag': self.repo.tag
-        })
-
-    def test_fetch_commit(self):
-        """Test fetching a particular commit."""
-        self.try_fetch(self.repo.r1, self.repo.r1_file, {
-            'git': self.repo.path,
-            'commit': self.repo.r1
-        })
@pytest.fixture(params=['master', 'branch', 'tag', 'commit'])
def type_of_test(request):
    """Parametrized fixture iterating over the kinds of git references
    available in the mock_git_repository fixture."""
    return request.param
+
+
def test_fetch(
        type_of_test,
        mock_git_repository,
        config,
        refresh_builtin_mock
):
    """Tries to:

    1. Fetch the repo using a fetch strategy constructed with
       supplied args (they depend on type_of_test).
    2. Check if the test_file is in the checked out repository.
    3. Assert that the repository is at the revision supplied.
    4. Add and remove some files, then reset the repo, and
       ensure it's all there again.
    """
    # Parameters for this flavor of checkout (master/branch/tag/commit)
    params = mock_git_repository.checks[type_of_test]
    rev_hash = mock_git_repository.hash

    # Construct the package under test and point its only version at
    # the mock repository.
    spec = Spec('git-test')
    spec.concretize()
    pkg = spack.repo.get(spec, new=True)
    pkg.versions[ver('git')] = params.args

    # Enter the stage directory and check some properties
    with pkg.stage:
        pkg.do_stage()
        assert rev_hash('HEAD') == rev_hash(params.revision)

        file_path = join_path(pkg.stage.source_path, params.file)
        assert os.path.isdir(pkg.stage.source_path)
        assert os.path.isfile(file_path)

        # Delete a tracked file and create an untracked one ...
        os.unlink(file_path)
        assert not os.path.isfile(file_path)

        untracked_file = 'foobarbaz'
        touch(untracked_file)
        assert os.path.isfile(untracked_file)

        # ... then restage and verify the working tree is pristine again.
        pkg.do_restage()
        assert not os.path.isfile(untracked_file)
        assert os.path.isdir(pkg.stage.source_path)
        assert os.path.isfile(file_path)

        assert rev_hash('HEAD') == rev_hash(params.revision)
diff --git a/lib/spack/spack/test/hg_fetch.py b/lib/spack/spack/test/hg_fetch.py
index 03e35ea093..71e4693c56 100644
--- a/lib/spack/spack/test/hg_fetch.py
+++ b/lib/spack/spack/test/hg_fetch.py
@@ -23,76 +23,62 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
-import spack
 
-from spack.version import ver
-from spack.test.mock_repo import MockHgRepo
+import pytest
+import spack
 from llnl.util.filesystem import *
-from spack.test.mock_packages_test import *
-
-
-class HgFetchTest(MockPackagesTest):
-    """Tests fetching from a dummy hg repository."""
-
-    def setUp(self):
-        """Create a hg repository with master and two other branches,
-           and one tag, so that we can experiment on it."""
-        super(HgFetchTest, self).setUp()
-
-        self.repo = MockHgRepo()
-
-        spec = Spec('hg-test')
-        spec.concretize()
-        self.pkg = spack.repo.get(spec, new=True)
-
-    def tearDown(self):
-        """Destroy the stage space used by this test."""
-        super(HgFetchTest, self).tearDown()
-        self.repo.destroy()
-
-    def try_fetch(self, rev, test_file, args):
-        """Tries to:
-
-        1. Fetch the repo using a fetch strategy constructed with
-           supplied args.
-        2. Check if the test_file is in the checked out repository.
-        3. Assert that the repository is at the revision supplied.
-        4. Add and remove some files, then reset the repo, and
-           ensure it's all there again.
-        """
-        self.pkg.versions[ver('hg')] = args
-
-        with self.pkg.stage:
-            self.pkg.do_stage()
-            self.assertEqual(self.repo.get_rev(), rev)
-
-            file_path = join_path(self.pkg.stage.source_path, test_file)
-            self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
-            self.assertTrue(os.path.isfile(file_path))
-
-            os.unlink(file_path)
-            self.assertFalse(os.path.isfile(file_path))
-
-            untracked = 'foobarbaz'
-            touch(untracked)
-            self.assertTrue(os.path.isfile(untracked))
-            self.pkg.do_restage()
-            self.assertFalse(os.path.isfile(untracked))
-
-            self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
-            self.assertTrue(os.path.isfile(file_path))
-
-            self.assertEqual(self.repo.get_rev(), rev)
+from spack.spec import Spec
+from spack.version import ver
 
-    def test_fetch_default(self):
-        """Test a default hg checkout with no commit or tag specified."""
-        self.try_fetch(self.repo.r1, self.repo.r1_file, {
-            'hg': self.repo.path
-        })
 
-    def test_fetch_rev0(self):
-        """Test fetching a branch."""
-        self.try_fetch(self.repo.r0, self.repo.r0_file, {
-            'hg': self.repo.path,
-            'revision': self.repo.r0
-        })
@pytest.fixture(params=['default', 'rev0'])
def type_of_test(request):
    """Parametrized fixture iterating over the kinds of checkout
    available in the mock_hg_repository fixture."""
    return request.param
+
+
def test_fetch(
        type_of_test,
        mock_hg_repository,
        config,
        refresh_builtin_mock
):
    """Tries to:

    1. Fetch the repo using a fetch strategy constructed with
       supplied args (they depend on type_of_test).
    2. Check if the test_file is in the checked out repository.
    3. Assert that the repository is at the revision supplied.
    4. Add and remove some files, then reset the repo, and
       ensure it's all there again.
    """
    # Retrieve the right test parameters
    t = mock_hg_repository.checks[type_of_test]
    # NOTE(review): unlike the git variant, this hash helper is called
    # with no argument and compared to t.revision directly -- presumably
    # it reports the current working-copy revision; confirm in conftest.
    h = mock_hg_repository.hash
    # Construct the package under test
    spec = Spec('hg-test')
    spec.concretize()
    pkg = spack.repo.get(spec, new=True)
    # Redirect the package's 'hg' version at the mock repository.
    pkg.versions[ver('hg')] = t.args
    # Enter the stage directory and check some properties
    with pkg.stage:
        pkg.do_stage()
        assert h() == t.revision

        file_path = join_path(pkg.stage.source_path, t.file)
        assert os.path.isdir(pkg.stage.source_path)
        assert os.path.isfile(file_path)

        # Remove a tracked file ...
        os.unlink(file_path)
        assert not os.path.isfile(file_path)

        # ... and add an untracked one.
        untracked_file = 'foobarbaz'
        touch(untracked_file)
        assert os.path.isfile(untracked_file)
        # Restaging must restore the tracked file and drop the
        # untracked one.
        pkg.do_restage()
        assert not os.path.isfile(untracked_file)

        assert os.path.isdir(pkg.stage.source_path)
        assert os.path.isfile(file_path)

        assert h() == t.revision
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index 0524c14cfd..3a83280c6f 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -22,85 +22,71 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import shutil
-import tempfile
-
+import pytest
 import spack
 import spack.store
-from llnl.util.filesystem import *
-from spack.directory_layout import YamlDirectoryLayout
 from spack.database import Database
+from spack.directory_layout import YamlDirectoryLayout
 from spack.fetch_strategy import URLFetchStrategy, FetchStrategyComposite
-from spack.test.mock_packages_test import *
-from spack.test.mock_repo import MockArchive
-
-
-class InstallTest(MockPackagesTest):
-    """Tests install and uninstall on a trivial package."""
-
-    def setUp(self):
-        super(InstallTest, self).setUp()
-
-        # create a simple installable package directory and tarball
-        self.repo = MockArchive()
-
-        # We use a fake package, so skip the checksum.
-        spack.do_checksum = False
-
-        # Use a fake install directory to avoid conflicts bt/w
-        # installed pkgs and mock packages.
-        self.tmpdir = tempfile.mkdtemp()
-        self.orig_layout = spack.store.layout
-        self.orig_db = spack.store.db
-
-        spack.store.layout = YamlDirectoryLayout(self.tmpdir)
-        spack.store.db     = Database(self.tmpdir)
-
-    def tearDown(self):
-        super(InstallTest, self).tearDown()
-        self.repo.destroy()
-
-        # Turn checksumming back on
-        spack.do_checksum = True
-
-        # restore spack's layout.
-        spack.store.layout = self.orig_layout
-        spack.store.db     = self.orig_db
-        shutil.rmtree(self.tmpdir, ignore_errors=True)
-
-    def fake_fetchify(self, pkg):
-        """Fake the URL for a package so it downloads from a file."""
-        fetcher = FetchStrategyComposite()
-        fetcher.append(URLFetchStrategy(self.repo.url))
-        pkg.fetcher = fetcher
-
-    def test_install_and_uninstall(self):
-        # Get a basic concrete spec for the trivial install package.
-        spec = Spec('trivial_install_test_package')
-        spec.concretize()
-        self.assertTrue(spec.concrete)
-
-        # Get the package
-        pkg = spack.repo.get(spec)
-
-        self.fake_fetchify(pkg)
-
-        try:
-            pkg.do_install()
-            pkg.do_uninstall()
-        except Exception:
-            pkg.remove_prefix()
-            raise
-
-    def test_store(self):
-        spec = Spec('cmake-client').concretized()
-
-        for s in spec.traverse():
-            self.fake_fetchify(s.package)
-
-        pkg = spec.package
-        try:
-            pkg.do_install()
-        except Exception:
-            pkg.remove_prefix()
-            raise
+from spack.spec import Spec
+
+
@pytest.fixture()
def install_mockery(tmpdir, config, builtin_mock):
    """Hooks a fake install directory and a fake db into Spack."""
    real_layout, real_db = spack.store.layout, spack.store.db
    # Use a fake install directory to avoid conflicts bt/w
    # installed pkgs and mock packages.
    spack.store.layout = YamlDirectoryLayout(str(tmpdir))
    spack.store.db = Database(str(tmpdir))
    # We use a fake package, so skip the checksum.
    spack.do_checksum = False

    yield

    # Turn checksumming back on and restore Spack's real layout/db.
    spack.do_checksum = True
    spack.store.layout = real_layout
    spack.store.db = real_db
+
+
def fake_fetchify(url, pkg):
    """Fake the URL for a package so it downloads from a file."""
    composite = FetchStrategyComposite()
    composite.append(URLFetchStrategy(url))
    pkg.fetcher = composite
+
+
@pytest.mark.usefixtures('install_mockery')
def test_install_and_uninstall(mock_archive):
    """Install, then uninstall, the trivial test package; on failure
    remove the prefix before re-raising so nothing leaks."""
    # Get a basic concrete spec for the trivial install package.
    spec = Spec('trivial_install_test_package')
    spec.concretize()
    assert spec.concrete

    # Get the package and point it at the mock archive.
    pkg = spack.repo.get(spec)
    fake_fetchify(mock_archive.url, pkg)

    try:
        pkg.do_install()
        pkg.do_uninstall()
    except Exception:
        pkg.remove_prefix()
        raise
+
+
@pytest.mark.usefixtures('install_mockery')
def test_store(mock_archive):
    """Install a package with dependencies into the mocked store."""
    spec = Spec('cmake-client').concretized()

    # Every node in the DAG fetches from the mock archive.
    for node in spec.traverse():
        fake_fetchify(mock_archive.url, node.package)

    pkg = spec.package
    try:
        pkg.do_install()
    except Exception:
        # Clean up the prefix before propagating the failure.
        pkg.remove_prefix()
        raise
diff --git a/lib/spack/spack/test/mirror.py b/lib/spack/spack/test/mirror.py
index f9ff190468..13219ef878 100644
--- a/lib/spack/spack/test/mirror.py
+++ b/lib/spack/spack/test/mirror.py
@@ -22,123 +22,127 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import filecmp
 import os
+import pytest
+
 import spack
 import spack.mirror
-
-from filecmp import dircmp
-from spack.test.mock_packages_test import *
-from spack.test.mock_repo import *
+import spack.util.executable
+from llnl.util.filesystem import join_path
+from spack.spec import Spec
+from spack.stage import Stage
 
 # paths in repos that shouldn't be in the mirror tarballs.
 exclude = ['.hg', '.git', '.svn']
 
-
-class MirrorTest(MockPackagesTest):
-
-    def setUp(self):
-        """Sets up a mock package and a mock repo for each fetch strategy, to
-           ensure that the mirror can create archives for each of them.
-        """
-        super(MirrorTest, self).setUp()
-        self.repos = {}
-
-    def tearDown(self):
-        """Destroy all the stages created by the repos in setup."""
-        super(MirrorTest, self).tearDown()
-        for repo in self.repos.values():
-            repo.destroy()
-        self.repos.clear()
-
-    def set_up_package(self, name, MockRepoClass, url_attr):
-        """Set up a mock package to be mirrored.
-        Each package needs us to:
-
-        1. Set up a mock repo/archive to fetch from.
-        2. Point the package's version args at that repo.
-        """
-        # Set up packages to point at mock repos.
-        spec = Spec(name)
-        spec.concretize()
-
-        # Get the package and fix its fetch args to point to a mock repo
-        pkg = spack.repo.get(spec)
-        repo = MockRepoClass()
-        self.repos[name] = repo
-
-        # change the fetch args of the first (only) version.
-        assert(len(pkg.versions) == 1)
-        v = next(iter(pkg.versions))
-        pkg.versions[v][url_attr] = repo.url
-
-    def check_mirror(self):
-        with Stage('spack-mirror-test') as stage:
-            mirror_root = join_path(stage.path, 'test-mirror')
-
-            # register mirror with spack config
-            mirrors = {'spack-mirror-test': 'file://' + mirror_root}
-            spack.config.update_config('mirrors', mirrors)
-
-            os.chdir(stage.path)
-            spack.mirror.create(
-                mirror_root, self.repos, no_checksum=True)
-
-            # Stage directory exists
-            self.assertTrue(os.path.isdir(mirror_root))
-
-            # check that there are subdirs for each package
-            for name in self.repos:
-                subdir = join_path(mirror_root, name)
-                self.assertTrue(os.path.isdir(subdir))
-
-                files = os.listdir(subdir)
-                self.assertEqual(len(files), 1)
-
-                # Now try to fetch each package.
-                for name, mock_repo in self.repos.items():
-                    spec = Spec(name).concretized()
-                    pkg = spec.package
-
-                    saved_checksum_setting = spack.do_checksum
-                    with pkg.stage:
-                        # Stage the archive from the mirror and cd to it.
-                        spack.do_checksum = False
-                        pkg.do_stage(mirror_only=True)
-                        # Compare the original repo with the expanded archive
-                        original_path = mock_repo.path
-                        if 'svn' in name:
-                            # have to check out the svn repo to compare.
-                            original_path = join_path(
-                                mock_repo.path, 'checked_out')
-                            svn('checkout', mock_repo.url, original_path)
-                        dcmp = dircmp(original_path, pkg.stage.source_path)
-                        # make sure there are no new files in the expanded
-                        # tarball
-                        self.assertFalse(dcmp.right_only)
-                        # and that all original files are present.
-                        self.assertTrue(
-                            all(l in exclude for l in dcmp.left_only))
-                        spack.do_checksum = saved_checksum_setting
-
-    def test_git_mirror(self):
-        self.set_up_package('git-test', MockGitRepo, 'git')
-        self.check_mirror()
-
-    def test_svn_mirror(self):
-        self.set_up_package('svn-test', MockSvnRepo, 'svn')
-        self.check_mirror()
-
-    def test_hg_mirror(self):
-        self.set_up_package('hg-test', MockHgRepo, 'hg')
-        self.check_mirror()
-
-    def test_url_mirror(self):
-        self.set_up_package('trivial_install_test_package', MockArchive, 'url')
-        self.check_mirror()
-
-    def test_all_mirror(self):
-        self.set_up_package('git-test', MockGitRepo, 'git')
-        self.set_up_package('svn-test', MockSvnRepo, 'svn')
-        self.set_up_package('hg-test',  MockHgRepo,  'hg')
-        self.set_up_package('trivial_install_test_package', MockArchive, 'url')
-        self.check_mirror()
# Module-level registry mapping package name -> mock repository; filled
# by set_up_package() and read by check_mirror().
repos = {}
# NOTE(review): resolved at import time with required=True, so merely
# importing this test module errors out when svn is not installed --
# confirm that is intended (it does not skip, it fails collection).
svn = spack.util.executable.which('svn', required=True)
+
+
def set_up_package(name, repository, url_attr):
    """Set up a mock package to be mirrored.
    Each package needs us to:

    1. Set up a mock repo/archive to fetch from.
    2. Point the package's version args at that repo.
    """
    # Concretize the spec and grab its package object.
    spec = Spec(name)
    spec.concretize()
    pkg = spack.repo.get(spec)

    # Remember the repository so check_mirror() can compare against it.
    repos[name] = repository

    # The mock packages declare exactly one version; redirect its
    # fetch argument at the mock repository.
    assert len(pkg.versions) == 1
    version = next(iter(pkg.versions))
    pkg.versions[version][url_attr] = repository.url
+
+
def check_mirror():
    """Create a mirror for everything registered in ``repos``, then
    verify each package can be staged back from the mirror and that the
    expanded archive matches the original mock repository exactly.
    """
    with Stage('spack-mirror-test') as stage:
        mirror_root = join_path(stage.path, 'test-mirror')
        # register mirror with spack config
        mirrors = {'spack-mirror-test': 'file://' + mirror_root}
        spack.config.update_config('mirrors', mirrors)

        os.chdir(stage.path)
        spack.mirror.create(
            mirror_root, repos, no_checksum=True
        )

        # Stage directory exists
        assert os.path.isdir(mirror_root)

        # check that there are subdirs for each package
        for name in repos:
            subdir = join_path(mirror_root, name)
            assert os.path.isdir(subdir)

            files = os.listdir(subdir)
            assert len(files) == 1

        # Now try to fetch each package.
        # BUG FIX: this loop used to be nested inside the subdir loop
        # above, so every package was re-staged once per registered
        # repo; stage each package exactly once instead.
        for name, mock_repo in repos.items():
            spec = Spec(name).concretized()
            pkg = spec.package

            saved_checksum_setting = spack.do_checksum
            with pkg.stage:
                # Stage the archive from the mirror and cd to it.
                spack.do_checksum = False
                try:
                    pkg.do_stage(mirror_only=True)

                    # Compare the original repo with the expanded archive
                    original_path = mock_repo.path
                    if 'svn' in name:
                        # have to check out the svn repo to compare.
                        original_path = join_path(
                            mock_repo.path, 'checked_out')
                        svn('checkout', mock_repo.url, original_path)
                    dcmp = filecmp.dircmp(
                        original_path, pkg.stage.source_path)

                    # make sure there are no new files in the expanded
                    # tarball
                    assert not dcmp.right_only
                    # and that all original files are present.
                    assert all(l in exclude for l in dcmp.left_only)
                finally:
                    # ROBUSTNESS FIX: restore checksumming even when
                    # staging or the comparison raises.
                    spack.do_checksum = saved_checksum_setting
+
+
@pytest.mark.usefixtures('config', 'refresh_builtin_mock')
class TestMirror(object):
    """Each test registers one or more mock repositories with
    set_up_package() and then verifies the mirror round-trip with
    check_mirror().
    """

    @pytest.fixture(autouse=True)
    def _cleanup_repos(self):
        """Empty the module-level repo registry after every test.

        ROBUSTNESS FIX: the previous trailing ``repos.clear()`` calls
        were skipped when a test failed, leaking registered repos into
        subsequent tests; a teardown fixture always runs.
        """
        yield
        repos.clear()

    def test_git_mirror(self, mock_git_repository):
        set_up_package('git-test', mock_git_repository, 'git')
        check_mirror()

    def test_svn_mirror(self, mock_svn_repository):
        set_up_package('svn-test', mock_svn_repository, 'svn')
        check_mirror()

    def test_hg_mirror(self, mock_hg_repository):
        set_up_package('hg-test', mock_hg_repository, 'hg')
        check_mirror()

    def test_url_mirror(self, mock_archive):
        set_up_package('trivial_install_test_package', mock_archive, 'url')
        check_mirror()

    def test_all_mirror(
            self,
            mock_git_repository,
            mock_svn_repository,
            mock_hg_repository,
            mock_archive,
    ):
        set_up_package('git-test', mock_git_repository, 'git')
        set_up_package('svn-test', mock_svn_repository, 'svn')
        set_up_package('hg-test', mock_hg_repository, 'hg')
        set_up_package('trivial_install_test_package', mock_archive, 'url')
        check_mirror()
diff --git a/lib/spack/spack/test/mock_database.py b/lib/spack/spack/test/mock_database.py
deleted file mode 100644
index 1eaec2b598..0000000000
--- a/lib/spack/spack/test/mock_database.py
+++ /dev/null
@@ -1,108 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import shutil
-import tempfile
-
-import spack
-import spack.store
-from spack.spec import Spec
-from spack.database import Database
-from spack.directory_layout import YamlDirectoryLayout
-from spack.test.mock_packages_test import MockPackagesTest
-
-
-class MockDatabase(MockPackagesTest):
-
-    def _mock_install(self, spec):
-        s = Spec(spec)
-        s.concretize()
-        pkg = spack.repo.get(s)
-        pkg.do_install(fake=True)
-
-    def _mock_remove(self, spec):
-        specs = spack.store.db.query(spec)
-        assert len(specs) == 1
-        spec = specs[0]
-        spec.package.do_uninstall(spec)
-
-    def setUp(self):
-        super(MockDatabase, self).setUp()
-        #
-        # TODO: make the mockup below easier.
-        #
-
-        # Make a fake install directory
-        self.install_path = tempfile.mkdtemp()
-        self.spack_install_path = spack.store.root
-        spack.store.root = self.install_path
-
-        self.install_layout = YamlDirectoryLayout(self.install_path)
-        self.spack_install_layout = spack.store.layout
-        spack.store.layout = self.install_layout
-
-        # Make fake database and fake install directory.
-        self.install_db = Database(self.install_path)
-        self.spack_install_db = spack.store.db
-        spack.store.db = self.install_db
-
-        # make a mock database with some packages installed note that
-        # the ref count for dyninst here will be 3, as it's recycled
-        # across each install.
-        #
-        # Here is what the mock DB looks like:
-        #
-        # o  mpileaks     o  mpileaks'    o  mpileaks''
-        # |\              |\              |\
-        # | o  callpath   | o  callpath'  | o  callpath''
-        # |/|             |/|             |/|
-        # o |  mpich      o |  mpich2     o |  zmpi
-        #   |               |             o |  fake
-        #   |               |               |
-        #   |               |______________/
-        #   | .____________/
-        #   |/
-        #   o  dyninst
-        #   |\
-        #   | o  libdwarf
-        #   |/
-        #   o  libelf
-        #
-
-        # Transaction used to avoid repeated writes.
-        with spack.store.db.write_transaction():
-            self._mock_install('mpileaks ^mpich')
-            self._mock_install('mpileaks ^mpich2')
-            self._mock_install('mpileaks ^zmpi')
-
-    def tearDown(self):
-        with spack.store.db.write_transaction():
-            for spec in spack.store.db.query():
-                spec.package.do_uninstall(spec)
-
-        super(MockDatabase, self).tearDown()
-        shutil.rmtree(self.install_path)
-        spack.store.root = self.spack_install_path
-        spack.store.layout = self.spack_install_layout
-        spack.store.db = self.spack_install_db
diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py
deleted file mode 100644
index 69c52c2f53..0000000000
--- a/lib/spack/spack/test/mock_packages_test.py
+++ /dev/null
@@ -1,281 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import os
-import shutil
-import tempfile
-import unittest
-
-import spack
-import spack.config
-from llnl.util.filesystem import mkdirp
-from ordereddict_backport import OrderedDict
-from spack.repository import RepoPath
-from spack.spec import Spec
-
-platform = spack.architecture.platform()
-
-linux_os_name = 'debian'
-linux_os_version = '6'
-
-if platform.name == 'linux':
-    linux_os = platform.operating_system("default_os")
-    linux_os_name = linux_os.name
-    linux_os_version = linux_os.version
-
-mock_compiler_config = """\
-compilers:
-- compiler:
-    spec: clang@3.3
-    operating_system: {0}{1}
-    paths:
-      cc: /path/to/clang
-      cxx: /path/to/clang++
-      f77: None
-      fc: None
-    modules: 'None'
-- compiler:
-    spec: gcc@4.5.0
-    operating_system: {0}{1}
-    paths:
-      cc: /path/to/gcc
-      cxx: /path/to/g++
-      f77: None
-      fc: None
-    modules: 'None'
-- compiler:
-    spec: clang@3.3
-    operating_system: CNL
-    paths:
-      cc: /path/to/clang
-      cxx: /path/to/clang++
-      f77: None
-      fc: None
-    modules: 'None'
-- compiler:
-    spec: clang@3.3
-    operating_system: SuSE11
-    paths:
-      cc: /path/to/clang
-      cxx: /path/to/clang++
-      f77: None
-      fc: None
-    modules: 'None'
-- compiler:
-    spec: clang@3.3
-    operating_system: yosemite
-    paths:
-      cc: /path/to/clang
-      cxx: /path/to/clang++
-      f77: None
-      fc: None
-    modules: 'None'
-- compiler:
-    paths:
-      cc: /path/to/gcc
-      cxx: /path/to/g++
-      f77: /path/to/gfortran
-      fc: /path/to/gfortran
-    operating_system: CNL
-    spec: gcc@4.5.0
-    modules: 'None'
-- compiler:
-    paths:
-      cc: /path/to/gcc
-      cxx: /path/to/g++
-      f77: /path/to/gfortran
-      fc: /path/to/gfortran
-    operating_system: SuSE11
-    spec: gcc@4.5.0
-    modules: 'None'
-- compiler:
-    paths:
-      cc: /path/to/gcc
-      cxx: /path/to/g++
-      f77: /path/to/gfortran
-      fc: /path/to/gfortran
-    operating_system: yosemite
-    spec: gcc@4.5.0
-    modules: 'None'
-- compiler:
-    paths:
-      cc: /path/to/gcc
-      cxx: /path/to/g++
-      f77: /path/to/gfortran
-      fc: /path/to/gfortran
-    operating_system: elcapitan
-    spec: gcc@4.5.0
-    modules: 'None'
-- compiler:
-    spec: clang@3.3
-    operating_system: elcapitan
-    paths:
-      cc: /path/to/clang
-      cxx: /path/to/clang++
-      f77: None
-      fc: None
-    modules: 'None'
-- compiler:
-    spec: gcc@4.7.2
-    operating_system: redhat6
-    paths:
-      cc: /path/to/gcc472
-      cxx: /path/to/g++472
-      f77: /path/to/gfortran472
-      fc: /path/to/gfortran472
-    flags:
-      cflags: -O0
-      cxxflags: -O0
-      fflags: -O0
-    modules: 'None'
-- compiler:
-    spec: clang@3.5
-    operating_system: redhat6
-    paths:
-      cc: /path/to/clang35
-      cxx: /path/to/clang++35
-      f77: None
-      fc: None
-    flags:
-      cflags: -O3
-      cxxflags: -O3
-    modules: 'None'
-""".format(linux_os_name, linux_os_version)
-
-mock_packages_config = """\
-packages:
-  externaltool:
-    buildable: False
-    paths:
-      externaltool@1.0%gcc@4.5.0: /path/to/external_tool
-  externalvirtual:
-    buildable: False
-    paths:
-      externalvirtual@2.0%clang@3.3: /path/to/external_virtual_clang
-      externalvirtual@1.0%gcc@4.5.0: /path/to/external_virtual_gcc
-  externalmodule:
-    buildable: False
-    modules:
-      externalmodule@1.0%gcc@4.5.0: external-module
-"""
-
-mock_config = """\
-config:
-  install_tree: $spack/opt/spack
-  build_stage:
-  - $tempdir
-  - /nfs/tmp2/$user
-  - $spack/var/spack/stage
-  source_cache: $spack/var/spack/cache
-  misc_cache: ~/.spack/cache
-  verify_ssl: true
-  checksum: true
-  dirty: True
-"""
-
-# these are written out to mock config files.
-mock_configs = {
-    'config.yaml': mock_config,
-    'compilers.yaml': mock_compiler_config,
-    'packages.yaml': mock_packages_config,
-}
-
-
-class MockPackagesTest(unittest.TestCase):
-
-    def initmock(self):
-        # Use the mock packages database for these tests.  This allows
-        # us to set up contrived packages that don't interfere with
-        # real ones.
-        self.db = RepoPath(spack.mock_packages_path)
-        spack.repo.swap(self.db)
-
-        # Mock up temporary configuration directories
-        self.temp_config = tempfile.mkdtemp()
-        self.mock_site_config = os.path.join(self.temp_config, 'site')
-        self.mock_user_config = os.path.join(self.temp_config, 'user')
-        mkdirp(self.mock_site_config)
-        mkdirp(self.mock_user_config)
-        for filename, data in mock_configs.items():
-            conf_yaml = os.path.join(self.mock_site_config, filename)
-            with open(conf_yaml, 'w') as f:
-                f.write(data)
-
-        # TODO: Mocking this up is kind of brittle b/c ConfigScope
-        # TODO: constructor modifies config_scopes.  Make it cleaner.
-        spack.config.clear_config_caches()
-        self.real_scopes = spack.config.config_scopes
-
-        spack.config.config_scopes = OrderedDict()
-        spack.config.ConfigScope('site', self.mock_site_config)
-        spack.config.ConfigScope('user', self.mock_user_config)
-
-        # Keep tests from interfering with the actual module path.
-        self.real_share_path = spack.share_path
-        spack.share_path = tempfile.mkdtemp()
-
-        # Store changes to the package's dependencies so we can
-        # restore later.
-        self.saved_deps = {}
-
-    def set_pkg_dep(self, pkg_name, spec, deptypes=spack.alldeps):
-        """Alters dependence information for a package.
-
-        Adds a dependency on <spec> to pkg.
-        Use this to mock up constraints.
-        """
-        spec = Spec(spec)
-
-        # Save original dependencies before making any changes.
-        pkg = spack.repo.get(pkg_name)
-        if pkg_name not in self.saved_deps:
-            self.saved_deps[pkg_name] = (pkg, pkg.dependencies.copy())
-
-        # Change dep spec
-        # XXX(deptype): handle deptypes.
-        pkg.dependencies[spec.name] = {Spec(pkg_name): spec}
-        pkg.dependency_types[spec.name] = set(deptypes)
-
-    def cleanmock(self):
-        """Restore the real packages path after any test."""
-        spack.repo.swap(self.db)
-        spack.config.config_scopes = self.real_scopes
-
-        shutil.rmtree(self.temp_config, ignore_errors=True)
-        spack.config.clear_config_caches()
-
-        # XXX(deptype): handle deptypes.
-        # Restore dependency changes that happened during the test
-        for pkg_name, (pkg, deps) in self.saved_deps.items():
-            pkg.dependencies.clear()
-            pkg.dependencies.update(deps)
-
-        shutil.rmtree(spack.share_path, ignore_errors=True)
-        spack.share_path = self.real_share_path
-
-    def setUp(self):
-        self.initmock()
-
-    def tearDown(self):
-        self.cleanmock()
diff --git a/lib/spack/spack/test/mock_repo.py b/lib/spack/spack/test/mock_repo.py
deleted file mode 100644
index 0ae7dbd516..0000000000
--- a/lib/spack/spack/test/mock_repo.py
+++ /dev/null
@@ -1,202 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import os
-import shutil
-
-from llnl.util.filesystem import *
-from spack.stage import Stage
-from spack.util.executable import which
-
-#
-# VCS Systems used by mock repo code.
-#
-git      = which('git',      required=True)
-svn      = which('svn',      required=True)
-svnadmin = which('svnadmin', required=True)
-hg       = which('hg',       required=True)
-tar      = which('tar',      required=True)
-
-
-class MockRepo(object):
-
-    def __init__(self, stage_name, repo_name):
-        """This creates a stage where some archive/repo files can be staged
-           for testing spack's fetch strategies."""
-        # Stage where this repo has been created
-        self.stage = Stage(stage_name)
-
-        # Full path to the repo within the stage.
-        self.path = join_path(self.stage.path, repo_name)
-        mkdirp(self.path)
-
-    def destroy(self):
-        """Destroy resources associated with this mock repo."""
-        if self.stage:
-            self.stage.destroy()
-
-
-class MockArchive(MockRepo):
-    """Creates a very simple archive directory with a configure script and a
-       makefile that installs to a prefix.  Tars it up into an archive."""
-
-    def __init__(self):
-        repo_name = 'mock-archive-repo'
-        super(MockArchive, self).__init__('mock-archive-stage', repo_name)
-
-        with working_dir(self.path):
-            configure = join_path(self.path, 'configure')
-
-            with open(configure, 'w') as cfg_file:
-                cfg_file.write(
-                    "#!/bin/sh\n"
-                    "prefix=$(echo $1 | sed 's/--prefix=//')\n"
-                    "cat > Makefile <<EOF\n"
-                    "all:\n"
-                    "\techo Building...\n\n"
-                    "install:\n"
-                    "\tmkdir -p $prefix\n"
-                    "\ttouch $prefix/dummy_file\n"
-                    "EOF\n")
-            os.chmod(configure, 0755)
-
-        with working_dir(self.stage.path):
-            archive_name = "%s.tar.gz" % repo_name
-            tar('-czf', archive_name, repo_name)
-
-        self.archive_path = join_path(self.stage.path, archive_name)
-        self.url = 'file://' + self.archive_path
-
-
-class MockVCSRepo(MockRepo):
-
-    def __init__(self, stage_name, repo_name):
-        """This creates a stage and a repo directory within the stage."""
-        super(MockVCSRepo, self).__init__(stage_name, repo_name)
-
-        # Name for rev0 & rev1 files in the repo to be
-        self.r0_file = 'r0_file'
-        self.r1_file = 'r1_file'
-
-
-class MockGitRepo(MockVCSRepo):
-
-    def __init__(self):
-        super(MockGitRepo, self).__init__('mock-git-stage', 'mock-git-repo')
-
-        self.url = 'file://' + self.path
-
-        with working_dir(self.path):
-            git('init')
-
-            # r0 is just the first commit
-            touch(self.r0_file)
-            git('add', self.r0_file)
-            git('commit', '-m', 'mock-git-repo r0')
-
-            self.branch      = 'test-branch'
-            self.branch_file = 'branch_file'
-            git('branch', self.branch)
-
-            self.tag_branch = 'tag-branch'
-            self.tag_file   = 'tag_file'
-            git('branch', self.tag_branch)
-
-            # Check out first branch
-            git('checkout', self.branch)
-            touch(self.branch_file)
-            git('add', self.branch_file)
-            git('commit', '-m' 'r1 test branch')
-
-            # Check out a second branch and tag it
-            git('checkout', self.tag_branch)
-            touch(self.tag_file)
-            git('add', self.tag_file)
-            git('commit', '-m' 'tag test branch')
-
-            self.tag = 'test-tag'
-            git('tag', self.tag)
-
-            git('checkout', 'master')
-
-            # R1 test is the same as test for branch
-            self.r1      = self.rev_hash(self.branch)
-            self.r1_file = self.branch_file
-
-    def rev_hash(self, rev):
-        return git('rev-parse', rev, output=str).strip()
-
-
-class MockSvnRepo(MockVCSRepo):
-
-    def __init__(self):
-        super(MockSvnRepo, self).__init__('mock-svn-stage', 'mock-svn-repo')
-
-        self.url = 'file://' + self.path
-
-        with working_dir(self.stage.path):
-            svnadmin('create', self.path)
-
-            tmp_path = join_path(self.stage.path, 'tmp-path')
-            mkdirp(tmp_path)
-            with working_dir(tmp_path):
-                touch(self.r0_file)
-
-            svn('import', tmp_path, self.url, '-m', 'Initial import r0')
-
-            shutil.rmtree(tmp_path)
-            svn('checkout', self.url, tmp_path)
-            with working_dir(tmp_path):
-                touch(self.r1_file)
-                svn('add', self.r1_file)
-                svn('ci', '-m', 'second revision r1')
-
-            shutil.rmtree(tmp_path)
-
-            self.r0 = '1'
-            self.r1 = '2'
-
-
-class MockHgRepo(MockVCSRepo):
-
-    def __init__(self):
-        super(MockHgRepo, self).__init__('mock-hg-stage', 'mock-hg-repo')
-        self.url = 'file://' + self.path
-
-        with working_dir(self.path):
-            hg('init')
-
-            touch(self.r0_file)
-            hg('add', self.r0_file)
-            hg('commit', '-m', 'revision 0', '-u', 'test')
-            self.r0 = self.get_rev()
-
-            touch(self.r1_file)
-            hg('add', self.r1_file)
-            hg('commit', '-m' 'revision 1', '-u', 'test')
-            self.r1 = self.get_rev()
-
-    def get_rev(self):
-        """Get current mercurial revision."""
-        return hg('id', '-i', output=str).strip()
diff --git a/lib/spack/spack/test/modules.py b/lib/spack/spack/test/modules.py
index 42f072debb..4f35df1982 100644
--- a/lib/spack/spack/test/modules.py
+++ b/lib/spack/spack/test/modules.py
@@ -23,112 +23,124 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import collections
-from contextlib import contextmanager
+import contextlib
 
-import StringIO
+import cStringIO
+import pytest
 import spack.modules
 import spack.spec
-import llnl.util.filesystem
-from spack.test.mock_packages_test import MockPackagesTest
 
-FILE_REGISTRY = collections.defaultdict(StringIO.StringIO)
+# Our "filesystem" for the tests below
+FILE_REGISTRY = collections.defaultdict(cStringIO.StringIO)
+# Spec strings that will be used throughout the tests
+mpich_spec_string = 'mpich@3.0.4'
+mpileaks_spec_string = 'mpileaks'
+libdwarf_spec_string = 'libdwarf arch=x64-linux'
 
 
-# Monkey-patch open to write module files to a StringIO instance
-@contextmanager
-def mock_open(filename, mode):
-    if not mode == 'w':
-        raise RuntimeError(
-            'test.modules : unexpected opening mode for monkey-patched open')
+@pytest.fixture()
+def stringio_open(monkeypatch):
+    """Overrides the `open` builtin in spack.modules with an implementation
+    that writes on a StringIO instance.
+    """
+    @contextlib.contextmanager
+    def _mock(filename, mode):
+        if not mode == 'w':
+            raise RuntimeError('unexpected opening mode for stringio_open')
 
-    FILE_REGISTRY[filename] = StringIO.StringIO()
+        FILE_REGISTRY[filename] = cStringIO.StringIO()
 
-    try:
-        yield FILE_REGISTRY[filename]
-    finally:
-        handle = FILE_REGISTRY[filename]
-        FILE_REGISTRY[filename] = handle.getvalue()
-        handle.close()
+        try:
+            yield FILE_REGISTRY[filename]
+        finally:
+            handle = FILE_REGISTRY[filename]
+            FILE_REGISTRY[filename] = handle.getvalue()
+            handle.close()
 
+    monkeypatch.setattr(spack.modules, 'open', _mock, raising=False)
 
-# Spec strings that will be used throughout the tests
-mpich_spec_string = 'mpich@3.0.4'
-mpileaks_spec_string = 'mpileaks'
-libdwarf_spec_string = 'libdwarf arch=x64-linux'
 
+def get_modulefile_content(factory, spec):
+    """Writes the module file and returns the content as a string.
 
-class HelperFunctionsTests(MockPackagesTest):
-
-    def test_update_dictionary_extending_list(self):
-        target = {
-            'foo': {
-                'a': 1,
-                'b': 2,
-                'd': 4
-            },
-            'bar': [1, 2, 4],
-            'baz': 'foobar'
-        }
-        update = {
-            'foo': {
-                'c': 3,
-            },
-            'bar': [3],
-            'baz': 'foobaz',
-            'newkey': {
-                'd': 4
-            }
-        }
-        spack.modules.update_dictionary_extending_lists(target, update)
-        self.assertTrue(len(target) == 4)
-        self.assertTrue(len(target['foo']) == 4)
-        self.assertTrue(len(target['bar']) == 4)
-        self.assertEqual(target['baz'], 'foobaz')
-
-    def test_inspect_path(self):
-        env = spack.modules.inspect_path('/usr')
-        names = [item.name for item in env]
-        self.assertTrue('PATH' in names)
-        self.assertTrue('LIBRARY_PATH' in names)
-        self.assertTrue('LD_LIBRARY_PATH' in names)
-        self.assertTrue('CPATH' in names)
-
-
-class ModuleFileGeneratorTests(MockPackagesTest):
-    """
-    Base class to test module file generators. Relies on child having defined
-    a 'factory' attribute to create an instance of the generator to be tested.
+    :param factory: module file factory
+    :param spec: spec of the module file to be written
+    :return: content of the module file as a list of lines
+    :rtype: list of str
     """
+    spec.concretize()
+    generator = factory(spec)
+    generator.write()
+    content = FILE_REGISTRY[generator.file_name].split('\n')
+    generator.remove()
+    return content
+
+
+def test_update_dictionary_extending_list():
+    target = {
+        'foo': {
+            'a': 1,
+            'b': 2,
+            'd': 4
+        },
+        'bar': [1, 2, 4],
+        'baz': 'foobar'
+    }
+    update = {
+        'foo': {
+            'c': 3,
+        },
+        'bar': [3],
+        'baz': 'foobaz',
+        'newkey': {
+            'd': 4
+        }
+    }
+    spack.modules.update_dictionary_extending_lists(target, update)
+    assert len(target) == 4
+    assert len(target['foo']) == 4
+    assert len(target['bar']) == 4
+    assert target['baz'] == 'foobaz'
 
-    def setUp(self):
-        super(ModuleFileGeneratorTests, self).setUp()
-        self.configuration_instance = spack.modules._module_config
-        self.module_types_instance = spack.modules.module_types
-        spack.modules.open = mock_open
-        spack.modules.mkdirp = lambda x: None
-        # Make sure that a non-mocked configuration will trigger an error
-        spack.modules._module_config = None
-        spack.modules.module_types = {self.factory.name: self.factory}
-
-    def tearDown(self):
-        del spack.modules.open
-        spack.modules.module_types = self.module_types_instance
-        spack.modules._module_config = self.configuration_instance
-        spack.modules.mkdirp = llnl.util.filesystem.mkdirp
-        super(ModuleFileGeneratorTests, self).tearDown()
-
-    def get_modulefile_content(self, spec):
-        spec.concretize()
-        generator = self.factory(spec)
-        generator.write()
-        content = FILE_REGISTRY[generator.file_name].split('\n')
-        generator.remove()
-        return content
 
+def test_inspect_path():
+    env = spack.modules.inspect_path('/usr')
+    names = [item.name for item in env]
+    assert 'PATH' in names
+    assert 'LIBRARY_PATH' in names
+    assert 'LD_LIBRARY_PATH' in names
+    assert 'CPATH' in names
 
-class TclTests(ModuleFileGeneratorTests):
 
+@pytest.fixture()
+def tcl_factory(tmpdir, monkeypatch):
+    """Returns a factory that writes non-hierarchical Tcl module files."""
     factory = spack.modules.TclModule
+    monkeypatch.setattr(factory, 'path', str(tmpdir))
+    monkeypatch.setattr(spack.modules, 'module_types', {factory.name: factory})
+    return factory
+
+
+@pytest.fixture()
+def lmod_factory(tmpdir, monkeypatch):
+    """Returns a factory that writes hierarchical Lua module files."""
+    factory = spack.modules.LmodModule
+    monkeypatch.setattr(factory, 'path', str(tmpdir))
+    monkeypatch.setattr(spack.modules, 'module_types', {factory.name: factory})
+    return factory
+
+
+@pytest.fixture()
+def dotkit_factory(tmpdir, monkeypatch):
+    """Returns a factory that writes DotKit module files."""
+    factory = spack.modules.Dotkit
+    monkeypatch.setattr(factory, 'path', str(tmpdir))
+    monkeypatch.setattr(spack.modules, 'module_types', {factory.name: factory})
+    return factory
+
+
+@pytest.mark.usefixtures('config', 'builtin_mock', 'stringio_open')
+class TestTcl(object):
 
     configuration_autoload_direct = {
         'enable': ['tcl'],
@@ -230,26 +242,26 @@ class TclTests(ModuleFileGeneratorTests):
         }
     }
 
-    def test_simple_case(self):
+    def test_simple_case(self, tcl_factory):
         spack.modules._module_config = self.configuration_autoload_direct
         spec = spack.spec.Spec(mpich_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertTrue('module-whatis "mpich @3.0.4"' in content)
-        self.assertRaises(TypeError, spack.modules.dependencies,
-                          spec, 'non-existing-tag')
+        content = get_modulefile_content(tcl_factory, spec)
+        assert 'module-whatis "mpich @3.0.4"' in content
+        with pytest.raises(TypeError):
+            spack.modules.dependencies(spec, 'non-existing-tag')
 
-    def test_autoload(self):
+    def test_autoload(self, tcl_factory):
         spack.modules._module_config = self.configuration_autoload_direct
         spec = spack.spec.Spec(mpileaks_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'is-loaded' in x]), 2)
-        self.assertEqual(len([x for x in content if 'module load ' in x]), 2)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'is-loaded' in x]) == 2
+        assert len([x for x in content if 'module load ' in x]) == 2
 
         spack.modules._module_config = self.configuration_autoload_all
         spec = spack.spec.Spec(mpileaks_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'is-loaded' in x]), 5)
-        self.assertEqual(len([x for x in content if 'module load ' in x]), 5)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'is-loaded' in x]) == 5
+        assert len([x for x in content if 'module load ' in x]) == 5
 
         # dtbuild1 has
         # - 1 ('run',) dependency
@@ -258,9 +270,9 @@ def test_autoload(self):
         # Just make sure the 'build' dependency is not there
         spack.modules._module_config = self.configuration_autoload_direct
         spec = spack.spec.Spec('dtbuild1')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'is-loaded' in x]), 2)
-        self.assertEqual(len([x for x in content if 'module load ' in x]), 2)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'is-loaded' in x]) == 2
+        assert len([x for x in content if 'module load ' in x]) == 2
 
         # dtbuild1 has
         # - 1 ('run',) dependency
@@ -269,95 +281,85 @@ def test_autoload(self):
         # Just make sure the 'build' dependency is not there
         spack.modules._module_config = self.configuration_autoload_all
         spec = spack.spec.Spec('dtbuild1')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'is-loaded' in x]), 2)
-        self.assertEqual(len([x for x in content if 'module load ' in x]), 2)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'is-loaded' in x]) == 2
+        assert len([x for x in content if 'module load ' in x]) == 2
 
-    def test_prerequisites(self):
+    def test_prerequisites(self, tcl_factory):
         spack.modules._module_config = self.configuration_prerequisites_direct
         spec = spack.spec.Spec('mpileaks arch=x86-linux')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'prereq' in x]), 2)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'prereq' in x]) == 2
 
         spack.modules._module_config = self.configuration_prerequisites_all
         spec = spack.spec.Spec('mpileaks arch=x86-linux')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'prereq' in x]), 5)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'prereq' in x]) == 5
 
-    def test_alter_environment(self):
+    def test_alter_environment(self, tcl_factory):
         spack.modules._module_config = self.configuration_alter_environment
         spec = spack.spec.Spec('mpileaks platform=test target=x86_64')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x
-                 for x in content
-                 if x.startswith('prepend-path CMAKE_PREFIX_PATH')]), 0)
-        self.assertEqual(
-            len([x for x in content if 'setenv FOO "foo"' in x]), 1)
-        self.assertEqual(len([x for x in content if 'unsetenv BAR' in x]), 1)
-        self.assertEqual(
-            len([x for x in content if 'setenv MPILEAKS_ROOT' in x]), 1)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content
+                    if x.startswith('prepend-path CMAKE_PREFIX_PATH')
+                    ]) == 0
+        assert len([x for x in content if 'setenv FOO "foo"' in x]) == 1
+        assert len([x for x in content if 'unsetenv BAR' in x]) == 1
+        assert len([x for x in content if 'setenv MPILEAKS_ROOT' in x]) == 1
 
         spec = spack.spec.Spec('libdwarf %clang platform=test target=x86_32')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x
-                 for x in content
-                 if x.startswith('prepend-path CMAKE_PREFIX_PATH')]), 0)
-        self.assertEqual(
-            len([x for x in content if 'setenv FOO "foo"' in x]), 0)
-        self.assertEqual(len([x for x in content if 'unsetenv BAR' in x]), 0)
-        self.assertEqual(
-            len([x for x in content if 'is-loaded foo/bar' in x]), 1)
-        self.assertEqual(
-            len([x for x in content if 'module load foo/bar' in x]), 1)
-        self.assertEqual(
-            len([x for x in content if 'setenv LIBDWARF_ROOT' in x]), 1)
-
-    def test_blacklist(self):
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len(
+            [x for x in content if x.startswith('prepend-path CMAKE_PREFIX_PATH')]  # NOQA: ignore=E501
+        ) == 0
+        assert len([x for x in content if 'setenv FOO "foo"' in x]) == 0
+        assert len([x for x in content if 'unsetenv BAR' in x]) == 0
+        assert len([x for x in content if 'is-loaded foo/bar' in x]) == 1
+        assert len([x for x in content if 'module load foo/bar' in x]) == 1
+        assert len([x for x in content if 'setenv LIBDWARF_ROOT' in x]) == 1
+
+    def test_blacklist(self, tcl_factory):
         spack.modules._module_config = self.configuration_blacklist
         spec = spack.spec.Spec('mpileaks ^zmpi')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'is-loaded' in x]), 1)
-        self.assertEqual(len([x for x in content if 'module load ' in x]), 1)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'is-loaded' in x]) == 1
+        assert len([x for x in content if 'module load ' in x]) == 1
         spec = spack.spec.Spec('callpath arch=x86-linux')
         # Returns a StringIO instead of a string as no module file was written
-        self.assertRaises(AttributeError, self.get_modulefile_content, spec)
+        with pytest.raises(AttributeError):
+            get_modulefile_content(tcl_factory, spec)
         spec = spack.spec.Spec('zmpi arch=x86-linux')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(len([x for x in content if 'is-loaded' in x]), 1)
-        self.assertEqual(len([x for x in content if 'module load ' in x]), 1)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if 'is-loaded' in x]) == 1
+        assert len([x for x in content if 'module load ' in x]) == 1
 
-    def test_conflicts(self):
+    def test_conflicts(self, tcl_factory):
         spack.modules._module_config = self.configuration_conflicts
         spec = spack.spec.Spec('mpileaks')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x for x in content if x.startswith('conflict')]), 2)
-        self.assertEqual(
-            len([x for x in content if x == 'conflict mpileaks']), 1)
-        self.assertEqual(
-            len([x for x in content if x == 'conflict intel/14.0.1']), 1)
+        content = get_modulefile_content(tcl_factory, spec)
+        assert len([x for x in content if x.startswith('conflict')]) == 2
+        assert len([x for x in content if x == 'conflict mpileaks']) == 1
+        assert len([x for x in content if x == 'conflict intel/14.0.1']) == 1
 
         spack.modules._module_config = self.configuration_wrong_conflicts
-        self.assertRaises(SystemExit, self.get_modulefile_content, spec)
+        with pytest.raises(SystemExit):
+            get_modulefile_content(tcl_factory, spec)
 
-    def test_suffixes(self):
+    def test_suffixes(self, tcl_factory):
         spack.modules._module_config = self.configuration_suffix
         spec = spack.spec.Spec('mpileaks+debug arch=x86-linux')
         spec.concretize()
-        generator = spack.modules.TclModule(spec)
-        self.assertTrue('foo' in generator.use_name)
+        generator = tcl_factory(spec)
+        assert 'foo' in generator.use_name
 
         spec = spack.spec.Spec('mpileaks~debug arch=x86-linux')
         spec.concretize()
-        generator = spack.modules.TclModule(spec)
-        self.assertTrue('bar' in generator.use_name)
-
+        generator = tcl_factory(spec)
+        assert 'bar' in generator.use_name
 
-class LmodTests(ModuleFileGeneratorTests):
-    factory = spack.modules.LmodModule
 
+@pytest.mark.usefixtures('config', 'builtin_mock', 'stringio_open')
+class TestLmod(object):
     configuration_autoload_direct = {
         'enable': ['lmod'],
         'lmod': {
@@ -411,83 +413,74 @@ class LmodTests(ModuleFileGeneratorTests):
         }
     }
 
-    def test_simple_case(self):
+    def test_simple_case(self, lmod_factory):
         spack.modules._module_config = self.configuration_autoload_direct
         spec = spack.spec.Spec(mpich_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertTrue('-- -*- lua -*-' in content)
-        self.assertTrue('whatis([[Name : mpich]])' in content)
-        self.assertTrue('whatis([[Version : 3.0.4]])' in content)
+        content = get_modulefile_content(lmod_factory, spec)
+        assert '-- -*- lua -*-' in content
+        assert 'whatis([[Name : mpich]])' in content
+        assert 'whatis([[Version : 3.0.4]])' in content
 
-    def test_autoload(self):
+    def test_autoload(self, lmod_factory):
         spack.modules._module_config = self.configuration_autoload_direct
         spec = spack.spec.Spec(mpileaks_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x for x in content if 'if not isloaded(' in x]), 2)
-        self.assertEqual(len([x for x in content if 'load(' in x]), 2)
+        content = get_modulefile_content(lmod_factory, spec)
+        assert len([x for x in content if 'if not isloaded(' in x]) == 2
+        assert len([x for x in content if 'load(' in x]) == 2
 
         spack.modules._module_config = self.configuration_autoload_all
         spec = spack.spec.Spec(mpileaks_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x for x in content if 'if not isloaded(' in x]), 5)
-        self.assertEqual(len([x for x in content if 'load(' in x]), 5)
+        content = get_modulefile_content(lmod_factory, spec)
+        assert len([x for x in content if 'if not isloaded(' in x]) == 5
+        assert len([x for x in content if 'load(' in x]) == 5
 
-    def test_alter_environment(self):
+    def test_alter_environment(self, lmod_factory):
         spack.modules._module_config = self.configuration_alter_environment
         spec = spack.spec.Spec('mpileaks platform=test target=x86_64')
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x
-                 for x in content
-                 if x.startswith('prepend_path("CMAKE_PREFIX_PATH"')]), 0)
-        self.assertEqual(
-            len([x for x in content if 'setenv("FOO", "foo")' in x]), 1)
-        self.assertEqual(
-            len([x for x in content if 'unsetenv("BAR")' in x]), 1)
+        content = get_modulefile_content(lmod_factory, spec)
+        assert len(
+            [x for x in content if x.startswith('prepend_path("CMAKE_PREFIX_PATH"')]  # NOQA: ignore=E501
+        ) == 0
+        assert len([x for x in content if 'setenv("FOO", "foo")' in x]) == 1
+        assert len([x for x in content if 'unsetenv("BAR")' in x]) == 1
 
         spec = spack.spec.Spec('libdwarf %clang platform=test target=x86_32')
-        content = self.get_modulefile_content(spec)
-        print('\n'.join(content))
-        self.assertEqual(
-            len([x
-                 for x in content
-                 if x.startswith('prepend-path("CMAKE_PREFIX_PATH"')]), 0)
-        self.assertEqual(
-            len([x for x in content if 'setenv("FOO", "foo")' in x]), 0)
-        self.assertEqual(
-            len([x for x in content if 'unsetenv("BAR")' in x]), 0)
-
-    def test_blacklist(self):
+        content = get_modulefile_content(lmod_factory, spec)
+        assert len(
+            [x for x in content if x.startswith('prepend-path("CMAKE_PREFIX_PATH"')]  # NOQA: ignore=E501
+        ) == 0
+        assert len([x for x in content if 'setenv("FOO", "foo")' in x]) == 0
+        assert len([x for x in content if 'unsetenv("BAR")' in x]) == 0
+
+    def test_blacklist(self, lmod_factory):
         spack.modules._module_config = self.configuration_blacklist
         spec = spack.spec.Spec(mpileaks_spec_string)
-        content = self.get_modulefile_content(spec)
-        self.assertEqual(
-            len([x for x in content if 'if not isloaded(' in x]), 1)
-        self.assertEqual(len([x for x in content if 'load(' in x]), 1)
+        content = get_modulefile_content(lmod_factory, spec)
+        assert len([x for x in content if 'if not isloaded(' in x]) == 1
+        assert len([x for x in content if 'load(' in x]) == 1
 
-    def test_no_hash(self):
+    def test_no_hash(self, lmod_factory):
         # Make sure that virtual providers (in the hierarchy) always
         # include a hash. Make sure that the module file for the spec
         # does not include a hash if hash_length is 0.
         spack.modules._module_config = self.configuration_no_hash
         spec = spack.spec.Spec(mpileaks_spec_string)
         spec.concretize()
-        module = spack.modules.LmodModule(spec)
+        module = lmod_factory(spec)
         path = module.file_name
-        mpiSpec = spec['mpi']
+        mpi_spec = spec['mpi']
         mpiElement = "{0}/{1}-{2}/".format(
-            mpiSpec.name, mpiSpec.version, mpiSpec.dag_hash(length=7))
-        self.assertTrue(mpiElement in path)
-        mpileaksSpec = spec
-        mpileaksElement = "{0}/{1}.lua".format(
-            mpileaksSpec.name, mpileaksSpec.version)
-        self.assertTrue(path.endswith(mpileaksElement))
-
+            mpi_spec.name, mpi_spec.version, mpi_spec.dag_hash(length=7)
+        )
+        assert mpiElement in path
+        mpileaks_spec = spec
+        mpileaks_element = "{0}/{1}.lua".format(
+            mpileaks_spec.name, mpileaks_spec.version)
+        assert path.endswith(mpileaks_element)
 
-class DotkitTests(MockPackagesTest):
 
+@pytest.mark.usefixtures('config', 'builtin_mock', 'stringio_open')
+class TestDotkit(object):
     configuration_dotkit = {
         'enable': ['dotkit'],
         'dotkit': {
@@ -497,28 +490,9 @@ class DotkitTests(MockPackagesTest):
         }
     }
 
-    def setUp(self):
-        super(DotkitTests, self).setUp()
-        self.configuration_obj = spack.modules._module_config
-        spack.modules.open = mock_open
-        # Make sure that a non-mocked configuration will trigger an error
-        spack.modules._module_config = None
-
-    def tearDown(self):
-        del spack.modules.open
-        spack.modules._module_config = self.configuration_obj
-        super(DotkitTests, self).tearDown()
-
-    def get_modulefile_content(self, spec):
-        spec.concretize()
-        generator = spack.modules.Dotkit(spec)
-        generator.write()
-        content = FILE_REGISTRY[generator.file_name].split('\n')
-        return content
-
-    def test_dotkit(self):
+    def test_dotkit(self, dotkit_factory):
         spack.modules._module_config = self.configuration_dotkit
         spec = spack.spec.Spec('mpileaks arch=x86-linux')
-        content = self.get_modulefile_content(spec)
-        self.assertTrue('#c spack' in content)
-        self.assertTrue('#d mpileaks @2.3' in content)
+        content = get_modulefile_content(dotkit_factory, spec)
+        assert '#c spack' in content
+        assert '#d mpileaks @2.3' in content
diff --git a/lib/spack/spack/test/multimethod.py b/lib/spack/spack/test/multimethod.py
index a885374080..90948f010c 100644
--- a/lib/spack/spack/test/multimethod.py
+++ b/lib/spack/spack/test/multimethod.py
@@ -22,93 +22,99 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-"""
-Test for multi_method dispatch.
-"""
+"""Test for multi_method dispatch."""
 import spack
+import pytest
 from spack.multimethod import *
 from spack.version import *
-from spack.test.mock_packages_test import *
 
 
-class MultiMethodTest(MockPackagesTest):
+def test_no_version_match(builtin_mock):
+    pkg = spack.repo.get('multimethod@2.0')
+    with pytest.raises(NoSuchMethodError):
+        pkg.no_version_2()
 
-    def test_no_version_match(self):
-        pkg = spack.repo.get('multimethod@2.0')
-        self.assertRaises(NoSuchMethodError, pkg.no_version_2)
 
-    def test_one_version_match(self):
-        pkg = spack.repo.get('multimethod@1.0')
-        self.assertEqual(pkg.no_version_2(), 1)
+def test_one_version_match(builtin_mock):
+    pkg = spack.repo.get('multimethod@1.0')
+    assert pkg.no_version_2() == 1
 
-        pkg = spack.repo.get('multimethod@3.0')
-        self.assertEqual(pkg.no_version_2(), 3)
+    pkg = spack.repo.get('multimethod@3.0')
+    assert pkg.no_version_2() == 3
 
-        pkg = spack.repo.get('multimethod@4.0')
-        self.assertEqual(pkg.no_version_2(), 4)
+    pkg = spack.repo.get('multimethod@4.0')
+    assert pkg.no_version_2() == 4
 
-    def test_version_overlap(self):
-        pkg = spack.repo.get('multimethod@2.0')
-        self.assertEqual(pkg.version_overlap(), 1)
 
-        pkg = spack.repo.get('multimethod@5.0')
-        self.assertEqual(pkg.version_overlap(), 2)
+def test_version_overlap(builtin_mock):
+    pkg = spack.repo.get('multimethod@2.0')
+    assert pkg.version_overlap() == 1
 
-    def test_mpi_version(self):
-        pkg = spack.repo.get('multimethod^mpich@3.0.4')
-        self.assertEqual(pkg.mpi_version(), 3)
+    pkg = spack.repo.get('multimethod@5.0')
+    assert pkg.version_overlap() == 2
 
-        pkg = spack.repo.get('multimethod^mpich2@1.2')
-        self.assertEqual(pkg.mpi_version(), 2)
 
-        pkg = spack.repo.get('multimethod^mpich@1.0')
-        self.assertEqual(pkg.mpi_version(), 1)
+def test_mpi_version(builtin_mock):
+    pkg = spack.repo.get('multimethod^mpich@3.0.4')
+    assert pkg.mpi_version() == 3
 
-    def test_undefined_mpi_version(self):
-        pkg = spack.repo.get('multimethod^mpich@0.4')
-        self.assertEqual(pkg.mpi_version(), 1)
+    pkg = spack.repo.get('multimethod^mpich2@1.2')
+    assert pkg.mpi_version() == 2
 
-        pkg = spack.repo.get('multimethod^mpich@1.4')
-        self.assertEqual(pkg.mpi_version(), 1)
+    pkg = spack.repo.get('multimethod^mpich@1.0')
+    assert pkg.mpi_version() == 1
 
-    def test_default_works(self):
-        pkg = spack.repo.get('multimethod%gcc')
-        self.assertEqual(pkg.has_a_default(), 'gcc')
 
-        pkg = spack.repo.get('multimethod%intel')
-        self.assertEqual(pkg.has_a_default(), 'intel')
+def test_undefined_mpi_version(builtin_mock):
+    pkg = spack.repo.get('multimethod^mpich@0.4')
+    assert pkg.mpi_version() == 1
 
-        pkg = spack.repo.get('multimethod%pgi')
-        self.assertEqual(pkg.has_a_default(), 'default')
+    pkg = spack.repo.get('multimethod^mpich@1.4')
+    assert pkg.mpi_version() == 1
 
-    def test_target_match(self):
-        platform = spack.architecture.platform()
-        targets = platform.targets.values()
-        for target in targets[:-1]:
-            pkg = spack.repo.get('multimethod target=' + target.name)
-            self.assertEqual(pkg.different_by_target(), target.name)
 
-        pkg = spack.repo.get('multimethod target=' + targets[-1].name)
-        if len(targets) == 1:
-            self.assertEqual(pkg.different_by_target(), targets[-1].name)
-        else:
-            self.assertRaises(NoSuchMethodError, pkg.different_by_target)
+def test_default_works(builtin_mock):
+    pkg = spack.repo.get('multimethod%gcc')
+    assert pkg.has_a_default() == 'gcc'
 
-    def test_dependency_match(self):
-        pkg = spack.repo.get('multimethod^zmpi')
-        self.assertEqual(pkg.different_by_dep(), 'zmpi')
+    pkg = spack.repo.get('multimethod%intel')
+    assert pkg.has_a_default() == 'intel'
 
-        pkg = spack.repo.get('multimethod^mpich')
-        self.assertEqual(pkg.different_by_dep(), 'mpich')
+    pkg = spack.repo.get('multimethod%pgi')
+    assert pkg.has_a_default() == 'default'
 
-        # If we try to switch on some entirely different dep, it's ambiguous,
-        # but should take the first option
-        pkg = spack.repo.get('multimethod^foobar')
-        self.assertEqual(pkg.different_by_dep(), 'mpich')
 
-    def test_virtual_dep_match(self):
-        pkg = spack.repo.get('multimethod^mpich2')
-        self.assertEqual(pkg.different_by_virtual_dep(), 2)
+def test_target_match(builtin_mock):
+    platform = spack.architecture.platform()
+    targets = platform.targets.values()
+    for target in targets[:-1]:
+        pkg = spack.repo.get('multimethod target=' + target.name)
+        assert pkg.different_by_target() == target.name
 
-        pkg = spack.repo.get('multimethod^mpich@1.0')
-        self.assertEqual(pkg.different_by_virtual_dep(), 1)
+    pkg = spack.repo.get('multimethod target=' + targets[-1].name)
+    if len(targets) == 1:
+        assert pkg.different_by_target() == targets[-1].name
+    else:
+        with pytest.raises(NoSuchMethodError):
+            pkg.different_by_target()
+
+
+def test_dependency_match(builtin_mock):
+    pkg = spack.repo.get('multimethod^zmpi')
+    assert pkg.different_by_dep() == 'zmpi'
+
+    pkg = spack.repo.get('multimethod^mpich')
+    assert pkg.different_by_dep() == 'mpich'
+
+    # If we try to switch on some entirely different dep, it's ambiguous,
+    # but should take the first option
+    pkg = spack.repo.get('multimethod^foobar')
+    assert pkg.different_by_dep() == 'mpich'
+
+
+def test_virtual_dep_match(builtin_mock):
+    pkg = spack.repo.get('multimethod^mpich2')
+    assert pkg.different_by_virtual_dep() == 2
+
+    pkg = spack.repo.get('multimethod^mpich@1.0')
+    assert pkg.different_by_virtual_dep() == 1
diff --git a/lib/spack/spack/test/optional_deps.py b/lib/spack/spack/test/optional_deps.py
index a9a2b9abf5..f013817b6f 100644
--- a/lib/spack/spack/test/optional_deps.py
+++ b/lib/spack/spack/test/optional_deps.py
@@ -22,93 +22,91 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import pytest
 from spack.spec import Spec
-from spack.test.mock_packages_test import *
 
 
-class ConcretizeTest(MockPackagesTest):
-
-    def check_normalize(self, spec_string, expected):
-        spec = Spec(spec_string)
-        spec.normalize()
-        self.assertEqual(spec, expected)
-        self.assertTrue(spec.eq_dag(expected))
-
-    def test_normalize_simple_conditionals(self):
-        self.check_normalize('optional-dep-test', Spec('optional-dep-test'))
-        self.check_normalize('optional-dep-test~a',
-                             Spec('optional-dep-test~a'))
-
-        self.check_normalize('optional-dep-test+a',
-                             Spec('optional-dep-test+a', Spec('a')))
-
-        self.check_normalize('optional-dep-test a=true',
-                             Spec('optional-dep-test a=true', Spec('a')))
-
-        self.check_normalize('optional-dep-test a=true',
-                             Spec('optional-dep-test+a', Spec('a')))
-
-        self.check_normalize('optional-dep-test@1.1',
-                             Spec('optional-dep-test@1.1', Spec('b')))
-
-        self.check_normalize('optional-dep-test%intel',
-                             Spec('optional-dep-test%intel', Spec('c')))
-
-        self.check_normalize(
-            'optional-dep-test%intel@64.1',
-            Spec('optional-dep-test%intel@64.1', Spec('c'), Spec('d')))
-
-        self.check_normalize(
-            'optional-dep-test%intel@64.1.2',
-            Spec('optional-dep-test%intel@64.1.2', Spec('c'), Spec('d')))
-
-        self.check_normalize('optional-dep-test%clang@35',
-                             Spec('optional-dep-test%clang@35', Spec('e')))
-
-    def test_multiple_conditionals(self):
-        self.check_normalize(
-            'optional-dep-test+a@1.1',
-            Spec('optional-dep-test+a@1.1', Spec('a'), Spec('b')))
-
-        self.check_normalize(
-            'optional-dep-test+a%intel',
-            Spec('optional-dep-test+a%intel', Spec('a'), Spec('c')))
-
-        self.check_normalize(
-            'optional-dep-test@1.1%intel',
-            Spec('optional-dep-test@1.1%intel', Spec('b'), Spec('c')))
-
-        self.check_normalize('optional-dep-test@1.1%intel@64.1.2+a',
-                             Spec('optional-dep-test@1.1%intel@64.1.2+a',
-                                  Spec('b'), Spec('a'), Spec('c'), Spec('d')))
-
-        self.check_normalize('optional-dep-test@1.1%clang@36.5+a',
-                             Spec('optional-dep-test@1.1%clang@36.5+a',
-                                  Spec('b'), Spec('a'), Spec('e')))
-
-    def test_chained_mpi(self):
-        self.check_normalize('optional-dep-test-2+mpi',
-                             Spec('optional-dep-test-2+mpi',
-                                  Spec('optional-dep-test+mpi',
-                                       Spec('mpi'))))
-
-    def test_default_variant(self):
-        spec = Spec('optional-dep-test-3')
-        spec.concretize()
-        self.assertTrue('a' in spec)
-
-        spec = Spec('optional-dep-test-3~var')
-        spec.concretize()
-        self.assertTrue('a' in spec)
-
-        spec = Spec('optional-dep-test-3+var')
-        spec.concretize()
-        self.assertTrue('b' in spec)
-
-    def test_transitive_chain(self):
+@pytest.fixture(
+    params=[
+        # Normalize simple conditionals
+        ('optional-dep-test', Spec('optional-dep-test')),
+        ('optional-dep-test~a', Spec('optional-dep-test~a')),
+        ('optional-dep-test+a', Spec('optional-dep-test+a', Spec('a'))),
+        ('optional-dep-test a=true', Spec(
+            'optional-dep-test a=true', Spec('a')
+        )),
+        ('optional-dep-test a=true', Spec('optional-dep-test+a', Spec('a'))),
+        ('optional-dep-test@1.1', Spec('optional-dep-test@1.1', Spec('b'))),
+        ('optional-dep-test%intel', Spec(
+            'optional-dep-test%intel', Spec('c')
+        )),
+        ('optional-dep-test%intel@64.1', Spec(
+            'optional-dep-test%intel@64.1', Spec('c'), Spec('d')
+        )),
+        ('optional-dep-test%intel@64.1.2', Spec(
+            'optional-dep-test%intel@64.1.2', Spec('c'), Spec('d')
+        )),
+        ('optional-dep-test%clang@35', Spec(
+            'optional-dep-test%clang@35', Spec('e')
+        )),
+        # Normalize multiple conditionals
+        ('optional-dep-test+a@1.1',  Spec(
+            'optional-dep-test+a@1.1', Spec('a'), Spec('b')
+        )),
+        ('optional-dep-test+a%intel', Spec(
+            'optional-dep-test+a%intel', Spec('a'), Spec('c')
+        )),
+        ('optional-dep-test@1.1%intel', Spec(
+            'optional-dep-test@1.1%intel', Spec('b'), Spec('c')
+        )),
+        ('optional-dep-test@1.1%intel@64.1.2+a', Spec(
+            'optional-dep-test@1.1%intel@64.1.2+a',
+            Spec('b'),
+            Spec('a'),
+            Spec('c'),
+            Spec('d')
+        )),
+        ('optional-dep-test@1.1%clang@36.5+a', Spec(
+            'optional-dep-test@1.1%clang@36.5+a',
+            Spec('b'),
+            Spec('a'),
+            Spec('e')
+        )),
+        # Chained MPI
+        ('optional-dep-test-2+mpi', Spec(
+            'optional-dep-test-2+mpi',
+            Spec('optional-dep-test+mpi', Spec('mpi'))
+        )),
         # Each of these dependencies comes from a conditional
         # dependency on another.  This requires iterating to evaluate
         # the whole chain.
-        self.check_normalize(
-            'optional-dep-test+f',
-            Spec('optional-dep-test+f', Spec('f'), Spec('g'), Spec('mpi')))
+        ('optional-dep-test+f', Spec(
+            'optional-dep-test+f', Spec('f'), Spec('g'), Spec('mpi')
+        ))
+    ]
+)
+def spec_and_expected(request):
+    """Parameters for the normalization test."""
+    return request.param
+
+
+def test_normalize(spec_and_expected, config, builtin_mock):
+    spec, expected = spec_and_expected
+    spec = Spec(spec)
+    spec.normalize()
+    assert spec == expected
+    assert spec.eq_dag(expected)
+
+
+def test_default_variant(config, builtin_mock):
+    spec = Spec('optional-dep-test-3')
+    spec.concretize()
+    assert 'a' in spec
+
+    spec = Spec('optional-dep-test-3~var')
+    spec.concretize()
+    assert 'a' in spec
+
+    spec = Spec('optional-dep-test-3+var')
+    spec.concretize()
+    assert 'b' in spec
diff --git a/lib/spack/spack/test/packages.py b/lib/spack/spack/test/packages.py
index 1217568c9c..39bbe4a954 100644
--- a/lib/spack/spack/test/packages.py
+++ b/lib/spack/spack/test/packages.py
@@ -25,93 +25,106 @@
 import spack
 from llnl.util.filesystem import join_path
 from spack.repository import Repo
-from spack.test.mock_packages_test import *
 from spack.util.naming import mod_to_class
 from spack.spec import *
 
 
-class PackagesTest(MockPackagesTest):
-
-    def test_load_package(self):
-        spack.repo.get('mpich')
-
-    def test_package_name(self):
-        pkg = spack.repo.get('mpich')
-        self.assertEqual(pkg.name, 'mpich')
-
-    def test_package_filename(self):
-        repo = Repo(spack.mock_packages_path)
-        filename = repo.filename_for_package_name('mpich')
-        self.assertEqual(filename,
-                         join_path(spack.mock_packages_path,
-                                   'packages', 'mpich', 'package.py'))
-
-    def test_nonexisting_package_filename(self):
-        repo = Repo(spack.mock_packages_path)
-        filename = repo.filename_for_package_name('some-nonexisting-package')
-        self.assertEqual(
-            filename,
-            join_path(spack.mock_packages_path,
-                      'packages', 'some-nonexisting-package', 'package.py'))
-
-    def test_package_class_names(self):
-        self.assertEqual('Mpich',          mod_to_class('mpich'))
-        self.assertEqual('PmgrCollective', mod_to_class('pmgr_collective'))
-        self.assertEqual('PmgrCollective', mod_to_class('pmgr-collective'))
-        self.assertEqual('Pmgrcollective', mod_to_class('PmgrCollective'))
-        self.assertEqual('_3db',        mod_to_class('3db'))
-
-    #
-    # Below tests target direct imports of spack packages from the
-    # spack.pkg namespace
-    #
-
-    def test_import_package(self):
-        import spack.pkg.builtin.mock.mpich             # noqa
-
-    def test_import_package_as(self):
-        import spack.pkg.builtin.mock.mpich as mp       # noqa
-
-    def test_import_class_from_package(self):
-        from spack.pkg.builtin.mock.mpich import Mpich  # noqa
-
-    def test_import_module_from_package(self):
-        from spack.pkg.builtin.mock import mpich        # noqa
-
-    def test_import_namespace_container_modules(self):
-        import spack.pkg                                # noqa
-        import spack.pkg as p                           # noqa
-        from spack import pkg                           # noqa
-
-        import spack.pkg.builtin                        # noqa
-        import spack.pkg.builtin as b                   # noqa
-        from spack.pkg import builtin                   # noqa
-
-        import spack.pkg.builtin.mock                   # noqa
-        import spack.pkg.builtin.mock as m              # noqa
-        from spack.pkg.builtin import mock              # noqa
-
-    def test_inheritance_of_diretives(self):
-        p = spack.repo.get('simple_inheritance')
-
-        # Check dictionaries that should have been filled by directives
-        self.assertEqual(len(p.dependencies), 3)
-        self.assertTrue('cmake' in p.dependencies)
-        self.assertTrue('openblas' in p.dependencies)
-        self.assertTrue('mpi' in p.dependencies)
-        self.assertEqual(len(p.provided), 2)
-
-        # Check that Spec instantiation behaves as we expect
-        s = Spec('simple_inheritance')
-        s.concretize()
-        self.assertTrue('^cmake' in s)
-        self.assertTrue('^openblas' in s)
-        self.assertTrue('+openblas' in s)
-        self.assertTrue('mpi' in s)
-
-        s = Spec('simple_inheritance~openblas')
-        s.concretize()
-        self.assertTrue('^cmake' in s)
-        self.assertTrue('^openblas' not in s)
-        self.assertTrue('~openblas' in s)
-        self.assertTrue('mpi' in s)
+def test_load_package(builtin_mock):
+    spack.repo.get('mpich')
+
+
+def test_package_name(builtin_mock):
+    pkg = spack.repo.get('mpich')
+    assert pkg.name == 'mpich'
+
+
+def test_package_filename(builtin_mock):
+    repo = Repo(spack.mock_packages_path)
+    filename = repo.filename_for_package_name('mpich')
+    assert filename == join_path(
+        spack.mock_packages_path,
+        'packages',
+        'mpich',
+        'package.py'
+    )
+
+
+def test_nonexisting_package_filename():
+    repo = Repo(spack.mock_packages_path)
+    filename = repo.filename_for_package_name('some-nonexisting-package')
+    assert filename == join_path(
+        spack.mock_packages_path,
+        'packages',
+        'some-nonexisting-package',
+        'package.py'
+    )
+
+
+def test_package_class_names():
+    assert 'Mpich' == mod_to_class('mpich')
+    assert 'PmgrCollective' == mod_to_class('pmgr_collective')
+    assert 'PmgrCollective' == mod_to_class('pmgr-collective')
+    assert 'Pmgrcollective' == mod_to_class('PmgrCollective')
+    assert '_3db' == mod_to_class('3db')
+
+
+# Below tests target direct imports of spack packages from the
+# spack.pkg namespace
+def test_import_package(builtin_mock):
+    import spack.pkg.builtin.mock.mpich             # noqa
+
+
+def test_import_package_as(builtin_mock):
+    import spack.pkg.builtin.mock.mpich as mp       # noqa
+
+    import spack.pkg.builtin.mock                   # noqa
+    import spack.pkg.builtin.mock as m              # noqa
+    from spack.pkg.builtin import mock              # noqa
+
+
+def test_inheritance_of_diretives():
+    p = spack.repo.get('simple_inheritance')
+
+    # Check dictionaries that should have been filled by directives
+    assert len(p.dependencies) == 3
+    assert 'cmake' in p.dependencies
+    assert 'openblas' in p.dependencies
+    assert 'mpi' in p.dependencies
+    assert len(p.provided) == 2
+
+    # Check that Spec instantiation behaves as we expect
+    s = Spec('simple_inheritance')
+    s.concretize()
+    assert '^cmake' in s
+    assert '^openblas' in s
+    assert '+openblas' in s
+    assert 'mpi' in s
+
+    s = Spec('simple_inheritance~openblas')
+    s.concretize()
+    assert '^cmake' in s
+    assert '^openblas' not in s
+    assert '~openblas' in s
+    assert 'mpi' in s
+
+
+def test_import_class_from_package(builtin_mock):
+    from spack.pkg.builtin.mock.mpich import Mpich  # noqa
+
+
+def test_import_module_from_package(builtin_mock):
+    from spack.pkg.builtin.mock import mpich        # noqa
+
+
+def test_import_namespace_container_modules(builtin_mock):
+    import spack.pkg                                # noqa
+    import spack.pkg as p                           # noqa
+    from spack import pkg                           # noqa
+
+    import spack.pkg.builtin                        # noqa
+    import spack.pkg.builtin as b                   # noqa
+    from spack.pkg import builtin                   # noqa
+
+    import spack.pkg.builtin.mock                   # noqa
+    import spack.pkg.builtin.mock as m              # noqa
+    from spack.pkg.builtin import mock              # noqa
diff --git a/lib/spack/spack/test/provider_index.py b/lib/spack/spack/test/provider_index.py
index d785038899..a176d0c315 100644
--- a/lib/spack/spack/test/provider_index.py
+++ b/lib/spack/spack/test/provider_index.py
@@ -37,57 +37,57 @@
                     mpi@:10.0: set([zmpi])},
     'stuff': {stuff: set([externalvirtual])}}
 """
-from StringIO import StringIO
-
+import StringIO
 import spack
-from spack.spec import Spec
 from spack.provider_index import ProviderIndex
-from spack.test.mock_packages_test import *
+from spack.spec import Spec
+
+
+def test_yaml_round_trip(builtin_mock):
+    p = ProviderIndex(spack.repo.all_package_names())
+
+    ostream = StringIO.StringIO()
+    p.to_yaml(ostream)
 
+    istream = StringIO.StringIO(ostream.getvalue())
+    q = ProviderIndex.from_yaml(istream)
 
-class ProviderIndexTest(MockPackagesTest):
+    assert p == q
 
-    def test_yaml_round_trip(self):
-        p = ProviderIndex(spack.repo.all_package_names())
 
-        ostream = StringIO()
-        p.to_yaml(ostream)
+def test_providers_for_simple(builtin_mock):
+    p = ProviderIndex(spack.repo.all_package_names())
 
-        istream = StringIO(ostream.getvalue())
-        q = ProviderIndex.from_yaml(istream)
+    blas_providers = p.providers_for('blas')
+    assert Spec('netlib-blas') in blas_providers
+    assert Spec('openblas') in blas_providers
+    assert Spec('openblas-with-lapack') in blas_providers
 
-        self.assertEqual(p, q)
+    lapack_providers = p.providers_for('lapack')
+    assert Spec('netlib-lapack') in lapack_providers
+    assert Spec('openblas-with-lapack') in lapack_providers
 
-    def test_providers_for_simple(self):
-        p = ProviderIndex(spack.repo.all_package_names())
 
-        blas_providers = p.providers_for('blas')
-        self.assertTrue(Spec('netlib-blas') in blas_providers)
-        self.assertTrue(Spec('openblas') in blas_providers)
-        self.assertTrue(Spec('openblas-with-lapack') in blas_providers)
+def test_mpi_providers(builtin_mock):
+    p = ProviderIndex(spack.repo.all_package_names())
 
-        lapack_providers = p.providers_for('lapack')
-        self.assertTrue(Spec('netlib-lapack') in lapack_providers)
-        self.assertTrue(Spec('openblas-with-lapack') in lapack_providers)
+    mpi_2_providers = p.providers_for('mpi@2')
+    assert Spec('mpich2') in mpi_2_providers
+    assert Spec('mpich@3:') in mpi_2_providers
 
-    def test_mpi_providers(self):
-        p = ProviderIndex(spack.repo.all_package_names())
+    mpi_3_providers = p.providers_for('mpi@3')
+    assert Spec('mpich2') not in mpi_3_providers
+    assert Spec('mpich@3:') in mpi_3_providers
+    assert Spec('zmpi') in mpi_3_providers
 
-        mpi_2_providers = p.providers_for('mpi@2')
-        self.assertTrue(Spec('mpich2') in mpi_2_providers)
-        self.assertTrue(Spec('mpich@3:') in mpi_2_providers)
 
-        mpi_3_providers = p.providers_for('mpi@3')
-        self.assertTrue(Spec('mpich2') not in mpi_3_providers)
-        self.assertTrue(Spec('mpich@3:') in mpi_3_providers)
-        self.assertTrue(Spec('zmpi') in mpi_3_providers)
+def test_equal(builtin_mock):
+    p = ProviderIndex(spack.repo.all_package_names())
+    q = ProviderIndex(spack.repo.all_package_names())
+    assert p == q
 
-    def test_equal(self):
-        p = ProviderIndex(spack.repo.all_package_names())
-        q = ProviderIndex(spack.repo.all_package_names())
-        self.assertEqual(p, q)
 
-    def test_copy(self):
-        p = ProviderIndex(spack.repo.all_package_names())
-        q = p.copy()
-        self.assertEqual(p, q)
+def test_copy(builtin_mock):
+    p = ProviderIndex(spack.repo.all_package_names())
+    q = p.copy()
+    assert p == q
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index dd536f945c..e1d3f24e07 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -28,26 +28,64 @@
 
     spack/lib/spack/spack/test/mock_packages
 """
+import pytest
 import spack
 import spack.architecture
 import spack.package
 
 from spack.spec import Spec
-from spack.test.mock_packages_test import *
 
 
-class SpecDagTest(MockPackagesTest):
+def check_links(spec_to_check):
+    for spec in spec_to_check.traverse():
+        for dependent in spec.dependents():
+            assert spec.name in dependent.dependencies_dict()
 
-    def test_conflicting_package_constraints(self):
-        self.set_pkg_dep('mpileaks', 'mpich@1.0')
-        self.set_pkg_dep('callpath', 'mpich@2.0')
+        for dependency in spec.dependencies():
+            assert spec.name in dependency.dependents_dict()
+
+
+@pytest.fixture()
+def saved_deps():
+    """Returns a dictionary to save the dependencies."""
+    return {}
+
+
+@pytest.fixture()
+def set_dependency(saved_deps):
+    """Returns a function that alters the dependency information
+    for a package.
+    """
+    def _mock(pkg_name, spec, deptypes=spack.alldeps):
+        """Alters dependence information for a package.
+
+        Adds a dependency on <spec> to pkg. Use this to mock up constraints.
+        """
+        spec = Spec(spec)
+        # Save original dependencies before making any changes.
+        pkg = spack.repo.get(pkg_name)
+        if pkg_name not in saved_deps:
+            saved_deps[pkg_name] = (pkg, pkg.dependencies.copy())
+        # Change dep spec
+        # XXX(deptype): handle deptypes.
+        pkg.dependencies[spec.name] = {Spec(pkg_name): spec}
+        pkg.dependency_types[spec.name] = set(deptypes)
+    return _mock
+
+
+@pytest.mark.usefixtures('refresh_builtin_mock')
+class TestSpecDag(object):
+
+    def test_conflicting_package_constraints(self, set_dependency):
+        set_dependency('mpileaks', 'mpich@1.0')
+        set_dependency('callpath', 'mpich@2.0')
 
         spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
 
-        # TODO: try to do something to showt that the issue was with
+        # TODO: try to do something to show that the issue was with
         # TODO: the user's input or with package inconsistencies.
-        self.assertRaises(spack.spec.UnsatisfiableVersionSpecError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.UnsatisfiableVersionSpecError):
+            spec.normalize()
 
     def test_preorder_node_traversal(self):
         dag = Spec('mpileaks ^zmpi')
@@ -58,10 +96,10 @@ def test_preorder_node_traversal(self):
         pairs = zip([0, 1, 2, 3, 4, 2, 3], names)
 
         traversal = dag.traverse()
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
         traversal = dag.traverse(depth=True)
-        self.assertEqual([(x, y.name) for x, y in traversal], pairs)
+        assert [(x, y.name) for x, y in traversal] == pairs
 
     def test_preorder_edge_traversal(self):
         dag = Spec('mpileaks ^zmpi')
@@ -72,10 +110,10 @@ def test_preorder_edge_traversal(self):
         pairs = zip([0, 1, 2, 3, 4, 3, 2, 3, 1], names)
 
         traversal = dag.traverse(cover='edges')
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
         traversal = dag.traverse(cover='edges', depth=True)
-        self.assertEqual([(x, y.name) for x, y in traversal], pairs)
+        assert [(x, y.name) for x, y in traversal] == pairs
 
     def test_preorder_path_traversal(self):
         dag = Spec('mpileaks ^zmpi')
@@ -86,10 +124,10 @@ def test_preorder_path_traversal(self):
         pairs = zip([0, 1, 2, 3, 4, 3, 2, 3, 1, 2], names)
 
         traversal = dag.traverse(cover='paths')
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
         traversal = dag.traverse(cover='paths', depth=True)
-        self.assertEqual([(x, y.name) for x, y in traversal], pairs)
+        assert [(x, y.name) for x, y in traversal] == pairs
 
     def test_postorder_node_traversal(self):
         dag = Spec('mpileaks ^zmpi')
@@ -100,10 +138,10 @@ def test_postorder_node_traversal(self):
         pairs = zip([4, 3, 2, 3, 2, 1, 0], names)
 
         traversal = dag.traverse(order='post')
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
         traversal = dag.traverse(depth=True, order='post')
-        self.assertEqual([(x, y.name) for x, y in traversal], pairs)
+        assert [(x, y.name) for x, y in traversal] == pairs
 
     def test_postorder_edge_traversal(self):
         dag = Spec('mpileaks ^zmpi')
@@ -114,10 +152,10 @@ def test_postorder_edge_traversal(self):
         pairs = zip([4, 3, 3, 2, 3, 2, 1, 1, 0], names)
 
         traversal = dag.traverse(cover='edges', order='post')
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
         traversal = dag.traverse(cover='edges', depth=True, order='post')
-        self.assertEqual([(x, y.name) for x, y in traversal], pairs)
+        assert [(x, y.name) for x, y in traversal] == pairs
 
     def test_postorder_path_traversal(self):
         dag = Spec('mpileaks ^zmpi')
@@ -128,10 +166,10 @@ def test_postorder_path_traversal(self):
         pairs = zip([4, 3, 3, 2, 3, 2, 1, 2, 1, 0], names)
 
         traversal = dag.traverse(cover='paths', order='post')
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
         traversal = dag.traverse(cover='paths', depth=True, order='post')
-        self.assertEqual([(x, y.name) for x, y in traversal], pairs)
+        assert [(x, y.name) for x, y in traversal] == pairs
 
     def test_conflicting_spec_constraints(self):
         mpileaks = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
@@ -143,8 +181,8 @@ def test_conflicting_spec_constraints(self):
         mpileaks._dependencies['callpath']. \
             spec._dependencies['mpich'].spec = Spec('mpich@2.0')
 
-        self.assertRaises(spack.spec.InconsistentSpecError,
-                          lambda: mpileaks.flat_dependencies(copy=False))
+        with pytest.raises(spack.spec.InconsistentSpecError):
+            mpileaks.flat_dependencies(copy=False)
 
     def test_normalize_twice(self):
         """Make sure normalize can be run twice on the same spec,
@@ -154,7 +192,7 @@ def test_normalize_twice(self):
         n1 = spec.copy()
 
         spec.normalize()
-        self.assertEqual(n1, spec)
+        assert n1 == spec
 
     def test_normalize_a_lot(self):
         spec = Spec('mpileaks')
@@ -182,21 +220,7 @@ def test_normalize_with_virtual_spec(self):
             counts[spec.name] += 1
 
         for name in counts:
-            self.assertEqual(counts[name], 1, "Count for %s was not 1!" % name)
-
-    def check_links(self, spec_to_check):
-        for spec in spec_to_check.traverse():
-            for dependent in spec.dependents():
-                self.assertTrue(
-                    spec.name in dependent.dependencies_dict(),
-                    "%s not in dependencies of %s" %
-                    (spec.name, dependent.name))
-
-            for dependency in spec.dependencies():
-                self.assertTrue(
-                    spec.name in dependency.dependents_dict(),
-                    "%s not in dependents of %s" %
-                    (spec.name, dependency.name))
+            assert counts[name] == 1
 
     def test_dependents_and_dependencies_are_correct(self):
         spec = Spec('mpileaks',
@@ -208,49 +232,49 @@ def test_dependents_and_dependencies_are_correct(self):
                          Spec('mpi')),
                     Spec('mpi'))
 
-        self.check_links(spec)
+        check_links(spec)
         spec.normalize()
-        self.check_links(spec)
+        check_links(spec)
 
-    def test_unsatisfiable_version(self):
-        self.set_pkg_dep('mpileaks', 'mpich@1.0')
+    def test_unsatisfiable_version(self, set_dependency):
+        set_dependency('mpileaks', 'mpich@1.0')
         spec = Spec('mpileaks ^mpich@2.0 ^callpath ^dyninst ^libelf ^libdwarf')
-        self.assertRaises(spack.spec.UnsatisfiableVersionSpecError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.UnsatisfiableVersionSpecError):
+            spec.normalize()
 
-    def test_unsatisfiable_compiler(self):
-        self.set_pkg_dep('mpileaks', 'mpich%gcc')
+    def test_unsatisfiable_compiler(self, set_dependency):
+        set_dependency('mpileaks', 'mpich%gcc')
         spec = Spec('mpileaks ^mpich%intel ^callpath ^dyninst ^libelf'
                     ' ^libdwarf')
-        self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.UnsatisfiableCompilerSpecError):
+            spec.normalize()
 
-    def test_unsatisfiable_compiler_version(self):
-        self.set_pkg_dep('mpileaks', 'mpich%gcc@4.6')
+    def test_unsatisfiable_compiler_version(self, set_dependency):
+        set_dependency('mpileaks', 'mpich%gcc@4.6')
         spec = Spec('mpileaks ^mpich%gcc@4.5 ^callpath ^dyninst ^libelf'
                     ' ^libdwarf')
-        self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.UnsatisfiableCompilerSpecError):
+            spec.normalize()
 
-    def test_unsatisfiable_architecture(self):
-        self.set_pkg_dep('mpileaks', 'mpich platform=test target=be')
+    def test_unsatisfiable_architecture(self, set_dependency):
+        set_dependency('mpileaks', 'mpich platform=test target=be')
         spec = Spec('mpileaks ^mpich platform=test target=fe ^callpath'
                     ' ^dyninst ^libelf ^libdwarf')
-        self.assertRaises(spack.spec.UnsatisfiableArchitectureSpecError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.UnsatisfiableArchitectureSpecError):
+            spec.normalize()
 
     def test_invalid_dep(self):
         spec = Spec('libelf ^mpich')
-        self.assertRaises(spack.spec.InvalidDependencyError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.InvalidDependencyError):
+            spec.normalize()
 
         spec = Spec('libelf ^libdwarf')
-        self.assertRaises(spack.spec.InvalidDependencyError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.InvalidDependencyError):
+            spec.normalize()
 
         spec = Spec('mpich ^dyninst ^libelf')
-        self.assertRaises(spack.spec.InvalidDependencyError,
-                          spec.normalize)
+        with pytest.raises(spack.spec.InvalidDependencyError):
+            spec.normalize()
 
     def test_equal(self):
         # Different spec structures to test for equality
@@ -273,21 +297,21 @@ def test_equal(self):
         # All these are equal to each other with regular ==
         specs = (flat, flat_init, flip_flat, dag, flip_dag)
         for lhs, rhs in zip(specs, specs):
-            self.assertEqual(lhs, rhs)
-            self.assertEqual(str(lhs), str(rhs))
+            assert lhs == rhs
+            assert str(lhs) == str(rhs)
 
         # Same DAGs constructed different ways are equal
-        self.assertTrue(flat.eq_dag(flat_init))
+        assert flat.eq_dag(flat_init)
 
         # order at same level does not matter -- (dep on same parent)
-        self.assertTrue(flat.eq_dag(flip_flat))
+        assert flat.eq_dag(flip_flat)
 
         # DAGs should be unequal if nesting is different
-        self.assertFalse(flat.eq_dag(dag))
-        self.assertFalse(flat.eq_dag(flip_dag))
-        self.assertFalse(flip_flat.eq_dag(dag))
-        self.assertFalse(flip_flat.eq_dag(flip_dag))
-        self.assertFalse(dag.eq_dag(flip_dag))
+        assert not flat.eq_dag(dag)
+        assert not flat.eq_dag(flip_dag)
+        assert not flip_flat.eq_dag(dag)
+        assert not flip_flat.eq_dag(flip_dag)
+        assert not dag.eq_dag(flip_dag)
 
     def test_normalize_mpileaks(self):
         # Spec parsed in from a string
@@ -328,32 +352,32 @@ def test_normalize_mpileaks(self):
         # All specs here should be equal under regular equality
         specs = (spec, expected_flat, expected_normalized, non_unique_nodes)
         for lhs, rhs in zip(specs, specs):
-            self.assertEqual(lhs, rhs)
-            self.assertEqual(str(lhs), str(rhs))
+            assert lhs == rhs
+            assert str(lhs) == str(rhs)
 
         # Test that equal and equal_dag are doing the right thing
-        self.assertEqual(spec, expected_flat)
-        self.assertTrue(spec.eq_dag(expected_flat))
+        assert spec == expected_flat
+        assert spec.eq_dag(expected_flat)
 
         # Normalized has different DAG structure, so NOT equal.
-        self.assertNotEqual(spec, expected_normalized)
-        self.assertFalse(spec.eq_dag(expected_normalized))
+        assert spec != expected_normalized
+        assert not spec.eq_dag(expected_normalized)
 
         # Again, different DAG structure so not equal.
-        self.assertNotEqual(spec, non_unique_nodes)
-        self.assertFalse(spec.eq_dag(non_unique_nodes))
+        assert spec != non_unique_nodes
+        assert not spec.eq_dag(non_unique_nodes)
 
         spec.normalize()
 
         # After normalizing, spec_dag_equal should match the normalized spec.
-        self.assertNotEqual(spec, expected_flat)
-        self.assertFalse(spec.eq_dag(expected_flat))
+        assert spec != expected_flat
+        assert not spec.eq_dag(expected_flat)
 
-        self.assertEqual(spec, expected_normalized)
-        self.assertTrue(spec.eq_dag(expected_normalized))
+        assert spec == expected_normalized
+        assert spec.eq_dag(expected_normalized)
 
-        self.assertEqual(spec, non_unique_nodes)
-        self.assertFalse(spec.eq_dag(non_unique_nodes))
+        assert spec == non_unique_nodes
+        assert not spec.eq_dag(non_unique_nodes)
 
     def test_normalize_with_virtual_package(self):
         spec = Spec('mpileaks ^mpi ^libelf@1.8.11 ^libdwarf')
@@ -368,67 +392,66 @@ def test_normalize_with_virtual_package(self):
                       Spec('libelf@1.8.11')),
                  Spec('mpi')), Spec('mpi'))
 
-        self.assertEqual(str(spec), str(expected_normalized))
+        assert str(spec) == str(expected_normalized)
 
     def test_contains(self):
         spec = Spec('mpileaks ^mpi ^libelf@1.8.11 ^libdwarf')
-        self.assertTrue(Spec('mpi') in spec)
-        self.assertTrue(Spec('libelf') in spec)
-        self.assertTrue(Spec('libelf@1.8.11') in spec)
-        self.assertFalse(Spec('libelf@1.8.12') in spec)
-        self.assertTrue(Spec('libdwarf') in spec)
-        self.assertFalse(Spec('libgoblin') in spec)
-        self.assertTrue(Spec('mpileaks') in spec)
+        assert Spec('mpi') in spec
+        assert Spec('libelf') in spec
+        assert Spec('libelf@1.8.11') in spec
+        assert Spec('libelf@1.8.12') not in spec
+        assert Spec('libdwarf') in spec
+        assert Spec('libgoblin') not in spec
+        assert Spec('mpileaks') in spec
 
     def test_copy_simple(self):
         orig = Spec('mpileaks')
         copy = orig.copy()
+        check_links(copy)
 
-        self.check_links(copy)
-
-        self.assertEqual(orig, copy)
-        self.assertTrue(orig.eq_dag(copy))
-        self.assertEqual(orig._normal, copy._normal)
-        self.assertEqual(orig._concrete, copy._concrete)
+        assert orig == copy
+        assert orig.eq_dag(copy)
+        assert orig._normal == copy._normal
+        assert orig._concrete == copy._concrete
 
         # ensure no shared nodes bt/w orig and copy.
         orig_ids = set(id(s) for s in orig.traverse())
         copy_ids = set(id(s) for s in copy.traverse())
-        self.assertFalse(orig_ids.intersection(copy_ids))
+        assert not orig_ids.intersection(copy_ids)
 
     def test_copy_normalized(self):
         orig = Spec('mpileaks')
         orig.normalize()
         copy = orig.copy()
+        check_links(copy)
 
-        self.check_links(copy)
-
-        self.assertEqual(orig, copy)
-        self.assertTrue(orig.eq_dag(copy))
-        self.assertEqual(orig._normal, copy._normal)
-        self.assertEqual(orig._concrete, copy._concrete)
+        assert orig == copy
+        assert orig.eq_dag(copy)
+        assert orig._normal == copy._normal
+        assert orig._concrete == copy._concrete
 
         # ensure no shared nodes bt/w orig and copy.
         orig_ids = set(id(s) for s in orig.traverse())
         copy_ids = set(id(s) for s in copy.traverse())
-        self.assertFalse(orig_ids.intersection(copy_ids))
+        assert not orig_ids.intersection(copy_ids)
 
+    @pytest.mark.usefixtures('config')
     def test_copy_concretized(self):
         orig = Spec('mpileaks')
         orig.concretize()
         copy = orig.copy()
 
-        self.check_links(copy)
+        check_links(copy)
 
-        self.assertEqual(orig, copy)
-        self.assertTrue(orig.eq_dag(copy))
-        self.assertEqual(orig._normal, copy._normal)
-        self.assertEqual(orig._concrete, copy._concrete)
+        assert orig == copy
+        assert orig.eq_dag(copy)
+        assert orig._normal == copy._normal
+        assert orig._concrete == copy._concrete
 
         # ensure no shared nodes bt/w orig and copy.
         orig_ids = set(id(s) for s in orig.traverse())
         copy_ids = set(id(s) for s in copy.traverse())
-        self.assertFalse(orig_ids.intersection(copy_ids))
+        assert not orig_ids.intersection(copy_ids)
 
     """
     Here is the graph with deptypes labeled (assume all packages have a 'dt'
@@ -464,7 +487,7 @@ def test_deptype_traversal(self):
                  'dtlink1', 'dtlink3', 'dtlink4']
 
         traversal = dag.traverse(deptype=('build', 'link'))
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
     def test_deptype_traversal_with_builddeps(self):
         dag = Spec('dttop')
@@ -474,7 +497,7 @@ def test_deptype_traversal_with_builddeps(self):
                  'dtlink1', 'dtlink3', 'dtlink4']
 
         traversal = dag.traverse(deptype=('build', 'link'))
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
     def test_deptype_traversal_full(self):
         dag = Spec('dttop')
@@ -485,7 +508,7 @@ def test_deptype_traversal_full(self):
                  'dtrun3', 'dtbuild3']
 
         traversal = dag.traverse(deptype=spack.alldeps)
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
     def test_deptype_traversal_run(self):
         dag = Spec('dttop')
@@ -494,7 +517,7 @@ def test_deptype_traversal_run(self):
         names = ['dttop', 'dtrun1', 'dtrun3']
 
         traversal = dag.traverse(deptype='run')
-        self.assertEqual([x.name for x in traversal], names)
+        assert [x.name for x in traversal] == names
 
     def test_hash_bits(self):
         """Ensure getting first n bits of a base32-encoded DAG hash works."""
@@ -522,10 +545,10 @@ def test_hash_bits(self):
                 fmt = "#0%sb" % (bits + 2)
                 actual = format(actual_int, fmt).replace('0b', '')
 
-                self.assertEqual(expected[:bits], actual)
+                assert expected[:bits] == actual
 
-            self.assertRaises(
-                ValueError, spack.spec.base32_prefix_bits, test_hash, 161)
+            with pytest.raises(ValueError):
+                spack.spec.base32_prefix_bits(test_hash, 161)
 
-            self.assertRaises(
-                ValueError, spack.spec.base32_prefix_bits, test_hash, 256)
+            with pytest.raises(ValueError):
+                spack.spec.base32_prefix_bits(test_hash, 256)
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 16d6121dea..84c8650f15 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -23,340 +23,344 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import spack.architecture
+import pytest
 from spack.spec import *
-from spack.test.mock_packages_test import *
 
 
-class SpecSematicsTest(MockPackagesTest):
-    """This tests satisfies(), constrain() and other semantic operations
-       on specs."""
+def check_satisfies(spec, anon_spec, concrete=False):
+    left = Spec(spec, concrete=concrete)
+    try:
+        right = Spec(anon_spec)  # if it's not anonymous, allow it.
+    except Exception:
+        right = parse_anonymous_spec(anon_spec, left.name)
 
-    # ========================================================================
-    # Utility functions to set everything up.
-    # ========================================================================
-    def check_satisfies(self, spec, anon_spec, concrete=False):
-        left = Spec(spec, concrete=concrete)
-        try:
-            right = Spec(anon_spec)  # if it's not anonymous, allow it.
-        except:
-            right = parse_anonymous_spec(anon_spec, left.name)
-
-        # Satisfies is one-directional.
-        self.assertTrue(left.satisfies(right))
-        self.assertTrue(left.satisfies(anon_spec))
-
-        # if left satisfies right, then we should be able to consrain
-        # right by left.  Reverse is not always true.
+    # Satisfies is one-directional.
+    assert left.satisfies(right)
+    assert left.satisfies(anon_spec)
+
+    # if left satisfies right, then we should be able to constrain
+    # right by left.  Reverse is not always true.
+    right.copy().constrain(left)
+
+
+def check_unsatisfiable(spec, anon_spec, concrete=False):
+    left = Spec(spec, concrete=concrete)
+    try:
+        right = Spec(anon_spec)  # if it's not anonymous, allow it.
+    except Exception:
+        right = parse_anonymous_spec(anon_spec, left.name)
+
+    assert not left.satisfies(right)
+    assert not left.satisfies(anon_spec)
+
+    with pytest.raises(UnsatisfiableSpecError):
         right.copy().constrain(left)
 
-    def check_unsatisfiable(self, spec, anon_spec, concrete=False):
-        left = Spec(spec, concrete=concrete)
-        try:
-            right = Spec(anon_spec)  # if it's not anonymous, allow it.
-        except:
-            right = parse_anonymous_spec(anon_spec, left.name)
 
-        self.assertFalse(left.satisfies(right))
-        self.assertFalse(left.satisfies(anon_spec))
+def check_constrain(expected, spec, constraint):
+    exp = Spec(expected)
+    spec = Spec(spec)
+    constraint = Spec(constraint)
+    spec.constrain(constraint)
+    assert exp == spec
 
-        self.assertRaises(UnsatisfiableSpecError, right.copy().constrain, left)
 
-    def check_constrain(self, expected, spec, constraint):
-        exp = Spec(expected)
-        spec = Spec(spec)
-        constraint = Spec(constraint)
-        spec.constrain(constraint)
-        self.assertEqual(exp, spec)
+def check_constrain_changed(spec, constraint):
+    spec = Spec(spec)
+    assert spec.constrain(constraint)
 
-    def check_constrain_changed(self, spec, constraint):
-        spec = Spec(spec)
-        self.assertTrue(spec.constrain(constraint))
 
-    def check_constrain_not_changed(self, spec, constraint):
-        spec = Spec(spec)
-        self.assertFalse(spec.constrain(constraint))
+def check_constrain_not_changed(spec, constraint):
+    spec = Spec(spec)
+    assert not spec.constrain(constraint)
 
-    def check_invalid_constraint(self, spec, constraint):
-        spec = Spec(spec)
-        constraint = Spec(constraint)
-        self.assertRaises(UnsatisfiableSpecError, spec.constrain, constraint)
 
-    # ========================================================================
-    # Satisfiability
-    # ========================================================================
+def check_invalid_constraint(spec, constraint):
+    spec = Spec(spec)
+    constraint = Spec(constraint)
+    with pytest.raises(UnsatisfiableSpecError):
+        spec.constrain(constraint)
+
+
+@pytest.mark.usefixtures('config', 'builtin_mock')
+class TestSpecSemantics(object):
+    """This tests satisfies(), constrain() and other semantic operations
+    on specs.
+    """
     def test_satisfies(self):
-        self.check_satisfies('libelf@0.8.13', '@0:1')
-        self.check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1')
+        check_satisfies('libelf@0.8.13', '@0:1')
+        check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1')
 
     def test_satisfies_namespace(self):
-        self.check_satisfies('builtin.mpich', 'mpich')
-        self.check_satisfies('builtin.mock.mpich', 'mpich')
+        check_satisfies('builtin.mpich', 'mpich')
+        check_satisfies('builtin.mock.mpich', 'mpich')
 
         # TODO: only works for deps now, but shouldn't we allow for root spec?
-        # self.check_satisfies('builtin.mock.mpich', 'mpi')
+        # check_satisfies('builtin.mock.mpich', 'mpi')
 
-        self.check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')
+        check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')
 
-        self.check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')
+        check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')
 
     def test_satisfies_namespaced_dep(self):
         """Ensure spec from same or unspecified namespace satisfies namespace
            constraint."""
-        self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
+        check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
 
-        self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
-        self.check_satisfies(
+        check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
+        check_satisfies(
             'mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich')
 
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'mpileaks ^builtin.mock.mpich', '^builtin.mpich')
 
     def test_satisfies_compiler(self):
-        self.check_satisfies('foo%gcc', '%gcc')
-        self.check_satisfies('foo%intel', '%intel')
-        self.check_unsatisfiable('foo%intel', '%gcc')
-        self.check_unsatisfiable('foo%intel', '%pgi')
+        check_satisfies('foo%gcc', '%gcc')
+        check_satisfies('foo%intel', '%intel')
+        check_unsatisfiable('foo%intel', '%gcc')
+        check_unsatisfiable('foo%intel', '%pgi')
 
     def test_satisfies_compiler_version(self):
-        self.check_satisfies('foo%gcc', '%gcc@4.7.2')
-        self.check_satisfies('foo%intel', '%intel@4.7.2')
+        check_satisfies('foo%gcc', '%gcc@4.7.2')
+        check_satisfies('foo%intel', '%intel@4.7.2')
 
-        self.check_satisfies('foo%pgi@4.5', '%pgi@4.4:4.6')
-        self.check_satisfies('foo@2.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
+        check_satisfies('foo%pgi@4.5', '%pgi@4.4:4.6')
+        check_satisfies('foo@2.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
 
-        self.check_unsatisfiable('foo%pgi@4.3', '%pgi@4.4:4.6')
-        self.check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi')
-        self.check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
+        check_unsatisfiable('foo%pgi@4.3', '%pgi@4.4:4.6')
+        check_unsatisfiable('foo@4.0%pgi', '@1:3%pgi')
+        check_unsatisfiable('foo@4.0%pgi@4.5', '@1:3%pgi@4.4:4.6')
 
-        self.check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
-        self.check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
+        check_satisfies('foo %gcc@4.7.3', '%gcc@4.7')
+        check_unsatisfiable('foo %gcc@4.7', '%gcc@4.7.3')
 
     def test_satisfies_architecture(self):
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test',
             'platform=test')
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=linux',
             'platform=linux')
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test',
             'platform=test target=frontend')
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test',
             'platform=test os=frontend target=frontend')
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test os=frontend target=frontend',
             'platform=test')
 
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'foo platform=linux',
             'platform=test os=redhat6 target=x86_32')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'foo os=redhat6',
             'platform=test os=debian6 target=x86_64')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'foo target=x86_64',
             'platform=test os=redhat6 target=x86_32')
 
-        self.check_satisfies(
+        check_satisfies(
             'foo arch=test-None-None',
             'platform=test')
-        self.check_satisfies(
+        check_satisfies(
             'foo arch=test-None-frontend',
             'platform=test target=frontend')
-        self.check_satisfies(
+        check_satisfies(
             'foo arch=test-frontend-frontend',
             'platform=test os=frontend target=frontend')
-        self.check_satisfies(
+        check_satisfies(
             'foo arch=test-frontend-frontend',
             'platform=test')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'foo arch=test-frontend-frontend',
             'platform=test os=frontend target=backend')
 
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test target=frontend os=frontend',
             'platform=test target=frontend os=frontend')
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test target=backend os=backend',
             'platform=test target=backend os=backend')
-        self.check_satisfies(
+        check_satisfies(
             'foo platform=test target=default_target os=default_os',
             'platform=test os=default_os')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'foo platform=test target=x86_32 os=redhat6',
             'platform=linux target=x86_32 os=redhat6')
 
     def test_satisfies_dependencies(self):
-        self.check_satisfies('mpileaks^mpich', '^mpich')
-        self.check_satisfies('mpileaks^zmpi', '^zmpi')
+        check_satisfies('mpileaks^mpich', '^mpich')
+        check_satisfies('mpileaks^zmpi', '^zmpi')
 
-        self.check_unsatisfiable('mpileaks^mpich', '^zmpi')
-        self.check_unsatisfiable('mpileaks^zmpi', '^mpich')
+        check_unsatisfiable('mpileaks^mpich', '^zmpi')
+        check_unsatisfiable('mpileaks^zmpi', '^mpich')
 
     def test_satisfies_dependency_versions(self):
-        self.check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3')
-        self.check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0')
+        check_satisfies('mpileaks^mpich@2.0', '^mpich@1:3')
+        check_unsatisfiable('mpileaks^mpich@1.2', '^mpich@2.0')
 
-        self.check_satisfies(
+        check_satisfies(
             'mpileaks^mpich@2.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'mpileaks^mpich@4.0^callpath@1.5', '^mpich@1:3^callpath@1.4:1.6')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'mpileaks^mpich@2.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'mpileaks^mpich@4.0^callpath@1.7', '^mpich@1:3^callpath@1.4:1.6')
 
     def test_satisfies_virtual_dependencies(self):
-        self.check_satisfies('mpileaks^mpi', '^mpi')
-        self.check_satisfies('mpileaks^mpi', '^mpich')
+        check_satisfies('mpileaks^mpi', '^mpi')
+        check_satisfies('mpileaks^mpi', '^mpich')
 
-        self.check_satisfies('mpileaks^mpi', '^zmpi')
-        self.check_unsatisfiable('mpileaks^mpich', '^zmpi')
+        check_satisfies('mpileaks^mpi', '^zmpi')
+        check_unsatisfiable('mpileaks^mpich', '^zmpi')
 
     def test_satisfies_virtual_dependency_versions(self):
-        self.check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6')
-        self.check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6')
+        check_satisfies('mpileaks^mpi@1.5', '^mpi@1.2:1.6')
+        check_unsatisfiable('mpileaks^mpi@3', '^mpi@1.2:1.6')
 
-        self.check_satisfies('mpileaks^mpi@2:', '^mpich')
-        self.check_satisfies('mpileaks^mpi@2:', '^mpich@3.0.4')
-        self.check_satisfies('mpileaks^mpi@2:', '^mpich2@1.4')
+        check_satisfies('mpileaks^mpi@2:', '^mpich')
+        check_satisfies('mpileaks^mpi@2:', '^mpich@3.0.4')
+        check_satisfies('mpileaks^mpi@2:', '^mpich2@1.4')
 
-        self.check_satisfies('mpileaks^mpi@1:', '^mpich2')
-        self.check_satisfies('mpileaks^mpi@2:', '^mpich2')
+        check_satisfies('mpileaks^mpi@1:', '^mpich2')
+        check_satisfies('mpileaks^mpi@2:', '^mpich2')
 
-        self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2@1.4')
-        self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich2')
-        self.check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')
+        check_unsatisfiable('mpileaks^mpi@3:', '^mpich2@1.4')
+        check_unsatisfiable('mpileaks^mpi@3:', '^mpich2')
+        check_unsatisfiable('mpileaks^mpi@3:', '^mpich@1.0')
 
     def test_satisfies_matching_variant(self):
-        self.check_satisfies('mpich+foo', 'mpich+foo')
-        self.check_satisfies('mpich~foo', 'mpich~foo')
-        self.check_satisfies('mpich foo=1', 'mpich foo=1')
+        check_satisfies('mpich+foo', 'mpich+foo')
+        check_satisfies('mpich~foo', 'mpich~foo')
+        check_satisfies('mpich foo=1', 'mpich foo=1')
 
         # confirm that synonymous syntax works correctly
-        self.check_satisfies('mpich+foo', 'mpich foo=True')
-        self.check_satisfies('mpich foo=true', 'mpich+foo')
-        self.check_satisfies('mpich~foo', 'mpich foo=FALSE')
-        self.check_satisfies('mpich foo=False', 'mpich~foo')
+        check_satisfies('mpich+foo', 'mpich foo=True')
+        check_satisfies('mpich foo=true', 'mpich+foo')
+        check_satisfies('mpich~foo', 'mpich foo=FALSE')
+        check_satisfies('mpich foo=False', 'mpich~foo')
 
     def test_satisfies_unconstrained_variant(self):
         # only asked for mpich, no constraints.  Either will do.
-        self.check_satisfies('mpich+foo', 'mpich')
-        self.check_satisfies('mpich~foo', 'mpich')
-        self.check_satisfies('mpich foo=1', 'mpich')
+        check_satisfies('mpich+foo', 'mpich')
+        check_satisfies('mpich~foo', 'mpich')
+        check_satisfies('mpich foo=1', 'mpich')
 
     def test_unsatisfiable_variants(self):
         # This case is different depending on whether the specs are concrete.
 
         # 'mpich' is not concrete:
-        self.check_satisfies('mpich', 'mpich+foo', False)
-        self.check_satisfies('mpich', 'mpich~foo', False)
-        self.check_satisfies('mpich', 'mpich foo=1', False)
+        check_satisfies('mpich', 'mpich+foo', False)
+        check_satisfies('mpich', 'mpich~foo', False)
+        check_satisfies('mpich', 'mpich foo=1', False)
 
         # 'mpich' is concrete:
-        self.check_unsatisfiable('mpich', 'mpich+foo', True)
-        self.check_unsatisfiable('mpich', 'mpich~foo', True)
-        self.check_unsatisfiable('mpich', 'mpich foo=1', True)
+        check_unsatisfiable('mpich', 'mpich+foo', True)
+        check_unsatisfiable('mpich', 'mpich~foo', True)
+        check_unsatisfiable('mpich', 'mpich foo=1', True)
 
     def test_unsatisfiable_variant_mismatch(self):
         # No match in specs
-        self.check_unsatisfiable('mpich~foo', 'mpich+foo')
-        self.check_unsatisfiable('mpich+foo', 'mpich~foo')
-        self.check_unsatisfiable('mpich foo=1', 'mpich foo=2')
+        check_unsatisfiable('mpich~foo', 'mpich+foo')
+        check_unsatisfiable('mpich+foo', 'mpich~foo')
+        check_unsatisfiable('mpich foo=1', 'mpich foo=2')
 
     def test_satisfies_matching_compiler_flag(self):
-        self.check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"')
-        self.check_satisfies('mpich cppflags="-O3 -Wall"',
-                             'mpich cppflags="-O3 -Wall"')
+        check_satisfies('mpich cppflags="-O3"', 'mpich cppflags="-O3"')
+        check_satisfies(
+            'mpich cppflags="-O3 -Wall"', 'mpich cppflags="-O3 -Wall"'
+        )
 
     def test_satisfies_unconstrained_compiler_flag(self):
         # only asked for mpich, no constraints.  Any will do.
-        self.check_satisfies('mpich cppflags="-O3"', 'mpich')
+        check_satisfies('mpich cppflags="-O3"', 'mpich')
 
     def test_unsatisfiable_compiler_flag(self):
         # This case is different depending on whether the specs are concrete.
 
         # 'mpich' is not concrete:
-        self.check_satisfies('mpich', 'mpich cppflags="-O3"', False)
+        check_satisfies('mpich', 'mpich cppflags="-O3"', False)
 
         # 'mpich' is concrete:
-        self.check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True)
+        check_unsatisfiable('mpich', 'mpich cppflags="-O3"', True)
 
     def test_unsatisfiable_compiler_flag_mismatch(self):
         # No match in specs
-        self.check_unsatisfiable(
+        check_unsatisfiable(
             'mpich cppflags="-O3"', 'mpich cppflags="-O2"')
 
     def test_satisfies_virtual(self):
         # Don't use check_satisfies: it checks constrain() too, and
         # you can't constrain a non-virtual by a virtual.
-        self.assertTrue(Spec('mpich').satisfies(Spec('mpi')))
-        self.assertTrue(Spec('mpich2').satisfies(Spec('mpi')))
-        self.assertTrue(Spec('zmpi').satisfies(Spec('mpi')))
+        assert Spec('mpich').satisfies(Spec('mpi'))
+        assert Spec('mpich2').satisfies(Spec('mpi'))
+        assert Spec('zmpi').satisfies(Spec('mpi'))
 
     def test_satisfies_virtual_dep_with_virtual_constraint(self):
         """Ensure we can satisfy virtual constraints when there are multiple
            vdep providers in the specs."""
-        self.assertTrue(
-            Spec('netlib-lapack ^openblas').satisfies(
-                'netlib-lapack ^openblas'))
-        self.assertFalse(
-            Spec('netlib-lapack ^netlib-blas').satisfies(
-                'netlib-lapack ^openblas'))
-
-        self.assertFalse(
-            Spec('netlib-lapack ^openblas').satisfies(
-                'netlib-lapack ^netlib-blas'))
-        self.assertTrue(
-            Spec('netlib-lapack ^netlib-blas').satisfies(
-                'netlib-lapack ^netlib-blas'))
+        assert Spec('netlib-lapack ^openblas').satisfies(
+            'netlib-lapack ^openblas'
+        )
+        assert not Spec('netlib-lapack ^netlib-blas').satisfies(
+            'netlib-lapack ^openblas'
+        )
+        assert not Spec('netlib-lapack ^openblas').satisfies(
+            'netlib-lapack ^netlib-blas'
+        )
+        assert Spec('netlib-lapack ^netlib-blas').satisfies(
+            'netlib-lapack ^netlib-blas'
+        )
 
     def test_satisfies_same_spec_with_different_hash(self):
         """Ensure that concrete specs are matched *exactly* by hash."""
         s1 = Spec('mpileaks').concretized()
         s2 = s1.copy()
 
-        self.assertTrue(s1.satisfies(s2))
-        self.assertTrue(s2.satisfies(s1))
+        assert s1.satisfies(s2)
+        assert s2.satisfies(s1)
 
         # Simulate specs that were installed before and after a change to
         # Spack's hashing algorithm.  This just reverses s2's hash.
         s2._hash = s1.dag_hash()[-1::-1]
 
-        self.assertFalse(s1.satisfies(s2))
-        self.assertFalse(s2.satisfies(s1))
+        assert not s1.satisfies(s2)
+        assert not s2.satisfies(s1)
 
     # ========================================================================
     # Indexing specs
     # ========================================================================
     def test_self_index(self):
         s = Spec('callpath')
-        self.assertTrue(s['callpath'] == s)
+        assert s['callpath'] == s
 
     def test_dep_index(self):
         s = Spec('callpath')
         s.normalize()
 
-        self.assertTrue(s['callpath'] == s)
-        self.assertTrue(type(s['dyninst']) == Spec)
-        self.assertTrue(type(s['libdwarf']) == Spec)
-        self.assertTrue(type(s['libelf']) == Spec)
-        self.assertTrue(type(s['mpi']) == Spec)
+        assert s['callpath'] == s
+        assert type(s['dyninst']) == Spec
+        assert type(s['libdwarf']) == Spec
+        assert type(s['libelf']) == Spec
+        assert type(s['mpi']) == Spec
 
-        self.assertTrue(s['dyninst'].name  == 'dyninst')
-        self.assertTrue(s['libdwarf'].name == 'libdwarf')
-        self.assertTrue(s['libelf'].name   == 'libelf')
-        self.assertTrue(s['mpi'].name      == 'mpi')
+        assert s['dyninst'].name == 'dyninst'
+        assert s['libdwarf'].name == 'libdwarf'
+        assert s['libelf'].name == 'libelf'
+        assert s['mpi'].name == 'mpi'
 
     def test_spec_contains_deps(self):
         s = Spec('callpath')
         s.normalize()
-        self.assertTrue('dyninst' in s)
-        self.assertTrue('libdwarf' in s)
-        self.assertTrue('libelf' in s)
-        self.assertTrue('mpi' in s)
+        assert 'dyninst' in s
+        assert 'libdwarf' in s
+        assert 'libelf' in s
+        assert 'mpi' in s
 
+    @pytest.mark.usefixtures('config')
     def test_virtual_index(self):
         s = Spec('callpath')
         s.concretize()
@@ -370,133 +374,149 @@ def test_virtual_index(self):
         s_zmpi = Spec('callpath ^zmpi')
         s_zmpi.concretize()
 
-        self.assertTrue(s['mpi'].name != 'mpi')
-        self.assertTrue(s_mpich['mpi'].name == 'mpich')
-        self.assertTrue(s_mpich2['mpi'].name == 'mpich2')
-        self.assertTrue(s_zmpi['zmpi'].name == 'zmpi')
+        assert s['mpi'].name != 'mpi'
+        assert s_mpich['mpi'].name == 'mpich'
+        assert s_mpich2['mpi'].name == 'mpich2'
+        assert s_zmpi['zmpi'].name == 'zmpi'
 
         for spec in [s, s_mpich, s_mpich2, s_zmpi]:
-            self.assertTrue('mpi' in spec)
+            assert 'mpi' in spec
 
     # ========================================================================
     # Constraints
     # ========================================================================
     def test_constrain_variants(self):
-        self.check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3')
-        self.check_constrain('libelf@2.1:2.5%gcc@4.5:4.6',
-                             'libelf@0:2.5%gcc@2:4.6',
-                             'libelf@2.1:3%gcc@4.5:4.7')
-
-        self.check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo')
-        self.check_constrain('libelf+debug+foo',
-                             'libelf+debug', 'libelf+debug+foo')
-
-        self.check_constrain('libelf debug=2 foo=1',
-                             'libelf debug=2', 'libelf foo=1')
-        self.check_constrain('libelf debug=2 foo=1',
-                             'libelf debug=2', 'libelf debug=2 foo=1')
-
-        self.check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')
-        self.check_constrain('libelf+debug~foo',
-                             'libelf+debug', 'libelf+debug~foo')
+        check_constrain('libelf@2.1:2.5', 'libelf@0:2.5', 'libelf@2.1:3')
+        check_constrain(
+            'libelf@2.1:2.5%gcc@4.5:4.6',
+            'libelf@0:2.5%gcc@2:4.6',
+            'libelf@2.1:3%gcc@4.5:4.7'
+        )
+        check_constrain('libelf+debug+foo', 'libelf+debug', 'libelf+foo')
+        check_constrain(
+            'libelf+debug+foo', 'libelf+debug', 'libelf+debug+foo'
+        )
+        check_constrain(
+            'libelf debug=2 foo=1', 'libelf debug=2', 'libelf foo=1'
+        )
+        check_constrain(
+            'libelf debug=2 foo=1', 'libelf debug=2', 'libelf debug=2 foo=1'
+        )
+
+        check_constrain('libelf+debug~foo', 'libelf+debug', 'libelf~foo')
+        check_constrain(
+            'libelf+debug~foo', 'libelf+debug', 'libelf+debug~foo'
+        )
 
     def test_constrain_compiler_flags(self):
-        self.check_constrain('libelf cflags="-O3" cppflags="-Wall"',
-                             'libelf cflags="-O3"', 'libelf cppflags="-Wall"')
-        self.check_constrain('libelf cflags="-O3" cppflags="-Wall"',
-                             'libelf cflags="-O3"',
-                             'libelf cflags="-O3" cppflags="-Wall"')
+        check_constrain(
+            'libelf cflags="-O3" cppflags="-Wall"',
+            'libelf cflags="-O3"',
+            'libelf cppflags="-Wall"'
+        )
+        check_constrain(
+            'libelf cflags="-O3" cppflags="-Wall"',
+            'libelf cflags="-O3"',
+            'libelf cflags="-O3" cppflags="-Wall"'
+        )
 
     def test_constrain_architecture(self):
-        self.check_constrain('libelf target=default_target os=default_os',
-                             'libelf target=default_target os=default_os',
-                             'libelf target=default_target os=default_os')
-        self.check_constrain('libelf target=default_target os=default_os',
-                             'libelf',
-                             'libelf target=default_target os=default_os')
+        check_constrain(
+            'libelf target=default_target os=default_os',
+            'libelf target=default_target os=default_os',
+            'libelf target=default_target os=default_os'
+        )
+        check_constrain(
+            'libelf target=default_target os=default_os',
+            'libelf',
+            'libelf target=default_target os=default_os'
+        )
 
     def test_constrain_compiler(self):
-        self.check_constrain('libelf %gcc@4.4.7',
-                             'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7')
-        self.check_constrain('libelf %gcc@4.4.7',
-                             'libelf', 'libelf %gcc@4.4.7')
+        check_constrain(
+            'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7', 'libelf %gcc@4.4.7'
+        )
+        check_constrain(
+            'libelf %gcc@4.4.7', 'libelf', 'libelf %gcc@4.4.7'
+        )
 
     def test_invalid_constraint(self):
-        self.check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3')
-        self.check_invalid_constraint(
+        check_invalid_constraint('libelf@0:2.0', 'libelf@2.1:3')
+        check_invalid_constraint(
             'libelf@0:2.5%gcc@4.8:4.9', 'libelf@2.1:3%gcc@4.5:4.7')
 
-        self.check_invalid_constraint('libelf+debug', 'libelf~debug')
-        self.check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')
-        self.check_invalid_constraint('libelf debug=2', 'libelf debug=1')
+        check_invalid_constraint('libelf+debug', 'libelf~debug')
+        check_invalid_constraint('libelf+debug~foo', 'libelf+debug+foo')
+        check_invalid_constraint('libelf debug=2', 'libelf debug=1')
 
-        self.check_invalid_constraint(
+        check_invalid_constraint(
             'libelf cppflags="-O3"', 'libelf cppflags="-O2"')
-        self.check_invalid_constraint('libelf platform=test target=be os=be',
-                                      'libelf target=fe os=fe')
+        check_invalid_constraint(
+            'libelf platform=test target=be os=be', 'libelf target=fe os=fe'
+        )
 
     def test_constrain_changed(self):
-        self.check_constrain_changed('libelf', '@1.0')
-        self.check_constrain_changed('libelf', '@1.0:5.0')
-        self.check_constrain_changed('libelf', '%gcc')
-        self.check_constrain_changed('libelf%gcc', '%gcc@4.5')
-        self.check_constrain_changed('libelf', '+debug')
-        self.check_constrain_changed('libelf', '~debug')
-        self.check_constrain_changed('libelf', 'debug=2')
-        self.check_constrain_changed('libelf', 'cppflags="-O3"')
+        check_constrain_changed('libelf', '@1.0')
+        check_constrain_changed('libelf', '@1.0:5.0')
+        check_constrain_changed('libelf', '%gcc')
+        check_constrain_changed('libelf%gcc', '%gcc@4.5')
+        check_constrain_changed('libelf', '+debug')
+        check_constrain_changed('libelf', '~debug')
+        check_constrain_changed('libelf', 'debug=2')
+        check_constrain_changed('libelf', 'cppflags="-O3"')
 
         platform = spack.architecture.platform()
-        self.check_constrain_changed(
+        check_constrain_changed(
             'libelf', 'target=' + platform.target('default_target').name)
-        self.check_constrain_changed(
+        check_constrain_changed(
             'libelf', 'os=' + platform.operating_system('default_os').name)
 
     def test_constrain_not_changed(self):
-        self.check_constrain_not_changed('libelf', 'libelf')
-        self.check_constrain_not_changed('libelf@1.0', '@1.0')
-        self.check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0')
-        self.check_constrain_not_changed('libelf%gcc', '%gcc')
-        self.check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5')
-        self.check_constrain_not_changed('libelf+debug', '+debug')
-        self.check_constrain_not_changed('libelf~debug', '~debug')
-        self.check_constrain_not_changed('libelf debug=2', 'debug=2')
-        self.check_constrain_not_changed(
+        check_constrain_not_changed('libelf', 'libelf')
+        check_constrain_not_changed('libelf@1.0', '@1.0')
+        check_constrain_not_changed('libelf@1.0:5.0', '@1.0:5.0')
+        check_constrain_not_changed('libelf%gcc', '%gcc')
+        check_constrain_not_changed('libelf%gcc@4.5', '%gcc@4.5')
+        check_constrain_not_changed('libelf+debug', '+debug')
+        check_constrain_not_changed('libelf~debug', '~debug')
+        check_constrain_not_changed('libelf debug=2', 'debug=2')
+        check_constrain_not_changed(
             'libelf cppflags="-O3"', 'cppflags="-O3"')
 
         platform = spack.architecture.platform()
         default_target = platform.target('default_target').name
-        self.check_constrain_not_changed(
+        check_constrain_not_changed(
             'libelf target=' + default_target, 'target=' + default_target)
 
     def test_constrain_dependency_changed(self):
-        self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0')
-        self.check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0')
-        self.check_constrain_changed('libelf^foo', 'libelf^foo%gcc')
-        self.check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')
-        self.check_constrain_changed('libelf^foo', 'libelf^foo+debug')
-        self.check_constrain_changed('libelf^foo', 'libelf^foo~debug')
+        check_constrain_changed('libelf^foo', 'libelf^foo@1.0')
+        check_constrain_changed('libelf^foo', 'libelf^foo@1.0:5.0')
+        check_constrain_changed('libelf^foo', 'libelf^foo%gcc')
+        check_constrain_changed('libelf^foo%gcc', 'libelf^foo%gcc@4.5')
+        check_constrain_changed('libelf^foo', 'libelf^foo+debug')
+        check_constrain_changed('libelf^foo', 'libelf^foo~debug')
 
         platform = spack.architecture.platform()
         default_target = platform.target('default_target').name
-        self.check_constrain_changed(
+        check_constrain_changed(
             'libelf^foo', 'libelf^foo target=' + default_target)
 
     def test_constrain_dependency_not_changed(self):
-        self.check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0')
-        self.check_constrain_not_changed(
+        check_constrain_not_changed('libelf^foo@1.0', 'libelf^foo@1.0')
+        check_constrain_not_changed(
             'libelf^foo@1.0:5.0', 'libelf^foo@1.0:5.0')
-        self.check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')
-        self.check_constrain_not_changed(
+        check_constrain_not_changed('libelf^foo%gcc', 'libelf^foo%gcc')
+        check_constrain_not_changed(
             'libelf^foo%gcc@4.5', 'libelf^foo%gcc@4.5')
-        self.check_constrain_not_changed(
+        check_constrain_not_changed(
             'libelf^foo+debug', 'libelf^foo+debug')
-        self.check_constrain_not_changed(
+        check_constrain_not_changed(
             'libelf^foo~debug', 'libelf^foo~debug')
-        self.check_constrain_not_changed(
+        check_constrain_not_changed(
             'libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"')
 
         platform = spack.architecture.platform()
         default_target = platform.target('default_target').name
-        self.check_constrain_not_changed(
+        check_constrain_not_changed(
             'libelf^foo target=' + default_target,
             'libelf^foo target=' + default_target)
diff --git a/lib/spack/spack/test/spec_syntax.py b/lib/spack/spack/test/spec_syntax.py
index 1e072fe970..3cf094f25a 100644
--- a/lib/spack/spack/test/spec_syntax.py
+++ b/lib/spack/spack/test/spec_syntax.py
@@ -22,7 +22,7 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-import unittest
+import pytest
 
 import spack.spec as sp
 from spack.parse import Token
@@ -54,7 +54,7 @@
                Token(sp.ID, '8.1_1e')]
 
 
-class SpecSyntaxTest(unittest.TestCase):
+class TestSpecSyntax(object):
     # ========================================================================
     # Parse checks
     # ========================================================================
@@ -77,17 +77,22 @@ def check_parse(self, expected, spec=None, remove_arch=True):
         output = sp.parse(spec)
 
         parsed = (" ".join(str(spec) for spec in output))
-        self.assertEqual(expected, parsed)
+        assert expected == parsed
 
     def check_lex(self, tokens, spec):
         """Check that the provided spec parses to the provided token list."""
         lex_output = sp.SpecLexer().lex(spec)
         for tok, spec_tok in zip(tokens, lex_output):
             if tok.type == sp.ID:
-                self.assertEqual(tok, spec_tok)
+                assert tok == spec_tok
             else:
                 # Only check the type for non-identifiers.
-                self.assertEqual(tok.type, spec_tok.type)
+                assert tok.type == spec_tok.type
+
+    def _check_raises(self, exc_type, items):
+        for item in items:
+            with pytest.raises(exc_type):
+                self.check_parse(item)
 
     # ========================================================================
     # Parse checks
@@ -107,6 +112,10 @@ def test_dependencies_with_versions(self):
         self.check_parse("openmpi^hwloc@:1.4b7-rc3")
         self.check_parse("openmpi^hwloc@1.2e6:1.4b7-rc3")
 
+    @pytest.mark.xfail
+    def test_multiple_specs(self):
+        self.check_parse("mvapich emacs")
+
     def test_full_specs(self):
         self.check_parse(
             "mvapich_foo"
@@ -167,88 +176,53 @@ def test_canonicalize(self):
         self.check_parse("x^y", "x@: ^y@:")
 
     def test_parse_errors(self):
-        self.assertRaises(SpecParseError, self.check_parse, "x@@1.2")
-        self.assertRaises(SpecParseError, self.check_parse, "x ^y@@1.2")
-        self.assertRaises(SpecParseError, self.check_parse, "x@1.2::")
-        self.assertRaises(SpecParseError, self.check_parse, "x::")
+        errors = ['x@@1.2', 'x ^y@@1.2', 'x@1.2::', 'x::']
+        self._check_raises(SpecParseError, errors)
 
     def test_duplicate_variant(self):
-        self.assertRaises(DuplicateVariantError,
-                          self.check_parse, "x@1.2+debug+debug")
-        self.assertRaises(DuplicateVariantError,
-                          self.check_parse, "x ^y@1.2+debug debug=true")
-        self.assertRaises(DuplicateVariantError, self.check_parse,
-                          "x ^y@1.2 debug=false debug=true")
-        self.assertRaises(DuplicateVariantError,
-                          self.check_parse, "x ^y@1.2 debug=false~debug")
-
-    def test_duplicate_depdendence(self):
-        self.assertRaises(DuplicateDependencyError,
-                          self.check_parse, "x ^y ^y")
-
-    def test_duplicate_compiler(self):
-        self.assertRaises(DuplicateCompilerSpecError,
-                          self.check_parse, "x%intel%intel")
+        duplicates = [
+            'x@1.2+debug+debug',
+            'x ^y@1.2+debug debug=true',
+            'x ^y@1.2 debug=false debug=true',
+            'x ^y@1.2 debug=false~debug'
+        ]
+        self._check_raises(DuplicateVariantError, duplicates)
 
-        self.assertRaises(DuplicateCompilerSpecError,
-                          self.check_parse, "x%intel%gcc")
-        self.assertRaises(DuplicateCompilerSpecError,
-                          self.check_parse, "x%gcc%intel")
+    def test_duplicate_dependency(self):
+        self._check_raises(DuplicateDependencyError, ["x ^y ^y"])
 
-        self.assertRaises(DuplicateCompilerSpecError,
-                          self.check_parse, "x ^y%intel%intel")
-        self.assertRaises(DuplicateCompilerSpecError,
-                          self.check_parse, "x ^y%intel%gcc")
-        self.assertRaises(DuplicateCompilerSpecError,
-                          self.check_parse, "x ^y%gcc%intel")
+    def test_duplicate_compiler(self):
+        duplicates = [
+            "x%intel%intel",
+            "x%intel%gcc",
+            "x%gcc%intel",
+            "x ^y%intel%intel",
+            "x ^y%intel%gcc",
+            "x ^y%gcc%intel"
+        ]
+        self._check_raises(DuplicateCompilerSpecError, duplicates)
 
     def test_duplicate_architecture(self):
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64")
-
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le")
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64")
-
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64")
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le")
+        duplicates = [
+            "x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64",
+            "x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le",
+            "x arch=linux-rhel7-ppc64le arch=linux-rhel7-x86_64",
+            "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-x86_64",
+            "y ^x arch=linux-rhel7-x86_64 arch=linux-rhel7-ppc64le"
+        ]
+        self._check_raises(DuplicateArchitectureError, duplicates)
 
     def test_duplicate_architecture_component(self):
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x os=fe os=fe")
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x os=fe os=be")
-
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x target=fe target=fe")
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x target=fe target=be")
-
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x platform=test platform=test")
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x platform=test platform=test")
-
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x os=fe platform=test target=fe os=fe")
-        self.assertRaises(
-            DuplicateArchitectureError, self.check_parse,
-            "x target=be platform=test os=be os=fe")
+        duplicates = [
+            "x os=fe os=fe",
+            "x os=fe os=be",
+            "x target=fe target=fe",
+            "x target=fe target=be",
+            "x platform=test platform=test",
+            "x os=fe platform=test target=fe os=fe",
+            "x target=be platform=test os=be os=fe"
+        ]
+        self._check_raises(DuplicateArchitectureError, duplicates)
 
     # ========================================================================
     # Lex checks
@@ -256,11 +230,13 @@ def test_duplicate_architecture_component(self):
     def test_ambiguous(self):
         # This first one is ambiguous because - can be in an identifier AND
         # indicate disabling an option.
-        self.assertRaises(
-            AssertionError, self.check_lex, complex_lex,
-            "mvapich_foo"
-            "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4"
-            "^stackwalker@8.1_1e")
+        with pytest.raises(AssertionError):
+            self.check_lex(
+                complex_lex,
+                "mvapich_foo"
+                "^_openmpi@1.2:1.4,1.6%intel@12.1:12.6+debug-qt_4"
+                "^stackwalker@8.1_1e"
+            )
 
     # The following lexes are non-ambiguous (add a space before -qt_4)
     # and should all result in the tokens in complex_lex
diff --git a/lib/spack/spack/test/spec_yaml.py b/lib/spack/spack/test/spec_yaml.py
index 442c6e6e81..e913dc8412 100644
--- a/lib/spack/spack/test/spec_yaml.py
+++ b/lib/spack/spack/test/spec_yaml.py
@@ -27,155 +27,153 @@
 YAML format preserves DAG informatoin in the spec.
 
 """
-import spack.util.spack_yaml as syaml
 import spack.util.spack_json as sjson
+import spack.util.spack_yaml as syaml
+from spack.spec import Spec
 from spack.util.spack_yaml import syaml_dict
 
-from spack.spec import Spec
-from spack.test.mock_packages_test import *
 
+def check_yaml_round_trip(spec):
+    yaml_text = spec.to_yaml()
+    spec_from_yaml = Spec.from_yaml(yaml_text)
+    assert spec.eq_dag(spec_from_yaml)
 
-class SpecYamlTest(MockPackagesTest):
 
-    def check_yaml_round_trip(self, spec):
-        yaml_text = spec.to_yaml()
-        spec_from_yaml = Spec.from_yaml(yaml_text)
-        self.assertTrue(spec.eq_dag(spec_from_yaml))
+def test_simple_spec():
+    spec = Spec('mpileaks')
+    check_yaml_round_trip(spec)
 
-    def test_simple_spec(self):
-        spec = Spec('mpileaks')
-        self.check_yaml_round_trip(spec)
 
-    def test_normal_spec(self):
-        spec = Spec('mpileaks+debug~opt')
-        spec.normalize()
-        self.check_yaml_round_trip(spec)
+def test_normal_spec(builtin_mock):
+    spec = Spec('mpileaks+debug~opt')
+    spec.normalize()
+    check_yaml_round_trip(spec)
 
-    def test_ambiguous_version_spec(self):
-        spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt')
-        spec.normalize()
-        self.check_yaml_round_trip(spec)
 
-    def test_concrete_spec(self):
-        spec = Spec('mpileaks+debug~opt')
-        spec.concretize()
-        self.check_yaml_round_trip(spec)
+def test_ambiguous_version_spec(builtin_mock):
+    spec = Spec('mpileaks@1.0:5.0,6.1,7.3+debug~opt')
+    spec.normalize()
+    check_yaml_round_trip(spec)
+
+
+def test_concrete_spec(config, builtin_mock):
+    spec = Spec('mpileaks+debug~opt')
+    spec.concretize()
+    check_yaml_round_trip(spec)
+
+
+def test_yaml_subdag(config, builtin_mock):
+    spec = Spec('mpileaks^mpich+debug')
+    spec.concretize()
+    yaml_spec = Spec.from_yaml(spec.to_yaml())
 
-    def test_yaml_subdag(self):
-        spec = Spec('mpileaks^mpich+debug')
+    for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'):
+        assert spec[dep].eq_dag(yaml_spec[dep])
+
+
+def test_using_ordered_dict(builtin_mock):
+    """ Checks that dicts are ordered
+
+    Necessary to make sure that dag_hash is stable across python
+    versions and processes.
+    """
+    def descend_and_check(iterable, level=0):
+        from spack.util.spack_yaml import syaml_dict
+        from collections import Iterable, Mapping
+        if isinstance(iterable, Mapping):
+            assert isinstance(iterable, syaml_dict)
+            return descend_and_check(iterable.values(), level=level + 1)
+        max_level = level
+        for value in iterable:
+            if isinstance(value, Iterable) and not isinstance(value, str):
+                nlevel = descend_and_check(value, level=level + 1)
+                if nlevel > max_level:
+                    max_level = nlevel
+        return max_level
+
+    specs = ['mpileaks ^zmpi', 'dttop', 'dtuse']
+    for spec in specs:
+        dag = Spec(spec)
+        dag.normalize()
+        level = descend_and_check(dag.to_node_dict())
+        # level just makes sure we are doing something here
+        assert level >= 5
+
+
+def test_ordered_read_not_required_for_consistent_dag_hash(
+        config, builtin_mock
+):
+    """Make sure ordered serialization isn't required to preserve hashes.
+
+    For consistent hashes, we require that YAML and json documents
+    have their keys serialized in a deterministic order. However, we
+    don't want to require them to be serialized in order. This
+    ensures that is not required.
+    """
+    specs = ['mpileaks ^zmpi', 'dttop', 'dtuse']
+    for spec in specs:
+        spec = Spec(spec)
         spec.concretize()
-        yaml_spec = Spec.from_yaml(spec.to_yaml())
-
-        for dep in ('callpath', 'mpich', 'dyninst', 'libdwarf', 'libelf'):
-            self.assertTrue(spec[dep].eq_dag(yaml_spec[dep]))
-
-    def test_using_ordered_dict(self):
-        """ Checks that dicts are ordered
-
-            Necessary to make sure that dag_hash is stable across python
-            versions and processes.
-        """
-        def descend_and_check(iterable, level=0):
-            from spack.util.spack_yaml import syaml_dict
-            from collections import Iterable, Mapping
-            if isinstance(iterable, Mapping):
-                self.assertTrue(isinstance(iterable, syaml_dict))
-                return descend_and_check(iterable.values(), level=level + 1)
-            max_level = level
-            for value in iterable:
-                if isinstance(value, Iterable) and not isinstance(value, str):
-                    nlevel = descend_and_check(value, level=level + 1)
-                    if nlevel > max_level:
-                        max_level = nlevel
-            return max_level
-
-        specs = ['mpileaks ^zmpi', 'dttop', 'dtuse']
-        for spec in specs:
-            dag = Spec(spec)
-            dag.normalize()
-            level = descend_and_check(dag.to_node_dict())
-            # level just makes sure we are doing something here
-            self.assertTrue(level >= 5)
-
-    def test_ordered_read_not_required_for_consistent_dag_hash(self):
-        """Make sure ordered serialization isn't required to preserve hashes.
-
-        For consistent hashes, we require that YAML and json documents
-        have their keys serialized in a deterministic order. However, we
-        don't want to require them to be serialized in order. This
-        ensures that is not reauired.
-
-        """
-        specs = ['mpileaks ^zmpi', 'dttop', 'dtuse']
-        for spec in specs:
-            spec = Spec(spec)
-            spec.concretize()
-
-            #
-            # Dict & corresponding YAML & JSON from the original spec.
-            #
-            spec_dict = spec.to_dict()
-            spec_yaml = spec.to_yaml()
-            spec_json = spec.to_json()
-
-            #
-            # Make a spec with reversed OrderedDicts for every
-            # OrderedDict in the original.
-            #
-            reversed_spec_dict = reverse_all_dicts(spec.to_dict())
-
-            #
-            # Dump to YAML and JSON
-            #
-            yaml_string = syaml.dump(spec_dict, default_flow_style=False)
-            reversed_yaml_string = syaml.dump(reversed_spec_dict,
-                                              default_flow_style=False)
-            json_string = sjson.dump(spec_dict)
-            reversed_json_string = sjson.dump(reversed_spec_dict)
-
-            #
-            # Do many consistency checks
-            #
-
-            # spec yaml is ordered like the spec dict
-            self.assertEqual(yaml_string, spec_yaml)
-            self.assertEqual(json_string, spec_json)
-
-            # reversed string is different from the original, so it
-            # *would* generate a different hash
-            self.assertNotEqual(yaml_string, reversed_yaml_string)
-            self.assertNotEqual(json_string, reversed_json_string)
-
-            # build specs from the "wrongly" ordered data
-            round_trip_yaml_spec = Spec.from_yaml(yaml_string)
-            round_trip_json_spec = Spec.from_json(json_string)
-            round_trip_reversed_yaml_spec = Spec.from_yaml(
-                reversed_yaml_string)
-            round_trip_reversed_json_spec = Spec.from_yaml(
-                reversed_json_string)
-
-            # TODO: remove this when build deps are in provenance.
-            spec = spec.copy(deps=('link', 'run'))
-
-            # specs are equal to the original
-            self.assertEqual(spec, round_trip_yaml_spec)
-            self.assertEqual(spec, round_trip_json_spec)
-            self.assertEqual(spec, round_trip_reversed_yaml_spec)
-            self.assertEqual(spec, round_trip_reversed_json_spec)
-            self.assertEqual(round_trip_yaml_spec,
-                             round_trip_reversed_yaml_spec)
-            self.assertEqual(round_trip_json_spec,
-                             round_trip_reversed_json_spec)
-
-            # dag_hashes are equal
-            self.assertEqual(
-                spec.dag_hash(), round_trip_yaml_spec.dag_hash())
-            self.assertEqual(
-                spec.dag_hash(), round_trip_json_spec.dag_hash())
-            self.assertEqual(
-                spec.dag_hash(), round_trip_reversed_yaml_spec.dag_hash())
-            self.assertEqual(
-                spec.dag_hash(), round_trip_reversed_json_spec.dag_hash())
+
+        #
+        # Dict & corresponding YAML & JSON from the original spec.
+        #
+        spec_dict = spec.to_dict()
+        spec_yaml = spec.to_yaml()
+        spec_json = spec.to_json()
+
+        #
+        # Make a spec with reversed OrderedDicts for every
+        # OrderedDict in the original.
+        #
+        reversed_spec_dict = reverse_all_dicts(spec.to_dict())
+
+        #
+        # Dump to YAML and JSON
+        #
+        yaml_string = syaml.dump(spec_dict, default_flow_style=False)
+        reversed_yaml_string = syaml.dump(reversed_spec_dict,
+                                          default_flow_style=False)
+        json_string = sjson.dump(spec_dict)
+        reversed_json_string = sjson.dump(reversed_spec_dict)
+
+        #
+        # Do many consistency checks
+        #
+
+        # spec yaml is ordered like the spec dict
+        assert yaml_string == spec_yaml
+        assert json_string == spec_json
+
+        # reversed string is different from the original, so it
+        # *would* generate a different hash
+        assert yaml_string != reversed_yaml_string
+        assert json_string != reversed_json_string
+
+        # build specs from the "wrongly" ordered data
+        round_trip_yaml_spec = Spec.from_yaml(yaml_string)
+        round_trip_json_spec = Spec.from_json(json_string)
+        round_trip_reversed_yaml_spec = Spec.from_yaml(
+            reversed_yaml_string
+        )
+        round_trip_reversed_json_spec = Spec.from_yaml(
+            reversed_json_string
+        )
+
+        # TODO: remove this when build deps are in provenance.
+        spec = spec.copy(deps=('link', 'run'))
+        # specs are equal to the original
+        assert spec == round_trip_yaml_spec
+        assert spec == round_trip_json_spec
+        assert spec == round_trip_reversed_yaml_spec
+        assert spec == round_trip_reversed_json_spec
+        assert round_trip_yaml_spec == round_trip_reversed_yaml_spec
+        assert round_trip_json_spec == round_trip_reversed_json_spec
+        # dag_hashes are equal
+        assert spec.dag_hash() == round_trip_yaml_spec.dag_hash()
+        assert spec.dag_hash() == round_trip_json_spec.dag_hash()
+        assert spec.dag_hash() == round_trip_reversed_yaml_spec.dag_hash()
+        assert spec.dag_hash() == round_trip_reversed_json_spec.dag_hash()
 
 
 def reverse_all_dicts(data):
diff --git a/lib/spack/spack/test/stage.py b/lib/spack/spack/test/stage.py
index cfeb80dd35..5b4c46e0bf 100644
--- a/lib/spack/spack/test/stage.py
+++ b/lib/spack/spack/test/stage.py
@@ -22,355 +22,360 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-"""\
-Test that the Stage class works correctly.
-"""
+"""Test that the Stage class works correctly."""
+import collections
 import os
-import shutil
-import tempfile
-from contextlib import *
 
+import pytest
 import spack
 import spack.stage
-from llnl.util.filesystem import *
+import spack.util.executable
+from llnl.util.filesystem import join_path
 from spack.stage import Stage
-from spack.util.executable import which
-from spack.test.mock_packages_test import *
 
-_test_tmp_path = None
 
+def check_chdir_to_source(stage, stage_name):
+    stage_path = get_stage_path(stage, stage_name)
+    archive_dir = 'test-files'
+    assert join_path(
+        os.path.realpath(stage_path), archive_dir
+    ) == os.getcwd()
 
-@contextmanager
-def use_tmp(use_tmp):
-    """Allow some test code to be executed such that spack will either use or
-       not use temporary space for stages.
-    """
-    # mock up config
-    assert(_test_tmp_path is not None)
 
-    if use_tmp:
-        path = _test_tmp_path    # use temporary stage
+def check_expand_archive(stage, stage_name, mock_archive):
+    stage_path = get_stage_path(stage, stage_name)
+    archive_name = 'test-files.tar.gz'
+    archive_dir = 'test-files'
+    assert archive_name in os.listdir(stage_path)
+    assert archive_dir in os.listdir(stage_path)
+
+    assert join_path(stage_path, archive_dir) == stage.source_path
+
+    readme = join_path(stage_path, archive_dir, 'README.txt')
+    assert os.path.isfile(readme)
+    with open(readme) as file:
+        assert 'hello world!\n' == file.read()
+
+
+def check_fetch(stage, stage_name):
+    archive_name = 'test-files.tar.gz'
+    stage_path = get_stage_path(stage, stage_name)
+    assert archive_name in os.listdir(stage_path)
+    assert join_path(stage_path, archive_name) == stage.fetcher.archive_file
+
+
+def check_chdir(stage, stage_name):
+    stage_path = get_stage_path(stage, stage_name)
+    assert os.path.realpath(stage_path) == os.getcwd()
+
+
+def check_destroy(stage, stage_name):
+    """Figure out whether a stage was destroyed correctly."""
+    stage_path = get_stage_path(stage, stage_name)
+
+    # check that the stage dir/link was removed.
+    assert not os.path.exists(stage_path)
+
+    # tmp stage needs to remove tmp dir too.
+    if spack.stage._use_tmp_stage:
+        target = os.path.realpath(stage_path)
+        assert not os.path.exists(target)
+
+
+def check_setup(stage, stage_name, archive):
+    """Figure out whether a stage was set up correctly."""
+    stage_path = get_stage_path(stage, stage_name)
+
+    # Ensure stage was created in the spack stage directory
+    assert os.path.isdir(stage_path)
+
+    if spack.stage.get_tmp_root():
+        # Check that the stage dir is really a symlink.
+        assert os.path.islink(stage_path)
+
+        # Make sure it points to a valid directory
+        target = os.path.realpath(stage_path)
+        assert os.path.isdir(target)
+        assert not os.path.islink(target)
+
+        # Make sure the directory is in the place we asked it to
+        # be (see setUp, tearDown, and use_tmp)
+        assert target.startswith(str(archive.test_tmp_dir))
+
     else:
-        path = spack.stage_path  # Use Spack's stage dir (no links)
+        # Make sure the stage path is NOT a link for a non-tmp stage
+        assert not os.path.islink(stage_path)
 
-    spack.config.update_config(
-        'config', {'build_stage': [path]}, scope='user')
 
-    yield
+def get_stage_path(stage, stage_name):
+    """Figure out where a stage should be living. This depends on
+    whether it's named.
+    """
+    if stage_name is not None:
+        # If it is a named stage, we know where the stage should be
+        return join_path(spack.stage_path, stage_name)
+    else:
+        # If it's unnamed, ensure that we ran mkdtemp in the right spot.
+        assert stage.path is not None
+        assert stage.path.startswith(spack.stage_path)
+        return stage.path
 
 
-def fail_search_fn():
-    raise Exception("This should not have been called")
-
-
-class FailingFetchStrategy(spack.fetch_strategy.FetchStrategy):
-    def fetch(self):
-        raise spack.fetch_strategy.FailedDownloadError(
-            "<non-existent URL>",
-            "This implementation of FetchStrategy always fails")
-
-
-class MockSearchFunction(object):
-    def __init__(self):
-        self.performed_search = False
-
-    def __call__(self):
-        self.performed_search = True
-        return []
-
-
-class StageTest(MockPackagesTest):
-
-    def setUp(self):
-        """This sets up a mock archive to fetch, and a mock temp space for use
-           by the Stage class.  It doesn't actually create the Stage -- that
-           is done by individual tests.
-        """
-        super(StageTest, self).setUp()
-
-        global _test_tmp_path
-
-        #
-        # Mock up a stage area that looks like this:
-        #
-        # TMPDIR/                    test_files_dir
-        #     tmp/                   test_tmp_path (where stage should be)
-        #     test-files/            archive_dir_path
-        #         README.txt         test_readme (contains "hello world!\n")
-        #     test-files.tar.gz      archive_url = file:///path/to/this
-        #
-        self.test_files_dir = tempfile.mkdtemp()
-        self.test_tmp_path  = os.path.realpath(
-            os.path.join(self.test_files_dir, 'tmp'))
-        _test_tmp_path = self.test_tmp_path
-
-        # set _test_tmp_path as the default test directory to use for stages.
-        spack.config.update_config(
-            'config', {'build_stage': [_test_tmp_path]}, scope='user')
-
-        self.archive_dir = 'test-files'
-        self.archive_name = self.archive_dir + '.tar.gz'
-        archive_dir_path = os.path.join(self.test_files_dir,
-                                        self.archive_dir)
-        self.archive_url = 'file://' + os.path.join(self.test_files_dir,
-                                                    self.archive_name)
-        test_readme = join_path(archive_dir_path, 'README.txt')
-        self.readme_text = "hello world!\n"
-
-        self.stage_name = 'spack-test-stage'
-
-        mkdirp(archive_dir_path)
-        mkdirp(self.test_tmp_path)
-
-        with open(test_readme, 'w') as readme:
-            readme.write(self.readme_text)
-
-        with working_dir(self.test_files_dir):
-            tar = which('tar', required=True)
-            tar('czf', self.archive_name, self.archive_dir)
-
-        # Make spack use the test environment for tmp stuff.
-        self._old_tmp_root = spack.stage._tmp_root
-        self._old_use_tmp_stage = spack.stage._use_tmp_stage
-        spack.stage._tmp_root = None
-        spack.stage._use_tmp_stage = True
-
-        # record this since this test changes to directories that will
-        # be removed.
-        self.working_dir = os.getcwd()
-
-    def tearDown(self):
-        """Blows away the test environment directory."""
-        super(StageTest, self).tearDown()
-
-        shutil.rmtree(self.test_files_dir, ignore_errors=True)
-
-        # chdir back to original working dir
-        os.chdir(self.working_dir)
-
-        # restore spack's original tmp environment
-        spack.stage._tmp_root = self._old_tmp_root
-        spack.stage._use_tmp_stage = self._old_use_tmp_stage
-
-    def get_stage_path(self, stage, stage_name):
-        """Figure out where a stage should be living.  This depends on
-           whether it's named.
-        """
-        if stage_name is not None:
-            # If it is a named stage, we know where the stage should be
-            return join_path(spack.stage_path, stage_name)
-        else:
-            # If it's unnamed, ensure that we ran mkdtemp in the right spot.
-            self.assertTrue(stage.path is not None)
-            self.assertTrue(stage.path.startswith(spack.stage_path))
-            return stage.path
-
-    def check_setup(self, stage, stage_name):
-        """Figure out whether a stage was set up correctly."""
-        stage_path = self.get_stage_path(stage, stage_name)
-
-        # Ensure stage was created in the spack stage directory
-        self.assertTrue(os.path.isdir(stage_path))
-
-        if spack.stage.get_tmp_root():
-            # Check that the stage dir is really a symlink.
-            self.assertTrue(os.path.islink(stage_path))
-
-            # Make sure it points to a valid directory
-            target = os.path.realpath(stage_path)
-            self.assertTrue(os.path.isdir(target))
-            self.assertFalse(os.path.islink(target))
-
-            # Make sure the directory is in the place we asked it to
-            # be (see setUp, tearDown, and use_tmp)
-            self.assertTrue(target.startswith(self.test_tmp_path))
-
-        else:
-            # Make sure the stage path is NOT a link for a non-tmp stage
-            self.assertFalse(os.path.islink(stage_path))
-
-    def check_fetch(self, stage, stage_name):
-        stage_path = self.get_stage_path(stage, stage_name)
-        self.assertTrue(self.archive_name in os.listdir(stage_path))
-        self.assertEqual(join_path(stage_path, self.archive_name),
-                         stage.fetcher.archive_file)
-
-    def check_expand_archive(self, stage, stage_name):
-        stage_path = self.get_stage_path(stage, stage_name)
-        self.assertTrue(self.archive_name in os.listdir(stage_path))
-        self.assertTrue(self.archive_dir in os.listdir(stage_path))
-
-        self.assertEqual(
-            join_path(stage_path, self.archive_dir),
-            stage.source_path)
-
-        readme = join_path(stage_path, self.archive_dir, 'README.txt')
-        self.assertTrue(os.path.isfile(readme))
-
-        with open(readme) as file:
-            self.assertEqual(self.readme_text, file.read())
-
-    def check_chdir(self, stage, stage_name):
-        stage_path = self.get_stage_path(stage, stage_name)
-        self.assertEqual(os.path.realpath(stage_path), os.getcwd())
-
-    def check_chdir_to_source(self, stage, stage_name):
-        stage_path = self.get_stage_path(stage, stage_name)
-        self.assertEqual(
-            join_path(os.path.realpath(stage_path), self.archive_dir),
-            os.getcwd())
-
-    def check_destroy(self, stage, stage_name):
-        """Figure out whether a stage was destroyed correctly."""
-        stage_path = self.get_stage_path(stage, stage_name)
-
-        # check that the stage dir/link was removed.
-        self.assertFalse(os.path.exists(stage_path))
-
-        # tmp stage needs to remove tmp dir too.
-        if spack.stage._use_tmp_stage:
-            target = os.path.realpath(stage_path)
-            self.assertFalse(os.path.exists(target))
-
-    def test_setup_and_destroy_name_with_tmp(self):
-        with use_tmp(True):
-            with Stage(self.archive_url, name=self.stage_name) as stage:
-                self.check_setup(stage, self.stage_name)
-            self.check_destroy(stage, self.stage_name)
-
-    def test_setup_and_destroy_name_without_tmp(self):
-        with use_tmp(False):
-            with Stage(self.archive_url, name=self.stage_name) as stage:
-                self.check_setup(stage, self.stage_name)
-            self.check_destroy(stage, self.stage_name)
-
-    def test_setup_and_destroy_no_name_with_tmp(self):
-        with use_tmp(True):
-            with Stage(self.archive_url) as stage:
-                self.check_setup(stage, None)
-            self.check_destroy(stage, None)
-
-    def test_setup_and_destroy_no_name_without_tmp(self):
-        with use_tmp(False):
-            with Stage(self.archive_url) as stage:
-                self.check_setup(stage, None)
-            self.check_destroy(stage, None)
-
-    def test_chdir(self):
-        with Stage(self.archive_url, name=self.stage_name) as stage:
+@pytest.fixture()
+def tmpdir_for_stage(mock_archive):
+    """Uses a temporary directory for staging"""
+    current = spack.stage_path
+    spack.config.update_config(
+        'config',
+        {'build_stage': [str(mock_archive.test_tmp_dir)]},
+        scope='user'
+    )
+    yield
+    spack.config.update_config(
+        'config', {'build_stage': [current]}, scope='user'
+    )
+
+
+@pytest.fixture()
+def mock_archive(tmpdir, monkeypatch):
+    """Creates a mock archive with the structure expected by the tests"""
+    # Mock up a stage area that looks like this:
+    #
+    # TMPDIR/                    test_files_dir
+    #     tmp/                   test_tmp_path (where stage should be)
+    #     test-files/            archive_dir_path
+    #         README.txt         test_readme (contains "hello world!\n")
+    #     test-files.tar.gz      archive_url = file:///path/to/this
+    #
+    test_tmp_path = tmpdir.join('tmp')
+    # set _test_tmp_path as the default test directory to use for stages.
+    spack.config.update_config(
+        'config', {'build_stage': [str(test_tmp_path)]}, scope='user'
+    )
+
+    archive_dir = tmpdir.join('test-files')
+    archive_name = 'test-files.tar.gz'
+    archive = tmpdir.join(archive_name)
+    archive_url = 'file://' + str(archive)
+    test_readme = archive_dir.join('README.txt')
+    archive_dir.ensure(dir=True)
+    test_tmp_path.ensure(dir=True)
+    test_readme.write('hello world!\n')
+
+    current = tmpdir.chdir()
+    tar = spack.util.executable.which('tar', required=True)
+    tar('czf', str(archive_name), 'test-files')
+    current.chdir()
+
+    # Make spack use the test environment for tmp stuff.
+    monkeypatch.setattr(spack.stage, '_tmp_root', None)
+    monkeypatch.setattr(spack.stage, '_use_tmp_stage', True)
+
+    Archive = collections.namedtuple(
+        'Archive', ['url', 'tmpdir', 'test_tmp_dir', 'archive_dir']
+    )
+    yield Archive(
+        url=archive_url,
+        tmpdir=tmpdir,
+        test_tmp_dir=test_tmp_path,
+        archive_dir=archive_dir
+    )
+    # record this since this test changes to directories that will
+    # be removed.
+    current.chdir()
+
+
+@pytest.fixture()
+def failing_search_fn():
+    """Returns a search function that fails! Always!"""
+    def _mock():
+        raise Exception("This should not have been called")
+    return _mock
+
+
+@pytest.fixture()
+def failing_fetch_strategy():
+    """Returns a fetch strategy that fails."""
+    class FailingFetchStrategy(spack.fetch_strategy.FetchStrategy):
+        def fetch(self):
+            raise spack.fetch_strategy.FailedDownloadError(
+                "<non-existent URL>",
+                "This implementation of FetchStrategy always fails"
+            )
+    return FailingFetchStrategy()
+
+
+@pytest.fixture()
+def search_fn():
+    """Returns a search function that always succeeds."""
+    class _Mock(object):
+        performed_search = False
+
+        def __call__(self):
+            self.performed_search = True
+            return []
+
+    return _Mock()
+
+
+@pytest.mark.usefixtures('builtin_mock')
+class TestStage(object):
+
+    stage_name = 'spack-test-stage'
+
+    @pytest.mark.usefixtures('tmpdir_for_stage')
+    def test_setup_and_destroy_name_with_tmp(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
+            check_setup(stage, self.stage_name, mock_archive)
+        check_destroy(stage, self.stage_name)
+
+    def test_setup_and_destroy_name_without_tmp(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
+            check_setup(stage, self.stage_name, mock_archive)
+        check_destroy(stage, self.stage_name)
+
+    @pytest.mark.usefixtures('tmpdir_for_stage')
+    def test_setup_and_destroy_no_name_with_tmp(self, mock_archive):
+        with Stage(mock_archive.url) as stage:
+            check_setup(stage, None, mock_archive)
+        check_destroy(stage, None)
+
+    def test_setup_and_destroy_no_name_without_tmp(self, mock_archive):
+        with Stage(mock_archive.url) as stage:
+            check_setup(stage, None, mock_archive)
+        check_destroy(stage, None)
+
+    def test_chdir(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
             stage.chdir()
-            self.check_setup(stage, self.stage_name)
-            self.check_chdir(stage, self.stage_name)
-        self.check_destroy(stage, self.stage_name)
+            check_setup(stage, self.stage_name, mock_archive)
+            check_chdir(stage, self.stage_name)
+        check_destroy(stage, self.stage_name)
 
-    def test_fetch(self):
-        with Stage(self.archive_url, name=self.stage_name) as stage:
+    def test_fetch(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
             stage.fetch()
-            self.check_setup(stage, self.stage_name)
-            self.check_chdir(stage, self.stage_name)
-            self.check_fetch(stage, self.stage_name)
-        self.check_destroy(stage, self.stage_name)
-
-    def test_no_search_if_default_succeeds(self):
-        with Stage(self.archive_url, name=self.stage_name,
-                   search_fn=fail_search_fn) as stage:
+            check_setup(stage, self.stage_name, mock_archive)
+            check_chdir(stage, self.stage_name)
+            check_fetch(stage, self.stage_name)
+        check_destroy(stage, self.stage_name)
+
+    def test_no_search_if_default_succeeds(
+            self, mock_archive, failing_search_fn
+    ):
+        with Stage(
+                mock_archive.url,
+                name=self.stage_name,
+                search_fn=failing_search_fn
+        ) as stage:
             stage.fetch()
-        self.check_destroy(stage, self.stage_name)
-
-    def test_no_search_mirror_only(self):
-        with Stage(FailingFetchStrategy(), name=self.stage_name,
-                   search_fn=fail_search_fn) as stage:
+        check_destroy(stage, self.stage_name)
+
+    def test_no_search_mirror_only(
+            self, failing_fetch_strategy, failing_search_fn
+    ):
+        with Stage(
+                failing_fetch_strategy,
+                name=self.stage_name,
+                search_fn=failing_search_fn
+        ) as stage:
             try:
                 stage.fetch(mirror_only=True)
             except spack.fetch_strategy.FetchError:
                 pass
-        self.check_destroy(stage, self.stage_name)
-
-    def test_search_if_default_fails(self):
-        test_search = MockSearchFunction()
-        with Stage(FailingFetchStrategy(), name=self.stage_name,
-                   search_fn=test_search) as stage:
+        check_destroy(stage, self.stage_name)
+
+    def test_search_if_default_fails(self, failing_fetch_strategy, search_fn):
+        with Stage(
+                failing_fetch_strategy,
+                name=self.stage_name,
+                search_fn=search_fn
+        ) as stage:
             try:
                 stage.fetch(mirror_only=False)
             except spack.fetch_strategy.FetchError:
                 pass
-        self.check_destroy(stage, self.stage_name)
-        self.assertTrue(test_search.performed_search)
+        check_destroy(stage, self.stage_name)
+        assert search_fn.performed_search
 
-    def test_expand_archive(self):
-        with Stage(self.archive_url, name=self.stage_name) as stage:
+    def test_expand_archive(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
             stage.fetch()
-            self.check_setup(stage, self.stage_name)
-            self.check_fetch(stage, self.stage_name)
+            check_setup(stage, self.stage_name, mock_archive)
+            check_fetch(stage, self.stage_name)
             stage.expand_archive()
-            self.check_expand_archive(stage, self.stage_name)
-        self.check_destroy(stage, self.stage_name)
+            check_expand_archive(stage, self.stage_name, mock_archive)
+        check_destroy(stage, self.stage_name)
 
-    def test_expand_archive_with_chdir(self):
-        with Stage(self.archive_url, name=self.stage_name) as stage:
+    def test_expand_archive_with_chdir(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
             stage.fetch()
-            self.check_setup(stage, self.stage_name)
-            self.check_fetch(stage, self.stage_name)
+            check_setup(stage, self.stage_name, mock_archive)
+            check_fetch(stage, self.stage_name)
             stage.expand_archive()
             stage.chdir_to_source()
-            self.check_expand_archive(stage, self.stage_name)
-            self.check_chdir_to_source(stage, self.stage_name)
-        self.check_destroy(stage, self.stage_name)
+            check_expand_archive(stage, self.stage_name, mock_archive)
+            check_chdir_to_source(stage, self.stage_name)
+        check_destroy(stage, self.stage_name)
 
-    def test_restage(self):
-        with Stage(self.archive_url, name=self.stage_name) as stage:
+    def test_restage(self, mock_archive):
+        with Stage(mock_archive.url, name=self.stage_name) as stage:
             stage.fetch()
             stage.expand_archive()
             stage.chdir_to_source()
-            self.check_expand_archive(stage, self.stage_name)
-            self.check_chdir_to_source(stage, self.stage_name)
+            check_expand_archive(stage, self.stage_name, mock_archive)
+            check_chdir_to_source(stage, self.stage_name)
 
             # Try to make a file in the old archive dir
             with open('foobar', 'w') as file:
                 file.write("this file is to be destroyed.")
 
-            self.assertTrue('foobar' in os.listdir(stage.source_path))
+            assert 'foobar' in os.listdir(stage.source_path)
 
             # Make sure the file is not there after restage.
             stage.restage()
-            self.check_chdir(stage, self.stage_name)
-            self.check_fetch(stage, self.stage_name)
+            check_chdir(stage, self.stage_name)
+            check_fetch(stage, self.stage_name)
             stage.chdir_to_source()
-            self.check_chdir_to_source(stage, self.stage_name)
-            self.assertFalse('foobar' in os.listdir(stage.source_path))
-        self.check_destroy(stage, self.stage_name)
-
-    def test_no_keep_without_exceptions(self):
-        with Stage(self.archive_url,
-                   name=self.stage_name, keep=False) as stage:
+            check_chdir_to_source(stage, self.stage_name)
+            assert 'foobar' not in os.listdir(stage.source_path)
+        check_destroy(stage, self.stage_name)
+
+    def test_no_keep_without_exceptions(self, mock_archive):
+        with Stage(
+                mock_archive.url, name=self.stage_name, keep=False
+        ) as stage:
             pass
-        self.check_destroy(stage, self.stage_name)
+        check_destroy(stage, self.stage_name)
 
-    def test_keep_without_exceptions(self):
-        with Stage(self.archive_url,
-                   name=self.stage_name, keep=True) as stage:
+    def test_keep_without_exceptions(self, mock_archive):
+        with Stage(
+                mock_archive.url, name=self.stage_name, keep=True
+        ) as stage:
             pass
-        path = self.get_stage_path(stage, self.stage_name)
-        self.assertTrue(os.path.isdir(path))
+        path = get_stage_path(stage, self.stage_name)
+        assert os.path.isdir(path)
 
-    def test_no_keep_with_exceptions(self):
+    def test_no_keep_with_exceptions(self, mock_archive):
+        class ThisMustFailHere(Exception):
+            pass
         try:
-            with Stage(self.archive_url,
-                       name=self.stage_name, keep=False) as stage:
-                raise Exception()
-
-            path = self.get_stage_path(stage, self.stage_name)
-            self.assertTrue(os.path.isdir(path))
-        except:
-            pass  # ignore here.
-
-    def test_keep_exceptions(self):
+            with Stage(
+                    mock_archive.url, name=self.stage_name, keep=False
+            ) as stage:
+                raise ThisMustFailHere()
+        except ThisMustFailHere:
+            path = get_stage_path(stage, self.stage_name)
+            assert os.path.isdir(path)
+
+    def test_keep_exceptions(self, mock_archive):
+        class ThisMustFailHere(Exception):
+            pass
         try:
-            with Stage(self.archive_url,
-                       name=self.stage_name, keep=True) as stage:
-                raise Exception()
-
-            path = self.get_stage_path(stage, self.stage_name)
-            self.assertTrue(os.path.isdir(path))
-        except:
-            pass  # ignore here.
+            with Stage(
+                    mock_archive.url, name=self.stage_name, keep=True
+            ) as stage:
+                raise ThisMustFailHere()
+        except ThisMustFailHere:
+            path = get_stage_path(stage, self.stage_name)
+            assert os.path.isdir(path)
diff --git a/lib/spack/spack/test/svn_fetch.py b/lib/spack/spack/test/svn_fetch.py
index 01ffc488a7..962a150909 100644
--- a/lib/spack/spack/test/svn_fetch.py
+++ b/lib/spack/spack/test/svn_fetch.py
@@ -23,87 +23,62 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
-import re
-import spack
 
-from spack.test.mock_repo import svn, MockSvnRepo
-from spack.version import ver
-from spack.test.mock_packages_test import *
+import pytest
+import spack
 from llnl.util.filesystem import *
+from spack.spec import Spec
+from spack.version import ver
 
 
-class SvnFetchTest(MockPackagesTest):
-    """Tests fetching from a dummy git repository."""
-
-    def setUp(self):
-        """Create an svn repository with two revisions."""
-        super(SvnFetchTest, self).setUp()
-
-        self.repo = MockSvnRepo()
-
-        spec = Spec('svn-test')
-        spec.concretize()
-        self.pkg = spack.repo.get(spec, new=True)
-
-    def tearDown(self):
-        """Destroy the stage space used by this test."""
-        super(SvnFetchTest, self).tearDown()
-        self.repo.destroy()
-
-    def assert_rev(self, rev):
-        """Check that the current revision is equal to the supplied rev."""
-        def get_rev():
-            output = svn('info', output=str)
-            self.assertTrue("Revision" in output)
-            for line in output.split('\n'):
-                match = re.match(r'Revision: (\d+)', line)
-                if match:
-                    return match.group(1)
-        self.assertEqual(get_rev(), rev)
-
-    def try_fetch(self, rev, test_file, args):
-        """Tries to:
-
-        1. Fetch the repo using a fetch strategy constructed with
-           supplied args.
-        2. Check if the test_file is in the checked out repository.
-        3. Assert that the repository is at the revision supplied.
-        4. Add and remove some files, then reset the repo, and
-           ensure it's all there again.
-        """
-        self.pkg.versions[ver('svn')] = args
-
-        with self.pkg.stage:
-            self.pkg.do_stage()
-            self.assert_rev(rev)
-
-            file_path = join_path(self.pkg.stage.source_path, test_file)
-            self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
-            self.assertTrue(os.path.isfile(file_path))
-
-            os.unlink(file_path)
-            self.assertFalse(os.path.isfile(file_path))
-
-            untracked = 'foobarbaz'
-            touch(untracked)
-            self.assertTrue(os.path.isfile(untracked))
-            self.pkg.do_restage()
-            self.assertFalse(os.path.isfile(untracked))
-
-            self.assertTrue(os.path.isdir(self.pkg.stage.source_path))
-            self.assertTrue(os.path.isfile(file_path))
-
-            self.assert_rev(rev)
-
-    def test_fetch_default(self):
-        """Test a default checkout and make sure it's on rev 1"""
-        self.try_fetch(self.repo.r1, self.repo.r1_file, {
-            'svn': self.repo.url
-        })
-
-    def test_fetch_r1(self):
-        """Test fetching an older revision (0)."""
-        self.try_fetch(self.repo.r0, self.repo.r0_file, {
-            'svn': self.repo.url,
-            'revision': self.repo.r0
-        })
+@pytest.fixture(params=['default', 'rev0'])
+def type_of_test(request):
+    """Returns one of the test type available for the mock_hg_repository"""
+    return request.param
+
+
+def test_fetch(
+        type_of_test,
+        mock_svn_repository,
+        config,
+        refresh_builtin_mock
+):
+    """Tries to:
+
+    1. Fetch the repo using a fetch strategy constructed with
+       supplied args (they depend on type_of_test).
+    2. Check if the test_file is in the checked out repository.
+    3. Assert that the repository is at the revision supplied.
+    4. Add and remove some files, then reset the repo, and
+       ensure it's all there again.
+    """
+    # Retrieve the right test parameters
+    t = mock_svn_repository.checks[type_of_test]
+    h = mock_svn_repository.hash
+    # Construct the package under test
+    spec = Spec('hg-test')
+    spec.concretize()
+    pkg = spack.repo.get(spec, new=True)
+    pkg.versions[ver('hg')] = t.args
+    # Enter the stage directory and check some properties
+    with pkg.stage:
+        pkg.do_stage()
+        assert h() == t.revision
+
+        file_path = join_path(pkg.stage.source_path, t.file)
+        assert os.path.isdir(pkg.stage.source_path)
+        assert os.path.isfile(file_path)
+
+        os.unlink(file_path)
+        assert not os.path.isfile(file_path)
+
+        untracked_file = 'foobarbaz'
+        touch(untracked_file)
+        assert os.path.isfile(untracked_file)
+        pkg.do_restage()
+        assert not os.path.isfile(untracked_file)
+
+        assert os.path.isdir(pkg.stage.source_path)
+        assert os.path.isfile(file_path)
+
+        assert h() == t.revision
diff --git a/lib/spack/spack/test/tally_plugin.py b/lib/spack/spack/test/tally_plugin.py
deleted file mode 100644
index d848f2cb9f..0000000000
--- a/lib/spack/spack/test/tally_plugin.py
+++ /dev/null
@@ -1,64 +0,0 @@
-##############################################################################
-# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU Lesser General Public License (as
-# published by the Free Software Foundation) version 2.1, February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU Lesser General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public
-# License along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import os
-
-from nose.plugins import Plugin
-
-
-class Tally(Plugin):
-    name = 'tally'
-
-    def __init__(self):
-        super(Tally, self).__init__()
-        self.successCount = 0
-        self.failCount = 0
-        self.errorCount = 0
-        self.error_list = []
-        self.fail_list = []
-
-    @property
-    def numberOfTestsRun(self):
-        """Excludes skipped tests"""
-        return self.errorCount + self.failCount + self.successCount
-
-    def options(self, parser, env=os.environ):
-        super(Tally, self).options(parser, env=env)
-
-    def configure(self, options, conf):
-        super(Tally, self).configure(options, conf)
-
-    def addSuccess(self, test):
-        self.successCount += 1
-
-    def addError(self, test, err):
-        self.errorCount += 1
-        self.error_list.append(test)
-
-    def addFailure(self, test, err):
-        self.failCount += 1
-        self.fail_list.append(test)
-
-    def finalize(self, result):
-        pass
diff --git a/lib/spack/spack/test/url_extrapolate.py b/lib/spack/spack/test/url_extrapolate.py
index ca14dab958..5f5cf555ae 100644
--- a/lib/spack/spack/test/url_extrapolate.py
+++ b/lib/spack/spack/test/url_extrapolate.py
@@ -22,11 +22,12 @@
 # License along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-"""\
-Tests ability of spack to extrapolate URL versions from existing versions.
+"""Tests ability of spack to extrapolate URL versions from
+existing versions.
 """
+import unittest
+
 import spack.url as url
-from spack.test.mock_packages_test import *
 
 
 class UrlExtrapolateTest(unittest.TestCase):
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 0000000000..0d8d2b271f
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,5 @@
+# content of pytest.ini
+[pytest]
+addopts = --durations=20 -ra
+testpaths = lib/spack/spack/test
+python_files = *.py
\ No newline at end of file
diff --git a/share/spack/qa/changed_files b/share/spack/qa/changed_files
deleted file mode 100755
index c1fa55c053..0000000000
--- a/share/spack/qa/changed_files
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-# Description:
-#     Returns a list of changed files.
-#
-# Usage:
-#     changed_files [<directory> ...]
-#     changed_files [<file> ...]
-#     changed_files ["*.<extension>" ...]
-#
-# Options:
-#     Directories, files, or globs to search for changed files.
-#
-
-# Move to root directory of Spack
-# Allows script to be run from anywhere
-SPACK_ROOT="$(dirname "$0")/../../.."
-cd "$SPACK_ROOT"
-
-# Add changed files that have been committed since branching off of develop
-changed=($(git diff --name-only --diff-filter=ACMR develop... -- "$@"))
-# Add changed files that have been staged but not yet committed
-changed+=($(git diff --name-only --diff-filter=ACMR --cached -- "$@"))
-# Add changed files that are unstaged
-changed+=($(git diff --name-only --diff-filter=ACMR -- "$@"))
-# Add new files that are untracked
-changed+=($(git ls-files --exclude-standard --other -- "$@"))
-
-# Return array
-# Ensure that each file in the array is unique
-printf '%s\n' "${changed[@]}" | sort -u
diff --git a/share/spack/qa/run-unit-tests b/share/spack/qa/run-unit-tests
index 6da919e18d..0728614bc8 100755
--- a/share/spack/qa/run-unit-tests
+++ b/share/spack/qa/run-unit-tests
@@ -43,4 +43,9 @@ spack config get compilers
 spack install -v libdwarf
 
 # Run unit tests with code coverage
-coverage run bin/spack test "$@"
+if [[ "$TRAVIS_PYTHON_VERSION" == 2.7 ]]; then
+    coverage run bin/spack test "$@"
+    coverage combine
+else
+    spack test "$@"
+fi
-- 
GitLab