diff --git a/.gitignore b/.gitignore
index 1c6ca4c99e181cc853d9dd78e685a0a912b913cc..4b97de5d507070780e7773cd8672c4edf792c168 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,3 +8,4 @@
 /etc/spackconfig
 /share/spack/dotkit
 /share/spack/modules
+/TAGS
diff --git a/.mailmap b/.mailmap
index 1cc13c1eb14644d950e6a6c7497b286c039537b1..1b99da32b5be15fe44fda44b586830a4f6d471d7 100644
--- a/.mailmap
+++ b/.mailmap
@@ -9,3 +9,5 @@ Saravan Pantham    <saravan.pantham@gmail.com>  Saravan Pantham     <pantham1@su
 Tom Scogland       <tscogland@llnl.gov>         Tom Scogland        <scogland1@llnl.gov>
 Tom Scogland       <tscogland@llnl.gov>         Tom Scogland        <tom.scogland@gmail.com>
 Joachim Protze     <protze@rz.rwth-aachen.de>   jprotze             <protze@rz.rwth-aachen.de>
+Gregory L. Lee     <lee218@llnl.gov>            Gregory L. Lee      <lee218@surface86.llnl.gov>
+Gregory L. Lee     <lee218@llnl.gov>            Gregory Lee         <lee218@llnl.gov>
diff --git a/bin/spack b/bin/spack
index cd46cf6180b7e9a69f7d75845c73520add5e12e6..31165bba9d1ec7c9c8dd42e74e203f92761ee96f 100755
--- a/bin/spack
+++ b/bin/spack
@@ -41,6 +41,14 @@ sys.path.insert(0, SPACK_LIB_PATH)
 SPACK_EXTERNAL_LIBS = os.path.join(SPACK_LIB_PATH, "external")
 sys.path.insert(0, SPACK_EXTERNAL_LIBS)
 
+import warnings
+# Avoid warnings when nose is installed in the Python installation used to
+# run Spack. Note this must be done after Spack's external libs directory is
+# added to sys.path.
+with warnings.catch_warnings():
+    warnings.filterwarnings("ignore", ".*nose was already imported")
+    import nose
+
 # Quick and dirty check to clean orphaned .pyc files left over from
 # previous revisions.  These files were present in earlier versions of
 # Spack, were removed, but shadow system modules that Spack still
@@ -54,7 +62,7 @@ for pyc_file in orphaned_pyc_files:
     try:
         os.remove(pyc_file)
     except OSError as e:
-        print "WARNING: Spack may fail mysteriously. Couldn't remove orphaned .pyc file: %s" % pyc
+        print "WARNING: Spack may fail mysteriously. Couldn't remove orphaned .pyc file: %s" % pyc_file
 
 # If there is no working directory, use the spack prefix.
 try:
@@ -132,8 +140,8 @@ def main():
 
     spack.spack_working_dir = working_dir
     if args.mock:
-        from spack.packages import PackageDB
-        spack.db = PackageDB(spack.mock_packages_path)
+        from spack.repository import RepoPath
+        spack.repo.swap(RepoPath(spack.mock_packages_path))
 
     # If the user asked for it, don't check ssl certs.
     if args.insecure:
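
The --mock path above swaps the real package repository for the mock one used
by tests. A minimal sketch of the same pattern, assuming RepoPath.swap
exchanges the two objects' contents so a second call restores the original
(the 'mpich' lookup is hypothetical):

    from spack.repository import RepoPath
    import spack

    mock = RepoPath(spack.mock_packages_path)
    spack.repo.swap(mock)              # spack.repo now serves mock packages
    try:
        pkg = spack.repo.get('mpich')  # hypothetical package lookup
    finally:
        spack.repo.swap(mock)          # swap back to the real repositories
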
diff --git a/etc/spack/repos.yaml b/etc/spack/repos.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2d4ff54ce6d2bf88b1200d3af727d448dacd0ec0
--- /dev/null
+++ b/etc/spack/repos.yaml
@@ -0,0 +1,8 @@
+# -------------------------------------------------------------------------
+# This is the default spack repository configuration.
+#
+# Changes to this file will affect all users of this spack install,
+# although users can override these settings in their ~/.spack/repos.yaml.
+# -------------------------------------------------------------------------
+repos:
+  - $spack/var/spack/repos/builtin
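
As the header notes, a per-user ~/.spack/repos.yaml overrides this default. A
hedged sketch of such an override (the personal repo path is hypothetical, and
repos are assumed to be searched in listed order):

    # ~/.spack/repos.yaml
    repos:
      - ~/my-spack-repos/custom
      - $spack/var/spack/repos/builtin
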
diff --git a/lib/spack/env/cc b/lib/spack/env/cc
index 0966277a91e122aab26bad92f221d477abe6aa2b..aacba996b390623b81db3a44e681406ffab71f63 100755
--- a/lib/spack/env/cc
+++ b/lib/spack/env/cc
@@ -94,11 +94,11 @@ case "$command" in
         command="$SPACK_CXX"
         language="C++"
         ;;
-    f90|fc|f95|gfortran|ifort|pgf90|xlf90)
+    f90|fc|f95|gfortran|ifort|pgf90|xlf90|nagfor)
         command="$SPACK_FC"
         language="Fortran 90"
         ;;
-    f77|gfortran|ifort|pgf77|xlf)
+    f77|gfortran|ifort|pgf77|xlf|nagfor)
         command="$SPACK_F77"
         language="Fortran 77"
         ;;
diff --git a/lib/spack/env/nag/nagfor b/lib/spack/env/nag/nagfor
new file mode 120000
index 0000000000000000000000000000000000000000..82c2b8e90a381ef733daa21645dc316ee5efe94d
--- /dev/null
+++ b/lib/spack/env/nag/nagfor
@@ -0,0 +1 @@
+../cc
\ No newline at end of file
diff --git a/lib/spack/external/argparse.py b/lib/spack/external/argparse.py
index 394e5da15245a75cf6ef76a7f7127dfa7b3ec85f..ec9a9ee738c98f779b5651d3c4d858dbe0dcdc43 100644
--- a/lib/spack/external/argparse.py
+++ b/lib/spack/external/argparse.py
@@ -1067,9 +1067,13 @@ class _SubParsersAction(Action):
 
     class _ChoicesPseudoAction(Action):
 
-        def __init__(self, name, help):
+        def __init__(self, name, aliases, help):
+            metavar = dest = name
+            if aliases:
+                metavar += ' (%s)' % ', '.join(aliases)
             sup = super(_SubParsersAction._ChoicesPseudoAction, self)
-            sup.__init__(option_strings=[], dest=name, help=help)
+            sup.__init__(option_strings=[], dest=dest, help=help,
+                         metavar=metavar)
 
     def __init__(self,
                  option_strings,
@@ -1097,15 +1101,22 @@ def add_parser(self, name, **kwargs):
         if kwargs.get('prog') is None:
             kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
 
+        aliases = kwargs.pop('aliases', ())
+
         # create a pseudo-action to hold the choice help
         if 'help' in kwargs:
             help = kwargs.pop('help')
-            choice_action = self._ChoicesPseudoAction(name, help)
+            choice_action = self._ChoicesPseudoAction(name, aliases, help)
             self._choices_actions.append(choice_action)
 
         # create the parser and add it to the map
         parser = self._parser_class(**kwargs)
         self._name_parser_map[name] = parser
+
+        # make parser available under aliases also
+        for alias in aliases:
+            self._name_parser_map[alias] = parser
+
         return parser
 
     def _get_subactions(self):
@@ -1123,8 +1134,9 @@ def __call__(self, parser, namespace, values, option_string=None):
         try:
             parser = self._name_parser_map[parser_name]
         except KeyError:
-            tup = parser_name, ', '.join(self._name_parser_map)
-            msg = _('unknown parser %r (choices: %s)' % tup)
+            args = {'parser_name': parser_name,
+                    'choices': ', '.join(self._name_parser_map)}
+            msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args
             raise ArgumentError(self, msg)
 
         # parse all the remaining options into the namespace
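
A minimal sketch of what the aliases support added above enables, using this
patched argparse (the subcommand names are hypothetical):

    import argparse

    parser = argparse.ArgumentParser(prog='spack')
    subparsers = parser.add_subparsers(dest='command')

    # 'rm' registers the same subparser under a second name and shows up
    # in help output as "remove (rm)" via the metavar built above.
    remove = subparsers.add_parser('remove', aliases=['rm'],
                                   help='remove a package')
    remove.add_argument('spec')

    args = parser.parse_args(['rm', 'libelf'])
    assert args.command == 'rm' and args.spec == 'libelf'
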
diff --git a/lib/spack/external/jsonschema/COPYING b/lib/spack/external/jsonschema/COPYING
new file mode 100644
index 0000000000000000000000000000000000000000..af9cfbdb134f42e5205ecbad597421d778826481
--- /dev/null
+++ b/lib/spack/external/jsonschema/COPYING
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Julian Berman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/lib/spack/external/jsonschema/README.rst b/lib/spack/external/jsonschema/README.rst
new file mode 100644
index 0000000000000000000000000000000000000000..20c2fe62667b027eae9ecfa8a207936d85e15546
--- /dev/null
+++ b/lib/spack/external/jsonschema/README.rst
@@ -0,0 +1,104 @@
+==========
+jsonschema
+==========
+
+``jsonschema`` is an implementation of `JSON Schema <http://json-schema.org>`_
+for Python (supporting Python 2.6+, including Python 3).
+
+.. code-block:: python
+
+    >>> from jsonschema import validate
+
+    >>> # A sample schema, like what we'd get from json.load()
+    >>> schema = {
+    ...     "type" : "object",
+    ...     "properties" : {
+    ...         "price" : {"type" : "number"},
+    ...         "name" : {"type" : "string"},
+    ...     },
+    ... }
+
+    >>> # If no exception is raised by validate(), the instance is valid.
+    >>> validate({"name" : "Eggs", "price" : 34.99}, schema)
+
+    >>> validate(
+    ...     {"name" : "Eggs", "price" : "Invalid"}, schema
+    ... )                                   # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+        ...
+    ValidationError: 'Invalid' is not of type 'number'
+
+
+Features
+--------
+
+* Full support for
+  `Draft 3 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft3Validator>`_
+  **and** `Draft 4 <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.Draft4Validator>`_
+  of the schema.
+
+* `Lazy validation <https://python-jsonschema.readthedocs.org/en/latest/validate/#jsonschema.IValidator.iter_errors>`_
+  that can iteratively report *all* validation errors.
+
+* Small and extensible
+
+* `Programmatic querying <https://python-jsonschema.readthedocs.org/en/latest/errors/#module-jsonschema>`_
+  of which properties or items failed validation.
+
+
+Release Notes
+-------------
+
+* A simple CLI was added for validation
+* Validation errors now keep full absolute paths and absolute schema paths in
+  their ``absolute_path`` and ``absolute_schema_path`` attributes. The ``path``
+  and ``schema_path`` attributes are deprecated in favor of ``relative_path``
+  and ``relative_schema_path``\ .
+
+*Note:* Support for Python 3.2 was dropped in this release, and installation
+now uses setuptools.
+
+
+Running the Test Suite
+----------------------
+
+``jsonschema`` uses the wonderful `Tox <http://tox.readthedocs.org>`_ for its
+test suite. (It really is wonderful; if for some reason you haven't heard of
+it, you really should use it for your projects).
+
+Assuming you have ``tox`` installed (perhaps via ``pip install tox`` or your
+package manager), just run ``tox`` in the directory of your source checkout to
+run ``jsonschema``'s test suite on all of the versions of Python ``jsonschema``
+supports. Note that you'll need to have all of those versions installed in
+order to run the tests on each of them; otherwise ``tox`` will skip (and fail)
+the tests on that version.
+
+Of course you're also free to just run the tests on a single version with your
+favorite test runner. The tests live in the ``jsonschema.tests`` package.
+
+
+Community
+---------
+
+There's a `mailing list <https://groups.google.com/forum/#!forum/jsonschema>`_
+for this implementation on Google Groups.
+
+Please join, and feel free to send questions there.
+
+
+Contributing
+------------
+
+I'm Julian Berman.
+
+``jsonschema`` is on `GitHub <http://github.com/Julian/jsonschema>`_.
+
+Get in touch, via GitHub or otherwise, if you've got something to contribute;
+it'd be most welcome!
+
+You can also generally find me on Freenode (nick: ``tos9``) in various
+channels, including ``#python``.
+
+If you feel overwhelmingly grateful, you can woo me with beer money on
+`Gittip <https://www.gittip.com/Julian/>`_ or via Google Wallet with the email
+in my GitHub profile.
diff --git a/lib/spack/external/jsonschema/__init__.py b/lib/spack/external/jsonschema/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c099f1d8bf25418b5263146f0a4a785bd37ef34
--- /dev/null
+++ b/lib/spack/external/jsonschema/__init__.py
@@ -0,0 +1,26 @@
+"""
+An implementation of JSON Schema for Python
+
+The main functionality is provided by the validator classes for each of the
+supported JSON Schema versions.
+
+Most commonly, :func:`validate` is the quickest way to simply validate a given
+instance under a schema, and will create a validator for you.
+
+"""
+
+from jsonschema.exceptions import (
+    ErrorTree, FormatError, RefResolutionError, SchemaError, ValidationError
+)
+from jsonschema._format import (
+    FormatChecker, draft3_format_checker, draft4_format_checker,
+)
+from jsonschema.validators import (
+    Draft3Validator, Draft4Validator, RefResolver, validate
+)
+
+
+__version__ = "2.4.0"
+
+
+# flake8: noqa
diff --git a/lib/spack/external/jsonschema/__main__.py b/lib/spack/external/jsonschema/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..82c29fd39e7b68fa8893d69f70b73476750694f1
--- /dev/null
+++ b/lib/spack/external/jsonschema/__main__.py
@@ -0,0 +1,2 @@
+from jsonschema.cli import main
+main()
diff --git a/lib/spack/external/jsonschema/_format.py b/lib/spack/external/jsonschema/_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb52d183ad6b27404de44ab6531e1cf1f6e9836b
--- /dev/null
+++ b/lib/spack/external/jsonschema/_format.py
@@ -0,0 +1,240 @@
+import datetime
+import re
+import socket
+
+from jsonschema.compat import str_types
+from jsonschema.exceptions import FormatError
+
+
+class FormatChecker(object):
+    """
+    A ``format`` property checker.
+
+    JSON Schema does not mandate that the ``format`` property actually do any
+    validation. If validation is desired however, instances of this class can
+    be hooked into validators to enable format validation.
+
+    :class:`FormatChecker` objects always return ``True`` when asked about
+    formats that they do not know how to validate.
+
+    To check a custom format using a function that takes an instance and
+    returns a ``bool``, use the :meth:`FormatChecker.checks` or
+    :meth:`FormatChecker.cls_checks` decorators.
+
+    :argument iterable formats: the known formats to validate. This argument
+                                can be used to limit which formats will be used
+                                during validation.
+
+    """
+
+    checkers = {}
+
+    def __init__(self, formats=None):
+        if formats is None:
+            self.checkers = self.checkers.copy()
+        else:
+            self.checkers = dict((k, self.checkers[k]) for k in formats)
+
+    def checks(self, format, raises=()):
+        """
+        Register a decorated function as validating a new format.
+
+        :argument str format: the format that the decorated function will check
+        :argument Exception raises: the exception(s) raised by the decorated
+            function when an invalid instance is found. The exception object
+            will be accessible as the :attr:`ValidationError.cause` attribute
+            of the resulting validation error.
+
+        """
+
+        def _checks(func):
+            self.checkers[format] = (func, raises)
+            return func
+        return _checks
+
+    cls_checks = classmethod(checks)
+
+    def check(self, instance, format):
+        """
+        Check whether the instance conforms to the given format.
+
+        :argument instance: the instance to check
+        :type: any primitive type (str, number, bool)
+        :argument str format: the format that instance should conform to
+        :raises: :exc:`FormatError` if instance does not conform to format
+
+        """
+
+        if format not in self.checkers:
+            return
+
+        func, raises = self.checkers[format]
+        result, cause = None, None
+        try:
+            result = func(instance)
+        except raises as e:
+            cause = e
+        if not result:
+            raise FormatError(
+                "%r is not a %r" % (instance, format), cause=cause,
+            )
+
+    def conforms(self, instance, format):
+        """
+        Check whether the instance conforms to the given format.
+
+        :argument instance: the instance to check
+        :type: any primitive type (str, number, bool)
+        :argument str format: the format that instance should conform to
+        :rtype: bool
+
+        """
+
+        try:
+            self.check(instance, format)
+        except FormatError:
+            return False
+        else:
+            return True
+
+
+_draft_checkers = {"draft3": [], "draft4": []}
+
+
+def _checks_drafts(both=None, draft3=None, draft4=None, raises=()):
+    draft3 = draft3 or both
+    draft4 = draft4 or both
+
+    def wrap(func):
+        if draft3:
+            _draft_checkers["draft3"].append(draft3)
+            func = FormatChecker.cls_checks(draft3, raises)(func)
+        if draft4:
+            _draft_checkers["draft4"].append(draft4)
+            func = FormatChecker.cls_checks(draft4, raises)(func)
+        return func
+    return wrap
+
+
+@_checks_drafts("email")
+def is_email(instance):
+    if not isinstance(instance, str_types):
+        return True
+    return "@" in instance
+
+
+_ipv4_re = re.compile(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
+
+@_checks_drafts(draft3="ip-address", draft4="ipv4")
+def is_ipv4(instance):
+    if not isinstance(instance, str_types):
+        return True
+    if not _ipv4_re.match(instance):
+        return False
+    return all(0 <= int(component) <= 255 for component in instance.split("."))
+
+
+if hasattr(socket, "inet_pton"):
+    @_checks_drafts("ipv6", raises=socket.error)
+    def is_ipv6(instance):
+        if not isinstance(instance, str_types):
+            return True
+        return socket.inet_pton(socket.AF_INET6, instance)
+
+
+_host_name_re = re.compile(r"^[A-Za-z0-9][A-Za-z0-9\.\-]{1,255}$")
+
+@_checks_drafts(draft3="host-name", draft4="hostname")
+def is_host_name(instance):
+    if not isinstance(instance, str_types):
+        return True
+    if not _host_name_re.match(instance):
+        return False
+    components = instance.split(".")
+    for component in components:
+        if len(component) > 63:
+            return False
+    return True
+
+
+try:
+    import rfc3987
+except ImportError:
+    pass
+else:
+    @_checks_drafts("uri", raises=ValueError)
+    def is_uri(instance):
+        if not isinstance(instance, str_types):
+            return True
+        return rfc3987.parse(instance, rule="URI")
+
+
+try:
+    import strict_rfc3339
+except ImportError:
+    try:
+        import isodate
+    except ImportError:
+        pass
+    else:
+        @_checks_drafts("date-time", raises=(ValueError, isodate.ISO8601Error))
+        def is_date(instance):
+            if not isinstance(instance, str_types):
+                return True
+            return isodate.parse_datetime(instance)
+else:
+    @_checks_drafts("date-time")
+    def is_date(instance):
+        if not isinstance(instance, str_types):
+            return True
+        return strict_rfc3339.validate_rfc3339(instance)
+
+
+@_checks_drafts("regex", raises=re.error)
+def is_regex(instance):
+    if not isinstance(instance, str_types):
+        return True
+    return re.compile(instance)
+
+
+@_checks_drafts(draft3="date", raises=ValueError)
+def is_date(instance):
+    if not isinstance(instance, str_types):
+        return True
+    return datetime.datetime.strptime(instance, "%Y-%m-%d")
+
+
+@_checks_drafts(draft3="time", raises=ValueError)
+def is_time(instance):
+    if not isinstance(instance, str_types):
+        return True
+    return datetime.datetime.strptime(instance, "%H:%M:%S")
+
+
+try:
+    import webcolors
+except ImportError:
+    pass
+else:
+    def is_css_color_code(instance):
+        return webcolors.normalize_hex(instance)
+
+
+    @_checks_drafts(draft3="color", raises=(ValueError, TypeError))
+    def is_css21_color(instance):
+        if (
+            not isinstance(instance, str_types) or
+            instance.lower() in webcolors.css21_names_to_hex
+        ):
+            return True
+        return is_css_color_code(instance)
+
+
+    def is_css3_color(instance):
+        if instance.lower() in webcolors.css3_names_to_hex:
+            return True
+        return is_css_color_code(instance)
+
+
+draft3_format_checker = FormatChecker(_draft_checkers["draft3"])
+draft4_format_checker = FormatChecker(_draft_checkers["draft4"])
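
A short sketch of registering a custom format through the checks decorator
defined above; the 'even' format is made up for illustration, and validate()
is assumed to pass format_checker through to the validator class:

    from jsonschema import FormatChecker, validate

    checker = FormatChecker()

    @checker.checks('even')
    def is_even(instance):
        # Mirror the built-in checkers: pass for types the format
        # does not apply to.
        if not isinstance(instance, int):
            return True
        return instance % 2 == 0

    validate(4, {'format': 'even'}, format_checker=checker)  # passes
    # validate(3, ...) with the same checker raises ValidationError
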
diff --git a/lib/spack/external/jsonschema/_reflect.py b/lib/spack/external/jsonschema/_reflect.py
new file mode 100644
index 0000000000000000000000000000000000000000..d09e38fbdcf6b24ddc7a8a0fa9cfb30d01807467
--- /dev/null
+++ b/lib/spack/external/jsonschema/_reflect.py
@@ -0,0 +1,155 @@
+# -*- test-case-name: twisted.test.test_reflect -*-
+# Copyright (c) Twisted Matrix Laboratories.
+# See LICENSE for details.
+
+"""
+Standardized versions of various cool and/or strange things that you can do
+with Python's reflection capabilities.
+"""
+
+import sys
+
+from jsonschema.compat import PY3
+
+
+class _NoModuleFound(Exception):
+    """
+    No module was found because none exists.
+    """
+
+
+
+class InvalidName(ValueError):
+    """
+    The given name is not a dot-separated list of Python objects.
+    """
+
+
+
+class ModuleNotFound(InvalidName):
+    """
+    The module associated with the given name doesn't exist and it can't be
+    imported.
+    """
+
+
+
+class ObjectNotFound(InvalidName):
+    """
+    The object associated with the given name doesn't exist and it can't be
+    imported.
+    """
+
+
+
+if PY3:
+    def reraise(exception, traceback):
+        raise exception.with_traceback(traceback)
+else:
+    exec("""def reraise(exception, traceback):
+        raise exception.__class__, exception, traceback""")
+
+reraise.__doc__ = """
+Re-raise an exception, with an optional traceback, in a way that is compatible
+with both Python 2 and Python 3.
+
+Note that on Python 3, re-raised exceptions will be mutated, with their
+C{__traceback__} attribute being set.
+
+@param exception: The exception instance.
+@param traceback: The traceback to use, or C{None} indicating a new traceback.
+"""
+
+
+def _importAndCheckStack(importName):
+    """
+    Import the given name as a module, then walk the stack to determine whether
+    the failure was the module not existing, or some code in the module (for
+    example a dependent import) failing.  This can be helpful to determine
+    whether any actual application code was run.  For example, to distinguish
+    administrative error (entering the wrong module name), from programmer
+    error (writing buggy code in a module that fails to import).
+
+    @param importName: The name of the module to import.
+    @type importName: C{str}
+    @raise Exception: if something bad happens.  This can be any type of
+        exception, since nobody knows what loading some arbitrary code might
+        do.
+    @raise _NoModuleFound: if no module was found.
+    """
+    try:
+        return __import__(importName)
+    except ImportError:
+        excType, excValue, excTraceback = sys.exc_info()
+        while excTraceback:
+            execName = excTraceback.tb_frame.f_globals["__name__"]
+            # in Python 2 execName is None when an ImportError is encountered,
+            # where in Python 3 execName is equal to the importName.
+            if execName is None or execName == importName:
+                reraise(excValue, excTraceback)
+            excTraceback = excTraceback.tb_next
+        raise _NoModuleFound()
+
+
+
+def namedAny(name):
+    """
+    Retrieve a Python object by its fully qualified name from the global Python
+    module namespace.  The first part of the name, that describes a module,
+    will be discovered and imported.  Each subsequent part of the name is
+    treated as the name of an attribute of the object specified by all of the
+    name which came before it.  For example, the fully-qualified name of this
+    object is 'twisted.python.reflect.namedAny'.
+
+    @type name: L{str}
+    @param name: The name of the object to return.
+
+    @raise InvalidName: If the name is an empty string, starts or ends with
+        a '.', or is otherwise syntactically incorrect.
+
+    @raise ModuleNotFound: If the name is syntactically correct but the
+        module it specifies cannot be imported because it does not appear to
+        exist.
+
+    @raise ObjectNotFound: If the name is syntactically correct, includes at
+        least one '.', but the module it specifies cannot be imported because
+        it does not appear to exist.
+
+    @raise AttributeError: If an attribute of an object along the way cannot be
+        accessed, or a module along the way is not found.
+
+    @return: the Python object identified by 'name'.
+    """
+    if not name:
+        raise InvalidName('Empty module name')
+
+    names = name.split('.')
+
+    # if the name starts or ends with a '.' or contains '..', the __import__
+    # will raise an 'Empty module name' error. This will provide a better error
+    # message.
+    if '' in names:
+        raise InvalidName(
+            "name must be a string giving a '.'-separated list of Python "
+            "identifiers, not %r" % (name,))
+
+    topLevelPackage = None
+    moduleNames = names[:]
+    while not topLevelPackage:
+        if moduleNames:
+            trialname = '.'.join(moduleNames)
+            try:
+                topLevelPackage = _importAndCheckStack(trialname)
+            except _NoModuleFound:
+                moduleNames.pop()
+        else:
+            if len(names) == 1:
+                raise ModuleNotFound("No module named %r" % (name,))
+            else:
+                raise ObjectNotFound('%r does not name an object' % (name,))
+
+    obj = topLevelPackage
+    for n in names[1:]:
+        obj = getattr(obj, n)
+
+    return obj
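
A one-line sketch of namedAny, which cli.py below uses to resolve the
--validator flag to a class:

    from jsonschema._reflect import namedAny

    cls = namedAny('jsonschema.Draft4Validator')  # import + attribute walk
    assert cls.__name__ == 'Draft4Validator'
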
diff --git a/lib/spack/external/jsonschema/_utils.py b/lib/spack/external/jsonschema/_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2262f3305db0c3ea6ce6cd92070d422aefc80142
--- /dev/null
+++ b/lib/spack/external/jsonschema/_utils.py
@@ -0,0 +1,213 @@
+import itertools
+import json
+import pkgutil
+import re
+
+from jsonschema.compat import str_types, MutableMapping, urlsplit
+
+
+class URIDict(MutableMapping):
+    """
+    Dictionary which uses normalized URIs as keys.
+
+    """
+
+    def normalize(self, uri):
+        return urlsplit(uri).geturl()
+
+    def __init__(self, *args, **kwargs):
+        self.store = dict()
+        self.store.update(*args, **kwargs)
+
+    def __getitem__(self, uri):
+        return self.store[self.normalize(uri)]
+
+    def __setitem__(self, uri, value):
+        self.store[self.normalize(uri)] = value
+
+    def __delitem__(self, uri):
+        del self.store[self.normalize(uri)]
+
+    def __iter__(self):
+        return iter(self.store)
+
+    def __len__(self):
+        return len(self.store)
+
+    def __repr__(self):
+        return repr(self.store)
+
+
+class Unset(object):
+    """
+    An as-of-yet unset attribute or unprovided default parameter.
+
+    """
+
+    def __repr__(self):
+        return "<unset>"
+
+
+def load_schema(name):
+    """
+    Load a schema from ./schemas/``name``.json and return it.
+
+    """
+
+    data = pkgutil.get_data(__package__, "schemas/{0}.json".format(name))
+    return json.loads(data.decode("utf-8"))
+
+
+def indent(string, times=1):
+    """
+    A dumb version of :func:`textwrap.indent` from Python 3.3.
+
+    """
+
+    return "\n".join(" " * (4 * times) + line for line in string.splitlines())
+
+
+def format_as_index(indices):
+    """
+    Construct a single string containing indexing operations for the indices.
+
+    For example, [1, 2, "foo"] -> [1][2]["foo"]
+
+    :type indices: sequence
+
+    """
+
+    if not indices:
+        return ""
+    return "[%s]" % "][".join(repr(index) for index in indices)
+
+
+def find_additional_properties(instance, schema):
+    """
+    Return the set of additional properties for the given ``instance``.
+
+    Weeds out properties that should have been validated by ``properties`` and
+    / or ``patternProperties``.
+
+    Assumes ``instance`` is dict-like already.
+
+    """
+
+    properties = schema.get("properties", {})
+    patterns = "|".join(schema.get("patternProperties", {}))
+    for property in instance:
+        if property not in properties:
+            if patterns and re.search(patterns, property):
+                continue
+            yield property
+
+
+def extras_msg(extras):
+    """
+    Create an error message for extra items or properties.
+
+    """
+
+    if len(extras) == 1:
+        verb = "was"
+    else:
+        verb = "were"
+    return ", ".join(repr(extra) for extra in extras), verb
+
+
+def types_msg(instance, types):
+    """
+    Create an error message for a failure to match the given types.
+
+    If the ``instance`` is an object and contains a ``name`` property, it will
+    be considered to be a description of that object and used as its type.
+
+    Otherwise the message is simply the reprs of the given ``types``.
+
+    """
+
+    reprs = []
+    for type in types:
+        try:
+            reprs.append(repr(type["name"]))
+        except Exception:
+            reprs.append(repr(type))
+    return "%r is not of type %s" % (instance, ", ".join(reprs))
+
+
+def flatten(suitable_for_isinstance):
+    """
+    isinstance() can accept a bunch of really annoying different types:
+        * a single type
+        * a tuple of types
+        * an arbitrary nested tree of tuples
+
+    Return a flattened tuple of the given argument.
+
+    """
+
+    types = set()
+
+    if not isinstance(suitable_for_isinstance, tuple):
+        suitable_for_isinstance = (suitable_for_isinstance,)
+    for thing in suitable_for_isinstance:
+        if isinstance(thing, tuple):
+            types.update(flatten(thing))
+        else:
+            types.add(thing)
+    return tuple(types)
+
+
+def ensure_list(thing):
+    """
+    Wrap ``thing`` in a list if it's a single str.
+
+    Otherwise, return it unchanged.
+
+    """
+
+    if isinstance(thing, str_types):
+        return [thing]
+    return thing
+
+
+def unbool(element, true=object(), false=object()):
+    """
+    A hack to make True and 1 and False and 0 unique for ``uniq``.
+
+    """
+
+    if element is True:
+        return true
+    elif element is False:
+        return false
+    return element
+
+
+def uniq(container):
+    """
+    Check if all of a container's elements are unique.
+
+    Successively tries to rely on the elements being hashable, then
+    falls back on them being sortable, and finally falls back on brute
+    force.
+
+    """
+
+    try:
+        return len(set(unbool(i) for i in container)) == len(container)
+    except TypeError:
+        try:
+            sort = sorted(unbool(i) for i in container)
+            sliced = itertools.islice(sort, 1, None)
+            for i, j in zip(sort, sliced):
+                if i == j:
+                    return False
+        except (NotImplementedError, TypeError):
+            seen = []
+            for e in container:
+                e = unbool(e)
+                if e in seen:
+                    return False
+                seen.append(e)
+    return True
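
A quick sketch of why unbool matters to uniq: True == 1 and False == 0 in
Python, so without the sentinel substitution they would collapse in a set:

    from jsonschema._utils import uniq

    assert uniq([True, 1])     # distinct once unbool() separates them
    assert not uniq([1, 1.0])  # 1 == 1.0, a genuine duplicate
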
diff --git a/lib/spack/external/jsonschema/_validators.py b/lib/spack/external/jsonschema/_validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..c6e801ccb23e761b255556cb6da0194960e836c5
--- /dev/null
+++ b/lib/spack/external/jsonschema/_validators.py
@@ -0,0 +1,358 @@
+import re
+
+from jsonschema import _utils
+from jsonschema.exceptions import FormatError, ValidationError
+from jsonschema.compat import iteritems
+
+
+FLOAT_TOLERANCE = 10 ** -15
+
+
+def patternProperties(validator, patternProperties, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+
+    for pattern, subschema in iteritems(patternProperties):
+        for k, v in iteritems(instance):
+            if re.search(pattern, k):
+                for error in validator.descend(
+                    v, subschema, path=k, schema_path=pattern,
+                ):
+                    yield error
+
+
+def additionalProperties(validator, aP, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+
+    extras = set(_utils.find_additional_properties(instance, schema))
+
+    if validator.is_type(aP, "object"):
+        for extra in extras:
+            for error in validator.descend(instance[extra], aP, path=extra):
+                yield error
+    elif not aP and extras:
+        error = "Additional properties are not allowed (%s %s unexpected)"
+        yield ValidationError(error % _utils.extras_msg(extras))
+
+
+def items(validator, items, instance, schema):
+    if not validator.is_type(instance, "array"):
+        return
+
+    if validator.is_type(items, "object"):
+        for index, item in enumerate(instance):
+            for error in validator.descend(item, items, path=index):
+                yield error
+    else:
+        for (index, item), subschema in zip(enumerate(instance), items):
+            for error in validator.descend(
+                item, subschema, path=index, schema_path=index,
+            ):
+                yield error
+
+
+def additionalItems(validator, aI, instance, schema):
+    if (
+        not validator.is_type(instance, "array") or
+        validator.is_type(schema.get("items", {}), "object")
+    ):
+        return
+
+    len_items = len(schema.get("items", []))
+    if validator.is_type(aI, "object"):
+        for index, item in enumerate(instance[len_items:], start=len_items):
+            for error in validator.descend(item, aI, path=index):
+                yield error
+    elif not aI and len(instance) > len(schema.get("items", [])):
+        error = "Additional items are not allowed (%s %s unexpected)"
+        yield ValidationError(
+            error %
+            _utils.extras_msg(instance[len(schema.get("items", [])):])
+        )
+
+
+def minimum(validator, minimum, instance, schema):
+    if not validator.is_type(instance, "number"):
+        return
+
+    if schema.get("exclusiveMinimum", False):
+        failed = float(instance) <= minimum
+        cmp = "less than or equal to"
+    else:
+        failed = float(instance) < minimum
+        cmp = "less than"
+
+    if failed:
+        yield ValidationError(
+            "%r is %s the minimum of %r" % (instance, cmp, minimum)
+        )
+
+
+def maximum(validator, maximum, instance, schema):
+    if not validator.is_type(instance, "number"):
+        return
+
+    if schema.get("exclusiveMaximum", False):
+        failed = instance >= maximum
+        cmp = "greater than or equal to"
+    else:
+        failed = instance > maximum
+        cmp = "greater than"
+
+    if failed:
+        yield ValidationError(
+            "%r is %s the maximum of %r" % (instance, cmp, maximum)
+        )
+
+
+def multipleOf(validator, dB, instance, schema):
+    if not validator.is_type(instance, "number"):
+        return
+
+    if isinstance(dB, float):
+        mod = instance % dB
+        failed = (mod > FLOAT_TOLERANCE) and (dB - mod) > FLOAT_TOLERANCE
+    else:
+        failed = instance % dB
+
+    if failed:
+        yield ValidationError("%r is not a multiple of %r" % (instance, dB))
+
+
+def minItems(validator, mI, instance, schema):
+    if validator.is_type(instance, "array") and len(instance) < mI:
+        yield ValidationError("%r is too short" % (instance,))
+
+
+def maxItems(validator, mI, instance, schema):
+    if validator.is_type(instance, "array") and len(instance) > mI:
+        yield ValidationError("%r is too long" % (instance,))
+
+
+def uniqueItems(validator, uI, instance, schema):
+    if (
+        uI and
+        validator.is_type(instance, "array") and
+        not _utils.uniq(instance)
+    ):
+        yield ValidationError("%r has non-unique elements" % instance)
+
+
+def pattern(validator, patrn, instance, schema):
+    if (
+        validator.is_type(instance, "string") and
+        not re.search(patrn, instance)
+    ):
+        yield ValidationError("%r does not match %r" % (instance, patrn))
+
+
+def format(validator, format, instance, schema):
+    if validator.format_checker is not None:
+        try:
+            validator.format_checker.check(instance, format)
+        except FormatError as error:
+            yield ValidationError(error.message, cause=error.cause)
+
+
+def minLength(validator, mL, instance, schema):
+    if validator.is_type(instance, "string") and len(instance) < mL:
+        yield ValidationError("%r is too short" % (instance,))
+
+
+def maxLength(validator, mL, instance, schema):
+    if validator.is_type(instance, "string") and len(instance) > mL:
+        yield ValidationError("%r is too long" % (instance,))
+
+
+def dependencies(validator, dependencies, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+
+    for property, dependency in iteritems(dependencies):
+        if property not in instance:
+            continue
+
+        if validator.is_type(dependency, "object"):
+            for error in validator.descend(
+                instance, dependency, schema_path=property,
+            ):
+                yield error
+        else:
+            dependencies = _utils.ensure_list(dependency)
+            for dependency in dependencies:
+                if dependency not in instance:
+                    yield ValidationError(
+                        "%r is a dependency of %r" % (dependency, property)
+                    )
+
+
+def enum(validator, enums, instance, schema):
+    if instance not in enums:
+        yield ValidationError("%r is not one of %r" % (instance, enums))
+
+
+def ref(validator, ref, instance, schema):
+    with validator.resolver.resolving(ref) as resolved:
+        for error in validator.descend(instance, resolved):
+            yield error
+
+
+def type_draft3(validator, types, instance, schema):
+    types = _utils.ensure_list(types)
+
+    all_errors = []
+    for index, type in enumerate(types):
+        if type == "any":
+            return
+        if validator.is_type(type, "object"):
+            errors = list(validator.descend(instance, type, schema_path=index))
+            if not errors:
+                return
+            all_errors.extend(errors)
+        else:
+            if validator.is_type(instance, type):
+                return
+    else:
+        yield ValidationError(
+            _utils.types_msg(instance, types), context=all_errors,
+        )
+
+
+def properties_draft3(validator, properties, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+
+    for property, subschema in iteritems(properties):
+        if property in instance:
+            for error in validator.descend(
+                instance[property],
+                subschema,
+                path=property,
+                schema_path=property,
+            ):
+                yield error
+        elif subschema.get("required", False):
+            error = ValidationError("%r is a required property" % property)
+            error._set(
+                validator="required",
+                validator_value=subschema["required"],
+                instance=instance,
+                schema=schema,
+            )
+            error.path.appendleft(property)
+            error.schema_path.extend([property, "required"])
+            yield error
+
+
+def disallow_draft3(validator, disallow, instance, schema):
+    for disallowed in _utils.ensure_list(disallow):
+        if validator.is_valid(instance, {"type" : [disallowed]}):
+            yield ValidationError(
+                "%r is disallowed for %r" % (disallowed, instance)
+            )
+
+
+def extends_draft3(validator, extends, instance, schema):
+    if validator.is_type(extends, "object"):
+        for error in validator.descend(instance, extends):
+            yield error
+        return
+    for index, subschema in enumerate(extends):
+        for error in validator.descend(instance, subschema, schema_path=index):
+            yield error
+
+
+def type_draft4(validator, types, instance, schema):
+    types = _utils.ensure_list(types)
+
+    if not any(validator.is_type(instance, type) for type in types):
+        yield ValidationError(_utils.types_msg(instance, types))
+
+
+def properties_draft4(validator, properties, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+
+    for property, subschema in iteritems(properties):
+        if property in instance:
+            for error in validator.descend(
+                instance[property],
+                subschema,
+                path=property,
+                schema_path=property,
+            ):
+                yield error
+
+
+def required_draft4(validator, required, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+    for property in required:
+        if property not in instance:
+            yield ValidationError("%r is a required property" % property)
+
+
+def minProperties_draft4(validator, mP, instance, schema):
+    if validator.is_type(instance, "object") and len(instance) < mP:
+        yield ValidationError(
+            "%r does not have enough properties" % (instance,)
+        )
+
+
+def maxProperties_draft4(validator, mP, instance, schema):
+    if not validator.is_type(instance, "object"):
+        return
+    if validator.is_type(instance, "object") and len(instance) > mP:
+        yield ValidationError("%r has too many properties" % (instance,))
+
+
+def allOf_draft4(validator, allOf, instance, schema):
+    for index, subschema in enumerate(allOf):
+        for error in validator.descend(instance, subschema, schema_path=index):
+            yield error
+
+
+def oneOf_draft4(validator, oneOf, instance, schema):
+    subschemas = enumerate(oneOf)
+    all_errors = []
+    for index, subschema in subschemas:
+        errs = list(validator.descend(instance, subschema, schema_path=index))
+        if not errs:
+            first_valid = subschema
+            break
+        all_errors.extend(errs)
+    else:
+        yield ValidationError(
+            "%r is not valid under any of the given schemas" % (instance,),
+            context=all_errors,
+        )
+
+    more_valid = [s for i, s in subschemas if validator.is_valid(instance, s)]
+    if more_valid:
+        more_valid.append(first_valid)
+        reprs = ", ".join(repr(schema) for schema in more_valid)
+        yield ValidationError(
+            "%r is valid under each of %s" % (instance, reprs)
+        )
+
+
+def anyOf_draft4(validator, anyOf, instance, schema):
+    all_errors = []
+    for index, subschema in enumerate(anyOf):
+        errs = list(validator.descend(instance, subschema, schema_path=index))
+        if not errs:
+            break
+        all_errors.extend(errs)
+    else:
+        yield ValidationError(
+            "%r is not valid under any of the given schemas" % (instance,),
+            context=all_errors,
+        )
+
+
+def not_draft4(validator, not_schema, instance, schema):
+    if validator.is_valid(instance, not_schema):
+        yield ValidationError(
+            "%r is not allowed for %r" % (not_schema, instance)
+        )
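
A minimal sketch of one of these validator functions surfacing an error
through the public API (schema and instance invented for illustration):

    from jsonschema import Draft4Validator

    schema = {'properties': {'port': {'type': 'integer',
                                      'minimum': 1,
                                      'exclusiveMinimum': True}}}
    for error in Draft4Validator(schema).iter_errors({'port': 0}):
        # minimum() above yields:
        #   0 is less than or equal to the minimum of 1
        print(error.message)
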
diff --git a/lib/spack/external/jsonschema/cli.py b/lib/spack/external/jsonschema/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..0126564f463e21acc06f5534366b3b1d82ba1650
--- /dev/null
+++ b/lib/spack/external/jsonschema/cli.py
@@ -0,0 +1,72 @@
+from __future__ import absolute_import
+import argparse
+import json
+import sys
+
+from jsonschema._reflect import namedAny
+from jsonschema.validators import validator_for
+
+
+def _namedAnyWithDefault(name):
+    if "." not in name:
+        name = "jsonschema." + name
+    return namedAny(name)
+
+
+def _json_file(path):
+    with open(path) as file:
+        return json.load(file)
+
+
+parser = argparse.ArgumentParser(
+    description="JSON Schema Validation CLI",
+)
+parser.add_argument(
+    "-i", "--instance",
+    action="append",
+    dest="instances",
+    type=_json_file,
+    help="a path to a JSON instance to validate "
+         "(may be specified multiple times)",
+)
+parser.add_argument(
+    "-F", "--error-format",
+    default="{error.instance}: {error.message}\n",
+    help="the format to use for each error output message, specified in "
+         "a form suitable for passing to str.format, which will be called "
+         "with 'error' for each error",
+)
+parser.add_argument(
+    "-V", "--validator",
+    type=_namedAnyWithDefault,
+    help="the fully qualified object name of a validator to use, or, for "
+         "validators that are registered with jsonschema, simply the name "
+         "of the class.",
+)
+parser.add_argument(
+    "schema",
+    help="the JSON Schema to validate with",
+    type=_json_file,
+)
+
+
+def parse_args(args):
+    arguments = vars(parser.parse_args(args=args or ["--help"]))
+    if arguments["validator"] is None:
+        arguments["validator"] = validator_for(arguments["schema"])
+    return arguments
+
+
+def main(args=sys.argv[1:]):
+    sys.exit(run(arguments=parse_args(args=args)))
+
+
+def run(arguments, stdout=sys.stdout, stderr=sys.stderr):
+    error_format = arguments["error_format"]
+    validator = arguments["validator"](schema=arguments["schema"])
+    errored = False
+    for instance in arguments["instances"] or ():
+        for error in validator.iter_errors(instance):
+            stderr.write(error_format.format(error=error))
+            errored = True
+    return errored
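
A sketch of driving run() directly, roughly what
python -m jsonschema -i instance.json schema.json does once the JSON files
are loaded (the file names are hypothetical):

    from jsonschema import Draft4Validator
    from jsonschema.cli import run

    errored = run({
        'validator': Draft4Validator,
        'schema': {'type': 'number'},
        'instances': ['not a number'],
        'error_format': '{error.instance}: {error.message}\n',
    })
    assert errored  # the instance fails, so run() returns truthy
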
diff --git a/lib/spack/external/jsonschema/compat.py b/lib/spack/external/jsonschema/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ca49ab6be9163355d791e052f0270bd5b1e5626
--- /dev/null
+++ b/lib/spack/external/jsonschema/compat.py
@@ -0,0 +1,53 @@
+from __future__ import unicode_literals
+import sys
+import operator
+
+try:
+    from collections import MutableMapping, Sequence  # noqa
+except ImportError:
+    from collections.abc import MutableMapping, Sequence  # noqa
+
+PY3 = sys.version_info[0] >= 3
+
+if PY3:
+    zip = zip
+    from io import StringIO
+    from urllib.parse import (
+        unquote, urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit
+    )
+    from urllib.request import urlopen
+    str_types = str,
+    int_types = int,
+    iteritems = operator.methodcaller("items")
+else:
+    from itertools import izip as zip  # noqa
+    from StringIO import StringIO
+    from urlparse import (
+        urljoin, urlunsplit, SplitResult, urlsplit as _urlsplit # noqa
+    )
+    from urllib import unquote  # noqa
+    from urllib2 import urlopen  # noqa
+    str_types = basestring
+    int_types = int, long
+    iteritems = operator.methodcaller("iteritems")
+
+
+# On python < 3.3 fragments are not handled properly with unknown schemes
+def urlsplit(url):
+    scheme, netloc, path, query, fragment = _urlsplit(url)
+    if "#" in path:
+        path, fragment = path.split("#", 1)
+    return SplitResult(scheme, netloc, path, query, fragment)
+
+
+def urldefrag(url):
+    if "#" in url:
+        s, n, p, q, frag = urlsplit(url)
+        defrag = urlunsplit((s, n, p, q, ''))
+    else:
+        defrag = url
+        frag = ''
+    return defrag, frag
+
+
+# flake8: noqa
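
A sketch of the fragment handling these wrappers add for unknown schemes:

    from jsonschema.compat import urldefrag

    url, fragment = urldefrag('tag:example#/definitions/foo')
    assert url == 'tag:example'
    assert fragment == '/definitions/foo'
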
diff --git a/lib/spack/external/jsonschema/exceptions.py b/lib/spack/external/jsonschema/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..478e59c531f3c2e6a38b57b580e9ccbf9cec22f1
--- /dev/null
+++ b/lib/spack/external/jsonschema/exceptions.py
@@ -0,0 +1,264 @@
+from collections import defaultdict, deque
+import itertools
+import pprint
+import textwrap
+
+from jsonschema import _utils
+from jsonschema.compat import PY3, iteritems
+
+
+WEAK_MATCHES = frozenset(["anyOf", "oneOf"])
+STRONG_MATCHES = frozenset()
+
+_unset = _utils.Unset()
+
+
+class _Error(Exception):
+    def __init__(
+        self,
+        message,
+        validator=_unset,
+        path=(),
+        cause=None,
+        context=(),
+        validator_value=_unset,
+        instance=_unset,
+        schema=_unset,
+        schema_path=(),
+        parent=None,
+    ):
+        self.message = message
+        self.path = self.relative_path = deque(path)
+        self.schema_path = self.relative_schema_path = deque(schema_path)
+        self.context = list(context)
+        self.cause = self.__cause__ = cause
+        self.validator = validator
+        self.validator_value = validator_value
+        self.instance = instance
+        self.schema = schema
+        self.parent = parent
+
+        for error in context:
+            error.parent = self
+
+    def __repr__(self):
+        return "<%s: %r>" % (self.__class__.__name__, self.message)
+
+    def __str__(self):
+        return unicode(self).encode("utf-8")
+
+    def __unicode__(self):
+        essential_for_verbose = (
+            self.validator, self.validator_value, self.instance, self.schema,
+        )
+        if any(m is _unset for m in essential_for_verbose):
+            return self.message
+
+        pschema = pprint.pformat(self.schema, width=72)
+        pinstance = pprint.pformat(self.instance, width=72)
+        return self.message + textwrap.dedent("""
+
+            Failed validating %r in schema%s:
+            %s
+
+            On instance%s:
+            %s
+            """.rstrip()
+        ) % (
+            self.validator,
+            _utils.format_as_index(list(self.relative_schema_path)[:-1]),
+            _utils.indent(pschema),
+            _utils.format_as_index(self.relative_path),
+            _utils.indent(pinstance),
+        )
+
+    if PY3:
+        __str__ = __unicode__
+
+    @classmethod
+    def create_from(cls, other):
+        return cls(**other._contents())
+
+    @property
+    def absolute_path(self):
+        parent = self.parent
+        if parent is None:
+            return self.relative_path
+
+        path = deque(self.relative_path)
+        path.extendleft(parent.absolute_path)
+        return path
+
+    @property
+    def absolute_schema_path(self):
+        parent = self.parent
+        if parent is None:
+            return self.relative_schema_path
+
+        path = deque(self.relative_schema_path)
+        path.extendleft(parent.absolute_schema_path)
+        return path
+
+    def _set(self, **kwargs):
+        for k, v in iteritems(kwargs):
+            if getattr(self, k) is _unset:
+                setattr(self, k, v)
+
+    def _contents(self):
+        attrs = (
+            "message", "cause", "context", "validator", "validator_value",
+            "path", "schema_path", "instance", "schema", "parent",
+        )
+        return dict((attr, getattr(self, attr)) for attr in attrs)
+
+
+class ValidationError(_Error):
+    pass
+
+
+class SchemaError(_Error):
+    pass
+
+
+class RefResolutionError(Exception):
+    pass
+
+
+class UnknownType(Exception):
+    def __init__(self, type, instance, schema):
+        self.type = type
+        self.instance = instance
+        self.schema = schema
+
+    def __str__(self):
+        return unicode(self).encode("utf-8")
+
+    def __unicode__(self):
+        pschema = pprint.pformat(self.schema, width=72)
+        pinstance = pprint.pformat(self.instance, width=72)
+        return textwrap.dedent("""
+            Unknown type %r for validator with schema:
+            %s
+
+            While checking instance:
+            %s
+            """.rstrip()
+        ) % (self.type, _utils.indent(pschema), _utils.indent(pinstance))
+
+    if PY3:
+        __str__ = __unicode__
+
+
+
+class FormatError(Exception):
+    def __init__(self, message, cause=None):
+        super(FormatError, self).__init__(message, cause)
+        self.message = message
+        self.cause = self.__cause__ = cause
+
+    def __str__(self):
+        return self.message.encode("utf-8")
+
+    def __unicode__(self):
+        return self.message
+
+    if PY3:
+        __str__ = __unicode__
+
+
+class ErrorTree(object):
+    """
+    ErrorTrees make it easier to check which validations failed.
+
+    """
+
+    _instance = _unset
+
+    def __init__(self, errors=()):
+        self.errors = {}
+        self._contents = defaultdict(self.__class__)
+
+        for error in errors:
+            container = self
+            for element in error.path:
+                container = container[element]
+            container.errors[error.validator] = error
+
+            self._instance = error.instance
+
+    def __contains__(self, index):
+        """
+        Check whether ``instance[index]`` has any errors.
+
+        """
+
+        return index in self._contents
+
+    def __getitem__(self, index):
+        """
+        Retrieve the child tree one level down at the given ``index``.
+
+        If the index is not in the instance that this tree corresponds to and
+        is not known by this tree, whatever error would be raised by
+        ``instance.__getitem__`` will be propagated (usually this is some
+        subclass of :class:`LookupError`).
+
+        """
+
+        if self._instance is not _unset and index not in self:
+            self._instance[index]
+        return self._contents[index]
+
+    def __setitem__(self, index, value):
+        self._contents[index] = value
+
+    def __iter__(self):
+        """
+        Iterate (non-recursively) over the indices in the instance with errors.
+
+        """
+
+        return iter(self._contents)
+
+    def __len__(self):
+        """
+        Same as :attr:`total_errors`.
+
+        """
+
+        return self.total_errors
+
+    def __repr__(self):
+        return "<%s (%s total errors)>" % (self.__class__.__name__, len(self))
+
+    @property
+    def total_errors(self):
+        """
+        The total number of errors in the entire tree, including children.
+
+        """
+
+        child_errors = sum(len(tree) for _, tree in iteritems(self._contents))
+        return len(self.errors) + child_errors
+
+
+def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES):
+    def relevance(error):
+        validator = error.validator
+        return -len(error.path), validator not in weak, validator in strong
+    return relevance
+
+
+relevance = by_relevance()
+
+
+def best_match(errors, key=relevance):
+    errors = iter(errors)
+    best = next(errors, None)
+    if best is None:
+        return
+    best = max(itertools.chain([best], errors), key=key)
+
+    while best.context:
+        best = min(best.context, key=key)
+    return best
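
A minimal sketch of best_match descending into context errors rather than
returning the composite anyOf failure itself:

    from jsonschema import Draft4Validator
    from jsonschema.exceptions import best_match

    schema = {'anyOf': [{'type': 'integer'}, {'type': 'string'}]}
    error = best_match(Draft4Validator(schema).iter_errors([]))
    print(error.message)  # e.g. "[] is not of type 'integer'"
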
diff --git a/lib/spack/external/jsonschema/schemas/draft3.json b/lib/spack/external/jsonschema/schemas/draft3.json
new file mode 100644
index 0000000000000000000000000000000000000000..5bcefe30d5cad78b2501944273699c4285ef5d89
--- /dev/null
+++ b/lib/spack/external/jsonschema/schemas/draft3.json
@@ -0,0 +1,201 @@
+{
+    "$schema": "http://json-schema.org/draft-03/schema#",
+    "dependencies": {
+        "exclusiveMaximum": "maximum",
+        "exclusiveMinimum": "minimum"
+    },
+    "id": "http://json-schema.org/draft-03/schema#",
+    "properties": {
+        "$ref": {
+            "format": "uri",
+            "type": "string"
+        },
+        "$schema": {
+            "format": "uri",
+            "type": "string"
+        },
+        "additionalItems": {
+            "default": {},
+            "type": [
+                {
+                    "$ref": "#"
+                },
+                "boolean"
+            ]
+        },
+        "additionalProperties": {
+            "default": {},
+            "type": [
+                {
+                    "$ref": "#"
+                },
+                "boolean"
+            ]
+        },
+        "default": {
+            "type": "any"
+        },
+        "dependencies": {
+            "additionalProperties": {
+                "items": {
+                    "type": "string"
+                },
+                "type": [
+                    "string",
+                    "array",
+                    {
+                        "$ref": "#"
+                    }
+                ]
+            },
+            "default": {},
+            "type": [
+                "string",
+                "array",
+                "object"
+            ]
+        },
+        "description": {
+            "type": "string"
+        },
+        "disallow": {
+            "items": {
+                "type": [
+                    "string",
+                    {
+                        "$ref": "#"
+                    }
+                ]
+            },
+            "type": [
+                "string",
+                "array"
+            ],
+            "uniqueItems": true
+        },
+        "divisibleBy": {
+            "default": 1,
+            "exclusiveMinimum": true,
+            "minimum": 0,
+            "type": "number"
+        },
+        "enum": {
+            "minItems": 1,
+            "type": "array",
+            "uniqueItems": true
+        },
+        "exclusiveMaximum": {
+            "default": false,
+            "type": "boolean"
+        },
+        "exclusiveMinimum": {
+            "default": false,
+            "type": "boolean"
+        },
+        "extends": {
+            "default": {},
+            "items": {
+                "$ref": "#"
+            },
+            "type": [
+                {
+                    "$ref": "#"
+                },
+                "array"
+            ]
+        },
+        "format": {
+            "type": "string"
+        },
+        "id": {
+            "format": "uri",
+            "type": "string"
+        },
+        "items": {
+            "default": {},
+            "items": {
+                "$ref": "#"
+            },
+            "type": [
+                {
+                    "$ref": "#"
+                },
+                "array"
+            ]
+        },
+        "maxDecimal": {
+            "minimum": 0,
+            "type": "number"
+        },
+        "maxItems": {
+            "minimum": 0,
+            "type": "integer"
+        },
+        "maxLength": {
+            "type": "integer"
+        },
+        "maximum": {
+            "type": "number"
+        },
+        "minItems": {
+            "default": 0,
+            "minimum": 0,
+            "type": "integer"
+        },
+        "minLength": {
+            "default": 0,
+            "minimum": 0,
+            "type": "integer"
+        },
+        "minimum": {
+            "type": "number"
+        },
+        "pattern": {
+            "format": "regex",
+            "type": "string"
+        },
+        "patternProperties": {
+            "additionalProperties": {
+                "$ref": "#"
+            },
+            "default": {},
+            "type": "object"
+        },
+        "properties": {
+            "additionalProperties": {
+                "$ref": "#",
+                "type": "object"
+            },
+            "default": {},
+            "type": "object"
+        },
+        "required": {
+            "default": false,
+            "type": "boolean"
+        },
+        "title": {
+            "type": "string"
+        },
+        "type": {
+            "default": "any",
+            "items": {
+                "type": [
+                    "string",
+                    {
+                        "$ref": "#"
+                    }
+                ]
+            },
+            "type": [
+                "string",
+                "array"
+            ],
+            "uniqueItems": true
+        },
+        "uniqueItems": {
+            "default": false,
+            "type": "boolean"
+        }
+    },
+    "type": "object"
+}
diff --git a/lib/spack/external/jsonschema/schemas/draft4.json b/lib/spack/external/jsonschema/schemas/draft4.json
new file mode 100644
index 0000000000000000000000000000000000000000..fead5cefab73dcaf4b05bb2623fdf405465a5112
--- /dev/null
+++ b/lib/spack/external/jsonschema/schemas/draft4.json
@@ -0,0 +1,221 @@
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "default": {},
+    "definitions": {
+        "positiveInteger": {
+            "minimum": 0,
+            "type": "integer"
+        },
+        "positiveIntegerDefault0": {
+            "allOf": [
+                {
+                    "$ref": "#/definitions/positiveInteger"
+                },
+                {
+                    "default": 0
+                }
+            ]
+        },
+        "schemaArray": {
+            "items": {
+                "$ref": "#"
+            },
+            "minItems": 1,
+            "type": "array"
+        },
+        "simpleTypes": {
+            "enum": [
+                "array",
+                "boolean",
+                "integer",
+                "null",
+                "number",
+                "object",
+                "string"
+            ]
+        },
+        "stringArray": {
+            "items": {
+                "type": "string"
+            },
+            "minItems": 1,
+            "type": "array",
+            "uniqueItems": true
+        }
+    },
+    "dependencies": {
+        "exclusiveMaximum": [
+            "maximum"
+        ],
+        "exclusiveMinimum": [
+            "minimum"
+        ]
+    },
+    "description": "Core schema meta-schema",
+    "id": "http://json-schema.org/draft-04/schema#",
+    "properties": {
+        "$schema": {
+            "format": "uri",
+            "type": "string"
+        },
+        "additionalItems": {
+            "anyOf": [
+                {
+                    "type": "boolean"
+                },
+                {
+                    "$ref": "#"
+                }
+            ],
+            "default": {}
+        },
+        "additionalProperties": {
+            "anyOf": [
+                {
+                    "type": "boolean"
+                },
+                {
+                    "$ref": "#"
+                }
+            ],
+            "default": {}
+        },
+        "allOf": {
+            "$ref": "#/definitions/schemaArray"
+        },
+        "anyOf": {
+            "$ref": "#/definitions/schemaArray"
+        },
+        "default": {},
+        "definitions": {
+            "additionalProperties": {
+                "$ref": "#"
+            },
+            "default": {},
+            "type": "object"
+        },
+        "dependencies": {
+            "additionalProperties": {
+                "anyOf": [
+                    {
+                        "$ref": "#"
+                    },
+                    {
+                        "$ref": "#/definitions/stringArray"
+                    }
+                ]
+            },
+            "type": "object"
+        },
+        "description": {
+            "type": "string"
+        },
+        "enum": {
+            "minItems": 1,
+            "type": "array",
+            "uniqueItems": true
+        },
+        "exclusiveMaximum": {
+            "default": false,
+            "type": "boolean"
+        },
+        "exclusiveMinimum": {
+            "default": false,
+            "type": "boolean"
+        },
+        "id": {
+            "format": "uri",
+            "type": "string"
+        },
+        "items": {
+            "anyOf": [
+                {
+                    "$ref": "#"
+                },
+                {
+                    "$ref": "#/definitions/schemaArray"
+                }
+            ],
+            "default": {}
+        },
+        "maxItems": {
+            "$ref": "#/definitions/positiveInteger"
+        },
+        "maxLength": {
+            "$ref": "#/definitions/positiveInteger"
+        },
+        "maxProperties": {
+            "$ref": "#/definitions/positiveInteger"
+        },
+        "maximum": {
+            "type": "number"
+        },
+        "minItems": {
+            "$ref": "#/definitions/positiveIntegerDefault0"
+        },
+        "minLength": {
+            "$ref": "#/definitions/positiveIntegerDefault0"
+        },
+        "minProperties": {
+            "$ref": "#/definitions/positiveIntegerDefault0"
+        },
+        "minimum": {
+            "type": "number"
+        },
+        "multipleOf": {
+            "exclusiveMinimum": true,
+            "minimum": 0,
+            "type": "number"
+        },
+        "not": {
+            "$ref": "#"
+        },
+        "oneOf": {
+            "$ref": "#/definitions/schemaArray"
+        },
+        "pattern": {
+            "format": "regex",
+            "type": "string"
+        },
+        "patternProperties": {
+            "additionalProperties": {
+                "$ref": "#"
+            },
+            "default": {},
+            "type": "object"
+        },
+        "properties": {
+            "additionalProperties": {
+                "$ref": "#"
+            },
+            "default": {},
+            "type": "object"
+        },
+        "required": {
+            "$ref": "#/definitions/stringArray"
+        },
+        "title": {
+            "type": "string"
+        },
+        "type": {
+            "anyOf": [
+                {
+                    "$ref": "#/definitions/simpleTypes"
+                },
+                {
+                    "items": {
+                        "$ref": "#/definitions/simpleTypes"
+                    },
+                    "minItems": 1,
+                    "type": "array",
+                    "uniqueItems": true
+                }
+            ]
+        },
+        "uniqueItems": {
+            "default": false,
+            "type": "boolean"
+        }
+    },
+    "type": "object"
+}
diff --git a/lib/spack/external/jsonschema/tests/__init__.py b/lib/spack/external/jsonschema/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/lib/spack/external/jsonschema/tests/compat.py b/lib/spack/external/jsonschema/tests/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..b37483f5ddf32954ed32c6676481f1a155ba638f
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/compat.py
@@ -0,0 +1,15 @@
+import sys
+
+
+if sys.version_info[:2] < (2, 7):  # pragma: no cover
+    import unittest2 as unittest
+else:
+    import unittest
+
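+# unittest.mock only exists on Python 3.3+; older interpreters fall back to
+# the standalone ``mock`` package.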
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+
+# flake8: noqa
diff --git a/lib/spack/external/jsonschema/tests/test_cli.py b/lib/spack/external/jsonschema/tests/test_cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..f625ca989d17ec23c5af5b026f4af092c3ac7553
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_cli.py
@@ -0,0 +1,110 @@
+from jsonschema import Draft4Validator, ValidationError, cli
+from jsonschema.compat import StringIO
+from jsonschema.tests.compat import mock, unittest
+
+
+def fake_validator(*errors):
+    errors = list(reversed(errors))
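+    # pop() takes batches off the end of the reversed list, so successive
+    # iter_errors() calls see the batches in their original argument order.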
+
+    class FakeValidator(object):
+        def __init__(self, *args, **kwargs):
+            pass
+
+        def iter_errors(self, instance):
+            if errors:
+                return errors.pop()
+            return []
+    return FakeValidator
+
+
+class TestParser(unittest.TestCase):
+    FakeValidator = fake_validator()
+
+    def setUp(self):
+        mock_open = mock.mock_open()
+        patch_open = mock.patch.object(cli, "open", mock_open, create=True)
+        patch_open.start()
+        self.addCleanup(patch_open.stop)
+
+        mock_json_load = mock.Mock()
+        mock_json_load.return_value = {}
+        patch_json_load = mock.patch("json.load", mock_json_load)
+        patch_json_load.start()
+        self.addCleanup(patch_json_load.stop)
+
+    def test_find_validator_by_fully_qualified_object_name(self):
+        arguments = cli.parse_args(
+            [
+                "--validator",
+                "jsonschema.tests.test_cli.TestParser.FakeValidator",
+                "--instance", "foo.json",
+                "schema.json",
+            ]
+        )
+        self.assertIs(arguments["validator"], self.FakeValidator)
+
+    def test_find_validator_in_jsonschema(self):
+        arguments = cli.parse_args(
+            [
+                "--validator", "Draft4Validator",
+                "--instance", "foo.json",
+                "schema.json",
+            ]
+        )
+        self.assertIs(arguments["validator"], Draft4Validator)
+
+
+class TestCLI(unittest.TestCase):
+    def test_successful_validation(self):
+        stdout, stderr = StringIO(), StringIO()
+        exit_code = cli.run(
+            {
+                "validator": fake_validator(),
+                "schema": {},
+                "instances": [1],
+                "error_format": "{error.message}",
+            },
+            stdout=stdout,
+            stderr=stderr,
+        )
+        self.assertFalse(stdout.getvalue())
+        self.assertFalse(stderr.getvalue())
+        self.assertEqual(exit_code, 0)
+
+    def test_unsuccessful_validation(self):
+        error = ValidationError("I am an error!", instance=1)
+        stdout, stderr = StringIO(), StringIO()
+        exit_code = cli.run(
+            {
+                "validator": fake_validator([error]),
+                "schema": {},
+                "instances": [1],
+                "error_format": "{error.instance} - {error.message}",
+            },
+            stdout=stdout,
+            stderr=stderr,
+        )
+        self.assertFalse(stdout.getvalue())
+        self.assertEqual(stderr.getvalue(), "1 - I am an error!")
+        self.assertEqual(exit_code, 1)
+
+    def test_unsuccessful_validation_multiple_instances(self):
+        first_errors = [
+            ValidationError("9", instance=1),
+            ValidationError("8", instance=1),
+        ]
+        second_errors = [ValidationError("7", instance=2)]
+        stdout, stderr = StringIO(), StringIO()
+        exit_code = cli.run(
+            {
+                "validator": fake_validator(first_errors, second_errors),
+                "schema": {},
+                "instances": [1, 2],
+                "error_format": "{error.instance} - {error.message}\t",
+            },
+            stdout=stdout,
+            stderr=stderr,
+        )
+        self.assertFalse(stdout.getvalue())
+        self.assertEqual(stderr.getvalue(), "1 - 9\t1 - 8\t2 - 7\t")
+        self.assertEqual(exit_code, 1)
diff --git a/lib/spack/external/jsonschema/tests/test_exceptions.py b/lib/spack/external/jsonschema/tests/test_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e5793c6283699b371ada213791a0442859749a6
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_exceptions.py
@@ -0,0 +1,382 @@
+import textwrap
+
+from jsonschema import Draft4Validator, exceptions
+from jsonschema.compat import PY3
+from jsonschema.tests.compat import mock, unittest
+
+
+class TestBestMatch(unittest.TestCase):
+    def best_match(self, errors):
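+        # exercised both forwards and reversed to assert order-independence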
+        errors = list(errors)
+        best = exceptions.best_match(errors)
+        reversed_best = exceptions.best_match(reversed(errors))
+        self.assertEqual(
+            best,
+            reversed_best,
+            msg="Didn't return a consistent best match!\n"
+                "Got: {0}\n\nThen: {1}".format(best, reversed_best),
+        )
+        return best
+
+    def test_shallower_errors_are_better_matches(self):
+        validator = Draft4Validator(
+            {
+                "properties" : {
+                    "foo" : {
+                        "minProperties" : 2,
+                        "properties" : {"bar" : {"type" : "object"}},
+                    }
+                }
+            }
+        )
+        best = self.best_match(validator.iter_errors({"foo" : {"bar" : []}}))
+        self.assertEqual(best.validator, "minProperties")
+
+    def test_oneOf_and_anyOf_are_weak_matches(self):
+        """
+        A validator you *must* satisfy is probably a better match than one
+        you only have to partially satisfy.
+
+        """
+
+        validator = Draft4Validator(
+            {
+                "minProperties" : 2,
+                "anyOf" : [{"type" : "string"}, {"type" : "number"}],
+                "oneOf" : [{"type" : "string"}, {"type" : "number"}],
+            }
+        )
+        best = self.best_match(validator.iter_errors({}))
+        self.assertEqual(best.validator, "minProperties")
+
+    def test_if_the_most_relevant_error_is_anyOf_it_is_traversed(self):
+        """
+        If the most relevant error is an anyOf, then we traverse its context
+        and select the otherwise *least* relevant error, since in this case
+        that means the most specific, deep, error inside the instance.
+
+        I.e. since at least one of the schemas must match, we look for the
+        most relevant failure among them.
+
+        """
+
+        validator = Draft4Validator(
+            {
+                "properties" : {
+                    "foo" : {
+                        "anyOf" : [
+                            {"type" : "string"},
+                            {"properties" : {"bar" : {"type" : "array"}}},
+                        ],
+                    },
+                },
+            },
+        )
+        best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+        self.assertEqual(best.validator_value, "array")
+
+    def test_if_the_most_relevant_error_is_oneOf_it_is_traversed(self):
+        """
+        If the most relevant error is a oneOf, then we traverse its context
+        and select the otherwise *least* relevant error, since in this case
+        that means the most specific, deep, error inside the instance.
+
+        I.e. since only one of the schemas must match, we look for the most
+        relevant one.
+
+        """
+
+        validator = Draft4Validator(
+            {
+                "properties" : {
+                    "foo" : {
+                        "oneOf" : [
+                            {"type" : "string"},
+                            {"properties" : {"bar" : {"type" : "array"}}},
+                        ],
+                    },
+                },
+            },
+        )
+        best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+        self.assertEqual(best.validator_value, "array")
+
+    def test_if_the_most_relevant_error_is_allOf_it_is_traversed(self):
+        """
+        Now, if the error is allOf, we traverse but select the *most* relevant
+        error from the context, because all of the schemas must match anyway.
+
+        """
+
+        validator = Draft4Validator(
+            {
+                "properties" : {
+                    "foo" : {
+                        "allOf" : [
+                            {"type" : "string"},
+                            {"properties" : {"bar" : {"type" : "array"}}},
+                        ],
+                    },
+                },
+            },
+        )
+        best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+        self.assertEqual(best.validator_value, "string")
+
+    def test_nested_context_for_oneOf(self):
+        validator = Draft4Validator(
+            {
+                "properties" : {
+                    "foo" : {
+                        "oneOf" : [
+                            {"type" : "string"},
+                            {
+                                "oneOf" : [
+                                    {"type" : "string"},
+                                    {
+                                        "properties" : {
+                                            "bar" : {"type" : "array"}
+                                        },
+                                    },
+                                ],
+                            },
+                        ],
+                    },
+                },
+            },
+        )
+        best = self.best_match(validator.iter_errors({"foo" : {"bar" : 12}}))
+        self.assertEqual(best.validator_value, "array")
+
+    def test_one_error(self):
+        validator = Draft4Validator({"minProperties" : 2})
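+        # the unpacking asserts that exactly one error is emitted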
+        error, = validator.iter_errors({})
+        self.assertEqual(
+            exceptions.best_match(validator.iter_errors({})).validator,
+            "minProperties",
+        )
+
+    def test_no_errors(self):
+        validator = Draft4Validator({})
+        self.assertIsNone(exceptions.best_match(validator.iter_errors({})))
+
+
+class TestByRelevance(unittest.TestCase):
+    def test_short_paths_are_better_matches(self):
+        shallow = exceptions.ValidationError("Oh no!", path=["baz"])
+        deep = exceptions.ValidationError("Oh yes!", path=["foo", "bar"])
+        match = max([shallow, deep], key=exceptions.relevance)
+        self.assertIs(match, shallow)
+
+        match = max([deep, shallow], key=exceptions.relevance)
+        self.assertIs(match, shallow)
+
+    def test_global_errors_are_even_better_matches(self):
+        shallow = exceptions.ValidationError("Oh no!", path=[])
+        deep = exceptions.ValidationError("Oh yes!", path=["foo"])
+
+        errors = sorted([shallow, deep], key=exceptions.relevance)
+        self.assertEqual(
+            [list(error.path) for error in errors],
+            [["foo"], []],
+        )
+
+        errors = sorted([deep, shallow], key=exceptions.relevance)
+        self.assertEqual(
+            [list(error.path) for error in errors],
+            [["foo"], []],
+        )
+
+    def test_weak_validators_are_lower_priority(self):
+        weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
+        normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
+
+        best_match = exceptions.by_relevance(weak="a")
+
+        match = max([weak, normal], key=best_match)
+        self.assertIs(match, normal)
+
+        match = max([normal, weak], key=best_match)
+        self.assertIs(match, normal)
+
+    def test_strong_validators_are_higher_priority(self):
+        weak = exceptions.ValidationError("Oh no!", path=[], validator="a")
+        normal = exceptions.ValidationError("Oh yes!", path=[], validator="b")
+        strong = exceptions.ValidationError("Oh fine!", path=[], validator="c")
+
+        best_match = exceptions.by_relevance(weak="a", strong="c")
+
+        match = max([weak, normal, strong], key=best_match)
+        self.assertIs(match, strong)
+
+        match = max([strong, normal, weak], key=best_match)
+        self.assertIs(match, strong)
+
+
+class TestErrorTree(unittest.TestCase):
+    def test_it_knows_how_many_total_errors_it_contains(self):
+        errors = [mock.MagicMock() for _ in range(8)]
+        tree = exceptions.ErrorTree(errors)
+        self.assertEqual(tree.total_errors, 8)
+
+    def test_it_contains_an_item_if_the_item_had_an_error(self):
+        errors = [exceptions.ValidationError("a message", path=["bar"])]
+        tree = exceptions.ErrorTree(errors)
+        self.assertIn("bar", tree)
+
+    def test_it_does_not_contain_an_item_if_the_item_had_no_error(self):
+        errors = [exceptions.ValidationError("a message", path=["bar"])]
+        tree = exceptions.ErrorTree(errors)
+        self.assertNotIn("foo", tree)
+
+    def test_validators_that_failed_appear_in_errors_dict(self):
+        error = exceptions.ValidationError("a message", validator="foo")
+        tree = exceptions.ErrorTree([error])
+        self.assertEqual(tree.errors, {"foo" : error})
+
+    def test_it_creates_a_child_tree_for_each_nested_path(self):
+        errors = [
+            exceptions.ValidationError("a bar message", path=["bar"]),
+            exceptions.ValidationError("a bar -> 0 message", path=["bar", 0]),
+        ]
+        tree = exceptions.ErrorTree(errors)
+        self.assertIn(0, tree["bar"])
+        self.assertNotIn(1, tree["bar"])
+
+    def test_children_have_their_errors_dicts_built(self):
+        e1, e2 = (
+            exceptions.ValidationError("1", validator="foo", path=["bar", 0]),
+            exceptions.ValidationError("2", validator="quux", path=["bar", 0]),
+        )
+        tree = exceptions.ErrorTree([e1, e2])
+        self.assertEqual(tree["bar"][0].errors, {"foo" : e1, "quux" : e2})
+
+    def test_it_does_not_contain_subtrees_that_are_not_in_the_instance(self):
+        error = exceptions.ValidationError("123", validator="foo", instance=[])
+        tree = exceptions.ErrorTree([error])
+
+        with self.assertRaises(IndexError):
+            tree[0]
+
+    def test_if_its_in_the_tree_anyhow_it_does_not_raise_an_error(self):
+        """
+        If a validator is dumb (like :validator:`required` in draft 3) and
+        refers to a path that isn't in the instance, the tree still properly
+        returns a subtree for that path.
+
+        """
+
+        error = exceptions.ValidationError(
+            "a message", validator="foo", instance={}, path=["foo"],
+        )
+        tree = exceptions.ErrorTree([error])
+        self.assertIsInstance(tree["foo"], exceptions.ErrorTree)
+
+
+class TestErrorReprStr(unittest.TestCase):
+    def make_error(self, **kwargs):
+        defaults = dict(
+            message=u"hello",
+            validator=u"type",
+            validator_value=u"string",
+            instance=5,
+            schema={u"type": u"string"},
+        )
+        defaults.update(kwargs)
+        return exceptions.ValidationError(**defaults)
+
+    def assertShows(self, expected, **kwargs):
+        if PY3:
+            expected = expected.replace("u'", "'")
+        expected = textwrap.dedent(expected).rstrip("\n")
+
+        error = self.make_error(**kwargs)
+        message_line, _, rest = str(error).partition("\n")
+        self.assertEqual(message_line, error.message)
+        self.assertEqual(rest, expected)
+
+    def test_repr(self):
+        self.assertEqual(
+            repr(exceptions.ValidationError(message="Hello!")),
+            "<ValidationError: %r>" % "Hello!",
+        )
+
+    def test_unset_error(self):
+        error = exceptions.ValidationError("message")
+        self.assertEqual(str(error), "message")
+
+        kwargs = {
+            "validator": "type",
+            "validator_value": "string",
+            "instance": 5,
+            "schema": {"type": "string"}
+        }
+        # Just the message should show if any of the attributes are unset
+        for attr in kwargs:
+            k = dict(kwargs)
+            del k[attr]
+            error = exceptions.ValidationError("message", **k)
+            self.assertEqual(str(error), "message")
+
+    def test_empty_paths(self):
+        self.assertShows(
+            """
+            Failed validating u'type' in schema:
+                {u'type': u'string'}
+
+            On instance:
+                5
+            """,
+            path=[],
+            schema_path=[],
+        )
+
+    def test_one_item_paths(self):
+        self.assertShows(
+            """
+            Failed validating u'type' in schema:
+                {u'type': u'string'}
+
+            On instance[0]:
+                5
+            """,
+            path=[0],
+            schema_path=["items"],
+        )
+
+    def test_multiple_item_paths(self):
+        self.assertShows(
+            """
+            Failed validating u'type' in schema[u'items'][0]:
+                {u'type': u'string'}
+
+            On instance[0][u'a']:
+                5
+            """,
+            path=[0, u"a"],
+            schema_path=[u"items", 0, 1],
+        )
+
+    def test_uses_pprint(self):
+        with mock.patch("pprint.pformat") as pformat:
+            str(self.make_error())
+            self.assertEqual(pformat.call_count, 2)  # schema + instance
+
+    def test_str_works_with_instances_having_overridden_eq_operator(self):
+        """
+        Check for https://github.com/Julian/jsonschema/issues/164 which
+        rendered exceptions unusable when a `ValidationError` involved
+        instances with an `__eq__` method that returned truthy values.
+
+        """
+
+        instance = mock.MagicMock()
+        error = exceptions.ValidationError(
+            "a message",
+            validator="foo",
+            instance=instance,
+            validator_value="some",
+            schema="schema",
+        )
+        str(error)
+        self.assertFalse(instance.__eq__.called)
diff --git a/lib/spack/external/jsonschema/tests/test_format.py b/lib/spack/external/jsonschema/tests/test_format.py
new file mode 100644
index 0000000000000000000000000000000000000000..8392ca1de330400693bbe36523986c0c26de2e40
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_format.py
@@ -0,0 +1,63 @@
+"""
+Tests for the parts of jsonschema related to the :validator:`format` property.
+
+"""
+
+from jsonschema.tests.compat import mock, unittest
+
+from jsonschema import FormatError, ValidationError, FormatChecker
+from jsonschema.validators import Draft4Validator
+
+
+class TestFormatChecker(unittest.TestCase):
+    def setUp(self):
+        self.fn = mock.Mock()
+
+    def test_it_can_validate_no_formats(self):
+        checker = FormatChecker(formats=())
+        self.assertFalse(checker.checkers)
+
+    def test_it_raises_a_key_error_for_unknown_formats(self):
+        with self.assertRaises(KeyError):
+            FormatChecker(formats=["o noes"])
+
+    def test_it_can_register_cls_checkers(self):
+        with mock.patch.dict(FormatChecker.checkers, clear=True):
+            FormatChecker.cls_checks("new")(self.fn)
+            self.assertEqual(FormatChecker.checkers, {"new" : (self.fn, ())})
+
+    def test_it_can_register_checkers(self):
+        checker = FormatChecker()
+        checker.checks("new")(self.fn)
+        self.assertEqual(
+            checker.checkers,
+            dict(FormatChecker.checkers, new=(self.fn, ()))
+        )
+
+    def test_it_catches_registered_errors(self):
+        checker = FormatChecker()
+        cause = self.fn.side_effect = ValueError()
+
+        checker.checks("foo", raises=ValueError)(self.fn)
+
+        with self.assertRaises(FormatError) as cm:
+            checker.check("bar", "foo")
+
+        self.assertIs(cm.exception.cause, cause)
+        self.assertIs(cm.exception.__cause__, cause)
+
+        # Unregistered errors should not be caught
+        self.fn.side_effect = AttributeError
+        with self.assertRaises(AttributeError):
+            checker.check("bar", "foo")
+
+    def test_format_error_causes_become_validation_error_causes(self):
+        checker = FormatChecker()
+        checker.checks("foo", raises=ValueError)(self.fn)
+        cause = self.fn.side_effect = ValueError()
+        validator = Draft4Validator({"format" : "foo"}, format_checker=checker)
+
+        with self.assertRaises(ValidationError) as cm:
+            validator.validate("bar")
+
+        self.assertIs(cm.exception.__cause__, cause)
diff --git a/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py b/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py
new file mode 100644
index 0000000000000000000000000000000000000000..75c6857bc03a336601608b870ad1ec8ab9ba5b55
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_jsonschema_test_suite.py
@@ -0,0 +1,290 @@
+"""
+Test runner for the JSON Schema official test suite
+
+Tests comprehensive correctness of each draft's validator.
+
+See https://github.com/json-schema/JSON-Schema-Test-Suite for details.
+
+"""
+
+from contextlib import closing
+from decimal import Decimal
+import glob
+import io
+import itertools
+import json
+import os
+import re
+import subprocess
+import sys
+
+try:
+    from sys import pypy_version_info
+except ImportError:
+    pypy_version_info = None
+
+from jsonschema import (
+    FormatError, SchemaError, ValidationError, Draft3Validator,
+    Draft4Validator, FormatChecker, draft3_format_checker,
+    draft4_format_checker, validate,
+)
+from jsonschema.compat import PY3
+from jsonschema.tests.compat import mock, unittest
+import jsonschema
+
+
+REPO_ROOT = os.path.join(os.path.dirname(jsonschema.__file__), os.path.pardir)
+SUITE = os.getenv("JSON_SCHEMA_TEST_SUITE", os.path.join(REPO_ROOT, "json"))
+
+if not os.path.isdir(SUITE):
+    raise ValueError(
+        "Can't find the JSON-Schema-Test-Suite directory. Set the "
+        "'JSON_SCHEMA_TEST_SUITE' environment variable or run the tests from "
+        "alongside a checkout of the suite."
+    )
+
+TESTS_DIR = os.path.join(SUITE, "tests")
+JSONSCHEMA_SUITE = os.path.join(SUITE, "bin", "jsonschema_suite")
+
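+# The suite's helper script prints the "remote" schemas as JSON on stdout;
+# they are loaded once here and served by the mocked resolver below.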
+remotes_stdout = subprocess.Popen(
+    ["python", JSONSCHEMA_SUITE, "remotes"], stdout=subprocess.PIPE,
+).stdout
+
+with closing(remotes_stdout):
+    if PY3:
+        remotes_stdout = io.TextIOWrapper(remotes_stdout)
+    REMOTES = json.load(remotes_stdout)
+
+
+def make_case(schema, data, valid, name):
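+    # Build one test method: valid cases must validate cleanly, invalid ones
+    # must raise a ValidationError.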
+    if valid:
+        def test_case(self):
+            kwargs = getattr(self, "validator_kwargs", {})
+            validate(data, schema, cls=self.validator_class, **kwargs)
+    else:
+        def test_case(self):
+            kwargs = getattr(self, "validator_kwargs", {})
+            with self.assertRaises(ValidationError):
+                validate(data, schema, cls=self.validator_class, **kwargs)
+
+    if not PY3:
+        name = name.encode("utf-8")
+    test_case.__name__ = name
+
+    return test_case
+
+
+def maybe_skip(skip, test_case, case, test):
+    if skip is not None:
+        reason = skip(case, test)
+        if reason is not None:
+            test_case = unittest.skip(reason)(test_case)
+    return test_case
+
+
+def load_json_cases(tests_glob, ignore_glob="", basedir=TESTS_DIR, skip=None):
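+    # Class-decorator factory: for every suite file matching tests_glob,
+    # attach one generated test method per test case to the decorated class.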
+    if ignore_glob:
+        ignore_glob = os.path.join(basedir, ignore_glob)
+
+    def add_test_methods(test_class):
+        ignored = set(glob.iglob(ignore_glob))
+
+        for filename in glob.iglob(os.path.join(basedir, tests_glob)):
+            if filename in ignored:
+                continue
+
+            validating, _ = os.path.splitext(os.path.basename(filename))
+            id = itertools.count(1)
+
+            with open(filename) as test_file:
+                for case in json.load(test_file):
+                    for test in case["tests"]:
+                        name = "test_%s_%s_%s" % (
+                            validating,
+                            next(id),
+                            re.sub(r"[\W ]+", "_", test["description"]),
+                        )
+                        assert not hasattr(test_class, name), name
+
+                        test_case = make_case(
+                            data=test["data"],
+                            schema=case["schema"],
+                            valid=test["valid"],
+                            name=name,
+                        )
+                        test_case = maybe_skip(skip, test_case, case, test)
+                        setattr(test_class, name, test_case)
+
+        return test_class
+    return add_test_methods
+
+
+class TypesMixin(object):
+    @unittest.skipIf(PY3, "In Python 3 json.load always produces unicode")
+    def test_string_a_bytestring_is_a_string(self):
+        self.validator_class({"type" : "string"}).validate(b"foo")
+
+
+class DecimalMixin(object):
+    def test_it_can_validate_with_decimals(self):
+        schema = {"type" : "number"}
+        validator = self.validator_class(
+            schema, types={"number" : (int, float, Decimal)}
+        )
+
+        for valid in [1, 1.1, Decimal(1) / Decimal(8)]:
+            validator.validate(valid)
+
+        for invalid in ["foo", {}, [], True, None]:
+            with self.assertRaises(ValidationError):
+                validator.validate(invalid)
+
+
+def missing_format(checker):
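+    # Skip-predicate factory: returns a reason string when a case exercises
+    # a format the given checker does not know how to validate.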
+    def missing_format(case, test):
+        format = case["schema"].get("format")
+        if format not in checker.checkers:
+            return "Format checker {0!r} not found.".format(format)
+        elif (
+            format == "date-time" and
+            pypy_version_info is not None and
+            pypy_version_info[:2] <= (1, 9)
+        ):
+            # datetime.datetime is overzealous about typechecking in <=1.9
+            return "datetime.datetime is broken on this version of PyPy."
+    return missing_format
+
+
+class FormatMixin(object):
+    def test_it_returns_true_for_formats_it_does_not_know_about(self):
+        validator = self.validator_class(
+            {"format" : "carrot"}, format_checker=FormatChecker(),
+        )
+        validator.validate("bugs")
+
+    def test_it_does_not_validate_formats_by_default(self):
+        validator = self.validator_class({})
+        self.assertIsNone(validator.format_checker)
+
+    def test_it_validates_formats_if_a_checker_is_provided(self):
+        checker = mock.Mock(spec=FormatChecker)
+        validator = self.validator_class(
+            {"format" : "foo"}, format_checker=checker,
+        )
+
+        validator.validate("bar")
+
+        checker.check.assert_called_once_with("bar", "foo")
+
+        cause = ValueError()
+        checker.check.side_effect = FormatError('aoeu', cause=cause)
+
+        with self.assertRaises(ValidationError) as cm:
+            validator.validate("bar")
+        # Make sure original cause is attached
+        self.assertIs(cm.exception.cause, cause)
+
+    def test_it_validates_formats_of_any_type(self):
+        checker = mock.Mock(spec=FormatChecker)
+        validator = self.validator_class(
+            {"format" : "foo"}, format_checker=checker,
+        )
+
+        validator.validate([1, 2, 3])
+
+        checker.check.assert_called_once_with([1, 2, 3], "foo")
+
+        cause = ValueError()
+        checker.check.side_effect = FormatError('aoeu', cause=cause)
+
+        with self.assertRaises(ValidationError) as cm:
+            validator.validate([1, 2, 3])
+        # Make sure original cause is attached
+        self.assertIs(cm.exception.cause, cause)
+
+
+if sys.maxunicode == 2 ** 16 - 1:          # This is a narrow build.
+    def narrow_unicode_build(case, test):
+        if "supplementary Unicode" in test["description"]:
+            return "Not running surrogate Unicode case, this Python is narrow."
+else:
+    def narrow_unicode_build(case, test):  # This isn't, skip nothing.
+        return
+
+
+@load_json_cases(
+    "draft3/*.json",
+    skip=narrow_unicode_build,
+    ignore_glob="draft3/refRemote.json",
+)
+@load_json_cases(
+    "draft3/optional/format.json", skip=missing_format(draft3_format_checker)
+)
+@load_json_cases("draft3/optional/bignum.json")
+@load_json_cases("draft3/optional/zeroTerminatedFloats.json")
+class TestDraft3(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
+    validator_class = Draft3Validator
+    validator_kwargs = {"format_checker" : draft3_format_checker}
+
+    def test_any_type_is_valid_for_type_any(self):
+        validator = self.validator_class({"type" : "any"})
+        validator.validate(mock.Mock())
+
+    # TODO: we're in need of more meta schema tests
+    def test_invalid_properties(self):
+        with self.assertRaises(SchemaError):
+            validate({}, {"properties": {"test": True}},
+                     cls=self.validator_class)
+
+    def test_minItems_invalid_string(self):
+        with self.assertRaises(SchemaError):
+            # needs to be an integer
+            validate([1], {"minItems" : "1"}, cls=self.validator_class)
+
+
+@load_json_cases(
+    "draft4/*.json",
+    skip=narrow_unicode_build,
+    ignore_glob="draft4/refRemote.json",
+)
+@load_json_cases(
+    "draft4/optional/format.json", skip=missing_format(draft4_format_checker)
+)
+@load_json_cases("draft4/optional/bignum.json")
+@load_json_cases("draft4/optional/zeroTerminatedFloats.json")
+class TestDraft4(unittest.TestCase, TypesMixin, DecimalMixin, FormatMixin):
+    validator_class = Draft4Validator
+    validator_kwargs = {"format_checker" : draft4_format_checker}
+
+    # TODO: we're in need of more meta schema tests
+    def test_invalid_properties(self):
+        with self.assertRaises(SchemaError):
+            validate({}, {"properties": {"test": True}},
+                     cls=self.validator_class)
+
+    def test_minItems_invalid_string(self):
+        with self.assertRaises(SchemaError):
+            # needs to be an integer
+            validate([1], {"minItems" : "1"}, cls=self.validator_class)
+
+
+class RemoteRefResolutionMixin(object):
+    def setUp(self):
+        patch = mock.patch("jsonschema.validators.requests")
+        requests = patch.start()
+        requests.get.side_effect = self.resolve
+        self.addCleanup(patch.stop)
+
+    def resolve(self, reference):
+        _, _, reference = reference.partition("http://localhost:1234/")
+        return mock.Mock(**{"json.return_value" : REMOTES.get(reference)})
+
+
+@load_json_cases("draft3/refRemote.json")
+class Draft3RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
+    validator_class = Draft3Validator
+
+
+@load_json_cases("draft4/refRemote.json")
+class Draft4RemoteResolution(RemoteRefResolutionMixin, unittest.TestCase):
+    validator_class = Draft4Validator
diff --git a/lib/spack/external/jsonschema/tests/test_validators.py b/lib/spack/external/jsonschema/tests/test_validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8692388ea4e853de9136f988bb8a9f5d0c9e7bf
--- /dev/null
+++ b/lib/spack/external/jsonschema/tests/test_validators.py
@@ -0,0 +1,786 @@
+from collections import deque
+from contextlib import contextmanager
+import json
+
+from jsonschema import FormatChecker, ValidationError
+from jsonschema.tests.compat import mock, unittest
+from jsonschema.validators import (
+    RefResolutionError, UnknownType, Draft3Validator,
+    Draft4Validator, RefResolver, create, extend, validator_for, validate,
+)
+
+
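+# create() builds a brand-new validator class from a meta-schema plus a
+# mapping of validator names to callables; extend() derives a new class from
+# an existing one.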
+class TestCreateAndExtend(unittest.TestCase):
+    def setUp(self):
+        self.meta_schema = {u"properties" : {u"smelly" : {}}}
+        self.smelly = mock.MagicMock()
+        self.validators = {u"smelly" : self.smelly}
+        self.types = {u"dict" : dict}
+        self.Validator = create(
+            meta_schema=self.meta_schema,
+            validators=self.validators,
+            default_types=self.types,
+        )
+
+        self.validator_value = 12
+        self.schema = {u"smelly" : self.validator_value}
+        self.validator = self.Validator(self.schema)
+
+    def test_attrs(self):
+        self.assertEqual(self.Validator.VALIDATORS, self.validators)
+        self.assertEqual(self.Validator.META_SCHEMA, self.meta_schema)
+        self.assertEqual(self.Validator.DEFAULT_TYPES, self.types)
+
+    def test_init(self):
+        self.assertEqual(self.validator.schema, self.schema)
+
+    def test_iter_errors(self):
+        instance = "hello"
+
+        self.smelly.return_value = []
+        self.assertEqual(list(self.validator.iter_errors(instance)), [])
+
+        error = mock.Mock()
+        self.smelly.return_value = [error]
+        self.assertEqual(list(self.validator.iter_errors(instance)), [error])
+
+        self.smelly.assert_called_with(
+            self.validator, self.validator_value, instance, self.schema,
+        )
+
+    def test_if_a_version_is_provided_it_is_registered(self):
+        with mock.patch("jsonschema.validators.validates") as validates:
+            validates.side_effect = lambda version : lambda cls : cls
+            Validator = create(meta_schema={u"id" : ""}, version="my version")
+        validates.assert_called_once_with("my version")
+        self.assertEqual(Validator.__name__, "MyVersionValidator")
+
+    def test_if_a_version_is_not_provided_it_is_not_registered(self):
+        with mock.patch("jsonschema.validators.validates") as validates:
+            create(meta_schema={u"id" : "id"})
+        self.assertFalse(validates.called)
+
+    def test_extend(self):
+        validators = dict(self.Validator.VALIDATORS)
+        new = mock.Mock()
+
+        Extended = extend(self.Validator, validators={u"a new one" : new})
+
+        validators.update([(u"a new one", new)])
+        self.assertEqual(Extended.VALIDATORS, validators)
+        self.assertNotIn(u"a new one", self.Validator.VALIDATORS)
+
+        self.assertEqual(Extended.META_SCHEMA, self.Validator.META_SCHEMA)
+        self.assertEqual(Extended.DEFAULT_TYPES, self.Validator.DEFAULT_TYPES)
+
+
+class TestIterErrors(unittest.TestCase):
+    def setUp(self):
+        self.validator = Draft3Validator({})
+
+    def test_iter_errors(self):
+        instance = [1, 2]
+        schema = {
+            u"disallow" : u"array",
+            u"enum" : [["a", "b", "c"], ["d", "e", "f"]],
+            u"minItems" : 3
+        }
+
+        got = (e.message for e in self.validator.iter_errors(instance, schema))
+        expected = [
+            "%r is disallowed for [1, 2]" % (schema["disallow"],),
+            "[1, 2] is too short",
+            "[1, 2] is not one of %r" % (schema["enum"],),
+        ]
+        self.assertEqual(sorted(got), sorted(expected))
+
+    def test_iter_errors_multiple_failures_one_validator(self):
+        instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
+        schema = {
+            u"properties" : {
+                "foo" : {u"type" : "string"},
+                "bar" : {u"minItems" : 2},
+                "baz" : {u"maximum" : 10, u"enum" : [2, 4, 6, 8]},
+            }
+        }
+
+        errors = list(self.validator.iter_errors(instance, schema))
+        self.assertEqual(len(errors), 4)
+
+
+class TestValidationErrorMessages(unittest.TestCase):
+    def message_for(self, instance, schema, *args, **kwargs):
+        kwargs.setdefault("cls", Draft3Validator)
+        with self.assertRaises(ValidationError) as e:
+            validate(instance, schema, *args, **kwargs)
+        return e.exception.message
+
+    def test_single_type_failure(self):
+        message = self.message_for(instance=1, schema={u"type" : u"string"})
+        self.assertEqual(message, "1 is not of type %r" % u"string")
+
+    def test_single_type_list_failure(self):
+        message = self.message_for(instance=1, schema={u"type" : [u"string"]})
+        self.assertEqual(message, "1 is not of type %r" % u"string")
+
+    def test_multiple_type_failure(self):
+        types = u"string", u"object"
+        message = self.message_for(instance=1, schema={u"type" : list(types)})
+        self.assertEqual(message, "1 is not of type %r, %r" % types)
+
+    def test_object_without_title_type_failure(self):
+        type = {u"type" : [{u"minimum" : 3}]}
+        message = self.message_for(instance=1, schema={u"type" : [type]})
+        self.assertEqual(message, "1 is not of type %r" % (type,))
+
+    def test_object_with_name_type_failure(self):
+        name = "Foo"
+        schema = {u"type" : [{u"name" : name, u"minimum" : 3}]}
+        message = self.message_for(instance=1, schema=schema)
+        self.assertEqual(message, "1 is not of type %r" % (name,))
+
+    def test_minimum(self):
+        message = self.message_for(instance=1, schema={"minimum" : 2})
+        self.assertEqual(message, "1 is less than the minimum of 2")
+
+    def test_maximum(self):
+        message = self.message_for(instance=1, schema={"maximum" : 0})
+        self.assertEqual(message, "1 is greater than the maximum of 0")
+
+    def test_dependencies_failure_has_single_element_not_list(self):
+        depend, on = "bar", "foo"
+        schema = {u"dependencies" : {depend : on}}
+        message = self.message_for({"bar" : 2}, schema)
+        self.assertEqual(message, "%r is a dependency of %r" % (on, depend))
+
+    def test_additionalItems_single_failure(self):
+        message = self.message_for(
+            [2], {u"items" : [], u"additionalItems" : False},
+        )
+        self.assertIn("(2 was unexpected)", message)
+
+    def test_additionalItems_multiple_failures(self):
+        message = self.message_for(
+            [1, 2, 3], {u"items" : [], u"additionalItems" : False}
+        )
+        self.assertIn("(1, 2, 3 were unexpected)", message)
+
+    def test_additionalProperties_single_failure(self):
+        additional = "foo"
+        schema = {u"additionalProperties" : False}
+        message = self.message_for({additional : 2}, schema)
+        self.assertIn("(%r was unexpected)" % (additional,), message)
+
+    def test_additionalProperties_multiple_failures(self):
+        schema = {u"additionalProperties" : False}
+        message = self.message_for(dict.fromkeys(["foo", "bar"]), schema)
+
+        self.assertIn(repr("foo"), message)
+        self.assertIn(repr("bar"), message)
+        self.assertIn("were unexpected)", message)
+
+    def test_invalid_format_default_message(self):
+        checker = FormatChecker(formats=())
+        check_fn = mock.Mock(return_value=False)
+        checker.checks(u"thing")(check_fn)
+
+        schema = {u"format" : u"thing"}
+        message = self.message_for("bla", schema, format_checker=checker)
+
+        self.assertIn(repr("bla"), message)
+        self.assertIn(repr("thing"), message)
+        self.assertIn("is not a", message)
+
+
+class TestValidationErrorDetails(unittest.TestCase):
+    # TODO: These really need unit tests for each individual validator, rather
+    #       than just these higher level tests.
+    def test_anyOf(self):
+        instance = 5
+        schema = {
+            "anyOf": [
+                {"minimum": 20},
+                {"type": "string"}
+            ]
+        }
+
+        validator = Draft4Validator(schema)
+        errors = list(validator.iter_errors(instance))
+        self.assertEqual(len(errors), 1)
+        e = errors[0]
+
+        self.assertEqual(e.validator, "anyOf")
+        self.assertEqual(e.validator_value, schema["anyOf"])
+        self.assertEqual(e.instance, instance)
+        self.assertEqual(e.schema, schema)
+        self.assertIsNone(e.parent)
+
+        self.assertEqual(e.path, deque([]))
+        self.assertEqual(e.relative_path, deque([]))
+        self.assertEqual(e.absolute_path, deque([]))
+
+        self.assertEqual(e.schema_path, deque(["anyOf"]))
+        self.assertEqual(e.relative_schema_path, deque(["anyOf"]))
+        self.assertEqual(e.absolute_schema_path, deque(["anyOf"]))
+
+        self.assertEqual(len(e.context), 2)
+
+        e1, e2 = sorted_errors(e.context)
+
+        self.assertEqual(e1.validator, "minimum")
+        self.assertEqual(e1.validator_value, schema["anyOf"][0]["minimum"])
+        self.assertEqual(e1.instance, instance)
+        self.assertEqual(e1.schema, schema["anyOf"][0])
+        self.assertIs(e1.parent, e)
+
+        self.assertEqual(e1.path, deque([]))
+        self.assertEqual(e1.absolute_path, deque([]))
+        self.assertEqual(e1.relative_path, deque([]))
+
+        self.assertEqual(e1.schema_path, deque([0, "minimum"]))
+        self.assertEqual(e1.relative_schema_path, deque([0, "minimum"]))
+        self.assertEqual(
+            e1.absolute_schema_path, deque(["anyOf", 0, "minimum"]),
+        )
+
+        self.assertFalse(e1.context)
+
+        self.assertEqual(e2.validator, "type")
+        self.assertEqual(e2.validator_value, schema["anyOf"][1]["type"])
+        self.assertEqual(e2.instance, instance)
+        self.assertEqual(e2.schema, schema["anyOf"][1])
+        self.assertIs(e2.parent, e)
+
+        self.assertEqual(e2.path, deque([]))
+        self.assertEqual(e2.relative_path, deque([]))
+        self.assertEqual(e2.absolute_path, deque([]))
+
+        self.assertEqual(e2.schema_path, deque([1, "type"]))
+        self.assertEqual(e2.relative_schema_path, deque([1, "type"]))
+        self.assertEqual(e2.absolute_schema_path, deque(["anyOf", 1, "type"]))
+
+        self.assertEqual(len(e2.context), 0)
+
+    def test_type(self):
+        instance = {"foo": 1}
+        schema = {
+            "type": [
+                {"type": "integer"},
+                {
+                    "type": "object",
+                    "properties": {
+                        "foo": {"enum": [2]}
+                    }
+                }
+            ]
+        }
+
+        validator = Draft3Validator(schema)
+        errors = list(validator.iter_errors(instance))
+        self.assertEqual(len(errors), 1)
+        e = errors[0]
+
+        self.assertEqual(e.validator, "type")
+        self.assertEqual(e.validator_value, schema["type"])
+        self.assertEqual(e.instance, instance)
+        self.assertEqual(e.schema, schema)
+        self.assertIsNone(e.parent)
+
+        self.assertEqual(e.path, deque([]))
+        self.assertEqual(e.relative_path, deque([]))
+        self.assertEqual(e.absolute_path, deque([]))
+
+        self.assertEqual(e.schema_path, deque(["type"]))
+        self.assertEqual(e.relative_schema_path, deque(["type"]))
+        self.assertEqual(e.absolute_schema_path, deque(["type"]))
+
+        self.assertEqual(len(e.context), 2)
+
+        e1, e2 = sorted_errors(e.context)
+
+        self.assertEqual(e1.validator, "type")
+        self.assertEqual(e1.validator_value, schema["type"][0]["type"])
+        self.assertEqual(e1.instance, instance)
+        self.assertEqual(e1.schema, schema["type"][0])
+        self.assertIs(e1.parent, e)
+
+        self.assertEqual(e1.path, deque([]))
+        self.assertEqual(e1.relative_path, deque([]))
+        self.assertEqual(e1.absolute_path, deque([]))
+
+        self.assertEqual(e1.schema_path, deque([0, "type"]))
+        self.assertEqual(e1.relative_schema_path, deque([0, "type"]))
+        self.assertEqual(e1.absolute_schema_path, deque(["type", 0, "type"]))
+
+        self.assertFalse(e1.context)
+
+        self.assertEqual(e2.validator, "enum")
+        self.assertEqual(e2.validator_value, [2])
+        self.assertEqual(e2.instance, 1)
+        self.assertEqual(e2.schema, {u"enum" : [2]})
+        self.assertIs(e2.parent, e)
+
+        self.assertEqual(e2.path, deque(["foo"]))
+        self.assertEqual(e2.relative_path, deque(["foo"]))
+        self.assertEqual(e2.absolute_path, deque(["foo"]))
+
+        self.assertEqual(
+            e2.schema_path, deque([1, "properties", "foo", "enum"]),
+        )
+        self.assertEqual(
+            e2.relative_schema_path, deque([1, "properties", "foo", "enum"]),
+        )
+        self.assertEqual(
+            e2.absolute_schema_path,
+            deque(["type", 1, "properties", "foo", "enum"]),
+        )
+
+        self.assertFalse(e2.context)
+
+    def test_single_nesting(self):
+        instance = {"foo" : 2, "bar" : [1], "baz" : 15, "quux" : "spam"}
+        schema = {
+            "properties" : {
+                "foo" : {"type" : "string"},
+                "bar" : {"minItems" : 2},
+                "baz" : {"maximum" : 10, "enum" : [2, 4, 6, 8]},
+            }
+        }
+
+        validator = Draft3Validator(schema)
+        errors = validator.iter_errors(instance)
+        e1, e2, e3, e4 = sorted_errors(errors)
+
+        self.assertEqual(e1.path, deque(["bar"]))
+        self.assertEqual(e2.path, deque(["baz"]))
+        self.assertEqual(e3.path, deque(["baz"]))
+        self.assertEqual(e4.path, deque(["foo"]))
+
+        self.assertEqual(e1.relative_path, deque(["bar"]))
+        self.assertEqual(e2.relative_path, deque(["baz"]))
+        self.assertEqual(e3.relative_path, deque(["baz"]))
+        self.assertEqual(e4.relative_path, deque(["foo"]))
+
+        self.assertEqual(e1.absolute_path, deque(["bar"]))
+        self.assertEqual(e2.absolute_path, deque(["baz"]))
+        self.assertEqual(e3.absolute_path, deque(["baz"]))
+        self.assertEqual(e4.absolute_path, deque(["foo"]))
+
+        self.assertEqual(e1.validator, "minItems")
+        self.assertEqual(e2.validator, "enum")
+        self.assertEqual(e3.validator, "maximum")
+        self.assertEqual(e4.validator, "type")
+
+    def test_multiple_nesting(self):
+        instance = [1, {"foo" : 2, "bar" : {"baz" : [1]}}, "quux"]
+        schema = {
+            "type" : "string",
+            "items" : {
+                "type" : ["string", "object"],
+                "properties" : {
+                    "foo" : {"enum" : [1, 3]},
+                    "bar" : {
+                        "type" : "array",
+                        "properties" : {
+                            "bar" : {"required" : True},
+                            "baz" : {"minItems" : 2},
+                        }
+                    }
+                }
+            }
+        }
+
+        validator = Draft3Validator(schema)
+        errors = validator.iter_errors(instance)
+        e1, e2, e3, e4, e5, e6 = sorted_errors(errors)
+
+        self.assertEqual(e1.path, deque([]))
+        self.assertEqual(e2.path, deque([0]))
+        self.assertEqual(e3.path, deque([1, "bar"]))
+        self.assertEqual(e4.path, deque([1, "bar", "bar"]))
+        self.assertEqual(e5.path, deque([1, "bar", "baz"]))
+        self.assertEqual(e6.path, deque([1, "foo"]))
+
+        self.assertEqual(e1.schema_path, deque(["type"]))
+        self.assertEqual(e2.schema_path, deque(["items", "type"]))
+        self.assertEqual(
+            list(e3.schema_path), ["items", "properties", "bar", "type"],
+        )
+        self.assertEqual(
+            list(e4.schema_path),
+            ["items", "properties", "bar", "properties", "bar", "required"],
+        )
+        self.assertEqual(
+            list(e5.schema_path),
+            ["items", "properties", "bar", "properties", "baz", "minItems"]
+        )
+        self.assertEqual(
+            list(e6.schema_path), ["items", "properties", "foo", "enum"],
+        )
+
+        self.assertEqual(e1.validator, "type")
+        self.assertEqual(e2.validator, "type")
+        self.assertEqual(e3.validator, "type")
+        self.assertEqual(e4.validator, "required")
+        self.assertEqual(e5.validator, "minItems")
+        self.assertEqual(e6.validator, "enum")
+
+    def test_additionalProperties(self):
+        instance = {"bar": "bar", "foo": 2}
+        schema = {
+            "additionalProperties" : {"type": "integer", "minimum": 5}
+        }
+
+        validator = Draft3Validator(schema)
+        errors = validator.iter_errors(instance)
+        e1, e2 = sorted_errors(errors)
+
+        self.assertEqual(e1.path, deque(["bar"]))
+        self.assertEqual(e2.path, deque(["foo"]))
+
+        self.assertEqual(e1.validator, "type")
+        self.assertEqual(e2.validator, "minimum")
+
+    def test_patternProperties(self):
+        instance = {"bar": 1, "foo": 2}
+        schema = {
+            "patternProperties" : {
+                "bar": {"type": "string"},
+                "foo": {"minimum": 5}
+            }
+        }
+
+        validator = Draft3Validator(schema)
+        errors = validator.iter_errors(instance)
+        e1, e2 = sorted_errors(errors)
+
+        self.assertEqual(e1.path, deque(["bar"]))
+        self.assertEqual(e2.path, deque(["foo"]))
+
+        self.assertEqual(e1.validator, "type")
+        self.assertEqual(e2.validator, "minimum")
+
+    def test_additionalItems(self):
+        instance = ["foo", 1]
+        schema = {
+            "items": [],
+            "additionalItems" : {"type": "integer", "minimum": 5}
+        }
+
+        validator = Draft3Validator(schema)
+        errors = validator.iter_errors(instance)
+        e1, e2 = sorted_errors(errors)
+
+        self.assertEqual(e1.path, deque([0]))
+        self.assertEqual(e2.path, deque([1]))
+
+        self.assertEqual(e1.validator, "type")
+        self.assertEqual(e2.validator, "minimum")
+
+    def test_additionalItems_with_items(self):
+        instance = ["foo", "bar", 1]
+        schema = {
+            "items": [{}],
+            "additionalItems" : {"type": "integer", "minimum": 5}
+        }
+
+        validator = Draft3Validator(schema)
+        errors = validator.iter_errors(instance)
+        e1, e2 = sorted_errors(errors)
+
+        self.assertEqual(e1.path, deque([1]))
+        self.assertEqual(e2.path, deque([2]))
+
+        self.assertEqual(e1.validator, "type")
+        self.assertEqual(e2.validator, "minimum")
+
+
+class ValidatorTestMixin(object):
+    def setUp(self):
+        self.instance = mock.Mock()
+        self.schema = {}
+        self.resolver = mock.Mock()
+        self.validator = self.validator_class(self.schema)
+
+    def test_valid_instances_are_valid(self):
+        errors = iter([])
+
+        with mock.patch.object(
+            self.validator, "iter_errors", return_value=errors,
+        ):
+            self.assertTrue(
+                self.validator.is_valid(self.instance, self.schema)
+            )
+
+    def test_invalid_instances_are_not_valid(self):
+        errors = iter([mock.Mock()])
+
+        with mock.patch.object(
+            self.validator, "iter_errors", return_value=errors,
+        ):
+            self.assertFalse(
+                self.validator.is_valid(self.instance, self.schema)
+            )
+
+    def test_non_existent_properties_are_ignored(self):
+        instance, my_property, my_value = mock.Mock(), mock.Mock(), mock.Mock()
+        validate(instance=instance, schema={my_property : my_value})
+
+    def test_it_creates_a_ref_resolver_if_not_provided(self):
+        self.assertIsInstance(self.validator.resolver, RefResolver)
+
+    def test_it_delegates_to_a_ref_resolver(self):
+        resolver = RefResolver("", {})
+        schema = {"$ref" : mock.Mock()}
+
+        @contextmanager
+        def resolving():
+            yield {"type": "integer"}
+
+        with mock.patch.object(resolver, "resolving") as resolve:
+            resolve.return_value = resolving()
+            with self.assertRaises(ValidationError):
+                self.validator_class(schema, resolver=resolver).validate(None)
+
+        resolve.assert_called_once_with(schema["$ref"])
+
+    def test_is_type_is_true_for_valid_type(self):
+        self.assertTrue(self.validator.is_type("foo", "string"))
+
+    def test_is_type_is_false_for_invalid_type(self):
+        self.assertFalse(self.validator.is_type("foo", "array"))
+
+    def test_is_type_evades_bool_inheriting_from_int(self):
+        self.assertFalse(self.validator.is_type(True, "integer"))
+        self.assertFalse(self.validator.is_type(True, "number"))
+
+    def test_is_type_raises_exception_for_unknown_type(self):
+        with self.assertRaises(UnknownType):
+            self.validator.is_type("foo", object())
+
+
+class TestDraft3Validator(ValidatorTestMixin, unittest.TestCase):
+    validator_class = Draft3Validator
+
+    def test_is_type_is_true_for_any_type(self):
+        self.assertTrue(self.validator.is_valid(mock.Mock(), {"type": "any"}))
+
+    def test_is_type_does_not_evade_bool_if_it_is_being_tested(self):
+        self.assertTrue(self.validator.is_type(True, "boolean"))
+        self.assertTrue(self.validator.is_valid(True, {"type": "any"}))
+
+    def test_non_string_custom_types(self):
+        schema = {'type': [None]}
+        cls = self.validator_class(schema, types={None: type(None)})
+        cls.validate(None, schema)
+
+
+class TestDraft4Validator(ValidatorTestMixin, unittest.TestCase):
+    validator_class = Draft4Validator
+
+
+class TestBuiltinFormats(unittest.TestCase):
+    """
+    The built-in (specification-defined) formats do not raise type errors.
+
+    If an instance or value is not a string, it should be ignored.
+
+    """
+
+
+for format in FormatChecker.checkers:
+    def test(self, format=format):
+        v = Draft4Validator({"format": format}, format_checker=FormatChecker())
+        v.validate(123)
+
+    name = "test_{0}_ignores_non_strings".format(format)
+    test.__name__ = name
+    setattr(TestBuiltinFormats, name, test)
+    del test  # Ugh py.test. Stop discovering top level tests.
+
+
+class TestValidatorFor(unittest.TestCase):
+    def test_draft_3(self):
+        schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
+        self.assertIs(validator_for(schema), Draft3Validator)
+
+        schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
+        self.assertIs(validator_for(schema), Draft3Validator)
+
+    def test_draft_4(self):
+        schema = {"$schema" : "http://json-schema.org/draft-04/schema"}
+        self.assertIs(validator_for(schema), Draft4Validator)
+
+        schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
+        self.assertIs(validator_for(schema), Draft4Validator)
+
+    def test_custom_validator(self):
+        Validator = create(meta_schema={"id" : "meta schema id"}, version="12")
+        schema = {"$schema" : "meta schema id"}
+        self.assertIs(validator_for(schema), Validator)
+
+    def test_validator_for_jsonschema_default(self):
+        self.assertIs(validator_for({}), Draft4Validator)
+
+    def test_validator_for_custom_default(self):
+        self.assertIs(validator_for({}, default=None), None)
+
+
+class TestValidate(unittest.TestCase):
+    def test_draft3_validator_is_chosen(self):
+        schema = {"$schema" : "http://json-schema.org/draft-03/schema#"}
+        with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
+            validate({}, schema)
+            chk_schema.assert_called_once_with(schema)
+        # Make sure it works without the empty fragment
+        schema = {"$schema" : "http://json-schema.org/draft-03/schema"}
+        with mock.patch.object(Draft3Validator, "check_schema") as chk_schema:
+            validate({}, schema)
+            chk_schema.assert_called_once_with(schema)
+
+    def test_draft4_validator_is_chosen(self):
+        schema = {"$schema" : "http://json-schema.org/draft-04/schema#"}
+        with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
+            validate({}, schema)
+            chk_schema.assert_called_once_with(schema)
+
+    def test_draft4_validator_is_the_default(self):
+        with mock.patch.object(Draft4Validator, "check_schema") as chk_schema:
+            validate({}, {})
+            chk_schema.assert_called_once_with({})
+
+
+class TestRefResolver(unittest.TestCase):
+
+    base_uri = ""
+    stored_uri = "foo://stored"
+    stored_schema = {"stored" : "schema"}
+
+    def setUp(self):
+        self.referrer = {}
+        self.store = {self.stored_uri : self.stored_schema}
+        self.resolver = RefResolver(self.base_uri, self.referrer, self.store)
+
+    def test_it_does_not_retrieve_schema_urls_from_the_network(self):
+        ref = Draft3Validator.META_SCHEMA["id"]
+        with mock.patch.object(self.resolver, "resolve_remote") as remote:
+            with self.resolver.resolving(ref) as resolved:
+                self.assertEqual(resolved, Draft3Validator.META_SCHEMA)
+        self.assertFalse(remote.called)
+
+    def test_it_resolves_local_refs(self):
+        ref = "#/properties/foo"
+        self.referrer["properties"] = {"foo" : object()}
+        with self.resolver.resolving(ref) as resolved:
+            self.assertEqual(resolved, self.referrer["properties"]["foo"])
+
+    def test_it_resolves_local_refs_with_id(self):
+        schema = {"id": "foo://bar/schema#", "a": {"foo": "bar"}}
+        resolver = RefResolver.from_schema(schema)
+        with resolver.resolving("#/a") as resolved:
+            self.assertEqual(resolved, schema["a"])
+        with resolver.resolving("foo://bar/schema#/a") as resolved:
+            self.assertEqual(resolved, schema["a"])
+
+    def test_it_retrieves_stored_refs(self):
+        with self.resolver.resolving(self.stored_uri) as resolved:
+            self.assertIs(resolved, self.stored_schema)
+
+        self.resolver.store["cached_ref"] = {"foo" : 12}
+        with self.resolver.resolving("cached_ref#/foo") as resolved:
+            self.assertEqual(resolved, 12)
+
+    def test_it_retrieves_unstored_refs_via_requests(self):
+        ref = "http://bar#baz"
+        schema = {"baz" : 12}
+
+        with mock.patch("jsonschema.validators.requests") as requests:
+            requests.get.return_value.json.return_value = schema
+            with self.resolver.resolving(ref) as resolved:
+                self.assertEqual(resolved, 12)
+        requests.get.assert_called_once_with("http://bar")
+
+    def test_it_retrieves_unstored_refs_via_urlopen(self):
+        ref = "http://bar#baz"
+        schema = {"baz" : 12}
+
+        with mock.patch("jsonschema.validators.requests", None):
+            with mock.patch("jsonschema.validators.urlopen") as urlopen:
+                urlopen.return_value.read.return_value = (
+                    json.dumps(schema).encode("utf8"))
+                with self.resolver.resolving(ref) as resolved:
+                    self.assertEqual(resolved, 12)
+        urlopen.assert_called_once_with("http://bar")
+
+    def test_it_can_construct_a_base_uri_from_a_schema(self):
+        schema = {"id" : "foo"}
+        resolver = RefResolver.from_schema(schema)
+        self.assertEqual(resolver.base_uri, "foo")
+        with resolver.resolving("") as resolved:
+            self.assertEqual(resolved, schema)
+        with resolver.resolving("#") as resolved:
+            self.assertEqual(resolved, schema)
+        with resolver.resolving("foo") as resolved:
+            self.assertEqual(resolved, schema)
+        with resolver.resolving("foo#") as resolved:
+            self.assertEqual(resolved, schema)
+
+    def test_it_can_construct_a_base_uri_from_a_schema_without_id(self):
+        schema = {}
+        resolver = RefResolver.from_schema(schema)
+        self.assertEqual(resolver.base_uri, "")
+        with resolver.resolving("") as resolved:
+            self.assertEqual(resolved, schema)
+        with resolver.resolving("#") as resolved:
+            self.assertEqual(resolved, schema)
+
+    def test_custom_uri_scheme_handlers(self):
+        schema = {"foo": "bar"}
+        ref = "foo://bar"
+        foo_handler = mock.Mock(return_value=schema)
+        resolver = RefResolver("", {}, handlers={"foo": foo_handler})
+        with resolver.resolving(ref) as resolved:
+            self.assertEqual(resolved, schema)
+        foo_handler.assert_called_once_with(ref)
+
+    def test_cache_remote_on(self):
+        ref = "foo://bar"
+        foo_handler = mock.Mock()
+        resolver = RefResolver(
+            "", {}, cache_remote=True, handlers={"foo" : foo_handler},
+        )
+        with resolver.resolving(ref):
+            pass
+        with resolver.resolving(ref):
+            pass
+        foo_handler.assert_called_once_with(ref)
+
+    def test_cache_remote_off(self):
+        ref = "foo://bar"
+        foo_handler = mock.Mock()
+        resolver = RefResolver(
+            "", {}, cache_remote=False, handlers={"foo" : foo_handler},
+        )
+        with resolver.resolving(ref):
+            pass
+        with resolver.resolving(ref):
+            pass
+        self.assertEqual(foo_handler.call_count, 2)
+
+    def test_if_you_give_it_junk_you_get_a_resolution_error(self):
+        ref = "foo://bar"
+        foo_handler = mock.Mock(side_effect=ValueError("Oh no! What's this?"))
+        resolver = RefResolver("", {}, handlers={"foo" : foo_handler})
+        with self.assertRaises(RefResolutionError) as err:
+            with resolver.resolving(ref):
+                pass
+        self.assertEqual(str(err.exception), "Oh no! What's this?")
+
+
+def sorted_errors(errors):
+    def key(error):
+        return (
+            [str(e) for e in error.path],
+            [str(e) for e in error.schema_path]
+        )
+    return sorted(errors, key=key)
diff --git a/lib/spack/external/jsonschema/validators.py b/lib/spack/external/jsonschema/validators.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e326844f4c14aba56eb8d363bc4d63e03e80b4f
--- /dev/null
+++ b/lib/spack/external/jsonschema/validators.py
@@ -0,0 +1,428 @@
+from __future__ import division
+
+import contextlib
+import json
+import numbers
+
+try:
+    import requests
+except ImportError:
+    requests = None
+
+from jsonschema import _utils, _validators
+from jsonschema.compat import (
+    Sequence, urljoin, urlsplit, urldefrag, unquote, urlopen,
+    str_types, int_types, iteritems,
+)
+from jsonschema.exceptions import ErrorTree  # Backwards compatibility  # noqa
+from jsonschema.exceptions import RefResolutionError, SchemaError, UnknownType
+
+
+_unset = _utils.Unset()
+
+validators = {}
+meta_schemas = _utils.URIDict()
+
+
+def validates(version):
+    """
+    Register the decorated validator for a ``version`` of the specification.
+
+    Registered validators and their meta schemas will be considered when
+    parsing ``$schema`` properties' URIs.
+
+    :argument str version: an identifier to use as the version's name
+    :returns: a class decorator to decorate the validator with the version
+
+    """
+
+    def _validates(cls):
+        validators[version] = cls
+        if u"id" in cls.META_SCHEMA:
+            meta_schemas[cls.META_SCHEMA[u"id"]] = cls
+        return cls
+    return _validates
+
+
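+# Editor's sketch (illustrative; not part of upstream jsonschema): a class
+# registered with @validates becomes discoverable through its meta schema id.
+# The draft name and URI below are hypothetical.
+#
+#     @validates("draft9999")
+#     class HypotheticalValidator(object):
+#         META_SCHEMA = {u"id": u"http://example.com/draft9999/schema#"}
+#
+#     # validator_for() (defined below) would now return this class for any
+#     # schema whose $schema is u"http://example.com/draft9999/schema#".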
+def create(meta_schema, validators=(), version=None, default_types=None):  # noqa
+    if default_types is None:
+        default_types = {
+            u"array" : list, u"boolean" : bool, u"integer" : int_types,
+            u"null" : type(None), u"number" : numbers.Number, u"object" : dict,
+            u"string" : str_types,
+        }
+
+    class Validator(object):
+        VALIDATORS = dict(validators)
+        META_SCHEMA = dict(meta_schema)
+        DEFAULT_TYPES = dict(default_types)
+
+        def __init__(
+            self, schema, types=(), resolver=None, format_checker=None,
+        ):
+            self._types = dict(self.DEFAULT_TYPES)
+            self._types.update(types)
+
+            if resolver is None:
+                resolver = RefResolver.from_schema(schema)
+
+            self.resolver = resolver
+            self.format_checker = format_checker
+            self.schema = schema
+
+        @classmethod
+        def check_schema(cls, schema):
+            for error in cls(cls.META_SCHEMA).iter_errors(schema):
+                raise SchemaError.create_from(error)
+
+        def iter_errors(self, instance, _schema=None):
+            if _schema is None:
+                _schema = self.schema
+
+            with self.resolver.in_scope(_schema.get(u"id", u"")):
+                ref = _schema.get(u"$ref")
+                if ref is not None:
+                    validators = [(u"$ref", ref)]
+                else:
+                    validators = iteritems(_schema)
+
+                for k, v in validators:
+                    validator = self.VALIDATORS.get(k)
+                    if validator is None:
+                        continue
+
+                    errors = validator(self, v, instance, _schema) or ()
+                    for error in errors:
+                        # set details if not already set by the called
+                        # validator function
+                        error._set(
+                            validator=k,
+                            validator_value=v,
+                            instance=instance,
+                            schema=_schema,
+                        )
+                        if k != u"$ref":
+                            error.schema_path.appendleft(k)
+                        yield error
+
+        def descend(self, instance, schema, path=None, schema_path=None):
+            for error in self.iter_errors(instance, schema):
+                if path is not None:
+                    error.path.appendleft(path)
+                if schema_path is not None:
+                    error.schema_path.appendleft(schema_path)
+                yield error
+
+        def validate(self, *args, **kwargs):
+            for error in self.iter_errors(*args, **kwargs):
+                raise error
+
+        def is_type(self, instance, type):
+            if type not in self._types:
+                raise UnknownType(type, instance, self.schema)
+            pytypes = self._types[type]
+
+            # bool inherits from int, so ensure bools aren't reported as ints
+            if isinstance(instance, bool):
+                pytypes = _utils.flatten(pytypes)
+                is_number = any(
+                    issubclass(pytype, numbers.Number) for pytype in pytypes
+                )
+                if is_number and bool not in pytypes:
+                    return False
+            return isinstance(instance, pytypes)
+
+        def is_valid(self, instance, _schema=None):
+            error = next(self.iter_errors(instance, _schema), None)
+            return error is None
+
+    if version is not None:
+        Validator = validates(version)(Validator)
+        Validator.__name__ = version.title().replace(" ", "") + "Validator"
+
+    return Validator
+
+
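+# Editor's sketch (illustrative; not part of upstream jsonschema): create()
+# is the factory behind Draft3Validator and Draft4Validator below.  A
+# minimal, hypothetical validator class might be assembled like so:
+#
+#     from jsonschema.exceptions import ValidationError
+#
+#     def required(validator, value, instance, schema):
+#         if value and instance is None:
+#             yield ValidationError("a value is required")
+#
+#     MiniValidator = create(
+#         meta_schema={u"id": u"tag:example,2015:mini"},
+#         validators={u"required": required},
+#         version="mini",
+#     )
+#     MiniValidator({u"required": True}).validate(None)  # raises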
+def extend(validator, validators, version=None):
+    all_validators = dict(validator.VALIDATORS)
+    all_validators.update(validators)
+    return create(
+        meta_schema=validator.META_SCHEMA,
+        validators=all_validators,
+        version=version,
+        default_types=validator.DEFAULT_TYPES,
+    )
+
+
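+# Editor's sketch (illustrative; not part of upstream jsonschema): extend()
+# derives a new validator class, adding or overriding per-keyword functions.
+# "evenNumber" here is a made-up keyword; Draft4Validator is defined below.
+#
+#     from jsonschema.exceptions import ValidationError
+#
+#     def even_number(validator, value, instance, schema):
+#         if value and validator.is_type(instance, u"number") and instance % 2:
+#             yield ValidationError("%r is not even" % (instance,))
+#
+#     EvenValidator = extend(Draft4Validator, {u"evenNumber": even_number})
+#     EvenValidator({u"evenNumber": True}).validate(3)  # raises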
+Draft3Validator = create(
+    meta_schema=_utils.load_schema("draft3"),
+    validators={
+        u"$ref" : _validators.ref,
+        u"additionalItems" : _validators.additionalItems,
+        u"additionalProperties" : _validators.additionalProperties,
+        u"dependencies" : _validators.dependencies,
+        u"disallow" : _validators.disallow_draft3,
+        u"divisibleBy" : _validators.multipleOf,
+        u"enum" : _validators.enum,
+        u"extends" : _validators.extends_draft3,
+        u"format" : _validators.format,
+        u"items" : _validators.items,
+        u"maxItems" : _validators.maxItems,
+        u"maxLength" : _validators.maxLength,
+        u"maximum" : _validators.maximum,
+        u"minItems" : _validators.minItems,
+        u"minLength" : _validators.minLength,
+        u"minimum" : _validators.minimum,
+        u"multipleOf" : _validators.multipleOf,
+        u"pattern" : _validators.pattern,
+        u"patternProperties" : _validators.patternProperties,
+        u"properties" : _validators.properties_draft3,
+        u"type" : _validators.type_draft3,
+        u"uniqueItems" : _validators.uniqueItems,
+    },
+    version="draft3",
+)
+
+Draft4Validator = create(
+    meta_schema=_utils.load_schema("draft4"),
+    validators={
+        u"$ref" : _validators.ref,
+        u"additionalItems" : _validators.additionalItems,
+        u"additionalProperties" : _validators.additionalProperties,
+        u"allOf" : _validators.allOf_draft4,
+        u"anyOf" : _validators.anyOf_draft4,
+        u"dependencies" : _validators.dependencies,
+        u"enum" : _validators.enum,
+        u"format" : _validators.format,
+        u"items" : _validators.items,
+        u"maxItems" : _validators.maxItems,
+        u"maxLength" : _validators.maxLength,
+        u"maxProperties" : _validators.maxProperties_draft4,
+        u"maximum" : _validators.maximum,
+        u"minItems" : _validators.minItems,
+        u"minLength" : _validators.minLength,
+        u"minProperties" : _validators.minProperties_draft4,
+        u"minimum" : _validators.minimum,
+        u"multipleOf" : _validators.multipleOf,
+        u"not" : _validators.not_draft4,
+        u"oneOf" : _validators.oneOf_draft4,
+        u"pattern" : _validators.pattern,
+        u"patternProperties" : _validators.patternProperties,
+        u"properties" : _validators.properties_draft4,
+        u"required" : _validators.required_draft4,
+        u"type" : _validators.type_draft4,
+        u"uniqueItems" : _validators.uniqueItems,
+    },
+    version="draft4",
+)
+
+
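+# Editor's note (illustrative; not part of upstream jsonschema): typical
+# direct use of the classes built above.
+#
+#     validator = Draft4Validator({u"maxItems": 2})
+#     validator.validate([1, 2])         # silent on success
+#     validator.is_valid([1, 2, 3])      # -> False
+#     [e.message for e in validator.iter_errors([1, 2, 3])]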
+class RefResolver(object):
+    """
+    Resolve JSON References.
+
+    :argument str base_uri: URI of the referring document
+    :argument referrer: the actual referring document
+    :argument dict store: a mapping from URIs to documents to cache
+    :argument bool cache_remote: whether remote refs should be cached after
+        first resolution
+    :argument dict handlers: a mapping from URI schemes to functions that
+        should be used to retrieve them
+
+    """
+
+    def __init__(
+        self, base_uri, referrer, store=(), cache_remote=True, handlers=(),
+    ):
+        self.base_uri = base_uri
+        self.resolution_scope = base_uri
+        # This attribute is not used; it is kept for backwards compatibility
+        self.referrer = referrer
+        self.cache_remote = cache_remote
+        self.handlers = dict(handlers)
+
+        self.store = _utils.URIDict(
+            (id, validator.META_SCHEMA)
+            for id, validator in iteritems(meta_schemas)
+        )
+        self.store.update(store)
+        self.store[base_uri] = referrer
+
+    @classmethod
+    def from_schema(cls, schema, *args, **kwargs):
+        """
+        Construct a resolver from a JSON schema object.
+
+        :argument dict schema: the referring schema
+        :rtype: :class:`RefResolver`
+
+        """
+
+        return cls(schema.get(u"id", u""), schema, *args, **kwargs)
+
+    @contextlib.contextmanager
+    def in_scope(self, scope):
+        old_scope = self.resolution_scope
+        self.resolution_scope = urljoin(old_scope, scope)
+        try:
+            yield
+        finally:
+            self.resolution_scope = old_scope
+
+    @contextlib.contextmanager
+    def resolving(self, ref):
+        """
+        Context manager which resolves a JSON ``ref`` and enters the
+        resolution scope of this ref.
+
+        :argument str ref: reference to resolve
+
+        """
+
+        full_uri = urljoin(self.resolution_scope, ref)
+        uri, fragment = urldefrag(full_uri)
+        if not uri:
+            uri = self.base_uri
+
+        if uri in self.store:
+            document = self.store[uri]
+        else:
+            try:
+                document = self.resolve_remote(uri)
+            except Exception as exc:
+                raise RefResolutionError(exc)
+
+        old_base_uri, self.base_uri = self.base_uri, uri
+        try:
+            with self.in_scope(uri):
+                yield self.resolve_fragment(document, fragment)
+        finally:
+            self.base_uri = old_base_uri
+
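+    # Editor's sketch (illustrative; not part of upstream jsonschema):
+    # resolving() both dereferences the ref and narrows the resolution scope
+    # for any nested $refs encountered while the context is active.
+    #
+    #     resolver = RefResolver.from_schema({u"id": u"http://x/", u"a": 1})
+    #     with resolver.resolving(u"#/a") as value:
+    #         assert value == 1
+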
+    def resolve_fragment(self, document, fragment):
+        """
+        Resolve a ``fragment`` within the referenced ``document``.
+
+        :argument document: the referent document
+        :argument str fragment: a URI fragment to resolve within it
+
+        """
+
+        fragment = fragment.lstrip(u"/")
+        parts = unquote(fragment).split(u"/") if fragment else []
+
+        for part in parts:
+            part = part.replace(u"~1", u"/").replace(u"~0", u"~")
+
+            if isinstance(document, Sequence):
+                # Array indexes should be turned into integers
+                try:
+                    part = int(part)
+                except ValueError:
+                    pass
+            try:
+                document = document[part]
+            except (TypeError, LookupError):
+                raise RefResolutionError(
+                    "Unresolvable JSON pointer: %r" % fragment
+                )
+
+        return document
+
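+    # Editor's sketch (illustrative; not part of upstream jsonschema):
+    # resolve_fragment() follows RFC 6901 JSON Pointers, honoring the ~0
+    # ("~") and ~1 ("/") escapes and integer array indexes:
+    #
+    #     resolver.resolve_fragment({u"a/b": [10, 20]}, u"/a~1b/1")  # -> 20
+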
+    def resolve_remote(self, uri):
+        """
+        Resolve a remote ``uri``.
+
+        Does not check the store first, but stores the retrieved document in
+        the store if :attr:`RefResolver.cache_remote` is True.
+
+        .. note::
+
+            If the requests_ library is present, ``jsonschema`` will use it to
+            request the remote ``uri``, so that the correct encoding is
+            detected and used.
+
+            If it isn't, or if the scheme of the ``uri`` is not ``http`` or
+            ``https``, UTF-8 is assumed.
+
+        :argument str uri: the URI to resolve
+        :returns: the retrieved document
+
+        .. _requests: http://pypi.python.org/pypi/requests/
+
+        """
+
+        scheme = urlsplit(uri).scheme
+
+        if scheme in self.handlers:
+            result = self.handlers[scheme](uri)
+        elif (
+            scheme in [u"http", u"https"] and
+            requests and
+            getattr(requests.Response, "json", None) is not None
+        ):
+            # Requests has support for detecting the correct encoding of
+            # json over http
+            if callable(requests.Response.json):
+                result = requests.get(uri).json()
+            else:
+                result = requests.get(uri).json
+        else:
+            # Otherwise, pass off to urllib and assume utf-8
+            result = json.loads(urlopen(uri).read().decode("utf-8"))
+
+        if self.cache_remote:
+            self.store[uri] = result
+        return result
+
+
+def validator_for(schema, default=_unset):
+    if default is _unset:
+        default = Draft4Validator
+    return meta_schemas.get(schema.get(u"$schema", u""), default)
+
+
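+# Editor's sketch (illustrative; not part of upstream jsonschema):
+# validator_for() dispatches on the $schema URI, with Draft4Validator as
+# the fallback:
+#
+#     validator_for({u"$schema": u"http://json-schema.org/draft-03/schema#"})
+#     # -> Draft3Validator
+#     validator_for({})  # -> Draft4Validator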
+def validate(instance, schema, cls=None, *args, **kwargs):
+    """
+    Validate an instance under the given schema.
+
+        >>> validate([2, 3, 4], {"maxItems" : 2})
+        Traceback (most recent call last):
+            ...
+        ValidationError: [2, 3, 4] is too long
+
+    :func:`validate` will first verify that the provided schema is itself
+    valid, since not doing so can lead to less obvious error messages and
+    cause validation to fail in less obvious or less consistent ways. If you
+    already know you have a valid schema, or don't care, you might prefer
+    using the :meth:`~IValidator.validate` method directly on a specific
+    validator (e.g. :meth:`Draft4Validator.validate`).
+
+    :argument instance: the instance to validate
+    :argument schema: the schema to validate with
+    :argument cls: an :class:`IValidator` class that will be used to validate
+                   the instance.
+
+    If the ``cls`` argument is not provided, two things will happen in
+    accordance with the specification. First, if the schema has a
+    :validator:`$schema` property containing a known meta-schema [#]_ then the
+    proper validator will be used.  The specification recommends that all
+    schemas contain :validator:`$schema` properties for this reason. If no
+    :validator:`$schema` property is found, the default validator class is
+    :class:`Draft4Validator`.
+
+    Any other provided positional and keyword arguments will be passed on when
+    instantiating the ``cls``.
+
+    :raises:
+        :exc:`ValidationError` if the instance is invalid
+
+        :exc:`SchemaError` if the schema itself is invalid
+
+    .. rubric:: Footnotes
+    .. [#] known by a validator registered with :func:`validates`
+    """
+    if cls is None:
+        cls = validator_for(schema)
+    cls.check_schema(schema)
+    cls(schema, *args, **kwargs).validate(instance)
diff --git a/lib/spack/external/nose/LICENSE b/lib/spack/external/nose/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..8add30ad590a65db7e5914f5417eac39a64402a3
--- /dev/null
+++ b/lib/spack/external/nose/LICENSE
@@ -0,0 +1,504 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
+
+           How to Apply These Terms to Your New Libraries
+
+  If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change.  You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+  To apply these terms, attach the following notices to the library.  It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This library is free software; you can redistribute it and/or
+    modify it under the terms of the GNU Lesser General Public
+    License as published by the Free Software Foundation; either
+    version 2.1 of the License, or (at your option) any later version.
+
+    This library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+    Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public
+    License along with this library; if not, write to the Free Software
+    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the
+  library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+  Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/lib/spack/external/nose/__init__.py b/lib/spack/external/nose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ae1362b7acc51c226d002e981e0f0ca3fe4ea50
--- /dev/null
+++ b/lib/spack/external/nose/__init__.py
@@ -0,0 +1,15 @@
+from nose.core import collector, main, run, run_exit, runmodule
+# backwards compatibility
+from nose.exc import SkipTest, DeprecatedTest
+from nose.tools import with_setup
+
+__author__ = 'Jason Pellerin'
+__versioninfo__ = (1, 3, 7)
+__version__ = '.'.join(map(str, __versioninfo__))
+
+__all__ = [
+    'main', 'run', 'run_exit', 'runmodule', 'with_setup',
+    'SkipTest', 'DeprecatedTest', 'collector'
+    ]
+
+
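The re-exports above are nose's public entry points. A minimal sketch of driving
them programmatically (a hedged example; it assumes this vendored copy is
importable, e.g. with lib/spack/external on sys.path as bin/spack arranges)::

    import nose

    # Collect and run tests; returns True/False instead of exiting.
    passed = nose.run(argv=['nosetests', '-v'])

    # nose.main / nose.run_exit behave the same but call sys.exit() when done.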
diff --git a/lib/spack/external/nose/__main__.py b/lib/spack/external/nose/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b402d9df12ac943767b98c9435c1aac09c100668
--- /dev/null
+++ b/lib/spack/external/nose/__main__.py
@@ -0,0 +1,8 @@
+import sys
+
+from nose.core import run_exit
+
+if sys.argv[0].endswith('__main__.py'):
+    sys.argv[0] = '%s -m nose' % sys.executable
+
+run_exit()
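With this __main__ module in place, the package can be executed directly; a
brief usage note (assuming a Python 2.7 interpreter that can import this
vendored nose)::

    $ python -m nose -v    # equivalent to running the nosetests script

The argv[0] rewrite above only adjusts how the program names itself in usage
output; run_exit() does the actual work.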
diff --git a/lib/spack/external/nose/case.py b/lib/spack/external/nose/case.py
new file mode 100644
index 0000000000000000000000000000000000000000..cffa4ab4c9e033611668ea3ed3cc2e943c62c4f1
--- /dev/null
+++ b/lib/spack/external/nose/case.py
@@ -0,0 +1,397 @@
+"""nose unittest.TestCase subclasses. It is not necessary to subclass these
+classes when writing tests; they are used internally by nose.loader.TestLoader
+to create test cases from test functions and methods in test classes.
+"""
+import logging
+import sys
+import unittest
+from inspect import isfunction
+from nose.config import Config
+from nose.failure import Failure # for backwards compatibility
+from nose.util import resolve_name, test_address, try_run
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Test']
+
+
+class Test(unittest.TestCase):
+    """The universal test case wrapper.
+
+    When a plugin sees a test, it will always see an instance of this
+    class. To access the actual test case that will be run, access the
+    test property of the nose.case.Test instance.
+    """
+    __test__ = False # do not collect
+    def __init__(self, test, config=None, resultProxy=None):
+        # sanity check
+        if not callable(test):
+            raise TypeError("nose.case.Test called with argument %r that "
+                            "is not callable. A callable is required."
+                            % test)
+        self.test = test
+        if config is None:
+            config = Config()
+        self.config = config
+        self.tbinfo = None
+        self.capturedOutput = None
+        self.resultProxy = resultProxy
+        self.plugins = config.plugins
+        self.passed = None
+        unittest.TestCase.__init__(self)
+
+    def __call__(self, *arg, **kwarg):
+        return self.run(*arg, **kwarg)
+
+    def __str__(self):
+        name = self.plugins.testName(self)
+        if name is not None:
+            return name
+        return str(self.test)
+
+    def __repr__(self):
+        return "Test(%r)" % self.test
+
+    def afterTest(self, result):
+        """Called after test is complete (after result.stopTest)
+        """
+        try:
+            afterTest = result.afterTest
+        except AttributeError:
+            pass
+        else:
+            afterTest(self.test)
+
+    def beforeTest(self, result):
+        """Called before test is run (before result.startTest)
+        """
+        try:
+            beforeTest = result.beforeTest
+        except AttributeError:
+            pass
+        else:
+            beforeTest(self.test)
+
+    def exc_info(self):
+        """Extract exception info.
+        """
+        exc, exv, tb = sys.exc_info()
+        return (exc, exv, tb)
+
+    def id(self):
+        """Get a short(er) description of the test
+        """
+        return self.test.id()
+
+    def address(self):
+        """Return a round-trip name for this test, a name that can be
+        fed back as input to loadTestByName and (assuming the same
+        plugin configuration) result in the loading of this test.
+        """
+        if hasattr(self.test, 'address'):
+            return self.test.address()
+        else:
+            # not a nose case
+            return test_address(self.test)
+
+    def _context(self):
+        try:
+            return self.test.context
+        except AttributeError:
+            pass
+        try:
+            return self.test.__class__
+        except AttributeError:
+            pass
+        try:
+            return resolve_name(self.test.__module__)
+        except AttributeError:
+            pass
+        return None
+    context = property(_context, None, None,
+                      """Get the context object of this test (if any).""")
+
+    def run(self, result):
+        """Modified run for the test wrapper.
+
+        From here we don't call result.startTest or stopTest or
+        addSuccess.  The wrapper calls addError/addFailure only if its
+        own setup or teardown fails, or running the wrapped test fails
+        (eg, if the wrapped "test" is not callable).
+
+        Two additional methods are called, beforeTest and
+        afterTest. These give plugins a chance to modify the wrapped
+        test before it is called and do cleanup after it is
+        called. They are called unconditionally.
+        """
+        if self.resultProxy:
+            result = self.resultProxy(result, self)
+        try:
+            try:
+                self.beforeTest(result)
+                self.runTest(result)
+            except KeyboardInterrupt:
+                raise
+            except:
+                err = sys.exc_info()
+                result.addError(self, err)
+        finally:
+            self.afterTest(result)
+
+    def runTest(self, result):
+        """Run the test. Plugins may alter the test by returning a
+        value from prepareTestCase. The value must be callable and
+        must accept one argument, the result instance.
+        """
+        test = self.test
+        plug_test = self.config.plugins.prepareTestCase(self)
+        if plug_test is not None:
+            test = plug_test
+        test(result)
+
+    def shortDescription(self):
+        desc = self.plugins.describeTest(self)
+        if desc is not None:
+            return desc
+        # work around bug in unittest.TestCase.shortDescription
+        # with multiline docstrings.
+        test = self.test
+        try:
+            test._testMethodDoc = test._testMethodDoc.strip()  # 2.5
+        except AttributeError:
+            try:
+                # 2.4 and earlier
+                test._TestCase__testMethodDoc = \
+                    test._TestCase__testMethodDoc.strip()
+            except AttributeError:
+                pass
+        # 2.7 compat: shortDescription() always returns something
+        # which is a change from 2.6 and below, and breaks the
+        # testName plugin call.
+        try:
+            desc = self.test.shortDescription()
+        except Exception:
+            # this is probably caused by a problem in test.__str__() and is
+            # only triggered by python 3.1's unittest!
+            pass
+        try:
+            if desc == str(self.test):
+                return
+        except Exception:
+            # If str() triggers an exception then ignore it.
+            # see issue 422
+            pass
+        return desc
+
+
+class TestBase(unittest.TestCase):
+    """Common functionality for FunctionTestCase and MethodTestCase.
+    """
+    __test__ = False # do not collect
+
+    def id(self):
+        return str(self)
+
+    def runTest(self):
+        self.test(*self.arg)
+
+    def shortDescription(self):
+        if hasattr(self.test, 'description'):
+            return self.test.description
+        func, arg = self._descriptors()
+        doc = getattr(func, '__doc__', None)
+        if not doc:
+            doc = str(self)
+        return doc.strip().split("\n")[0].strip()
+
+
+class FunctionTestCase(TestBase):
+    """TestCase wrapper for test functions.
+
+    Don't use this class directly; it is used internally in nose to
+    create test cases for test functions.
+    """
+    __test__ = False # do not collect
+
+    def __init__(self, test, setUp=None, tearDown=None, arg=tuple(),
+                 descriptor=None):
+        """Initialize the MethodTestCase.
+
+        Required argument:
+
+        * test -- the test function to call.
+
+        Optional arguments:
+
+        * setUp -- function to run at setup.
+
+        * tearDown -- function to run at teardown.
+
+        * arg -- arguments to pass to the test function. This is to support
+          generator functions that yield arguments.
+
+        * descriptor -- the function, other than the test, that should be used
+          to construct the test name. This is to support generator functions.
+        """
+
+        self.test = test
+        self.setUpFunc = setUp
+        self.tearDownFunc = tearDown
+        self.arg = arg
+        self.descriptor = descriptor
+        TestBase.__init__(self)
+
+    def address(self):
+        """Return a round-trip name for this test, a name that can be
+        fed back as input to loadTestByName and (assuming the same
+        plugin configuration) result in the loading of this test.
+        """
+        if self.descriptor is not None:
+            return test_address(self.descriptor)
+        else:
+            return test_address(self.test)
+
+    def _context(self):
+        return resolve_name(self.test.__module__)
+    context = property(_context, None, None,
+                      """Get context (module) of this test""")
+
+    def setUp(self):
+        """Run any setup function attached to the test function
+        """
+        if self.setUpFunc:
+            self.setUpFunc()
+        else:
+            names = ('setup', 'setUp', 'setUpFunc')
+            try_run(self.test, names)
+
+    def tearDown(self):
+        """Run any teardown function attached to the test function
+        """
+        if self.tearDownFunc:
+            self.tearDownFunc()
+        else:
+            names = ('teardown', 'tearDown', 'tearDownFunc')
+            try_run(self.test, names)
+
+    def __str__(self):
+        func, arg = self._descriptors()
+        if hasattr(func, 'compat_func_name'):
+            name = func.compat_func_name
+        else:
+            name = func.__name__
+        name = "%s.%s" % (func.__module__, name)
+        if arg:
+            name = "%s%s" % (name, arg)
+        # FIXME need to include the full dir path to disambiguate
+        # in cases where test module of the same name was seen in
+        # another directory (old fromDirectory)
+        return name
+    __repr__ = __str__
+
+    def _descriptors(self):
+        """Get the descriptors of the test function: the function and
+        arguments that will be used to construct the test name. In
+        most cases, this is the function itself and no arguments. For
+        tests generated by generator functions, the original
+        (generator) function and args passed to the generated function
+        are returned.
+        """
+        if self.descriptor:
+            return self.descriptor, self.arg
+        else:
+            return self.test, self.arg
+
+
+class MethodTestCase(TestBase):
+    """Test case wrapper for test methods.
+
+    Don't use this class directly; it is used internally in nose to
+    create test cases for test methods.
+    """
+    __test__ = False # do not collect
+
+    def __init__(self, method, test=None, arg=tuple(), descriptor=None):
+        """Initialize the MethodTestCase.
+
+        Required argument:
+
+        * method -- the method to call, may be bound or unbound. In either
+          case, a new instance of the method's class will be instantiated to
+          make the call.  Note: In Python 3.x, if using an unbound method, you
+          must wrap it using pyversion.unbound_method.
+
+        Optional arguments:
+
+        * test -- the test function to call. If this is passed, it will be
+          called instead of getting a new bound method of the same name as the
+          desired method from the test instance. This is to support generator
+          methods that yield inline functions.
+
+        * arg -- arguments to pass to the test function. This is to support
+          generator methods that yield arguments.
+
+        * descriptor -- the function, other than the test, that should be used
+          to construct the test name. This is to support generator methods.
+        """
+        self.method = method
+        self.test = test
+        self.arg = arg
+        self.descriptor = descriptor
+        if isfunction(method):
+            raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase")
+        self.cls = method.im_class
+        self.inst = self.cls()
+        if self.test is None:
+            method_name = self.method.__name__
+            self.test = getattr(self.inst, method_name)
+        TestBase.__init__(self)
+
+    def __str__(self):
+        func, arg = self._descriptors()
+        if hasattr(func, 'compat_func_name'):
+            name = func.compat_func_name
+        else:
+            name = func.__name__
+        name = "%s.%s.%s" % (self.cls.__module__,
+                             self.cls.__name__,
+                             name)
+        if arg:
+            name = "%s%s" % (name, arg)
+        return name
+    __repr__ = __str__
+
+    def address(self):
+        """Return a round-trip name for this test, a name that can be
+        fed back as input to loadTestByName and (assuming the same
+        plugin configuration) result in the loading of this test.
+        """
+        if self.descriptor is not None:
+            return test_address(self.descriptor)
+        else:
+            return test_address(self.method)
+
+    def _context(self):
+        return self.cls
+    context = property(_context, None, None,
+                      """Get context (class) of this test""")
+
+    def setUp(self):
+        try_run(self.inst, ('setup', 'setUp'))
+
+    def tearDown(self):
+        try_run(self.inst, ('teardown', 'tearDown'))
+
+    def _descriptors(self):
+        """Get the descriptors of the test method: the method and
+        arguments that will be used to construct the test name. In
+        most cases, this is the method itself and no arguments. For
+        tests generated by generator methods, the original
+        (generator) method and args passed to the generated method 
+        or function are returned.
+        """
+        if self.descriptor:
+            return self.descriptor, self.arg
+        else:
+            return self.method, self.arg
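These wrappers are normally constructed by nose.loader.TestLoader, but a
minimal hand-rolled sketch (the function and names below are hypothetical)
shows the mechanics of turning a plain function into a runnable case::

    import unittest
    from nose.case import FunctionTestCase, Test

    def check_addition():                 # hypothetical test function
        assert 1 + 1 == 2

    # Wrap the function, then wrap that in the universal Test case.
    case = Test(FunctionTestCase(check_addition))

    result = unittest.TestResult()
    case(result)                          # Test.__call__ delegates to Test.run
    print result.wasSuccessful()          # True

Plugins always see the outer Test instance; the wrapped case is available as
its .test attribute, exactly as the class docstring describes.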
diff --git a/lib/spack/external/nose/commands.py b/lib/spack/external/nose/commands.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef0e9caed4e391f0568e9a22d8a4a6641bf97885
--- /dev/null
+++ b/lib/spack/external/nose/commands.py
@@ -0,0 +1,172 @@
+"""
+nosetests setuptools command
+----------------------------
+
+The easiest way to run tests with nose is to use the `nosetests` setuptools
+command::
+
+  python setup.py nosetests
+
+This command has one *major* benefit over the standard `test` command: *all
+nose plugins are supported*.
+
+To configure the `nosetests` command, add a [nosetests] section to your
+setup.cfg. The [nosetests] section can contain any command line arguments that
+nosetests supports. The differences between issuing an option on the command
+line and adding it to setup.cfg are:
+
+* In setup.cfg, the -- prefix must be excluded
+* In setup.cfg, command line flags that take no arguments must be given an
+  argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
+
+Here's an example [nosetests] setup.cfg section::
+
+  [nosetests]
+  verbosity=1
+  detailed-errors=1
+  with-coverage=1
+  cover-package=nose
+  debug=nose.loader
+  pdb=1
+  pdb-failures=1
+
+If you commonly run nosetests with a large number of options, using
+the nosetests setuptools command and configuring with setup.cfg can
+make running your tests much less tedious. (Note that the same options
+and format supported in setup.cfg are supported in all other config
+files, and the nosetests script will also load config files.)
+
+Another reason to run tests with the command is that the command will
+install packages listed in your `tests_require`, as well as doing a
+complete build of your package before running tests. For packages with
+dependencies or that build C extensions, using the setuptools command
+can be more convenient than building by hand and running the nosetests
+script.
+
+Bootstrapping
+-------------
+
+If you are distributing your project and want users to be able to run tests
+without having to install nose themselves, add nose to the setup_requires
+section of your setup()::
+
+  setup(
+      # ...
+      setup_requires=['nose>=1.0']
+      )
+
+This will direct setuptools to download and activate nose during the setup
+process, making the ``nosetests`` command available.
+
+"""
+try:
+    from setuptools import Command
+except ImportError:
+    Command = nosetests = None
+else:
+    from nose.config import Config, option_blacklist, user_config_files, \
+        flag, _bool
+    from nose.core import TestProgram
+    from nose.plugins import DefaultPluginManager
+
+
+    def get_user_options(parser):
+        """convert a optparse option list into a distutils option tuple list"""
+        opt_list = []
+        for opt in parser.option_list:
+            if opt._long_opts[0][2:] in option_blacklist: 
+                continue
+            long_name = opt._long_opts[0][2:]
+            if opt.action not in ('store_true', 'store_false'):
+                long_name = long_name + "="
+            short_name = None
+            if opt._short_opts:
+                short_name = opt._short_opts[0][1:]
+            opt_list.append((long_name, short_name, opt.help or ""))
+        return opt_list
+
+
+    class nosetests(Command):
+        description = "Run unit tests using nosetests"
+        __config = Config(files=user_config_files(),
+                          plugins=DefaultPluginManager())
+        __parser = __config.getParser()
+        user_options = get_user_options(__parser)
+
+        def initialize_options(self):
+            """create the member variables, but change hyphens to
+            underscores
+            """
+
+            self.option_to_cmds = {}
+            for opt in self.__parser.option_list:
+                cmd_name = opt._long_opts[0][2:]
+                option_name = cmd_name.replace('-', '_')
+                self.option_to_cmds[option_name] = cmd_name
+                setattr(self, option_name, None)
+            self.attr = None
+
+        def finalize_options(self):
+            """nothing to do here"""
+            pass
+
+        def run(self):
+            """ensure tests are capable of being run, then
+            run nose.main with a reconstructed argument list"""
+            if getattr(self.distribution, 'use_2to3', False):
+                # If we run 2to3 we cannot do this in place:
+
+                # Ensure metadata is up-to-date
+                build_py = self.get_finalized_command('build_py')
+                build_py.inplace = 0
+                build_py.run()
+                bpy_cmd = self.get_finalized_command("build_py")
+                build_path = bpy_cmd.build_lib
+
+                # Build extensions
+                egg_info = self.get_finalized_command('egg_info')
+                egg_info.egg_base = build_path
+                egg_info.run()
+
+                build_ext = self.get_finalized_command('build_ext')
+                build_ext.inplace = 0
+                build_ext.run()
+            else:
+                self.run_command('egg_info')
+
+                # Build extensions in-place
+                build_ext = self.get_finalized_command('build_ext')
+                build_ext.inplace = 1
+                build_ext.run()
+
+            if self.distribution.install_requires:
+                self.distribution.fetch_build_eggs(
+                    self.distribution.install_requires)
+            if self.distribution.tests_require:
+                self.distribution.fetch_build_eggs(
+                    self.distribution.tests_require)
+
+            ei_cmd = self.get_finalized_command("egg_info")
+            argv = ['nosetests', '--where', ei_cmd.egg_base] 
+            for (option_name, cmd_name) in self.option_to_cmds.items():
+                if option_name in option_blacklist:
+                    continue
+                value = getattr(self, option_name)
+                if value is not None:
+                    argv.extend(
+                        self.cfgToArg(option_name.replace('_', '-'), value))
+            TestProgram(argv=argv, config=self.__config)
+
+        def cfgToArg(self, optname, value):
+            argv = []
+            long_optname = '--' + optname
+            opt = self.__parser.get_option(long_optname)
+            if opt.action in ('store_true', 'store_false'):
+                if not flag(value):
+                    raise ValueError("Invalid value '%s' for '%s'" % (
+                        value, optname))
+                if _bool(value):
+                    argv.append(long_optname)
+            else:
+                argv.extend([long_optname, value])
+            return argv
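Pulling the module docstring together, a hedged end-to-end sketch (package
name and layout are hypothetical)::

    # setup.py
    from setuptools import setup

    setup(
        name='mypackage',                  # hypothetical
        version='0.1',
        packages=['mypackage'],
        setup_requires=['nose>=1.0'],      # bootstraps the nosetests command
    )

Running ``python setup.py nosetests`` then builds the package, and cfgToArg()
above turns each [nosetests] entry in setup.cfg back into command-line form:
``verbosity=2`` becomes ``--verbosity 2``, while a flag like ``pdb=1`` becomes
a bare ``--pdb``.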
diff --git a/lib/spack/external/nose/config.py b/lib/spack/external/nose/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..125eb5579d2910a3847e500b0b63b44ae6828948
--- /dev/null
+++ b/lib/spack/external/nose/config.py
@@ -0,0 +1,661 @@
+import logging
+import optparse
+import os
+import re
+import sys
+import ConfigParser
+from optparse import OptionParser
+from nose.util import absdir, tolist
+from nose.plugins.manager import NoPlugins
+from warnings import warn, filterwarnings
+
+log = logging.getLogger(__name__)
+
+# not allowed in config files
+option_blacklist = ['help', 'verbose']
+
+config_files = [
+    # Linux users will prefer this
+    "~/.noserc",
+    # Windows users will prefer this
+    "~/nose.cfg"
+    ]
+
+# platforms on which the exe check defaults to off
+# Windows and IronPython
+exe_allowed_platforms = ('win32', 'cli')
+
+filterwarnings("always", category=DeprecationWarning,
+               module=r'(.*\.)?nose\.config')
+
+class NoSuchOptionError(Exception):
+    def __init__(self, name):
+        Exception.__init__(self, name)
+        self.name = name
+
+
+class ConfigError(Exception):
+    pass
+
+
+class ConfiguredDefaultsOptionParser(object):
+    """
+    Handler for options from the command line and config files.
+    """
+    def __init__(self, parser, config_section, error=None, file_error=None):
+        self._parser = parser
+        self._config_section = config_section
+        if error is None:
+            error = self._parser.error
+        self._error = error
+        if file_error is None:
+            file_error = lambda msg, **kw: error(msg)
+        self._file_error = file_error
+
+    def _configTuples(self, cfg, filename):
+        config = []
+        if self._config_section in cfg.sections():
+            for name, value in cfg.items(self._config_section):
+                config.append((name, value, filename))
+        return config
+
+    def _readFromFilenames(self, filenames):
+        config = []
+        for filename in filenames:
+            cfg = ConfigParser.RawConfigParser()
+            try:
+                cfg.read(filename)
+            except ConfigParser.Error, exc:
+                raise ConfigError("Error reading config file %r: %s" %
+                                  (filename, str(exc)))
+            config.extend(self._configTuples(cfg, filename))
+        return config
+
+    def _readFromFileObject(self, fh):
+        cfg = ConfigParser.RawConfigParser()
+        try:
+            filename = fh.name
+        except AttributeError:
+            filename = '<???>'
+        try:
+            cfg.readfp(fh)
+        except ConfigParser.Error, exc:
+            raise ConfigError("Error reading config file %r: %s" %
+                              (filename, str(exc)))
+        return self._configTuples(cfg, filename)
+
+    def _readConfiguration(self, config_files):
+        try:
+            config_files.readline
+        except AttributeError:
+            filename_or_filenames = config_files
+            if isinstance(filename_or_filenames, basestring):
+                filenames = [filename_or_filenames]
+            else:
+                filenames = filename_or_filenames
+            config = self._readFromFilenames(filenames)
+        else:
+            fh = config_files
+            config = self._readFromFileObject(fh)
+        return config
+
+    def _processConfigValue(self, name, value, values, parser):
+        opt_str = '--' + name
+        option = parser.get_option(opt_str)
+        if option is None:
+            raise NoSuchOptionError(name)
+        else:
+            option.process(opt_str, value, values, parser)
+
+    def _applyConfigurationToValues(self, parser, config, values):
+        for name, value, filename in config:
+            if name in option_blacklist:
+                continue
+            try:
+                self._processConfigValue(name, value, values, parser)
+            except NoSuchOptionError, exc:
+                self._file_error(
+                    "Error reading config file %r: "
+                    "no such option %r" % (filename, exc.name),
+                    name=name, filename=filename)
+            except optparse.OptionValueError, exc:
+                msg = str(exc).replace('--' + name, repr(name), 1)
+                self._file_error("Error reading config file %r: "
+                                 "%s" % (filename, msg),
+                                 name=name, filename=filename)
+
+    def parseArgsAndConfigFiles(self, args, config_files):
+        values = self._parser.get_default_values()
+        try:
+            config = self._readConfiguration(config_files)
+        except ConfigError, exc:
+            self._error(str(exc))
+        else:
+            try:
+                self._applyConfigurationToValues(self._parser, config, values)
+            except ConfigError, exc:
+                self._error(str(exc))
+        return self._parser.parse_args(args, values)
+
+
+class Config(object):
+    """nose configuration.
+
+    Instances of Config are used throughout nose to configure
+    behavior, including plugin lists. Here are the default values for
+    all config keys::
+
+      self.env = env = kw.pop('env', {})
+      self.args = ()
+      self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+      self.addPaths = not env.get('NOSE_NOPATH', False)
+      self.configSection = 'nosetests'
+      self.debug = env.get('NOSE_DEBUG')
+      self.debugLog = env.get('NOSE_DEBUG_LOG')
+      self.exclude = None
+      self.getTestCaseNamesCompat = False
+      self.includeExe = env.get('NOSE_INCLUDE_EXE',
+                                sys.platform in exe_allowed_platforms)
+      self.ignoreFiles = (re.compile(r'^\.'),
+                          re.compile(r'^_'),
+                          re.compile(r'^setup\.py$')
+                          )
+      self.include = None
+      self.loggingConfig = None
+      self.logStream = sys.stderr
+      self.options = NoOptions()
+      self.parser = None
+      self.plugins = NoPlugins()
+      self.srcDirs = ('lib', 'src')
+      self.runOnInit = True
+      self.stopOnError = env.get('NOSE_STOP', False)
+      self.stream = sys.stderr
+      self.testNames = ()
+      self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+      self.where = ()
+      self.py3where = ()
+      self.workingDir = None
+    """
+
+    def __init__(self, **kw):
+        self.env = env = kw.pop('env', {})
+        self.args = ()
+        self.testMatchPat = env.get('NOSE_TESTMATCH',
+                                    r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
+        self.testMatch = re.compile(self.testMatchPat)
+        self.addPaths = not env.get('NOSE_NOPATH', False)
+        self.configSection = 'nosetests'
+        self.debug = env.get('NOSE_DEBUG')
+        self.debugLog = env.get('NOSE_DEBUG_LOG')
+        self.exclude = None
+        self.getTestCaseNamesCompat = False
+        self.includeExe = env.get('NOSE_INCLUDE_EXE',
+                                  sys.platform in exe_allowed_platforms)
+        self.ignoreFilesDefaultStrings = [r'^\.',
+                                          r'^_',
+                                          r'^setup\.py$',
+                                          ]
+        self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
+        self.include = None
+        self.loggingConfig = None
+        self.logStream = sys.stderr
+        self.options = NoOptions()
+        self.parser = None
+        self.plugins = NoPlugins()
+        self.srcDirs = ('lib', 'src')
+        self.runOnInit = True
+        self.stopOnError = env.get('NOSE_STOP', False)
+        self.stream = sys.stderr
+        self.testNames = []
+        self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+        self.where = ()
+        self.py3where = ()
+        self.workingDir = os.getcwd()
+        self.traverseNamespace = False
+        self.firstPackageWins = False
+        self.parserClass = OptionParser
+        self.worker = False
+
+        self._default = self.__dict__.copy()
+        self.update(kw)
+        self._orig = self.__dict__.copy()
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state['stream']
+        del state['_orig']
+        del state['_default']
+        del state['env']
+        del state['logStream']
+        # FIXME remove plugins, have only plugin manager class
+        state['plugins'] = self.plugins.__class__
+        return state
+
+    def __setstate__(self, state):
+        plugincls = state.pop('plugins')
+        self.update(state)
+        self.worker = True
+        # FIXME won't work for static plugin lists
+        self.plugins = plugincls()
+        self.plugins.loadPlugins()
+        # needed so .can_configure gets set appropriately
+        dummy_parser = self.parserClass()
+        self.plugins.addOptions(dummy_parser, {})
+        self.plugins.configure(self.options, self)
+
+    def __repr__(self):
+        d = self.__dict__.copy()
+        # don't expose env, could include sensitive info
+        d['env'] = {}
+        keys = [ k for k in d.keys()
+                 if not k.startswith('_') ]
+        keys.sort()
+        return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
+                                          for k in keys ])
+    __str__ = __repr__
+
+    def _parseArgs(self, argv, cfg_files):
+        def warn_sometimes(msg, name=None, filename=None):
+            if (hasattr(self.plugins, 'excludedOption') and
+                self.plugins.excludedOption(name)):
+                msg = ("Option %r in config file %r ignored: "
+                       "excluded by runtime environment" %
+                       (name, filename))
+                warn(msg, RuntimeWarning)
+            else:
+                raise ConfigError(msg)
+        parser = ConfiguredDefaultsOptionParser(
+            self.getParser(), self.configSection, file_error=warn_sometimes)
+        return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
+
+    def configure(self, argv=None, doc=None):
+        """Configure the nose running environment. Execute configure before
+        collecting tests with nose.TestCollector to enable output capture and
+        other features.
+        """
+        env = self.env
+        if argv is None:
+            argv = sys.argv
+
+        cfg_files = getattr(self, 'files', [])
+        options, args = self._parseArgs(argv, cfg_files)
+        # If -c --config has been specified on command line,
+        # load those config files and reparse
+        if getattr(options, 'files', []):
+            options, args = self._parseArgs(argv, options.files)
+
+        self.options = options
+        if args:
+            self.testNames = args
+        if options.testNames is not None:
+            self.testNames.extend(tolist(options.testNames))
+
+        if options.py3where is not None:
+            if sys.version_info >= (3,):
+                options.where = options.py3where
+
+        # `where` is an append action, so it can't have a default value
+        # in the parser, or that default will always be in the list
+        if not options.where:
+            options.where = env.get('NOSE_WHERE', None)
+
+        # include and exclude also
+        if not options.ignoreFiles:
+            options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
+        if not options.include:
+            options.include = env.get('NOSE_INCLUDE', [])
+        if not options.exclude:
+            options.exclude = env.get('NOSE_EXCLUDE', [])
+
+        self.addPaths = options.addPaths
+        self.stopOnError = options.stopOnError
+        self.verbosity = options.verbosity
+        self.includeExe = options.includeExe
+        self.traverseNamespace = options.traverseNamespace
+        self.debug = options.debug
+        self.debugLog = options.debugLog
+        self.loggingConfig = options.loggingConfig
+        self.firstPackageWins = options.firstPackageWins
+        self.configureLogging()
+
+        if not options.byteCompile:
+            sys.dont_write_bytecode = True
+
+        if options.where is not None:
+            self.configureWhere(options.where)
+
+        if options.testMatch:
+            self.testMatch = re.compile(options.testMatch)
+
+        if options.ignoreFiles:
+            self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
+            log.info("Ignoring files matching %s", options.ignoreFiles)
+        else:
+            log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
+
+        if options.include:
+            self.include = map(re.compile, tolist(options.include))
+            log.info("Including tests matching %s", options.include)
+
+        if options.exclude:
+            self.exclude = map(re.compile, tolist(options.exclude))
+            log.info("Excluding tests matching %s", options.exclude)
+
+        # When listing plugins we don't want to run them
+        if not options.showPlugins:
+            self.plugins.configure(options, self)
+            self.plugins.begin()
+
+    def configureLogging(self):
+        """Configure logging for nose, or optionally other packages. Any logger
+        name may be set with the debug option, and that logger will be set to
+        debug level and be assigned the same handler as the nose loggers, unless
+        it already has a handler.
+        """
+        if self.loggingConfig:
+            from logging.config import fileConfig
+            fileConfig(self.loggingConfig)
+            return
+
+        format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
+        if self.debugLog:
+            handler = logging.FileHandler(self.debugLog)
+        else:
+            handler = logging.StreamHandler(self.logStream)
+        handler.setFormatter(format)
+
+        logger = logging.getLogger('nose')
+        logger.propagate = 0
+
+        # only add our default handler if there isn't already one there
+        # this avoids annoying duplicate log messages.
+        found = False
+        if self.debugLog:
+            debugLogAbsPath = os.path.abspath(self.debugLog)
+            for h in logger.handlers:
+                if type(h) == logging.FileHandler and \
+                        h.baseFilename == debugLogAbsPath:
+                    found = True
+        else:
+            for h in logger.handlers:
+                if type(h) == logging.StreamHandler and \
+                        h.stream == self.logStream:
+                    found = True
+        if not found:
+            logger.addHandler(handler)
+
+        # default level
+        lvl = logging.WARNING
+        if self.verbosity >= 5:
+            lvl = 0
+        elif self.verbosity >= 4:
+            lvl = logging.DEBUG
+        elif self.verbosity >= 3:
+            lvl = logging.INFO
+        logger.setLevel(lvl)
+
+        # individual overrides
+        if self.debug:
+            # no blanks
+            debug_loggers = [ name for name in self.debug.split(',')
+                              if name ]
+            for logger_name in debug_loggers:
+                l = logging.getLogger(logger_name)
+                l.setLevel(logging.DEBUG)
+                if not l.handlers and not logger_name.startswith('nose'):
+                    l.addHandler(handler)
+
+    def configureWhere(self, where):
+        """Configure the working directory or directories for the test run.
+        """
+        from nose.importer import add_path
+        self.workingDir = None
+        where = tolist(where)
+        warned = False
+        for path in where:
+            if not self.workingDir:
+                abs_path = absdir(path)
+                if abs_path is None:
+                    raise ValueError("Working directory '%s' not found, or "
+                                     "not a directory" % path)
+                log.info("Set working dir to %s", abs_path)
+                self.workingDir = abs_path
+                if self.addPaths and \
+                       os.path.exists(os.path.join(abs_path, '__init__.py')):
+                    log.info("Working directory %s is a package; "
+                             "adding to sys.path" % abs_path)
+                    add_path(abs_path)
+                continue
+            if not warned:
+                warn("Use of multiple -w arguments is deprecated and "
+                     "support may be removed in a future release. You can "
+                     "get the same behavior by passing directories without "
+                     "the -w argument on the command line, or by using the "
+                     "--tests argument in a configuration file.",
+                     DeprecationWarning)
+                warned = True
+            self.testNames.append(path)
+
+    def default(self):
+        """Reset all config values to defaults.
+        """
+        self.__dict__.update(self._default)
+
+    def getParser(self, doc=None):
+        """Get the command line option parser.
+        """
+        if self.parser:
+            return self.parser
+        env = self.env
+        parser = self.parserClass(doc)
+        parser.add_option(
+            "-V","--version", action="store_true",
+            dest="version", default=False,
+            help="Output nose version and exit")
+        parser.add_option(
+            "-p", "--plugins", action="store_true",
+            dest="showPlugins", default=False,
+            help="Output list of available plugins and exit. Combine with "
+            "higher verbosity for greater detail")
+        parser.add_option(
+            "-v", "--verbose",
+            action="count", dest="verbosity",
+            default=self.verbosity,
+            help="Be more verbose. [NOSE_VERBOSE]")
+        parser.add_option(
+            "--verbosity", action="store", dest="verbosity",
+            metavar='VERBOSITY',
+            type="int", help="Set verbosity; --verbosity=2 is "
+            "the same as -v")
+        parser.add_option(
+            "-q", "--quiet", action="store_const", const=0, dest="verbosity",
+            help="Be less verbose")
+        parser.add_option(
+            "-c", "--config", action="append", dest="files",
+            metavar="FILES",
+            help="Load configuration from config file(s). May be specified "
+            "multiple times; in that case, all config files will be "
+            "loaded and combined")
+        parser.add_option(
+            "-w", "--where", action="append", dest="where",
+            metavar="WHERE",
+            help="Look for tests in this directory. "
+            "May be specified multiple times. The first directory passed "
+            "will be used as the working directory, in place of the current "
+            "working directory, which is the default. Others will be added "
+            "to the list of tests to execute. [NOSE_WHERE]"
+            )
+        parser.add_option(
+            "--py3where", action="append", dest="py3where",
+            metavar="PY3WHERE",
+            help="Look for tests in this directory under Python 3.x. "
+            "Functions the same as 'where', but only applies if running under "
+            "Python 3.x or above.  Note that, if present under 3.x, this "
+            "option completely replaces any directories specified with "
+            "'where', so the 'where' option becomes ineffective. "
+            "[NOSE_PY3WHERE]"
+            )
+        parser.add_option(
+            "-m", "--match", "--testmatch", action="store",
+            dest="testMatch", metavar="REGEX",
+            help="Files, directories, function names, and class names "
+            "that match this regular expression are considered tests.  "
+            "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
+            default=self.testMatchPat)
+        parser.add_option(
+            "--tests", action="store", dest="testNames", default=None,
+            metavar='NAMES',
+            help="Run these tests (comma-separated list). This argument is "
+            "useful mainly from configuration files; on the command line, "
+            "just pass the tests to run as additional arguments with no "
+            "switch.")
+        parser.add_option(
+            "-l", "--debug", action="store",
+            dest="debug", default=self.debug,
+            help="Activate debug logging for one or more systems. "
+            "Available debug loggers: nose, nose.importer, "
+            "nose.inspector, nose.plugins, nose.result and "
+            "nose.selector. Separate multiple names with a comma.")
+        parser.add_option(
+            "--debug-log", dest="debugLog", action="store",
+            default=self.debugLog, metavar="FILE",
+            help="Log debug messages to this file "
+            "(default: sys.stderr)")
+        parser.add_option(
+            "--logging-config", "--log-config",
+            dest="loggingConfig", action="store",
+            default=self.loggingConfig, metavar="FILE",
+            help="Load logging config from this file -- bypasses all other"
+            " logging config settings.")
+        parser.add_option(
+            "-I", "--ignore-files", action="append", dest="ignoreFiles",
+            metavar="REGEX",
+            help="Completely ignore any file that matches this regular "
+            "expression. Takes precedence over any other settings or "
+            "plugins. "
+            "Specifying this option will replace the default setting. "
+            "Specify this option multiple times "
+            "to add more regular expressions [NOSE_IGNORE_FILES]")
+        parser.add_option(
+            "-e", "--exclude", action="append", dest="exclude",
+            metavar="REGEX",
+            help="Don't run tests that match regular "
+            "expression [NOSE_EXCLUDE]")
+        parser.add_option(
+            "-i", "--include", action="append", dest="include",
+            metavar="REGEX",
+            help="This regular expression will be applied to files, "
+            "directories, function names, and class names for a chance "
+            "to include additional tests that do not match TESTMATCH.  "
+            "Specify this option multiple times "
+            "to add more regular expressions [NOSE_INCLUDE]")
+        parser.add_option(
+            "-x", "--stop", action="store_true", dest="stopOnError",
+            default=self.stopOnError,
+            help="Stop running tests after the first error or failure")
+        parser.add_option(
+            "-P", "--no-path-adjustment", action="store_false",
+            dest="addPaths",
+            default=self.addPaths,
+            help="Don't make any changes to sys.path when "
+            "loading tests [NOSE_NOPATH]")
+        parser.add_option(
+            "--exe", action="store_true", dest="includeExe",
+            default=self.includeExe,
+            help="Look for tests in python modules that are "
+            "executable. Normal behavior is to exclude executable "
+            "modules, since they may not be import-safe "
+            "[NOSE_INCLUDE_EXE]")
+        parser.add_option(
+            "--noexe", action="store_false", dest="includeExe",
+            help="DO NOT look for tests in python modules that are "
+            "executable. (The default on the windows platform is to "
+            "do so.)")
+        parser.add_option(
+            "--traverse-namespace", action="store_true",
+            default=self.traverseNamespace, dest="traverseNamespace",
+            help="Traverse through all path entries of a namespace package")
+        parser.add_option(
+            "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
+            action="store_true", default=False, dest="firstPackageWins",
+            help="nose's importer will normally evict a package from sys."
+            "modules if it sees a package with the same name in a different "
+            "location. Set this option to disable that behavior.")
+        parser.add_option(
+            "--no-byte-compile",
+            action="store_false", default=True, dest="byteCompile",
+            help="Prevent nose from byte-compiling the source into .pyc files "
+            "while nose is scanning for and running tests.")
+
+        self.plugins.loadPlugins()
+        self.pluginOpts(parser)
+
+        self.parser = parser
+        return parser
+
+    def help(self, doc=None):
+        """Return the generated help message
+        """
+        return self.getParser(doc).format_help()
+
+    def pluginOpts(self, parser):
+        self.plugins.addOptions(parser, self.env)
+
+    def reset(self):
+        self.__dict__.update(self._orig)
+
+    def todict(self):
+        return self.__dict__.copy()
+
+    def update(self, d):
+        self.__dict__.update(d)
+
+
+class NoOptions(object):
+    """Options container that returns None for all options.
+    """
+    def __getstate__(self):
+        return {}
+
+    def __setstate__(self, state):
+        pass
+
+    def __getnewargs__(self):
+        return ()
+
+    def __nonzero__(self):
+        return False
+
+
+def user_config_files():
+    """Return path to any existing user config files
+    """
+    return filter(os.path.exists,
+                  map(os.path.expanduser, config_files))
+
+
+def all_config_files():
+    """Return path to any existing user config files, plus any setup.cfg
+    in the current working directory.
+    """
+    user = user_config_files()
+    if os.path.exists('setup.cfg'):
+        return user + ['setup.cfg']
+    return user
+
+
+# used when parsing config files
+def flag(val):
+    """Does the value look like an on/off flag?"""
+    if val == 1:
+        return True
+    elif val == 0:
+        return False
+    val = str(val)
+    if len(val) > 5:
+        return False
+    return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
+
+
+def _bool(val):
+    return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
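A small sketch of the helpers defined in this module (values are
illustrative)::

    from nose.config import Config, flag, _bool

    cfg = Config(env={'NOSE_VERBOSE': '2'})
    print cfg.verbosity                  # 2, read from the env mapping

    print flag('TRUE'), flag('maybe')    # True False: does it look like a flag?
    print _bool('T'), _bool('0')         # True False: is the flag on?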
diff --git a/lib/spack/external/nose/core.py b/lib/spack/external/nose/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..49e7939b986676b292e18812da93c3561d7155af
--- /dev/null
+++ b/lib/spack/external/nose/core.py
@@ -0,0 +1,341 @@
+"""Implements nose test program and collector.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import time
+import unittest
+
+from nose.config import Config, all_config_files
+from nose.loader import defaultTestLoader
+from nose.plugins.manager import PluginManager, DefaultPluginManager, \
+     RestrictedPluginManager
+from nose.result import TextTestResult
+from nose.suite import FinalizingSuiteWrapper
+from nose.util import isclass, tolist
+
+
+log = logging.getLogger('nose.core')
+compat_24 = sys.version_info >= (2, 4)
+
+__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
+           'TextTestRunner']
+
+
+class TextTestRunner(unittest.TextTestRunner):
+    """Test runner that uses nose's TextTestResult to enable errorClasses,
+    as well as providing hooks for plugins to override or replace the test
+    output stream, results, and the test case itself.
+    """
+    def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
+                 config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
+
+
+    def _makeResult(self):
+        return TextTestResult(self.stream,
+                              self.descriptions,
+                              self.verbosity,
+                              self.config)
+
+    def run(self, test):
+        """Overrides to provide plugin hooks and defer all output to
+        the test result class.
+        """
+        wrapper = self.config.plugins.prepareTest(test)
+        if wrapper is not None:
+            test = wrapper
+
+        # plugins can decorate or capture the output stream
+        wrapped = self.config.plugins.setOutputStream(self.stream)
+        if wrapped is not None:
+            self.stream = wrapped
+
+        result = self._makeResult()
+        start = time.time()
+        try:
+            test(result)
+        except KeyboardInterrupt:
+            pass
+        stop = time.time()
+        result.printErrors()
+        result.printSummary(start, stop)
+        self.config.plugins.finalize(result)
+        return result
+
+
+class TestProgram(unittest.TestProgram):
+    """Collect and run tests, returning success or failure.
+
+    The arguments to TestProgram() are the same as to
+    :func:`main()` and :func:`run()`:
+
+    * module: All tests are in this module (default: None)
+    * defaultTest: Tests to load (default: '.')
+    * argv: Command line arguments (default: None; sys.argv is read)
+    * testRunner: Test runner instance (default: None)
+    * testLoader: Test loader instance (default: None)
+    * env: Environment; ignored if config is provided (default: None;
+      os.environ is read)
+    * config: :class:`nose.config.Config` instance (default: None)
+    * suite: Suite or list of tests to run (default: None). Passing a
+      suite or lists of tests will bypass all test discovery and
+      loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+      instance as the suite, context fixtures at the class, module and
+      package level will not be used, and many plugin hooks will not
+      be called. If you want normal nose behavior, either pass a list
+      of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+    * exit: Exit after running tests and printing report (default: True)
+    * plugins: List of plugins to use; ignored if config is provided
+      (default: load plugins with DefaultPluginManager)
+    * addplugins: List of **extra** plugins to use. Pass a list of plugin
+      instances in this argument to make custom plugins available while
+      still using the DefaultPluginManager.
+    """
+    verbosity = 1
+
+    def __init__(self, module=None, defaultTest='.', argv=None,
+                 testRunner=None, testLoader=None, env=None, config=None,
+                 suite=None, exit=True, plugins=None, addplugins=None):
+        if env is None:
+            env = os.environ
+        if config is None:
+            config = self.makeConfig(env, plugins)
+        if addplugins:
+            config.plugins.addPlugins(extraplugins=addplugins)
+        self.config = config
+        self.suite = suite
+        self.exit = exit
+        extra_args = {}
+        version = sys.version_info[0:2]
+        if version >= (2,7) and version != (3,0):
+            extra_args['exit'] = exit
+        unittest.TestProgram.__init__(
+            self, module=module, defaultTest=defaultTest,
+            argv=argv, testRunner=testRunner, testLoader=testLoader,
+            **extra_args)
+
+    def getAllConfigFiles(self, env=None):
+        env = env or {}
+        if env.get('NOSE_IGNORE_CONFIG_FILES', False):
+            return []
+        else:
+            return all_config_files()
+
+    def makeConfig(self, env, plugins=None):
+        """Load a Config, pre-filled with user config files if any are
+        found.
+        """
+        cfg_files = self.getAllConfigFiles(env)
+        if plugins:
+            manager = PluginManager(plugins=plugins)
+        else:
+            manager = DefaultPluginManager()
+        return Config(
+            env=env, files=cfg_files, plugins=manager)
+
+    def parseArgs(self, argv):
+        """Parse argv and env and configure running environment.
+        """
+        self.config.configure(argv, doc=self.usage())
+        log.debug("configured %s", self.config)
+
+        # quick outs: version, plugins (optparse would have already
+        # caught and exited on help)
+        if self.config.options.version:
+            from nose import __version__
+            sys.stdout = sys.__stdout__
+            print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
+            sys.exit(0)
+
+        if self.config.options.showPlugins:
+            self.showPlugins()
+            sys.exit(0)
+
+        if self.testLoader is None:
+            self.testLoader = defaultTestLoader(config=self.config)
+        elif isclass(self.testLoader):
+            self.testLoader = self.testLoader(config=self.config)
+        plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
+        if plug_loader is not None:
+            self.testLoader = plug_loader
+        log.debug("test loader is %s", self.testLoader)
+
+        # FIXME if self.module is a string, add it to self.testNames? not sure
+
+        if self.config.testNames:
+            self.testNames = self.config.testNames
+        else:
+            self.testNames = tolist(self.defaultTest)
+        log.debug('defaultTest %s', self.defaultTest)
+        log.debug('Test names are %s', self.testNames)
+        if self.config.workingDir is not None:
+            os.chdir(self.config.workingDir)
+        self.createTests()
+
+    def createTests(self):
+        """Create the tests to run. If a self.suite
+        is set, then that suite will be used. Otherwise, tests will be
+        loaded from the given test names (self.testNames) using the
+        test loader.
+        """
+        log.debug("createTests called with %s", self.suite)
+        if self.suite is not None:
+            # We were given an explicit suite to run. Make sure it's
+            # loaded and wrapped correctly.
+            self.test = self.testLoader.suiteClass(self.suite)
+        else:
+            self.test = self.testLoader.loadTestsFromNames(self.testNames)
+
+    def runTests(self):
+        """Run Tests. Returns true on success, false on failure, and sets
+        self.success to the same value.
+        """
+        log.debug("runTests called")
+        if self.testRunner is None:
+            self.testRunner = TextTestRunner(stream=self.config.stream,
+                                             verbosity=self.config.verbosity,
+                                             config=self.config)
+        plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+        if plug_runner is not None:
+            self.testRunner = plug_runner
+        result = self.testRunner.run(self.test)
+        self.success = result.wasSuccessful()
+        if self.exit:
+            sys.exit(not self.success)
+        return self.success
+
+    def showPlugins(self):
+        """Print list of available plugins.
+        """
+        import textwrap
+
+        class DummyParser:
+            def __init__(self):
+                self.options = []
+            def add_option(self, *arg, **kw):
+                self.options.append((arg, kw.pop('help', '')))
+
+        v = self.config.verbosity
+        self.config.plugins.sort()
+        for p in self.config.plugins:
+            print "Plugin %s" % p.name
+            if v >= 2:
+                print "  score: %s" % p.score
+                print '\n'.join(textwrap.wrap(p.help().strip(),
+                                              initial_indent='  ',
+                                              subsequent_indent='  '))
+                if v >= 3:
+                    parser = DummyParser()
+                    p.addOptions(parser)
+                    if parser.options:
+                        print
+                        print "  Options:"
+                        for opts, help in parser.options:
+                            print '  %s' % (', '.join(opts))
+                            if help:
+                                print '\n'.join(
+                                    textwrap.wrap(help.strip(),
+                                                  initial_indent='    ',
+                                                  subsequent_indent='    '))
+                print
+
+    def usage(cls):
+        import nose
+        try:
+            ld = nose.__loader__
+            text = ld.get_data(os.path.join(
+                os.path.dirname(__file__), 'usage.txt'))
+        except AttributeError:
+            f = open(os.path.join(
+                os.path.dirname(__file__), 'usage.txt'), 'r')
+            try:
+                text = f.read()
+            finally:
+                f.close()
+        # Ensure that we return str, not bytes.
+        if not isinstance(text, str):
+            text = text.decode('utf-8')
+        return text
+    usage = classmethod(usage)
+
+# backwards compatibility
+run_exit = main = TestProgram
+
+
+def run(*arg, **kw):
+    """Collect and run tests, returning success or failure.
+
+    The arguments to `run()` are the same as to `main()`:
+
+    * module: All tests are in this module (default: None)
+    * defaultTest: Tests to load (default: '.')
+    * argv: Command line arguments (default: None; sys.argv is read)
+    * testRunner: Test runner instance (default: None)
+    * testLoader: Test loader instance (default: None)
+    * env: Environment; ignored if config is provided (default: None;
+      os.environ is read)
+    * config: :class:`nose.config.Config` instance (default: None)
+    * suite: Suite or list of tests to run (default: None). Passing a
+      suite or lists of tests will bypass all test discovery and
+      loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+      instance as the suite, context fixtures at the class, module and
+      package level will not be used, and many plugin hooks will not
+      be called. If you want normal nose behavior, either pass a list
+      of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+    * plugins: List of plugins to use; ignored if config is provided
+      (default: load plugins with DefaultPluginManager)
+    * addplugins: List of **extra** plugins to use. Pass a list of plugin
+      instances in this argument to make custom plugins available while
+      still using the DefaultPluginManager.
+
+    The one difference from ``main()`` is that the ``exit`` argument
+    is always set to False.
+    """
+    kw['exit'] = False
+    return TestProgram(*arg, **kw).success
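+
+# Illustrative sketch (not part of nose itself): because run() returns a
+# boolean, it composes with sys.exit the same way runTests() does.
+# 'mypackage.tests' below is a hypothetical dotted test-module name.
+#
+#     import sys
+#     import nose
+#     ok = nose.run(argv=['nosetests', '-v', 'mypackage.tests'])
+#     sys.exit(not ok)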
+
+
+def runmodule(name='__main__', **kw):
+    """Collect and run tests in a single module only. Defaults to running
+    tests in __main__. Additional arguments to TestProgram may be passed
+    as keyword arguments.
+    """
+    main(defaultTest=name, **kw)
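+
+# Illustrative sketch (not part of nose itself): the usual idiom is to
+# end a test module with a __main__ guard so it can be run directly:
+#
+#     if __name__ == '__main__':
+#         import nose
+#         nose.runmodule()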
+
+
+def collector():
+    """TestSuite replacement entry point. Use anywhere you might use a
+    unittest.TestSuite. The collector will, by default, load options from
+    all config files and execute loader.loadTestsFromNames() on the
+    configured testNames, or '.' if no testNames are configured.
+    """
+    # plugins that implement any of these methods are disabled, since
+    # we don't control the test runner and won't be able to run them
+    # finalize() is also not called, but plugins that use it aren't disabled,
+    # because capture needs it.
+    setuptools_incompat = ('report', 'prepareTest',
+                           'prepareTestLoader', 'prepareTestRunner',
+                           'setOutputStream')
+
+    plugins = RestrictedPluginManager(exclude=setuptools_incompat)
+    conf = Config(files=all_config_files(),
+                  plugins=plugins)
+    conf.configure(argv=['collector'])
+    loader = defaultTestLoader(conf)
+
+    if conf.testNames:
+        suite = loader.loadTestsFromNames(conf.testNames)
+    else:
+        suite = loader.loadTestsFromNames(('.',))
+    return FinalizingSuiteWrapper(suite, plugins.finalize)
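+
+# Illustrative sketch (not part of nose itself): collector() exists so a
+# foreign runner such as setuptools can drive nose, e.g. in a setup.py:
+#
+#     setup(
+#         # ... other setup() arguments ...
+#         test_suite='nose.collector',
+#     )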
+
+
+if __name__ == '__main__':
+    main()
diff --git a/lib/spack/external/nose/exc.py b/lib/spack/external/nose/exc.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b780db0d4203c130f942742a34127bab59056a8
--- /dev/null
+++ b/lib/spack/external/nose/exc.py
@@ -0,0 +1,9 @@
+"""Exceptions for marking tests as skipped or deprecated.
+
+This module exists to provide backwards compatibility with previous
+versions of nose where skipped and deprecated tests were core
+functionality, rather than being provided by plugins. It may be
+removed in a future release.
+"""
+from nose.plugins.skip import SkipTest
+from nose.plugins.deprecated import DeprecatedTest
diff --git a/lib/spack/external/nose/ext/__init__.py b/lib/spack/external/nose/ext/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fd1516a09b04398e1cd202d9a07f10622cf3bc3
--- /dev/null
+++ b/lib/spack/external/nose/ext/__init__.py
@@ -0,0 +1,3 @@
+"""
+External or vendor files
+"""
diff --git a/lib/spack/external/nose/ext/dtcompat.py b/lib/spack/external/nose/ext/dtcompat.py
new file mode 100644
index 0000000000000000000000000000000000000000..332cf08c1297683768f2bb5d7d2c1d303b047a77
--- /dev/null
+++ b/lib/spack/external/nose/ext/dtcompat.py
@@ -0,0 +1,2272 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+#     Jim Fulton
+#     Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+#
+# Modified for inclusion in nose to provide support for DocFileTest in
+# python 2.3:
+#
+# - all doctests removed from module (they fail under 2.3 and 2.5)
+# - now handles the $py.class extension when run under Jython
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+    import doctest
+    doctest.testmod()
+
+if __name__ == "__main__":
+    _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False".  In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests.  There are also many ways to override parts
+of doctest's default behaviors.  See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+    # 0. Option Flags
+    'register_optionflag',
+    'DONT_ACCEPT_TRUE_FOR_1',
+    'DONT_ACCEPT_BLANKLINE',
+    'NORMALIZE_WHITESPACE',
+    'ELLIPSIS',
+    'IGNORE_EXCEPTION_DETAIL',
+    'COMPARISON_FLAGS',
+    'REPORT_UDIFF',
+    'REPORT_CDIFF',
+    'REPORT_NDIFF',
+    'REPORT_ONLY_FIRST_FAILURE',
+    'REPORTING_FLAGS',
+    # 1. Utility Functions
+    'is_private',
+    # 2. Example & DocTest
+    'Example',
+    'DocTest',
+    # 3. Doctest Parser
+    'DocTestParser',
+    # 4. Doctest Finder
+    'DocTestFinder',
+    # 5. Doctest Runner
+    'DocTestRunner',
+    'OutputChecker',
+    'DocTestFailure',
+    'UnexpectedException',
+    'DebugRunner',
+    # 6. Test Functions
+    'testmod',
+    'testfile',
+    'run_docstring_examples',
+    # 7. Tester
+    'Tester',
+    # 8. Unittest Support
+    'DocTestSuite',
+    'DocFileSuite',
+    'set_unittest_reportflags',
+    # 9. Debugging Support
+    'script_from_examples',
+    'testsource',
+    'debug_src',
+    'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+                        __name__, 0)
+
+# There are 4 basic classes:
+#  - Example: a <source, want> pair, plus an intra-docstring line number.
+#  - DocTest: a collection of examples, parsed from a docstring, plus
+#    info about where the docstring came from (name, filename, lineno).
+#  - DocTestFinder: extracts DocTests from a given object's docstring and
+#    its contained objects' docstrings.
+#  - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+#                             list of:
+# +------+                   +---------+                   +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+                   +---------+                   +-------+
+#                            | Example |
+#                            |   ...   |
+#                            | Example |
+#                            +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+    # Create a new flag unless `name` is already known.
+    return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+                    DONT_ACCEPT_BLANKLINE |
+                    NORMALIZE_WHITESPACE |
+                    ELLIPSIS |
+                    IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+                   REPORT_CDIFF |
+                   REPORT_NDIFF |
+                   REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
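+
+# Illustrative sketch (not part of this module): each flag is a distinct
+# single-bit mask, so flags combine with bitwise OR and are queried with
+# bitwise AND:
+#
+#     flags = ELLIPSIS | NORMALIZE_WHITESPACE
+#     if flags & ELLIPSIS:
+#         pass  # '...' in expected output matches any substring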
+
+######################################################################
+## Table of Contents
+######################################################################
+#  1. Utility Functions
+#  2. Example & DocTest -- store test cases
+#  3. DocTest Parser -- extracts examples from strings
+#  4. DocTest Finder -- extracts test cases from objects
+#  5. DocTest Runner -- runs test cases
+#  6. Test Functions -- convenient wrappers for testing
+#  7. Tester Class -- for backwards compatibility
+#  8. Unittest Support
+#  9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+    """prefix, base -> true iff name prefix + "." + base is "private".
+
+    Prefix may be an empty string, and base does not contain a period.
+    Prefix is ignored (although functions you write conforming to this
+    protocol may make use of it).
+    Return true iff base begins with at least one underscore, but
+    does not both begin and end with at least two underscores.
+    """
+    warnings.warn("is_private is deprecated; it wasn't useful; "
+                  "examine DocTestFinder.find() lists instead",
+                  DeprecationWarning, stacklevel=2)
+    return base[:1] == "_" and not base[:2] == "__" == base[-2:]
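+
+# Illustrative behavior (not part of this module):
+#     is_private('X', '_helper')   -> True
+#     is_private('X', '__magic__') -> False (dunder names don't count)
+#     is_private('X', 'public')    -> False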
+
+def _extract_future_flags(globs):
+    """
+    Return the compiler-flags associated with the future features that
+    have been imported into the given namespace (globs).
+    """
+    flags = 0
+    for fname in __future__.all_feature_names:
+        feature = globs.get(fname, None)
+        if feature is getattr(__future__, fname):
+            flags |= feature.compiler_flag
+    return flags
+
+def _normalize_module(module, depth=2):
+    """
+    Return the module specified by `module`.  In particular:
+      - If `module` is a module, then return module.
+      - If `module` is a string, then import and return the
+        module with that name.
+      - If `module` is None, then return the calling module.
+        The calling module is assumed to be the module of
+        the stack frame at the given depth in the call stack.
+    """
+    if inspect.ismodule(module):
+        return module
+    elif isinstance(module, (str, unicode)):
+        return __import__(module, globals(), locals(), ["*"])
+    elif module is None:
+        return sys.modules[sys._getframe(depth).f_globals['__name__']]
+    else:
+        raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+    """
+    Add the given number of space characters to the beginning of every
+    non-blank line in `s`, and return the result.
+    """
+    # This regexp matches the start of non-blank lines:
+    return re.sub('(?m)^(?!$)', indent*' ', s)
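+
+# Illustrative behavior (not part of this module); blank lines are left
+# unindented:
+#     _indent('foo\n\nbar\n') -> '    foo\n\n    bar\n'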
+
+def _exception_traceback(exc_info):
+    """
+    Return a string containing a traceback message for the given
+    exc_info tuple (as returned by sys.exc_info()).
+    """
+    # Get a traceback message.
+    excout = StringIO()
+    exc_type, exc_val, exc_tb = exc_info
+    traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+    return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+    def getvalue(self):
+        result = StringIO.getvalue(self)
+        # If anything at all was written, make sure there's a trailing
+        # newline.  There's no way for the expected output to indicate
+        # that a trailing newline is missing.
+        if result and not result.endswith("\n"):
+            result += "\n"
+        # Prevent softspace from screwing up the next test case, in
+        # case they used print with a trailing comma in an example.
+        if hasattr(self, "softspace"):
+            del self.softspace
+        return result
+
+    def truncate(self, size=None):
+        StringIO.truncate(self, size)
+        if hasattr(self, "softspace"):
+            del self.softspace
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+    if ELLIPSIS_MARKER not in want:
+        return want == got
+
+    # Find "the real" strings.
+    ws = want.split(ELLIPSIS_MARKER)
+    assert len(ws) >= 2
+
+    # Deal with exact matches possibly needed at one or both ends.
+    startpos, endpos = 0, len(got)
+    w = ws[0]
+    if w:   # starts with exact match
+        if got.startswith(w):
+            startpos = len(w)
+            del ws[0]
+        else:
+            return False
+    w = ws[-1]
+    if w:   # ends with exact match
+        if got.endswith(w):
+            endpos -= len(w)
+            del ws[-1]
+        else:
+            return False
+
+    if startpos > endpos:
+        # Exact end matches required more characters than we have, as in
+        # _ellipsis_match('aa...aa', 'aaa')
+        return False
+
+    # For the rest, we only need to find the leftmost non-overlapping
+    # match for each piece.  If there's no overall match that way alone,
+    # there's no overall match period.
+    for w in ws:
+        # w may be '' at times, if there are consecutive ellipses, or
+        # due to an ellipsis at the start or end of `want`.  That's OK.
+        # Search for an empty string succeeds, and doesn't change startpos.
+        startpos = got.find(w, startpos, endpos)
+        if startpos < 0:
+            return False
+        startpos += len(w)
+
+    return True
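+
+# Illustrative behavior (not part of this module):
+#     _ellipsis_match('a...c', 'abc')    -> True
+#     _ellipsis_match('a...c', 'abd')    -> False (no trailing 'c')
+#     _ellipsis_match('aa...aa', 'aaa')  -> False (end matches overlap)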
+
+def _comment_line(line):
+    "Return a commented form of the given line"
+    line = line.rstrip()
+    if line:
+        return '# '+line
+    else:
+        return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+    """
+    A specialized version of the python debugger that redirects stdout
+    to a given stream when interacting with the user.  Stdout is *not*
+    redirected when traced code is executed.
+    """
+    def __init__(self, out):
+        self.__out = out
+        pdb.Pdb.__init__(self)
+
+    def trace_dispatch(self, *args):
+        # Redirect stdout to the given stream.
+        save_stdout = sys.stdout
+        sys.stdout = self.__out
+        # Call Pdb's trace dispatch method.
+        try:
+            return pdb.Pdb.trace_dispatch(self, *args)
+        finally:
+            sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+    if not inspect.ismodule(module):
+        raise TypeError('Expected a module: %r' % module)
+    if path.startswith('/'):
+        raise ValueError('Module-relative files may not have absolute paths')
+
+    # Find the base directory for the path.
+    if hasattr(module, '__file__'):
+        # A normal module/package
+        basedir = os.path.split(module.__file__)[0]
+    elif module.__name__ == '__main__':
+        # An interactive session.
+        if len(sys.argv)>0 and sys.argv[0] != '':
+            basedir = os.path.split(sys.argv[0])[0]
+        else:
+            basedir = os.curdir
+    else:
+        # A module w/o __file__ (this includes builtins)
+        raise ValueError("Can't resolve paths relative to the module " +
+                         module + " (it has no __file__)")
+
+    # Combine the base directory and the path.
+    return os.path.join(basedir, *(path.split('/')))
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+##   fragment of source code, and "want" is the expected output for
+##   "source."  The Example class also includes information about
+##   where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+##   a string (such as an object's docstring).  The DocTest class also
+##   includes information about where the string was extracted from.
+
+class Example:
+    """
+    A single doctest example, consisting of source code and expected
+    output.  `Example` defines the following attributes:
+
+      - source: A single Python statement, always ending with a newline.
+        The constructor adds a newline if needed.
+
+      - want: The expected output from running the source code (either
+        from stdout, or a traceback in case of exception).  `want` ends
+        with a newline unless it's empty, in which case it's an empty
+        string.  The constructor adds a newline if needed.
+
+      - exc_msg: The exception message generated by the example, if
+        the example is expected to generate an exception; or `None` if
+        it is not expected to generate an exception.  This exception
+        message is compared against the return value of
+        `traceback.format_exception_only()`.  `exc_msg` ends with a
+        newline unless it's `None`.  The constructor adds a newline
+        if needed.
+
+      - lineno: The line number within the DocTest string containing
+        this Example where the Example begins.  This line number is
+        zero-based, with respect to the beginning of the DocTest.
+
+      - indent: The example's indentation in the DocTest string.
+        I.e., the number of space characters that precede the
+        example's first prompt.
+
+      - options: A dictionary mapping from option flags to True or
+        False, which is used to override default options for this
+        example.  Any option flags not contained in this dictionary
+        are left at their default value (as specified by the
+        DocTestRunner's optionflags).  By default, no options are set.
+    """
+    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+                 options=None):
+        # Normalize inputs.
+        if not source.endswith('\n'):
+            source += '\n'
+        if want and not want.endswith('\n'):
+            want += '\n'
+        if exc_msg is not None and not exc_msg.endswith('\n'):
+            exc_msg += '\n'
+        # Store properties.
+        self.source = source
+        self.want = want
+        self.lineno = lineno
+        self.indent = indent
+        if options is None: options = {}
+        self.options = options
+        self.exc_msg = exc_msg
+
+class DocTest:
+    """
+    A collection of doctest examples that should be run in a single
+    namespace.  Each `DocTest` defines the following attributes:
+
+      - examples: the list of examples.
+
+      - globs: The namespace (aka globals) that the examples should
+        be run in.
+
+      - name: A name identifying the DocTest (typically, the name of
+        the object whose docstring this DocTest was extracted from).
+
+      - filename: The name of the file that this DocTest was extracted
+        from, or `None` if the filename is unknown.
+
+      - lineno: The line number within filename where this DocTest
+        begins, or `None` if the line number is unavailable.  This
+        line number is zero-based, with respect to the beginning of
+        the file.
+
+      - docstring: The string that the examples were extracted from,
+        or `None` if the string is unavailable.
+    """
+    def __init__(self, examples, globs, name, filename, lineno, docstring):
+        """
+        Create a new DocTest containing the given examples.  The
+        DocTest's globals are initialized with a copy of `globs`.
+        """
+        assert not isinstance(examples, basestring), \
+               "DocTest no longer accepts str; use DocTestParser instead"
+        self.examples = examples
+        self.docstring = docstring
+        self.globs = globs.copy()
+        self.name = name
+        self.filename = filename
+        self.lineno = lineno
+
+    def __repr__(self):
+        if len(self.examples) == 0:
+            examples = 'no examples'
+        elif len(self.examples) == 1:
+            examples = '1 example'
+        else:
+            examples = '%d examples' % len(self.examples)
+        return ('<DocTest %s from %s:%s (%s)>' %
+                (self.name, self.filename, self.lineno, examples))
+
+
+    # This lets us sort tests by name:
+    def __cmp__(self, other):
+        if not isinstance(other, DocTest):
+            return -1
+        return cmp((self.name, self.filename, self.lineno, id(self)),
+                   (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+    """
+    A class used to parse strings containing doctest examples.
+    """
+    # This regular expression is used to find doctest examples in a
+    # string.  It defines three groups: `source` is the source code
+    # (including leading indentation and prompts); `indent` is the
+    # indentation of the first (PS1) line of the source code; and
+    # `want` is the expected output (including leading indentation).
+    _EXAMPLE_RE = re.compile(r'''
+        # Source consists of a PS1 line followed by zero or more PS2 lines.
+        (?P<source>
+            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
+            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
+        \n?
+        # Want consists of any non-blank lines that do not start with PS1.
+        (?P<want> (?:(?![ ]*$)    # Not a blank line
+                     (?![ ]*>>>)  # Not a line starting with PS1
+                     .*$\n?       # But any other line
+                  )*)
+        ''', re.MULTILINE | re.VERBOSE)
+
+    # A regular expression for handling `want` strings that contain
+    # expected exceptions.  It divides `want` into three pieces:
+    #    - the traceback header line (`hdr`)
+    #    - the traceback stack (`stack`)
+    #    - the exception message (`msg`), as generated by
+    #      traceback.format_exception_only()
+    # `msg` may have multiple lines.  We assume/require that the
+    # exception message is the first non-indented line starting with a word
+    # character following the traceback header line.
+    _EXCEPTION_RE = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                # toss trailing whitespace on the header.
+        (?P<stack> .*?)      # don't blink: absorb stuff until...
+        ^ (?P<msg> \w+ .*)   #     a line *starts* with alphanum.
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+    # A callable returning a true value iff its argument is a blank line
+    # or contains a single comment.
+    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+    def parse(self, string, name='<string>'):
+        """
+        Divide the given string into examples and intervening text,
+        and return them as a list of alternating Examples and strings.
+        Line numbers for the Examples are 0-based.  The optional
+        argument `name` is a name identifying this string, and is only
+        used for error messages.
+        """
+        string = string.expandtabs()
+        # If all lines begin with the same indentation, then strip it.
+        min_indent = self._min_indent(string)
+        if min_indent > 0:
+            string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+        output = []
+        charno, lineno = 0, 0
+        # Find all doctest examples in the string:
+        for m in self._EXAMPLE_RE.finditer(string):
+            # Add the pre-example text to `output`.
+            output.append(string[charno:m.start()])
+            # Update lineno (lines before this example)
+            lineno += string.count('\n', charno, m.start())
+            # Extract info from the regexp match.
+            (source, options, want, exc_msg) = \
+                     self._parse_example(m, name, lineno)
+            # Create an Example, and add it to the list.
+            if not self._IS_BLANK_OR_COMMENT(source):
+                output.append( Example(source, want, exc_msg,
+                                    lineno=lineno,
+                                    indent=min_indent+len(m.group('indent')),
+                                    options=options) )
+            # Update lineno (lines inside this example)
+            lineno += string.count('\n', m.start(), m.end())
+            # Update charno.
+            charno = m.end()
+        # Add any remaining post-example text to `output`.
+        output.append(string[charno:])
+        return output
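+
+    # Illustrative sketch (not part of this class): with a blank line
+    # terminating the expected output,
+    #
+    #     DocTestParser().parse('Intro\n>>> 1 + 1\n2\n\nOutro\n')
+    #
+    # returns ['Intro\n', <Example source='1 + 1\n' want='2\n'>,
+    # '\nOutro\n'], i.e. prose strings interleaved with Example objects.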
+
+    def get_doctest(self, string, globs, name, filename, lineno):
+        """
+        Extract all doctest examples from the given string, and
+        collect them into a `DocTest` object.
+
+        `globs`, `name`, `filename`, and `lineno` are attributes for
+        the new `DocTest` object.  See the documentation for `DocTest`
+        for more information.
+        """
+        return DocTest(self.get_examples(string, name), globs,
+                       name, filename, lineno, string)
+
+    def get_examples(self, string, name='<string>'):
+        """
+        Extract all doctest examples from the given string, and return
+        them as a list of `Example` objects.  Line numbers are
+        0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as the opening triple-quote,
+        and so the first interesting line is called \"line 1\" then.
+
+        The optional argument `name` is a name identifying this
+        string, and is only used for error messages.
+        """
+        return [x for x in self.parse(string, name)
+                if isinstance(x, Example)]
+
+    def _parse_example(self, m, name, lineno):
+        """
+        Given a regular expression match from `_EXAMPLE_RE` (`m`),
+        return a pair `(source, want)`, where `source` is the matched
+        example's source code (with prompts and indentation stripped);
+        and `want` is the example's expected output (with indentation
+        stripped).
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        # Get the example's indentation level.
+        indent = len(m.group('indent'))
+
+        # Divide source into lines; check that they're properly
+        # indented; and then strip their indentation & prompts.
+        source_lines = m.group('source').split('\n')
+        self._check_prompt_blank(source_lines, indent, name, lineno)
+        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+        source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+        # Divide want into lines; check that it's properly indented; and
+        # then strip the indentation.  Spaces before the last newline should
+        # be preserved, so plain rstrip() isn't good enough.
+        want = m.group('want')
+        want_lines = want.split('\n')
+        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+            del want_lines[-1]  # forget final newline & spaces after it
+        self._check_prefix(want_lines, ' '*indent, name,
+                           lineno + len(source_lines))
+        want = '\n'.join([wl[indent:] for wl in want_lines])
+
+        # If `want` contains a traceback message, then extract it.
+        m = self._EXCEPTION_RE.match(want)
+        if m:
+            exc_msg = m.group('msg')
+        else:
+            exc_msg = None
+
+        # Extract options from the source.
+        options = self._find_options(source, name, lineno)
+
+        return source, options, want, exc_msg
+
+    # This regular expression looks for option directives in the
+    # source code of an example.  Option directives are comments
+    # starting with "doctest:".  Warning: this may give false
+    # positives for string-literals that contain the string
+    # "#doctest:".  Eliminating these false positives would require
+    # actually parsing the string; but we limit them by ignoring any
+    # line containing "#doctest:" that is *followed* by a quote mark.
+    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+                                      re.MULTILINE)
+
+    def _find_options(self, source, name, lineno):
+        """
+        Return a dictionary containing option overrides extracted from
+        option directives in the given source string.
+
+        `name` is the string's name, and `lineno` is the line number
+        where the example starts; both are used for error messages.
+        """
+        options = {}
+        # (note: with the current regexp, this will match at most once:)
+        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+            option_strings = m.group(1).replace(',', ' ').split()
+            for option in option_strings:
+                if (option[0] not in '+-' or
+                    option[1:] not in OPTIONFLAGS_BY_NAME):
+                    raise ValueError('line %r of the doctest for %s '
+                                     'has an invalid option: %r' %
+                                     (lineno+1, name, option))
+                flag = OPTIONFLAGS_BY_NAME[option[1:]]
+                options[flag] = (option[0] == '+')
+        if options and self._IS_BLANK_OR_COMMENT(source):
+            raise ValueError('line %r of the doctest for %s has an option '
+                             'directive on a line with no example: %r' %
+                             (lineno, name, source))
+        return options
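+
+    # Illustrative sketch (not part of this class): a source line such as
+    #
+    #     >>> print range(20)  # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
+    #
+    # makes _find_options() return
+    # {ELLIPSIS: True, NORMALIZE_WHITESPACE: True}.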
+
+    # This regular expression finds the indentation of every non-blank
+    # line in a string.
+    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)
+
+    def _min_indent(self, s):
+        "Return the minimum indentation of any non-blank line in `s`"
+        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+        if len(indents) > 0:
+            return min(indents)
+        else:
+            return 0
+
+    def _check_prompt_blank(self, lines, indent, name, lineno):
+        """
+        Given the lines of a source string (including prompts and
+        leading indentation), check to make sure that every prompt is
+        followed by a space character.  If any line is not followed by
+        a space character, then raise ValueError.
+        """
+        for i, line in enumerate(lines):
+            if len(line) >= indent+4 and line[indent+3] != ' ':
+                raise ValueError('line %r of the docstring for %s '
+                                 'lacks blank after %s: %r' %
+                                 (lineno+i+1, name,
+                                  line[indent:indent+3], line))
+
+    def _check_prefix(self, lines, prefix, name, lineno):
+        """
+        Check that every line in the given list starts with the given
+        prefix; if any line does not, then raise a ValueError.
+        """
+        for i, line in enumerate(lines):
+            if line and not line.startswith(prefix):
+                raise ValueError('line %r of the docstring for %s has '
+                                 'inconsistent leading whitespace: %r' %
+                                 (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+    """
+    A class used to extract the DocTests that are relevant to a given
+    object, from its docstring and the docstrings of its contained
+    objects.  Doctests can currently be extracted from the following
+    object types: modules, functions, classes, methods, staticmethods,
+    classmethods, and properties.
+    """
+
+    def __init__(self, verbose=False, parser=DocTestParser(),
+                 recurse=True, _namefilter=None, exclude_empty=True):
+        """
+        Create a new doctest finder.
+
+        The optional argument `parser` specifies a class or
+        function that should be used to create new DocTest objects (or
+        objects that implement the same interface as DocTest).  The
+        signature for this factory function should match the signature
+        of the DocTest constructor.
+
+        If the optional argument `recurse` is false, then `find` will
+        only examine the given object, and not any contained objects.
+
+        If the optional argument `exclude_empty` is false, then `find`
+        will include tests for objects with empty docstrings.
+        """
+        self._parser = parser
+        self._verbose = verbose
+        self._recurse = recurse
+        self._exclude_empty = exclude_empty
+        # _namefilter is undocumented, and exists only for temporary backward-
+        # compatibility support of testmod's deprecated isprivate mess.
+        self._namefilter = _namefilter
+
+    def find(self, obj, name=None, module=None, globs=None,
+             extraglobs=None):
+        """
+        Return a list of the DocTests that are defined by the given
+        object's docstring, or by any of its contained objects'
+        docstrings.
+
+        The optional parameter `module` is the module that contains
+        the given object.  If the module is not specified or is None, then
+        the test finder will attempt to automatically determine the
+        correct module.  The object's module is used:
+
+            - As a default namespace, if `globs` is not specified.
+            - To prevent the DocTestFinder from extracting DocTests
+              from objects that are imported from other modules.
+            - To find the name of the file containing the object.
+            - To help find the line number of the object within its
+              file.
+
+        Contained objects whose module does not match `module` are ignored.
+
+        If `module` is False, no attempt to find the module will be made.
+        This is obscure, of use mostly in tests:  if `module` is False, or
+        is None but cannot be found automatically, then all objects are
+        considered to belong to the (non-existent) module, so all contained
+        objects will (recursively) be searched for doctests.
+
+        The globals for each DocTest is formed by combining `globs`
+        and `extraglobs` (bindings in `extraglobs` override bindings
+        in `globs`).  A new copy of the globals dictionary is created
+        for each DocTest.  If `globs` is not specified, then it
+        defaults to the module's `__dict__`, if specified, or {}
+        otherwise.  If `extraglobs` is not specified, then it defaults
+        to {}.
+
+        """
+        # If name was not specified, then extract it from the object.
+        if name is None:
+            name = getattr(obj, '__name__', None)
+            if name is None:
+                raise ValueError("DocTestFinder.find: name must be given "
+                        "when obj.__name__ doesn't exist: %r" %
+                                 (type(obj),))
+
+        # Find the module that contains the given object (if obj is
+        # a module, then module=obj.).  Note: this may fail, in which
+        # case module will be None.
+        if module is False:
+            module = None
+        elif module is None:
+            module = inspect.getmodule(obj)
+
+        # Read the module's source code.  This is used by
+        # DocTestFinder._find_lineno to find the line number for a
+        # given object's docstring.
+        try:
+            file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+            source_lines = linecache.getlines(file)
+            if not source_lines:
+                source_lines = None
+        except TypeError:
+            source_lines = None
+
+        # Initialize globals, and merge in extraglobs.
+        if globs is None:
+            if module is None:
+                globs = {}
+            else:
+                globs = module.__dict__.copy()
+        else:
+            globs = globs.copy()
+        if extraglobs is not None:
+            globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+        tests = []
+        self._find(tests, obj, name, module, source_lines, globs, {})
+        # Sort the tests by alpha order of names, for consistency in
+        # verbose-mode output.  This was a feature of doctest in Pythons
+        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
+        # 2.4.4 and 2.5.
+        tests.sort()
+        return tests
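+
+    # Illustrative sketch (not part of this class): a finder is normally
+    # paired with a runner, mirroring the stdlib doctest.testmod() flow.
+    # 'mymodule' is a hypothetical module under test.
+    #
+    #     import mymodule
+    #     runner = DocTestRunner(verbose=False)
+    #     for t in DocTestFinder().find(mymodule):
+    #         runner.run(t)
+    #     runner.summarize()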
+
+    def _filter(self, obj, prefix, base):
+        """
+        Return true if the given object should not be examined.
+        """
+        return (self._namefilter is not None and
+                self._namefilter(prefix, base))
+
+    def _from_module(self, module, object):
+        """
+        Return true if the given object is defined in the given
+        module.
+        """
+        if module is None:
+            return True
+        elif inspect.isfunction(object):
+            return module.__dict__ is object.func_globals
+        elif inspect.isclass(object):
+            # Some jython classes don't set __module__
+            return module.__name__ == getattr(object, '__module__', None)
+        elif inspect.getmodule(object) is not None:
+            return module is inspect.getmodule(object)
+        elif hasattr(object, '__module__'):
+            return module.__name__ == object.__module__
+        elif isinstance(object, property):
+            return True  # [XX] no way to be sure.
+        else:
+            raise ValueError("object must be a class or function")
+
+    def _find(self, tests, obj, name, module, source_lines, globs, seen):
+        """
+        Find tests for the given object and any contained objects, and
+        add them to `tests`.
+        """
+        if self._verbose:
+            print 'Finding tests in %s' % name
+
+        # If we've already processed this object, then ignore it.
+        if id(obj) in seen:
+            return
+        seen[id(obj)] = 1
+
+        # Find a test for this object, and add it to the list of tests.
+        test = self._get_test(obj, name, module, globs, source_lines)
+        if test is not None:
+            tests.append(test)
+
+        # Look for tests in a module's contained objects.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                valname = '%s.%s' % (name, valname)
+                # Recurse to functions & classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val)) and
+                    self._from_module(module, val)):
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+        # Look for tests in a module's __test__ dictionary.
+        if inspect.ismodule(obj) and self._recurse:
+            for valname, val in getattr(obj, '__test__', {}).items():
+                if not isinstance(valname, basestring):
+                    raise ValueError("DocTestFinder.find: __test__ keys "
+                                     "must be strings: %r" %
+                                     (type(valname),))
+                if not (inspect.isfunction(val) or inspect.isclass(val) or
+                        inspect.ismethod(val) or inspect.ismodule(val) or
+                        isinstance(val, basestring)):
+                    raise ValueError("DocTestFinder.find: __test__ values "
+                                     "must be strings, functions, methods, "
+                                     "classes, or modules: %r" %
+                                     (type(val),))
+                valname = '%s.__test__.%s' % (name, valname)
+                self._find(tests, val, valname, module, source_lines,
+                           globs, seen)
+
+        # Look for tests in a class's contained objects.
+        if inspect.isclass(obj) and self._recurse:
+            for valname, val in obj.__dict__.items():
+                # Check if this contained object should be ignored.
+                if self._filter(val, name, valname):
+                    continue
+                # Special handling for staticmethod/classmethod.
+                if isinstance(val, staticmethod):
+                    val = getattr(obj, valname)
+                if isinstance(val, classmethod):
+                    val = getattr(obj, valname).im_func
+
+                # Recurse to methods, properties, and nested classes.
+                if ((inspect.isfunction(val) or inspect.isclass(val) or
+                      isinstance(val, property)) and
+                      self._from_module(module, val)):
+                    valname = '%s.%s' % (name, valname)
+                    self._find(tests, val, valname, module, source_lines,
+                               globs, seen)
+
+    def _get_test(self, obj, name, module, globs, source_lines):
+        """
+        Return a DocTest for the given object, if it defines a docstring;
+        otherwise, return None.
+        """
+        # Extract the object's docstring.  If it doesn't have one,
+        # then return None (no test for this object).
+        if isinstance(obj, basestring):
+            docstring = obj
+        else:
+            try:
+                if obj.__doc__ is None:
+                    docstring = ''
+                else:
+                    docstring = obj.__doc__
+                    if not isinstance(docstring, basestring):
+                        docstring = str(docstring)
+            except (TypeError, AttributeError):
+                docstring = ''
+
+        # Find the docstring's location in the file.
+        lineno = self._find_lineno(obj, source_lines)
+
+        # Don't bother if the docstring is empty.
+        if self._exclude_empty and not docstring:
+            return None
+
+        # Return a DocTest for this object.
+        if module is None:
+            filename = None
+        else:
+            filename = getattr(module, '__file__', module.__name__)
+            if filename[-4:] in (".pyc", ".pyo"):
+                filename = filename[:-1]
+            elif sys.platform.startswith('java') and \
+                    filename.endswith('$py.class'):
+                filename = '%s.py' % filename[:-9]
+        return self._parser.get_doctest(docstring, globs, name,
+                                        filename, lineno)
+
+    def _find_lineno(self, obj, source_lines):
+        """
+        Return a line number of the given object's docstring.  Note:
+        this method assumes that the object has a docstring.
+        """
+        lineno = None
+
+        # Find the line number for modules.
+        if inspect.ismodule(obj):
+            lineno = 0
+
+        # Find the line number for classes.
+        # Note: this could be fooled if a class is defined multiple
+        # times in a single file.
+        if inspect.isclass(obj):
+            if source_lines is None:
+                return None
+            pat = re.compile(r'^\s*class\s*%s\b' %
+                             getattr(obj, '__name__', '-'))
+            for i, line in enumerate(source_lines):
+                if pat.match(line):
+                    lineno = i
+                    break
+
+        # Find the line number for functions & methods.
+        if inspect.ismethod(obj): obj = obj.im_func
+        if inspect.isfunction(obj): obj = obj.func_code
+        if inspect.istraceback(obj): obj = obj.tb_frame
+        if inspect.isframe(obj): obj = obj.f_code
+        if inspect.iscode(obj):
+            lineno = getattr(obj, 'co_firstlineno', None)-1
+
+        # Find the line number where the docstring starts.  Assume
+        # that it's the first line that begins with a quote mark.
+        # Note: this could be fooled by a multiline function
+        # signature, where a continuation line begins with a quote
+        # mark.
+        if lineno is not None:
+            if source_lines is None:
+                return lineno+1
+            pat = re.compile(r'(^|.*:)\s*\w*("|\')')
+            for lineno in range(lineno, len(source_lines)):
+                if pat.match(source_lines[lineno]):
+                    return lineno
+
+        # We couldn't find the line number.
+        return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+    # This divider string is used to separate failure messages, and to
+    # separate sections of the summary.
+    DIVIDER = "*" * 70
+
+    def __init__(self, checker=None, verbose=None, optionflags=0):
+        """
+        Create a new test runner.
+
+        Optional keyword arg `checker` is the `OutputChecker` that
+        should be used to compare the expected outputs and actual
+        outputs of doctest examples.
+
+        Optional keyword arg 'verbose' prints lots of stuff if true,
+        only failures if false; by default, it's true iff '-v' is in
+        sys.argv.
+
+        Optional argument `optionflags` can be used to control how the
+        test runner compares expected output to actual output, and how
+        it displays failures.  See the documentation for `testmod` for
+        more information.
+        """
+        self._checker = checker or OutputChecker()
+        if verbose is None:
+            verbose = '-v' in sys.argv
+        self._verbose = verbose
+        self.optionflags = optionflags
+        self.original_optionflags = optionflags
+
+        # Keep track of the examples we've run.
+        self.tries = 0
+        self.failures = 0
+        self._name2ft = {}
+
+        # Create a fake output target for capturing doctest output.
+        self._fakeout = _SpoofOut()
+
+    #/////////////////////////////////////////////////////////////////
+    # Reporting methods
+    #/////////////////////////////////////////////////////////////////
+
+    def report_start(self, out, test, example):
+        """
+        Report that the test runner is about to process the given
+        example.  (Only displays a message if verbose=True)
+        """
+        if self._verbose:
+            if example.want:
+                out('Trying:\n' + _indent(example.source) +
+                    'Expecting:\n' + _indent(example.want))
+            else:
+                out('Trying:\n' + _indent(example.source) +
+                    'Expecting nothing\n')
+
+    def report_success(self, out, test, example, got):
+        """
+        Report that the given example ran successfully.  (Only
+        displays a message if verbose=True)
+        """
+        if self._verbose:
+            out("ok\n")
+
+    def report_failure(self, out, test, example, got):
+        """
+        Report that the given example failed.
+        """
+        out(self._failure_header(test, example) +
+            self._checker.output_difference(example, got, self.optionflags))
+
+    def report_unexpected_exception(self, out, test, example, exc_info):
+        """
+        Report that the given example raised an unexpected exception.
+        """
+        out(self._failure_header(test, example) +
+            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+    def _failure_header(self, test, example):
+        out = [self.DIVIDER]
+        if test.filename:
+            if test.lineno is not None and example.lineno is not None:
+                lineno = test.lineno + example.lineno + 1
+            else:
+                lineno = '?'
+            out.append('File "%s", line %s, in %s' %
+                       (test.filename, lineno, test.name))
+        else:
+            out.append('Line %s, in %s' % (example.lineno+1, test.name))
+        out.append('Failed example:')
+        source = example.source
+        out.append(_indent(source))
+        return '\n'.join(out)
+
+    #/////////////////////////////////////////////////////////////////
+    # DocTest Running
+    #/////////////////////////////////////////////////////////////////
+
+    def __run(self, test, compileflags, out):
+        """
+        Run the examples in `test`.  Write the outcome of each example
+        with one of the `DocTestRunner.report_*` methods, using the
+        writer function `out`.  `compileflags` is the set of compiler
+        flags that should be used to execute examples.  Return a tuple
+        `(f, t)`, where `t` is the number of examples tried, and `f`
+        is the number of examples that failed.  The examples are run
+        in the namespace `test.globs`.
+        """
+        # Keep track of the number of failures and tries.
+        failures = tries = 0
+
+        # Save the option flags (since option directives can be used
+        # to modify them).
+        original_optionflags = self.optionflags
+
+        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+        check = self._checker.check_output
+
+        # Process each example.
+        for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+            # reporting after the first failure.
+            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+                     failures > 0)
+
+            # Merge in the example's options.
+            self.optionflags = original_optionflags
+            if example.options:
+                for (optionflag, val) in example.options.items():
+                    if val:
+                        self.optionflags |= optionflag
+                    else:
+                        self.optionflags &= ~optionflag
+
+            # Record that we started this example.
+            tries += 1
+            if not quiet:
+                self.report_start(out, test, example)
+
+            # Use a special filename for compile(), so we can retrieve
+            # the source code during interactive debugging (see
+            # __patched_linecache_getlines).
+            filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+            # Run the example in the given context (globs), and record
+            # any exception that gets raised.  (But don't intercept
+            # keyboard interrupts.)
+            try:
+                # Don't blink!  This is where the user's code gets run.
+                exec compile(example.source, filename, "single",
+                             compileflags, 1) in test.globs
+                self.debugger.set_continue() # ==== Example Finished ====
+                exception = None
+            except KeyboardInterrupt:
+                raise
+            except:
+                exception = sys.exc_info()
+                self.debugger.set_continue() # ==== Example Finished ====
+
+            got = self._fakeout.getvalue()  # the actual output
+            self._fakeout.truncate(0)
+            outcome = FAILURE   # guilty until proved innocent or insane
+
+            # If the example executed without raising any exceptions,
+            # verify its output.
+            if exception is None:
+                if check(example.want, got, self.optionflags):
+                    outcome = SUCCESS
+
+            # The example raised an exception:  check if it was expected.
+            else:
+                exc_info = sys.exc_info()
+                exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+                if not quiet:
+                    got += _exception_traceback(exc_info)
+
+                # If `example.exc_msg` is None, then we weren't expecting
+                # an exception.
+                if example.exc_msg is None:
+                    outcome = BOOM
+
+                # We expected an exception:  see whether it matches.
+                elif check(example.exc_msg, exc_msg, self.optionflags):
+                    outcome = SUCCESS
+
+                # Another chance if they didn't care about the detail.
+                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+                    m1 = re.match(r'[^:]*:', example.exc_msg)
+                    m2 = re.match(r'[^:]*:', exc_msg)
+                    if m1 and m2 and check(m1.group(0), m2.group(0),
+                                           self.optionflags):
+                        outcome = SUCCESS
+
+            # Report the outcome.
+            if outcome is SUCCESS:
+                if not quiet:
+                    self.report_success(out, test, example, got)
+            elif outcome is FAILURE:
+                if not quiet:
+                    self.report_failure(out, test, example, got)
+                failures += 1
+            elif outcome is BOOM:
+                if not quiet:
+                    self.report_unexpected_exception(out, test, example,
+                                                     exc_info)
+                failures += 1
+            else:
+                assert False, ("unknown outcome", outcome)
+
+        # Restore the option flags (in case they were modified)
+        self.optionflags = original_optionflags
+
+        # Record and return the number of failures and tries.
+        self.__record_outcome(test, failures, tries)
+        return failures, tries
+
+    def __record_outcome(self, test, f, t):
+        """
+        Record the fact that the given DocTest (`test`) generated `f`
+        failures out of `t` tried examples.
+        """
+        f2, t2 = self._name2ft.get(test.name, (0,0))
+        self._name2ft[test.name] = (f+f2, t+t2)
+        self.failures += f
+        self.tries += t
+
+    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+                                         r'(?P<name>[\w\.]+)'
+                                         r'\[(?P<examplenum>\d+)\]>$')
+    def __patched_linecache_getlines(self, filename):
+        m = self.__LINECACHE_FILENAME_RE.match(filename)
+        if m and m.group('name') == self.test.name:
+            example = self.test.examples[int(m.group('examplenum'))]
+            return example.source.splitlines(True)
+        else:
+            return self.save_linecache_getlines(filename)
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        """
+        Run the examples in `test`, and display the results using the
+        writer function `out`.
+
+        The examples are run in the namespace `test.globs`.  If
+        `clear_globs` is true (the default), then this namespace will
+        be cleared after the test runs, to help with garbage
+        collection.  If you would like to examine the namespace after
+        the test completes, then use `clear_globs=False`.
+
+        `compileflags` gives the set of flags that should be used by
+        the Python compiler when running the examples.  If not
+        specified, then it will default to the set of future-import
+        flags that apply to `globs`.
+
+        The output of each example is checked using
+        `DocTestRunner.check_output`, and the results are formatted by
+        the `DocTestRunner.report_*` methods.
+        """
+        self.test = test
+
+        if compileflags is None:
+            compileflags = _extract_future_flags(test.globs)
+
+        save_stdout = sys.stdout
+        if out is None:
+            out = save_stdout.write
+        sys.stdout = self._fakeout
+
+        # Patch pdb.set_trace to restore sys.stdout during interactive
+        # debugging (so it's not still redirected to self._fakeout).
+        # Note that the interactive output will go to *our*
+        # save_stdout, even if that's not the real sys.stdout; this
+        # allows us to write test cases for the set_trace behavior.
+        save_set_trace = pdb.set_trace
+        self.debugger = _OutputRedirectingPdb(save_stdout)
+        self.debugger.reset()
+        pdb.set_trace = self.debugger.set_trace
+
+        # Patch linecache.getlines, so we can see the example's source
+        # when we're inside the debugger.
+        self.save_linecache_getlines = linecache.getlines
+        linecache.getlines = self.__patched_linecache_getlines
+
+        try:
+            return self.__run(test, compileflags, out)
+        finally:
+            sys.stdout = save_stdout
+            pdb.set_trace = save_set_trace
+            linecache.getlines = self.save_linecache_getlines
+            if clear_globs:
+                test.globs.clear()
+
+    #/////////////////////////////////////////////////////////////////
+    # Summarization
+    #/////////////////////////////////////////////////////////////////
+    def summarize(self, verbose=None):
+        """
+        Print a summary of all the test cases that have been run by
+        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+        the total number of failed examples, and `t` is the total
+        number of tried examples.
+
+        The optional `verbose` argument controls how detailed the
+        summary is.  If the verbosity is not specified, then the
+        DocTestRunner's verbosity is used.
+        """
+        if verbose is None:
+            verbose = self._verbose
+        notests = []
+        passed = []
+        failed = []
+        totalt = totalf = 0
+        for x in self._name2ft.items():
+            name, (f, t) = x
+            assert f <= t
+            totalt += t
+            totalf += f
+            if t == 0:
+                notests.append(name)
+            elif f == 0:
+                passed.append( (name, t) )
+            else:
+                failed.append(x)
+        if verbose:
+            if notests:
+                print len(notests), "items had no tests:"
+                notests.sort()
+                for thing in notests:
+                    print "   ", thing
+            if passed:
+                print len(passed), "items passed all tests:"
+                passed.sort()
+                for thing, count in passed:
+                    print " %3d tests in %s" % (count, thing)
+        if failed:
+            print self.DIVIDER
+            print len(failed), "items had failures:"
+            failed.sort()
+            for thing, (f, t) in failed:
+                print " %3d of %3d in %s" % (f, t, thing)
+        if verbose:
+            print totalt, "tests in", len(self._name2ft), "items."
+            print totalt - totalf, "passed and", totalf, "failed."
+        if totalf:
+            print "***Test Failed***", totalf, "failures."
+        elif verbose:
+            print "Test passed."
+        return totalf, totalt
+
+    #/////////////////////////////////////////////////////////////////
+    # Backward compatibility cruft to maintain doctest.master.
+    #/////////////////////////////////////////////////////////////////
+    def merge(self, other):
+        d = self._name2ft
+        for name, (f, t) in other._name2ft.items():
+            if name in d:
+                print "*** DocTestRunner.merge: '" + name + "' in both" \
+                    " testers; summing outcomes."
+                f2, t2 = d[name]
+                f = f + f2
+                t = t + t2
+            d[name] = f, t
+
+class OutputChecker:
+    """
+    A class used to check whether the actual output from a doctest
+    example matches the expected output.  `OutputChecker` defines two
+    methods: `check_output`, which compares a given pair of outputs,
+    and returns true if they match; and `output_difference`, which
+    returns a string describing the differences between two outputs.
+    """
+    def check_output(self, want, got, optionflags):
+        """
+        Return True iff the actual output from an example (`got`)
+        matches the expected output (`want`).  These strings are
+        always considered to match if they are identical; but
+        depending on what option flags the test runner is using,
+        several non-exact match types are also possible.  See the
+        documentation for `DocTestRunner` for more information about
+        option flags.
+        """
+        # Handle the common case first, for efficiency:
+        # if they're string-identical, always return true.
+        if got == want:
+            return True
+
+        # The values True and False replaced 1 and 0 as the return
+        # value for boolean comparisons in Python 2.3.
+        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+            if (got,want) == ("True\n", "1\n"):
+                return True
+            if (got,want) == ("False\n", "0\n"):
+                return True
+
+        # <BLANKLINE> can be used as a special sequence to signify a
+        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            # Replace <BLANKLINE> in want with a blank line.
+            want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+                          '', want)
+            # If a line in got contains only spaces, then remove the
+            # spaces.
+            got = re.sub('(?m)^\s*?$', '', got)
+            if got == want:
+                return True
+
+        # This flag causes doctest to ignore any differences in the
+        # contents of whitespace strings.  Note that this can be used
+        # in conjunction with the ELLIPSIS flag.
+        if optionflags & NORMALIZE_WHITESPACE:
+            got = ' '.join(got.split())
+            want = ' '.join(want.split())
+            if got == want:
+                return True
+
+        # The ELLIPSIS flag says to let the sequence "..." in `want`
+        # match any substring in `got`.
+        if optionflags & ELLIPSIS:
+            if _ellipsis_match(want, got):
+                return True
+
+        # We didn't find any match; return false.
+        return False
+
+    # Should we do a fancy diff?
+    def _do_a_fancy_diff(self, want, got, optionflags):
+        # Not unless they asked for a fancy diff.
+        if not optionflags & (REPORT_UDIFF |
+                              REPORT_CDIFF |
+                              REPORT_NDIFF):
+            return False
+
+        # If expected output uses ellipsis, a meaningful fancy diff is
+        # too hard ... or maybe not.  In two real-life failures Tim saw,
+        # a diff was a major help anyway, so this is commented out.
+        # [todo] _ellipsis_match() knows which pieces do and don't match,
+        # and could be the basis for a kick-ass diff in this case.
+        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+        ##    return False
+
+        # ndiff does intraline difference marking, so can be useful even
+        # for 1-line differences.
+        if optionflags & REPORT_NDIFF:
+            return True
+
+        # The other diff types need at least a few lines to be helpful.
+        return want.count('\n') > 2 and got.count('\n') > 2
+
+    def output_difference(self, example, got, optionflags):
+        """
+        Return a string describing the differences between the
+        expected output for a given example (`example`) and the actual
+        output (`got`).  `optionflags` is the set of option flags used
+        to compare `want` and `got`.
+        """
+        want = example.want
+        # If <BLANKLINE>s are being used, then replace blank lines
+        # with <BLANKLINE> in the actual output string.
+        if not (optionflags & DONT_ACCEPT_BLANKLINE):
+            got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+        # Check if we should use diff.
+        if self._do_a_fancy_diff(want, got, optionflags):
+            # Split want & got into lines.
+            want_lines = want.splitlines(True)  # True == keep line ends
+            got_lines = got.splitlines(True)
+            # Use difflib to find their differences.
+            if optionflags & REPORT_UDIFF:
+                diff = difflib.unified_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:] # strip the diff header
+                kind = 'unified diff with -expected +actual'
+            elif optionflags & REPORT_CDIFF:
+                diff = difflib.context_diff(want_lines, got_lines, n=2)
+                diff = list(diff)[2:] # strip the diff header
+                kind = 'context diff with expected followed by actual'
+            elif optionflags & REPORT_NDIFF:
+                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+                diff = list(engine.compare(want_lines, got_lines))
+                kind = 'ndiff with -expected +actual'
+            else:
+                assert 0, 'Bad diff option'
+            # Remove trailing whitespace on diff output.
+            diff = [line.rstrip() + '\n' for line in diff]
+            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+        # If we're not using diff, then simply list the expected
+        # output followed by the actual output.
+        if want and got:
+            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+        elif want:
+            return 'Expected:\n%sGot nothing\n' % _indent(want)
+        elif got:
+            return 'Expected nothing\nGot:\n%s' % _indent(got)
+        else:
+            return 'Expected nothing\nGot nothing\n'
+
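+# Editorial usage sketch (not part of the upstream module): how an
+# OutputChecker compares expected and actual output under option flags.
+#
+#     checker = OutputChecker()
+#     # exact match
+#     assert checker.check_output("1\n", "1\n", 0)
+#     # ELLIPSIS lets "..." in the expected output match any substring
+#     assert checker.check_output("[0, ..., 9]\n",
+#                                 "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n",
+#                                 ELLIPSIS)
+#     # NORMALIZE_WHITESPACE collapses runs of whitespace on both sides
+#     assert checker.check_output("a  b\n", "a b\n", NORMALIZE_WHITESPACE)
+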
+class DocTestFailure(Exception):
+    """A DocTest example has failed in debugging mode.
+
+    The exception instance has variables:
+
+    - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+    - got: the actual output
+    """
+    def __init__(self, test, example, got):
+        self.test = test
+        self.example = example
+        self.got = got
+
+    def __str__(self):
+        return str(self.test)
+
+class UnexpectedException(Exception):
+    """A DocTest example has encountered an unexpected exception
+
+    The exception instance has variables:
+
+    - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+    - exc_info: the exception info
+    """
+    def __init__(self, test, example, exc_info):
+        self.test = test
+        self.example = example
+        self.exc_info = exc_info
+
+    def __str__(self):
+        return str(self.test)
+
+class DebugRunner(DocTestRunner):
+    """Run doc tests but raise an exception as soon as there is a failure.
+
+    Raises DocTestFailure on the first failing example and
+    UnexpectedException on the first unexpected exception.  The test's
+    globals are not cleared while the test is running, so if an example
+    raises they remain available for post-mortem debugging.
+    """
+
+    def run(self, test, compileflags=None, out=None, clear_globs=True):
+        r = DocTestRunner.run(self, test, compileflags, out, False)
+        if clear_globs:
+            test.globs.clear()
+        return r
+
+    def report_unexpected_exception(self, out, test, example, exc_info):
+        raise UnexpectedException(test, example, exc_info)
+
+    def report_failure(self, out, test, example, got):
+        raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+            report=True, optionflags=0, extraglobs=None,
+            raise_on_error=False, exclude_empty=False):
+    """m=None, name=None, globs=None, verbose=None, isprivate=None,
+       report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+       exclude_empty=False
+
+    Test examples in docstrings in functions and classes reachable
+    from module m (or the current module if m is not supplied), starting
+    with m.__doc__.  Unless isprivate is specified, private names
+    are not skipped.
+
+    Also test examples reachable from dict m.__test__ if it exists and is
+    not None.  m.__test__ maps names to functions, classes and strings;
+    function and class docstrings are tested even if the name is private;
+    strings are tested directly, as if they were docstrings.
+
+    Return (#failures, #tests).
+
+    See doctest.__doc__ for an overview.
+
+    Optional keyword arg "name" gives the name of the module; by default
+    use m.__name__.
+
+    Optional keyword arg "globs" gives a dict to be used as the globals
+    when executing examples; by default, use m.__dict__.  A copy of this
+    dict is actually used for each docstring, so that each docstring's
+    examples start with a clean slate.
+
+    Optional keyword arg "extraglobs" gives a dictionary that should be
+    merged into the globals that are used to execute examples.  By
+    default, no extra globals are used.  This is new in 2.4.
+
+    Optional keyword arg "verbose" prints lots of stuff if true, prints
+    only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+    Optional keyword arg "report" prints a summary at the end when true,
+    else prints nothing at the end.  In verbose mode, the summary is
+    detailed, else very brief (in fact, empty if all tests passed).
+
+    Optional keyword arg "optionflags" or's together module constants,
+    and defaults to 0.  This is new in 2.3.  Possible values (see the
+    docs for details):
+
+        DONT_ACCEPT_TRUE_FOR_1
+        DONT_ACCEPT_BLANKLINE
+        NORMALIZE_WHITESPACE
+        ELLIPSIS
+        IGNORE_EXCEPTION_DETAIL
+        REPORT_UDIFF
+        REPORT_CDIFF
+        REPORT_NDIFF
+        REPORT_ONLY_FIRST_FAILURE
+
+    Optional keyword arg "raise_on_error" raises an exception on the
+    first unexpected exception or failure. This allows failures to be
+    post-mortem debugged.
+
+    Deprecated in Python 2.4:
+    Optional keyword arg "isprivate" specifies a function used to
+    determine whether a name is private.  The default function is to
+    treat all functions as public.  Optionally, "isprivate" can be
+    set to doctest.is_private to skip over functions marked as private
+    using the underscore naming convention; see its docs for details.
+
+    Advanced tomfoolery:  testmod runs methods of a local instance of
+    class doctest.Tester, then merges the results into (or creates)
+    global Tester instance doctest.master.  Methods of doctest.master
+    can be called directly too, if you want to do something unusual.
+    Passing report=0 to testmod is especially useful then, to delay
+    displaying a summary.  Invoke doctest.master.summarize(verbose)
+    when you're done fiddling.
+    """
+    global master
+
+    if isprivate is not None:
+        warnings.warn("the isprivate argument is deprecated; "
+                      "examine DocTestFinder.find() lists instead",
+                      DeprecationWarning)
+
+    # If no module was given, then use __main__.
+    if m is None:
+        # DWA - m will still be None if this wasn't invoked from the command
+        # line, in which case the following TypeError is about as good an error
+        # as we should expect
+        m = sys.modules.get('__main__')
+
+    # Check that we were actually given a module.
+    if not inspect.ismodule(m):
+        raise TypeError("testmod: module required; %r" % (m,))
+
+    # If no name was given, then use the module's name.
+    if name is None:
+        name = m.__name__
+
+    # Find, parse, and run all tests in the given module.
+    finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+    if raise_on_error:
+        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+    else:
+        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+    for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+        runner.run(test)
+
+    if report:
+        runner.summarize()
+
+    if master is None:
+        master = runner
+    else:
+        master.merge(runner)
+
+    return runner.failures, runner.tries
+
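+# Editorial usage sketch (not part of the upstream module): the common
+# testmod idiom, run when a module is executed directly.
+#
+#     if __name__ == '__main__':
+#         import doctest
+#         failures, tries = doctest.testmod(verbose=False,
+#                                           optionflags=doctest.ELLIPSIS)
+#         print failures, "of", tries, "examples failed"
+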
+def testfile(filename, module_relative=True, name=None, package=None,
+             globs=None, verbose=None, report=True, optionflags=0,
+             extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+    """
+    Test examples in the given file.  Return (#failures, #tests).
+
+    Optional keyword arg "module_relative" specifies how filenames
+    should be interpreted:
+
+      - If "module_relative" is True (the default), then "filename"
+         specifies a module-relative path.  By default, this path is
+         relative to the calling module's directory; but if the
+         "package" argument is specified, then it is relative to that
+         package.  To ensure os-independence, "filename" should use
+         "/" characters to separate path segments, and should not
+         be an absolute path (i.e., it may not begin with "/").
+
+      - If "module_relative" is False, then "filename" specifies an
+        os-specific path.  The path may be absolute or relative (to
+        the current working directory).
+
+    Optional keyword arg "name" gives the name of the test; by default
+    use the file's basename.
+
+    Optional keyword argument "package" is a Python package or the
+    name of a Python package whose directory should be used as the
+    base directory for a module relative filename.  If no package is
+    specified, then the calling module's directory is used as the base
+    directory for module relative filenames.  It is an error to
+    specify "package" if "module_relative" is False.
+
+    Optional keyword arg "globs" gives a dict to be used as the globals
+    when executing examples; by default, use {}.  A copy of this dict
+    is actually used for each docstring, so that each docstring's
+    examples start with a clean slate.
+
+    Optional keyword arg "extraglobs" gives a dictionary that should be
+    merged into the globals that are used to execute examples.  By
+    default, no extra globals are used.
+
+    Optional keyword arg "verbose" prints lots of stuff if true, prints
+    only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+    Optional keyword arg "report" prints a summary at the end when true,
+    else prints nothing at the end.  In verbose mode, the summary is
+    detailed, else very brief (in fact, empty if all tests passed).
+
+    Optional keyword arg "optionflags" or's together module constants,
+    and defaults to 0.  Possible values (see the docs for details):
+
+        DONT_ACCEPT_TRUE_FOR_1
+        DONT_ACCEPT_BLANKLINE
+        NORMALIZE_WHITESPACE
+        ELLIPSIS
+        IGNORE_EXCEPTION_DETAIL
+        REPORT_UDIFF
+        REPORT_CDIFF
+        REPORT_NDIFF
+        REPORT_ONLY_FIRST_FAILURE
+
+    Optional keyword arg "raise_on_error" raises an exception on the
+    first unexpected exception or failure. This allows failures to be
+    post-mortem debugged.
+
+    Optional keyword arg "parser" specifies a DocTestParser (or
+    subclass) that should be used to extract tests from the files.
+
+    Advanced tomfoolery:  testfile runs methods of a local instance of
+    class doctest.Tester, then merges the results into (or creates)
+    global Tester instance doctest.master.  Methods of doctest.master
+    can be called directly too, if you want to do something unusual.
+    Passing report=0 to testfile is especially useful then, to delay
+    displaying a summary.  Invoke doctest.master.summarize(verbose)
+    when you're done fiddling.
+    """
+    global master
+
+    if package and not module_relative:
+        raise ValueError("Package may only be specified for module-"
+                         "relative paths.")
+
+    # Relativize the path
+    if module_relative:
+        package = _normalize_module(package)
+        filename = _module_relative_path(package, filename)
+
+    # If no name was given, then use the file's name.
+    if name is None:
+        name = os.path.basename(filename)
+
+    # Assemble the globals.
+    if globs is None:
+        globs = {}
+    else:
+        globs = globs.copy()
+    if extraglobs is not None:
+        globs.update(extraglobs)
+
+    if raise_on_error:
+        runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+    else:
+        runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+    # Read the file, convert it to a test, and run it.
+    s = open(filename).read()
+    test = parser.get_doctest(s, globs, name, filename, 0)
+    runner.run(test)
+
+    if report:
+        runner.summarize()
+
+    if master is None:
+        master = runner
+    else:
+        master.merge(runner)
+
+    return runner.failures, runner.tries
+
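+# Editorial usage sketch (not part of the upstream module): running the
+# examples in a text file.  The file names below are hypothetical.
+#
+#     import doctest
+#     # module-relative path, resolved against the calling module's dir
+#     doctest.testfile('example.txt')
+#     # os-specific path, with whitespace normalization
+#     doctest.testfile('/tmp/example.txt', module_relative=False,
+#                      optionflags=doctest.NORMALIZE_WHITESPACE)
+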
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+                           compileflags=None, optionflags=0):
+    """
+    Test examples in the given object's docstring (`f`), using `globs`
+    as globals.  Optional argument `name` is used in failure messages.
+    If the optional argument `verbose` is true, then generate output
+    even if there are no failures.
+
+    `compileflags` gives the set of flags that should be used by the
+    Python compiler when running the examples.  If not specified, then
+    it will default to the set of future-import flags that apply to
+    `globs`.
+
+    Optional keyword arg `optionflags` specifies options for the
+    testing and output.  See the documentation for `testmod` for more
+    information.
+    """
+    # Find, parse, and run all tests in the given module.
+    finder = DocTestFinder(verbose=verbose, recurse=False)
+    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+    for test in finder.find(f, name, globs=globs):
+        runner.run(test, compileflags=compileflags)
+
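+# Editorial usage sketch (not part of the upstream module): checking the
+# examples in a single object's docstring.
+#
+#     def square(x):
+#         """
+#         >>> square(3)
+#         9
+#         """
+#         return x * x
+#
+#     import doctest
+#     doctest.run_docstring_examples(square, globals(), verbose=True)
+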
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility.  It's not
+# actually used in any way.
+
+class Tester:
+    def __init__(self, mod=None, globs=None, verbose=None,
+                 isprivate=None, optionflags=0):
+
+        warnings.warn("class Tester is deprecated; "
+                      "use class doctest.DocTestRunner instead",
+                      DeprecationWarning, stacklevel=2)
+        if mod is None and globs is None:
+            raise TypeError("Tester.__init__: must specify mod or globs")
+        if mod is not None and not inspect.ismodule(mod):
+            raise TypeError("Tester.__init__: mod must be a module; %r" %
+                            (mod,))
+        if globs is None:
+            globs = mod.__dict__
+        self.globs = globs
+
+        self.verbose = verbose
+        self.isprivate = isprivate
+        self.optionflags = optionflags
+        self.testfinder = DocTestFinder(_namefilter=isprivate)
+        self.testrunner = DocTestRunner(verbose=verbose,
+                                        optionflags=optionflags)
+
+    def runstring(self, s, name):
+        test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+        if self.verbose:
+            print "Running string", name
+        (f,t) = self.testrunner.run(test)
+        if self.verbose:
+            print f, "of", t, "examples failed in string", name
+        return (f,t)
+
+    def rundoc(self, object, name=None, module=None):
+        f = t = 0
+        tests = self.testfinder.find(object, name, module=module,
+                                     globs=self.globs)
+        for test in tests:
+            (f2, t2) = self.testrunner.run(test)
+            (f,t) = (f+f2, t+t2)
+        return (f,t)
+
+    def rundict(self, d, name, module=None):
+        import new
+        m = new.module(name)
+        m.__dict__.update(d)
+        if module is None:
+            module = False
+        return self.rundoc(m, name, module)
+
+    def run__test__(self, d, name):
+        import new
+        m = new.module(name)
+        m.__test__ = d
+        return self.rundoc(m, name)
+
+    def summarize(self, verbose=None):
+        return self.testrunner.summarize(verbose)
+
+    def merge(self, other):
+        self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+    """Set the default doctest reporting flags used by unittest-based
+    doctest cases whose own optionflags contain no reporting flags.
+    Only reporting flags may be given; the old flag value is returned.
+    """
+    global _unittest_reportflags
+
+    if (flags & REPORTING_FLAGS) != flags:
+        raise ValueError("Only reporting flags allowed", flags)
+    old = _unittest_reportflags
+    _unittest_reportflags = flags
+    return old
+
+
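+# Editorial usage sketch (not part of the upstream module): choosing the
+# default reporting flags used by doctest-based unittest cases.
+#
+#     import doctest
+#     old = doctest.set_unittest_reportflags(
+#         doctest.REPORT_ONLY_FIRST_FAILURE)
+#     # ... run a unittest suite built with DocTestSuite/DocFileSuite ...
+#     doctest.set_unittest_reportflags(old)   # restore the previous flags
+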
+class DocTestCase(unittest.TestCase):
+
+    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+                 checker=None):
+
+        unittest.TestCase.__init__(self)
+        self._dt_optionflags = optionflags
+        self._dt_checker = checker
+        self._dt_test = test
+        self._dt_setUp = setUp
+        self._dt_tearDown = tearDown
+
+    def setUp(self):
+        test = self._dt_test
+
+        if self._dt_setUp is not None:
+            self._dt_setUp(test)
+
+    def tearDown(self):
+        test = self._dt_test
+
+        if self._dt_tearDown is not None:
+            self._dt_tearDown(test)
+
+        test.globs.clear()
+
+    def runTest(self):
+        test = self._dt_test
+        old = sys.stdout
+        new = StringIO()
+        optionflags = self._dt_optionflags
+
+        if not (optionflags & REPORTING_FLAGS):
+            # The option flags don't include any reporting flags,
+            # so add the default reporting flags
+            optionflags |= _unittest_reportflags
+
+        runner = DocTestRunner(optionflags=optionflags,
+                               checker=self._dt_checker, verbose=False)
+
+        try:
+            runner.DIVIDER = "-"*70
+            failures, tries = runner.run(
+                test, out=new.write, clear_globs=False)
+        finally:
+            sys.stdout = old
+
+        if failures:
+            raise self.failureException(self.format_failure(new.getvalue()))
+
+    def format_failure(self, err):
+        test = self._dt_test
+        if test.lineno is None:
+            lineno = 'unknown line number'
+        else:
+            lineno = '%s' % test.lineno
+        lname = '.'.join(test.name.split('.')[-1:])
+        return ('Failed doctest test for %s\n'
+                '  File "%s", line %s, in %s\n\n%s'
+                % (test.name, test.filename, lineno, lname, err)
+                )
+
+    def debug(self):
+        self.setUp()
+        runner = DebugRunner(optionflags=self._dt_optionflags,
+                             checker=self._dt_checker, verbose=False)
+        runner.run(self._dt_test)
+        self.tearDown()
+
+    def id(self):
+        return self._dt_test.name
+
+    def __repr__(self):
+        name = self._dt_test.name.split('.')
+        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+    __str__ = __repr__
+
+    def shortDescription(self):
+        return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+                 **options):
+    """
+    Convert doctest tests for a module to a unittest test suite.
+
+    This converts each documentation string in a module that
+    contains doctest tests to a unittest test case.  If any of the
+    tests in a doc string fail, then the test case fails.  An exception
+    is raised showing the name of the file containing the test and a
+    (sometimes approximate) line number.
+
+    The `module` argument provides the module to be tested.  The argument
+    can be either a module or a module name.
+
+    If no argument is given, the calling module is used.
+
+    A number of options may be provided as keyword arguments:
+
+    setUp
+      A set-up function.  This is called before running the
+      tests in each file. The setUp function will be passed a DocTest
+      object.  The setUp function can access the test globals as the
+      globs attribute of the test passed.
+
+    tearDown
+      A tear-down function.  This is called after running the
+      tests in each file.  The tearDown function will be passed a DocTest
+      object.  The tearDown function can access the test globals as the
+      globs attribute of the test passed.
+
+    globs
+      A dictionary containing initial global variables for the tests.
+
+    optionflags
+       A set of doctest option flags expressed as an integer.
+    """
+
+    if test_finder is None:
+        test_finder = DocTestFinder()
+
+    module = _normalize_module(module)
+    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+    if globs is None:
+        globs = module.__dict__
+    if not tests:
+        # Why do we want to do this? Because it reveals a bug that might
+        # otherwise be hidden.
+        raise ValueError(module, "has no tests")
+
+    tests.sort()
+    suite = unittest.TestSuite()
+    for test in tests:
+        if len(test.examples) == 0:
+            continue
+        if not test.filename:
+            filename = module.__file__
+            if filename[-4:] in (".pyc", ".pyo"):
+                filename = filename[:-1]
+            elif sys.platform.startswith('java') and \
+                    filename.endswith('$py.class'):
+                filename = '%s.py' % filename[:-9]
+            test.filename = filename
+        suite.addTest(DocTestCase(test, **options))
+
+    return suite
+
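+# Editorial usage sketch (not part of the upstream module): exposing a
+# module's doctests to a unittest runner.  `mymodule` is hypothetical.
+#
+#     import doctest, unittest
+#     import mymodule
+#     suite = doctest.DocTestSuite(mymodule,
+#                                  optionflags=doctest.NORMALIZE_WHITESPACE)
+#     unittest.TextTestRunner(verbosity=2).run(suite)
+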
+class DocFileCase(DocTestCase):
+
+    def id(self):
+        return '_'.join(self._dt_test.name.split('.'))
+
+    def __repr__(self):
+        return self._dt_test.filename
+    __str__ = __repr__
+
+    def format_failure(self, err):
+        return ('Failed doctest test for %s\n  File "%s", line 0\n\n%s'
+                % (self._dt_test.name, self._dt_test.filename, err)
+                )
+
+def DocFileTest(path, module_relative=True, package=None,
+                globs=None, parser=DocTestParser(), **options):
+    if globs is None:
+        globs = {}
+
+    if package and not module_relative:
+        raise ValueError("Package may only be specified for module-"
+                         "relative paths.")
+
+    # Relativize the path.
+    if module_relative:
+        package = _normalize_module(package)
+        path = _module_relative_path(package, path)
+
+    # Find the file and read it.
+    name = os.path.basename(path)
+    doc = open(path).read()
+
+    # Convert it to a test, and wrap it in a DocFileCase.
+    test = parser.get_doctest(doc, globs, name, path, 0)
+    return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+    """A unittest suite for one or more doctest files.
+
+    The path to each doctest file is given as a string; the
+    interpretation of that string depends on the keyword argument
+    "module_relative".
+
+    A number of options may be provided as keyword arguments:
+
+    module_relative
+      If "module_relative" is True, then the given file paths are
+      interpreted as os-independent module-relative paths.  By
+      default, these paths are relative to the calling module's
+      directory; but if the "package" argument is specified, then
+      they are relative to that package.  To ensure os-independence,
+      "filename" should use "/" characters to separate path
+      segments, and may not be an absolute path (i.e., it may not
+      begin with "/").
+
+      If "module_relative" is False, then the given file paths are
+      interpreted as os-specific paths.  These paths may be absolute
+      or relative (to the current working directory).
+
+    package
+      A Python package or the name of a Python package whose directory
+      should be used as the base directory for module relative paths.
+      If "package" is not specified, then the calling module's
+      directory is used as the base directory for module relative
+      filenames.  It is an error to specify "package" if
+      "module_relative" is False.
+
+    setUp
+      A set-up function.  This is called before running the
+      tests in each file. The setUp function will be passed a DocTest
+      object.  The setUp function can access the test globals as the
+      globs attribute of the test passed.
+
+    tearDown
+      A tear-down function.  This is called after running the
+      tests in each file.  The tearDown function will be passed a DocTest
+      object.  The tearDown function can access the test globals as the
+      globs attribute of the test passed.
+
+    globs
+      A dictionary containing initial global variables for the tests.
+
+    optionflags
+      A set of doctest option flags expressed as an integer.
+
+    parser
+      A DocTestParser (or subclass) that should be used to extract
+      tests from the files.
+    """
+    suite = unittest.TestSuite()
+
+    # We do this here so that _normalize_module is called at the right
+    # level.  If it were called in DocFileTest, then this function
+    # would be the caller and we might guess the package incorrectly.
+    if kw.get('module_relative', True):
+        kw['package'] = _normalize_module(kw.get('package'))
+
+    for path in paths:
+        suite.addTest(DocFileTest(path, **kw))
+
+    return suite
+
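+# Editorial usage sketch (not part of the upstream module): collecting
+# several doctest files into one suite.  The file names are hypothetical.
+#
+#     import doctest, unittest
+#     suite = doctest.DocFileSuite('intro.txt', 'advanced.txt',
+#                                  optionflags=doctest.ELLIPSIS)
+#     unittest.TextTestRunner().run(suite)
+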
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+    """Extract script from text with examples.
+
+    Example source is kept as code, expected output becomes
+    '## '-prefixed comments, and surrounding prose becomes '# '-prefixed
+    comments.
+    """
+    output = []
+    for piece in DocTestParser().parse(s):
+        if isinstance(piece, Example):
+            # Add the example's source code (strip trailing NL)
+            output.append(piece.source[:-1])
+            # Add the expected output:
+            want = piece.want
+            if want:
+                output.append('# Expected:')
+                output += ['## '+l for l in want.split('\n')[:-1]]
+        else:
+            # Add non-example text.
+            output += [_comment_line(l)
+                       for l in piece.split('\n')[:-1]]
+
+    # Trim junk on both ends.
+    while output and output[-1] == '#':
+        output.pop()
+    while output and output[0] == '#':
+        output.pop(0)
+    # Combine the output, and return it.
+    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+    return '\n'.join(output) + '\n'
+
+def testsource(module, name):
+    """Extract the test sources from a doctest docstring as a script.
+
+    Provide the module (or dotted name of the module) containing the
+    test to be debugged and the name (within the module) of the object
+    with the doc string with tests to be debugged.
+    """
+    module = _normalize_module(module)
+    tests = DocTestFinder().find(module)
+    test = [t for t in tests if t.name == name]
+    if not test:
+        raise ValueError(name, "not found in tests")
+    test = test[0]
+    testsrc = script_from_examples(test.docstring)
+    return testsrc
+
+def debug_src(src, pm=False, globs=None):
+    """Debug a single doctest docstring, in argument `src`'"""
+    testsrc = script_from_examples(src)
+    debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+    "Debug a test script.  `src` is the script, as a string."
+    import pdb
+
+    # Note that tempfile.NamedTemporaryFile() cannot be used.  As the
+    # docs say, a file so created cannot be opened by name a second time
+    # on modern Windows boxes, and execfile() needs to open it.
+    srcfilename = tempfile.mktemp(".py", "doctestdebug")
+    f = open(srcfilename, 'w')
+    f.write(src)
+    f.close()
+
+    try:
+        if globs:
+            globs = globs.copy()
+        else:
+            globs = {}
+
+        if pm:
+            try:
+                execfile(srcfilename, globs, globs)
+            except:
+                print sys.exc_info()[1]
+                pdb.post_mortem(sys.exc_info()[2])
+        else:
+            # Note that %r is vital here.  '%s' instead can, e.g., cause
+            # backslashes to get treated as metacharacters on Windows.
+            pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+    finally:
+        os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+    """Debug a single doctest docstring.
+
+    Provide the module (or dotted name of the module) containing the
+    test to be debugged and the name (within the module) of the object
+    with the docstring with tests to be debugged.
+    """
+    module = _normalize_module(module)
+    testsrc = testsource(module, name)
+    debug_script(testsrc, pm, module.__dict__)
+
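+# Editorial usage sketch (not part of the upstream module): debugging the
+# examples of one docstring under pdb.  `mymodule` and `mymodule.myfunc`
+# are hypothetical names.
+#
+#     import doctest
+#     # step through the examples interactively
+#     doctest.debug('mymodule', 'mymodule.myfunc')
+#     # or drop into post-mortem pdb on the first exception
+#     doctest.debug('mymodule', 'mymodule.myfunc', pm=True)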
+
+__test__ = {}
diff --git a/lib/spack/external/nose/failure.py b/lib/spack/external/nose/failure.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5fabfda5e7c8da399fe2bf2827ccf765690099d
--- /dev/null
+++ b/lib/spack/external/nose/failure.py
@@ -0,0 +1,42 @@
+import logging
+import unittest
+from traceback import format_tb
+from nose.pyversion import is_base_exception
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Failure']
+
+
+class Failure(unittest.TestCase):
+    """Unloadable or unexecutable test.
+
+    A Failure case is placed in a test suite to indicate the presence of a
+    test that could not be loaded or executed. A common example is a test
+    module that fails to import.
+    
+    """
+    __test__ = False # do not collect
+    def __init__(self, exc_class, exc_val, tb=None, address=None):
+        log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb))
+        self.exc_class = exc_class
+        self.exc_val = exc_val
+        self.tb = tb
+        self._address = address
+        unittest.TestCase.__init__(self)
+
+    def __str__(self):
+        return "Failure: %s (%s)" % (
+            getattr(self.exc_class, '__name__', self.exc_class), self.exc_val)
+
+    def address(self):
+        return self._address
+    
+    def runTest(self):
+        if self.tb is not None:
+            if is_base_exception(self.exc_val):
+                raise self.exc_val, None, self.tb
+            raise self.exc_class, self.exc_val, self.tb
+        else:
+            raise self.exc_class(self.exc_val)
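+
+# Editorial usage sketch (not part of the vendored module): wrapping an
+# import error so it surfaces as a failing test when the suite runs.
+# The module name below is hypothetical.
+#
+#     import sys, unittest
+#     from nose.failure import Failure
+#     try:
+#         import module_that_does_not_exist
+#     except ImportError:
+#         case = Failure(*sys.exc_info())
+#         # running the case re-raises the original ImportError
+#         unittest.TextTestRunner().run(unittest.TestSuite([case]))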
diff --git a/lib/spack/external/nose/importer.py b/lib/spack/external/nose/importer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e677658ce6199c71c67fb952ed2411ae7573d4c7
--- /dev/null
+++ b/lib/spack/external/nose/importer.py
@@ -0,0 +1,167 @@
+"""Implements an importer that looks only in specific path (ignoring
+sys.path), and uses a per-path cache in addition to sys.modules. This is
+necessary because test modules in different directories frequently have the
+same names, which means that the first loaded would mask the rest when using
+the builtin importer.
+"""
+import logging
+import os
+import sys
+from nose.config import Config
+
+from imp import find_module, load_module, acquire_lock, release_lock
+
+log = logging.getLogger(__name__)
+
+try:
+    _samefile = os.path.samefile
+except AttributeError:
+    def _samefile(src, dst):
+        return (os.path.normcase(os.path.realpath(src)) ==
+                os.path.normcase(os.path.realpath(dst)))
+
+
+class Importer(object):
+    """An importer class that does only path-specific imports. That
+    is, the given module is not searched for on sys.path, but only at
+    the path or in the directory specified.
+    """
+    def __init__(self, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+
+    def importFromPath(self, path, fqname):
+        """Import a dotted-name package whose tail is at path. In other words,
+        given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then
+        bar from path/to/foo/bar, returning bar.
+        """
+        # find the base dir of the package
+        path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep)
+        name_parts = fqname.split('.')
+        if path_parts[-1] == '__init__.py':
+            path_parts.pop()
+        path_parts = path_parts[:-(len(name_parts))]
+        dir_path = os.sep.join(path_parts)
+        # then import fqname starting from that dir
+        return self.importFromDir(dir_path, fqname)
+
+    def importFromDir(self, dir, fqname):
+        """Import a module *only* from path, ignoring sys.path and
+        reloading if the version in sys.modules is not the one we want.
+        """
+        dir = os.path.normpath(os.path.abspath(dir))
+        log.debug("Import %s from %s", fqname, dir)
+
+        # FIXME reimplement local per-dir cache?
+
+        # special case for __main__
+        if fqname == '__main__':
+            return sys.modules[fqname]
+
+        if self.config.addPaths:
+            add_path(dir, self.config)
+
+        path = [dir]
+        parts = fqname.split('.')
+        part_fqname = ''
+        mod = parent = fh = None
+
+        for part in parts:
+            if part_fqname == '':
+                part_fqname = part
+            else:
+                part_fqname = "%s.%s" % (part_fqname, part)
+            try:
+                acquire_lock()
+                log.debug("find module part %s (%s) in %s",
+                          part, part_fqname, path)
+                fh, filename, desc = find_module(part, path)
+                old = sys.modules.get(part_fqname)
+                if old is not None:
+                    # test modules frequently have name overlap; make sure
+                    # we get a fresh copy of anything we are trying to load
+                    # from a new path
+                    log.debug("sys.modules has %s as %s", part_fqname, old)
+                    if (self.sameModule(old, filename)
+                        or (self.config.firstPackageWins and
+                            getattr(old, '__path__', None))):
+                        mod = old
+                    else:
+                        del sys.modules[part_fqname]
+                        mod = load_module(part_fqname, fh, filename, desc)
+                else:
+                    mod = load_module(part_fqname, fh, filename, desc)
+            finally:
+                if fh:
+                    fh.close()
+                release_lock()
+            if parent:
+                setattr(parent, part, mod)
+            if hasattr(mod, '__path__'):
+                path = mod.__path__
+            parent = mod
+        return mod
+
+    def _dirname_if_file(self, filename):
+        # We only take the dirname if we have a path to a non-dir,
+        # because taking the dirname of a symlink to a directory does not
+        # give the actual directory parent.
+        if os.path.isdir(filename):
+            return filename
+        else:
+            return os.path.dirname(filename)
+
+    def sameModule(self, mod, filename):
+        mod_paths = []
+        if hasattr(mod, '__path__'):
+            for path in mod.__path__:
+                mod_paths.append(self._dirname_if_file(path))
+        elif hasattr(mod, '__file__'):
+            mod_paths.append(self._dirname_if_file(mod.__file__))
+        else:
+            # builtin or other module-like object that
+            # doesn't have __file__; must be new
+            return False
+        new_path = self._dirname_if_file(filename)
+        for mod_path in mod_paths:
+            log.debug(
+                "module already loaded? mod: %s new: %s",
+                mod_path, new_path)
+            if _samefile(mod_path, new_path):
+                return True
+        return False
+
+
+def add_path(path, config=None):
+    """Ensure that the path, or the root of the current package (if
+    path is in a package), is in sys.path.
+    """
+
+    # FIXME add any src-looking dirs seen too... need to get config for that
+
+    log.debug('Add path %s' % path)
+    if not path:
+        return []
+    added = []
+    parent = os.path.dirname(path)
+    if (parent
+        and os.path.exists(os.path.join(path, '__init__.py'))):
+        added.extend(add_path(parent, config))
+    elif not path in sys.path:
+        log.debug("insert %s into sys.path", path)
+        sys.path.insert(0, path)
+        added.append(path)
+    if config and config.srcDirs:
+        for dirname in config.srcDirs:
+            dirpath = os.path.join(path, dirname)
+            if os.path.isdir(dirpath):
+                sys.path.insert(0, dirpath)
+                added.append(dirpath)
+    return added
+
+
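+# Editorial usage sketch (not part of the vendored module): importing a
+# test module from an explicit directory, ignoring sys.path.  The paths
+# and names below are hypothetical; importFromPath expects any package
+# directories on the way down to contain __init__.py.
+#
+#     from nose.importer import Importer
+#     imp = Importer()
+#     mod = imp.importFromPath('/projects/a/tests/test_mod.py',
+#                              'tests.test_mod')
+#     print mod.__name__        # -> tests.test_mod
+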
+def remove_path(path):
+    log.debug('Remove path %s' % path)
+    if path in sys.path:
+        sys.path.remove(path)
diff --git a/lib/spack/external/nose/inspector.py b/lib/spack/external/nose/inspector.py
new file mode 100644
index 0000000000000000000000000000000000000000..a6c4a3e3b6a7a8881d0869c81177518f2fb61897
--- /dev/null
+++ b/lib/spack/external/nose/inspector.py
@@ -0,0 +1,207 @@
+"""Simple traceback introspection. Used to add additional information to
+AssertionErrors in tests, so that failure messages may be more informative.
+"""
+import inspect
+import logging
+import re
+import sys
+import textwrap
+import tokenize
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+def inspect_traceback(tb):
+    """Inspect a traceback and its frame, returning source for the expression
+    where the exception was raised, with simple variable replacement performed
+    and the line on which the exception was raised marked with '>>'
+    """
+    log.debug('inspect traceback %s', tb)
+
+    # we only want the innermost frame, where the exception was raised
+    while tb.tb_next:
+        tb = tb.tb_next
+        
+    frame = tb.tb_frame
+    lines, exc_line = tbsource(tb)
+        
+    # figure out the set of lines to grab.
+    inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
+    src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+    exp = Expander(frame.f_locals, frame.f_globals)
+
+    while inspect_lines:
+        try:
+            for tok in tokenize.generate_tokens(src.readline):
+                exp(*tok)
+        except tokenize.TokenError, e:
+            # this can happen if our inspectable region happens to butt up
+            # against the end of a construct like a docstring with the closing
+            # """ on separate line
+            log.debug("Tokenizer error: %s", e)
+            inspect_lines.pop(0)
+            mark_line -= 1
+            src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+            exp = Expander(frame.f_locals, frame.f_globals)
+            continue
+        break
+    padded = []
+    if exp.expanded_source:
+        exp_lines = exp.expanded_source.split('\n')
+        ep = 0
+        for line in exp_lines:
+            if ep == mark_line:
+                padded.append('>>  ' + line)
+            else:
+                padded.append('    ' + line)
+            ep += 1
+    return '\n'.join(padded)
+
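+# Editorial usage sketch (not part of the vendored module): rendering the
+# source around a failed assertion, with locals expanded and the failing
+# line marked with '>>'.
+#
+#     import sys
+#     from nose.inspector import inspect_traceback
+#     try:
+#         x = 1
+#         assert x == 2
+#     except AssertionError:
+#         print inspect_traceback(sys.exc_info()[2])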
+
+def tbsource(tb, context=6):
+    """Get source from  a traceback object.
+
+    A tuple of two things is returned: a list of lines of context from
+    the source code, and the index of the current line within that list.
+    The optional second argument specifies the number of lines of context
+    to return, which are centered around the current line.
+
+    .. Note ::
+       This is adapted from inspect.py in the python 2.4 standard library, 
+       since a bug in the 2.3 version of inspect prevents it from correctly
+       locating source lines in a traceback frame.
+    """
+    
+    lineno = tb.tb_lineno
+    frame = tb.tb_frame
+
+    if context > 0:
+        start = lineno - 1 - context//2
+        log.debug("lineno: %s start: %s", lineno, start)
+        
+        try:
+            lines, dummy = inspect.findsource(frame)
+        except IOError:
+            lines, index = [''], 0
+        else:
+            all_lines = lines
+            start = max(start, 1)
+            start = max(0, min(start, len(lines) - context))
+            lines = lines[start:start+context]
+            index = lineno - 1 - start
+            
+            # python 2.5 compat: if previous line ends in a continuation,
+            # decrement start by 1 to match 2.4 behavior                
+            if sys.version_info >= (2, 5) and index > 0:
+                while lines[index-1].strip().endswith('\\'):
+                    start -= 1
+                    lines = all_lines[start:start+context]
+    else:
+        lines, index = [''], 0
+    log.debug("tbsource lines '''%s''' around index %s", lines, index)
+    return (lines, index)    
+
+    
+def find_inspectable_lines(lines, pos):
+    """Find lines in home that are inspectable.
+    
+    Walk back from the err line up to 3 lines, but don't walk back over
+    changes in indent level.
+
+    Walk forward up to 3 lines, counting \ separated lines as 1. Don't walk
+    over changes in indent level (unless part of an extended line)
+    """
+    cnt = re.compile(r'\\[\s\n]*$')
+    df = re.compile(r':[\s\n]*$')
+    ind = re.compile(r'^(\s*)')
+    toinspect = []
+    home = lines[pos]
+    home_indent = ind.match(home).groups()[0]
+    
+    before = lines[max(pos-3, 0):pos]
+    before.reverse()
+    after = lines[pos+1:min(pos+4, len(lines))]
+
+    for line in before:
+        if ind.match(line).groups()[0] == home_indent:
+            toinspect.append(line)
+        else:
+            break
+    toinspect.reverse()
+    toinspect.append(home)
+    home_pos = len(toinspect)-1
+    continued = cnt.search(home)
+    for line in after:
+        if ((continued or ind.match(line).groups()[0] == home_indent)
+            and not df.search(line)):
+            toinspect.append(line)
+            continued = cnt.search(line)
+        else:
+            break
+    log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
+    return toinspect, home_pos
+
+
+class Expander:
+    """Simple expression expander. Uses tokenize to find the names and
+    expands any that can be looked up in the frame.
+    """
+    def __init__(self, locals, globals):
+        self.locals = locals
+        self.globals = globals
+        self.lpos = None
+        self.expanded_source = ''
+         
+    def __call__(self, ttype, tok, start, end, line):
+        # TODO
+        # deal with unicode properly
+        
+        # TODO
+        # Dealing with instance members
+        #   always keep the last thing seen  
+        #   if the current token is a dot,
+        #      get ready to getattr(lastthing, this thing) on the
+        #      next call.
+        
+        if self.lpos is not None:
+            if start[1] >= self.lpos:
+                self.expanded_source += ' ' * (start[1]-self.lpos)
+            elif start[1] < self.lpos:
+                # newline, indent correctly
+                self.expanded_source += ' ' * start[1]
+        self.lpos = end[1]
+      
+        if ttype == tokenize.INDENT:
+            pass
+        elif ttype == tokenize.NAME:
+            # Clean this junk up
+            try:
+                val = self.locals[tok]
+                if callable(val):
+                    val = tok
+                else:
+                    val = repr(val)
+            except KeyError:
+                try:
+                    val = self.globals[tok]
+                    if callable(val):
+                        val = tok
+                    else:
+                        val = repr(val)
+
+                except KeyError:
+                    val = tok
+            # FIXME... not sure how to handle things like funcs, classes
+            # FIXME this is broken for some unicode strings
+            self.expanded_source += val
+        else:
+            self.expanded_source += tok
+        # if this is the end of the line and the line ends with
+        # \, then tack a \ and newline onto the output
+        # print line[end[1]:]
+        if re.match(r'\s+\\\n', line[end[1]:]):
+            self.expanded_source += ' \\\n'
diff --git a/lib/spack/external/nose/loader.py b/lib/spack/external/nose/loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..3744e54ff699885b2bd63dc669ba2780e9edb283
--- /dev/null
+++ b/lib/spack/external/nose/loader.py
@@ -0,0 +1,623 @@
+"""
+Test Loader
+-----------
+
+nose's test loader implements the same basic functionality as its
+superclass, unittest.TestLoader, but extends it by more liberal
+interpretations of what may be a test and how a test may be named.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+import types
+from inspect import isfunction
+from nose.pyversion import unbound_method, ismethod
+from nose.case import FunctionTestCase, MethodTestCase
+from nose.failure import Failure
+from nose.config import Config
+from nose.importer import Importer, add_path, remove_path
+from nose.selector import defaultSelector, TestAddress
+from nose.util import func_lineno, getpackage, isclass, isgenerator, \
+    ispackage, regex_last_key, resolve_name, transplant_func, \
+    transplant_class, test_address
+from nose.suite import ContextSuiteFactory, ContextList, LazySuite
+from nose.pyversion import sort_list, cmp_to_key
+
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# for efficiency and easier mocking
+op_normpath = os.path.normpath
+op_abspath = os.path.abspath
+op_join = os.path.join
+op_isdir = os.path.isdir
+op_isfile = os.path.isfile
+
+
+__all__ = ['TestLoader', 'defaultTestLoader']
+
+
+class TestLoader(unittest.TestLoader):
+    """Test loader that extends unittest.TestLoader to:
+
+    * Load tests from test-like functions and classes that are not
+      unittest.TestCase subclasses
+    * Find and load test modules in a directory
+    * Support tests that are generators
+    * Support easy extensions of or changes to that behavior through plugins
+    """
+    config = None
+    importer = None
+    workingDir = None
+    selector = None
+    suiteClass = None
+
+    def __init__(self, config=None, importer=None, workingDir=None,
+                 selector=None):
+        """Initialize a test loader.
+
+        Parameters (all optional):
+
+        * config: provide a `nose.config.Config`_ or other config class
+          instance; if not provided a `nose.config.Config`_ with
+          default values is used.
+        * importer: provide an importer instance that implements
+          `importFromPath`. If not provided, a
+          `nose.importer.Importer`_ is used.
+        * workingDir: the directory to which file and module names are
+          relative. If not provided, assumed to be the current working
+          directory.
+        * selector: a selector class or instance. If a class is
+          provided, it will be instantiated with one argument, the
+          current config. If not provided, a `nose.selector.Selector`_
+          is used.
+        """
+        if config is None:
+            config = Config()
+        if importer is None:
+            importer = Importer(config=config)
+        if workingDir is None:
+            workingDir = config.workingDir
+        if selector is None:
+            selector = defaultSelector(config)
+        elif isclass(selector):
+            selector = selector(config)
+        self.config = config
+        self.importer = importer
+        self.workingDir = op_normpath(op_abspath(workingDir))
+        self.selector = selector
+        if config.addPaths:
+            add_path(workingDir, config)
+        self.suiteClass = ContextSuiteFactory(config=config)
+
+        self._visitedPaths = set([])
+
+        unittest.TestLoader.__init__(self)
+
+    def getTestCaseNames(self, testCaseClass):
+        """Override to select with selector, unless
+        config.getTestCaseNamesCompat is True
+        """
+        if self.config.getTestCaseNamesCompat:
+            return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
+
+        def wanted(attr, cls=testCaseClass, sel=self.selector):
+            item = getattr(cls, attr, None)
+            if isfunction(item):
+                item = unbound_method(cls, item)
+            elif not ismethod(item):
+                return False
+            return sel.wantMethod(item)
+
+        cases = filter(wanted, dir(testCaseClass))
+
+        # add runTest if nothing else picked
+        if not cases and hasattr(testCaseClass, 'runTest'):
+            cases = ['runTest']
+        if self.sortTestMethodsUsing:
+            sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
+        return cases
+
+    def _haveVisited(self, path):
+        # For cases where path is None, we always pretend we haven't visited
+        # them.
+        if path is None:
+            return False
+
+        return path in self._visitedPaths
+
+    def _addVisitedPath(self, path):
+        if path is not None:
+            self._visitedPaths.add(path)
+
+    def loadTestsFromDir(self, path):
+        """Load tests from the directory at path. This is a generator
+        -- each suite of tests from a module or other file is yielded
+        and is expected to be executed before the next file is
+        examined.
+        """
+        log.debug("load from dir %s", path)
+        plugins = self.config.plugins
+        plugins.beforeDirectory(path)
+        if self.config.addPaths:
+            paths_added = add_path(path, self.config)
+
+        entries = os.listdir(path)
+        sort_list(entries, regex_last_key(self.config.testMatch))
+        for entry in entries:
+            # this hard-coded initial-dot test will be removed:
+            # http://code.google.com/p/python-nose/issues/detail?id=82
+            if entry.startswith('.'):
+                continue
+            entry_path = op_abspath(op_join(path, entry))
+            is_file = op_isfile(entry_path)
+            wanted = False
+            if is_file:
+                is_dir = False
+                wanted = self.selector.wantFile(entry_path)
+            else:
+                is_dir = op_isdir(entry_path)
+                if is_dir:
+                    # this hard-coded initial-underscore test will be removed:
+                    # http://code.google.com/p/python-nose/issues/detail?id=82
+                    if entry.startswith('_'):
+                        continue
+                    wanted = self.selector.wantDirectory(entry_path)
+            is_package = ispackage(entry_path)
+
+            # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
+            # As a result, parent paths that have a segment with the same
+            # basename as our package can end up in module.__path__.  So we
+            # keep track of what we've visited, and do not revisit them.
+            if wanted and not self._haveVisited(entry_path):
+                self._addVisitedPath(entry_path)
+                if is_file:
+                    plugins.beforeContext()
+                    if entry.endswith('.py'):
+                        yield self.loadTestsFromName(
+                            entry_path, discovered=True)
+                    else:
+                        yield self.loadTestsFromFile(entry_path)
+                    plugins.afterContext()
+                elif is_package:
+                    # Load the entry as a package: given the full path,
+                    # loadTestsFromName() will figure it out
+                    yield self.loadTestsFromName(
+                        entry_path, discovered=True)
+                else:
+                    # Another test dir in this one: recurse lazily
+                    yield self.suiteClass(
+                        lambda: self.loadTestsFromDir(entry_path))
+        tests = []
+        for test in plugins.loadTestsFromDir(path):
+            tests.append(test)
+        # TODO: is this try/except needed?
+        try:
+            if tests:
+                yield self.suiteClass(tests)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            yield self.suiteClass([Failure(*sys.exc_info())])
+
+        # pop paths
+        if self.config.addPaths:
+            for p in paths_added:
+                remove_path(p)
+        plugins.afterDirectory(path)
+
+    def loadTestsFromFile(self, filename):
+        """Load tests from a non-module file. Default is to raise a
+        ValueError; plugins may implement `loadTestsFromFile` to
+        provide a list of tests loaded from the file.
+        """
+        log.debug("Load from non-module file %s", filename)
+        try:
+            tests = [test for test in
+                     self.config.plugins.loadTestsFromFile(filename)]
+            if tests:
+                # Plugins can yield False to indicate that they were
+                # unable to load tests from a file, but it was not an
+                # error -- the file just had no tests to load.
+                tests = filter(None, tests)
+                return self.suiteClass(tests)
+            else:
+                # Nothing was able to even try to load from this file
+                open(filename, 'r').close() # trigger os error
+                raise ValueError("Unable to load tests from file %s"
+                                 % filename)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            return self.suiteClass(
+                [Failure(exc[0], exc[1], exc[2],
+                         address=(filename, None, None))])
+
+    def loadTestsFromGenerator(self, generator, module):
+        """Lazy-load tests from a generator function. The generator function
+        may yield either:
+
+        * a callable, or
+        * a function name resolvable within the same module
+        """
+        def generate(g=generator, m=module):
+            try:
+                for test in g():
+                    test_func, arg = self.parseGeneratedTest(test)
+                    if not callable(test_func):
+                        test_func = getattr(m, test_func)
+                    yield FunctionTestCase(test_func, arg=arg, descriptor=g)
+            except KeyboardInterrupt:
+                raise
+            except:
+                exc = sys.exc_info()
+                yield Failure(exc[0], exc[1], exc[2],
+                              address=test_address(generator))
+        return self.suiteClass(generate, context=generator, can_split=False)
+
+    def loadTestsFromGeneratorMethod(self, generator, cls):
+        """Lazy-load tests from a generator method.
+
+        This is more complicated than loading from a generator function,
+        since a generator method may yield:
+
+        * a function
+        * a bound or unbound method, or
+        * a method name
+        """
+        # convert the unbound generator method
+        # into a bound method so it can be called below
+        if hasattr(generator, 'im_class'):
+            cls = generator.im_class
+        inst = cls()
+        method = generator.__name__
+        generator = getattr(inst, method)
+
+        def generate(g=generator, c=cls):
+            try:
+                for test in g():
+                    test_func, arg = self.parseGeneratedTest(test)
+                    if not callable(test_func):
+                        test_func = unbound_method(c, getattr(c, test_func))
+                    if ismethod(test_func):
+                        yield MethodTestCase(test_func, arg=arg, descriptor=g)
+                    elif callable(test_func):
+                        # In this case we're forcing the 'MethodTestCase'
+                        # to run the inline function as its test call,
+                        # but using the generator method as the 'method of
+                        # record' (so no need to pass it as the descriptor)
+                        yield MethodTestCase(g, test=test_func, arg=arg)
+                    else:
+                        yield Failure(
+                            TypeError,
+                            "%s is not a callable or method" % test_func)
+            except KeyboardInterrupt:
+                raise
+            except:
+                exc = sys.exc_info()
+                yield Failure(exc[0], exc[1], exc[2],
+                              address=test_address(generator))
+        return self.suiteClass(generate, context=generator, can_split=False)
+
+    def loadTestsFromModule(self, module, path=None, discovered=False):
+        """Load all tests from module and return a suite containing
+        them. If the module has been discovered and is not test-like,
+        the suite will be empty by default, though plugins may add
+        their own tests.
+        """
+        log.debug("Load from module %s", module)
+        tests = []
+        test_classes = []
+        test_funcs = []
+        # For *discovered* modules, we only load tests when the module looks
+        # testlike. For modules we've been directed to load, we always
+        # look for tests. (discovered is set to True by loadTestsFromDir)
+        if not discovered or self.selector.wantModule(module):
+            for item in dir(module):
+                test = getattr(module, item, None)
+                # print "Check %s (%s) in %s" % (item, test, module.__name__)
+                if isclass(test):
+                    if self.selector.wantClass(test):
+                        test_classes.append(test)
+                elif isfunction(test) and self.selector.wantFunction(test):
+                    test_funcs.append(test)
+            sort_list(test_classes, lambda x: x.__name__)
+            sort_list(test_funcs, func_lineno)
+            tests = map(lambda t: self.makeTest(t, parent=module),
+                        test_classes + test_funcs)
+
+        # Now, descend into packages
+        # FIXME can or should this be lazy?
+        # is this syntax 2.2 compatible?
+        module_paths = getattr(module, '__path__', [])
+
+        if path:
+            path = os.path.normcase(os.path.realpath(path))
+
+        for module_path in module_paths:
+            log.debug("Load tests from module path %s?", module_path)
+            log.debug("path: %s os.path.realpath(%s): %s",
+                      path, os.path.normcase(module_path),
+                      os.path.realpath(os.path.normcase(module_path)))
+            if (self.config.traverseNamespace or not path) or \
+                    os.path.realpath(
+                        os.path.normcase(module_path)).startswith(path):
+                # Egg files can be on sys.path, so make sure the path is a
+                # directory before trying to load from it.
+                if os.path.isdir(module_path):
+                    tests.extend(self.loadTestsFromDir(module_path))
+
+        for test in self.config.plugins.loadTestsFromModule(module, path):
+            tests.append(test)
+
+        return self.suiteClass(ContextList(tests, context=module))
+
+    def loadTestsFromName(self, name, module=None, discovered=False):
+        """Load tests from the entity with the given name.
+
+        The name may indicate a file, directory, module, or any object
+        within a module. See `nose.util.split_test_name` for details on
+        test name parsing.
+        """
+        # FIXME refactor this method into little bites?
+        log.debug("load from %s (%s)", name, module)
+
+        suite = self.suiteClass
+
+        # give plugins first crack
+        plug_tests = self.config.plugins.loadTestsFromName(name, module)
+        if plug_tests:
+            return suite(plug_tests)
+
+        addr = TestAddress(name, workingDir=self.workingDir)
+        if module:
+            # Two cases:
+            #  name is class.foo
+            #    The addr will be incorrect, since it thinks class.foo is
+            #    a dotted module name. It's actually a dotted attribute
+            #    name. In this case we want to use the full submitted
+            #    name as the name to load from the module.
+            #  name is module:class.foo
+            #    The addr will be correct. The part we want is the part after
+            #    the :, which is in addr.call.
+            if addr.call:
+                name = addr.call
+            parent, obj = self.resolve(name, module)
+            if (isclass(parent)
+                and getattr(parent, '__module__', None) != module.__name__
+                and not isinstance(obj, Failure)):
+                parent = transplant_class(parent, module.__name__)
+                obj = getattr(parent, obj.__name__)
+            log.debug("parent %s obj %s module %s", parent, obj, module)
+            if isinstance(obj, Failure):
+                return suite([obj])
+            else:
+                return suite(ContextList([self.makeTest(obj, parent)],
+                                         context=parent))
+        else:
+            if addr.module:
+                try:
+                    if addr.filename is None:
+                        module = resolve_name(addr.module)
+                    else:
+                        self.config.plugins.beforeImport(
+                            addr.filename, addr.module)
+                        # FIXME: to support module.name names,
+                        # do what resolve-name does and keep trying to
+                        # import, popping tail of module into addr.call,
+                        # until we either get an import or run out of
+                        # module parts
+                        try:
+                            module = self.importer.importFromPath(
+                                addr.filename, addr.module)
+                        finally:
+                            self.config.plugins.afterImport(
+                                addr.filename, addr.module)
+                except (KeyboardInterrupt, SystemExit):
+                    raise
+                except:
+                    exc = sys.exc_info()
+                    return suite([Failure(exc[0], exc[1], exc[2],
+                                          address=addr.totuple())])
+                if addr.call:
+                    return self.loadTestsFromName(addr.call, module)
+                else:
+                    return self.loadTestsFromModule(
+                        module, addr.filename,
+                        discovered=discovered)
+            elif addr.filename:
+                path = addr.filename
+                if addr.call:
+                    package = getpackage(path)
+                    if package is None:
+                        return suite([
+                            Failure(ValueError,
+                                    "Can't find callable %s in file %s: "
+                                    "file is not a python module" %
+                                    (addr.call, path),
+                                    address=addr.totuple())])
+                    return self.loadTestsFromName(addr.call, module=package)
+                else:
+                    if op_isdir(path):
+                        # In this case we *can* be lazy since we know
+                        # that each module in the dir will be fully
+                        # loaded before its tests are executed; we
+                        # also know that we're not going to be asked
+                        # to load from . and ./some_module.py *as part
+                        # of this named test load*
+                        return LazySuite(
+                            lambda: self.loadTestsFromDir(path))
+                    elif op_isfile(path):
+                        return self.loadTestsFromFile(path)
+                    else:
+                        return suite([
+                                Failure(OSError, "No such file %s" % path,
+                                        address=addr.totuple())])
+            else:
+                # just a function? what to do? I think it can only be
+                # handled when module is not None
+                return suite([
+                    Failure(ValueError, "Unresolvable test name %s" % name,
+                            address=addr.totuple())])
+
+    def loadTestsFromNames(self, names, module=None):
+        """Load tests from all names, returning a suite containing all
+        tests.
+        """
+        plug_res = self.config.plugins.loadTestsFromNames(names, module)
+        if plug_res:
+            suite, names = plug_res
+            if suite:
+                return self.suiteClass([
+                    self.suiteClass(suite),
+                    unittest.TestLoader.loadTestsFromNames(self, names, module)
+                    ])
+        return unittest.TestLoader.loadTestsFromNames(self, names, module)
+
+    def loadTestsFromTestCase(self, testCaseClass):
+        """Load tests from a unittest.TestCase subclass.
+        """
+        cases = []
+        plugins = self.config.plugins
+        for case in plugins.loadTestsFromTestCase(testCaseClass):
+            cases.append(case)
+        # For efficiency in the most common case, just call and return from
+        # super. This avoids having to extract cases and rebuild a context
+        # suite when there are no plugin-contributed cases.
+        if not cases:
+            return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
+        cases.extend(
+            [case for case in
+             super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
+        return self.suiteClass(cases)
+
+    def loadTestsFromTestClass(self, cls):
+        """Load tests from a test class that is *not* a unittest.TestCase
+        subclass.
+
+        In this case, we can't depend on the class's `__init__` taking method
+        name arguments, so we have to compose a MethodTestCase for each
+        method in the class that looks testlike.
+        """
+        def wanted(attr, cls=cls, sel=self.selector):
+            item = getattr(cls, attr, None)
+            if isfunction(item):
+                item = unbound_method(cls, item)
+            elif not ismethod(item):
+                return False
+            return sel.wantMethod(item)
+        cases = [self.makeTest(getattr(cls, case), cls)
+                 for case in filter(wanted, dir(cls))]
+        for test in self.config.plugins.loadTestsFromTestClass(cls):
+            cases.append(test)
+        return self.suiteClass(ContextList(cases, context=cls))
+
+    def makeTest(self, obj, parent=None):
+        try:
+            return self._makeTest(obj, parent)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            try:
+                addr = test_address(obj)
+            except KeyboardInterrupt:
+                raise
+            except:
+                addr = None
+            return Failure(exc[0], exc[1], exc[2], address=addr)
+
+    def _makeTest(self, obj, parent=None):
+        """Given a test object and its parent, return a test case
+        or test suite.
+        """
+        plug_tests = []
+        try:
+            addr = test_address(obj)
+        except KeyboardInterrupt:
+            raise
+        except:
+            addr = None
+        for test in self.config.plugins.makeTest(obj, parent):
+            plug_tests.append(test)
+        # TODO: is this try/except needed?
+        try:
+            if plug_tests:
+                return self.suiteClass(plug_tests)
+        except (KeyboardInterrupt, SystemExit):
+            raise
+        except:
+            exc = sys.exc_info()
+            return Failure(exc[0], exc[1], exc[2], address=addr)
+
+        if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
+            # This is a Python 3.x 'unbound method'.  Wrap it with its
+            # associated class.
+            obj = unbound_method(parent, obj)
+
+        if isinstance(obj, unittest.TestCase):
+            return obj
+        elif isclass(obj):
+            if parent and obj.__module__ != parent.__name__:
+                obj = transplant_class(obj, parent.__name__)
+            if issubclass(obj, unittest.TestCase):
+                return self.loadTestsFromTestCase(obj)
+            else:
+                return self.loadTestsFromTestClass(obj)
+        elif ismethod(obj):
+            if parent is None:
+                parent = obj.__class__
+            if issubclass(parent, unittest.TestCase):
+                return parent(obj.__name__)
+            else:
+                if isgenerator(obj):
+                    return self.loadTestsFromGeneratorMethod(obj, parent)
+                else:
+                    return MethodTestCase(obj)
+        elif isfunction(obj):
+            if parent and obj.__module__ != parent.__name__:
+                obj = transplant_func(obj, parent.__name__)
+            if isgenerator(obj):
+                return self.loadTestsFromGenerator(obj, parent)
+            else:
+                return FunctionTestCase(obj)
+        else:
+            return Failure(TypeError,
+                           "Can't make a test from %s" % obj,
+                           address=addr)
+
+    def resolve(self, name, module):
+        """Resolve name within module
+        """
+        obj = module
+        parts = name.split('.')
+        for part in parts:
+            parent, obj = obj, getattr(obj, part, None)
+        if obj is None:
+            # no such test
+            obj = Failure(ValueError, "No such test %s" % name)
+        return parent, obj
+
+    def parseGeneratedTest(self, test):
+        """Given the yield value of a test generator, return a func and args.
+
+        This is used in the two loadTestsFromGenerator* methods.
+
+        """
+        if not isinstance(test, tuple):         # yield test
+            test_func, arg = (test, tuple())
+        elif len(test) == 1:                    # yield (test,)
+            test_func, arg = (test[0], tuple())
+        else:                                   # yield test, foo, bar, ...
+            assert len(test) > 1 # sanity check
+            test_func, arg = (test[0], test[1:])
+        return test_func, arg
+
+defaultTestLoader = TestLoader
+
diff --git a/lib/spack/external/nose/plugins/__init__.py b/lib/spack/external/nose/plugins/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..08ee8f3230b2228be5df98c0b07c79fd7209b3b7
--- /dev/null
+++ b/lib/spack/external/nose/plugins/__init__.py
@@ -0,0 +1,190 @@
+"""
+Writing Plugins
+---------------
+
+nose supports plugins for test collection, selection, observation and
+reporting. There are two basic rules for plugins:
+
+* Plugin classes should subclass :class:`nose.plugins.Plugin`.
+
+* Plugins may implement any of the methods described in the class
+  :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
+  this class is for documentary purposes only; plugins may not subclass
+  IPluginInterface.
+
+Hello World
+===========
+
+Here's a basic plugin.  It doesn't do much so read on for more ideas or dive
+into the :doc:`IPluginInterface <interface>` to see all available hooks.
+
+.. code-block:: python
+
+    import logging
+    import os
+
+    from nose.plugins import Plugin
+
+    log = logging.getLogger('nose.plugins.helloworld')
+
+    class HelloWorld(Plugin):
+        name = 'helloworld'
+
+        def options(self, parser, env=os.environ):
+            super(HelloWorld, self).options(parser, env=env)
+
+        def configure(self, options, conf):
+            super(HelloWorld, self).configure(options, conf)
+            if not self.enabled:
+                return
+
+        def finalize(self, result):
+            log.info('Hello pluginized world!')
+
+Registering
+===========
+
+.. Note::
+  Important note: the following applies only to the default
+  plugin manager. Other plugin managers may use different means to
+  locate and load plugins.
+
+For nose to find a plugin, it must be part of a package that uses
+setuptools_, and the plugin must be included in the entry points defined
+in the setup.py for the package:
+
+.. code-block:: python
+
+    setup(name='Some plugin',
+        # ...
+        entry_points = {
+            'nose.plugins.0.10': [
+                'someplugin = someplugin:SomePlugin'
+                ]
+            },
+        # ...
+        )
+
+Once the package is installed with install or develop, nose will be able
+to load the plugin.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+Registering a plugin without setuptools
+=======================================
+
+It is currently possible to register a plugin programmatically by
+creating a custom nose runner like this:
+
+.. code-block:: python
+
+    import nose
+    from yourplugin import YourPlugin
+
+    if __name__ == '__main__':
+        nose.main(addplugins=[YourPlugin()])
+
+Defining options
+================
+
+All plugins must implement the methods ``options(self, parser, env)``
+and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
+that want the standard options should call the superclass methods.
+
+nose uses optparse.OptionParser from the standard library to parse
+arguments. A plugin's ``options()`` method receives a parser
+instance. It's good form for a plugin to use that instance only to add
+long-form options (``--like-this``). Most
+of nose's built-in arguments get their default value from an environment
+variable.
+
+A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
+object, as well as the current config object. Plugins should configure their
+behavior based on the user-selected settings, and may raise exceptions
+if the configured behavior is nonsensical.
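+
+As a minimal sketch (the plugin, option, and environment variable names
+below are invented for illustration):
+
+.. code-block:: python
+
+    import os
+    from nose.plugins import Plugin
+
+    class Example(Plugin):
+        name = 'example'
+
+        def options(self, parser, env=os.environ):
+            super(Example, self).options(parser, env=env)
+            parser.add_option('--example-label', dest='example_label',
+                              default=env.get('NOSE_EXAMPLE_LABEL'),
+                              help='Set a label [NOSE_EXAMPLE_LABEL]')
+
+        def configure(self, options, conf):
+            super(Example, self).configure(options, conf)
+            if self.enabled:
+                self.label = options.example_label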
+
+Logging
+=======
+
+nose uses the logging classes from the standard library. To enable users
+to view debug messages easily, plugins should use ``logging.getLogger()`` to
+acquire a logger in the ``nose.plugins`` namespace.
+
+Recipes
+=======
+
+* Writing a plugin that monitors or controls test result output
+
+  Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
+  results. If you also want to monitor output, implement
+  ``setOutputStream`` and keep a reference to the output stream. If you
+  want to prevent the builtin ``TextTestResult`` output, implement
+   ``setOutputStream`` and *return a dummy stream*. The default output will go
+  to the dummy stream, while you send your desired output to the real stream.
+
+  Example: `examples/html_plugin/htmlplug.py`_
+
+* Writing a plugin that handles exceptions
+
+  Subclass :doc:`ErrorClassPlugin <errorclasses>`.
+
+  Examples: :doc:`nose.plugins.deprecated <deprecated>`,
+  :doc:`nose.plugins.skip <skip>`
+
+* Writing a plugin that adds detail to error reports
+
+  Implement ``formatError`` and/or ``formatFailure``. The error tuple
+  you return (error class, error message, traceback) will replace the
+  original error tuple.
+
+  Examples: :doc:`nose.plugins.capture <capture>`,
+  :doc:`nose.plugins.failuredetail <failuredetail>`
+
+* Writing a plugin that loads tests from files other than python modules
+
+  Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
+  return True for files that you want to examine for tests. In
+  ``loadTestsFromFile``, for those files, return an iterable
+  containing TestCases (or yield them as you find them;
+  ``loadTestsFromFile`` may also be a generator). A sketch of such a
+  plugin follows this list of recipes.
+
+  Example: :doc:`nose.plugins.doctests <doctests>`
+
+* Writing a plugin that prints a report
+
+  Implement ``begin`` if you need to perform setup before testing
+  begins. Implement ``report`` and output your report to the provided stream.
+
+  Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
+
+* Writing a plugin that selects or rejects tests
+
+  Implement any or all ``want*``  methods. Return False to reject the test
+  candidate, True to accept it -- which  means that the test candidate
+  will pass through the rest of the system, so you must be prepared to
+  load tests from it if tests can't be loaded by the core loader or
+  another plugin -- and None if you don't care.
+
+  Examples: :doc:`nose.plugins.attrib <attrib>`,
+  :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
+
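+As promised above, here is a sketch of a plugin that loads tests from
+non-module files (all names are invented; error handling is omitted):
+
+.. code-block:: python
+
+    import unittest
+    from nose.plugins import Plugin
+
+    class TxtFiles(Plugin):
+        name = 'txtfiles'
+
+        def wantFile(self, file):
+            # claim .txt files for this plugin
+            if file.endswith('.txt'):
+                return True
+
+        def loadTestsFromFile(self, filename):
+            if not filename.endswith('.txt'):
+                return
+            class TxtCase(unittest.TestCase):
+                def runTest(self):
+                    # the "test" is just that the file opens cleanly
+                    open(filename).close()
+            yield TxtCase()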
+
+More Examples
+=============
+
+See any builtin plugin or example plugin in the examples_ directory in
+the nose source distribution. There is a list of third-party plugins
+`on jottit`_.
+
+.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
+.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
+.. _on jottit: http://nose-plugins.jottit.com/
+
+"""
+from nose.plugins.base import Plugin
+from nose.plugins.manager import *
+from nose.plugins.plugintest import PluginTester
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/allmodules.py b/lib/spack/external/nose/plugins/allmodules.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ccd7773a7266a43e3b0a32010981d53940618d5
--- /dev/null
+++ b/lib/spack/external/nose/plugins/allmodules.py
@@ -0,0 +1,45 @@
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
+NOSE_ALL_MODULES environment variable to enable collection and execution of
+tests in all python modules. Normal nose behavior is to look for tests only in
+modules that match testMatch.
+
+More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
+
+.. warning ::
+
+   This plugin can have surprising interactions with plugins that load tests
+   from what nose normally considers non-test modules, such as
+   the :doc:`doctest plugin <doctests>`. This is because any given
+   object in a module can't be loaded both by a plugin and the normal nose
+   :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+   or classes in non-test modules that look like tests but aren't, you will
+   likely see errors as nose attempts to run them as tests.
+
+"""
+
+import os
+from nose.plugins.base import Plugin
+
+class AllModules(Plugin):
+    """Collect tests from all python modules.
+    """
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        env_opt = 'NOSE_ALL_MODULES'
+        parser.add_option('--all-modules',
+                          action="store_true",
+                          dest=self.enableOpt,
+                          default=env.get(env_opt),
+                          help="Enable plugin %s: %s [%s]" %
+                          (self.__class__.__name__, self.help(), env_opt))
+
+    def wantFile(self, file):
+        """Override to return True for all files ending with .py"""
+        # always want .py files
+        if file.endswith('.py'):
+            return True
+
+    def wantModule(self, module):
+        """Override return True for all modules"""
+        return True
diff --git a/lib/spack/external/nose/plugins/attrib.py b/lib/spack/external/nose/plugins/attrib.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d4422a23aec34951b74efcd1c0e4cc17c35402a
--- /dev/null
+++ b/lib/spack/external/nose/plugins/attrib.py
@@ -0,0 +1,286 @@
+"""Attribute selector plugin.
+
+Oftentimes when testing you will want to select tests based on
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
+Attribute selector plugin by setting attributes on your test methods.
+Here is an example:
+
+.. code-block:: python
+
+    def test_big_download():
+        import urllib
+        # commence slowness...
+
+    test_big_download.slow = 1
+
+Once you've assigned an attribute ``slow = 1`` you can exclude that
+test and all other tests having the slow attribute by running ::
+
+    $ nosetests -a '!slow'
+
+There is also a decorator available for you that will set attributes.
+Here's how to set ``slow=1`` like above with the decorator:
+
+.. code-block:: python
+
+    from nose.plugins.attrib import attr
+    @attr('slow')
+    def test_big_download():
+        import urllib
+        # commence slowness...
+
+And here's how to set an attribute with a specific value:
+
+.. code-block:: python
+
+    from nose.plugins.attrib import attr
+    @attr(speed='slow')
+    def test_big_download():
+        import urllib
+        # commence slowness...
+
+This test could be run with ::
+
+    $ nosetests -a speed=slow
+
+In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
+on all its test methods at once.  For example:
+
+.. code-block:: python
+
+    from nose.plugins.attrib import attr
+    @attr(speed='slow')
+    class MyTestCase:
+        def test_long_integration(self):
+            pass
+        def test_end_to_end_something(self):
+            pass
+
+Below is a reference to the different syntaxes available.
+
+Simple syntax
+-------------
+
+Examples of using the ``-a`` and ``--attr`` options:
+
+* ``nosetests -a status=stable``
+   Only runs tests with attribute "status" having value "stable"
+
+* ``nosetests -a priority=2,status=stable``
+   Runs tests having both attributes and values
+
+* ``nosetests -a priority=2 -a slow``
+   Runs tests that match either attribute
+
+* ``nosetests -a tags=http``
+   If a test's ``tags`` attribute was a list and it contained the value
+   ``http`` then it would be run
+
+* ``nosetests -a slow``
+   Runs tests with the attribute ``slow`` if its value does not equal False
+   (False, [], "", etc...)
+
+* ``nosetests -a '!slow'``
+   Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
+   attribute that is equal to False
+   **NOTE**:
+   if your shell (like bash) interprets '!' as a special character, make
+   sure to put single quotes around it.
+
+Expression Evaluation
+---------------------
+
+Examples using the ``-A`` and ``--eval-attr`` options:
+
+* ``nosetests -A "not slow"``
+  Evaluates the Python expression "not slow" and runs the test if True
+
+* ``nosetests -A "(priority > 5) and not slow"``
+  Evaluates a complex Python expression and runs the test if True
+
+"""
+import inspect
+import logging
+import os
+import sys
+from inspect import isfunction
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins.attrib')
+compat_24 = sys.version_info >= (2, 4)
+
+def attr(*args, **kwargs):
+    """Decorator that adds attributes to classes or functions
+    for use with the Attribute (-a) plugin.
+    """
+    def wrap_ob(ob):
+        for name in args:
+            setattr(ob, name, True)
+        for name, value in kwargs.iteritems():
+            setattr(ob, name, value)
+        return ob
+    return wrap_ob
+
+def get_method_attr(method, cls, attr_name, default=False):
+    """Look up an attribute on a method or function.
+    If the attribute isn't found there, look it up on the
+    method's class, if any.
+    """
+    Missing = object()
+    value = getattr(method, attr_name, Missing)
+    if value is Missing and cls is not None:
+        value = getattr(cls, attr_name, Missing)
+    if value is Missing:
+        return default
+    return value
+
+
+class ContextHelper:
+    """Object that can act as context dictionary for eval and looks up
+    names as attributes on a method/ function and its class. 
+    """
+    def __init__(self, method, cls):
+        self.method = method
+        self.cls = cls
+
+    def __getitem__(self, name):
+        return get_method_attr(self.method, self.cls, name)
+
+
+class AttributeSelector(Plugin):
+    """Selects test cases to be run based on their attributes.
+    """
+
+    def __init__(self):
+        Plugin.__init__(self)
+        self.attribs = []
+
+    def options(self, parser, env):
+        """Register command line options"""
+        parser.add_option("-a", "--attr",
+                          dest="attr", action="append",
+                          default=env.get('NOSE_ATTR'),
+                          metavar="ATTR",
+                          help="Run only tests that have attributes "
+                          "specified by ATTR [NOSE_ATTR]")
+        # disable in < 2.4: eval can't take needed args
+        if compat_24:
+            parser.add_option("-A", "--eval-attr",
+                              dest="eval_attr", metavar="EXPR", action="append",
+                              default=env.get('NOSE_EVAL_ATTR'),
+                              help="Run only tests for whose attributes "
+                              "the Python expression EXPR evaluates "
+                              "to True [NOSE_EVAL_ATTR]")
+
+    def configure(self, options, config):
+        """Configure the plugin and system, based on selected options.
+
+        attr and eval_attr may each be lists.
+
+        self.attribs will be a list of lists of tuples. In that list, each
+        list is a group of attributes, all of which must match for the rule to
+        match.
+        """
+        self.attribs = []
+
+        # handle python eval-expression parameter
+        if compat_24 and options.eval_attr:
+            eval_attr = tolist(options.eval_attr)
+            for attr in eval_attr:
+                # "<python expression>"
+                # -> eval(expr) in attribute context must be True
+                def eval_in_context(expr, obj, cls):
+                    return eval(expr, None, ContextHelper(obj, cls))
+                self.attribs.append([(attr, eval_in_context)])
+
+        # attribute requirements are a comma separated list of
+        # 'key=value' pairs
+        if options.attr:
+            std_attr = tolist(options.attr)
+            for attr in std_attr:
+                # all attributes within an attribute group must match
+                attr_group = []
+                for attrib in attr.strip().split(","):
+                    # don't die on trailing comma
+                    if not attrib:
+                        continue
+                    items = attrib.split("=", 1)
+                    if len(items) > 1:
+                        # "name=value"
+                        # -> 'str(obj.name) == value' must be True
+                        key, value = items
+                    else:
+                        key = items[0]
+                        if key[0] == "!":
+                            # "!name"
+                            # 'bool(obj.name)' must be False
+                            key = key[1:]
+                            value = False
+                        else:
+                            # "name"
+                            # -> 'bool(obj.name)' must be True
+                            value = True
+                    attr_group.append((key, value))
+                self.attribs.append(attr_group)
+        if self.attribs:
+            self.enabled = True
+
+    def validateAttrib(self, method, cls=None):
+        """Verify whether a method has the required attributes.
+        The method is considered a match if it matches all attributes
+        for any attribute group.
+        """
+        # TODO: is there a need for case-sensitive value comparison?
+        any = False
+        for group in self.attribs:
+            match = True
+            for key, value in group:
+                attr = get_method_attr(method, cls, key)
+                if callable(value):
+                    if not value(key, method, cls):
+                        match = False
+                        break
+                elif value is True:
+                    # value must exist and be True
+                    if not bool(attr):
+                        match = False
+                        break
+                elif value is False:
+                    # value must not exist or be False
+                    if bool(attr):
+                        match = False
+                        break
+                elif type(attr) in (list, tuple):
+                    # value must be found in the list attribute
+                    if not str(value).lower() in [str(x).lower()
+                                                  for x in attr]:
+                        match = False
+                        break
+                else:
+                    # value must match, convert to string and compare
+                    if (value != attr
+                        and str(value).lower() != str(attr).lower()):
+                        match = False
+                        break
+            any = any or match
+        if any:
+            # not True because we don't want to FORCE the selection of the
+            # item, only say that it is acceptable
+            return None
+        return False
+
+    def wantFunction(self, function):
+        """Accept the function if its attributes match.
+        """
+        return self.validateAttrib(function)
+
+    def wantMethod(self, method):
+        """Accept the method if its attributes match.
+        """
+        try:
+            cls = method.im_class
+        except AttributeError:
+            return False
+        return self.validateAttrib(method, cls)
diff --git a/lib/spack/external/nose/plugins/base.py b/lib/spack/external/nose/plugins/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..f09beb696f01d8b0356f0e689ec0d199de5d08a5
--- /dev/null
+++ b/lib/spack/external/nose/plugins/base.py
@@ -0,0 +1,725 @@
+import os
+import textwrap
+from optparse import OptionConflictError
+from warnings import warn
+from nose.util import tolist
+
+class Plugin(object):
+    """Base class for nose plugins. It's recommended but not *necessary* to
+    subclass this class to create a plugin, but all plugins *must* implement
+    `options(self, parser, env)` and `configure(self, options, conf)`, and
+    must have the attributes `enabled`, `name` and `score`.  The `name`
+    attribute may contain hyphens ('-').
+
+    Plugins should not be enabled by default.
+
+    Subclassing Plugin (and calling the superclass methods in
+    __init__, configure, and options, if you override them) will give
+    your plugin some friendly default behavior:
+
+    * A --with-$name option will be added to the command line interface
+      to enable the plugin, and a corresponding environment variable
+      will be used as the default value. The plugin class's docstring
+      will be used as the help for this option.
+    * The plugin will not be enabled unless this option is selected by
+      the user.
+    """
+    can_configure = False
+    enabled = False
+    enableOpt = None
+    name = None
+    score = 100
+
+    def __init__(self):
+        if self.name is None:
+            self.name = self.__class__.__name__.lower()
+        if self.enableOpt is None:
+            self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
+
+    def addOptions(self, parser, env=None):
+        """Add command-line options for this plugin.
+
+        The base plugin class adds --with-$name by default, used to enable the
+        plugin.
+
+        .. warning :: Don't implement addOptions unless you want to override
+                      all default option handling behavior, including
+                      warnings for conflicting options. Implement
+                      :meth:`options
+                      <nose.plugins.base.IPluginInterface.options>`
+                      instead.
+        """
+        self.add_options(parser, env)
+
+    def add_options(self, parser, env=None):
+        """Non-camel-case version of func name for backwards compatibility.
+
+        .. warning ::
+
+           DEPRECATED: Do not use this method,
+           use :meth:`options <nose.plugins.base.IPluginInterface.options>`
+           instead.
+
+        """
+        # FIXME raise deprecation warning if wasn't called by wrapper
+        if env is None:
+            env = os.environ
+        try:
+            self.options(parser, env)
+            self.can_configure = True
+        except OptionConflictError, e:
+            warn("Plugin %s has conflicting option string: %s and will "
+                 "be disabled" % (self, e), RuntimeWarning)
+            self.enabled = False
+            self.can_configure = False
+
+    def options(self, parser, env):
+        """Register commandline options.
+
+        Implement this method for normal options behavior with protection from
+        OptionConflictErrors. If you override this method and want the default
+        --with-$name option to be registered, be sure to call super().
+        """
+        env_opt = 'NOSE_WITH_%s' % self.name.upper()
+        env_opt = env_opt.replace('-', '_')
+        parser.add_option("--with-%s" % self.name,
+                          action="store_true",
+                          dest=self.enableOpt,
+                          default=env.get(env_opt),
+                          help="Enable plugin %s: %s [%s]" %
+                          (self.__class__.__name__, self.help(), env_opt))
+
+    def configure(self, options, conf):
+        """Configure the plugin and system, based on selected options.
+
+        The base plugin class sets the plugin to enabled if the enable option
+        for the plugin (self.enableOpt) is true.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        if hasattr(options, self.enableOpt):
+            self.enabled = getattr(options, self.enableOpt)
+
+    def help(self):
+        """Return help for this plugin. This will be output as the help
+        section of the --with-$name option that enables the plugin.
+        """
+        if self.__class__.__doc__:
+            # doc sections are often indented; compress the spaces
+            return textwrap.dedent(self.__class__.__doc__)
+        return "(no help available)"
+
+    # Compatibility shim
+    def tolist(self, val):
+        warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
+             DeprecationWarning)
+        return tolist(val)
+
+
+class IPluginInterface(object):
+    """
+    IPluginInterface describes the plugin API. Do not subclass or use this
+    class directly.
+    """
+    def __new__(cls, *arg, **kw):
+        raise TypeError("IPluginInterface class is for documentation only")
+
+    def addOptions(self, parser, env):
+        """Called to allow plugin to register command-line options with the
+        parser. DO NOT return a value from this method unless you want to stop
+        all other plugins from setting their options.
+
+        .. warning ::
+
+           DEPRECATED -- implement
+           :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
+        """
+        pass
+    add_options = addOptions
+    add_options.deprecated = True
+
+    def addDeprecated(self, test):
+        """Called when a deprecated test is seen. DO NOT return a value
+        unless you want to stop other plugins from seeing the deprecated
+        test.
+
+        .. warning :: DEPRECATED -- check error class in addError instead
+        """
+        pass
+    addDeprecated.deprecated = True
+
+    def addError(self, test, err):
+        """Called when a test raises an uncaught exception. DO NOT return a
+        value unless you want to stop other plugins from seeing that the
+        test has raised an error.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    addError.changed = True
+
+    def addFailure(self, test, err):
+        """Called when a test fails. DO NOT return a value unless you
+        want to stop other plugins from seeing that the test has failed.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    addFailure.changed = True
+
+    def addSkip(self, test):
+        """Called when a test is skipped. DO NOT return a value unless
+        you want to stop other plugins from seeing the skipped test.
+
+        .. warning:: DEPRECATED -- check error class in addError instead
+        """
+        pass
+    addSkip.deprecated = True
+
+    def addSuccess(self, test):
+        """Called when a test passes. DO NOT return a value unless you
+        want to stop other plugins from seeing the passing test.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    addSuccess.changed = True
+
+    def afterContext(self):
+        """Called after a context (generally a module) has been
+        lazy-loaded, imported, setup, had its tests loaded and
+        executed, and torn down.
+        """
+        pass
+    afterContext._new = True
+
+    def afterDirectory(self, path):
+        """Called after all tests have been loaded from directory at path
+        and run.
+
+        :param path: the directory that has finished processing
+        :type path: string
+        """
+        pass
+    afterDirectory._new = True
+
+    def afterImport(self, filename, module):
+        """Called after module is imported from filename. afterImport
+        is called even if the import failed.
+
+        :param filename: The file that was loaded
+        :type filename: string
+        :param module: The name of the module
+        :type module: string
+        """
+        pass
+    afterImport._new = True
+
+    def afterTest(self, test):
+        """Called after the test has been run and the result recorded
+        (after stopTest).
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    afterTest._new = True
+
+    def beforeContext(self):
+        """Called before a context (generally a module) is
+        examined. Because the context is not yet loaded, plugins don't
+        get to know what the context is; so any context operations
+        should use a stack that is pushed in `beforeContext` and popped
+        in `afterContext` to ensure they operate symmetrically.
+
+        `beforeContext` and `afterContext` are mainly useful for tracking
+        and restoring global state around possible changes from within a
+        context, whatever the context may be. If you need to operate on
+        contexts themselves, see `startContext` and `stopContext`, which
+        are passed the context in question, but are called after
+        it has been loaded (imported in the module case).
+        """
+        pass
+    beforeContext._new = True
+
+    def beforeDirectory(self, path):
+        """Called before tests are loaded from directory at path.
+
+        :param path: the directory that is about to be processed
+        """
+        pass
+    beforeDirectory._new = True
+
+    def beforeImport(self, filename, module):
+        """Called before module is imported from filename.
+
+        :param filename: The file that will be loaded
+        :param module: The name of the module found in file
+        :type module: string
+        """
+    beforeImport._new = True
+
+    def beforeTest(self, test):
+        """Called before the test is run (before startTest).
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    beforeTest._new = True
+
+    def begin(self):
+        """Called before any tests are collected or run. Use this to
+        perform any setup needed before testing begins.
+        """
+        pass
+
+    def configure(self, options, conf):
+        """Called after the command line has been parsed, with the
+        parsed options and the config container. Here, implement any
+        config storage or changes to state or operation that are set
+        by command line options.
+
+        DO NOT return a value from this method unless you want to
+        stop all other plugins from being configured.
+        """
+        pass
+
+    def finalize(self, result):
+        """Called after all report output, including output from all
+        plugins, has been sent to the stream. Use this to print final
+        test results or perform final cleanup. Return None to allow
+        other plugins to continue printing, or any other value to stop
+        them.
+
+        :param result: test result object
+
+        .. Note:: When tests are run under a test runner other than
+           :class:`nose.core.TextTestRunner`, such as
+           via ``python setup.py test``, this method may be called
+           **before** the default report output is sent.
+        """
+        pass
+
+    def describeTest(self, test):
+        """Return a test description.
+
+        Called by :meth:`nose.case.Test.shortDescription`.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    describeTest._new = True
+
+    def formatError(self, test, err):
+        """Called in result.addError, before plugin.addError. If you
+        want to replace or modify the error tuple, return a new error
+        tuple, otherwise return err, the original error tuple.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    formatError._new = True
+    formatError.chainable = True
+    # test arg is not chainable
+    formatError.static_args = (True, False)
+
+    def formatFailure(self, test, err):
+        """Called in result.addFailure, before plugin.addFailure. If you
+        want to replace or modify the error tuple, return a new error
+        tuple, otherwise return err, the original error tuple.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    formatFailure._new = True
+    formatFailure.chainable = True
+    # test arg is not chainable
+    formatFailure.static_args = (True, False)
+
+    def handleError(self, test, err):
+        """Called on addError. To handle the error yourself and prevent normal
+        error processing, return a true value.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    handleError._new = True
+
+    def handleFailure(self, test, err):
+        """Called on addFailure. To handle the failure yourself and
+        prevent normal failure processing, return a true value.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        :param err: sys.exc_info() tuple
+        :type err: 3-tuple
+        """
+        pass
+    handleFailure._new = True
+
+    def loadTestsFromDir(self, path):
+        """Return iterable of tests from a directory. May be a
+        generator.  Each item returned must be a runnable
+        unittest.TestCase (or subclass) instance or suite instance.
+        Return None if your plugin cannot collect any tests from
+        directory.
+
+        :param  path: The path to the directory.
+        """
+        pass
+    loadTestsFromDir.generative = True
+    loadTestsFromDir._new = True
+    
+    def loadTestsFromModule(self, module, path=None):
+        """Return iterable of tests in a module. May be a
+        generator. Each item returned must be a runnable
+        unittest.TestCase (or subclass) instance.
+        Return None if your plugin cannot
+        collect any tests from module.
+
+        :param module: The module object
+        :type module: python module
+        :param path: the path of the module to search, to distinguish from
+            namespace package modules
+
+            .. note::
+
+               NEW. The ``path`` parameter will only be passed by nose 0.11
+               or above.
+        """
+        pass
+    loadTestsFromModule.generative = True
+
+    def loadTestsFromName(self, name, module=None, importPath=None):
+        """Return tests in this file or module. Return None if you are not able
+        to load any tests, or an iterable if you are. May be a
+        generator.
+
+        :param name: The test name. May be a file or module name plus a test
+            callable. Use split_test_name to split into parts. Or it might
+            be some crazy name of your own devising, in which case, do
+            whatever you want.
+        :param module: Module from which the name is to be loaded
+        :param importPath: Path from which file (must be a python module) was
+            found
+
+            .. warning:: DEPRECATED: this argument will NOT be passed.
+        """
+        pass
+    loadTestsFromName.generative = True
+
+    def loadTestsFromNames(self, names, module=None):
+        """Return a tuple of (tests loaded, remaining names). Return
+        None if you are not able to load any tests. Multiple plugins
+        may implement loadTestsFromNames; the remaining name list from
+        each will be passed to the next as input.
+
+        :param names: List of test names.
+        :type names: iterable
+        :param module: Module from which the names are to be loaded
+        """
+        pass
+    loadTestsFromNames._new = True
+    loadTestsFromNames.chainable = True
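+
+    # Editor's note -- a sketch (not part of nose) of the chaining
+    # contract: consume the names you recognize and pass the rest through
+    # in the second slot of the tuple. ``load_perf_suite`` is a
+    # hypothetical helper, not a nose API:
+    #
+    #   def loadTestsFromNames(self, names, module=None):
+    #       mine = [n for n in names if n.startswith('perf:')]
+    #       if not mine:
+    #           return None
+    #       rest = [n for n in names if not n.startswith('perf:')]
+    #       return ([load_perf_suite(n[5:]) for n in mine], rest)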
+
+    def loadTestsFromFile(self, filename):
+        """Return tests in this file. Return None if you are not
+        interested in loading any tests, or an iterable if you are and
+        can load some. May be a generator. *If you are interested in
+        loading tests from the file and encounter no errors, but find
+        no tests, yield False or return [False].*
+
+        .. Note:: This method replaces loadTestsFromPath from the 0.9
+                  API.
+
+        :param filename: The full path to the file or directory.
+        """
+        pass
+    loadTestsFromFile.generative = True
+    loadTestsFromFile._new = True
+
+    def loadTestsFromPath(self, path):
+        """
+        .. warning:: DEPRECATED -- use loadTestsFromFile instead
+        """
+        pass
+    loadTestsFromPath.deprecated = True
+
+    def loadTestsFromTestCase(self, cls):
+        """Return tests in this test case class. Return None if you are
+        not able to load any tests, or an iterable if you are. May be a
+        generator.
+
+        :param cls: The test case class. Must be subclass of
+           :class:`unittest.TestCase`.
+        """
+        pass
+    loadTestsFromTestCase.generative = True
+
+    def loadTestsFromTestClass(self, cls):
+        """Return tests in this test class. Class will *not* be a
+        unittest.TestCase subclass. Return None if you are not able to
+        load any tests, an iterable if you are. May be a generator.
+
+        :param cls: The test case class. Must **not** be a subclass of
+           :class:`unittest.TestCase`.
+        """
+        pass
+    loadTestsFromTestClass._new = True
+    loadTestsFromTestClass.generative = True
+
+    def makeTest(self, obj, parent):
+        """Given an object and its parent, return or yield one or more
+        test cases. Each test must be a unittest.TestCase (or subclass)
+        instance. This is called before default test loading to allow
+        plugins to load an alternate test case or cases for an
+        object. May be a generator.
+
+        :param obj: The object to be made into a test
+        :param parent: The parent of obj (eg, for a method, the class)
+        """
+        pass
+    makeTest._new = True
+    makeTest.generative = True
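+
+    # Editor's note -- a sketch (not part of nose): promote callables
+    # tagged with a custom marker attribute into runnable cases. The
+    # ``smoke`` attribute is illustrative only:
+    #
+    #   import unittest
+    #
+    #   def makeTest(self, obj, parent):
+    #       if callable(obj) and getattr(obj, 'smoke', False):
+    #           yield unittest.FunctionTestCase(obj)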
+
+    def options(self, parser, env):
+        """Called to allow plugin to register command line
+        options with the parser.
+
+        DO NOT return a value from this method unless you want to stop
+        all other plugins from setting their options.
+
+        :param parser: options parser instance
+        :type parser: :class:`optparse.OptionParser`
+        :param env: environment, default is os.environ
+        """
+        pass
+    options._new = True
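+
+    # Editor's note -- the usual options/configure pairing, sketched here
+    # for illustration (not part of nose); the option and environment
+    # variable names are illustrative:
+    #
+    #   def options(self, parser, env):
+    #       parser.add_option('--with-banner', action='store_true',
+    #                         dest='banner',
+    #                         default=env.get('NOSE_WITH_BANNER'),
+    #                         help='Print a banner before the test run')
+    #
+    #   def configure(self, options, conf):
+    #       self.conf = conf
+    #       self.enabled = bool(options.banner)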
+
+    def prepareTest(self, test):
+        """Called before the test is run by the test runner. Please
+        note the article *the* in the previous sentence: prepareTest
+        is called *only once*, and is passed the test case or test
+        suite that the test runner will execute. It is *not* called
+        for each individual test case. If you return a non-None value,
+        that return value will be run as the test. Use this hook to
+        wrap or decorate the test with another function. If you need
+        to modify or wrap individual test cases, use `prepareTestCase`
+        instead.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+
+    def prepareTestCase(self, test):
+        """Prepare or wrap an individual test case. Called before
+        execution of the test. The test passed here is a
+        nose.case.Test instance; the case to be executed is in the
+        test attribute of the passed case. To modify the test to be
+        run, you should return a callable that takes one argument (the
+        test result object) -- it is recommended that you *do not*
+        side-effect the nose.case.Test instance you have been passed.
+
+        Keep in mind that when you replace the test callable you are
+        replacing the run() method of the test case -- including the
+        exception handling and result calls, etc.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    prepareTestCase._new = True
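+
+    # Editor's note -- a sketch (not part of nose) that wraps the case in
+    # a timer. The returned callable replaces run(), so it must drive the
+    # wrapped case itself rather than side-effect ``test``:
+    #
+    #   import time
+    #
+    #   def prepareTestCase(self, test):
+    #       def run(result):
+    #           start = time.time()
+    #           try:
+    #               test.test(result)
+    #           finally:
+    #               print '%s: %.3fs' % (test, time.time() - start)
+    #       return run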
+    
+    def prepareTestLoader(self, loader):
+        """Called before tests are loaded. To replace the test loader,
+        return a test loader. To allow other plugins to process the
+        test loader, return None. Only one plugin may replace the test
+        loader. Only valid when using nose.TestProgram.
+
+        :param loader: :class:`nose.loader.TestLoader` 
+             (or other loader) instance
+        """
+        pass
+    prepareTestLoader._new = True
+
+    def prepareTestResult(self, result):
+        """Called before the first test is run. To use a different
+        test result handler for all tests than the given result,
+        return a test result handler. NOTE however that this handler
+        will only be seen by tests, that is, inside of the result
+        proxy system. The TestRunner and TestProgram -- whether nose's
+        or other -- will continue to see the original result
+        handler. For this reason, it is usually better to monkeypatch
+        the result (for instance, if you want to handle some
+        exceptions in a unique way). Only one plugin may replace the
+        result, but many may monkeypatch it. If you want to
+        monkeypatch and stop other plugins from doing so, monkeypatch
+        and return the patched result.
+
+        :param result: :class:`nose.result.TextTestResult` 
+             (or other result) instance
+        """
+        pass
+    prepareTestResult._new = True
+
+    def prepareTestRunner(self, runner):
+        """Called before tests are run. To replace the test runner,
+        return a test runner. To allow other plugins to process the
+        test runner, return None. Only valid when using nose.TestProgram.
+
+        :param runner: :class:`nose.core.TextTestRunner` 
+             (or other runner) instance
+        """
+        pass
+    prepareTestRunner._new = True
+        
+    def report(self, stream):
+        """Called after all error output has been printed. Print your
+        plugin's report to the provided stream. Return None to allow
+        other plugins to print reports, any other value to stop them.
+
+        :param stream: stream object; send your output here
+        :type stream: file-like object
+        """
+        pass
+
+    def setOutputStream(self, stream):
+        """Called before test output begins. To direct test output to a
+        new stream, return a stream object, which must implement a
+        `write(msg)` method. If you only want to note the stream, not
+        capture or redirect it, then return None.
+
+        :param stream: stream object; send your output here
+        :type stream: file-like object
+        """
+
+    def startContext(self, context):
+        """Called before context setup and the running of tests in the
+        context. Note that tests have already been *loaded* from the
+        context before this call.
+
+        :param context: the context about to be setup. May be a module or
+             class, or any other object that contains tests.
+        """
+        pass
+    startContext._new = True
+    
+    def startTest(self, test):
+        """Called before each test is run. DO NOT return a value unless
+        you want to stop other plugins from seeing the test start.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+
+    def stopContext(self, context):
+        """Called after the tests in a context have run and the
+        context has been torn down.
+
+        :param context: the context that has been torn down. May be a module or
+             class, or any other object that contains tests.
+        """
+        pass
+    stopContext._new = True
+    
+    def stopTest(self, test):
+        """Called after each test is run. DO NOT return a value unless
+        you want to stop other plugins from seeing that the test has stopped.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+
+    def testName(self, test):
+        """Return a short test name. Called by `nose.case.Test.__str__`.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        pass
+    testName._new = True
+
+    def wantClass(self, cls):
+        """Return true if you want the main test selector to collect
+        tests from this class, false if you don't, and None if you don't
+        care.
+
+        :param cls: The class being examined by the selector
+        """
+        pass
+    
+    def wantDirectory(self, dirname):
+        """Return true if you want test collection to descend into this
+        directory, false if you do not, and None if you don't care.
+
+        :param dirname: Full path to directory being examined by the selector
+        """
+        pass
+    
+    def wantFile(self, file):
+        """Return true if you want to collect tests from this file,
+        false if you do not and None if you don't care.
+
+        Change from 0.9: The optional package parameter is no longer passed.
+
+        :param file: Full path to file being examined by the selector
+        """
+        pass
+    
+    def wantFunction(self, function):
+        """Return true to collect this function as a test, false to
+        prevent it from being collected, and None if you don't care.
+
+        :param function: The function object being examined by the selector
+        """
+        pass
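+
+    # Editor's note -- a sketch (not part of nose) of the three-valued
+    # selector protocol; ``smoke`` is an illustrative marker attribute:
+    #
+    #   def wantFunction(self, function):
+    #       if getattr(function, 'smoke', False):
+    #           return True
+    #       return None  # no opinion; defer to other plugins and the selector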
+    
+    def wantMethod(self, method):
+        """Return true to collect this method as a test, false to
+        prevent it from being collected, and None if you don't care.
+        
+        :param method: The method object being examined by the selector
+        :type method: unbound method
+        """    
+        pass
+    
+    def wantModule(self, module):
+        """Return true if you want to collection to descend into this
+        module, false to prevent the collector from descending into the
+        module, and None if you don't care.
+
+        :param module: The module object being examined by the selector
+        :type module: python module
+        """
+        pass
+    
+    def wantModuleTests(self, module):
+        """
+        .. warning:: DEPRECATED -- this method will not be called, it has
+                     been folded into wantModule.
+        """
+        pass
+    wantModuleTests.deprecated = True
+    
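+# Editor's note -- a complete minimal plugin against this interface,
+# sketched for illustration (not part of nose). Plugins are normally
+# discovered via setuptools entry points; addplugins is the direct route,
+# and the plugin is switched on at runtime with --with-failure-counter:
+#
+#   from nose.plugins.base import Plugin
+#
+#   class FailureCounter(Plugin):
+#       name = 'failure-counter'
+#
+#       def __init__(self):
+#           Plugin.__init__(self)
+#           self.failures = 0
+#
+#       def addFailure(self, test, err):
+#           self.failures += 1
+#
+#       def report(self, stream):
+#           stream.write('failures seen: %d\n' % self.failures)
+#
+#   if __name__ == '__main__':
+#       import nose
+#       nose.main(addplugins=[FailureCounter()])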
diff --git a/lib/spack/external/nose/plugins/builtin.py b/lib/spack/external/nose/plugins/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fcc0018adc48d2ed60f0c4a29a9d65333a315c8
--- /dev/null
+++ b/lib/spack/external/nose/plugins/builtin.py
@@ -0,0 +1,34 @@
+"""
+Lists builtin plugins.
+"""
+plugins = []
+builtins = (
+    ('nose.plugins.attrib', 'AttributeSelector'),
+    ('nose.plugins.capture', 'Capture'),
+    ('nose.plugins.logcapture', 'LogCapture'),
+    ('nose.plugins.cover', 'Coverage'),
+    ('nose.plugins.debug', 'Pdb'),
+    ('nose.plugins.deprecated', 'Deprecated'),
+    ('nose.plugins.doctests', 'Doctest'),
+    ('nose.plugins.isolate', 'IsolationPlugin'),
+    ('nose.plugins.failuredetail', 'FailureDetail'),
+    ('nose.plugins.prof', 'Profile'),
+    ('nose.plugins.skip', 'Skip'),
+    ('nose.plugins.testid', 'TestId'),
+    ('nose.plugins.multiprocess', 'MultiProcess'),
+    ('nose.plugins.xunit', 'Xunit'),
+    ('nose.plugins.allmodules', 'AllModules'),
+    ('nose.plugins.collect', 'CollectOnly'),
+    )
+
+for module, cls in builtins:
+    try:
+        plugmod = __import__(module, globals(), locals(), [cls])
+    except KeyboardInterrupt:
+        raise
+    except:
+        continue
+    plug = getattr(plugmod, cls)
+    plugins.append(plug)
+    globals()[cls] = plug
+
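+# Editor's note -- since import failures are silently skipped above, the
+# plugins that actually loaded can be inspected at runtime, e.g.:
+#
+#   from nose.plugins import builtin
+#   for plug in builtin.plugins:
+#       print plug.__module__, plug.__name__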
diff --git a/lib/spack/external/nose/plugins/capture.py b/lib/spack/external/nose/plugins/capture.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa4e5dcaaf63091102f014f758bcf6b78df5dbe6
--- /dev/null
+++ b/lib/spack/external/nose/plugins/capture.py
@@ -0,0 +1,115 @@
+"""
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
+the options ``-s`` or ``--nocapture``.
+
+:Options:
+  ``--nocapture``
+    Don't capture stdout (any stdout output will be printed immediately)
+
+"""
+import logging
+import os
+import sys
+from nose.plugins.base import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.util import ln
+from StringIO import StringIO
+
+
+log = logging.getLogger(__name__)
+
+class Capture(Plugin):
+    """
+    Output capture plugin. Enabled by default. Disable with ``-s`` or
+    ``--nocapture``. This plugin captures stdout during test execution,
+    appending any output captured to the error or failure output,
+    should the test fail or raise an error.
+    """
+    enabled = True
+    env_opt = 'NOSE_NOCAPTURE'
+    name = 'capture'
+    score = 1600
+
+    def __init__(self):
+        self.stdout = []
+        self._buf = None
+
+    def options(self, parser, env):
+        """Register commandline options
+        """
+        parser.add_option(
+            "-s", "--nocapture", action="store_false",
+            default=not env.get(self.env_opt), dest="capture",
+            help="Don't capture stdout (any stdout output "
+            "will be printed immediately) [NOSE_NOCAPTURE]")
+
+    def configure(self, options, conf):
+        """Configure plugin. Plugin is enabled by default.
+        """
+        self.conf = conf
+        if not options.capture:
+            self.enabled = False
+
+    def afterTest(self, test):
+        """Clear capture buffer.
+        """
+        self.end()
+        self._buf = None
+
+    def begin(self):
+        """Replace sys.stdout with capture buffer.
+        """
+        self.start() # get an early handle on sys.stdout
+
+    def beforeTest(self, test):
+        """Flush capture buffer.
+        """
+        self.start()
+
+    def formatError(self, test, err):
+        """Add captured output to error report.
+        """
+        test.capturedOutput = output = self.buffer
+        self._buf = None
+        if not output:
+            # Don't return None, as that would prevent other formatters
+            # from running and would discard the changes earlier
+            # formatters made; instead, return the err we got
+            return err
+        ec, ev, tb = err
+        return (ec, self.addCaptureToErr(ev, output), tb)
+
+    def formatFailure(self, test, err):
+        """Add captured output to failure report.
+        """
+        return self.formatError(test, err)
+
+    def addCaptureToErr(self, ev, output):
+        ev = exc_to_unicode(ev)
+        output = force_unicode(output)
+        return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
+                           output, ln(u'>> end captured stdout <<')])
+
+    def start(self):
+        self.stdout.append(sys.stdout)
+        self._buf = StringIO()
+        sys.stdout = self._buf
+
+    def end(self):
+        if self.stdout:
+            sys.stdout = self.stdout.pop()
+
+    def finalize(self, result):
+        """Restore stdout.
+        """
+        while self.stdout:
+            self.end()
+
+    def _get_buffer(self):
+        if self._buf is not None:
+            return self._buf.getvalue()
+
+    buffer = property(_get_buffer, None, None,
+                      """Captured stdout output.""")
diff --git a/lib/spack/external/nose/plugins/collect.py b/lib/spack/external/nose/plugins/collect.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f9f0faa779ed44e29c3573d91f3cee080380ec7
--- /dev/null
+++ b/lib/spack/external/nose/plugins/collect.py
@@ -0,0 +1,94 @@
+"""
+This plugin bypasses the actual execution of tests, and instead just collects
+test names. Fixtures are also bypassed, so running nosetests with the 
+collection plugin enabled should be very quick.
+
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
+
+This plugin is also useful for counting tests in a test suite, and making
+people watching your demo think all of your tests pass.
+"""
+from nose.plugins.base import Plugin
+from nose.case import Test
+import logging
+import unittest
+
+log = logging.getLogger(__name__)
+
+
+class CollectOnly(Plugin):
+    """
+    Collect and output test names only, don't run any tests.
+    """
+    name = "collect-only"
+    enableOpt = 'collect_only'
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option('--collect-only',
+                          action='store_true',
+                          dest=self.enableOpt,
+                          default=env.get('NOSE_COLLECT_ONLY'),
+                          help="Enable collect-only: %s [COLLECT_ONLY]" %
+                          (self.help()))
+
+    def prepareTestLoader(self, loader):
+        """Install collect-only suite class in TestLoader.
+        """
+        # Disable context awareness
+        log.debug("Preparing test loader")
+        loader.suiteClass = TestSuiteFactory(self.conf)
+
+    def prepareTestCase(self, test):
+        """Replace actual test with dummy that always passes.
+        """
+        # Return something that always passes
+        log.debug("Preparing test case %s", test)
+        if not isinstance(test, Test):
+            return
+        def run(result):
+            # We need to make these plugin calls because there won't be
+            # a result proxy, due to using a stripped-down test suite
+            self.conf.plugins.startTest(test)
+            result.startTest(test)
+            self.conf.plugins.addSuccess(test)
+            result.addSuccess(test)
+            self.conf.plugins.stopTest(test)
+            result.stopTest(test)
+        return run
+
+
+class TestSuiteFactory:
+    """
+    Factory for producing configured test suites.
+    """
+    def __init__(self, conf):
+        self.conf = conf
+
+    def __call__(self, tests=(), **kw):
+        return TestSuite(tests, conf=self.conf)
+
+
+class TestSuite(unittest.TestSuite):
+    """
+    Basic test suite that bypasses most proxy and plugin calls, but does
+    wrap tests in a nose.case.Test so prepareTestCase will be called.
+    """
+    def __init__(self, tests=(), conf=None):
+        self.conf = conf
+        # Exec lazy suites: makes discovery depth-first
+        if callable(tests):
+            tests = tests()
+        log.debug("TestSuite(%r)", tests)
+        unittest.TestSuite.__init__(self, tests)
+
+    def addTest(self, test):
+        log.debug("Add test %s", test)
+        if isinstance(test, unittest.TestSuite):
+            self._tests.append(test)
+        else:
+            self._tests.append(Test(test, config=self.conf))
+
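+# Editor's note -- typical usage, paired with the testid plugin for an
+# indexed listing that runs nothing:
+#
+#   nosetests --collect-only --with-id -v
+#
+# With --with-id active, an individual test can then be re-run by its
+# index, e.g. ``nosetests 12``.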
diff --git a/lib/spack/external/nose/plugins/cover.py b/lib/spack/external/nose/plugins/cover.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbe2e30dcdd89d5b4beda74c7ddf306efc408021
--- /dev/null
+++ b/lib/spack/external/nose/plugins/cover.py
@@ -0,0 +1,271 @@
+"""If you have Ned Batchelder's coverage_ module installed, you may activate a
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
+environment variable. The coverage report will cover any python source module
+imported after the start of the test run, excluding modules that match
+testMatch. If you want to include those modules too, use the ``--cover-tests``
+switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
+restrict the coverage report to modules from a particular package or packages,
+use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
+variable.
+
+.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
+"""
+import logging
+import re
+import sys
+import StringIO
+from nose.plugins.base import Plugin
+from nose.util import src, tolist
+
+log = logging.getLogger(__name__)
+
+
+class Coverage(Plugin):
+    """
+    Activate a coverage report using Ned Batchelder's coverage module.
+    """
+    coverTests = False
+    coverPackages = None
+    coverInstance = None
+    coverErase = False
+    coverMinPercentage = None
+    score = 200
+    status = {}
+
+    def options(self, parser, env):
+        """
+        Add options to command line.
+        """
+        super(Coverage, self).options(parser, env)
+        parser.add_option("--cover-package", action="append",
+                          default=env.get('NOSE_COVER_PACKAGE'),
+                          metavar="PACKAGE",
+                          dest="cover_packages",
+                          help="Restrict coverage output to selected packages "
+                          "[NOSE_COVER_PACKAGE]")
+        parser.add_option("--cover-erase", action="store_true",
+                          default=env.get('NOSE_COVER_ERASE'),
+                          dest="cover_erase",
+                          help="Erase previously collected coverage "
+                          "statistics before run")
+        parser.add_option("--cover-tests", action="store_true",
+                          dest="cover_tests",
+                          default=env.get('NOSE_COVER_TESTS'),
+                          help="Include test modules in coverage report "
+                          "[NOSE_COVER_TESTS]")
+        parser.add_option("--cover-min-percentage", action="store",
+                          dest="cover_min_percentage",
+                          default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
+                          help="Minimum percentage of coverage for tests "
+                          "to pass [NOSE_COVER_MIN_PERCENTAGE]")
+        parser.add_option("--cover-inclusive", action="store_true",
+                          dest="cover_inclusive",
+                          default=env.get('NOSE_COVER_INCLUSIVE'),
+                          help="Include all python files under working "
+                          "directory in coverage report.  Useful for "
+                          "discovering holes in test coverage if not all "
+                          "files are imported by the test suite. "
+                          "[NOSE_COVER_INCLUSIVE]")
+        parser.add_option("--cover-html", action="store_true",
+                          default=env.get('NOSE_COVER_HTML'),
+                          dest='cover_html',
+                          help="Produce HTML coverage information")
+        parser.add_option('--cover-html-dir', action='store',
+                          default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
+                          dest='cover_html_dir',
+                          metavar='DIR',
+                          help='Produce HTML coverage information in dir')
+        parser.add_option("--cover-branches", action="store_true",
+                          default=env.get('NOSE_COVER_BRANCHES'),
+                          dest="cover_branches",
+                          help="Include branch coverage in coverage report "
+                          "[NOSE_COVER_BRANCHES]")
+        parser.add_option("--cover-xml", action="store_true",
+                          default=env.get('NOSE_COVER_XML'),
+                          dest="cover_xml",
+                          help="Produce XML coverage information")
+        parser.add_option("--cover-xml-file", action="store",
+                          default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
+                          dest="cover_xml_file",
+                          metavar="FILE",
+                          help="Produce XML coverage information in file")
+
+    def configure(self, options, conf):
+        """
+        Configure plugin.
+        """
+        try:
+            self.status.pop('active')
+        except KeyError:
+            pass
+        super(Coverage, self).configure(options, conf)
+        if self.enabled:
+            try:
+                import coverage
+                if not hasattr(coverage, 'coverage'):
+                    raise ImportError("Unable to import coverage module")
+            except ImportError:
+                log.error("Coverage not available: "
+                          "unable to import coverage module")
+                self.enabled = False
+                return
+        self.conf = conf
+        self.coverErase = options.cover_erase
+        self.coverTests = options.cover_tests
+        self.coverPackages = []
+        if options.cover_packages:
+            if isinstance(options.cover_packages, (list, tuple)):
+                cover_packages = options.cover_packages
+            else:
+                cover_packages = [options.cover_packages]
+            for pkgs in [tolist(x) for x in cover_packages]:
+                self.coverPackages.extend(pkgs)
+        self.coverInclusive = options.cover_inclusive
+        if self.coverPackages:
+            log.info("Coverage report will include only packages: %s",
+                     self.coverPackages)
+        self.coverHtmlDir = None
+        if options.cover_html:
+            self.coverHtmlDir = options.cover_html_dir
+            log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
+        self.coverBranches = options.cover_branches
+        self.coverXmlFile = None
+        if options.cover_min_percentage:
+            self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
+        if options.cover_xml:
+            self.coverXmlFile = options.cover_xml_file
+            log.debug('Will put XML coverage report in %s', self.coverXmlFile)
+        if self.enabled:
+            self.status['active'] = True
+            self.coverInstance = coverage.coverage(auto_data=False,
+                branch=self.coverBranches, data_suffix=conf.worker,
+                source=self.coverPackages)
+            self.coverInstance._warn_no_data = False
+            self.coverInstance.is_worker = conf.worker
+            self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
+
+            log.debug("Coverage begin")
+            self.skipModules = sys.modules.keys()[:]
+            if self.coverErase:
+                log.debug("Clearing previously collected coverage statistics")
+                self.coverInstance.combine()
+                self.coverInstance.erase()
+
+            if not self.coverInstance.is_worker:
+                self.coverInstance.load()
+                self.coverInstance.start()
+
+
+    def beforeTest(self, *args, **kwargs):
+        """
+        Begin recording coverage information.
+        """
+
+        if self.coverInstance.is_worker:
+            self.coverInstance.load()
+            self.coverInstance.start()
+
+    def afterTest(self, *args, **kwargs):
+        """
+        Stop recording coverage information.
+        """
+
+        if self.coverInstance.is_worker:
+            self.coverInstance.stop()
+            self.coverInstance.save()
+
+
+    def report(self, stream):
+        """
+        Output code coverage report.
+        """
+        log.debug("Coverage report")
+        self.coverInstance.stop()
+        self.coverInstance.combine()
+        self.coverInstance.save()
+        modules = [module
+                    for name, module in sys.modules.items()
+                    if self.wantModuleCoverage(name, module)]
+        log.debug("Coverage report will cover modules: %s", modules)
+        self.coverInstance.report(modules, file=stream)
+
+        import coverage
+        if self.coverHtmlDir:
+            log.debug("Generating HTML coverage report")
+            try:
+                self.coverInstance.html_report(modules, self.coverHtmlDir)
+            except coverage.misc.CoverageException, e:
+                log.warning("Failed to generate HTML report: %s" % str(e))
+
+        if self.coverXmlFile:
+            log.debug("Generating XML coverage report")
+            try:
+                self.coverInstance.xml_report(modules, self.coverXmlFile)
+            except coverage.misc.CoverageException, e:
+                log.warning("Failed to generate XML report: %s" % str(e))
+
+        # make sure we have minimum required coverage
+        if self.coverMinPercentage:
+            f = StringIO.StringIO()
+            self.coverInstance.report(modules, file=f)
+
+            multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+                              r'\s+(\d+)%\s+\d*\s{0,1}$')
+            singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+                               r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
+
+            m = re.search(multiPackageRe, f.getvalue())
+            if m is None:
+                m = re.search(singlePackageRe, f.getvalue())
+
+            if m:
+                percentage = int(m.groups()[0])
+                if percentage < self.coverMinPercentage:
+                    log.error('TOTAL Coverage did not reach minimum '
+                              'required: %d%%' % self.coverMinPercentage)
+                    sys.exit(1)
+            else:
+                log.error("No total percentage was found in coverage output, "
+                          "something went wrong.")
+
+
+    def wantModuleCoverage(self, name, module):
+        if not hasattr(module, '__file__'):
+            log.debug("no coverage of %s: no __file__", name)
+            return False
+        module_file = src(module.__file__)
+        if not module_file or not module_file.endswith('.py'):
+            log.debug("no coverage of %s: not a python file", name)
+            return False
+        if self.coverPackages:
+            for package in self.coverPackages:
+                if (re.findall(r'^%s\b' % re.escape(package), name)
+                    and (self.coverTests
+                         or not self.conf.testMatch.search(name))):
+                    log.debug("coverage for %s", name)
+                    return True
+        if name in self.skipModules:
+            log.debug("no coverage for %s: loaded before coverage start",
+                      name)
+            return False
+        if self.conf.testMatch.search(name) and not self.coverTests:
+            log.debug("no coverage for %s: is a test", name)
+            return False
+        # accept any package that passed the previous tests, unless
+        # coverPackages is on -- in that case, if we wanted this
+        # module, we would have already returned True
+        return not self.coverPackages
+
+    def wantFile(self, file, package=None):
+        """If inclusive coverage enabled, return true for all source files
+        in wanted packages.
+        """
+        if self.coverInclusive:
+            if file.endswith(".py"):
+                if package and self.coverPackages:
+                    for want in self.coverPackages:
+                        if package.startswith(want):
+                            return True
+                else:
+                    return True
+        return None
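+
+# Editor's note -- a typical invocation using the options registered
+# above: restrict the report to one package, fail the run below a
+# threshold, and emit HTML (the package name is illustrative):
+#
+#   nosetests --with-coverage --cover-package=spack \
+#             --cover-min-percentage=80 --cover-html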
diff --git a/lib/spack/external/nose/plugins/debug.py b/lib/spack/external/nose/plugins/debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..78243e60d09c107c97767eac09734a59ef9cdb70
--- /dev/null
+++ b/lib/spack/external/nose/plugins/debug.py
@@ -0,0 +1,67 @@
+"""
+This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
+option will drop the test runner into pdb when it encounters an error. To
+drop into pdb on failure, use ``--pdb-failures``.
+"""
+
+import pdb
+from nose.plugins.base import Plugin
+
+class Pdb(Plugin):
+    """
+    Provides --pdb and --pdb-failures options that cause the test runner to
+    drop into pdb if it encounters an error or failure, respectively.
+    """
+    enabled_for_errors = False
+    enabled_for_failures = False
+    score = 5 # run last, among builtins
+    
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option(
+            "--pdb", action="store_true", dest="debugBoth",
+            default=env.get('NOSE_PDB', False),
+            help="Drop into debugger on failures or errors")
+        parser.add_option(
+            "--pdb-failures", action="store_true",
+            dest="debugFailures",
+            default=env.get('NOSE_PDB_FAILURES', False),
+            help="Drop into debugger on failures")
+        parser.add_option(
+            "--pdb-errors", action="store_true",
+            dest="debugErrors",
+            default=env.get('NOSE_PDB_ERRORS', False),
+            help="Drop into debugger on errors")
+
+    def configure(self, options, conf):
+        """Configure which kinds of exceptions trigger plugin.
+        """
+        self.conf = conf
+        self.enabled_for_errors = options.debugErrors or options.debugBoth
+        self.enabled_for_failures = options.debugFailures or options.debugBoth
+        self.enabled = self.enabled_for_failures or self.enabled_for_errors
+
+    def addError(self, test, err):
+        """Enter pdb if configured to debug errors.
+        """
+        if not self.enabled_for_errors:
+            return
+        self.debug(err)
+
+    def addFailure(self, test, err):
+        """Enter pdb if configured to debug failures.
+        """
+        if not self.enabled_for_failures:
+            return
+        self.debug(err)
+
+    def debug(self, err):
+        import sys # FIXME why is this import here?
+        ec, ev, tb = err
+        stdout = sys.stdout
+        sys.stdout = sys.__stdout__
+        try:
+            pdb.post_mortem(tb)
+        finally:
+            sys.stdout = stdout
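+
+# Editor's note -- typical usage of the options registered above:
+#
+#   nosetests --pdb            # post-mortem on errors or failures
+#   nosetests --pdb-failures   # post-mortem on failures only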
diff --git a/lib/spack/external/nose/plugins/deprecated.py b/lib/spack/external/nose/plugins/deprecated.py
new file mode 100644
index 0000000000000000000000000000000000000000..461a26be631f8ab026946259cb19cf93de14f389
--- /dev/null
+++ b/lib/spack/external/nose/plugins/deprecated.py
@@ -0,0 +1,45 @@
+"""
+This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+class DeprecatedTest(Exception):
+    """Raise this exception to mark a test as deprecated.
+    """
+    pass
+
+
+class Deprecated(ErrorClassPlugin):
+    """
+    Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+    by default.
+    """
+    enabled = True
+    deprecated = ErrorClass(DeprecatedTest,
+                            label='DEPRECATED',
+                            isfailure=False)
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        env_opt = 'NOSE_WITHOUT_DEPRECATED'
+        parser.add_option('--no-deprecated', action='store_true',
+                          dest='noDeprecated', default=env.get(env_opt, False),
+                          help="Disable special handling of DeprecatedTest "
+                          "exceptions.")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        disable = getattr(options, 'noDeprecated', False)
+        if disable:
+            self.enabled = False
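+
+# Editor's note -- marking a test as deprecated, sketched for
+# illustration: the run then reports 'D' / 'DEPRECATED' instead of an
+# error:
+#
+#   from nose.plugins.deprecated import DeprecatedTest
+#
+#   def test_old_interface():
+#       raise DeprecatedTest('superseded by test_new_interface')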
diff --git a/lib/spack/external/nose/plugins/doctests.py b/lib/spack/external/nose/plugins/doctests.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ef65799f3adab9b865cafa05dd3890ef6b8d97a
--- /dev/null
+++ b/lib/spack/external/nose/plugins/doctests.py
@@ -0,0 +1,455 @@
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of :mod:`doctests
+<doctest>`.  Because doctests are usually included in the tested package
+(instead of being grouped into packages or modules of their own), nose only
+looks for them in the non-test packages it discovers in the working directory.
+
+Doctests may also be placed into files other than python modules, in which
+case they can be collected and executed by using the ``--doctest-extension``
+switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
+extension(s) to load.
+
+When loading doctests from non-module files, use the ``--doctest-fixtures``
+switch to specify how to find modules containing fixtures for the tests. A
+module name will be produced by appending the value of that switch to the base
+name of each doctest file loaded. For example, a doctest file "widgets.rst"
+with the switch ``--doctest_fixtures=_fixt`` will load fixtures from the module
+``widgets_fixt.py``.
+
+A fixtures module may define any or all of the following functions:
+
+* setup([module]) or setup_module([module])
+   
+  Called before the test runs. You may raise SkipTest to skip all tests.
+  
+* teardown([module]) or teardown_module([module])
+
+  Called after the test runs, if setup/setup_module did not raise an
+  unhandled exception.
+
+* setup_test(test)
+
+  Called before the test. NOTE: the argument passed is a
+  doctest.DocTest instance, *not* a unittest.TestCase.
+  
+* teardown_test(test)
+ 
+  Called after the test, if setup_test did not raise an exception. NOTE: the
+  argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
+  
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
+
+.. note ::
+
+   See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
+   additional documentation and examples.
+
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+from inspect import getmodule
+from nose.plugins.base import Plugin
+from nose.suite import ContextList
+from nose.util import anyp, getpackage, test_address, resolve_name, \
+     src, tolist, isproperty
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+import __builtin__ as builtin_mod
+
+log = logging.getLogger(__name__)
+
+try:
+    import doctest
+    doctest.DocTestCase
+    # system version of doctest is acceptable, but needs a monkeypatch
+except (ImportError, AttributeError):
+    # system version is too old
+    import nose.ext.dtcompat as doctest
+
+
+#
+# Doctest and coverage don't get along, so we need to create
+# a monkeypatch that will replace the part of doctest that
+# interferes with coverage reports.
+#
+# The monkeypatch is based on this zope patch:
+# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
+#
+_orp = doctest._OutputRedirectingPdb
+
+class NoseOutputRedirectingPdb(_orp):
+    def __init__(self, out):
+        self.__debugger_used = False
+        _orp.__init__(self, out)
+
+    def set_trace(self):
+        self.__debugger_used = True
+        _orp.set_trace(self, sys._getframe().f_back)
+
+    def set_continue(self):
+        # Calling set_continue unconditionally would break unit test 
+        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
+        if self.__debugger_used:
+            _orp.set_continue(self)
+doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb    
+
+
+class DoctestSuite(unittest.TestSuite):
+    """
+    Doctest suites are parallelizable at the module or file level only,
+    since they may be attached to objects that are not individually
+    addressable (like properties). This suite subclass is used when
+    loading doctests from a module to ensure that behavior.
+
+    This class is used only if the plugin is not fully prepared;
+    in normal use, the loader's suiteClass is used.
+    
+    """
+    can_split = False
+    
+    def __init__(self, tests=(), context=None, can_split=False):
+        self.context = context
+        self.can_split = can_split
+        unittest.TestSuite.__init__(self, tests=tests)
+
+    def address(self):
+        return test_address(self.context)
+
+    def __iter__(self):
+        # 2.3 compat
+        return iter(self._tests)
+
+    def __str__(self):
+        return str(self._tests)
+
+        
+class Doctest(Plugin):
+    """
+    Activate doctest plugin to find and run doctests in non-test modules.
+    """
+    extension = None
+    suiteClass = DoctestSuite
+    
+    def options(self, parser, env):
+        """Register commmandline options.
+        """
+        Plugin.options(self, parser, env)
+        parser.add_option('--doctest-tests', action='store_true',
+                          dest='doctest_tests',
+                          default=env.get('NOSE_DOCTEST_TESTS'),
+                          help="Also look for doctests in test modules. "
+                          "Note that classes, methods and functions should "
+                          "have either doctests or non-doctest tests, "
+                          "not both. [NOSE_DOCTEST_TESTS]")
+        parser.add_option('--doctest-extension', action="append",
+                          dest="doctestExtension",
+                          metavar="EXT",
+                          help="Also look for doctests in files with "
+                          "this extension [NOSE_DOCTEST_EXTENSION]")
+        parser.add_option('--doctest-result-variable',
+                          dest='doctest_result_var',
+                          default=env.get('NOSE_DOCTEST_RESULT_VAR'),
+                          metavar="VAR",
+                          help="Change the variable name set to the result of "
+                          "the last interpreter command from the default '_'. "
+                          "Can be used to avoid conflicts with the _() "
+                          "function used for text translation. "
+                          "[NOSE_DOCTEST_RESULT_VAR]")
+        parser.add_option('--doctest-fixtures', action="store",
+                          dest="doctestFixtures",
+                          metavar="SUFFIX",
+                          help="Find fixtures for a doctest file in module "
+                          "with this name appended to the base name "
+                          "of the doctest file")
+        parser.add_option('--doctest-options', action="append",
+                          dest="doctestOptions",
+                          metavar="OPTIONS",
+                          help="Specify options to pass to doctest. " +
+                          "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
+        # Set the default as a list, if given in env; otherwise
+        # an additional value set on the command line will cause
+        # an error.
+        env_setting = env.get('NOSE_DOCTEST_EXTENSION')
+        if env_setting is not None:
+            parser.set_defaults(doctestExtension=tolist(env_setting))
+
+    def configure(self, options, config):
+        """Configure plugin.
+        """
+        Plugin.configure(self, options, config)
+        self.doctest_result_var = options.doctest_result_var
+        self.doctest_tests = options.doctest_tests
+        self.extension = tolist(options.doctestExtension)
+        self.fixtures = options.doctestFixtures
+        self.finder = doctest.DocTestFinder()
+        self.optionflags = 0
+        if options.doctestOptions:
+            flags = ",".join(options.doctestOptions).split(',')
+            for flag in flags:
+                if not flag or flag[0] not in '+-':
+                    raise ValueError(
+                        "Must specify doctest options with starting " +
+                        "'+' or '-'.  Got %s" % (flag,))
+                mode, option_name = flag[0], flag[1:]
+                option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name)
+                if not option_flag:
+                    raise ValueError("Unknown doctest option %s" %
+                                     (option_name,))
+                if mode == '+':
+                    self.optionflags |= option_flag
+                elif mode == '-':
+                    self.optionflags &= ~option_flag
+
+    def prepareTestLoader(self, loader):
+        """Capture loader's suiteClass.
+
+        This is used to create test suites from doctest files.
+        
+        """
+        self.suiteClass = loader.suiteClass
+
+    def loadTestsFromModule(self, module):
+        """Load doctests from the module.
+        """
+        log.debug("loading from %s", module)
+        if not self.matches(module.__name__):
+            log.debug("Doctest doesn't want module %s", module)
+            return
+        try:
+            tests = self.finder.find(module)
+        except AttributeError:
+            log.exception("Attribute error loading from %s", module)
+            # nose allows module.__test__ = False; doctest does not and throws
+            # AttributeError
+            return
+        if not tests:
+            log.debug("No tests found in %s", module)
+            return
+        tests.sort()
+        module_file = src(module.__file__)
+        # FIXME this breaks the id plugin somehow (tests probably don't
+        # get wrapped in result proxy or something)
+        cases = []
+        for test in tests:
+            if not test.examples:
+                continue
+            if not test.filename:
+                test.filename = module_file
+            cases.append(DocTestCase(test,
+                                     optionflags=self.optionflags,
+                                     result_var=self.doctest_result_var))
+        if cases:
+            yield self.suiteClass(cases, context=module, can_split=False)
+            
+    def loadTestsFromFile(self, filename):
+        """Load doctests from the file.
+
+        Tests are loaded only if filename's extension matches
+        configured doctest extension.
+
+        """
+        if self.extension and anyp(filename.endswith, self.extension):
+            name = os.path.basename(filename)
+            dh = open(filename)
+            try:
+                doc = dh.read()
+            finally:
+                dh.close()
+
+            fixture_context = None
+            globs = {'__file__': filename}
+            if self.fixtures:
+                base, ext = os.path.splitext(name)
+                dirname = os.path.dirname(filename)
+                sys.path.append(dirname)
+                fixt_mod = base + self.fixtures
+                try:
+                    fixture_context = __import__(
+                        fixt_mod, globals(), locals(), ["nop"])
+                except ImportError, e:
+                    log.debug(
+                        "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
+                log.debug("Fixture module %s resolved to %s",
+                          fixt_mod, fixture_context)
+                if hasattr(fixture_context, 'globs'):
+                    globs = fixture_context.globs(globs)                    
+            parser = doctest.DocTestParser()
+            test = parser.get_doctest(
+                doc, globs=globs, name=name,
+                filename=filename, lineno=0)
+            if test.examples:
+                case = DocFileCase(
+                    test,
+                    optionflags=self.optionflags,
+                    setUp=getattr(fixture_context, 'setup_test', None),
+                    tearDown=getattr(fixture_context, 'teardown_test', None),
+                    result_var=self.doctest_result_var)
+                if fixture_context:
+                    yield ContextList((case,), context=fixture_context)
+                else:
+                    yield case
+            else:
+                yield False # no tests to load
+            
+    def makeTest(self, obj, parent):
+        """Look for doctests in the given object, which will be a
+        function, method or class.
+        """
+        name = getattr(obj, '__name__', 'Unnamed %s' % type(obj))
+        doctests = self.finder.find(obj, module=getmodule(parent), name=name)
+        if doctests:
+            for test in doctests:
+                if len(test.examples) == 0:
+                    continue
+                yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
+                                  result_var=self.doctest_result_var)
+    
+    def matches(self, name):
+        # FIXME this seems wrong -- nothing is ever going to
+        # fail this test, since we're given a module NAME not FILE
+        if name == '__init__.py':
+            return False
+        # FIXME don't think we need include/exclude checks here?
+        return ((self.doctest_tests or not self.conf.testMatch.search(name)
+                 or (self.conf.include 
+                     and filter(None,
+                                [inc.search(name)
+                                 for inc in self.conf.include])))
+                and (not self.conf.exclude 
+                     or not filter(None,
+                                   [exc.search(name)
+                                    for exc in self.conf.exclude])))
+    
+    def wantFile(self, file):
+        """Override to select all modules and any file ending with
+        configured doctest extension.
+        """
+        # always want .py files
+        if file.endswith('.py'):
+            return True
+        # also want files that match my extension
+        if (self.extension
+            and anyp(file.endswith, self.extension)
+            and (not self.conf.exclude
+                 or not filter(None, 
+                               [exc.search(file)
+                                for exc in self.conf.exclude]))):
+            return True
+        return None
+
+
+class DocTestCase(doctest.DocTestCase):
+    """Overrides DocTestCase to
+    provide an address() method that returns the correct address for
+    the doctest case. To provide hints for address(), an obj may also
+    be passed -- this will be used as the test object for purposes of
+    determining the test address, if it is provided.
+    """
+    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+                 checker=None, obj=None, result_var='_'):
+        self._result_var = result_var
+        self._nose_obj = obj
+        super(DocTestCase, self).__init__(
+            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+            checker=checker)
+    
+    def address(self):
+        if self._nose_obj is not None:
+            return test_address(self._nose_obj)
+        obj = resolve_name(self._dt_test.name)
+
+        if isproperty(obj):
+            # properties have no connection to the class they are in
+            # so we can't just look 'em up, we have to first look up
+            # the class, then stick the prop on the end
+            parts = self._dt_test.name.split('.')
+            class_name = '.'.join(parts[:-1])
+            cls = resolve_name(class_name)
+            base_addr = test_address(cls)
+            return (base_addr[0], base_addr[1],
+                    '.'.join([base_addr[2], parts[-1]]))
+        else:
+            return test_address(obj)
+    
+    # doctests loaded via find(obj) omit the module name
+    # so we need to override id, __repr__ and shortDescription
+    # bonus: this will squash a 2.3 vs 2.4 incompatibility
+    def id(self):
+        name = self._dt_test.name
+        filename = self._dt_test.filename
+        if filename is not None:
+            pk = getpackage(filename)
+            if pk is None:
+                return name
+            if not name.startswith(pk):
+                name = "%s.%s" % (pk, name)
+        return name
+    
+    def __repr__(self):
+        name = self.id()
+        name = name.split('.')
+        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+    __str__ = __repr__
+
+    def shortDescription(self):
+        return 'Doctest: %s' % self.id()
+
+    def setUp(self):
+        if self._result_var is not None:
+            self._old_displayhook = sys.displayhook
+            sys.displayhook = self._displayhook
+        super(DocTestCase, self).setUp()
+
+    def _displayhook(self, value):
+        if value is None:
+            return
+        setattr(builtin_mod, self._result_var,  value)
+        print repr(value)
+
+    def tearDown(self):
+        super(DocTestCase, self).tearDown()
+        if self._result_var is not None:
+            sys.displayhook = self._old_displayhook
+            delattr(builtin_mod, self._result_var)
+
+
+class DocFileCase(doctest.DocFileCase):
+    """Overrides to provide address() method that returns the correct
+    address for the doc file case.
+    """
+    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+                 checker=None, result_var='_'):
+        self._result_var = result_var
+        super(DocFileCase, self).__init__(
+            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+            checker=checker)
+
+    def address(self):
+        return (self._dt_test.filename, None, None)
+
+    def setUp(self):
+        if self._result_var is not None:
+            self._old_displayhook = sys.displayhook
+            sys.displayhook = self._displayhook
+        super(DocFileCase, self).setUp()
+
+    def _displayhook(self, value):
+        if value is None:
+            return
+        setattr(builtin_mod, self._result_var, value)
+        print repr(value)
+
+    def tearDown(self):
+        super(DocFileCase, self).tearDown()
+        if self._result_var is not None:
+            sys.displayhook = self._old_displayhook
+            delattr(builtin_mod, self._result_var)
diff --git a/lib/spack/external/nose/plugins/errorclass.py b/lib/spack/external/nose/plugins/errorclass.py
new file mode 100644
index 0000000000000000000000000000000000000000..d1540e0070a7924059d0877306aca23f11f2a226
--- /dev/null
+++ b/lib/spack/external/nose/plugins/errorclass.py
@@ -0,0 +1,210 @@
+"""
+ErrorClass Plugins
+------------------
+
+ErrorClass plugins provide an easy way to add support for custom
+handling of particular classes of exceptions.
+
+An ErrorClass plugin defines one or more ErrorClasses and how each is
+handled and reported on. Each error class is stored in a different
+attribute on the result, and reported separately. Each error class must
+indicate the exceptions that fall under that class, the label to use
+for reporting, and whether exceptions of the class should be
+considered as failures for the whole test run.
+
+ErrorClasses use a declarative syntax. Assign an ErrorClass to the
+attribute you wish to add to the result object, defining the
+exceptions, label and isfailure attributes. For example, to declare an
+ErrorClassPlugin that treats Todo exceptions (and subclasses of Todo)
+as an error class with the label 'TODO' that is considered a failure,
+do this:
+
+    >>> class Todo(Exception):
+    ...     pass
+    >>> class TodoError(ErrorClassPlugin):
+    ...     todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+The MetaErrorClass metaclass translates the ErrorClass declarations
+into the tuples used by the error handling and reporting functions in
+the result. This is an internal format and subject to change; you
+should always use the declarative syntax for attaching ErrorClasses to
+an ErrorClass plugin.
+
+    >>> TodoError.errorClasses # doctest: +ELLIPSIS
+    ((<class ...Todo...>, ('todo', 'TODO', True)),)
+
+Let's see the plugin in action. First some boilerplate.
+
+    >>> import sys
+    >>> import unittest
+    >>> try:
+    ...     # 2.7+
+    ...     from unittest.runner import _WritelnDecorator
+    ... except ImportError:
+    ...     from unittest import _WritelnDecorator
+    ...
+    >>> buf = _WritelnDecorator(sys.stdout)
+
+Now define a test case that raises a Todo.
+
+    >>> class TestTodo(unittest.TestCase):
+    ...     def runTest(self):
+    ...         raise Todo("I need to test something")
+    >>> case = TestTodo()
+
+Prepare the result using our plugin. Normally this happens during the
+course of test execution within nose -- you won't be doing this
+yourself. For the purposes of this testing document, I'm stepping
+through the internal process of nose so you can see what happens at
+each step.
+
+    >>> plugin = TodoError()
+    >>> from nose.result import _TextTestResult
+    >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
+    >>> plugin.prepareTestResult(result)
+
+Now run the test. TODO is printed.
+
+    >>> _ = case(result) # doctest: +ELLIPSIS
+    runTest (....TestTodo) ... TODO: I need to test something
+
+Errors and failures are empty, but todo has our test:
+
+    >>> result.errors
+    []
+    >>> result.failures
+    []
+    >>> result.todo # doctest: +ELLIPSIS
+    [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
+    >>> result.printErrors() # doctest: +ELLIPSIS
+    <BLANKLINE>
+    ======================================================================
+    TODO: runTest (....TestTodo)
+    ----------------------------------------------------------------------
+    Traceback (most recent call last):
+    ...
+    ...Todo: I need to test something
+    <BLANKLINE>
+
+Since we defined a Todo as a failure, the run was not successful.
+
+    >>> result.wasSuccessful()
+    False
+"""
+
+from nose.pyversion import make_instancemethod
+from nose.plugins.base import Plugin
+from nose.result import TextTestResult
+from nose.util import isclass
+
+class MetaErrorClass(type):
+    """Metaclass for ErrorClassPlugins that allows error classes to be
+    set up in a declarative manner.
+    """
+    def __init__(self, name, bases, attr):
+        errorClasses = []
+        for name, detail in attr.items():
+            if isinstance(detail, ErrorClass):
+                attr.pop(name)
+                for cls in detail:
+                    errorClasses.append(
+                        (cls, (name, detail.label, detail.isfailure)))
+        super(MetaErrorClass, self).__init__(name, bases, attr)
+        self.errorClasses = tuple(errorClasses)
+
+
+class ErrorClass(object):
+    def __init__(self, *errorClasses, **kw):
+        self.errorClasses = errorClasses
+        try:
+            for key in ('label', 'isfailure'):
+                setattr(self, key, kw.pop(key))
+        except KeyError:
+            raise TypeError("%r is a required named argument for ErrorClass"
+                            % key)
+
+    def __iter__(self):
+        return iter(self.errorClasses)
+
+
+class ErrorClassPlugin(Plugin):
+    """
+    Base class for ErrorClass plugins. Subclass this class and declare the
+    exceptions that you wish to handle as attributes of the subclass.
+    """
+    __metaclass__ = MetaErrorClass
+    score = 1000
+    errorClasses = ()
+
+    def addError(self, test, err):
+        err_cls, a, b = err
+        if not isclass(err_cls):
+            return
+        classes = [e[0] for e in self.errorClasses]
+        if filter(lambda c: issubclass(err_cls, c), classes):
+            return True
+
+    def prepareTestResult(self, result):
+        if not hasattr(result, 'errorClasses'):
+            self.patchResult(result)
+        for cls, (storage_attr, label, isfail) in self.errorClasses:
+            if cls not in result.errorClasses:
+                storage = getattr(result, storage_attr, [])
+                setattr(result, storage_attr, storage)
+                result.errorClasses[cls] = (storage, label, isfail)
+
+    def patchResult(self, result):
+        result.printLabel = print_label_patch(result)
+        result._orig_addError, result.addError = \
+            result.addError, add_error_patch(result)
+        result._orig_wasSuccessful, result.wasSuccessful = \
+            result.wasSuccessful, wassuccessful_patch(result)
+        if hasattr(result, 'printErrors'):
+            result._orig_printErrors, result.printErrors = \
+                result.printErrors, print_errors_patch(result)
+        if hasattr(result, 'addSkip'):
+            result._orig_addSkip, result.addSkip = \
+                result.addSkip, add_skip_patch(result)
+        result.errorClasses = {}
+
+
+def add_error_patch(result):
+    """Create a new addError method to patch into a result instance
+    that recognizes the errorClasses attribute and deals with
+    errorclasses correctly.
+    """
+    return make_instancemethod(TextTestResult.addError, result)
+
+
+def print_errors_patch(result):
+    """Create a new printErrors method that prints errorClasses items
+    as well.
+    """
+    return make_instancemethod(TextTestResult.printErrors, result)
+
+
+def print_label_patch(result):
+    """Create a new printLabel method that prints errorClasses items
+    as well.
+    """
+    return make_instancemethod(TextTestResult.printLabel, result)
+
+
+def wassuccessful_patch(result):
+    """Create a new wasSuccessful method that checks errorClasses for
+    exceptions that were put into other slots than error or failure
+    but that still count as not success.
+    """
+    return make_instancemethod(TextTestResult.wasSuccessful, result)
+
+
+def add_skip_patch(result):
+    """Create a new addSkip method to patch into a result instance
+    that delegates to addError.
+    """
+    return make_instancemethod(TextTestResult.addSkip, result)
+
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
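
As a usage sketch of the declarative syntax documented above, here is a
hypothetical plugin that reports KnownFailure exceptions under their own label
without failing the run (class names are invented for illustration)::

    from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin

    class KnownFailure(Exception):
        pass

    class KnownFail(ErrorClassPlugin):
        enabled = True
        # tests raising KnownFailure land in result.knownfail, reported
        # under the KNOWNFAIL label, and do not fail the run
        knownfail = ErrorClass(KnownFailure,
                               label='KNOWNFAIL',
                               isfailure=False)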
diff --git a/lib/spack/external/nose/plugins/failuredetail.py b/lib/spack/external/nose/plugins/failuredetail.py
new file mode 100644
index 0000000000000000000000000000000000000000..6462865dd0e34a602a64b47019f0dd5de11c64da
--- /dev/null
+++ b/lib/spack/external/nose/plugins/failuredetail.py
@@ -0,0 +1,49 @@
+"""
+This plugin provides assert introspection. When the plugin is enabled
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable 
+substitution is also performed in the context output to provide more
+debugging information.
+"""
+
+from nose.plugins import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.inspector import inspect_traceback
+
+class FailureDetail(Plugin):
+    """
+    Plugin that provides extra information in tracebacks of test failures.
+    """
+    score = 1600 # before capture
+
+    def options(self, parser, env):
+        """Register commmandline options.
+        """
+        parser.add_option(
+            "-d", "--detailed-errors", "--failure-detail",
+            action="store_true",
+            default=env.get('NOSE_DETAILED_ERRORS'),
+            dest="detailedErrors", help="Add detail to error"
+            " output by attempting to evaluate failed"
+            " asserts [NOSE_DETAILED_ERRORS]")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        if not self.can_configure:
+            return
+        self.enabled = options.detailedErrors
+        self.conf = conf
+
+    def formatFailure(self, test, err):
+        """Add detail from traceback inspection to error message of a failure.
+        """
+        ec, ev, tb = err
+        tbinfo, str_ev = None, exc_to_unicode(ev)
+
+        if tb:
+            tbinfo = force_unicode(inspect_traceback(tb))
+            str_ev = '\n'.join([str_ev, tbinfo])
+        test.tbinfo = tbinfo
+        return (ec, str_ev, tb)
+
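
formatFailure receives and returns the usual (exception class, value,
traceback) triple, so the inspected context is injected simply by rewriting
the middle element. A stripped-down sketch of the same contract, as a
hypothetical plugin that is not part of nose::

    from nose.plugins import Plugin

    class TagFailures(Plugin):
        enabled = True
        name = 'tag-failures'

        def formatFailure(self, test, err):
            ec, ev, tb = err
            # rewrite only the exception value; class and traceback pass through
            return (ec, '[tagged] %s' % ev, tb)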
diff --git a/lib/spack/external/nose/plugins/isolate.py b/lib/spack/external/nose/plugins/isolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..13235dfbd1ec6011c8c92ca2212bbba4f3e254e6
--- /dev/null
+++ b/lib/spack/external/nose/plugins/isolate.py
@@ -0,0 +1,103 @@
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
+
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
+
+    def setup(module):
+        module._mods = sys.modules.copy()
+
+    def teardown(module):
+        to_del = [ m for m in sys.modules.keys() if m not in
+                   module._mods ]
+        for mod in to_del:
+            del sys.modules[mod]
+        sys.modules.update(module._mods)
+
+Isolation works only during lazy loading. In normal use, this is only
+during discovery of modules within a directory, where the process of
+importing, loading tests and running tests from each module is
+encapsulated in a single loadTestsFromName call. This plugin
+implements loadTestsFromNames to force the same lazy-loading there,
+which allows isolation to work in directed mode as well as discovery,
+at the cost of some efficiency: lazy-loading names forces full context
+setup and teardown to run for each name, defeating the grouping that
+is normally used to ensure that context setup and teardown are run the
+fewest possible times for a given set of names.
+
+.. warning ::
+
+    This plugin should not be used in conjunction with other plugins
+    that assume that modules, once imported, will stay imported; for
+    instance, it may cause very odd results when used with the coverage
+    plugin.
+
+"""
+
+import logging
+import sys
+
+from nose.plugins import Plugin
+
+
+log = logging.getLogger('nose.plugins.isolation')
+
+class IsolationPlugin(Plugin):
+    """
+    Activate the isolation plugin to isolate changes to external
+    modules to a single test module or package. The isolation plugin
+    resets the contents of sys.modules after each test module or
+    package runs to its state before the test. PLEASE NOTE that this
+    plugin should not be used with the coverage plugin, or in any other case
+    where module reloading may produce undesirable side-effects.
+    """
+    score = 10 # I want to be last
+    name = 'isolation'
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """        
+        Plugin.configure(self, options, conf)
+        self._mod_stack = []
+
+    def beforeContext(self):
+        """Copy sys.modules onto my mod stack
+        """
+        mods = sys.modules.copy()
+        self._mod_stack.append(mods)
+
+    def afterContext(self):
+        """Pop my mod stack and restore sys.modules to the state
+        it was in when mod stack was pushed.
+        """
+        mods = self._mod_stack.pop()
+        to_del = [ m for m in sys.modules.keys() if m not in mods ]
+        if to_del:
+            log.debug('removing sys modules entries: %s', to_del)
+            for mod in to_del:
+                del sys.modules[mod]
+        sys.modules.update(mods)
+
+    def loadTestsFromNames(self, names, module=None):
+        """Create a lazy suite that calls beforeContext and afterContext
+        around each name. The side-effect of this is that full context
+        fixtures will be set up and torn down around each test named.
+        """
+        # Fast path for when we don't care
+        if not names or len(names) == 1:
+            return
+        loader = self.loader
+        plugins = self.conf.plugins
+        def lazy():
+            for name in names:
+                plugins.beforeContext()
+                yield loader.loadTestsFromName(name, module=module)
+                plugins.afterContext()
+        return (loader.suiteClass(lazy), [])
+
+    def prepareTestLoader(self, loader):
+        """Get handle on test loader so we can use it in loadTestsFromNames.
+        """
+        self.loader = loader
+
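
For directed runs, the plugin can also be handed to nose programmatically; a
small sketch (the test path here is hypothetical)::

    import nose
    from nose.plugins.isolate import IsolationPlugin

    # addplugins only registers the plugin; --with-isolation enables it
    nose.main(addplugins=[IsolationPlugin()],
              argv=['nosetests', '--with-isolation', 'tests/'])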
diff --git a/lib/spack/external/nose/plugins/logcapture.py b/lib/spack/external/nose/plugins/logcapture.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c9a79f6fd7ea0d85743c56582da122e3182159e
--- /dev/null
+++ b/lib/spack/external/nose/plugins/logcapture.py
@@ -0,0 +1,245 @@
+"""
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error or failure
+output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
+
+You can filter captured logging statements with the ``--logging-filter`` option.
+If set, it specifies which logger(s) will be captured; statements from loggers
+that do not match are passed through uncaptured. Example: specifying
+``--logging-filter=sqlalchemy,myapp`` ensures that only statements logged via
+the sqlalchemy.engine, myapp or myapp.foo.bar loggers will be captured.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
+"""
+
+import logging
+from logging import Handler
+import threading
+
+from nose.plugins.base import Plugin
+from nose.util import anyp, ln, safe_str
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+class FilterSet(object):
+    def __init__(self, filter_components):
+        self.inclusive, self.exclusive = self._partition(filter_components)
+
+    # @staticmethod
+    def _partition(components):
+        inclusive, exclusive = [], []
+        for component in components:
+            if component.startswith('-'):
+                exclusive.append(component[1:])
+            else:
+                inclusive.append(component)
+        return inclusive, exclusive
+    _partition = staticmethod(_partition)
+
+    def allow(self, record):
+        """returns whether this record should be printed"""
+        if not self:
+            # nothing to filter
+            return True
+        return self._allow(record) and not self._deny(record)
+
+    # @staticmethod
+    def _any_match(matchers, record):
+        """return the bool of whether `record` starts with
+        any item in `matchers`"""
+        def record_matches_key(key):
+            return record == key or record.startswith(key + '.')
+        return anyp(bool, map(record_matches_key, matchers))
+    _any_match = staticmethod(_any_match)
+
+    def _allow(self, record):
+        if not self.inclusive:
+            return True
+        return self._any_match(self.inclusive, record)
+
+    def _deny(self, record):
+        if not self.exclusive:
+            return False
+        return self._any_match(self.exclusive, record)
+
+
+class MyMemoryHandler(Handler):
+    def __init__(self, logformat, logdatefmt, filters):
+        Handler.__init__(self)
+        fmt = logging.Formatter(logformat, logdatefmt)
+        self.setFormatter(fmt)
+        self.filterset = FilterSet(filters)
+        self.buffer = []
+    def emit(self, record):
+        self.buffer.append(self.format(record))
+    def flush(self):
+        pass # do nothing
+    def truncate(self):
+        self.buffer = []
+    def filter(self, record):
+        if self.filterset.allow(record.name):
+            return Handler.filter(self, record)
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state['lock']
+        return state
+    def __setstate__(self, state):
+        self.__dict__.update(state)
+        self.lock = threading.RLock()
+
+
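
MyMemoryHandler defines __getstate__/__setstate__ because the logging.Handler
base class holds a threading lock, which cannot be pickled; dropping the lock
on the way out and rebuilding it on the way in keeps the handler picklable
(useful when plugin state is shipped to other processes, e.g. by the
multiprocess plugin). A quick round-trip sketch::

    import logging
    import pickle
    from nose.plugins.logcapture import MyMemoryHandler

    handler = MyMemoryHandler('%(message)s', None, [])
    handler.emit(logging.LogRecord('demo', logging.INFO, __file__, 1,
                                   'hello', (), None))
    clone = pickle.loads(pickle.dumps(handler))
    print clone.buffer      # ['hello'] -- the buffer survives the trip
    print clone.lock        # a fresh RLock rebuilt by __setstate__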
+class LogCapture(Plugin):
+    """
+    Log capture plugin. Enabled by default. Disable with --nologcapture.
+    This plugin captures logging statements issued during test execution,
+    appending any output captured to the error or failure output,
+    should the test fail or raise an error.
+    """
+    enabled = True
+    env_opt = 'NOSE_NOLOGCAPTURE'
+    name = 'logcapture'
+    score = 500
+    logformat = '%(name)s: %(levelname)s: %(message)s'
+    logdatefmt = None
+    clear = False
+    filters = ['-nose']
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option(
+            "--nologcapture", action="store_false",
+            default=not env.get(self.env_opt), dest="logcapture",
+            help="Disable logging capture plugin. "
+                 "Logging configuration will be left intact."
+                 " [NOSE_NOLOGCAPTURE]")
+        parser.add_option(
+            "--logging-format", action="store", dest="logcapture_format",
+            default=env.get('NOSE_LOGFORMAT') or self.logformat,
+            metavar="FORMAT",
+            help="Specify custom format to print statements. "
+                 "Uses the same format as used by standard logging handlers."
+                 " [NOSE_LOGFORMAT]")
+        parser.add_option(
+            "--logging-datefmt", action="store", dest="logcapture_datefmt",
+            default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
+            metavar="FORMAT",
+            help="Specify custom date/time format to print statements. "
+                 "Uses the same format as used by standard logging handlers."
+                 " [NOSE_LOGDATEFMT]")
+        parser.add_option(
+            "--logging-filter", action="store", dest="logcapture_filters",
+            default=env.get('NOSE_LOGFILTER'),
+            metavar="FILTER",
+            help="Specify which statements to filter in/out. "
+                 "By default, everything is captured. If the output is too"
+                 " verbose,\nuse this option to filter out needless output.\n"
+                 "Example: filter=foo will capture statements issued ONLY to\n"
+                 " foo or foo.what.ever.sub but not foobar or other logger.\n"
+                 "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
+                 "If any logger name is prefixed with a minus, eg filter=-foo,\n"
+                 "it will be excluded rather than included. Default: "
+                 "exclude logging messages from nose itself (-nose)."
+                 " [NOSE_LOGFILTER]\n")
+        parser.add_option(
+            "--logging-clear-handlers", action="store_true",
+            default=False, dest="logcapture_clear",
+            help="Clear all other logging handlers")
+        parser.add_option(
+            "--logging-level", action="store",
+            default='NOTSET', dest="logcapture_level",
+            help="Set the log level to capture")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        self.conf = conf
+        # Disable if explicitly disabled, or if logging is
+        # configured via logging config file
+        if not options.logcapture or conf.loggingConfig:
+            self.enabled = False
+        self.logformat = options.logcapture_format
+        self.logdatefmt = options.logcapture_datefmt
+        self.clear = options.logcapture_clear
+        self.loglevel = options.logcapture_level
+        if options.logcapture_filters:
+            self.filters = options.logcapture_filters.split(',')
+
+    def setupLoghandler(self):
+        # setup our handler with root logger
+        root_logger = logging.getLogger()
+        if self.clear:
+            if hasattr(root_logger, "handlers"):
+                for handler in root_logger.handlers:
+                    root_logger.removeHandler(handler)
+            for logger in logging.Logger.manager.loggerDict.values():
+                if hasattr(logger, "handlers"):
+                    for handler in logger.handlers:
+                        logger.removeHandler(handler)
+        # make sure there isn't one already
+        # you can't simply use "if self.handler not in root_logger.handlers"
+        # since at least in unit tests this doesn't work --
+        # LogCapture() is instantiated for each test case while root_logger
+        # is module global
+        # so we always add new MyMemoryHandler instance
+        for handler in root_logger.handlers[:]:
+            if isinstance(handler, MyMemoryHandler):
+                root_logger.handlers.remove(handler)
+        root_logger.addHandler(self.handler)
+        # to make sure everything gets captured
+        loglevel = getattr(self, "loglevel", "NOTSET")
+        root_logger.setLevel(getattr(logging, loglevel))
+
+    def begin(self):
+        """Set up logging handler before test run begins.
+        """
+        self.start()
+
+    def start(self):
+        self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
+                                       self.filters)
+        self.setupLoghandler()
+
+    def end(self):
+        pass
+
+    def beforeTest(self, test):
+        """Clear buffers and handlers before test.
+        """
+        self.setupLoghandler()
+
+    def afterTest(self, test):
+        """Clear buffers after test.
+        """
+        self.handler.truncate()
+
+    def formatFailure(self, test, err):
+        """Add captured log messages to failure output.
+        """
+        return self.formatError(test, err)
+
+    def formatError(self, test, err):
+        """Add captured log messages to error output.
+        """
+        # logic flow copied from Capture.formatError
+        test.capturedLogging = records = self.formatLogRecords()
+        if not records:
+            return err
+        ec, ev, tb = err
+        return (ec, self.addCaptureToErr(ev, records), tb)
+
+    def formatLogRecords(self):
+        return map(safe_str, self.handler.buffer)
+
+    def addCaptureToErr(self, ev, records):
+        return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
+                          records + \
+                          [ln('>> end captured logging <<')])
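
The filter semantics above are prefix-based: an entry matches a logger name
exactly or any of its descendants, and '-' entries veto a record even when an
inclusive entry matches. A quick sketch exercising FilterSet directly::

    from nose.plugins.logcapture import FilterSet

    fs = FilterSet(['myapp', '-myapp.noisy'])
    print fs.allow('myapp.db')          # True:  under myapp
    print fs.allow('myapp.noisy.sub')   # False: vetoed by -myapp.noisy
    print fs.allow('other')             # False: matches no inclusive entry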
diff --git a/lib/spack/external/nose/plugins/manager.py b/lib/spack/external/nose/plugins/manager.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d2ed22b6fe74cbca5556e4cdca83d30f96286c7
--- /dev/null
+++ b/lib/spack/external/nose/plugins/manager.py
@@ -0,0 +1,460 @@
+"""
+Plugin Manager
+--------------
+
+A plugin manager class is used to load plugins, manage the list of
+loaded plugins, and proxy calls to those plugins.
+
+The plugin managers provided with nose are:
+
+:class:`PluginManager`
+    This manager doesn't implement loadPlugins, so it can only work
+    with a static list of plugins.
+
+:class:`BuiltinPluginManager`
+    This manager loads plugins referenced in ``nose.plugins.builtin``.
+
+:class:`EntryPointPluginManager`
+    This manager uses setuptools entrypoints to load plugins.
+
+:class:`ExtraPluginsPluginManager`
+    This manager loads extra plugins specified with the keyword
+    `addplugins`.
+
+:class:`DefaultPluginManager`
+    This is the manager class that will be used by default. If
+    setuptools is installed, it is a subclass of
+    :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
+    otherwise, an alias to :class:`BuiltinPluginManager`.
+
+:class:`RestrictedPluginManager`
+    This manager is for use in test runs where some plugin calls are
+    not available, such as runs started with ``python setup.py test``,
+    where the test runner is the default unittest :class:`TextTestRunner`. It
+    is a subclass of :class:`DefaultPluginManager`.
+
+Writing a plugin manager
+========================
+
+If you want to load plugins via some other means, you can write a
+plugin manager and pass an instance of your plugin manager class when
+instantiating the :class:`nose.config.Config` instance that you pass to
+:class:`TestProgram` (or :func:`main` or :func:`run`).
+
+To implement your plugin loading scheme, implement ``loadPlugins()``,
+and in that method, call ``addPlugin()`` with an instance of each plugin
+you wish to make available. Make sure to call
+``super(YourManager, self).loadPlugins()`` as well if you have subclassed a manager
+other than ``PluginManager``.
+
+"""
+import inspect
+import logging
+import os
+import sys
+from itertools import chain as iterchain
+from warnings import warn
+import nose.config
+from nose.failure import Failure
+from nose.plugins.base import IPluginInterface
+from nose.pyversion import sort_list
+
+try:
+    import cPickle as pickle
+except:
+    import pickle
+try:
+    from cStringIO import StringIO
+except:
+    from StringIO import StringIO
+
+
+__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
+           'BuiltinPluginManager', 'RestrictedPluginManager']
+
+log = logging.getLogger(__name__)
+
+
+class PluginProxy(object):
+    """Proxy for plugin calls. Essentially a closure bound to the
+    given call and plugin list.
+
+    The plugin proxy also must be bound to a particular plugin
+    interface specification, so that it knows what calls are available
+    and any special handling that is required for each call.
+    """
+    interface = IPluginInterface
+    def __init__(self, call, plugins):
+        try:
+            self.method = getattr(self.interface, call)
+        except AttributeError:
+            raise AttributeError("%s is not a valid %s method"
+                                 % (call, self.interface.__name__))
+        self.call = self.makeCall(call)
+        self.plugins = []
+        for p in plugins:
+            self.addPlugin(p, call)
+
+    def __call__(self, *arg, **kw):
+        return self.call(*arg, **kw)
+
+    def addPlugin(self, plugin, call):
+        """Add plugin to my list of plugins to call, if it has the attribute
+        I'm bound to.
+        """
+        meth = getattr(plugin, call, None)
+        if meth is not None:
+            if call == 'loadTestsFromModule' and \
+                    len(inspect.getargspec(meth)[0]) == 2:
+                orig_meth = meth
+                meth = lambda module, path, **kwargs: orig_meth(module)
+            self.plugins.append((plugin, meth))
+
+    def makeCall(self, call):
+        if call == 'loadTestsFromNames':
+            # special case -- load tests from names behaves somewhat differently
+            # from other chainable calls, because plugins return a tuple, only
+            # part of which can be chained to the next plugin.
+            return self._loadTestsFromNames
+
+        meth = self.method
+        if getattr(meth, 'generative', False):
+            # call all plugins and yield a flattened iterator of their results
+            return lambda *arg, **kw: list(self.generate(*arg, **kw))
+        elif getattr(meth, 'chainable', False):
+            return self.chain
+        else:
+            # return a value from the first plugin that returns non-None
+            return self.simple
+
+    def chain(self, *arg, **kw):
+        """Call plugins in a chain, where the result of each plugin call is
+        sent to the next plugin as input. The final output result is returned.
+        """
+        result = None
+        # extract the static arguments (if any) from arg so they can
+        # be passed to each plugin call in the chain
+        static = [a for (static, a)
+                  in zip(getattr(self.method, 'static_args', []), arg)
+                  if static]
+        for p, meth in self.plugins:
+            result = meth(*arg, **kw)
+            arg = static[:]
+            arg.append(result)
+        return result
+
+    def generate(self, *arg, **kw):
+        """Call all plugins, yielding each item in each non-None result.
+        """
+        for p, meth in self.plugins:
+            result = None
+            try:
+                result = meth(*arg, **kw)
+                if result is not None:
+                    for r in result:
+                        yield r
+            except (KeyboardInterrupt, SystemExit):
+                raise
+            except:
+                exc = sys.exc_info()
+                yield Failure(*exc)
+                continue
+
+    def simple(self, *arg, **kw):
+        """Call all plugins, returning the first non-None result.
+        """
+        for p, meth in self.plugins:
+            result = meth(*arg, **kw)
+            if result is not None:
+                return result
+
+    def _loadTestsFromNames(self, names, module=None):
+        """Chainable but not quite normal. Plugins return a tuple of
+        (tests, names) after processing the names. The tests are added
+        to a suite that is accumulated throughout the full call, while
+        names are input for the next plugin in the chain.
+        """
+        suite = []
+        for p, meth in self.plugins:
+            result = meth(names, module=module)
+            if result is not None:
+                suite_part, names = result
+                if suite_part:
+                    suite.extend(suite_part)
+        return suite, names
+
+
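
The dispatch mode is chosen per interface method: generative methods collect
and flatten all plugin results, chainable methods thread each plugin's result
into the next plugin, and everything else stops at the first non-None return.
A sketch of the chainable case, using formatError (declared chainable on
IPluginInterface, with the test argument held static), with invented plugin
classes::

    from nose.plugins.manager import PluginProxy

    class AddA(object):
        def formatError(self, test, err):
            ec, ev, tb = err
            return (ec, ev + ' +A', tb)

    class AddB(object):
        def formatError(self, test, err):
            ec, ev, tb = err
            return (ec, ev + ' +B', tb)

    proxy = PluginProxy('formatError', [AddA(), AddB()])
    # each plugin receives the previous plugin's return value as err
    print proxy(None, (Exception, 'boom', None))
    # -> (<... Exception ...>, 'boom +A +B', None)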
+class NoPlugins(object):
+    """Null Plugin manager that has no plugins."""
+    interface = IPluginInterface
+    def __init__(self):
+        self._plugins = self.plugins = ()
+
+    def __iter__(self):
+        return iter(())  # __iter__ must return an iterator, not a bare tuple
+
+    def _doNothing(self, *args, **kwds):
+        pass
+
+    def _emptyIterator(self, *args, **kwds):
+        return ()
+
+    def __getattr__(self, call):
+        method = getattr(self.interface, call)
+        if getattr(method, "generative", False):
+            return self._emptyIterator
+        else:
+            return self._doNothing
+
+    def addPlugin(self, plug):
+        raise NotImplementedError()
+
+    def addPlugins(self, plugins):
+        raise NotImplementedError()
+
+    def configure(self, options, config):
+        pass
+
+    def loadPlugins(self):
+        pass
+
+    def sort(self):
+        pass
+
+
+class PluginManager(object):
+    """Base class for plugin managers. PluginManager is intended to be
+    used only with a static list of plugins. The loadPlugins() implementation
+    only reloads plugins from _extraplugins to prevent those from being
+    overridden by a subclass.
+
+    The basic functionality of a plugin manager is to proxy all unknown
+    attributes through a ``PluginProxy`` to a list of plugins.
+
+    Note that the list of plugins *may not* be changed after the first plugin
+    call.
+    """
+    proxyClass = PluginProxy
+
+    def __init__(self, plugins=(), proxyClass=None):
+        self._plugins = []
+        self._extraplugins = ()
+        self._proxies = {}
+        if plugins:
+            self.addPlugins(plugins)
+        if proxyClass is not None:
+            self.proxyClass = proxyClass
+
+    def __getattr__(self, call):
+        try:
+            return self._proxies[call]
+        except KeyError:
+            proxy = self.proxyClass(call, self._plugins)
+            self._proxies[call] = proxy
+        return proxy
+
+    def __iter__(self):
+        return iter(self.plugins)
+
+    def addPlugin(self, plug):
+        # allow, for instance, plugins loaded via entry points to
+        # supplant builtin plugins.
+        new_name = getattr(plug, 'name', object())
+        self._plugins[:] = [p for p in self._plugins
+                            if getattr(p, 'name', None) != new_name]
+        self._plugins.append(plug)
+
+    def addPlugins(self, plugins=(), extraplugins=()):
+        """extraplugins are maintained in a separate list and
+        re-added by loadPlugins() to prevent their being overwritten
+        by plugins added by a subclass of PluginManager
+        """
+        self._extraplugins = extraplugins
+        for plug in iterchain(plugins, extraplugins):
+            self.addPlugin(plug)
+
+    def configure(self, options, config):
+        """Configure the set of plugins with the given options
+        and config instance. After configuration, disabled plugins
+        are removed from the plugins list.
+        """
+        log.debug("Configuring plugins")
+        self.config = config
+        cfg = PluginProxy('configure', self._plugins)
+        cfg(options, config)
+        enabled = [plug for plug in self._plugins if plug.enabled]
+        self.plugins = enabled
+        self.sort()
+        log.debug("Plugins enabled: %s", enabled)
+
+    def loadPlugins(self):
+        for plug in self._extraplugins:
+            self.addPlugin(plug)
+
+    def sort(self):
+        return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
+
+    def _get_plugins(self):
+        return self._plugins
+
+    def _set_plugins(self, plugins):
+        self._plugins = []
+        self.addPlugins(plugins)
+
+    plugins = property(_get_plugins, _set_plugins, None,
+                       """Access the list of plugins managed by
+                       this plugin manager""")
+
+
+class ZeroNinePlugin:
+    """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
+    """
+    def __init__(self, plugin):
+        self.plugin = plugin
+
+    def options(self, parser, env=os.environ):
+        self.plugin.add_options(parser, env)
+
+    def addError(self, test, err):
+        if not hasattr(self.plugin, 'addError'):
+            return
+        # switch off to addSkip, addDeprecated if those types
+        from nose.exc import SkipTest, DeprecatedTest
+        ec, ev, tb = err
+        if issubclass(ec, SkipTest):
+            if not hasattr(self.plugin, 'addSkip'):
+                return
+            return self.plugin.addSkip(test.test)
+        elif issubclass(ec, DeprecatedTest):
+            if not hasattr(self.plugin, 'addDeprecated'):
+                return
+            return self.plugin.addDeprecated(test.test)
+        # add capt
+        capt = test.capturedOutput
+        return self.plugin.addError(test.test, err, capt)
+
+    def loadTestsFromFile(self, filename):
+        if hasattr(self.plugin, 'loadTestsFromPath'):
+            return self.plugin.loadTestsFromPath(filename)
+
+    def addFailure(self, test, err):
+        if not hasattr(self.plugin, 'addFailure'):
+            return
+        # add capt and tbinfo
+        capt = test.capturedOutput
+        tbinfo = test.tbinfo
+        return self.plugin.addFailure(test.test, err, capt, tbinfo)
+
+    def addSuccess(self, test):
+        if not hasattr(self.plugin, 'addSuccess'):
+            return
+        capt = test.capturedOutput
+        self.plugin.addSuccess(test.test, capt)
+
+    def startTest(self, test):
+        if not hasattr(self.plugin, 'startTest'):
+            return
+        return self.plugin.startTest(test.test)
+
+    def stopTest(self, test):
+        if not hasattr(self.plugin, 'stopTest'):
+            return
+        return self.plugin.stopTest(test.test)
+
+    def __getattr__(self, val):
+        return getattr(self.plugin, val)
+
+
+class EntryPointPluginManager(PluginManager):
+    """Plugin manager that loads plugins from the `nose.plugins` and
+    `nose.plugins.0.10` entry points.
+    """
+    entry_points = (('nose.plugins.0.10', None),
+                    ('nose.plugins', ZeroNinePlugin))
+
+    def loadPlugins(self):
+        """Load plugins by iterating the `nose.plugins` entry point.
+        """
+        from pkg_resources import iter_entry_points
+        loaded = {}
+        for entry_point, adapt in self.entry_points:
+            for ep in iter_entry_points(entry_point):
+                if ep.name in loaded:
+                    continue
+                loaded[ep.name] = True
+                log.debug('%s load plugin %s', self.__class__.__name__, ep)
+                try:
+                    plugcls = ep.load()
+                except KeyboardInterrupt:
+                    raise
+                except Exception, e:
+                    # never want a plugin load to kill the test run
+                    # but we can't log here because the logger is not yet
+                    # configured
+                    warn("Unable to load plugin %s: %s" % (ep, e),
+                         RuntimeWarning)
+                    continue
+                if adapt:
+                    plug = adapt(plugcls())
+                else:
+                    plug = plugcls()
+                self.addPlugin(plug)
+        super(EntryPointPluginManager, self).loadPlugins()
+
+
+class BuiltinPluginManager(PluginManager):
+    """Plugin manager that loads plugins from the list in
+    `nose.plugins.builtin`.
+    """
+    def loadPlugins(self):
+        """Load plugins in nose.plugins.builtin
+        """
+        from nose.plugins import builtin
+        for plug in builtin.plugins:
+            self.addPlugin(plug())
+        super(BuiltinPluginManager, self).loadPlugins()
+
+try:
+    import pkg_resources
+    class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
+        pass
+
+except ImportError:
+    class DefaultPluginManager(BuiltinPluginManager):
+        pass
+
+class RestrictedPluginManager(DefaultPluginManager):
+    """Plugin manager that restricts the plugin list to those not
+    excluded by a list of exclude methods. Any plugin that implements
+    an excluded method will be removed from the manager's plugin list
+    after plugins are loaded.
+    """
+    def __init__(self, plugins=(), exclude=(), load=True):
+        DefaultPluginManager.__init__(self, plugins)
+        self.load = load
+        self.exclude = exclude
+        self.excluded = []
+        self._excludedOpts = None
+
+    def excludedOption(self, name):
+        if self._excludedOpts is None:
+            from optparse import OptionParser
+            self._excludedOpts = OptionParser(add_help_option=False)
+            for plugin in self.excluded:
+                plugin.options(self._excludedOpts, env={})
+        return self._excludedOpts.get_option('--' + name)
+
+    def loadPlugins(self):
+        if self.load:
+            DefaultPluginManager.loadPlugins(self)
+        allow = []
+        for plugin in self.plugins:
+            ok = True
+            for method in self.exclude:
+                if hasattr(plugin, method):
+                    ok = False
+                    self.excluded.append(plugin)
+                    break
+            if ok:
+                allow.append(plugin)
+        self.plugins = allow
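
Following the recipe in the module docstring, a minimal custom manager might
look like this (MyPlugin is a hypothetical plugin class)::

    import nose
    import nose.config
    from nose.plugins.manager import BuiltinPluginManager

    class MyPluginManager(BuiltinPluginManager):
        def loadPlugins(self):
            self.addPlugin(MyPlugin())
            # keep the builtin plugins available as well
            super(MyPluginManager, self).loadPlugins()

    config = nose.config.Config(plugins=MyPluginManager())
    nose.main(config=config)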
diff --git a/lib/spack/external/nose/plugins/multiprocess.py b/lib/spack/external/nose/plugins/multiprocess.py
new file mode 100644
index 0000000000000000000000000000000000000000..2cae744a111b5e23a92999a4634f2bd5445d5457
--- /dev/null
+++ b/lib/spack/external/nose/plugins/multiprocess.py
@@ -0,0 +1,835 @@
+"""
+Overview
+========
+
+The multiprocess plugin enables you to distribute your test run among a set of
+worker processes that run tests in parallel. This can speed up CPU-bound test
+runs (as long as the number of worker processes is around the number of
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else.
+
+.. note ::
+
+   See :doc:`../doc_tests/test_multiprocess/multiprocess` for
+   additional documentation and examples. Use of this plugin on python
+   2.5 or earlier requires the multiprocessing_ module, also available
+   from PyPI.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process
+separately. This ideal is not attainable in all cases, however, because many
+test suites depend on context (class, module or package) fixtures.
+
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute concurrently and multiple times, typically once per test.
+
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
+
+How results are collected and reported
+======================================
+
+As each test or suite executes in a worker process, results (failures, errors,
+and specially handled exceptions like SkipTest) are collected in that
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
+workers have died, the result summary is output as normal.
+
+Beware!
+=======
+
+Not all test suites will benefit from, or even operate correctly using, this
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
+interactions and behaviors due to the way in which tests are dispatched and
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
+
+But the biggest issue you will face is probably concurrency. Unless you
+have kept your tests as religiously pure unit tests, with no side-effects, no
+ordering issues, and no external dependencies, chances are you will experience
+odd, intermittent and unexplainable failures and errors when using this
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
+your test suite is not safe for concurrency.
+
+New Features in 1.1.0
+=====================
+
+* functions generated by test generators are now added to the worker queue,
+  so they are distributed across worker processes as well.
+* fixed timeout functionality, now functions will be terminated with a
+  TimedOutException exception when they exceed their execution time. The
+  worker processes are not terminated.
+* added ``--process-restartworker`` option to restart workers once they are
+  done; this helps control memory usage, since accumulating memory leaks can
+  make long runs very difficult.
+* added global _instantiate_plugins to configure which plugins are started
+  on the worker processes.
+
+"""
+
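
A sketch of the two context-level switches described in the docstring above,
placed in a test module (the fixture helpers are hypothetical)::

    # fixtures are re-entrant: suites under this context may be split up,
    # so setup_module may run concurrently, typically once per test
    _multiprocess_can_split_ = True

    def setup_module():
        make_per_process_scratch_dir()      # hypothetical helper

Or, for a fixture that every worker can share::

    # run the fixture once, in the primary nose process
    _multiprocess_shared_ = True

    def setup_module():
        start_shared_http_server()          # hypothetical helper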
+import logging
+import os
+import sys
+import time
+import traceback
+import unittest
+import pickle
+import signal
+import nose.case
+from nose.core import TextTestRunner
+from nose import failure
+from nose import loader
+from nose.plugins.base import Plugin
+from nose.pyversion import bytes_
+from nose.result import TextTestResult
+from nose.suite import ContextSuite
+from nose.util import test_address
+try:
+    # 2.7+
+    from unittest.runner import _WritelnDecorator
+except ImportError:
+    from unittest import _WritelnDecorator
+from Queue import Empty
+from warnings import warn
+try:
+    from cStringIO import StringIO
+except ImportError:
+    import StringIO
+
+# this is a list of plugin classes that will be checked for and created inside 
+# each worker process
+_instantiate_plugins = None
+
+log = logging.getLogger(__name__)
+
+Process = Queue = Pool = Event = Value = Array = None
+
+# has to inherit from KeyboardInterrupt so it will interrupt the process properly
+class TimedOutException(KeyboardInterrupt):
+    def __init__(self, value = "Timed Out"):
+        self.value = value
+    def __str__(self):
+        return repr(self.value)
+
+def _import_mp():
+    global Process, Queue, Pool, Event, Value, Array
+    try:
+        from multiprocessing import Manager, Process
+        # prevent SIGINT (KeyboardInterrupt) from interrupting the server
+        # process created by the manager (which holds Python objects and lets
+        # other processes manipulate them through proxies), so that the
+        # communication channel between subprocesses and the main process
+        # remains usable after ctrl+C is received in the main process.
+        old=signal.signal(signal.SIGINT, signal.SIG_IGN)
+        m = Manager()
+        #reset it back so main process will receive a KeyboardInterrupt
+        #exception on ctrl+c
+        signal.signal(signal.SIGINT, old)
+        Queue, Pool, Event, Value, Array = (
+                m.Queue, m.Pool, m.Event, m.Value, m.Array
+        )
+    except ImportError:
+        warn("multiprocessing module is not available, multiprocess plugin "
+             "cannot be used", RuntimeWarning)
+
+
+class TestLet:
+    def __init__(self, case):
+        try:
+            self._id = case.id()
+        except AttributeError:
+            pass
+        self._short_description = case.shortDescription()
+        self._str = str(case)
+
+    def id(self):
+        return self._id
+
+    def shortDescription(self):
+        return self._short_description
+
+    def __str__(self):
+        return self._str
+
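
TestLet serves as a picklable stand-in for reporting across the process
boundary: live test cases generally cannot be pickled, so only the strings
needed for reporting (id, short description, str) are snapshotted at
construction time. A quick sketch::

    import pickle
    import unittest
    from nose.plugins.multiprocess import TestLet

    class T(unittest.TestCase):
        def test_it(self):
            pass

    let = TestLet(T('test_it'))
    clone = pickle.loads(pickle.dumps(let))
    print clone.id()                # same id string as the original case
    print clone.shortDescription()  # snapshot taken at construction time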
+class MultiProcess(Plugin):
+    """
+    Run tests in multiple processes. Requires the multiprocessing module.
+    """
+    score = 1000
+    status = {}
+
+    def options(self, parser, env):
+        """
+        Register command-line options.
+        """
+        parser.add_option("--processes", action="store",
+                          default=env.get('NOSE_PROCESSES', 0),
+                          dest="multiprocess_workers",
+                          metavar="NUM",
+                          help="Spread test run among this many processes. "
+                          "Set a number equal to the number of processors "
+                          "or cores in your machine for best results. "
+                          "Pass a negative number to have the number of "
+                          "processes automatically set to the number of "
+                          "cores. Passing 0 means to disable parallel "
+                          "testing. Default is 0 unless NOSE_PROCESSES is "
+                          "set. "
+                          "[NOSE_PROCESSES]")
+        parser.add_option("--process-timeout", action="store",
+                          default=env.get('NOSE_PROCESS_TIMEOUT', 10),
+                          dest="multiprocess_timeout",
+                          metavar="SECONDS",
+                          help="Set timeout for return of results from each "
+                          "test runner process. Default is 10. "
+                          "[NOSE_PROCESS_TIMEOUT]")
+        parser.add_option("--process-restartworker", action="store_true",
+                          default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
+                          dest="multiprocess_restartworker",
+                          help="If set, will restart each worker process once"
+                          " their tests are done, this helps control memory "
+                          "leaks from killing the system. "
+                          "[NOSE_PROCESS_RESTARTWORKER]")
+
+    def configure(self, options, config):
+        """
+        Configure plugin.
+        """
+        try:
+            self.status.pop('active')
+        except KeyError:
+            pass
+        if not hasattr(options, 'multiprocess_workers'):
+            self.enabled = False
+            return
+        # don't start inside of a worker process
+        if config.worker:
+            return
+        self.config = config
+        try:
+            workers = int(options.multiprocess_workers)
+        except (TypeError, ValueError):
+            workers = 0
+        if workers:
+            _import_mp()
+            if Process is None:
+                self.enabled = False
+                return
+            # Negative number of workers will cause multiprocessing to hang.
+            # Set the number of workers to the CPU count to avoid this.
+            if workers < 0:
+                try:
+                    import multiprocessing
+                    workers = multiprocessing.cpu_count()
+                except NotImplementedError:
+                    self.enabled = False
+                    return
+            self.enabled = True
+            self.config.multiprocess_workers = workers
+            t = float(options.multiprocess_timeout)
+            self.config.multiprocess_timeout = t
+            r = int(options.multiprocess_restartworker)
+            self.config.multiprocess_restartworker = r
+            self.status['active'] = True
+
+    def prepareTestLoader(self, loader):
+        """Remember loader class so MultiProcessTestRunner can instantiate
+        the right loader.
+        """
+        self.loaderClass = loader.__class__
+
+    def prepareTestRunner(self, runner):
+        """Replace test runner with MultiProcessTestRunner.
+        """
+        # replace with our runner class
+        return MultiProcessTestRunner(stream=runner.stream,
+                                      verbosity=self.config.verbosity,
+                                      config=self.config,
+                                      loaderClass=self.loaderClass)
+
+def signalhandler(sig, frame):
+    raise TimedOutException()
+
+class MultiProcessTestRunner(TextTestRunner):
+    waitkilltime = 5.0 # max time to wait to terminate a process that does not
+                       # respond to SIGILL
+    def __init__(self, **kw):
+        self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
+        super(MultiProcessTestRunner, self).__init__(**kw)
+
+    def collect(self, test, testQueue, tasks, to_teardown, result):
+        # dispatch and collect results
+        # put indexes only on queue because tests aren't picklable
+        for case in self.nextBatch(test):
+            log.debug("Next batch %s (%s)", case, type(case))
+            if (isinstance(case, nose.case.Test) and
+                isinstance(case.test, failure.Failure)):
+                log.debug("Case is a Failure")
+                case(result) # run here to capture the failure
+                continue
+            # handle shared fixtures
+            if isinstance(case, ContextSuite) and case.context is failure.Failure:
+                log.debug("Case is a Failure")
+                case(result) # run here to capture the failure
+                continue
+            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
+                log.debug("%s has shared fixtures", case)
+                try:
+                    case.setUp()
+                except (KeyboardInterrupt, SystemExit):
+                    raise
+                except:
+                    log.debug("%s setup failed", sys.exc_info())
+                    result.addError(case, sys.exc_info())
+                else:
+                    to_teardown.append(case)
+                    if case.factory:
+                        ancestors=case.factory.context.get(case, [])
+                        for an in ancestors[:2]:
+                            #log.debug('reset ancestor %s', an)
+                            if getattr(an, '_multiprocess_shared_', False):
+                                an._multiprocess_can_split_=True
+                            #an._multiprocess_shared_=False
+                    self.collect(case, testQueue, tasks, to_teardown, result)
+
+            else:
+                test_addr = self.addtask(testQueue,tasks,case)
+                log.debug("Queued test %s (%s) to %s",
+                          len(tasks), test_addr, testQueue)
+
+    def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
+        currentaddr = Value('c',bytes_(''))
+        currentstart = Value('d',time.time())
+        keyboardCaught = Event()
+        p = Process(target=runner,
+                   args=(iworker, testQueue,
+                         resultQueue,
+                         currentaddr,
+                         currentstart,
+                         keyboardCaught,
+                         shouldStop,
+                         self.loaderClass,
+                         result.__class__,
+                         pickle.dumps(self.config)))
+        p.currentaddr = currentaddr
+        p.currentstart = currentstart
+        p.keyboardCaught = keyboardCaught
+        old = signal.signal(signal.SIGILL, signalhandler)
+        p.start()
+        signal.signal(signal.SIGILL, old)
+        return p
+
+    def run(self, test):
+        """
+        Execute the test (which may be a test suite). If the test is a suite,
+        distribute it out among as many processes as have been configured, at
+        as fine a level as is possible given the context fixtures defined in
+        the suite or any sub-suites.
+
+        """
+        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
+        wrapper = self.config.plugins.prepareTest(test)
+        if wrapper is not None:
+            test = wrapper
+
+        # plugins can decorate or capture the output stream
+        wrapped = self.config.plugins.setOutputStream(self.stream)
+        if wrapped is not None:
+            self.stream = wrapped
+
+        testQueue = Queue()
+        resultQueue = Queue()
+        tasks = []
+        completed = []
+        workers = []
+        to_teardown = []
+        shouldStop = Event()
+
+        result = self._makeResult()
+        start = time.time()
+
+        self.collect(test, testQueue, tasks, to_teardown, result)
+
+        log.debug("Starting %s workers", self.config.multiprocess_workers)
+        for i in range(self.config.multiprocess_workers):
+            p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
+            workers.append(p)
+            log.debug("Started worker process %s", i+1)
+
+        total_tasks = len(tasks)
+        # need to keep track of the next time to check for timeouts in case
+        # more than one process times out at the same time.
+        nexttimeout=self.config.multiprocess_timeout
+        thrownError = None
+
+        try:
+            while tasks:
+                log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
+                          len(completed), total_tasks,nexttimeout)
+                try:
+                    iworker, addr, newtask_addrs, batch_result = resultQueue.get(
+                                                            timeout=nexttimeout)
+                    log.debug('Results received for worker %d, %s, new tasks: %d',
+                              iworker,addr,len(newtask_addrs))
+                    try:
+                        try:
+                            tasks.remove(addr)
+                        except ValueError:
+                            log.warn('worker %s failed to remove from tasks: %s',
+                                     iworker,addr)
+                        total_tasks += len(newtask_addrs)
+                        tasks.extend(newtask_addrs)
+                    except KeyError:
+                        log.debug("Got result for unknown task? %s", addr)
+                        log.debug("current: %s",str(list(tasks)[0]))
+                    else:
+                        completed.append([addr,batch_result])
+                    self.consolidate(result, batch_result)
+                    if (self.config.stopOnError
+                        and not result.wasSuccessful()):
+                        # set the stop condition
+                        shouldStop.set()
+                        break
+                    if self.config.multiprocess_restartworker:
+                        log.debug('joining worker %s',iworker)
+                        # wait for the worker; it is not critical if it
+                        # cannot be joined, since workers that add items to
+                        # testQueue will not terminate until all of their
+                        # items have been read
+                        workers[iworker].join(timeout=1)
+                        if not shouldStop.is_set() and not testQueue.empty():
+                            log.debug('starting new process on worker %s',iworker)
+                            workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+                except Empty:
+                    log.debug("Timed out with %s tasks pending "
+                              "(empty testQueue=%r): %s",
+                              len(tasks),testQueue.empty(),str(tasks))
+                    any_alive = False
+                    for iworker, w in enumerate(workers):
+                        if w.is_alive():
+                            worker_addr = bytes_(w.currentaddr.value,'ascii')
+                            timeprocessing = time.time() - w.currentstart.value
+                            if ( len(worker_addr) == 0
+                                    and timeprocessing > self.config.multiprocess_timeout-0.1):
+                                log.debug('worker %d has finished its work item, '
+                                          'but is not exiting? do we wait for it?',
+                                          iworker)
+                            else:
+                                any_alive = True
+                            if (len(worker_addr) > 0
+                                and timeprocessing > self.config.multiprocess_timeout-0.1):
+                                log.debug('timed out worker %s: %s',
+                                          iworker,worker_addr)
+                                w.currentaddr.value = bytes_('')
+                                # If the process is executing C/C++ code, a
+                                # single SIGILL may not raise a Python
+                                # KeyboardInterrupt, so send repeated signals
+                                # until an exception is caught. If this takes
+                                # too long, terminate the process.
+                                w.keyboardCaught.clear()
+                                startkilltime = time.time()
+                                while not w.keyboardCaught.is_set() and w.is_alive():
+                                    if time.time()-startkilltime > self.waitkilltime:
+                                        # have to terminate...
+                                        log.error("terminating worker %s",iworker)
+                                        w.terminate()
+                                        # there is a small probability that the
+                                        # terminated process might send a result,
+                                        # which has to be specially handled or
+                                        # else processes might get orphaned.
+                                        workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+                                        break
+                                    os.kill(w.pid, signal.SIGILL)
+                                    time.sleep(0.1)
+                    if not any_alive and testQueue.empty():
+                        log.debug("All workers dead")
+                        break
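+                # Recompute the poll timeout: wait no longer than the
+                # earliest remaining deadline among tests still in flight.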
+                nexttimeout=self.config.multiprocess_timeout
+                for w in workers:
+                    if w.is_alive() and len(w.currentaddr.value) > 0:
+                        timeprocessing = time.time()-w.currentstart.value
+                        if timeprocessing <= self.config.multiprocess_timeout:
+                            nexttimeout = min(nexttimeout,
+                                self.config.multiprocess_timeout-timeprocessing)
+            log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))
+
+        except (KeyboardInterrupt, SystemExit), e:
+            log.info('parent received ctrl-c when waiting for test results')
+            thrownError = e
+            #resultQueue.get(False)
+
+            result.addError(test, sys.exc_info())
+
+        try:
+            for case in to_teardown:
+                log.debug("Tearing down shared fixtures for %s", case)
+                try:
+                    case.tearDown()
+                except (KeyboardInterrupt, SystemExit):
+                    raise
+                except:
+                    result.addError(case, sys.exc_info())
+
+            stop = time.time()
+
+            # write results first, since shutting down the processes can freeze
+            result.printErrors()
+            result.printSummary(start, stop)
+            self.config.plugins.finalize(result)
+
+            if thrownError is None:
+                log.debug("Tell all workers to stop")
+                for w in workers:
+                    if w.is_alive():
+                        testQueue.put('STOP', block=False)
+
+            # wait for the workers to end
+            for iworker,worker in enumerate(workers):
+                if worker.is_alive():
+                    log.debug('joining worker %s',iworker)
+                    worker.join()
+                    if worker.is_alive():
+                        log.debug('failed to join worker %s',iworker)
+        except (KeyboardInterrupt, SystemExit):
+            log.info('parent received ctrl-c when shutting down: stop all processes')
+            for worker in workers:
+                if worker.is_alive():
+                    worker.terminate()
+
+            if thrownError: raise thrownError
+            else: raise
+
+        return result
+
+    def addtask(testQueue,tasks,case):
+        arg = None
+        if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
+            # this removes the top level descriptor and allows real function
+            # name to be returned
+            case.test.descriptor = None
+            arg = case.test.arg
+        test_addr = MultiProcessTestRunner.address(case)
+        testQueue.put((test_addr,arg), block=False)
+        if arg is not None:
+            test_addr += str(arg)
+        if tasks is not None:
+            tasks.append(test_addr)
+        return test_addr
+    addtask = staticmethod(addtask)
+
+    def address(case):
+        if hasattr(case, 'address'):
+            file, mod, call = case.address()
+        elif hasattr(case, 'context'):
+            file, mod, call = test_address(case.context)
+        else:
+            raise Exception("Unable to convert %s to address" % case)
+        parts = []
+        if file is None:
+            if mod is None:
+                raise Exception("Unaddressable case %s" % case)
+            else:
+                parts.append(mod)
+        else:
+            # strip __init__.py(c) from end of file part
+            # if present, having it there confuses loader
+            dirname, basename = os.path.split(file)
+            if basename.startswith('__init__'):
+                file = dirname
+            parts.append(file)
+        if call is not None:
+            parts.append(call)
+        return ':'.join(map(str, parts))
+    address = staticmethod(address)
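+    # Illustrative results (hypothetical names): a test method resolves to
+    # something like "/proj/tests/test_foo.py:TestFoo.test_bar", while a
+    # case with no file but a module resolves to "package.module:test_fn".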
+
+    def nextBatch(self, test):
+        # allows tests or suites to mark themselves as not safe
+        # for multiprocess execution
+        if hasattr(test, 'context'):
+            if not getattr(test.context, '_multiprocess_', True):
+                return
+
+        if ((isinstance(test, ContextSuite)
+             and test.hasFixtures(self.checkCanSplit))
+            or not getattr(test, 'can_split', True)
+            or not isinstance(test, unittest.TestSuite)):
+            # regular test case, or a suite with context fixtures
+
+            # special case: when run like nosetests path/to/module.py
+            # the top-level suite has only one item, and it shares
+            # the same context as that item. In that case, we want the
+            # item, not the top-level suite
+            if isinstance(test, ContextSuite):
+                contained = list(test)
+                if (len(contained) == 1
+                    and getattr(contained[0],
+                                'context', None) == test.context):
+                    test = contained[0]
+            yield test
+        else:
+            # Suite is without fixtures at this level; but it may have
+            # fixtures at any deeper level, so we need to examine it all
+            # the way down to the case level
+            for case in test:
+                for batch in self.nextBatch(case):
+                    yield batch
+
+    def checkCanSplit(context, fixt):
+        """
+        Callback that we use to check whether the fixtures found in a
+        context or ancestor are ones we care about.
+
+        Contexts can tell us that their fixtures are reentrant by setting
+        _multiprocess_can_split_. So if we see that, we return False to
+        disregard those fixtures.
+        """
+        if not fixt:
+            return False
+        if getattr(context, '_multiprocess_can_split_', False):
+            return False
+        return True
+    checkCanSplit = staticmethod(checkCanSplit)
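+    # Example (sketch): a test module can declare its fixtures reentrant so
+    # that its tests may still be split across workers:
+    #
+    #     # in a test module
+    #     _multiprocess_can_split_ = True
+    #     def setup_module():
+    #         ...  # may run in more than one worker process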
+
+    def sharedFixtures(self, case):
+        context = getattr(case, 'context', None)
+        if not context:
+            return False
+        return getattr(context, '_multiprocess_shared_', False)
+
+    def consolidate(self, result, batch_result):
+        log.debug("batch result is %s" , batch_result)
+        try:
+            output, testsRun, failures, errors, errorClasses = batch_result
+        except ValueError:
+            log.debug("result in unexpected format %s", batch_result)
+            failure.Failure(*sys.exc_info())(result)
+            return
+        self.stream.write(output)
+        result.testsRun += testsRun
+        result.failures.extend(failures)
+        result.errors.extend(errors)
+        for key, (storage, label, isfail) in errorClasses.items():
+            if key not in result.errorClasses:
+                # Ordinarily storage is result attribute
+                # but it's only processed through the errorClasses
+                # dict, so it's ok to fake it here
+                result.errorClasses[key] = ([], label, isfail)
+            mystorage, _junk, _junk = result.errorClasses[key]
+            mystorage.extend(storage)
+        log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
+
+
+def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+           keyboardCaught, shouldStop, loaderClass, resultClass, config):
+    try:
+        try:
+            return __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+                    keyboardCaught, shouldStop, loaderClass, resultClass, config)
+        except KeyboardInterrupt:
+            log.debug('Worker %s keyboard interrupt, stopping',ix)
+    except Empty:
+        log.debug("Worker %s timed out waiting for tasks", ix)
+
+def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+           keyboardCaught, shouldStop, loaderClass, resultClass, config):
+
+    config = pickle.loads(config)
+    dummy_parser = config.parserClass()
+    if _instantiate_plugins is not None:
+        for pluginclass in _instantiate_plugins:
+            plugin = pluginclass()
+            plugin.addOptions(dummy_parser,{})
+            config.plugins.addPlugin(plugin)
+    config.plugins.configure(config.options,config)
+    config.plugins.begin()
+    log.debug("Worker %s executing, pid=%d", ix,os.getpid())
+    loader = loaderClass(config=config)
+    loader.suiteClass.suiteClass = NoSharedFixtureContextSuite
+
+    def get():
+        return testQueue.get(timeout=config.multiprocess_timeout)
+
+    def makeResult():
+        stream = _WritelnDecorator(StringIO())
+        result = resultClass(stream, descriptions=1,
+                             verbosity=config.verbosity,
+                             config=config)
+        plug_result = config.plugins.prepareTestResult(result)
+        if plug_result:
+            return plug_result
+        return result
+
+    def batch(result):
+        failures = [(TestLet(c), err) for c, err in result.failures]
+        errors = [(TestLet(c), err) for c, err in result.errors]
+        errorClasses = {}
+        for key, (storage, label, isfail) in result.errorClasses.items():
+            errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
+                                 label, isfail)
+        return (
+            result.stream.getvalue(),
+            result.testsRun,
+            failures,
+            errors,
+            errorClasses)
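+    # Main worker loop: tasks arrive on testQueue as (address, arg) tuples,
+    # and the literal string 'STOP' serves as the shutdown sentinel consumed
+    # by iter(get, 'STOP').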
+    for test_addr, arg in iter(get, 'STOP'):
+        if shouldStop.is_set():
+            log.exception('Worker %d STOPPED',ix)
+            break
+        result = makeResult()
+        test = loader.loadTestsFromNames([test_addr])
+        test.testQueue = testQueue
+        test.tasks = []
+        test.arg = arg
+        log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
+        try:
+            if arg is not None:
+                test_addr = test_addr + str(arg)
+            currentaddr.value = bytes_(test_addr)
+            currentstart.value = time.time()
+            test(result)
+            currentaddr.value = bytes_('')
+            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+        except KeyboardInterrupt, e: #TimedOutException:
+            timeout = isinstance(e, TimedOutException)
+            if timeout:
+                keyboardCaught.set()
+            if len(currentaddr.value):
+                if timeout:
+                    msg = 'Worker %s timed out, failing current test %s'
+                else:
+                    msg = 'Worker %s keyboard interrupt, failing current test %s'
+                log.exception(msg,ix,test_addr)
+                currentaddr.value = bytes_('')
+                failure.Failure(*sys.exc_info())(result)
+                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+            else:
+                if timeout:
+                    msg = 'Worker %s test %s timed out'
+                else:
+                    msg = 'Worker %s test %s keyboard interrupt'
+                log.debug(msg,ix,test_addr)
+                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+            if not timeout:
+                raise
+        except SystemExit:
+            currentaddr.value = bytes_('')
+            log.exception('Worker %s system exit',ix)
+            raise
+        except:
+            currentaddr.value = bytes_('')
+            log.exception("Worker %s error running test or returning "
+                            "results",ix)
+            failure.Failure(*sys.exc_info())(result)
+            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+        if config.multiprocess_restartworker:
+            break
+    log.debug("Worker %s ending", ix)
+
+
+class NoSharedFixtureContextSuite(ContextSuite):
+    """
+    Context suite that never fires shared fixtures.
+
+    When a context sets _multiprocess_shared_, fixtures in that context
+    are executed by the main process. Using this suite class prevents them
+    from executing in the runner process as well.
+
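+    A context opts in by setting the marker attribute, e.g. (sketch)::
+
+        class TestExpensiveSetup(unittest.TestCase):
+            _multiprocess_shared_ = True  # fixtures run once, in the parent
+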
+    """
+    testQueue = None
+    tasks = None
+    arg = None
+    def setupContext(self, context):
+        if getattr(context, '_multiprocess_shared_', False):
+            return
+        super(NoSharedFixtureContextSuite, self).setupContext(context)
+
+    def teardownContext(self, context):
+        if getattr(context, '_multiprocess_shared_', False):
+            return
+        super(NoSharedFixtureContextSuite, self).teardownContext(context)
+    def run(self, result):
+        """Run tests in suite inside of suite fixtures.
+        """
+        # proxy the result for myself
+        log.debug("suite %s (%s) run called, tests: %s",
+                  id(self), self, self._tests)
+        if self.resultProxy:
+            result, orig = self.resultProxy(result, self), result
+        else:
+            result, orig = result, result
+        try:
+            #log.debug('setUp for %s', id(self));
+            self.setUp()
+        except KeyboardInterrupt:
+            raise
+        except:
+            self.error_context = 'setup'
+            result.addError(self, self._exc_info())
+            return
+        try:
+            for test in self._tests:
+                if (isinstance(test,nose.case.Test)
+                    and self.arg is not None):
+                    test.test.arg = self.arg
+                else:
+                    test.arg = self.arg
+                test.testQueue = self.testQueue
+                test.tasks = self.tasks
+                if result.shouldStop:
+                    log.debug("stopping")
+                    break
+                # each nose.case.Test will create its own result proxy
+                # so the cases need the original result, to avoid proxy
+                # chains
+                #log.debug('running test %s in suite %s', test, self);
+                try:
+                    test(orig)
+                except KeyboardInterrupt, e:
+                    timeout = isinstance(e, TimedOutException)
+                    if timeout:
+                        msg = 'Timeout when running test %s in suite %s'
+                    else:
+                        msg = 'KeyboardInterrupt when running test %s in suite %s'
+                    log.debug(msg, test, self)
+                    err = (TimedOutException,TimedOutException(str(test)),
+                           sys.exc_info()[2])
+                    test.config.plugins.addError(test,err)
+                    orig.addError(test,err)
+                    if not timeout:
+                        raise
+        finally:
+            self.has_run = True
+            try:
+                #log.debug('tearDown for %s', id(self));
+                self.tearDown()
+            except KeyboardInterrupt:
+                raise
+            except:
+                self.error_context = 'teardown'
+                result.addError(self, self._exc_info())
diff --git a/lib/spack/external/nose/plugins/plugintest.py b/lib/spack/external/nose/plugins/plugintest.py
new file mode 100644
index 0000000000000000000000000000000000000000..76d0d2c48cf6fd0f31fe8e49225d9f0ee315de48
--- /dev/null
+++ b/lib/spack/external/nose/plugins/plugintest.py
@@ -0,0 +1,416 @@
+"""
+Testing Plugins
+===============
+
+The plugin interface is well-tested enough that you can safely unit
+test your use of its hooks with some level of confidence. However,
+there is also a mixin for unittest.TestCase called PluginTester that is
+designed to test plugins in their native runtime environment.
+
+Here's a simple example with a do-nothing plugin and a composed suite.
+
+    >>> import unittest
+    >>> from nose.plugins import Plugin, PluginTester
+    >>> class FooPlugin(Plugin):
+    ...     pass
+    >>> class TestPluginFoo(PluginTester, unittest.TestCase):
+    ...     activate = '--with-foo'
+    ...     plugins = [FooPlugin()]
+    ...     def test_foo(self):
+    ...         for line in self.output:
+    ...             # i.e. check for patterns
+    ...             pass
+    ...
+    ...         # or check for a line containing ...
+    ...         assert "ValueError" in self.output
+    ...     def makeSuite(self):
+    ...         class TC(unittest.TestCase):
+    ...             def runTest(self):
+    ...                 raise ValueError("I hate foo")
+    ...         return [TC('runTest')]
+    ...
+    >>> res = unittest.TestResult()
+    >>> case = TestPluginFoo('test_foo')
+    >>> _ = case(res)
+    >>> res.errors
+    []
+    >>> res.failures
+    []
+    >>> res.wasSuccessful()
+    True
+    >>> res.testsRun
+    1
+
+And here is a more complex example of testing a plugin that has extra
+arguments and reads environment variables.
+
+    >>> import unittest, os
+    >>> from nose.plugins import Plugin, PluginTester
+    >>> class FancyOutputter(Plugin):
+    ...     name = "fancy"
+    ...     def configure(self, options, conf):
+    ...         Plugin.configure(self, options, conf)
+    ...         if not self.enabled:
+    ...             return
+    ...         self.fanciness = 1
+    ...         if options.more_fancy:
+    ...             self.fanciness = 2
+    ...         if 'EVEN_FANCIER' in self.env:
+    ...             self.fanciness = 3
+    ...
+    ...     def options(self, parser, env=os.environ):
+    ...         self.env = env
+    ...         parser.add_option('--more-fancy', action='store_true')
+    ...         Plugin.options(self, parser, env=env)
+    ...
+    ...     def report(self, stream):
+    ...         stream.write("FANCY " * self.fanciness)
+    ...
+    >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
+    ...     activate = '--with-fancy' # enables the plugin
+    ...     plugins = [FancyOutputter()]
+    ...     args = ['--more-fancy']
+    ...     env = {'EVEN_FANCIER': '1'}
+    ...
+    ...     def test_fancy_output(self):
+    ...         assert "FANCY FANCY FANCY" in self.output, (
+    ...                                         "got: %s" % self.output)
+    ...     def makeSuite(self):
+    ...         class TC(unittest.TestCase):
+    ...             def runTest(self):
+    ...                 raise ValueError("I hate fancy stuff")
+    ...         return [TC('runTest')]
+    ...
+    >>> res = unittest.TestResult()
+    >>> case = TestFancyOutputter('test_fancy_output')
+    >>> _ = case(res)
+    >>> res.errors
+    []
+    >>> res.failures
+    []
+    >>> res.wasSuccessful()
+    True
+    >>> res.testsRun
+    1
+
+"""
+
+import re
+import sys
+from warnings import warn
+
+try:
+    from cStringIO import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+__all__ = ['PluginTester', 'run']
+
+from os import getpid
+class MultiProcessFile(object):
+    """
+    helper for testing multiprocessing
+
+    multiprocessing poses a problem for doctests, since the strategy
+    of replacing sys.stdout/stderr with file-like objects then
+    inspecting the results won't work: the child processes will
+    write to the objects, but the data will not be reflected
+    in the parent doctest-ing process.
+
+    The solution is to create file-like objects which will interact with
+    multiprocessing in a more desirable way.
+
+    All processes can write to this object, but only the creator can read.
+    This allows the testing system to see a unified picture of I/O.
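+
+    Concretely, write() pushes (pid, data) pairs onto a managed queue, and
+    the creating process drains that queue into a private StringIO buffer
+    before any read-style access (see buffer() below).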
+    """
+    def __init__(self):
+        # per advice at:
+        #    http://docs.python.org/library/multiprocessing.html#all-platforms
+        self.__master = getpid()
+        self.__queue = Manager().Queue()
+        self.__buffer = StringIO()
+        self.softspace = 0
+
+    def buffer(self):
+        if getpid() != self.__master:
+            return
+
+        from Queue import Empty
+        from collections import defaultdict
+        cache = defaultdict(str)
+        while True:
+            try:
+                pid, data = self.__queue.get_nowait()
+            except Empty:
+                break
+            if pid == ():
+                #show parent output after children
+                #this is what users see, usually
+                pid = ( 1e100, ) # googol!
+            cache[pid] += data
+        for pid in sorted(cache):
+            #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
+            self.__buffer.write( cache[pid] )
+    def write(self, data):
+        # note that these pids are in the form of current_process()._identity
+        # rather than OS pids
+        from multiprocessing import current_process
+        pid = current_process()._identity
+        self.__queue.put((pid, data))
+    def __iter__(self):
+        "getattr doesn't work for iter()"
+        self.buffer()
+        return self.__buffer
+    def seek(self, offset, whence=0):
+        self.buffer()
+        return self.__buffer.seek(offset, whence)
+    def getvalue(self):
+        self.buffer()
+        return self.__buffer.getvalue()
+    def __getattr__(self, attr):
+        return getattr(self.__buffer, attr)
+
+try:
+    from multiprocessing import Manager
+    Buffer = MultiProcessFile
+except ImportError:
+    Buffer = StringIO
+
+class PluginTester(object):
+    """A mixin for testing nose plugins in their runtime environment.
+
+    Subclass this and mix in unittest.TestCase to run integration/functional
+    tests on your plugin.  When setUp() is called, the stub test suite is
+    executed with your plugin so that during an actual test you can inspect the
+    artifacts of how your plugin interacted with the stub test suite.
+
+    - activate
+
+      - the argument to send nosetests to activate the plugin
+
+    - suitepath
+
+      - if set, this is the path of the suite to test. Otherwise, you
+        will need to use the hook, makeSuite()
+
+    - plugins
+
+      - the list of plugins to make available during the run. Note
+        that this does not mean these plugins will be *enabled* during
+        the run -- only the plugins enabled by the activate argument
+        or other settings in argv or env will be enabled.
+
+    - args
+
+      - a list of arguments to add to the nosetests command, in addition to
+        the activate argument
+
+    - env
+
+      - optional dict of environment variables to send nosetests
+
+    """
+    activate = None
+    suitepath = None
+    args = None
+    env = {}
+    argv = None
+    plugins = []
+    ignoreFiles = None
+
+    def makeSuite(self):
+        """returns a suite object of tests to run (unittest.TestSuite())
+
+        If self.suitepath is None, this must be implemented. The returned suite
+        object will be executed with all plugins activated.  It may return
+        None.
+
+        Here is an example of a basic suite object you can return ::
+
+            >>> import unittest
+            >>> class SomeTest(unittest.TestCase):
+            ...     def runTest(self):
+            ...         raise ValueError("Now do something, plugin!")
+            ...
+            >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
+            <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>
+
+        """
+        raise NotImplementedError
+
+    def _execPlugin(self):
+        """execute the plugin on the internal test suite.
+        """
+        from nose.config import Config
+        from nose.core import TestProgram
+        from nose.plugins.manager import PluginManager
+
+        suite = None
+        stream = Buffer()
+        conf = Config(env=self.env,
+                      stream=stream,
+                      plugins=PluginManager(plugins=self.plugins))
+        if self.ignoreFiles is not None:
+            conf.ignoreFiles = self.ignoreFiles
+        if not self.suitepath:
+            suite = self.makeSuite()
+
+        self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
+                                exit=False)
+        self.output = AccessDecorator(stream)
+
+    def setUp(self):
+        """runs nosetests with the specified test suite, all plugins
+        activated.
+        """
+        self.argv = ['nosetests', self.activate]
+        if self.args:
+            self.argv.extend(self.args)
+        if self.suitepath:
+            self.argv.append(self.suitepath)
+
+        self._execPlugin()
+
+
+class AccessDecorator(object):
+    stream = None
+    _buf = None
+    def __init__(self, stream):
+        self.stream = stream
+        stream.seek(0)
+        self._buf = stream.read()
+        stream.seek(0)
+    def __contains__(self, val):
+        return val in self._buf
+    def __iter__(self):
+        return iter(self.stream)
+    def __str__(self):
+        return self._buf
+
+
+def blankline_separated_blocks(text):
+    "a bunch of === characters is also considered a blank line"
+    block = []
+    for line in text.splitlines(True):
+        block.append(line)
+        line = line.strip()
+        if not line or line.startswith('===') and not line.strip('='):
+            yield "".join(block)
+            block = []
+    if block:
+        yield "".join(block)
+
+
+def remove_stack_traces(out):
+    # this regexp taken from Python 2.5's doctest
+    traceback_re = re.compile(r"""
+        # Grab the traceback header.  Different versions of Python have
+        # said different things on the first traceback line.
+        ^(?P<hdr> Traceback\ \(
+            (?: most\ recent\ call\ last
+            |   innermost\ last
+            ) \) :
+        )
+        \s* $                   # toss trailing whitespace on the header.
+        (?P<stack> .*?)         # don't blink: absorb stuff until...
+        ^(?=\w)                 #     a line *starts* with alphanum.
+        .*?(?P<exception> \w+ ) # exception name
+        (?P<msg> [:\n] .*)      # the rest
+        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+    blocks = []
+    for block in blankline_separated_blocks(out):
+        blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block))
+    return "".join(blocks)
+
+
+def simplify_warnings(out):
+    warn_re = re.compile(r"""
+        # Cut the file and line no, up to the warning name
+        ^.*:\d+:\s
+        (?P<category>\w+): \s+        # warning category
+        (?P<detail>.+) $ \n?          # warning message
+        ^ .* $                        # stack frame
+        """, re.VERBOSE | re.MULTILINE)
+    return warn_re.sub(r"\g<category>: \g<detail>", out)
+
+
+def remove_timings(out):
+    return re.sub(
+        r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out)
+
+
+def munge_nose_output_for_doctest(out):
+    """Modify nose output to make it easy to use in doctests."""
+    out = remove_stack_traces(out)
+    out = simplify_warnings(out)
+    out = remove_timings(out)
+    return out.strip()
+
+
+def run(*arg, **kw):
+    """
+    Specialized version of nose.run for use inside of doctests that
+    test test runs.
+
+    This version of run() prints the result output to stdout.  Before
+    printing, the output is processed by replacing the timing
+    information with an ellipsis (...), removing traceback stacks, and
+    removing trailing whitespace.
+
+    Use this version of run wherever you are writing a doctest that
+    tests nose (or unittest) test result output.
+
+    Note: do not use doctest: +ELLIPSIS when testing nose output,
+    since ellipses ("test_foo ... ok") in your expected test runner
+    output may match multiple lines of output, causing spurious test
+    passes!
+    """
+    from nose import run
+    from nose.config import Config
+    from nose.plugins.manager import PluginManager
+
+    buffer = Buffer()
+    if 'config' not in kw:
+        plugins = kw.pop('plugins', [])
+        if isinstance(plugins, list):
+            plugins = PluginManager(plugins=plugins)
+        env = kw.pop('env', {})
+        kw['config'] = Config(env=env, plugins=plugins)
+    if 'argv' not in kw:
+        kw['argv'] = ['nosetests', '-v']
+    kw['config'].stream = buffer
+
+    # Set up buffering so that all output goes to our buffer,
+    # or warn user if deprecated behavior is active. If this is not
+    # done, prints and warnings will either be out of place or
+    # disappear.
+    stderr = sys.stderr
+    stdout = sys.stdout
+    if kw.pop('buffer_all', False):
+        sys.stdout = sys.stderr = buffer
+        restore = True
+    else:
+        restore = False
+        warn("The behavior of nose.plugins.plugintest.run() will change in "
+             "the next release of nose. The current behavior does not "
+             "correctly account for output to stdout and stderr. To enable "
+             "correct behavior, use run_buffered() instead, or pass "
+             "the keyword argument buffer_all=True to run().",
+             DeprecationWarning, stacklevel=2)
+    try:
+        run(*arg, **kw)
+    finally:
+        if restore:
+            sys.stderr = stderr
+            sys.stdout = stdout
+    out = buffer.getvalue()
+    print munge_nose_output_for_doctest(out)
+
+
+def run_buffered(*arg, **kw):
+    kw['buffer_all'] = True
+    run(*arg, **kw)
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/lib/spack/external/nose/plugins/prof.py b/lib/spack/external/nose/plugins/prof.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d304a934bdb5d9d1f8203d6e521dbcbba9f0b5b
--- /dev/null
+++ b/lib/spack/external/nose/plugins/prof.py
@@ -0,0 +1,154 @@
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
+
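+A typical invocation (illustrative; the restriction value is passed through
+to pstats.Stats.print_stats) might be::
+
+    nosetests --with-profile --profile-sort=time --profile-restrict=myapp
+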
+See the `hotshot documentation`_ in the standard library documentation for
+more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
+"""
+
+try:
+    import hotshot
+    from hotshot import stats
+except ImportError:
+    hotshot, stats = None, None
+import logging
+import os
+import sys
+import tempfile
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins')
+
+class Profile(Plugin):
+    """
+    Use this plugin to run tests using the hotshot profiler.
+    """
+    pfile = None
+    clean_stats_file = False
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        if not self.available():
+            return
+        Plugin.options(self, parser, env)
+        parser.add_option('--profile-sort', action='store', dest='profile_sort',
+                          default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
+                          metavar="SORT",
+                          help="Set sort order for profiler output")
+        parser.add_option('--profile-stats-file', action='store',
+                          dest='profile_stats_file',
+                          metavar="FILE",
+                          default=env.get('NOSE_PROFILE_STATS_FILE'),
+                          help='Profiler stats file; default is a new '
+                          'temp file on each run')
+        parser.add_option('--profile-restrict', action='append',
+                          dest='profile_restrict',
+                          metavar="RESTRICT",
+                          default=env.get('NOSE_PROFILE_RESTRICT'),
+                          help="Restrict profiler output. See help for "
+                          "pstats.Stats for details")
+
+    def available(cls):
+        return hotshot is not None
+    available = classmethod(available)
+
+    def begin(self):
+        """Create profile stats file and load profiler.
+        """
+        if not self.available():
+            return
+        self._create_pfile()
+        self.prof = hotshot.Profile(self.pfile)
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        if not self.available():
+            self.enabled = False
+            return
+        Plugin.configure(self, options, conf)
+        self.conf = conf
+        if options.profile_stats_file:
+            self.pfile = options.profile_stats_file
+            self.clean_stats_file = False
+        else:
+            self.pfile = None
+            self.clean_stats_file = True
+        self.fileno = None
+        self.sort = options.profile_sort
+        self.restrict = tolist(options.profile_restrict)
+
+    def prepareTest(self, test):
+        """Wrap entire test run in :func:`prof.runcall`.
+        """
+        if not self.available():
+            return
+        log.debug('preparing test %s' % test)
+        def run_and_profile(result, prof=self.prof, test=test):
+            self._create_pfile()
+            prof.runcall(test, result)
+        return run_and_profile
+
+    def report(self, stream):
+        """Output profiler report.
+        """
+        log.debug('printing profiler report')
+        self.prof.close()
+        prof_stats = stats.load(self.pfile)
+        prof_stats.sort_stats(self.sort)
+
+        # Python 2.5 has completely different stream handling from 2.4 and
+        # earlier. Before 2.5, stats objects have no stream attribute; in 2.5
+        # and later a reference to sys.stdout is stored before we can tweak it.
+        compat_25 = hasattr(prof_stats, 'stream')
+        if compat_25:
+            tmp = prof_stats.stream
+            prof_stats.stream = stream
+        else:
+            tmp = sys.stdout
+            sys.stdout = stream
+        try:
+            if self.restrict:
+                log.debug('setting profiler restriction to %s', self.restrict)
+                prof_stats.print_stats(*self.restrict)
+            else:
+                prof_stats.print_stats()
+        finally:
+            if compat_25:
+                prof_stats.stream = tmp
+            else:
+                sys.stdout = tmp
+
+    def finalize(self, result):
+        """Clean up stats file, if configured to do so.
+        """
+        if not self.available():
+            return
+        try:
+            self.prof.close()
+        except AttributeError:
+            # TODO: is this trying to catch just the case where not
+            # hasattr(self.prof, "close")?  If so, the function call should be
+            # moved out of the try: suite.
+            pass
+        if self.clean_stats_file:
+            if self.fileno:
+                try:
+                    os.close(self.fileno)
+                except OSError:
+                    pass
+            try:
+                os.unlink(self.pfile)
+            except OSError:
+                pass
+        return None
+
+    def _create_pfile(self):
+        if not self.pfile:
+            self.fileno, self.pfile = tempfile.mkstemp()
+            self.clean_stats_file = True
diff --git a/lib/spack/external/nose/plugins/skip.py b/lib/spack/external/nose/plugins/skip.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d1ac8f6042724ad4e78e5ed4c3494229f836417
--- /dev/null
+++ b/lib/spack/external/nose/plugins/skip.py
@@ -0,0 +1,63 @@
+"""
+This plugin installs a SKIP error class for the SkipTest exception.
+When SkipTest is raised, the exception will be logged in the skipped
+attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
+the exception will not be counted as an error or failure. This plugin
+is enabled by default but may be disabled with the ``--no-skip`` option.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+# on SkipTest:
+#  - unittest SkipTest is first preference, but it's only available
+#    for >= 2.7
+#  - unittest2 SkipTest is second preference for older pythons.  This
+#    mirrors logic for choosing SkipTest exception in testtools
+#  - if none of the above, provide custom class
+try:
+    from unittest.case import SkipTest
+except ImportError:
+    try:
+        from unittest2.case import SkipTest
+    except ImportError:
+        class SkipTest(Exception):
+            """Raise this exception to mark a test as skipped.
+            """
+            pass
+
+
+class Skip(ErrorClassPlugin):
+    """
+    Plugin that installs a SKIP error class for the SkipTest
+    exception.  When SkipTest is raised, the exception will be logged
+    in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
+    will be output, and the exception will not be counted as an error
+    or failure.
+    """
+    enabled = True
+    skipped = ErrorClass(SkipTest,
+                         label='SKIP',
+                         isfailure=False)
+
+    def options(self, parser, env):
+        """
+        Add my options to command line.
+        """
+        env_opt = 'NOSE_WITHOUT_SKIP'
+        parser.add_option('--no-skip', action='store_true',
+                          dest='noSkip', default=env.get(env_opt, False),
+                          help="Disable special handling of SkipTest "
+                          "exceptions.")
+
+    def configure(self, options, conf):
+        """
+        Configure plugin. Skip plugin is enabled by default.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        disable = getattr(options, 'noSkip', False)
+        if disable:
+            self.enabled = False
+
diff --git a/lib/spack/external/nose/plugins/testid.py b/lib/spack/external/nose/plugins/testid.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae8119bd0107922ccc6bea3916e9d1ef0fb1ff04
--- /dev/null
+++ b/lib/spack/external/nose/plugins/testid.py
@@ -0,0 +1,311 @@
+"""
+This plugin adds a test id (like #1) to each test name output. After
+you've run once to generate test ids, you can re-run individual
+tests by activating the plugin and passing the ids (with or
+without the # prefix) instead of test names.
+
+For example, if your normal test run looks like::
+
+  % nosetests -v
+  tests.test_a ... ok
+  tests.test_b ... ok
+  tests.test_c ... ok
+
+When adding ``--with-id`` you'll see::
+
+  % nosetests -v --with-id
+  #1 tests.test_a ... ok
+  #2 tests.test_b ... ok
+  #3 tests.test_c ... ok
+
+Then you can re-run individual tests by supplying just an id number::
+
+  % nosetests -v --with-id 2
+  #2 tests.test_b ... ok
+
+You can also pass multiple id numbers::
+
+  % nosetests -v --with-id 2 3
+  #2 tests.test_b ... ok
+  #3 tests.test_c ... ok
+  
+Since most shells consider '#' a special character, you can leave it out when
+specifying a test id.
+
+Note that when run without the -v switch, no special output is displayed, but
+the ids file is still written.
+
+Looping over failed tests
+-------------------------
+
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+ #4 test.test_d ... ok
+ 
+On the second run, only tests #2 and #3 will run::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+
+As you correct errors and tests pass, they'll drop out of subsequent runs.
+
+First::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ok
+ #3 test.test_c ... FAILED
+
+Second::
+
+ % nosetests -v --failed
+ #3 test.test_c ... FAILED
+
+When all tests pass, the full set will run on the next invocation.
+
+First::
+
+ % nosetests -v --failed
+ #3 test.test_c ... ok
+
+Second::
+ 
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ok
+ #3 test.test_c ... ok
+ #4 test.test_d ... ok
+
+.. note ::
+
+  If you expect to use ``--failed`` regularly, it's a good idea to always run
+  using the ``--with-id`` option. This will ensure that an id file is always
+  created, allowing you to add ``--failed`` to the command line as soon as
+  you have failing tests. Otherwise, your first run using ``--failed`` will
+  (perhaps surprisingly) run *all* tests, because there won't be an id file
+  containing the record of failed tests from your previous run.
+  
+"""
+__test__ = False
+
+import logging
+import os
+from nose.plugins import Plugin
+from nose.util import src, set
+
+try:
+    from cPickle import dump, load
+except ImportError:
+    from pickle import dump, load
+
+log = logging.getLogger(__name__)
+
+
+class TestId(Plugin):
+    """
+    Activate to add a test id (like #1) to each test name output. Activate
+    with --failed to rerun failing tests only.
+    """
+    name = 'id'
+    idfile = None
+    collecting = True
+    loopOnFailed = False
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        Plugin.options(self, parser, env)
+        parser.add_option('--id-file', action='store', dest='testIdFile',
+                          default='.noseids', metavar="FILE",
+                          help="Store test ids found in test runs in this "
+                          "file. Default is the file .noseids in the "
+                          "working directory.")
+        parser.add_option('--failed', action='store_true',
+                          dest='failed', default=False,
+                          help="Run the tests that failed in the last "
+                          "test run.")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        Plugin.configure(self, options, conf)
+        if options.failed:
+            self.enabled = True
+            self.loopOnFailed = True
+            log.debug("Looping on failed tests")
+        self.idfile = os.path.expanduser(options.testIdFile)
+        if not os.path.isabs(self.idfile):
+            self.idfile = os.path.join(conf.workingDir, self.idfile)
+        self.id = 1
+        # Ids and tests are mirror images: ids are {id: test address} and
+        # tests are {test address: id}
+        self.ids = {}
+        self.tests = {}
+        self.failed = []
+        self.source_names = []
+        # used to track ids seen when tests is filled from
+        # loaded ids file
+        self._seen = {}
+        self._write_hashes = conf.verbosity >= 2
+
+    def finalize(self, result):
+        """Save new ids file, if needed.
+        """
+        if result.wasSuccessful():
+            self.failed = []
+        if self.collecting:
+            ids = dict(zip(self.tests.values(), self.tests.keys()))
+        else:
+            ids = self.ids
+        fh = open(self.idfile, 'wb')
+        dump({'ids': ids,
+              'failed': self.failed,
+              'source_names': self.source_names}, fh)
+        fh.close()
+        log.debug('Saved test ids: %s, failed %s to %s',
+                  ids, self.failed, self.idfile)
+
+    def loadTestsFromNames(self, names, module=None):
+        """Translate ids in the list of requested names into their
+        test addresses, if they are found in my dict of tests.
+        """
+        log.debug('ltfn %s %s', names, module)
+        try:
+            fh = open(self.idfile, 'rb')
+            data = load(fh)
+            if 'ids' in data:
+                self.ids = data['ids']
+                self.failed = data['failed']
+                self.source_names = data['source_names']
+            else:
+                # old ids field
+                self.ids = data
+                self.failed = []
+                self.source_names = names
+            if self.ids:
+                self.id = max(self.ids) + 1
+                self.tests = dict(zip(self.ids.values(), self.ids.keys()))
+            else:
+                self.id = 1
+            log.debug(
+                'Loaded test ids %s tests %s failed %s sources %s from %s',
+                self.ids, self.tests, self.failed, self.source_names,
+                self.idfile)
+            fh.close()
+        except ValueError, e:
+            # load() may throw a ValueError when reading the ids file, if it
+            # was generated with a newer version of Python than we are currently
+            # running.
+            log.debug('Error loading %s : %s', self.idfile, str(e))
+        except IOError:
+            log.debug('IO error reading %s', self.idfile)
+
+        if self.loopOnFailed and self.failed:
+            self.collecting = False
+            names = self.failed
+            self.failed = []
+        # I don't load any tests myself, only translate names like '#2'
+        # into the associated test addresses
+        translated = []
+        new_source = []
+        really_new = []
+        for name in names:
+            trans = self.tr(name)
+            if trans != name:
+                translated.append(trans)
+            else:
+                new_source.append(name)
+        # names that are not ids and that are not in the current
+        # list of source names go into the list for next time
+        if new_source:
+            new_set = set(new_source)
+            old_set = set(self.source_names)
+            log.debug("old: %s new: %s", old_set, new_set)
+            really_new = [s for s in new_source
+                          if s not in old_set]
+            if really_new:
+                # remember new sources
+                self.source_names.extend(really_new)
+            if not translated:
+                # new set of source names, no translations
+                # means "run the requested tests"
+                names = new_source
+        else:
+            # no new names to translate and add to id set
+            self.collecting = False
+        log.debug("translated: %s new sources %s names %s",
+                  translated, really_new, names)
+        return (None, translated + really_new or names)
+
+    def makeName(self, addr):
+        log.debug("Make name %s", addr)
+        filename, module, call = addr
+        if filename is not None:
+            head = src(filename)
+        else:
+            head = module
+        if call is not None:
+            return "%s:%s" % (head, call)
+        return head
+
+    def setOutputStream(self, stream):
+        """Get handle on output stream so the plugin can print id #s
+        """
+        self.stream = stream
+
+    def startTest(self, test):
+        """Maybe output an id # before the test name.
+
+        Example output::
+
+          #1 test.test ... ok
+          #2 test.test_two ... ok
+
+        """
+        adr = test.address()
+        log.debug('start test %s (%s)', adr, adr in self.tests)
+        if adr in self.tests:
+            if adr in self._seen:
+                self.write('   ')
+            else:
+                self.write('#%s ' % self.tests[adr])
+                self._seen[adr] = 1
+            return
+        self.tests[adr] = self.id
+        self.write('#%s ' % self.id)
+        self.id += 1
+
+    def afterTest(self, test):
+        # None means test never ran, False means failed/err
+        if test.passed is False:
+            try:
+                key = str(self.tests[test.address()])
+            except KeyError:
+                # never saw this test -- startTest didn't run
+                pass
+            else:
+                if key not in self.failed:
+                    self.failed.append(key)
+
+    def tr(self, name):
+        log.debug("tr '%s'", name)
+        try:
+            key = int(name.replace('#', ''))
+        except ValueError:
+            return name
+        log.debug("Got key %s", key)
+        # I'm running tests mapped from the ids file,
+        # not collecting new ones
+        if key in self.ids:
+            return self.makeName(self.ids[key])
+        return name
+
+    def write(self, output):
+        if self._write_hashes:
+            self.stream.write(output)
diff --git a/lib/spack/external/nose/plugins/xunit.py b/lib/spack/external/nose/plugins/xunit.py
new file mode 100644
index 0000000000000000000000000000000000000000..90b52f5f6101427a3e520eb6e083f7aab8671b3c
--- /dev/null
+++ b/lib/spack/external/nose/plugins/xunit.py
@@ -0,0 +1,341 @@
+"""This plugin provides test results in the standard XUnit XML format.
+
+It's designed for the `Jenkins`_ (previously Hudson) continuous build
+system, but will probably work for anything else that understands an
+XUnit-formatted XML representation of test results.
+
+Add this shell command to your builder ::
+
+    nosetests --with-xunit
+
+And by default a file named nosetests.xml will be written to the
+working directory.
+
+In a Jenkins builder, tick the box named "Publish JUnit test result report"
+under the Post-build Actions and enter this value for Test report XMLs::
+
+    **/nosetests.xml
+
+If you need to change the name or location of the file, you can set the
+``--xunit-file`` option.
+
+If you need to change the name of the test suite, you can set the
+``--xunit-testsuite-name`` option.
+
+Here is an abbreviated version of what an XML test report might look like::
+
+    <?xml version="1.0" encoding="UTF-8"?>
+    <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
+        <testcase classname="path_to_test_suite.TestSomething"
+                  name="test_it" time="0">
+            <error type="exceptions.TypeError" message="oops, wrong type">
+            Traceback (most recent call last):
+            ...
+            TypeError: oops, wrong type
+            </error>
+        </testcase>
+    </testsuite>
+
+.. _Jenkins: http://jenkins-ci.org/
+
+"""
+import codecs
+import doctest
+import os
+import sys
+import traceback
+import re
+import inspect
+from StringIO import StringIO
+from time import time
+from xml.sax import saxutils
+
+from nose.plugins.base import Plugin
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
+
+# Invalid XML characters, control characters 0-31 sans \t, \n and \r
+CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def xml_safe(value):
+    """Replaces invalid XML characters with '?'."""
+    return CONTROL_CHARACTERS.sub('?', value)
+
+def escape_cdata(cdata):
+    """Escape a string for an XML CDATA section."""
+    return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
+
+def id_split(idval):
+    m = TEST_ID.match(idval)
+    if m:
+        name, fargs = m.groups()
+        head, tail = name.rsplit(".", 1)
+        return [head, tail+fargs]
+    else:
+        return idval.rsplit(".", 1)
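+# e.g. (illustrative) id_split("pkg.mod.TestCase.test_it(1,)") returns
+# ["pkg.mod.TestCase", "test_it(1,)"], while a plain id falls back to a
+# simple rsplit: id_split("pkg.mod.test_fn") -> ["pkg.mod", "test_fn"].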
+
+def nice_classname(obj):
+    """Returns a nice name for class object or class instance.
+
+        >>> nice_classname(Exception()) # doctest: +ELLIPSIS
+        '...Exception'
+        >>> nice_classname(Exception) # doctest: +ELLIPSIS
+        '...Exception'
+
+    """
+    if inspect.isclass(obj):
+        cls_name = obj.__name__
+    else:
+        cls_name = obj.__class__.__name__
+    mod = inspect.getmodule(obj)
+    if mod:
+        name = mod.__name__
+        # jython
+        if name.startswith('org.python.core.'):
+            name = name[len('org.python.core.'):]
+        return "%s.%s" % (name, cls_name)
+    else:
+        return cls_name
+
+def exc_message(exc_info):
+    """Return the exception's message."""
+    exc = exc_info[1]
+    if exc is None:
+        # str exception
+        result = exc_info[0]
+    else:
+        try:
+            result = str(exc)
+        except UnicodeEncodeError:
+            try:
+                result = unicode(exc)
+            except UnicodeError:
+                # Fallback to args as neither str nor
+                # unicode(Exception(u'\xe6')) work in Python < 2.6
+                result = exc.args[0]
+    result = force_unicode(result, 'UTF-8')
+    return xml_safe(result)
+
+class Tee(object):
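+    """Minimal file-like object that writes each chunk, coerced to unicode
+    with the given encoding, to every wrapped stream."""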
+    def __init__(self, encoding, *args):
+        self._encoding = encoding
+        self._streams = args
+
+    def write(self, data):
+        data = force_unicode(data, self._encoding)
+        for s in self._streams:
+            s.write(data)
+
+    def writelines(self, lines):
+        for line in lines:
+            self.write(line)
+
+    def flush(self):
+        for s in self._streams:
+            s.flush()
+
+    def isatty(self):
+        return False
+
+
+class Xunit(Plugin):
+    """This plugin provides test results in the standard XUnit XML format."""
+    name = 'xunit'
+    score = 1500
+    encoding = 'UTF-8'
+    error_report_file = None
+
+    def __init__(self):
+        super(Xunit, self).__init__()
+        self._capture_stack = []
+        self._currentStdout = None
+        self._currentStderr = None
+
+    def _timeTaken(self):
+        if hasattr(self, '_timer'):
+            taken = time() - self._timer
+        else:
+            # Test died before it ran (probably an error in setup()),
+            # or a success/failure was added before the test started,
+            # probably due to custom TestResult munging.
+            taken = 0.0
+        return taken
+
+    def _quoteattr(self, attr):
+        """Escape an XML attribute. Value can be unicode."""
+        attr = xml_safe(attr)
+        return saxutils.quoteattr(attr)
+
+    def options(self, parser, env):
+        """Sets additional command line options."""
+        Plugin.options(self, parser, env)
+        parser.add_option(
+            '--xunit-file', action='store',
+            dest='xunit_file', metavar="FILE",
+            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
+            help=("Path to xml file to store the xunit report in. "
+                  "Default is nosetests.xml in the working directory "
+                  "[NOSE_XUNIT_FILE]"))
+
+        parser.add_option(
+            '--xunit-testsuite-name', action='store',
+            dest='xunit_testsuite_name', metavar="PACKAGE",
+            default=env.get('NOSE_XUNIT_TESTSUITE_NAME', 'nosetests'),
+            help=("Name of the testsuite in the xunit xml, generated by plugin. "
+                  "Default test suite name is nosetests."))
+
+    def configure(self, options, config):
+        """Configures the xunit plugin."""
+        Plugin.configure(self, options, config)
+        self.config = config
+        if self.enabled:
+            self.stats = {'errors': 0,
+                          'failures': 0,
+                          'passes': 0,
+                          'skipped': 0
+                          }
+            self.errorlist = []
+            self.error_report_file_name = os.path.realpath(options.xunit_file)
+            self.xunit_testsuite_name = options.xunit_testsuite_name
+
+    def report(self, stream):
+        """Writes an Xunit-formatted XML file
+
+        The file includes a report of test errors and failures.
+
+        """
+        self.error_report_file = codecs.open(self.error_report_file_name, 'w',
+                                             self.encoding, 'replace')
+        self.stats['encoding'] = self.encoding
+        self.stats['testsuite_name'] = self.xunit_testsuite_name
+        self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+                               + self.stats['passes'] + self.stats['skipped'])
+        self.error_report_file.write(
+            u'<?xml version="1.0" encoding="%(encoding)s"?>'
+            u'<testsuite name="%(testsuite_name)s" tests="%(total)d" '
+            u'errors="%(errors)d" failures="%(failures)d" '
+            u'skip="%(skipped)d">' % self.stats)
+        self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
+                                               for e in self.errorlist]))
+        self.error_report_file.write(u'</testsuite>')
+        self.error_report_file.close()
+        if self.config.verbosity > 1:
+            stream.writeln("-" * 70)
+            stream.writeln("XML: %s" % self.error_report_file.name)
+
+    def _startCapture(self):
+        self._capture_stack.append((sys.stdout, sys.stderr))
+        self._currentStdout = StringIO()
+        self._currentStderr = StringIO()
+        sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
+        sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)
+
+    def startContext(self, context):
+        self._startCapture()
+
+    def stopContext(self, context):
+        self._endCapture()
+
+    def beforeTest(self, test):
+        """Initializes a timer before starting a test."""
+        self._timer = time()
+        self._startCapture()
+
+    def _endCapture(self):
+        if self._capture_stack:
+            sys.stdout, sys.stderr = self._capture_stack.pop()
+
+    def afterTest(self, test):
+        self._endCapture()
+        self._currentStdout = None
+        self._currentStderr = None
+
+    def finalize(self, test):
+        while self._capture_stack:
+            self._endCapture()
+
+    def _getCapturedStdout(self):
+        if self._currentStdout:
+            value = self._currentStdout.getvalue()
+            if value:
+                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
+                        value)
+        return ''
+
+    def _getCapturedStderr(self):
+        if self._currentStderr:
+            value = self._currentStderr.getvalue()
+            if value:
+                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
+                        value)
+        return ''
+
+    def addError(self, test, err, capt=None):
+        """Add error output to Xunit report.
+        """
+        taken = self._timeTaken()
+
+        if issubclass(err[0], SkipTest):
+            type = 'skipped'
+            self.stats['skipped'] += 1
+        else:
+            type = 'error'
+            self.stats['errors'] += 1
+
+        tb = format_exception(err, self.encoding)
+        id = test.id()
+
+        self.errorlist.append(
+            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+            u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+            u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'type': type,
+             'errtype': self._quoteattr(nice_classname(err[0])),
+             'message': self._quoteattr(exc_message(err)),
+             'tb': escape_cdata(tb),
+             'systemout': self._getCapturedStdout(),
+             'systemerr': self._getCapturedStderr(),
+             })
+
+    def addFailure(self, test, err, capt=None, tb_info=None):
+        """Add failure output to Xunit report.
+        """
+        taken = self._timeTaken()
+        tb = format_exception(err, self.encoding)
+        self.stats['failures'] += 1
+        id = test.id()
+
+        self.errorlist.append(
+            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+            u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+            u'</failure>%(systemout)s%(systemerr)s</testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'errtype': self._quoteattr(nice_classname(err[0])),
+             'message': self._quoteattr(exc_message(err)),
+             'tb': escape_cdata(tb),
+             'systemout': self._getCapturedStdout(),
+             'systemerr': self._getCapturedStderr(),
+             })
+
+    def addSuccess(self, test, capt=None):
+        """Add success output to Xunit report.
+        """
+        taken = self._timeTaken()
+        self.stats['passes'] += 1
+        id = test.id()
+        self.errorlist.append(
+            '<testcase classname=%(cls)s name=%(name)s '
+            'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
+            {'cls': self._quoteattr(id_split(id)[0]),
+             'name': self._quoteattr(id_split(id)[-1]),
+             'taken': taken,
+             'systemout': self._getCapturedStdout(),
+             'systemerr': self._getCapturedStderr(),
+             })
diff --git a/lib/spack/external/nose/proxy.py b/lib/spack/external/nose/proxy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2676cb1952b53396efbd57f70be613f1870f5b8
--- /dev/null
+++ b/lib/spack/external/nose/proxy.py
@@ -0,0 +1,188 @@
+"""
+Result Proxy
+------------
+
+The result proxy wraps the result instance given to each test. It
+performs two functions: enabling extended error/failure reporting
+and calling plugins.
+
+As each result event is fired, plugins are called with the same event;
+however, plugins are called with the nose.case.Test instance that
+wraps the actual test. So when a test fails and calls
+result.addFailure(self, err), the result proxy calls
+addFailure(self.test, err) for each plugin. This allows plugins to
+have a single stable interface for all test types, and also to
+manipulate the test object itself by setting the `test` attribute of
+the nose.case.Test that they receive.
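+
+For example, a hypothetical plugin hook sees the wrapping nose.case.Test
+rather than the raw test case::
+
+    class MyPlugin(Plugin):
+        def addFailure(self, test, err):
+            raw = test.test  # the underlying test case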
+"""
+import logging
+from nose.config import Config
+
+
+log = logging.getLogger(__name__)
+
+
+def proxied_attribute(local_attr, proxied_attr, doc):
+    """Create a property that proxies attribute ``proxied_attr`` through
+    the local attribute ``local_attr``.
+    """
+    def fget(self):
+        return getattr(getattr(self, local_attr), proxied_attr)
+    def fset(self, value):
+        setattr(getattr(self, local_attr), proxied_attr, value)
+    def fdel(self):
+        delattr(getattr(self, local_attr), proxied_attr)
+    return property(fget, fset, fdel, doc)
+
+
+class ResultProxyFactory(object):
+    """Factory for result proxies. Generates a ResultProxy bound to each test
+    and the result passed to the test.
+    """
+    def __init__(self, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        self.__prepared = False
+        self.__result = None
+
+    def __call__(self, result, test):
+        """Return a ResultProxy for the current test.
+
+        On first call, plugins are given a chance to replace the
+        result used for the remaining tests. If a plugin returns a
+        value from prepareTestResult, that object will be used as the
+        result for all tests.
+        """
+        if not self.__prepared:
+            self.__prepared = True
+            plug_result = self.config.plugins.prepareTestResult(result)
+            if plug_result is not None:
+                self.__result = result = plug_result
+        if self.__result is not None:
+            result = self.__result
+        return ResultProxy(result, test, config=self.config)
+
+
+class ResultProxy(object):
+    """Proxy to TestResults (or other results handler).
+
+    One ResultProxy is created for each nose.case.Test. The result
+    proxy calls plugins with the nose.case.Test instance (instead of
+    the wrapped test case) as each result call is made. Finally, the
+    real result method is called, also with the nose.case.Test
+    instance as the test parameter.
+
+    """
+    def __init__(self, result, test, config=None):
+        if config is None:
+            config = Config()
+        self.config = config
+        self.plugins = config.plugins
+        self.result = result
+        self.test = test
+
+    def __repr__(self):
+        return repr(self.result)
+
+    def _prepareErr(self, err):
+        if not isinstance(err[1], Exception) and isinstance(err[0], type):
+            # Turn value back into an Exception (required in Python 3.x).
+            # Plugins do all sorts of crazy things with exception values.
+            # Convert it to a custom subclass of Exception with the same
+            # name as the actual exception to make it print correctly.
+            value = type(err[0].__name__, (Exception,), {})(err[1])
+            err = (err[0], value, err[2])
+        return err
+
+    def assertMyTest(self, test):
+        # The test I was called with must be my .test, my .test's
+        # .test, or my .test's .test's ._nose_case.
+
+        case = getattr(self.test, 'test', None)
+        assert (test is self.test
+                or test is case
+                or test is getattr(case, '_nose_case', None)), (
+                "ResultProxy for %r (%s) was called with test %r (%s)"
+                % (self.test, id(self.test), test, id(test)))
+
+    def afterTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.afterTest(self.test)
+        if hasattr(self.result, "afterTest"):
+            self.result.afterTest(self.test)
+
+    def beforeTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.beforeTest(self.test)
+        if hasattr(self.result, "beforeTest"):
+            self.result.beforeTest(self.test)
+
+    def addError(self, test, err):
+        self.assertMyTest(test)
+        plugins = self.plugins
+        plugin_handled = plugins.handleError(self.test, err)
+        if plugin_handled:
+            return
+        # test.passed is set in result, to account for error classes
+        formatted = plugins.formatError(self.test, err)
+        if formatted is not None:
+            err = formatted
+        plugins.addError(self.test, err)
+        self.result.addError(self.test, self._prepareErr(err))
+        if not self.result.wasSuccessful() and self.config.stopOnError:
+            self.shouldStop = True
+
+    def addFailure(self, test, err):
+        self.assertMyTest(test)
+        plugins = self.plugins
+        plugin_handled = plugins.handleFailure(self.test, err)
+        if plugin_handled:
+            return
+        self.test.passed = False
+        formatted = plugins.formatFailure(self.test, err)
+        if formatted is not None:
+            err = formatted
+        plugins.addFailure(self.test, err)
+        self.result.addFailure(self.test, self._prepareErr(err))
+        if self.config.stopOnError:
+            self.shouldStop = True
+
+    def addSkip(self, test, reason):
+        # 2.7 compat shim
+        from nose.plugins.skip import SkipTest
+        self.assertMyTest(test)
+        plugins = self.plugins
+        if not isinstance(reason, Exception):
+            # for Python 3.2+
+            reason = Exception(reason)
+        plugins.addError(self.test, (SkipTest, reason, None))
+        self.result.addSkip(self.test, reason)
+
+    def addSuccess(self, test):
+        self.assertMyTest(test)
+        self.plugins.addSuccess(self.test)
+        self.result.addSuccess(self.test)
+
+    def startTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.startTest(self.test)
+        self.result.startTest(self.test)
+
+    def stop(self):
+        self.result.stop()
+
+    def stopTest(self, test):
+        self.assertMyTest(test)
+        self.plugins.stopTest(self.test)
+        self.result.stopTest(self.test)
+
+    # proxied attributes
+    shouldStop = proxied_attribute('result', 'shouldStop',
+                                    """Should the test run stop?""")
+    errors = proxied_attribute('result', 'errors',
+                               """Tests that raised an exception""")
+    failures = proxied_attribute('result', 'failures',
+                                 """Tests that failed""")
+    testsRun = proxied_attribute('result', 'testsRun',
+                                 """Number of tests run""")
diff --git a/lib/spack/external/nose/pyversion.py b/lib/spack/external/nose/pyversion.py
new file mode 100644
index 0000000000000000000000000000000000000000..091238da7563a0e74416608e40bf8a4510e3a5c1
--- /dev/null
+++ b/lib/spack/external/nose/pyversion.py
@@ -0,0 +1,215 @@
+"""
+This module contains fixups for using nose under different versions of Python.
+"""
+import sys
+import os
+import traceback
+import types
+import inspect
+import nose.util
+
+__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType',
+           'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod',
+           'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode',
+           'format_exception']
+
+# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x
+# source will be replaced with 'str()' when running 2to3, so this test will
+# then become true)
+UNICODE_STRINGS = (type(unicode()) == type(str()))
+
+if sys.version_info[:2] < (3, 0):
+    def force_unicode(s, encoding='UTF-8'):
+        try:
+            s = unicode(s)
+        except UnicodeDecodeError:
+            s = str(s).decode(encoding, 'replace')
+
+        return s
+else:
+    def force_unicode(s, encoding='UTF-8'):
+        return str(s)
+
+# new.instancemethod() is obsolete for new-style classes (Python 3.x)
+# We need to use descriptor methods instead.
+try:
+    import new
+    def make_instancemethod(function, instance):
+        return new.instancemethod(function.im_func, instance,
+                                  instance.__class__)
+except ImportError:
+    def make_instancemethod(function, instance):
+        return function.__get__(instance, instance.__class__)
+
+# To be forward-compatible, we do all list sorts using keys instead of cmp
+# functions.  However, part of the unittest.TestLoader API involves a
+# user-providable cmp function, so we need some way to convert that.
+def cmp_to_key(mycmp):
+    'Convert a cmp= function into a key= function'
+    class Key(object):
+        def __init__(self, obj):
+            self.obj = obj
+        def __lt__(self, other):
+            return mycmp(self.obj, other.obj) < 0
+        def __gt__(self, other):
+            return mycmp(self.obj, other.obj) > 0
+        def __eq__(self, other):
+            return mycmp(self.obj, other.obj) == 0
+    return Key
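+
+# For example, a hypothetical caller could write:
+#     names.sort(key=cmp_to_key(lambda a, b: cmp(a.lower(), b.lower())))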
+
+# Python 2.3 also does not support list-sorting by key, so we need to convert
+# keys to cmp functions if we're running on an old Python.
+if sys.version_info < (2, 4):
+    def sort_list(l, key, reverse=False):
+        if reverse:
+            return l.sort(lambda a, b: cmp(key(b), key(a)))
+        else:
+            return l.sort(lambda a, b: cmp(key(a), key(b)))
+else:
+    def sort_list(l, key, reverse=False):
+        return l.sort(key=key, reverse=reverse)
+
+# In Python 3.x, all objects are "new style" objects descended from 'type', and
+# thus types.ClassType and types.TypeType don't exist anymore.  For
+# compatibility, we make sure they still work.
+if hasattr(types, 'ClassType'):
+    ClassType = types.ClassType
+    TypeType = types.TypeType
+else:
+    ClassType = type
+    TypeType = type
+
+# The following emulates the behavior we need from an 'unbound method' under
+# Python 3.x: the ability to keep a class associated with a function
+# definition, so that code can dispatch based on that class.
+class UnboundMethod:
+    def __init__(self, cls, func):
+        # Make sure we have all the same attributes as the original function,
+        # so that the AttributeSelector plugin will work correctly...
+        self.__dict__ = func.__dict__.copy()
+        self._func = func
+        self.__self__ = UnboundSelf(cls)
+        if sys.version_info < (3, 0):
+            self.im_class = cls
+        self.__doc__ = getattr(func, '__doc__', None)
+
+    def address(self):
+        cls = self.__self__.cls
+        modname = cls.__module__
+        module = sys.modules[modname]
+        filename = getattr(module, '__file__', None)
+        if filename is not None:
+            filename = os.path.abspath(filename)
+        return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__,
+                                                        self._func.__name__))
+
+    def __call__(self, *args, **kwargs):
+        return self._func(*args, **kwargs)
+
+    def __getattr__(self, attr):
+        return getattr(self._func, attr)
+
+    def __repr__(self):
+        return '<unbound method %s.%s>' % (self.__self__.cls.__name__,
+                                           self._func.__name__)
+
+class UnboundSelf:
+    def __init__(self, cls):
+        self.cls = cls
+
+    # We have to do this hackery because Python won't let us override the
+    # __class__ attribute...
+    def __getattribute__(self, attr):
+        if attr == '__class__':
+            return self.cls
+        else:
+            return object.__getattribute__(self, attr)
+
+def unbound_method(cls, func):
+    if inspect.ismethod(func):
+        return func
+    if not inspect.isfunction(func):
+        raise TypeError('%s is not a function' % (repr(func),))
+    return UnboundMethod(cls, func)
+
+def ismethod(obj):
+    return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
+
+
+# Make a pseudo-bytes function that can be called without the encoding arg:
+if sys.version_info >= (3, 0):
+    def bytes_(s, encoding='utf8'):
+        if isinstance(s, bytes):
+            return s
+        return bytes(s, encoding)
+else:
+    def bytes_(s, encoding=None):
+        return str(s)
+
+
+if sys.version_info[:2] >= (2, 6):
+    def isgenerator(o):
+        if isinstance(o, UnboundMethod):
+            o = o._func
+        return inspect.isgeneratorfunction(o) or inspect.isgenerator(o)
+else:
+    try:
+        from compiler.consts import CO_GENERATOR
+    except ImportError:
+        # IronPython doesn't have a compiler module
+        CO_GENERATOR=0x20
+
+    def isgenerator(func):
+        try:
+            return func.func_code.co_flags & CO_GENERATOR != 0
+        except AttributeError:
+            return False
+
+# Make a function to help check if an exception is derived from BaseException.
+# In Python 2.4, we just use Exception instead.
+if sys.version_info[:2] < (2, 5):
+    def is_base_exception(exc):
+        return isinstance(exc, Exception)
+else:
+    def is_base_exception(exc):
+        return isinstance(exc, BaseException)
+
+if sys.version_info[:2] < (3, 0):
+    def exc_to_unicode(ev, encoding='utf-8'):
+        if is_base_exception(ev):
+            if not hasattr(ev, '__unicode__'):
+                # 2.5-
+                if not hasattr(ev, 'message'):
+                    # 2.4
+                    msg = len(ev.args) and ev.args[0] or ''
+                else:
+                    msg = ev.message
+                msg = force_unicode(msg, encoding=encoding)
+                clsname = force_unicode(ev.__class__.__name__,
+                        encoding=encoding)
+                ev = u'%s: %s' % (clsname, msg)
+        elif not isinstance(ev, unicode):
+            ev = repr(ev)
+
+        return force_unicode(ev, encoding=encoding)
+else:
+    def exc_to_unicode(ev, encoding='utf-8'):
+        return str(ev)
+
+def format_exception(exc_info, encoding='UTF-8'):
+    ec, ev, tb = exc_info
+
+    # Our exception object may have been turned into a string, and Python 3's
+    # traceback.format_exception() doesn't take kindly to that (it expects an
+    # actual exception object).  So we work around it, by doing the work
+    # ourselves if ev is not an exception object.
+    if not is_base_exception(ev):
+        tb_data = force_unicode(
+                ''.join(traceback.format_tb(tb)),
+                encoding)
+        ev = exc_to_unicode(ev)
+        return tb_data + ev
+    else:
+        return force_unicode(
+                ''.join(traceback.format_exception(*exc_info)),
+                encoding)
diff --git a/lib/spack/external/nose/result.py b/lib/spack/external/nose/result.py
new file mode 100644
index 0000000000000000000000000000000000000000..f974a14ae24229ff93d2d13a82d89a27756c5162
--- /dev/null
+++ b/lib/spack/external/nose/result.py
@@ -0,0 +1,200 @@
+"""
+Test Result
+-----------
+
+Provides a TextTestResult that extends unittest's _TextTestResult to
+provide support for error classes (such as the builtin skip and
+deprecated classes), and hooks for plugins to take over or extend
+reporting.
+"""
+
+import logging
+try:
+    # 2.7+
+    from unittest.runner import _TextTestResult
+except ImportError:
+    from unittest import _TextTestResult
+from nose.config import Config
+from nose.util import isclass, ln as _ln # backwards compat
+
+log = logging.getLogger('nose.result')
+
+
+def _exception_detail(exc):
+    # this is what stdlib module traceback does
+    try:
+        return str(exc)
+    except:
+        return '<unprintable %s object>' % type(exc).__name__
+
+
+class TextTestResult(_TextTestResult):
+    """Text test result that extends unittest's default test result
+    support for a configurable set of errorClasses (e.g., Skip,
+    Deprecated, TODO) that extend the errors/failures/success triad.
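+
+    Each errorClasses entry maps an exception class to a
+    ``(storage, label, isfail)`` triple; for example (hypothetical)::
+
+        errorClasses = {SkipTest: ([], 'SKIP', False)}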
+    """
+    def __init__(self, stream, descriptions, verbosity, config=None,
+                 errorClasses=None):
+        if errorClasses is None:
+            errorClasses = {}
+        self.errorClasses = errorClasses
+        if config is None:
+            config = Config()
+        self.config = config
+        _TextTestResult.__init__(self, stream, descriptions, verbosity)
+
+    def addSkip(self, test, reason):
+        # 2.7 skip compat
+        from nose.plugins.skip import SkipTest
+        if SkipTest in self.errorClasses:
+            storage, label, isfail = self.errorClasses[SkipTest]
+            storage.append((test, reason))
+            self.printLabel(label, (SkipTest, reason, None))
+
+    def addError(self, test, err):
+        """Overrides normal addError to add support for
+        errorClasses. If the exception is a registered class, the
+        error will be added to the list for that class, not errors.
+        """
+        ec, ev, tb = err
+        try:
+            exc_info = self._exc_info_to_string(err, test)
+        except TypeError:
+            # 2.3 compat
+            exc_info = self._exc_info_to_string(err)
+        for cls, (storage, label, isfail) in self.errorClasses.items():
+            if isclass(ec) and issubclass(ec, cls):
+                if isfail:
+                    test.passed = False
+                storage.append((test, exc_info))
+                self.printLabel(label, err)
+                return
+        self.errors.append((test, exc_info))
+        test.passed = False
+        self.printLabel('ERROR')
+
+    # override to bypass changes in 2.7
+    def getDescription(self, test):
+        if self.descriptions:
+            return test.shortDescription() or str(test)
+        else:
+            return str(test)
+
+    def printLabel(self, label, err=None):
+        # Might get patched into a streamless result
+        stream = getattr(self, 'stream', None)
+        if stream is not None:
+            if self.showAll:
+                message = [label]
+                if err:
+                    detail = _exception_detail(err[1])
+                    if detail:
+                        message.append(detail)
+                stream.writeln(": ".join(message))
+            elif self.dots:
+                stream.write(label[:1])
+
+    def printErrors(self):
+        """Overrides to print all errorClasses errors as well.
+        """
+        _TextTestResult.printErrors(self)
+        for cls in self.errorClasses.keys():
+            storage, label, isfail = self.errorClasses[cls]
+            if isfail:
+                self.printErrorList(label, storage)
+        # Might get patched into a result with no config
+        if hasattr(self, 'config'):
+            self.config.plugins.report(self.stream)
+
+    def printSummary(self, start, stop):
+        """Called by the test runner to print the final summary of test
+        run results.
+        """
+        write = self.stream.write
+        writeln = self.stream.writeln
+        taken = float(stop - start)
+        run = self.testsRun
+        plural = run != 1 and "s" or ""
+
+        writeln(self.separator2)
+        writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
+        writeln()
+
+        summary = {}
+        eckeys = self.errorClasses.keys()
+        for cls in eckeys:
+            storage, label, isfail = self.errorClasses[cls]
+            count = len(storage)
+            if not count:
+                continue
+            summary[label] = count
+        if len(self.failures):
+            summary['failures'] = len(self.failures)
+        if len(self.errors):
+            summary['errors'] = len(self.errors)
+
+        if not self.wasSuccessful():
+            write("FAILED")
+        else:
+            write("OK")
+        items = summary.items()
+        if items:
+            items.sort()
+            write(" (")
+            write(", ".join(["%s=%s" % (label, count) for
+                             label, count in items]))
+            writeln(")")
+        else:
+            writeln()
+
+    def wasSuccessful(self):
+        """Overrides to check that there are no errors in errorClasses
+        lists that are marked as errors and should cause a run to
+        fail.
+        """
+        if self.errors or self.failures:
+            return False
+        for cls in self.errorClasses.keys():
+            storage, label, isfail = self.errorClasses[cls]
+            if not isfail:
+                continue
+            if storage:
+                return False
+        return True
+
+    def _addError(self, test, err):
+        try:
+            exc_info = self._exc_info_to_string(err, test)
+        except TypeError:
+            # 2.3: does not take test arg
+            exc_info = self._exc_info_to_string(err)
+        self.errors.append((test, exc_info))
+        if self.showAll:
+            self.stream.write('ERROR')
+        elif self.dots:
+            self.stream.write('E')
+
+    def _exc_info_to_string(self, err, test=None):
+        # 2.7 skip compat
+        from nose.plugins.skip import SkipTest
+        if isclass(err[0]) and issubclass(err[0], SkipTest):
+            return str(err[1])
+        # 2.3/2.4 -- 2.4 passes test, 2.3 does not
+        try:
+            return _TextTestResult._exc_info_to_string(self, err, test)
+        except TypeError:
+            # 2.3: does not take test arg
+            return _TextTestResult._exc_info_to_string(self, err)
+
+
+def ln(*arg, **kw):
+    from warnings import warn
+    warn("ln() has moved to nose.util from nose.result and will be removed "
+         "from nose.result in a future release. Please update your imports ",
+         DeprecationWarning)
+    return _ln(*arg, **kw)
+
+
diff --git a/lib/spack/external/nose/selector.py b/lib/spack/external/nose/selector.py
new file mode 100644
index 0000000000000000000000000000000000000000..b63f7af0b153ca8448d9f214256cadf4ef9cb6e2
--- /dev/null
+++ b/lib/spack/external/nose/selector.py
@@ -0,0 +1,251 @@
+"""
+Test Selection
+--------------
+
+Test selection is handled by a Selector. The test loader calls the
+appropriate selector method for each object it encounters that it
+thinks may be a test.
+"""
+import logging
+import os
+import unittest
+from nose.config import Config
+from nose.util import split_test_name, src, getfilename, getpackage, ispackage, is_executable
+
+log = logging.getLogger(__name__)
+
+__all__ = ['Selector', 'defaultSelector', 'TestAddress']
+
+
+# for efficiency and easier mocking
+op_join = os.path.join
+op_basename = os.path.basename
+op_exists = os.path.exists
+op_splitext = os.path.splitext
+op_isabs = os.path.isabs
+op_abspath = os.path.abspath
+
+
+class Selector(object):
+    """Core test selector. Examines test candidates and determines whether,
+    given the specified configuration, the test candidate should be selected
+    as a test.
+    """
+    def __init__(self, config):
+        if config is None:
+            config = Config()
+        self.configure(config)
+
+    def configure(self, config):
+        self.config = config
+        self.exclude = config.exclude
+        self.ignoreFiles = config.ignoreFiles
+        self.include = config.include
+        self.plugins = config.plugins
+        self.match = config.testMatch
+        
+    def matches(self, name):
+        """Does the name match my requirements?
+
+        To match, a name must match config.testMatch OR config.include
+        and it must not match config.exclude
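+
+        For example, with nose's default testMatch a name like
+        'test_config' matches while 'helpers' does not (hypothetical
+        names; the actual patterns come from the active Config).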
+        """
+        return ((self.match.search(name)
+                 or (self.include and
+                     filter(None,
+                            [inc.search(name) for inc in self.include])))
+                and ((not self.exclude)
+                     or not filter(None,
+                                   [exc.search(name) for exc in self.exclude])
+                 ))
+    
+    def wantClass(self, cls):
+        """Is the class a wanted test class?
+
+        A class must be a unittest.TestCase subclass, or match test name
+        requirements. Classes that start with _ are always excluded.
+        """
+        declared = getattr(cls, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = (not cls.__name__.startswith('_')
+                      and (issubclass(cls, unittest.TestCase)
+                           or self.matches(cls.__name__)))
+        
+        plug_wants = self.plugins.wantClass(cls)        
+        if plug_wants is not None:
+            log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
+            wanted = plug_wants
+        log.debug("wantClass %s? %s", cls, wanted)
+        return wanted
+
+    def wantDirectory(self, dirname):
+        """Is the directory a wanted test directory?
+
+        All package directories match, so long as they do not match exclude. 
+        All other directories must match test requirements.
+        """
+        tail = op_basename(dirname)
+        if ispackage(dirname):
+            wanted = (not self.exclude
+                      or not filter(None,
+                                    [exc.search(tail) for exc in self.exclude]
+                                    ))
+        else:
+            wanted = (self.matches(tail)
+                      or (self.config.srcDirs
+                          and tail in self.config.srcDirs))
+        plug_wants = self.plugins.wantDirectory(dirname)
+        if plug_wants is not None:
+            log.debug("Plugin setting selection of %s to %s",
+                      dirname, plug_wants)
+            wanted = plug_wants
+        log.debug("wantDirectory %s? %s", dirname, wanted)
+        return wanted
+    
+    def wantFile(self, file):
+        """Is the file a wanted test file?
+
+        The file must be a python source file and match testMatch or
+        include, and not match exclude. Files that match ignore are *never*
+        wanted, regardless of plugin, testMatch, include or exclude settings.
+        """
+        # never, ever load files that match anything in ignore
+        # (.* _* and *setup*.py by default)
+        base = op_basename(file)
+        ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
+                           if ignore_this.search(base) ]
+        if ignore_matches:
+            log.debug('%s matches ignoreFiles pattern; skipped',
+                      base) 
+            return False
+        if not self.config.includeExe and is_executable(file):
+            log.info('%s is executable; skipped', file)
+            return False
+        dummy, ext = op_splitext(base)
+        pysrc = ext == '.py'
+
+        wanted = pysrc and self.matches(base) 
+        plug_wants = self.plugins.wantFile(file)
+        if plug_wants is not None:
+            log.debug("plugin setting want %s to %s", file, plug_wants)
+            wanted = plug_wants
+        log.debug("wantFile %s? %s", file, wanted)
+        return wanted
+
+    def wantFunction(self, function):
+        """Is the function a test function?
+        """
+        try:
+            if hasattr(function, 'compat_func_name'):
+                funcname = function.compat_func_name
+            else:
+                funcname = function.__name__
+        except AttributeError:
+            # not a function
+            return False
+        declared = getattr(function, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = not funcname.startswith('_') and self.matches(funcname)
+        plug_wants = self.plugins.wantFunction(function)
+        if plug_wants is not None:
+            wanted = plug_wants
+        log.debug("wantFunction %s? %s", function, wanted)
+        return wanted
+
+    def wantMethod(self, method):
+        """Is the method a test method?
+        """
+        try:
+            method_name = method.__name__
+        except AttributeError:
+            # not a method
+            return False
+        if method_name.startswith('_'):
+            # never collect 'private' methods
+            return False
+        declared = getattr(method, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = self.matches(method_name)
+        plug_wants = self.plugins.wantMethod(method)
+        if plug_wants is not None:
+            wanted = plug_wants
+        log.debug("wantMethod %s? %s", method, wanted)
+        return wanted
+    
+    def wantModule(self, module):
+        """Is the module a test module?
+
+        The tail of the module name must match test requirements. One exception:
+        we always want __main__.
+        """
+        declared = getattr(module, '__test__', None)
+        if declared is not None:
+            wanted = declared
+        else:
+            wanted = self.matches(module.__name__.split('.')[-1]) \
+                     or module.__name__ == '__main__'
+        plug_wants = self.plugins.wantModule(module)
+        if plug_wants is not None:
+            wanted = plug_wants
+        log.debug("wantModule %s? %s", module, wanted)
+        return wanted
+        
+defaultSelector = Selector        
+
+
+class TestAddress(object):
+    """A test address represents a user's request to run a particular
+    test. The user may specify a filename or module (or neither),
+    and/or a callable (a class, function, or method). The naming
+    format for test addresses is:
+
+    filename_or_module:callable
+
+    Filenames that are not absolute will be made absolute relative to
+    the working dir.
+
+    The filename or module part will be considered a module name if it
+    doesn't look like a file, that is, if it doesn't exist on the file
+    system and it doesn't contain any directory separators and it
+    doesn't end in .py.
+
+    Callables may be a class name, function name, method name, or
+    class.method specification.
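+
+    For example (hypothetical addresses)::
+
+        tests/test_spam.py:TestSpam.test_eggs
+        some.module:test_function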
+    """
+    def __init__(self, name, workingDir=None):
+        if workingDir is None:
+            workingDir = os.getcwd()
+        self.name = name
+        self.workingDir = workingDir
+        self.filename, self.module, self.call = split_test_name(name)
+        log.debug('Test name %s resolved to file %s, module %s, call %s',
+                  name, self.filename, self.module, self.call)
+        if self.filename is None:
+            if self.module is not None:
+                self.filename = getfilename(self.module, self.workingDir)
+        if self.filename:
+            self.filename = src(self.filename)
+            if not op_isabs(self.filename):
+                self.filename = op_abspath(op_join(workingDir,
+                                                   self.filename))
+            if self.module is None:
+                self.module = getpackage(self.filename)
+        log.debug(
+            'Final resolution of test name %s: file %s module %s call %s',
+            name, self.filename, self.module, self.call)
+
+    def totuple(self):
+        return (self.filename, self.module, self.call)
+        
+    def __str__(self):
+        return self.name
+
+    def __repr__(self):
+        return "%s: (%s, %s, %s)" % (self.name, self.filename,
+                                     self.module, self.call)
diff --git a/lib/spack/external/nose/sphinx/__init__.py b/lib/spack/external/nose/sphinx/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ae28399f5fda2dfb1b04405f4a3b4895f5fac1e
--- /dev/null
+++ b/lib/spack/external/nose/sphinx/__init__.py
@@ -0,0 +1 @@
+pass
diff --git a/lib/spack/external/nose/sphinx/pluginopts.py b/lib/spack/external/nose/sphinx/pluginopts.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2b284ab2760d40ce87ac55615599b148f984eb9
--- /dev/null
+++ b/lib/spack/external/nose/sphinx/pluginopts.py
@@ -0,0 +1,189 @@
+"""
+Adds a sphinx directive that can be used to automatically document a plugin.
+
+this::
+
+ .. autoplugin :: nose.plugins.foo
+    :plugin: Pluggy
+    
+produces::
+
+  .. automodule :: nose.plugins.foo
+  
+  Options
+  -------
+
+  .. cmdoption :: --foo=BAR, --fooble=BAR
+
+    Do the foo thing to the new thing.
+
+  Plugin
+  ------
+
+  .. autoclass :: nose.plugins.foo.Pluggy
+     :members:
+
+  Source
+  ------
+
+  .. include :: path/to/nose/plugins/foo.py
+     :literal:
+
+"""
+import os
+try:
+    from docutils import nodes, utils
+    from docutils.statemachine import ViewList
+    from docutils.parsers.rst import directives
+except ImportError:
+    pass # won't run anyway
+
+from nose.util import resolve_name
+from nose.plugins.base import Plugin
+from nose.plugins.manager import BuiltinPluginManager
+from nose.config import Config
+from nose.core import TestProgram
+from inspect import isclass
+
+
+def autoplugin_directive(dirname, arguments, options, content, lineno,
+                         content_offset, block_text, state, state_machine):
+    mod_name = arguments[0]
+    mod = resolve_name(mod_name)
+    plug_name = options.get('plugin', None)
+    if plug_name:
+        obj = getattr(mod, plug_name)
+    else:
+        for entry in dir(mod):
+            obj = getattr(mod, entry)
+            if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin:
+                plug_name = '%s.%s' % (mod_name, entry)
+                break
+    
+    # mod docstring
+    rst = ViewList()
+    rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
+    rst.append('', '<autodoc>')
+    
+    # options
+    rst.append('Options', '<autodoc>')
+    rst.append('-------', '<autodoc>')
+    rst.append('', '<autodoc>')
+
+    plug = obj()
+    opts = OptBucket()
+    plug.options(opts, {})
+    for opt in opts:
+        rst.append(opt.options(), '<autodoc>')
+        rst.append('   \n', '<autodoc>')
+        rst.append('   ' + opt.help + '\n', '<autodoc>')
+        rst.append('\n', '<autodoc>')
+        
+    # plugin class
+    rst.append('Plugin', '<autodoc>')
+    rst.append('------', '<autodoc>')
+    rst.append('', '<autodoc>')
+    
+    rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
+    rst.append('   :members:\n', '<autodoc>')
+    rst.append('   :show-inheritance:\n', '<autodoc>')
+    rst.append('', '<autodoc>')
+    
+    # source
+    rst.append('Source', '<autodoc>')
+    rst.append('------', '<autodoc>')
+    rst.append(
+            '.. include :: %s\n' % utils.relative_path(
+                state_machine.document['source'],
+                os.path.abspath(mod.__file__.replace('.pyc', '.py'))),
+            '<autodoc>')
+    rst.append('   :literal:\n', '<autodoc>')
+    rst.append('', '<autodoc>')
+    
+    node = nodes.section()
+    node.document = state.document
+    surrounding_title_styles = state.memo.title_styles
+    surrounding_section_level = state.memo.section_level
+    state.memo.title_styles = []
+    state.memo.section_level = 0
+    state.nested_parse(rst, 0, node, match_titles=1)
+    state.memo.title_styles = surrounding_title_styles
+    state.memo.section_level = surrounding_section_level
+
+    return node.children
+
+
+def autohelp_directive(dirname, arguments, options, content, lineno,
+                       content_offset, block_text, state, state_machine):
+    """produces rst from nose help"""
+    config = Config(parserClass=OptBucket,
+                    plugins=BuiltinPluginManager())
+    parser = config.getParser(TestProgram.usage())
+    rst = ViewList()
+    for line in parser.format_help().split('\n'):
+        rst.append(line, '<autodoc>')
+
+    rst.append('Options', '<autodoc>')
+    rst.append('-------', '<autodoc>')
+    rst.append('', '<autodoc>')
+    for opt in parser:
+        rst.append(opt.options(), '<autodoc>')
+        rst.append('   \n', '<autodoc>')
+        rst.append('   ' + opt.help + '\n', '<autodoc>')
+        rst.append('\n', '<autodoc>')    
+    node = nodes.section()
+    node.document = state.document
+    surrounding_title_styles = state.memo.title_styles
+    surrounding_section_level = state.memo.section_level
+    state.memo.title_styles = []
+    state.memo.section_level = 0
+    state.nested_parse(rst, 0, node, match_titles=1)
+    state.memo.title_styles = surrounding_title_styles
+    state.memo.section_level = surrounding_section_level
+
+    return node.children
+
+    
+class OptBucket(object):
+    def __init__(self, doc=None, prog='nosetests'):
+        self.opts = []
+        self.doc = doc
+        self.prog = prog
+
+    def __iter__(self):
+        return iter(self.opts)
+
+    def format_help(self):
+        return self.doc.replace('%prog', self.prog).replace(':\n', '::\n')
+    
+    def add_option(self, *arg, **kw):
+        self.opts.append(Opt(*arg, **kw))
+
+
+class Opt(object):
+    def __init__(self, *arg, **kw):
+        self.opts = arg
+        self.action = kw.pop('action', None)
+        self.default = kw.pop('default', None)
+        self.metavar = kw.pop('metavar', None)
+        self.help = kw.pop('help', None)
+
+    def options(self):
+        buf = []
+        for optstring in self.opts:
+            desc = optstring
+            if self.action not in ('store_true', 'store_false'):
+                desc += '=%s' % self.meta(optstring)
+            buf.append(desc)
+        return '.. cmdoption :: ' + ', '.join(buf)
+
+    def meta(self, optstring):
+        # FIXME optparser default metavar?
+        return self.metavar or 'DEFAULT'
+
+    
+def setup(app):
+    app.add_directive('autoplugin',
+                      autoplugin_directive, 1, (1, 0, 1),
+                      plugin=directives.unchanged)
+    app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
diff --git a/lib/spack/external/nose/suite.py b/lib/spack/external/nose/suite.py
new file mode 100644
index 0000000000000000000000000000000000000000..a831105e340c12955386dd04ab2e6362bfd1606c
--- /dev/null
+++ b/lib/spack/external/nose/suite.py
@@ -0,0 +1,609 @@
+"""
+Test Suites
+-----------
+
+Provides a LazySuite, which is a suite whose test list is a generator
+function, and ContextSuite, which can run fixtures (setup/teardown
+functions or methods) for the context that contains its tests.
+
+"""
+from __future__ import generators
+
+import logging
+import sys
+import unittest
+from nose.case import Test
+from nose.config import Config
+from nose.proxy import ResultProxyFactory
+from nose.util import isclass, resolve_name, try_run
+
+if sys.platform == 'cli':
+    if sys.version_info[:2] < (2, 6):
+        import clr
+        clr.AddReference("IronPython")
+        from IronPython.Runtime.Exceptions import StringException
+    else:
+        class StringException(Exception):
+            pass
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# Singleton for default value -- see ContextSuite.__init__ below
+_def = object()
+
+
+def _strclass(cls):
+    return "%s.%s" % (cls.__module__, cls.__name__)
+
+class MixedContextError(Exception):
+    """Error raised when a context suite sees tests from more than
+    one context.
+    """
+    pass
+
+
+class LazySuite(unittest.TestSuite):
+    """A suite that may use a generator as its list of tests
+    """
+    def __init__(self, tests=()):
+        """Initialize the suite. tests may be an iterable or a generator
+        """
+        super(LazySuite, self).__init__()
+        self._set_tests(tests)
+
+    def __iter__(self):
+        return iter(self._tests)
+
+    def __repr__(self):
+        return "<%s tests=generator (%s)>" % (
+            _strclass(self.__class__), id(self))
+
+    def __hash__(self):
+        return object.__hash__(self)
+
+    __str__ = __repr__
+
+    def addTest(self, test):
+        self._precache.append(test)
+
+    # added to bypass run changes in 2.7's unittest
+    def run(self, result):
+        for test in self._tests:
+            if result.shouldStop:
+                break
+            test(result)
+        return result
+
+    def __nonzero__(self):
+        log.debug("tests in %s?", id(self))
+        if self._precache:
+            return True
+        if self.test_generator is None:
+            return False
+        try:
+            test = self.test_generator.next()
+            if test is not None:
+                self._precache.append(test)
+                return True
+        except StopIteration:
+            pass
+        return False
+
+    def _get_tests(self):
+        log.debug("precache is %s", self._precache)
+        for test in self._precache:
+            yield test
+        if self.test_generator is None:
+            return
+        for test in self.test_generator:
+            yield test
+
+    def _set_tests(self, tests):
+        self._precache = []
+        is_suite = isinstance(tests, unittest.TestSuite)
+        if callable(tests) and not is_suite:
+            self.test_generator = tests()
+        elif is_suite:
+            # Suites need special treatment: they must be called like
+            # tests for their setup/teardown to run (if any)
+            self.addTests([tests])
+            self.test_generator = None
+        else:
+            self.addTests(tests)
+            self.test_generator = None
+
+    _tests = property(_get_tests, _set_tests, None,
+                      "Access the tests in this suite. Access is through a "
+                      "generator, so iteration may not be repeatable.")
+
+
+class ContextSuite(LazySuite):
+    """A suite with context.
+
+    A ContextSuite executes fixtures (setup and teardown functions or
+    methods) for the context containing its tests.
+
+    The context may be explicitly passed. If it is not, a context (or
+    nested set of contexts) will be constructed by examining the tests
+    in the suite.
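+
+    For example, a hypothetical test module can provide fixtures that a
+    ContextSuite runs around its tests::
+
+        def setup_module():
+            pass  # runs once, before any test in the module
+
+        def teardown_module():
+            pass  # runs once, after all tests in the module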
+    """
+    failureException = unittest.TestCase.failureException
+    was_setup = False
+    was_torndown = False
+    classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
+                     'setUpClass', 'setUpAll')
+    classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
+                     'teardownAll', 'tearDownClass', 'tearDownAll')
+    moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
+                   'setUp')
+    moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
+                      'teardown', 'tearDown')
+    packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
+    packageTeardown = ('teardown_package', 'teardownPackage',
+                       'tearDownPackage')
+
+    def __init__(self, tests=(), context=None, factory=None,
+                 config=None, resultProxy=None, can_split=True):
+        log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
+        self.context = context
+        self.factory = factory
+        if config is None:
+            config = Config()
+        self.config = config
+        self.resultProxy = resultProxy
+        self.has_run = False
+        self.can_split = can_split
+        self.error_context = None
+        super(ContextSuite, self).__init__(tests)
+
+    def __repr__(self):
+        return "<%s context=%s>" % (
+            _strclass(self.__class__),
+            getattr(self.context, '__name__', self.context))
+    __str__ = __repr__
+
+    def id(self):
+        if self.error_context:
+            return '%s:%s' % (repr(self), self.error_context)
+        else:
+            return repr(self)
+
+    def __hash__(self):
+        return object.__hash__(self)
+
+    # 2.3 compat -- force 2.4 call sequence
+    def __call__(self, *arg, **kw):
+        return self.run(*arg, **kw)
+
+    def exc_info(self):
+        """Hook for replacing error tuple output
+        """
+        return sys.exc_info()
+
+    def _exc_info(self):
+        """Bottleneck to fix up IronPython string exceptions
+        """
+        e = self.exc_info()
+        if sys.platform == 'cli':
+            if isinstance(e[0], StringException):
+                # IronPython throws these StringExceptions, but
+                # traceback checks type(etype) == str. Make a real
+                # string here.
+                e = (str(e[0]), e[1], e[2])
+
+        return e
+
+    def run(self, result):
+        """Run tests in suite inside of suite fixtures.
+        """
+        # proxy the result for myself
+        log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
+        if self.resultProxy:
+            result, orig = self.resultProxy(result, self), result
+        else:
+            result, orig = result, result
+        try:
+            self.setUp()
+        except KeyboardInterrupt:
+            raise
+        except:
+            self.error_context = 'setup'
+            result.addError(self, self._exc_info())
+            return
+        try:
+            for test in self._tests:
+                if result.shouldStop:
+                    log.debug("stopping")
+                    break
+                # each nose.case.Test will create its own result proxy
+                # so the cases need the original result, to avoid proxy
+                # chains
+                test(orig)
+        finally:
+            self.has_run = True
+            try:
+                self.tearDown()
+            except KeyboardInterrupt:
+                raise
+            except:
+                self.error_context = 'teardown'
+                result.addError(self, self._exc_info())
+
+    def hasFixtures(self, ctx_callback=None):
+        context = self.context
+        if context is None:
+            return False
+        if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
+            return True
+        # My context doesn't have any, but its ancestors might
+        factory = self.factory
+        if factory:
+            ancestors = factory.context.get(self, [])
+            for ancestor in ancestors:
+                if self.implementsAnyFixture(
+                    ancestor, ctx_callback=ctx_callback):
+                    return True
+        return False
+
+    def implementsAnyFixture(self, context, ctx_callback):
+        if isclass(context):
+            names = self.classSetup + self.classTeardown
+        else:
+            names = self.moduleSetup + self.moduleTeardown
+            if hasattr(context, '__path__'):
+                names += self.packageSetup + self.packageTeardown
+        # If my context has any fixture attribute, I have fixtures
+        fixt = False
+        for m in names:
+            if hasattr(context, m):
+                fixt = True
+                break
+        if ctx_callback is None:
+            return fixt
+        return ctx_callback(context, fixt)
+
+    def setUp(self):
+        log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
+        if not self:
+            # I have no tests
+            log.debug("suite %s has no tests", id(self))
+            return
+        if self.was_setup:
+            log.debug("suite %s already set up", id(self))
+            return
+        context = self.context
+        if context is None:
+            return
+        # before running my own context's setup, I need to
+        # ask the factory if my context's contexts' setups have been run
+        factory = self.factory
+        if factory:
+            # get a copy, since we'll be destroying it as we go
+            ancestors = factory.context.get(self, [])[:]
+            while ancestors:
+                ancestor = ancestors.pop()
+                log.debug("ancestor %s may need setup", ancestor)
+                if ancestor in factory.was_setup:
+                    continue
+                log.debug("ancestor %s does need setup", ancestor)
+                self.setupContext(ancestor)
+            if context not in factory.was_setup:
+                self.setupContext(context)
+        else:
+            self.setupContext(context)
+        self.was_setup = True
+        log.debug("completed suite setup")
+
+    def setupContext(self, context):
+        self.config.plugins.startContext(context)
+        log.debug("%s setup context %s", self, context)
+        if self.factory:
+            if context in self.factory.was_setup:
+                return
+            # note that I ran the setup for this context, so that I'll run
+            # the teardown in my teardown
+            self.factory.was_setup[context] = self
+        if isclass(context):
+            names = self.classSetup
+        else:
+            names = self.moduleSetup
+            if hasattr(context, '__path__'):
+                names = self.packageSetup + names
+        try_run(context, names)
+
+    def shortDescription(self):
+        if self.context is None:
+            return "test suite"
+        return "test suite for %s" % self.context
+
+    def tearDown(self):
+        log.debug('context teardown')
+        if not self.was_setup or self.was_torndown:
+            log.debug(
+                "No reason to teardown (was_setup? %s was_torndown? %s)"
+                % (self.was_setup, self.was_torndown))
+            return
+        self.was_torndown = True
+        context = self.context
+        if context is None:
+            log.debug("No context to tear down")
+            return
+
+        # for each ancestor... if the ancestor was setup
+        # and I did the setup, I can do teardown
+        factory = self.factory
+        if factory:
+            ancestors = factory.context.get(self, []) + [context]
+            for ancestor in ancestors:
+                log.debug('ancestor %s may need teardown', ancestor)
+                if not ancestor in factory.was_setup:
+                    log.debug('ancestor %s was not setup', ancestor)
+                    continue
+                if ancestor in factory.was_torndown:
+                    log.debug('ancestor %s already torn down', ancestor)
+                    continue
+                setup = factory.was_setup[ancestor]
+                log.debug("%s setup ancestor %s", setup, ancestor)
+                if setup is self:
+                    self.teardownContext(ancestor)
+        else:
+            self.teardownContext(context)
+
+    def teardownContext(self, context):
+        log.debug("%s teardown context %s", self, context)
+        if self.factory:
+            if context in self.factory.was_torndown:
+                return
+            self.factory.was_torndown[context] = self
+        if isclass(context):
+            names = self.classTeardown
+        else:
+            names = self.moduleTeardown
+            if hasattr(context, '__path__'):
+                names = self.packageTeardown + names
+        try_run(context, names)
+        self.config.plugins.stopContext(context)
+
+    # FIXME the wrapping has to move to the factory?
+    def _get_wrapped_tests(self):
+        for test in self._get_tests():
+            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
+                yield test
+            else:
+                yield Test(test,
+                           config=self.config,
+                           resultProxy=self.resultProxy)
+
+    _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
+                      "Access the tests in this suite. Tests are returned "
+                      "inside of a context wrapper.")
+
+
+class ContextSuiteFactory(object):
+    """Factory for ContextSuites. Called with a collection of tests,
+    the factory decides on a hierarchy of contexts by introspecting
+    the collection or the tests themselves to find the objects
+    containing the test objects. It always returns one suite, but that
+    suite may consist of a hierarchy of nested suites.
+    """
+    suiteClass = ContextSuite
+    def __init__(self, config=None, suiteClass=None, resultProxy=_def):
+        if config is None:
+            config = Config()
+        self.config = config
+        if suiteClass is not None:
+            self.suiteClass = suiteClass
+        # Using a singleton to represent default instead of None allows
+        # passing resultProxy=None to turn proxying off.
+        if resultProxy is _def:
+            resultProxy = ResultProxyFactory(config=config)
+        self.resultProxy = resultProxy
+        self.suites = {}
+        self.context = {}
+        self.was_setup = {}
+        self.was_torndown = {}
+
+    def __call__(self, tests, **kw):
+        """Return ``ContextSuite`` for tests. ``tests`` may either
+        be a callable (in which case the resulting ContextSuite will
+        have no parent context and be evaluated lazily) or an
+        iterable. In that case the tests will be wrapped in
+        nose.case.Test, examined, the context of each found, and a
+        suite of suites returned, organized into a stack with the
+        outermost suites belonging to the outermost contexts.
+        """
+        log.debug("Create suite for %s", tests)
+        context = kw.pop('context', getattr(tests, 'context', None))
+        log.debug("tests %s context %s", tests, context)
+        if context is None:
+            tests = self.wrapTests(tests)
+            try:
+                context = self.findContext(tests)
+            except MixedContextError:
+                return self.makeSuite(self.mixedSuites(tests), None, **kw)
+        return self.makeSuite(tests, context, **kw)
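+    # A rough usage sketch (``cases`` is a hypothetical iterable of
+    # already-loaded test cases, which nose's loader normally supplies):
+    #
+    #   factory = ContextSuiteFactory()
+    #   suite = factory(cases)   # returns a (possibly nested) ContextSuite
+    #   suite(result)            # TestSuite instances are callable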
+
+    def ancestry(self, context):
+        """Return the ancestry of the context (that is, all of the
+        packages and modules containing the context), in order of
+        descent with the outermost ancestor last.
+        This method is a generator.
+        """
+        log.debug("get ancestry %s", context)
+        if context is None:
+            return
+        # Methods include a reference to the module they are defined in;
+        # we don't want that. Instead we want the module the class is in
+        # now (classes are re-ancestored elsewhere).
+        if hasattr(context, 'im_class'):
+            context = context.im_class
+        elif hasattr(context, '__self__'):
+            context = context.__self__.__class__
+        if hasattr(context, '__module__'):
+            ancestors = context.__module__.split('.')
+        elif hasattr(context, '__name__'):
+            ancestors = context.__name__.split('.')[:-1]
+        else:
+            raise TypeError("%s has no ancestors?" % context)
+        while ancestors:
+            log.debug(" %s ancestors %s", context, ancestors)
+            yield resolve_name('.'.join(ancestors))
+            ancestors.pop()
+
+    def findContext(self, tests):
+        if callable(tests) or isinstance(tests, unittest.TestSuite):
+            return None
+        context = None
+        for test in tests:
+            # Don't look at suites for contexts, only tests
+            ctx = getattr(test, 'context', None)
+            if ctx is None:
+                continue
+            if context is None:
+                context = ctx
+            elif context != ctx:
+                raise MixedContextError(
+                    "Tests with different contexts in same suite! %s != %s"
+                    % (context, ctx))
+        return context
+
+    def makeSuite(self, tests, context, **kw):
+        suite = self.suiteClass(
+            tests, context=context, config=self.config, factory=self,
+            resultProxy=self.resultProxy, **kw)
+        if context is not None:
+            self.suites.setdefault(context, []).append(suite)
+            self.context.setdefault(suite, []).append(context)
+            log.debug("suite %s has context %s", suite,
+                      getattr(context, '__name__', None))
+            for ancestor in self.ancestry(context):
+                self.suites.setdefault(ancestor, []).append(suite)
+                self.context[suite].append(ancestor)
+                log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
+        return suite
+
+    def mixedSuites(self, tests):
+        """The complex case where there are tests that don't all share
+        the same context. Groups tests into suites with common ancestors,
+        according to the following (essentially tail-recursive) procedure:
+
+        Starting with the context of the first test, if it is not
+        None, look for tests in the remaining tests that share that
+        ancestor. If any are found, group into a suite with that
+        ancestor as the context, and replace the current suite with
+        that suite. Continue this process for each ancestor of the
+        first test, until all ancestors have been processed. At this
+        point if any tests remain, recurse with those tests as the
+        input, returning a list of the common suite (which may be the
+        suite or test we started with, if no common tests were found)
+        plus the results of recursion.
+        """
+        if not tests:
+            return []
+        head = tests.pop(0)
+        if not tests:
+            return [head] # short circuit when none are left to combine
+        suite = head # the common ancestry suite, so far
+        tail = tests[:]
+        context = getattr(head, 'context', None)
+        if context is not None:
+            ancestors = [context] + [a for a in self.ancestry(context)]
+            for ancestor in ancestors:
+                common = [suite] # tests with ancestor in common, so far
+                remain = [] # tests that remain to be processed
+                for test in tail:
+                    found_common = False
+                    test_ctx = getattr(test, 'context', None)
+                    if test_ctx is None:
+                        remain.append(test)
+                        continue
+                    if test_ctx is ancestor:
+                        common.append(test)
+                        continue
+                    for test_ancestor in self.ancestry(test_ctx):
+                        if test_ancestor is ancestor:
+                            common.append(test)
+                            found_common = True
+                            break
+                    if not found_common:
+                        remain.append(test)
+                if common:
+                    suite = self.makeSuite(common, ancestor)
+                tail = self.mixedSuites(remain)
+        return [suite] + tail
+
+    def wrapTests(self, tests):
+        log.debug("wrap %s", tests)
+        if callable(tests) or isinstance(tests, unittest.TestSuite):
+            log.debug("I won't wrap")
+            return tests
+        wrapped = []
+        for test in tests:
+            log.debug("wrapping %s", test)
+            if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
+                wrapped.append(test)
+            elif isinstance(test, ContextList):
+                wrapped.append(self.makeSuite(test, context=test.context))
+            else:
+                wrapped.append(
+                    Test(test, config=self.config, resultProxy=self.resultProxy)
+                    )
+        return wrapped
+
+
+class ContextList(object):
+    """Not quite a suite -- a group of tests in a context. This is used
+    to hint the ContextSuiteFactory about what context the tests
+    belong to, in cases where it may be ambiguous or missing.
+    """
+    def __init__(self, tests, context=None):
+        self.tests = tests
+        self.context = context
+
+    def __iter__(self):
+        return iter(self.tests)
+
+
+class FinalizingSuiteWrapper(unittest.TestSuite):
+    """Wraps suite and calls final function after suite has
+    executed. Used to call final functions in cases (like running in
+    the standard test runner) where test running is not under nose's
+    control.
+    """
+    def __init__(self, suite, finalize):
+        super(FinalizingSuiteWrapper, self).__init__()
+        self.suite = suite
+        self.finalize = finalize
+
+    def __call__(self, *arg, **kw):
+        return self.run(*arg, **kw)
+
+    # 2.7 compat
+    def __iter__(self):
+        return iter(self.suite)
+
+    def run(self, *arg, **kw):
+        try:
+            return self.suite(*arg, **kw)
+        finally:
+            self.finalize(*arg, **kw)
+
+
+# backwards compat -- sort of
+class TestDir:
+    def __init__(*arg, **kw):
+        raise NotImplementedError(
+            "TestDir is not usable with nose 0.10. The class is present "
+            "in nose.suite for backwards compatibility purposes but it "
+            "may not be used.")
+
+
+class TestModule:
+    def __init__(*arg, **kw):
+        raise NotImplementedError(
+            "TestModule is not usable with nose 0.10. The class is present "
+            "in nose.suite for backwards compatibility purposes but it "
+            "may not be used.")
diff --git a/lib/spack/external/nose/tools/__init__.py b/lib/spack/external/nose/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..74dab16a749c38c870a31b837030ab1b11da801d
--- /dev/null
+++ b/lib/spack/external/nose/tools/__init__.py
@@ -0,0 +1,15 @@
+"""
+Tools for testing
+-----------------
+
+nose.tools provides a few convenience functions to make writing tests
+easier. You don't have to use them; nothing in the rest of nose depends
+on any of these methods.
+
+"""
+from nose.tools.nontrivial import *
+from nose.tools.nontrivial import __all__ as nontrivial_all
+from nose.tools.trivial import *
+from nose.tools.trivial import __all__ as trivial_all
+
+__all__ = trivial_all + nontrivial_all
diff --git a/lib/spack/external/nose/tools/nontrivial.py b/lib/spack/external/nose/tools/nontrivial.py
new file mode 100644
index 0000000000000000000000000000000000000000..283973245b4abba32fcc5f59505c53ecae94d644
--- /dev/null
+++ b/lib/spack/external/nose/tools/nontrivial.py
@@ -0,0 +1,151 @@
+"""Tools not exempt from being descended into in tracebacks"""
+
+import time
+
+
+__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
+           'TimeExpired', 'istest', 'nottest']
+
+
+class TimeExpired(AssertionError):
+    pass
+
+
+def make_decorator(func):
+    """
+    Wraps a test decorator so as to properly replicate metadata
+    of the decorated function, including nose's additional stuff
+    (namely, setup and teardown).
+    """
+    def decorate(newfunc):
+        if hasattr(func, 'compat_func_name'):
+            name = func.compat_func_name
+        else:
+            name = func.__name__
+        newfunc.__dict__ = func.__dict__
+        newfunc.__doc__ = func.__doc__
+        newfunc.__module__ = func.__module__
+        if not hasattr(newfunc, 'compat_co_firstlineno'):
+            newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
+        try:
+            newfunc.__name__ = name
+        except TypeError:
+            # can't set func name in 2.3
+            newfunc.compat_func_name = name
+        return newfunc
+    return decorate
+
+
+def raises(*exceptions):
+    """Test must raise one of expected exceptions to pass.
+
+    Example use::
+
+      @raises(TypeError, ValueError)
+      def test_raises_type_error():
+          raise TypeError("This test passes")
+
+      @raises(Exception)
+      def test_that_fails_by_passing():
+          pass
+
+    If you want to test many assertions about exceptions in a single test,
+    you may want to use `assert_raises` instead.
+    """
+    valid = ' or '.join([e.__name__ for e in exceptions])
+    def decorate(func):
+        name = func.__name__
+        def newfunc(*arg, **kw):
+            try:
+                func(*arg, **kw)
+            except exceptions:
+                pass
+            except:
+                raise
+            else:
+                message = "%s() did not raise %s" % (name, valid)
+                raise AssertionError(message)
+        newfunc = make_decorator(func)(newfunc)
+        return newfunc
+    return decorate
+
+
+def set_trace():
+    """Call pdb.set_trace in the calling frame, first restoring
+    sys.stdout to the real output stream. Note that sys.stdout is NOT
+    reset to whatever it was before the call once pdb is done!
+    """
+    import pdb
+    import sys
+    stdout = sys.stdout
+    sys.stdout = sys.__stdout__
+    pdb.Pdb().set_trace(sys._getframe().f_back)
+
+
+def timed(limit):
+    """Test must finish within specified time limit to pass.
+
+    Example use::
+
+      @timed(.1)
+      def test_that_fails():
+          time.sleep(.2)
+    """
+    def decorate(func):
+        def newfunc(*arg, **kw):
+            start = time.time()
+            result = func(*arg, **kw)
+            end = time.time()
+            if end - start > limit:
+                raise TimeExpired("Time limit (%s) exceeded" % limit)
+            return result
+        newfunc = make_decorator(func)(newfunc)
+        return newfunc
+    return decorate
+
+
+def with_setup(setup=None, teardown=None):
+    """Decorator to add setup and/or teardown methods to a test function::
+
+      @with_setup(setup, teardown)
+      def test_something():
+          " ... "
+
+    Note that `with_setup` is useful *only* for test functions, not for test
+    methods or inside of TestCase subclasses.
+    """
+    def decorate(func, setup=setup, teardown=teardown):
+        if setup:
+            if hasattr(func, 'setup'):
+                _old_s = func.setup
+                def _s():
+                    setup()
+                    _old_s()
+                func.setup = _s
+            else:
+                func.setup = setup
+        if teardown:
+            if hasattr(func, 'teardown'):
+                _old_t = func.teardown
+                def _t():
+                    _old_t()
+                    teardown()
+                func.teardown = _t
+            else:
+                func.teardown = teardown
+        return func
+    return decorate
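+# A fuller sketch of ``with_setup`` (``open_fixture``, ``close_fixture`` and
+# ``fixture_is_open`` are hypothetical module-level functions):
+#
+#   @with_setup(setup=open_fixture, teardown=close_fixture)
+#   def test_uses_fixture():
+#       assert fixture_is_open()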
+
+
+def istest(func):
+    """Decorator to mark a function or method as a test
+    """
+    func.__test__ = True
+    return func
+
+
+def nottest(func):
+    """Decorator to mark a function or method as *not* a test
+    """
+    func.__test__ = False
+    return func
diff --git a/lib/spack/external/nose/tools/trivial.py b/lib/spack/external/nose/tools/trivial.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf83efeda5b3320c1bdea3cbfbb44c5552e24b9b
--- /dev/null
+++ b/lib/spack/external/nose/tools/trivial.py
@@ -0,0 +1,54 @@
+"""Tools so trivial that tracebacks should not descend into them
+
+We define the ``__unittest`` symbol in their module namespace so unittest will
+skip them when printing tracebacks, just as it does for their corresponding
+methods in ``unittest`` proper.
+
+"""
+import re
+import unittest
+
+
+__all__ = ['ok_', 'eq_']
+
+# Use the same flag as unittest itself to prevent descent into these functions:
+__unittest = 1
+
+
+def ok_(expr, msg=None):
+    """Shorthand for assert. Saves 3 whole characters!
+    """
+    if not expr:
+        raise AssertionError(msg)
+
+
+def eq_(a, b, msg=None):
+    """Shorthand for 'assert a == b, "%r != %r" % (a, b)
+    """
+    if not a == b:
+        raise AssertionError(msg or "%r != %r" % (a, b))
+
+
+#
+# Expose assert* from unittest.TestCase
+# - give them pep8 style names
+#
+caps = re.compile('([A-Z])')
+
+def pep8(name):
+    return caps.sub(lambda m: '_' + m.groups()[0].lower(), name)
+
+class Dummy(unittest.TestCase):
+    def nop():
+        pass
+_t = Dummy('nop')
+
+for at in [ at for at in dir(_t)
+            if at.startswith('assert') and not '_' in at ]:
+    pepd = pep8(at)
+    vars()[pepd] = getattr(_t, at)
+    __all__.append(pepd)
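+# The loop above also exposes pep8-style aliases such as ``assert_equal``
+# and ``assert_true`` (the exact set depends on the running Python's
+# unittest). Usage sketch:
+#
+#   from nose.tools import assert_equal
+#   assert_equal(2 + 2, 4)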
+
+del Dummy
+del _t
+del pep8
diff --git a/lib/spack/external/nose/twistedtools.py b/lib/spack/external/nose/twistedtools.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d9c6ffe9b339fb50c5cfb15c4cb5c736564bc1b
--- /dev/null
+++ b/lib/spack/external/nose/twistedtools.py
@@ -0,0 +1,173 @@
+"""
+Twisted integration
+-------------------
+
+This module provides a very simple way to integrate your tests with the
+Twisted_ event loop.
+
+You must import this module *before* importing anything from Twisted itself!
+
+Example::
+
+  from nose.twistedtools import reactor, deferred
+  
+  @deferred()
+  def test_resolve():
+      return reactor.resolve("www.python.org")
+
+Or, more realistically::
+
+  @deferred(timeout=5.0)
+  def test_resolve():
+      d = reactor.resolve("www.python.org")
+      def check_ip(ip):
+          assert ip == "67.15.36.43"
+      d.addCallback(check_ip)
+      return d
+
+.. _Twisted: http://twistedmatrix.com/trac/
+"""
+
+import sys
+from Queue import Queue, Empty
+from nose.tools import make_decorator, TimeExpired
+
+__all__ = [
+    'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
+    'stop_reactor'
+]
+
+_twisted_thread = None
+
+def threaded_reactor():
+    """
+    Start the Twisted reactor in a separate thread, if not already done.
+    Returns the reactor and the reactor thread, or (None, None) if Twisted
+    cannot be imported.
+    The thread will automatically be destroyed when all the tests are done.
+    """
+    global _twisted_thread
+    try:
+        from twisted.internet import reactor
+    except ImportError:
+        return None, None
+    if not _twisted_thread:
+        from twisted.python import threadable
+        from threading import Thread
+        _twisted_thread = Thread(target=lambda: reactor.run( \
+                installSignalHandlers=False))
+        _twisted_thread.setDaemon(True)
+        _twisted_thread.start()
+    return reactor, _twisted_thread
+
+# Export global reactor variable, as Twisted does
+reactor, reactor_thread = threaded_reactor()
+
+
+def stop_reactor():
+    """Stop the reactor and join the reactor thread until it stops.
+    Call this function in teardown at the module or package level to
+    reset the twisted system after your tests. You *must* do this if
+    you mix tests using these tools and tests using twisted.trial.
+    """
+    global _twisted_thread
+
+    def stop_reactor():
+        '''Helper for calling stop from within the thread.'''
+        reactor.stop()
+
+    reactor.callFromThread(stop_reactor)
+    reactor_thread.join()
+    for p in reactor.getDelayedCalls():
+        if p.active():
+            p.cancel()
+    _twisted_thread = None
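+# Usage sketch: call stop_reactor from a module-level teardown so the
+# reactor thread is shut down after the module's tests finish:
+#
+#   def teardown_module():
+#       stop_reactor()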
+
+
+def deferred(timeout=None):
+    """
+    By wrapping a test function with this decorator, you can return a
+    twisted Deferred and the test will wait for the deferred to be triggered.
+    The whole test function will run inside the Twisted event loop.
+
+    The optional timeout parameter specifies the maximum duration of the test.
+    The difference with timed() is that timed() will still wait for the test
+    to end, while deferred() will stop the test when its timeout has expired.
+    The latter is more desirable when dealing with network tests, because
+    the result may actually never arrive.
+
+    If the callback is triggered, the test has passed.
+    If the errback is triggered or the timeout expires, the test has failed.
+
+    Example::
+    
+        @deferred(timeout=5.0)
+        def test_resolve():
+            return reactor.resolve("www.python.org")
+
+    Attention! If you combine this decorator with other decorators (like
+    "raises"), deferred() must be called *first*!
+
+    In other words, this is good::
+        
+        @raises(DNSLookupError)
+        @deferred()
+        def test_error():
+            return reactor.resolve("xxxjhjhj.biz")
+
+    and this is bad::
+        
+        @deferred()
+        @raises(DNSLookupError)
+        def test_error():
+            return reactor.resolve("xxxjhjhj.biz")
+    """
+    reactor, reactor_thread = threaded_reactor()
+    if reactor is None:
+        raise ImportError("twisted is not available or could not be imported")
+    # Check for common syntax mistake
+    # (otherwise, tests can be silently ignored
+    # if one writes "@deferred" instead of "@deferred()")
+    try:
+        timeout is None or timeout + 0
+    except TypeError:
+        raise TypeError("'timeout' argument must be a number or None")
+
+    def decorate(func):
+        def wrapper(*args, **kargs):
+            q = Queue()
+            def callback(value):
+                q.put(None)
+            def errback(failure):
+                # Retrieve and save full exception info
+                try:
+                    failure.raiseException()
+                except:
+                    q.put(sys.exc_info())
+            def g():
+                try:
+                    d = func(*args, **kargs)
+                    try:
+                        d.addCallbacks(callback, errback)
+                    # Check for a common mistake and display a nice error
+                    # message
+                    except AttributeError:
+                        raise TypeError("you must return a twisted Deferred "
+                                        "from your test case!")
+                # Catch exceptions raised in the test body (from the
+                # Twisted thread)
+                except:
+                    q.put(sys.exc_info())
+            reactor.callFromThread(g)
+            try:
+                error = q.get(timeout=timeout)
+            except Empty:
+                raise TimeExpired("timeout expired before end of test (%f s.)"
+                                  % timeout)
+            # Re-raise all exceptions
+            if error is not None:
+                exc_type, exc_value, tb = error
+                raise exc_type, exc_value, tb
+        wrapper = make_decorator(func)(wrapper)
+        return wrapper
+    return decorate
+
diff --git a/lib/spack/external/nose/usage.txt b/lib/spack/external/nose/usage.txt
new file mode 100644
index 0000000000000000000000000000000000000000..bc96894ab78e85d2fec7dc908c783e7c7caa9055
--- /dev/null
+++ b/lib/spack/external/nose/usage.txt
@@ -0,0 +1,115 @@
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression
+(by default: `(?:^|[\b_\.-])[Tt]est`) will be collected as a test (or
+source for collection of tests). In addition, all other packages
+found in the working directory will be examined for python source files
+or directories that match testMatch. Package discovery descends all
+the way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+**It is important to note that the default behavior of nose is to
+not include tests from files which are executable.**  To include
+tests from such files, remove their executable bit or use
+the --exe flag (see 'Options' section below).
+
+Selecting Tests
+---------------
+
+To specify which tests to run, pass test names on the command line:
+
+  %prog only_test_this.py
+  
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+  %prog test.module
+  %prog another.test:TestCase.test_method
+  %prog a.test:TestCase
+  %prog /path/to/test/file.py:test_function
+  
+You may also change the working directory where nose looks for tests
+by using the -w switch:
+
+  %prog -w /path/to/tests
+
+Note, however, that support for multiple -w arguments is now deprecated
+and will be removed in a future release. As of nose 0.10, you can get
+the same behavior by specifying the target directories *without*
+the -w switch:
+
+  %prog /path/to/tests /another/path/to/tests
+
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for
+the additional features (error classes, and plugin-supplied
+features such as output capture and assert introspection) detailed
+in the options below.
+
+Configuration
+-------------
+
+In addition to passing command-line options, you may also put
+configuration options in your project's *setup.cfg* file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+ini-style config files, you put your nosetests configuration in a
+``[nosetests]`` section. Options are the same as on the command line,
+with the -- prefix removed. For options that are simple switches, you
+must supply a value:
+
+  [nosetests]
+  verbosity=3
+  with-doctest=1
+
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the ``-c`` option.
+
+Using Plugins
+-------------
+
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is installed,
+run:
+
+  nosetests --plugins
+
+You can add -v or -vv to that command to show more information
+about each plugin.
+
+If you are running nose.main() or nose.run() from a script, you
+can specify a list of plugins to use by passing a list of plugins
+with the plugins keyword argument.
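+
+For example (a sketch; MyPlugin is a placeholder for your own plugin
+class):
+
+  import nose
+  nose.main(plugins=[MyPlugin()])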
+
+0.9 plugins
+-----------
+
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it's likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+0.10 and 0.11 plugins
+---------------------
+
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
diff --git a/lib/spack/external/nose/util.py b/lib/spack/external/nose/util.py
new file mode 100644
index 0000000000000000000000000000000000000000..bfe16589ea1814115596cdc70ba58353bed80cb4
--- /dev/null
+++ b/lib/spack/external/nose/util.py
@@ -0,0 +1,668 @@
+"""Utility functions and classes used by nose internally.
+"""
+import inspect
+import itertools
+import logging
+import stat
+import os
+import re
+import sys
+import types
+import unittest
+from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
+
+
+log = logging.getLogger('nose')
+
+ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
+class_types = (ClassType, TypeType)
+skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
+
+try:
+    set()
+    set = set # make from nose.util import set happy
+except NameError:
+    try:
+        from sets import Set as set
+    except ImportError:
+        pass
+
+
+def ls_tree(dir_path="",
+            skip_pattern=skip_pattern,
+            indent="|-- ", branch_indent="|   ",
+            last_indent="`-- ", last_branch_indent="    "):
+    # TODO: empty directories look like non-directory files
+    return "\n".join(_ls_tree_lines(dir_path, skip_pattern,
+                                    indent, branch_indent,
+                                    last_indent, last_branch_indent))
+
+
+def _ls_tree_lines(dir_path, skip_pattern,
+                   indent, branch_indent, last_indent, last_branch_indent):
+    if dir_path == "":
+        dir_path = os.getcwd()
+
+    lines = []
+
+    names = os.listdir(dir_path)
+    names.sort()
+    dirs, nondirs = [], []
+    for name in names:
+        if re.match(skip_pattern, name):
+            continue
+        if os.path.isdir(os.path.join(dir_path, name)):
+            dirs.append(name)
+        else:
+            nondirs.append(name)
+
+    # list non-directories first
+    entries = list(itertools.chain([(name, False) for name in nondirs],
+                                   [(name, True) for name in dirs]))
+    def ls_entry(name, is_dir, ind, branch_ind):
+        if not is_dir:
+            yield ind + name
+        else:
+            path = os.path.join(dir_path, name)
+            if not os.path.islink(path):
+                yield ind + name
+                subtree = _ls_tree_lines(path, skip_pattern,
+                                         indent, branch_indent,
+                                         last_indent, last_branch_indent)
+                for x in subtree:
+                    yield branch_ind + x
+    for name, is_dir in entries[:-1]:
+        for line in ls_entry(name, is_dir, indent, branch_indent):
+            yield line
+    if entries:
+        name, is_dir = entries[-1]
+        for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
+            yield line
+
+
+def absdir(path):
+    """Return absolute, normalized path to directory, if it exists; None
+    otherwise.
+    """
+    if not os.path.isabs(path):
+        path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
+                                                             path)))
+    if path is None or not os.path.isdir(path):
+        return None
+    return path
+
+
+def absfile(path, where=None):
+    """Return absolute, normalized path to file (optionally in directory
+    where), or None if the file can't be found either in where or the current
+    working directory.
+    """
+    orig = path
+    if where is None:
+        where = os.getcwd()
+    if isinstance(where, list) or isinstance(where, tuple):
+        for maybe_path in where:
+            maybe_abs = absfile(path, maybe_path)
+            if maybe_abs is not None:
+                return maybe_abs
+        return None
+    if not os.path.isabs(path):
+        path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
+    if path is None or not os.path.exists(path):
+        if where != os.getcwd():
+            # try the cwd instead
+            path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
+                                                                 orig)))
+    if path is None or not os.path.exists(path):
+        return None
+    if os.path.isdir(path):
+        # might want an __init__.py from package
+        init = os.path.join(path,'__init__.py')
+        if os.path.isfile(init):
+            return init
+    elif os.path.isfile(path):
+        return path
+    return None
+
+
+def anyp(predicate, iterable):
+    for item in iterable:
+        if predicate(item):
+            return True
+    return False
+
+
+def file_like(name):
+    """A name is file-like if it is a path that exists, or it has a
+    directory part, or it ends in .py, or it isn't a legal python
+    identifier.
+    """
+    return (os.path.exists(name)
+            or os.path.dirname(name)
+            or name.endswith('.py')
+            or not ident_re.match(os.path.splitext(name)[0]))
+
+
+def func_lineno(func):
+    """Get the line number of a function. First looks for
+    compat_co_firstlineno, then func_code.co_firstlineno.
+    """
+    try:
+        return func.compat_co_firstlineno
+    except AttributeError:
+        try:
+            return func.func_code.co_firstlineno
+        except AttributeError:
+            return -1
+
+
+def isclass(obj):
+    """Is obj a class? Inspect's isclass is too liberal and returns True
+    for objects that can't be subclasses of anything.
+    """
+    obj_type = type(obj)
+    return obj_type in class_types or issubclass(obj_type, type)
+
+
+# backwards compat (issue #64)
+is_generator = isgenerator
+
+
+def ispackage(path):
+    """
+    Is this path a package directory?
+
+    >>> ispackage('nose')
+    True
+    >>> ispackage('unit_tests')
+    False
+    >>> ispackage('nose/plugins')
+    True
+    >>> ispackage('nose/loader.py')
+    False
+    """
+    if os.path.isdir(path):
+        # at least the end of the path must be a legal python identifier
+        # and __init__.py[co] must exist
+        end = os.path.basename(path)
+        if ident_re.match(end):
+            for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
+                if os.path.isfile(os.path.join(path, init)):
+                    return True
+            if sys.platform.startswith('java') and \
+                    os.path.isfile(os.path.join(path, '__init__$py.class')):
+                return True
+    return False
+
+
+def isproperty(obj):
+    """
+    Is this a property?
+
+    >>> class Foo:
+    ...     def got(self):
+    ...         return 2
+    ...     def get(self):
+    ...         return 1
+    ...     get = property(get)
+
+    >>> isproperty(Foo.got)
+    False
+    >>> isproperty(Foo.get)
+    True
+    """
+    return type(obj) == property
+
+
+def getfilename(package, relativeTo=None):
+    """Find the python source file for a package, relative to a
+    particular directory (defaults to current working directory if not
+    given).
+    """
+    if relativeTo is None:
+        relativeTo = os.getcwd()
+    path = os.path.join(relativeTo, os.sep.join(package.split('.')))
+    if os.path.exists(path + '/__init__.py'):
+        return path
+    filename = path + '.py'
+    if os.path.exists(filename):
+        return filename
+    return None
+
+
+def getpackage(filename):
+    """
+    Find the full dotted package name for a given python source file
+    name. Returns None if the file is not a python source file.
+
+    >>> getpackage('foo.py')
+    'foo'
+    >>> getpackage('biff/baf.py')
+    'baf'
+    >>> getpackage('nose/util.py')
+    'nose.util'
+
+    Works for directories too.
+
+    >>> getpackage('nose')
+    'nose'
+    >>> getpackage('nose/plugins')
+    'nose.plugins'
+
+    And __init__ files stuck onto directories
+
+    >>> getpackage('nose/plugins/__init__.py')
+    'nose.plugins'
+
+    Absolute paths also work.
+
+    >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
+    >>> getpackage(path)
+    'nose.plugins'
+    """
+    src_file = src(filename)
+    if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
+        return None
+    base, ext = os.path.splitext(os.path.basename(src_file))
+    if base == '__init__':
+        mod_parts = []
+    else:
+        mod_parts = [base]
+    path, part = os.path.split(os.path.split(src_file)[0])
+    while part:
+        if ispackage(os.path.join(path, part)):
+            mod_parts.append(part)
+        else:
+            break
+        path, part = os.path.split(path)
+    mod_parts.reverse()
+    return '.'.join(mod_parts)
+
+
+def ln(label):
+    """Draw a 70-char-wide divider, with label in the middle.
+
+    >>> ln('hello there')
+    '---------------------------- hello there -----------------------------'
+    """
+    label_len = len(label) + 2
+    chunk = (70 - label_len) // 2
+    out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
+    pad = 70 - len(out)
+    if pad > 0:
+        out = out + ('-' * pad)
+    return out
+
+
+def resolve_name(name, module=None):
+    """Resolve a dotted name to a module and its parts. This is stolen
+    wholesale from unittest.TestLoader.loadTestByName.
+
+    >>> resolve_name('nose.util') #doctest: +ELLIPSIS
+    <module 'nose.util' from...>
+    >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
+    <function resolve_name at...>
+    """
+    parts = name.split('.')
+    parts_copy = parts[:]
+    if module is None:
+        while parts_copy:
+            try:
+                log.debug("__import__ %s", name)
+                module = __import__('.'.join(parts_copy))
+                break
+            except ImportError:
+                del parts_copy[-1]
+                if not parts_copy:
+                    raise
+        parts = parts[1:]
+    obj = module
+    log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
+    for part in parts:
+        obj = getattr(obj, part)
+    return obj
+
+
+def split_test_name(test):
+    """Split a test name into a 3-tuple containing file, module, and callable
+    names, any of which (but not all) may be blank.
+
+    Test names are in the form:
+
+    file_or_module:callable
+
+    Either side of the : may be dotted. To change the splitting behavior, you
+    can alter nose.util.split_test_re.
+    """
+    norm = os.path.normpath
+    file_or_mod = test
+    fn = None
+    if not ':' in test:
+        # only a file or mod part
+        if file_like(test):
+            return (norm(test), None, None)
+        else:
+            return (None, test, None)
+
+    # could be path|mod:callable, or a : in the file path someplace
+    head, tail = os.path.split(test)
+    if not head:
+        # this is a case like 'foo:bar' -- generally a module
+        # name followed by a callable, but also may be a windows
+        # drive letter followed by a path
+        try:
+            file_or_mod, fn = test.split(':')
+            if file_like(fn):
+                # must be a funny path
+                file_or_mod, fn = test, None
+        except ValueError:
+            # more than one : in the test
+            # this is a case like c:\some\path.py:a_test
+            parts = test.split(':')
+            if len(parts[0]) == 1:
+                file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
+            else:
+                # nonsense like foo:bar:baz
+                raise ValueError("Test name '%s' could not be parsed. Please "
+                                 "format test names as path:callable or "
+                                 "module:callable." % (test,))
+    elif not tail:
+        # this is a case like 'foo:bar/'
+        # : must be part of the file path, so ignore it
+        file_or_mod = test
+    else:
+        if ':' in tail:
+            file_part, fn = tail.split(':')
+        else:
+            file_part = tail
+        file_or_mod = os.sep.join([head, file_part])
+    if file_or_mod:
+        if file_like(file_or_mod):
+            return (norm(file_or_mod), None, fn)
+        else:
+            return (None, file_or_mod, fn)
+    else:
+        return (None, None, fn)
+split_test_name.__test__ = False # do not collect
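+# Illustrative splits (a sketch, assuming posix paths that do not exist on
+# disk):
+#
+#   split_test_name('tests/test_foo.py:TestFoo.test_bar')
+#       => ('tests/test_foo.py', None, 'TestFoo.test_bar')
+#   split_test_name('package.module:test_func')
+#       => (None, 'package.module', 'test_func')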
+
+
+def test_address(test):
+    """Find the test address for a test, which may be a module, filename,
+    class, method or function.
+    """
+    if hasattr(test, "address"):
+        return test.address()
+    # type-based polymorphism sucks in general, but I believe is
+    # appropriate here
+    t = type(test)
+    file = module = call = None
+    if t == types.ModuleType:
+        file = getattr(test, '__file__', None)
+        module = getattr(test, '__name__', None)
+        return (src(file), module, call)
+    if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
+        module = getattr(test, '__module__', None)
+        if module is not None:
+            m = sys.modules[module]
+            file = getattr(m, '__file__', None)
+            if file is not None:
+                file = os.path.abspath(file)
+        call = getattr(test, '__name__', None)
+        return (src(file), module, call)
+    if t == types.MethodType:
+        cls_adr = test_address(test.im_class)
+        return (src(cls_adr[0]), cls_adr[1],
+                "%s.%s" % (cls_adr[2], test.__name__))
+    # handle unittest.TestCase instances
+    if isinstance(test, unittest.TestCase):
+        if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
+            or hasattr(test, '_testFunc')):              # 2.7
+            # unittest FunctionTestCase
+            try:
+                return test_address(test._FunctionTestCase__testFunc)
+            except AttributeError:
+                return test_address(test._testFunc)
+        # regular unittest.TestCase
+        cls_adr = test_address(test.__class__)
+        # 2.5 compat: __testMethodName changed to _testMethodName
+        try:
+            method_name = test._TestCase__testMethodName
+        except AttributeError:
+            method_name = test._testMethodName
+        return (src(cls_adr[0]), cls_adr[1],
+                "%s.%s" % (cls_adr[2], method_name))
+    if (hasattr(test, '__class__') and
+            test.__class__.__module__ not in ('__builtin__', 'builtins')):
+        return test_address(test.__class__)
+    raise TypeError("I don't know what %s is (%s)" % (test, t))
+test_address.__test__ = False # do not collect
+
+
+def try_run(obj, names):
+    """Given a list of possible method names, try to run them with the
+    provided object. Keep going until something works. Used to run
+    setup/teardown methods for module, package, and function tests.
+    """
+    for name in names:
+        func = getattr(obj, name, None)
+        if func is not None:
+            if type(obj) == types.ModuleType:
+                # py.test compatibility
+                if isinstance(func, types.FunctionType):
+                    args, varargs, varkw, defaults = \
+                        inspect.getargspec(func)
+                else:
+                    # Not a function. If it's callable, call it anyway
+                    if hasattr(func, '__call__') and not inspect.ismethod(func):
+                        func = func.__call__
+                    try:
+                        args, varargs, varkw, defaults = \
+                            inspect.getargspec(func)
+                        args.pop(0) # pop the self off
+                    except TypeError:
+                        raise TypeError("Attribute %s of %r is not a python "
+                                        "function. Only functions or callables"
+                                        " may be used as fixtures." %
+                                        (name, obj))
+                if len(args):
+                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
+                    return func(obj)
+            log.debug("call fixture %s.%s", obj, name)
+            return func()
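+# For example (a sketch): ``try_run(mod, ('setup_module', 'setup'))`` calls
+# the first of those attributes found on ``mod``; module-level fixtures that
+# accept an argument receive the module itself.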
+
+
+def src(filename):
+    """Find the python source file for a .pyc, .pyo or $py.class file on
+    jython. Returns the filename provided if it is not a python source
+    file.
+    """
+    if filename is None:
+        return filename
+    if sys.platform.startswith('java') and filename.endswith('$py.class'):
+        return '.'.join((filename[:-9], 'py'))
+    base, ext = os.path.splitext(filename)
+    if ext in ('.pyc', '.pyo', '.py'):
+        return '.'.join((base, 'py'))
+    return filename
+
+
+def regex_last_key(regex):
+    """Sort key function factory that puts items that match a
+    regular expression last.
+
+    >>> from nose.config import Config
+    >>> from nose.pyversion import sort_list
+    >>> c = Config()
+    >>> regex = c.testMatch
+    >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
+    >>> sort_list(entries, regex_last_key(regex))
+    >>> entries
+    ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
+    """
+    def k(obj):
+        if regex.search(obj):
+            return (1, obj)
+        return (0, obj)
+    return k
+
+
+def tolist(val):
+    """Convert a value that may be a list or a (possibly comma-separated)
+    string into a list. The exception: None is returned as None, not [None].
+
+    >>> tolist(["one", "two"])
+    ['one', 'two']
+    >>> tolist("hello")
+    ['hello']
+    >>> tolist("separate,values, with, commas,  spaces , are    ,ok")
+    ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
+    """
+    if val is None:
+        return None
+    try:
+        # might already be a list
+        val.extend([])
+        return val
+    except AttributeError:
+        pass
+    # might be a string
+    try:
+        return re.split(r'\s*,\s*', val)
+    except TypeError:
+        # who knows...
+        return list(val)
+
+
+class odict(dict):
+    """Simple ordered dict implementation, based on:
+
+    http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
+    """
+    def __init__(self, *arg, **kw):
+        self._keys = []
+        super(odict, self).__init__(*arg, **kw)
+
+    def __delitem__(self, key):
+        super(odict, self).__delitem__(key)
+        self._keys.remove(key)
+
+    def __setitem__(self, key, item):
+        super(odict, self).__setitem__(key, item)
+        if key not in self._keys:
+            self._keys.append(key)
+
+    def __str__(self):
+        return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
+
+    def clear(self):
+        super(odict, self).clear()
+        self._keys = []
+
+    def copy(self):
+        d = super(odict, self).copy()
+        d._keys = self._keys[:]
+        return d
+
+    def items(self):
+        return zip(self._keys, self.values())
+
+    def keys(self):
+        return self._keys[:]
+
+    def setdefault(self, key, failobj=None):
+        item = super(odict, self).setdefault(key, failobj)
+        if key not in self._keys:
+            self._keys.append(key)
+        return item
+
+    def update(self, dict):
+        super(odict, self).update(dict)
+        for key in dict.keys():
+            if key not in self._keys:
+                self._keys.append(key)
+
+    def values(self):
+        return map(self.get, self._keys)
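+# Usage sketch: insertion order is preserved for keys added via item
+# assignment:
+#
+#   d = odict()
+#   d['b'] = 1
+#   d['a'] = 2
+#   d.keys()   # => ['b', 'a']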
+
+
+def transplant_func(func, module):
+    """
+    Make a function imported from module A appear as if it is located
+    in module B.
+
+    >>> from pprint import pprint
+    >>> pprint.__module__
+    'pprint'
+    >>> pp = transplant_func(pprint, __name__)
+    >>> pp.__module__
+    'nose.util'
+
+    The original function is not modified.
+
+    >>> pprint.__module__
+    'pprint'
+
+    Calling the transplanted function calls the original.
+
+    >>> pp([1, 2])
+    [1, 2]
+    >>> pprint([1,2])
+    [1, 2]
+
+    """
+    from nose.tools import make_decorator
+    if isgenerator(func):
+        def newfunc(*arg, **kw):
+            for v in func(*arg, **kw):
+                yield v
+    else:
+        def newfunc(*arg, **kw):
+            return func(*arg, **kw)
+
+    newfunc = make_decorator(func)(newfunc)
+    newfunc.__module__ = module
+    return newfunc
+
+
+def transplant_class(cls, module):
+    """
+    Make a class appear to reside in `module`, rather than the module in which
+    it is actually defined.
+
+    >>> from nose.failure import Failure
+    >>> Failure.__module__
+    'nose.failure'
+    >>> Nf = transplant_class(Failure, __name__)
+    >>> Nf.__module__
+    'nose.util'
+    >>> Nf.__name__
+    'Failure'
+
+    """
+    class C(cls):
+        pass
+    C.__module__ = module
+    C.__name__ = cls.__name__
+    return C
+
+
+def safe_str(val, encoding='utf-8'):
+    try:
+        return str(val)
+    except UnicodeEncodeError:
+        if isinstance(val, Exception):
+            return ' '.join([safe_str(arg, encoding)
+                             for arg in val])
+        return unicode(val).encode(encoding)
+
+
+def is_executable(file):
+    if not os.path.exists(file):
+        return False
+    st = os.stat(file)
+    return bool(st.st_mode & (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
+
+
+if __name__ == '__main__':
+    import doctest
+    doctest.testmod()
diff --git a/lib/spack/llnl/util/lang.py b/lib/spack/llnl/util/lang.py
index c84828951d45521a91c02385f454dc5df405e583..1c4d1ed623eddd189ae19c6b19e630db753bf456 100644
--- a/lib/spack/llnl/util/lang.py
+++ b/lib/spack/llnl/util/lang.py
@@ -26,6 +26,7 @@
 import re
 import sys
 import functools
+import collections
 import inspect
 
 # Ignore emacs backups when listing modules
@@ -167,16 +168,32 @@ def has_method(cls, name):
     return False
 
 
-def memoized(obj):
+class memoized(object):
     """Decorator that caches the results of a function, storing them
        in an attribute of that function."""
-    cache = obj.cache = {}
-    @functools.wraps(obj)
-    def memoizer(*args, **kwargs):
-        if args not in cache:
-            cache[args] = obj(*args, **kwargs)
-        return cache[args]
-    return memoizer
+    def __init__(self, func):
+        self.func = func
+        self.cache = {}
+
+
+    def __call__(self, *args):
+        if not isinstance(args, collections.Hashable):
+            # Not hashable, so just call the function.
+            return self.func(*args)
+
+        if args not in self.cache:
+            self.cache[args] = self.func(*args)
+        return self.cache[args]
+
+
+    def __get__(self, obj, objtype):
+        """Support instance methods."""
+        return functools.partial(self.__call__, obj)
+
+
+    def clear(self):
+        """Expunge cache so that self.func will be called again."""
+        self.cache.clear()
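+
+# Usage sketch (``fib`` is a hypothetical example function):
+#
+#   @memoized
+#   def fib(n):
+#       return n if n < 2 else fib(n - 1) + fib(n - 2)
+#
+#   fib(30)      # each distinct argument tuple is computed only once
+#   fib.clear()  # expunge cached results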
 
 
 def list_modules(directory, **kwargs):
diff --git a/lib/spack/llnl/util/tty/__init__.py b/lib/spack/llnl/util/tty/__init__.py
index 203f429a481f8158f5a8923b440970a2ade6b7dd..3ecd3a4ac2e2e03f7586d3567ed61b33f3471646 100644
--- a/lib/spack/llnl/util/tty/__init__.py
+++ b/lib/spack/llnl/util/tty/__init__.py
@@ -63,35 +63,46 @@ def msg(message, *args):
 def info(message, *args, **kwargs):
     format = kwargs.get('format', '*b')
     stream = kwargs.get('stream', sys.stdout)
+    wrap   = kwargs.get('wrap', False)
 
     cprint("@%s{==>} %s" % (format, cescape(str(message))), stream=stream)
     for arg in args:
-        lines = textwrap.wrap(
-            str(arg), initial_indent=indent, subsequent_indent=indent)
-        for line in lines:
-            stream.write(line + '\n')
+        if wrap:
+            lines = textwrap.wrap(
+                str(arg), initial_indent=indent, subsequent_indent=indent)
+            for line in lines:
+                stream.write(line + '\n')
+        else:
+            stream.write(indent + str(arg) + '\n')
 
 
-def verbose(message, *args):
+def verbose(message, *args, **kwargs):
     if _verbose:
-        info(message, *args, format='c')
+        kwargs.setdefault('format', 'c')
+        info(message, *args, **kwargs)
 
 
-def debug(message, *args):
+def debug(message, *args, **kwargs):
     if _debug:
-        info(message, *args, format='g', stream=sys.stderr)
+        kwargs.setdefault('format', 'g')
+        kwargs.setdefault('stream', sys.stderr)
+        info(message, *args, **kwargs)
 
 
-def error(message, *args):
-    info("Error: " + str(message), *args, format='*r', stream=sys.stderr)
+def error(message, *args, **kwargs):
+    kwargs.setdefault('format', '*r')
+    kwargs.setdefault('stream', sys.stderr)
+    info("Error: " + str(message), *args, **kwargs)
 
 
-def warn(message, *args):
-    info("Warning: " + str(message), *args, format='*Y', stream=sys.stderr)
+def warn(message, *args, **kwargs):
+    kwargs.setdefault('format', '*Y')
+    kwargs.setdefault('stream', sys.stderr)
+    info("Warning: " + str(message), *args, **kwargs)
 
 
-def die(message, *args):
-    error(message, *args)
+def die(message, *args, **kwargs):
+    error(message, *args, **kwargs)
     sys.exit(1)
 
 
diff --git a/lib/spack/llnl/util/tty/colify.py b/lib/spack/llnl/util/tty/colify.py
index 5545cf0311c0adcc24fe6e6e20d857cfa9a2ac56..47c3cc4f8f673f0e8cde2e181f6bc8c8281d84a8 100644
--- a/lib/spack/llnl/util/tty/colify.py
+++ b/lib/spack/llnl/util/tty/colify.py
@@ -210,6 +210,13 @@ def colify(elts, **options):
 
 
 def colify_table(table, **options):
+    """Version of colify() for data expressed in rows, (list of lists).
+
+       Same as regular colify but takes a list of lists, where each
+       sub-list must be the same length, and each is interpreted as a
+       row in a table.  Regular colify displays a sequential list of
+       values in columns.
+    """
     if table is None:
         raise TypeError("Can't call colify_table on NoneType")
     elif not table or not table[0]:
diff --git a/lib/spack/llnl/util/tty/log.py b/lib/spack/llnl/util/tty/log.py
index 2819cd40df3f4612b443b04d659494d9f826c0b5..22f1087e53648470cdd57723178a0d2f9ed6ea0e 100644
--- a/lib/spack/llnl/util/tty/log.py
+++ b/lib/spack/llnl/util/tty/log.py
@@ -122,6 +122,10 @@ def __init__(self, stream, echo=False, force_color=False, debug=False):
         self.force_color = force_color
         self.debug = debug
 
+        # Default is to try file-descriptor reassignment; fall back to
+        # direct assignment if the system out/err streams have no
+        # associated file descriptor.
+        self.directAssignment = False
+
     def trace(self, frame, event, arg):
         """Jumps to __exit__ on the child process."""
         raise _SkipWithBlock()
@@ -185,13 +189,21 @@ def __enter__(self):
             # Child: redirect output, execute the with block.
             os.close(read)
 
-            # Save old stdout and stderr
-            self._stdout = os.dup(sys.stdout.fileno())
-            self._stderr = os.dup(sys.stderr.fileno())
-
-            # redirect to the pipe.
-            os.dup2(write, sys.stdout.fileno())
-            os.dup2(write, sys.stderr.fileno())
+            try:
+                # Save old stdout and stderr
+                self._stdout = os.dup(sys.stdout.fileno())
+                self._stderr = os.dup(sys.stderr.fileno())
+
+                # redirect to the pipe.
+                os.dup2(write, sys.stdout.fileno())
+                os.dup2(write, sys.stderr.fileno())
+            except AttributeError:
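+                # The streams have been replaced by objects with no fileno()
+                # (e.g. by a test harness), so dup2() is impossible; swap the
+                # Python-level stream objects instead.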
+                self.direct_assignment = True
+                self._stdout = sys.stdout
+                self._stderr = sys.stderr
+                output_redirect = os.fdopen(write, 'w')
+                sys.stdout = output_redirect
+                sys.stderr = output_redirect
 
             if self.force_color:
                 color._force_color = True
@@ -218,8 +230,12 @@ def __exit__(self, exc_type, exception, traceback):
                 #
                 # TODO: think about how this works outside install.
                 # TODO: ideally would propagate exception to parent...
-                os.dup2(self._stdout, sys.stdout.fileno())
-                os.dup2(self._stderr, sys.stderr.fileno())
+                if self.direct_assignment:
+                    sys.stdout = self._stdout
+                    sys.stderr = self._stderr
+                else:
+                    os.dup2(self._stdout, sys.stdout.fileno())
+                    os.dup2(self._stderr, sys.stderr.fileno())
 
                 return False
 
diff --git a/lib/spack/spack/__init__.py b/lib/spack/spack/__init__.py
index 92cb417a85099de889906755727b84659c2a8ecb..ab78ecef30848fac2aff0455b636ac9c91206733 100644
--- a/lib/spack/spack/__init__.py
+++ b/lib/spack/spack/__init__.py
@@ -23,9 +23,11 @@
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
 import os
+import sys
 import tempfile
 import getpass
 from llnl.util.filesystem import *
+import llnl.util.tty as tty
 
 # This lives in $prefix/lib/spack/spack/__file__
 spack_root = ancestor(__file__, 4)
@@ -42,6 +44,7 @@
 hooks_path     = join_path(module_path, "hooks")
 var_path       = join_path(spack_root, "var", "spack")
 stage_path     = join_path(var_path, "stage")
+repos_path     = join_path(var_path, "repos")
 share_path     = join_path(spack_root, "share", "spack")
 
 prefix = spack_root
@@ -50,11 +53,14 @@
 etc_path       = join_path(prefix, "etc")
 
 #
-# Set up the packages database.
+# Set up the default packages database.
 #
-from spack.packages import PackageDB
-packages_path = join_path(var_path, "packages")
-db = PackageDB(packages_path)
+import spack.repository
+try:
+    repo = spack.repository.RepoPath()
+    sys.meta_path.append(repo)
+except spack.error.SpackError as e:
+    tty.die('while initializing Spack RepoPath:', e.message)
 
 #
 # Set up the installed packages database
@@ -63,13 +69,10 @@
 installed_db = Database(install_path)
 
 #
-# Paths to mock files for testing.
+# Paths to built-in Spack repositories.
 #
-mock_packages_path = join_path(var_path, "mock_packages")
-
-mock_config_path = join_path(var_path, "mock_configs")
-mock_site_config = join_path(mock_config_path, "site_spackconfig")
-mock_user_config = join_path(mock_config_path, "user_spackconfig")
+packages_path      = join_path(repos_path, "builtin")
+mock_packages_path = join_path(repos_path, "builtin.mock")
 
 #
 # This controls how spack lays out install prefixes and
@@ -149,7 +152,7 @@
 # When packages call 'from spack import *', this extra stuff is brought in.
 #
 # Spack internal code should call 'import spack' and accesses other
-# variables (spack.db, paths, etc.) directly.
+# variables (spack.repo, paths, etc.) directly.
 #
 # TODO: maybe this should be separated out and should go in build_environment.py?
 # TODO: it's not clear where all the stuff that needs to be included in packages
diff --git a/lib/spack/spack/build_environment.py b/lib/spack/spack/build_environment.py
index 84c2f6015b52b42f673324b8a66743f3cb9adf4a..b2db83acb7b7aa27fb29cad97daccf7842baac15 100644
--- a/lib/spack/spack/build_environment.py
+++ b/lib/spack/spack/build_environment.py
@@ -173,7 +173,7 @@ def add_env_path(path):
     path_set("PKG_CONFIG_PATH", pkg_config_dirs)
 
 
-def set_module_variables_for_package(pkg):
+def set_module_variables_for_package(pkg, m):
     """Populate the module scope of install() with some useful functions.
        This makes things easier for package writers.
     """
@@ -244,11 +244,32 @@ def get_rpaths(pkg):
     return rpaths
 
 
+def parent_class_modules(cls):
+    """Get list of super class modules that are all descend from spack.Package"""
+    if not issubclass(cls, spack.Package) or issubclass(spack.Package, cls):
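+    # Stop at spack.Package itself (and anything above it), and at
+    # classes that are not packages at all.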
+        return []
+    result = []
+    module = sys.modules.get(cls.__module__)
+    if module:
+        result = [ module ]
+    for c in cls.__bases__:
+        result.extend(parent_class_modules(c))
+    return result
+
+
 def setup_package(pkg):
     """Execute all environment setup routines."""
     set_compiler_environment_variables(pkg)
     set_build_environment_variables(pkg)
-    set_module_variables_for_package(pkg)
+
+    # If a user makes their own package repo, e.g.
+    # spack.repos.mystuff.libelf.Libelf, and they inherit from
+    # an existing class like spack.repos.original.libelf.Libelf,
+    # then set the module variables for both classes so the
+    # parent class can still use them if it gets called.
+    modules = parent_class_modules(pkg.__class__)
+    for mod in modules:
+        set_module_variables_for_package(pkg, mod)
 
     # Allow dependencies to set up environment as well.
     for dep_spec in pkg.spec.traverse(root=False):
diff --git a/lib/spack/spack/cmd/__init__.py b/lib/spack/spack/cmd/__init__.py
index 926e7ac14ab9b50fec0b0f73e0a15affeed43bc2..6c635a1e6c7c6367bd9cd1d519572405b5793581 100644
--- a/lib/spack/spack/cmd/__init__.py
+++ b/lib/spack/spack/cmd/__init__.py
@@ -31,6 +31,15 @@
 
 import spack
 import spack.spec
+import spack.config
+
+#
+# Settings for commands that modify configuration
+#
+# Commands that modify configuration by default modify the *highest* priority scope.
+default_modify_scope = spack.config.highest_precedence_scope().name
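+# (e.g. 'user' when the default 'site' and 'user' scopes are present).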
+# Commands that list configuration list *all* scopes by default.
+default_list_scope = None
 
 # cmd has a submodule called "list" so preserve the python list module
 python_list = list
diff --git a/lib/spack/spack/cmd/bootstrap.py b/lib/spack/spack/cmd/bootstrap.py
index e4ec7da35dbf5d165c5989b66ddbb181f4ac3ec1..bdbd623b39a6d8b5657f79437b367c592fc35546 100644
--- a/lib/spack/spack/cmd/bootstrap.py
+++ b/lib/spack/spack/cmd/bootstrap.py
@@ -42,7 +42,7 @@ def get_origin_url():
     git = which('git', required=True)
     origin_url = git(
         '--git-dir=%s' % git_dir, 'config', '--get', 'remote.origin.url',
-        return_output=True)
+        output=str)
     return origin_url.strip()
 
 
diff --git a/lib/spack/spack/cmd/checksum.py b/lib/spack/spack/cmd/checksum.py
index 6b7022a7a110b9ca07fb9df0b35dfa1fc7570606..b1ad89dbb8a6f36de3fe31f99ee2f10673fbd46e 100644
--- a/lib/spack/spack/cmd/checksum.py
+++ b/lib/spack/spack/cmd/checksum.py
@@ -81,7 +81,7 @@ def get_checksums(versions, urls, **kwargs):
 
 def checksum(parser, args):
     # get the package we're going to generate checksums for
-    pkg = spack.db.get(args.package)
+    pkg = spack.repo.get(args.package)
 
     # If the user asked for specific versions, use those.
     if args.versions:
diff --git a/lib/spack/spack/cmd/clean.py b/lib/spack/spack/cmd/clean.py
index c3409887fb0f82d5ecfa636b87c2376392a53b2b..6e7179122c159cd44bd9d41bfe08e4483270d03e 100644
--- a/lib/spack/spack/cmd/clean.py
+++ b/lib/spack/spack/cmd/clean.py
@@ -42,5 +42,5 @@ def clean(parser, args):
 
     specs = spack.cmd.parse_specs(args.packages, concretize=True)
     for spec in specs:
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         package.do_clean()
diff --git a/lib/spack/spack/cmd/compiler.py b/lib/spack/spack/cmd/compiler.py
index 589ca87fb57137fffc026c72a387add7c6f48eae..75b51f6b495deda09f6a8dc80d9eaae9d5eac890 100644
--- a/lib/spack/spack/cmd/compiler.py
+++ b/lib/spack/spack/cmd/compiler.py
@@ -22,6 +22,7 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import sys
 import argparse
 
 import llnl.util.tty as tty
@@ -41,17 +42,32 @@ def setup_parser(subparser):
     sp = subparser.add_subparsers(
         metavar='SUBCOMMAND', dest='compiler_command')
 
-    update_parser = sp.add_parser(
-        'add', help='Add compilers to the Spack configuration.')
-    update_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
-
-    remove_parser = sp.add_parser('remove', help='remove compiler')
-    remove_parser.add_argument('path')
-
-    list_parser   = sp.add_parser('list', help='list available compilers')
-
-    info_parser   = sp.add_parser('info', help='Show compiler paths.')
+    scopes = spack.config.config_scopes
+
+    # Add
+    add_parser = sp.add_parser('add', help='Add compilers to the Spack configuration.')
+    add_parser.add_argument('add_paths', nargs=argparse.REMAINDER)
+    add_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+                            help="Configuration scope to modify.")
+
+    # Remove
+    remove_parser = sp.add_parser('remove', aliases=['rm'], help='Remove compiler by spec.')
+    remove_parser.add_argument(
+        '-a', '--all', action='store_true', help='Remove ALL compilers that match spec.')
+    remove_parser.add_argument('compiler_spec')
+    remove_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+                               help="Configuration scope to modify.")
+
+    # List
+    list_parser = sp.add_parser('list', help='list available compilers')
+    list_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_list_scope,
+                             help="Configuration scope to read from.")
+
+    # Info
+    info_parser = sp.add_parser('info', help='Show compiler paths.')
     info_parser.add_argument('compiler_spec')
+    info_parser.add_argument('--scope', choices=scopes, default=spack.cmd.default_list_scope,
+                             help="Configuration scope to read from.")
 
 
 def compiler_add(args):
@@ -62,26 +78,40 @@ def compiler_add(args):
         paths = get_path('PATH')
 
     compilers = [c for c in spack.compilers.find_compilers(*args.add_paths)
-                 if c.spec not in spack.compilers.all_compilers()]
+                 if c.spec not in spack.compilers.all_compilers(scope=args.scope)]
 
     if compilers:
-        spack.compilers.add_compilers_to_config('user', *compilers)
+        spack.compilers.add_compilers_to_config(compilers, scope=args.scope)
         n = len(compilers)
-        tty.msg("Added %d new compiler%s to %s" % (
-            n, 's' if n > 1 else '', spack.config.get_config_scope_filename('user', 'compilers')))
+        s = 's' if n > 1 else ''
+        filename = spack.config.get_config_filename(args.scope, 'compilers')
+        tty.msg("Added %d new compiler%s to %s" % (n, s, filename))
         colify(reversed(sorted(c.spec for c in compilers)), indent=4)
     else:
         tty.msg("Found no new compilers")
 
 
 def compiler_remove(args):
-    pass
+    cspec = CompilerSpec(args.compiler_spec)
+    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
+
+    if not compilers:
+        tty.die("No compilers match spec %s." % cspec)
+    elif not args.all and len(compilers) > 1:
+        tty.error("Multiple compilers match spec %s. Choose one:" % cspec)
+        colify(reversed(sorted([c.spec for c in compilers])), indent=4)
+        tty.msg("Or, you can use `spack compiler remove -a` to remove all of them.")
+        sys.exit(1)
+
+    for compiler in compilers:
+        spack.compilers.remove_compiler_from_config(compiler.spec, scope=args.scope)
+        tty.msg("Removed compiler %s." % compiler.spec)
 
 
 def compiler_info(args):
     """Print info about all compilers matching a spec."""
     cspec = CompilerSpec(args.compiler_spec)
-    compilers = spack.compilers.compilers_for_spec(cspec)
+    compilers = spack.compilers.compilers_for_spec(cspec, scope=args.scope)
 
     if not compilers:
         tty.error("No compilers match spec %s." % cspec)
@@ -96,7 +126,7 @@ def compiler_info(args):
 
 def compiler_list(args):
     tty.msg("Available compilers")
-    index = index_by(spack.compilers.all_compilers(), 'name')
+    index = index_by(spack.compilers.all_compilers(scope=args.scope), 'name')
     for i, (name, compilers) in enumerate(index.items()):
         if i >= 1: print
 
@@ -108,6 +138,7 @@ def compiler_list(args):
 def compiler(parser, args):
     action = { 'add'    : compiler_add,
                'remove' : compiler_remove,
+               'rm'     : compiler_remove,
                'info'   : compiler_info,
                'list'   : compiler_list }
     action[args.compiler_command](args)
diff --git a/lib/spack/spack/cmd/compilers.py b/lib/spack/spack/cmd/compilers.py
index c485a910ebd277473bd97624ad937e8f6e3fa799..7e09016f2dc6f7313a4cca5fc0117cf43368afeb 100644
--- a/lib/spack/spack/cmd/compilers.py
+++ b/lib/spack/spack/cmd/compilers.py
@@ -26,9 +26,14 @@
 from llnl.util.tty.colify import colify
 from llnl.util.lang import index_by
 
+import spack
 from spack.cmd.compiler import compiler_list
 
 description = "List available compilers. Same as 'spack compiler list'."
 
+def setup_parser(subparser):
+    subparser.add_argument('--scope', choices=spack.config.config_scopes,
+                           help="Configuration scope to read/modify.")
+
 def compilers(parser, args):
     compiler_list(args)
diff --git a/lib/spack/spack/cmd/config.py b/lib/spack/spack/cmd/config.py
index a6e914131ee3cf0ad17ab6ccf471b3bae66f21fc..5e6d4e4d7d7dfdfb9175216c9a17015d36a08925 100644
--- a/lib/spack/spack/cmd/config.py
+++ b/lib/spack/spack/cmd/config.py
@@ -44,22 +44,22 @@ def setup_parser(subparser):
     sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='config_command')
 
     get_parser = sp.add_parser('get', help='Print configuration values.')
-    get_parser.add_argument('category', help="Configuration category to print.")
+    get_parser.add_argument('section', help="Configuration section to print.")
 
     edit_parser = sp.add_parser('edit', help='Edit configuration file.')
-    edit_parser.add_argument('category', help="Configuration category to edit")
+    edit_parser.add_argument('section', help="Configuration section to edit")
 
 
 def config_get(args):
-    spack.config.print_category(args.category)
+    spack.config.print_section(args.section)
 
 
 def config_edit(args):
     if not args.scope:
         args.scope = 'user'
-    if not args.category:
-        args.category = None
-    config_file = spack.config.get_config_scope_filename(args.scope, args.category)
+    if not args.section:
+        args.section = None
+    config_file = spack.config.get_config_filename(args.scope, args.section)
     spack.editor(config_file)
 
 
diff --git a/lib/spack/spack/cmd/create.py b/lib/spack/spack/cmd/create.py
index 9ecb709110ca2c8e7387f552b3d1a93ab33ab8eb..edcea0718cd502e7767586b3afc1a8f4f9bc3349 100644
--- a/lib/spack/spack/cmd/create.py
+++ b/lib/spack/spack/cmd/create.py
@@ -36,7 +36,9 @@
 import spack.cmd.checksum
 import spack.url
 import spack.util.web
+from spack.spec import Spec
 from spack.util.naming import *
+from spack.repository import Repo, RepoError
 import spack.util.crypto as crypto
 
 from spack.util.executable import which
@@ -85,18 +87,34 @@ def install(self, spec, prefix):
 """)
 
 
+def make_version_calls(ver_hash_tuples):
+    """Adds a version() call to the package for each version found."""
+    max_len = max(len(str(v)) for v, h in ver_hash_tuples)
+    format = "    version(%%-%ds, '%%s')" % (max_len + 2)
+    return '\n'.join(format % ("'%s'" % v, h) for v, h in ver_hash_tuples)
+
+
 def setup_parser(subparser):
     subparser.add_argument('url', nargs='?', help="url of package archive")
     subparser.add_argument(
-        '--keep-stage', action='store_true', dest='keep_stage',
+        '--keep-stage', action='store_true',
         help="Don't clean up staging area when command completes.")
     subparser.add_argument(
-        '-n', '--name', dest='alternate_name', default=None,
+        '-n', '--name', dest='alternate_name', default=None, metavar='NAME',
         help="Override the autodetected name for the created package.")
+    subparser.add_argument(
+        '-r', '--repo', default=None,
+        help="Path to a repository where the package should be created.")
+    subparser.add_argument(
+        '-N', '--namespace',
+        help="Specify a namespace for the package. Must be the namespace of "
+        "a repository registered with Spack.")
     subparser.add_argument(
         '-f', '--force', action='store_true', dest='force',
         help="Overwrite any existing package file with the same name.")
 
+    setup_parser.subparser = subparser
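+    # This lets create() print the subcommand's help when no url is given.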
+
 
 class ConfigureGuesser(object):
     def __call__(self, stage):
@@ -114,7 +132,7 @@ def __call__(self, stage):
         # Peek inside the tarball.
         tar = which('tar')
         output = tar(
-            "--exclude=*/*/*", "-tf", stage.archive_file, return_output=True)
+            "--exclude=*/*/*", "-tf", stage.archive_file, output=str)
         lines = output.split("\n")
 
         # Set the configure line to the one that matched.
@@ -134,16 +152,7 @@ def __call__(self, stage):
         self.build_system = build_system
 
 
-def make_version_calls(ver_hash_tuples):
-    """Adds a version() call to the package for each version found."""
-    max_len = max(len(str(v)) for v, h in ver_hash_tuples)
-    format = "    version(%%-%ds, '%%s')" % (max_len + 2)
-    return '\n'.join(format % ("'%s'" % v, h) for v, h in ver_hash_tuples)
-
-
-def create(parser, args):
-    url = args.url
-
+def guess_name_and_version(url, args):
     # Try to deduce name and version of the new package from the URL
     version = spack.url.parse_version(url)
     if not version:
@@ -160,12 +169,52 @@ def create(parser, args):
             tty.die("Couldn't guess a name for this package. Try running:", "",
                     "spack create --name <name> <url>")
 
-    if not valid_module_name(name):
+    if not valid_fully_qualified_module_name(name):
         tty.die("Package name can only contain A-Z, a-z, 0-9, '_' and '-'")
 
-    tty.msg("This looks like a URL for %s version %s." % (name, version))
-    tty.msg("Creating template for package %s" % name)
+    return name, version
+
 
+def find_repository(spec, args):
+    # figure out namespace for spec
+    if spec.namespace and args.namespace and spec.namespace != args.namespace:
+        tty.die("Namespaces '%s' and '%s' do not match." % (spec.namespace, args.namespace))
+
+    if not spec.namespace and args.namespace:
+        spec.namespace = args.namespace
+
+    # Figure out where the new package should live.
+    repo_path = args.repo
+    if repo_path is not None:
+        try:
+            repo = Repo(repo_path)
+            if spec.namespace and spec.namespace != repo.namespace:
+                tty.die("Can't create package with namespace %s in repo with namespace %s."
+                        % (spec.namespace, repo.namespace))
+        except RepoError as e:
+            tty.die(str(e))
+    else:
+        if spec.namespace:
+            repo = spack.repo.get_repo(spec.namespace, None)
+            if not repo:
+                tty.die("Unknown namespace: %s" % spec.namespace)
+        else:
+            repo = spack.repo.first_repo()
+
+    # Set the namespace on the spec if it's not there already
+    if not spec.namespace:
+        spec.namespace = repo.namespace
+
+    return repo
+
+
+def fetch_tarballs(url, name, args):
+    """Try to find versions of the supplied archive by scraping the web.
+
+    Prompts the user to select how many to download if many are found.
+    """
     versions = spack.util.web.find_versions_of_archive(url)
     rkeys = sorted(versions.keys(), reverse=True)
     versions = OrderedDict(zip(rkeys, (versions[v] for v in rkeys)))
@@ -184,13 +233,35 @@ def create(parser, args):
             default=5, abort='q')
 
         if not archives_to_fetch:
-            tty.msg("Aborted.")
-            return
+            tty.die("Aborted.")
+
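+    # Return the newest n versions and their URLs, newest first.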
+    sorted_versions = sorted(versions.keys(), reverse=True)
+    sorted_urls = [versions[v] for v in sorted_versions]
+    return sorted_versions[:archives_to_fetch], sorted_urls[:archives_to_fetch]
+
+
+def create(parser, args):
+    url = args.url
+    if not url:
+        setup_parser.subparser.print_help()
+        return
+
+    # Figure out a name and repo for the package.
+    name, version = guess_name_and_version(url, args)
+    spec = Spec(name)
+    name = spec.name  # factors out namespace, if any
+    repo = find_repository(spec, args)
+
+    tty.msg("This looks like a URL for %s version %s." % (name, version))
+    tty.msg("Creating template for package %s" % name)
+
+    # Fetch tarballs (prompting user if necessary)
+    versions, urls = fetch_tarballs(url, name, args)
 
+    # Try to guess what configure system is used.
     guesser = ConfigureGuesser()
     ver_hash_tuples = spack.cmd.checksum.get_checksums(
-        versions.keys()[:archives_to_fetch],
-        [versions[v] for v in versions.keys()[:archives_to_fetch]],
+        versions, urls,
         first_stage_function=guesser,
         keep_stage=args.keep_stage)
 
@@ -202,7 +273,7 @@ def create(parser, args):
         name = 'py-%s' % name
 
     # Create a directory for the new package.
-    pkg_path = spack.db.filename_for_package_name(name)
+    pkg_path = repo.filename_for_package_name(name)
     if os.path.exists(pkg_path) and not args.force:
         tty.die("%s already exists." % pkg_path)
     else:
diff --git a/lib/spack/spack/cmd/deactivate.py b/lib/spack/spack/cmd/deactivate.py
index a0c78bf755ba20edf836586ca6718767c8e87fa6..d6b23d6a08eda1b392bd33b549e7c43e691ad6df 100644
--- a/lib/spack/spack/cmd/deactivate.py
+++ b/lib/spack/spack/cmd/deactivate.py
@@ -37,7 +37,7 @@ def setup_parser(subparser):
         help="Run deactivation even if spec is NOT currently activated.")
     subparser.add_argument(
         '-a', '--all', action='store_true',
-        help="Deactivate all extensions of an extendable pacakge, or "
+        help="Deactivate all extensions of an extendable package, or "
         "deactivate an extension AND its dependencies.")
     subparser.add_argument(
         'spec', nargs=argparse.REMAINDER, help="spec of package extension to deactivate.")
diff --git a/lib/spack/spack/cmd/diy.py b/lib/spack/spack/cmd/diy.py
index ebe8424f09e4d536cd6e664a640546b48680c1ee..9df53312f80583285eaa0a4f339cef962eb056db 100644
--- a/lib/spack/spack/cmd/diy.py
+++ b/lib/spack/spack/cmd/diy.py
@@ -61,7 +61,7 @@ def diy(self, args):
     # Take a write lock before checking for existence.
     with spack.installed_db.write_transaction():
         spec = specs[0]
-        if not spack.db.exists(spec.name):
+        if not spack.repo.exists(spec.name):
             tty.warn("No such package: %s" % spec.name)
             create = tty.get_yes_or_no("Create this package?", default=False)
             if not create:
@@ -69,14 +69,14 @@ def diy(self, args):
                 sys.exit(1)
             else:
                 tty.msg("Running 'spack edit -f %s'" % spec.name)
-                edit_package(spec.name, True)
+                edit_package(spec.name, spack.repo.first_repo(), None, True)
                 return
 
         if not spec.version.concrete:
             tty.die("spack diy spec must have a single, concrete version.")
 
         spec.concretize()
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
 
         if package.installed:
             tty.error("Already installed in %s" % package.prefix)
diff --git a/lib/spack/spack/cmd/edit.py b/lib/spack/spack/cmd/edit.py
index b168d967b93170475bae70a1e8dc36199de32b4e..a20e40df9bdf74981c22091a05bc48f8de8ac0cf 100644
--- a/lib/spack/spack/cmd/edit.py
+++ b/lib/spack/spack/cmd/edit.py
@@ -30,6 +30,8 @@
 
 import spack
 import spack.cmd
+from spack.spec import Spec
+from spack.repository import Repo
 from spack.util.naming import mod_to_class
 
 description = "Open package files in $EDITOR"
@@ -53,9 +55,16 @@ def install(self, spec, prefix):
 """)
 
 
-def edit_package(name, force=False):
-    path = spack.db.filename_for_package_name(name)
+def edit_package(name, repo_path, namespace, force=False):
+    if repo_path:
+        repo = Repo(repo_path)
+    elif namespace:
+        repo = spack.repo.get_repo(namespace)
+    else:
+        repo = spack.repo
+    path = repo.filename_for_package_name(name)
 
+    spec = Spec(name)
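+    # Parsing as a Spec lets namespace-qualified names reduce to spec.name.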
     if os.path.exists(path):
         if not os.path.isfile(path):
             tty.die("Something's wrong.  '%s' is not a file!" % path)
@@ -63,13 +72,13 @@ def edit_package(name, force=False):
             tty.die("Insufficient permissions on '%s'!" % path)
     elif not force:
         tty.die("No package '%s'.  Use spack create, or supply -f/--force "
-                "to edit a new file." % name)
+                "to edit a new file." % spec.name)
     else:
         mkdirp(os.path.dirname(path))
         with open(path, "w") as pkg_file:
             pkg_file.write(
                 package_template.substitute(
-                    name=name, class_name=mod_to_class(name)))
+                    name=spec.name, class_name=mod_to_class(spec.name)))
 
     spack.editor(path)
 
@@ -78,9 +87,26 @@ def setup_parser(subparser):
     subparser.add_argument(
         '-f', '--force', dest='force', action='store_true',
         help="Open a new file in $EDITOR even if package doesn't exist.")
-    subparser.add_argument(
-        '-c', '--command', dest='edit_command', action='store_true',
-        help="Edit the command with the supplied name instead of a package.")
+
+    excl_args = subparser.add_mutually_exclusive_group()
+
+    # Various filetypes you can edit directly from the cmd line.
+    excl_args.add_argument(
+        '-c', '--command', dest='path', action='store_const',
+        const=spack.cmd.command_path, help="Edit the command with the supplied name.")
+    excl_args.add_argument(
+        '-t', '--test', dest='path', action='store_const',
+        const=spack.test_path, help="Edit the test with the supplied name.")
+    excl_args.add_argument(
+        '-m', '--module', dest='path', action='store_const',
+        const=spack.module_path, help="Edit the main spack module with the supplied name.")
+
+    # Options for editing packages
+    excl_args.add_argument(
+        '-r', '--repo', default=None, help="Path to repo to edit package in.")
+    excl_args.add_argument(
+        '-N', '--namespace', default=None, help="Namespace of package to edit.")
+
     subparser.add_argument(
         'name', nargs='?', default=None, help="name of package to edit")
 
@@ -88,19 +114,17 @@ def setup_parser(subparser):
 def edit(parser, args):
     name = args.name
 
-    if args.edit_command:
-        if not name:
-            path = spack.cmd.command_path
-        else:
-            path = join_path(spack.cmd.command_path, name + ".py")
-            if not os.path.exists(path):
+    path = spack.packages_path
+    if args.path:
+        path = args.path
+        if name:
+            path = join_path(path, name + ".py")
+            if not args.force and not os.path.exists(path):
                 tty.die("No command named '%s'." % name)
         spack.editor(path)
 
+    elif name:
+        edit_package(name, args.repo, args.namespace, args.force)
     else:
         # By default open the directory where packages or commands live.
-        if not name:
-            path = spack.packages_path
-            spack.editor(path)
-        else:
-            edit_package(name, args.force)
+        spack.editor(path)
diff --git a/lib/spack/spack/cmd/extensions.py b/lib/spack/spack/cmd/extensions.py
index 2ce6f406caeb039cc88aeefda908ca84fb9cdd56..ccb0fe4e1fbe91bbfac4184ceef370b2b5579816 100644
--- a/lib/spack/spack/cmd/extensions.py
+++ b/lib/spack/spack/cmd/extensions.py
@@ -74,8 +74,7 @@ def extensions(parser, args):
 
     #
     # List package names of extensions
-    #
-    extensions = spack.db.extensions_for(spec)
+    extensions = spack.repo.extensions_for(spec)
     if not extensions:
         tty.msg("%s has no extensions." % spec.cshort_spec)
         return
diff --git a/lib/spack/spack/cmd/fetch.py b/lib/spack/spack/cmd/fetch.py
index 57d6f6b63b3e98124406069c53707ca8c5516db2..adad545cae37f2cc5fc734aeeaab7151b4b0a720 100644
--- a/lib/spack/spack/cmd/fetch.py
+++ b/lib/spack/spack/cmd/fetch.py
@@ -52,10 +52,10 @@ def fetch(parser, args):
         if args.missing or args.dependencies:
             to_fetch = set()
             for s in spec.traverse():
-                package = spack.db.get(s)
+                package = spack.repo.get(s)
                 if args.missing and package.installed:
                     continue
                 package.do_fetch()
 
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         package.do_fetch()
diff --git a/lib/spack/spack/cmd/find.py b/lib/spack/spack/cmd/find.py
index c7a376fd8dac8f048068633cd7db138393169420..714f1d514b36c151b84641e7ef6fb4edd6f9e6fd 100644
--- a/lib/spack/spack/cmd/find.py
+++ b/lib/spack/spack/cmd/find.py
@@ -40,6 +40,9 @@
 
 def setup_parser(subparser):
     format_group = subparser.add_mutually_exclusive_group()
+    format_group.add_argument(
+        '-s', '--short', action='store_const', dest='mode', const='short',
+        help='Show only specs (default)')
     format_group.add_argument(
         '-p', '--paths', action='store_const', dest='mode', const='paths',
         help='Show paths to package install directories')
@@ -48,21 +51,24 @@ def setup_parser(subparser):
         help='Show full dependency DAG of installed packages')
 
     subparser.add_argument(
-        '-l', '--long', action='store_true', dest='long',
+        '-l', '--long', action='store_true',
         help='Show dependency hashes as well as versions.')
     subparser.add_argument(
-        '-L', '--very-long', action='store_true', dest='very_long',
+        '-L', '--very-long', action='store_true',
         help='Show dependency hashes as well as versions.')
 
     subparser.add_argument(
-        '-u', '--unknown', action='store_true', dest='unknown',
+        '-u', '--unknown', action='store_true',
         help='Show only specs Spack does not have a package for.')
     subparser.add_argument(
-        '-m', '--missing', action='store_true', dest='missing',
+        '-m', '--missing', action='store_true',
         help='Show missing dependencies as well as installed specs.')
     subparser.add_argument(
-        '-M', '--only-missing', action='store_true', dest='only_missing',
+        '-M', '--only-missing', action='store_true',
         help='Show only missing dependencies.')
+    subparser.add_argument(
+        '-N', '--namespace', action='store_true',
+        help='Show fully qualified package names.')
 
     subparser.add_argument(
         'query_specs', nargs=argparse.REMAINDER,
@@ -76,6 +82,7 @@ def gray_hash(spec, length):
 def display_specs(specs, **kwargs):
     mode = kwargs.get('mode', 'short')
     hashes = kwargs.get('long', False)
+    namespace = kwargs.get('namespace', False)
 
     hlen = 7
     if kwargs.get('very_long', False):
@@ -97,7 +104,8 @@ def display_specs(specs, **kwargs):
         specs = index[(architecture,compiler)]
         specs.sort()
 
-        abbreviated = [s.format('$_$@$+', color=True) for s in specs]
+        nfmt = '.' if namespace else '_'
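+        # '$.' formats the namespace-qualified name; '$_' the name alone.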
+        abbreviated = [s.format('$%s$@$+' % nfmt, color=True) for s in specs]
         if mode == 'paths':
             # Print one spec per line along with prefix path
             width = max(len(s) for s in abbreviated)
@@ -112,7 +120,7 @@ def display_specs(specs, **kwargs):
         elif mode == 'deps':
             for spec in specs:
                 print spec.tree(
-                    format='$_$@$+',
+                    format='$%s$@$+' % nfmt,
                     color=True,
                     indent=4,
                     prefix=(lambda s: gray_hash(s, hlen)) if hashes else None)
@@ -122,7 +130,7 @@ def fmt(s):
                 string = ""
                 if hashes:
                     string += gray_hash(s, hlen) + ' '
-                string += s.format('$-_$@$+', color=True)
+                string += s.format('$-%s$@$+' % nfmt, color=True)
 
                 return string
             colify(fmt(s) for s in specs)
@@ -137,7 +145,7 @@ def find(parser, args):
     # Filter out specs that don't exist.
     query_specs = spack.cmd.parse_specs(args.query_specs)
     query_specs, nonexisting = partition_list(
-        query_specs, lambda s: spack.db.exists(s.name))
+        query_specs, lambda s: spack.repo.exists(s.name))
 
     if nonexisting:
         msg = "No such package%s: " % ('s' if len(nonexisting) > 1 else '')
@@ -171,4 +179,5 @@ def find(parser, args):
         tty.msg("%d installed packages." % len(specs))
     display_specs(specs, mode=args.mode,
                   long=args.long,
-                  very_long=args.very_long)
+                  very_long=args.very_long,
+                  namespace=args.namespace)
diff --git a/lib/spack/spack/cmd/info.py b/lib/spack/spack/cmd/info.py
index 8040e2393682dddb16d0bc510e701c3c0be1f92f..e7abe7f4a5921d6219d8aa9b9d6ca7e2d987127c 100644
--- a/lib/spack/spack/cmd/info.py
+++ b/lib/spack/spack/cmd/info.py
@@ -105,5 +105,5 @@ def print_text_info(pkg):
 
 
 def info(parser, args):
-    pkg = spack.db.get(args.name)
+    pkg = spack.repo.get(args.name)
     print_text_info(pkg)
diff --git a/lib/spack/spack/cmd/install.py b/lib/spack/spack/cmd/install.py
index 5ee7bc01b7109986da00b7a130ada8207126a507..1fef750c80262450a82b45de49a60c07ca0f7e13 100644
--- a/lib/spack/spack/cmd/install.py
+++ b/lib/spack/spack/cmd/install.py
@@ -70,7 +70,7 @@ def install(parser, args):
 
     specs = spack.cmd.parse_specs(args.packages, concretize=True)
     for spec in specs:
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         with spack.installed_db.write_transaction():
             package.do_install(
                 keep_prefix=args.keep_prefix,
diff --git a/lib/spack/spack/cmd/list.py b/lib/spack/spack/cmd/list.py
index b51d5b429a6a56a5b6c89d824fff9b0ffb19c559..7c50ccb9cd58dfd398547760874fdd193f93f230 100644
--- a/lib/spack/spack/cmd/list.py
+++ b/lib/spack/spack/cmd/list.py
@@ -43,7 +43,7 @@ def setup_parser(subparser):
 
 def list(parser, args):
     # Start with all package names.
-    pkgs = spack.db.all_package_names()
+    pkgs = spack.repo.all_package_names()
 
     # filter if a filter arg was provided
     if args.filter:
diff --git a/lib/spack/spack/cmd/location.py b/lib/spack/spack/cmd/location.py
index e805cc4012621fc0150c7d554b375edc086d7b50..307ee8982d5fb8c8719620372b4420e0ebb9e706 100644
--- a/lib/spack/spack/cmd/location.py
+++ b/lib/spack/spack/cmd/location.py
@@ -32,7 +32,7 @@
 import spack
 import spack.cmd
 
-description="Print out locations of various diectories used by Spack"
+description="Print out locations of various directories used by Spack"
 
 def setup_parser(subparser):
     global directories
@@ -72,7 +72,7 @@ def location(parser, args):
         print spack.prefix
 
     elif args.packages:
-        print spack.db.root
+        print spack.repo.root
 
     elif args.stages:
         print spack.stage_path
@@ -94,12 +94,12 @@ def location(parser, args):
 
             if args.package_dir:
                 # This one just needs the spec name.
-                print join_path(spack.db.root, spec.name)
+                print join_path(spack.repo.root, spec.name)
 
             else:
                 # These versions need concretized specs.
                 spec.concretize()
-                pkg = spack.db.get(spec)
+                pkg = spack.repo.get(spec)
 
                 if args.stage_dir:
                     print pkg.stage.path
diff --git a/lib/spack/spack/cmd/mirror.py b/lib/spack/spack/cmd/mirror.py
index 89d51bbe0465647b5b30fef3626b0e08b255dca9..8e9438c1a374d09dc2ea3d445a167c246f935b28 100644
--- a/lib/spack/spack/cmd/mirror.py
+++ b/lib/spack/spack/cmd/mirror.py
@@ -36,6 +36,7 @@
 import spack.mirror
 from spack.spec import Spec
 from spack.error import SpackError
+from spack.util.spack_yaml import syaml_dict
 
 description = "Manage mirrors."
 
@@ -47,28 +48,46 @@ def setup_parser(subparser):
     sp = subparser.add_subparsers(
         metavar='SUBCOMMAND', dest='mirror_command')
 
+    # Create
     create_parser = sp.add_parser('create', help=mirror_create.__doc__)
     create_parser.add_argument('-d', '--directory', default=None,
                                help="Directory in which to create mirror.")
     create_parser.add_argument(
-        'specs', nargs=argparse.REMAINDER, help="Specs of packages to put in mirror")
+        'specs', nargs=argparse.REMAINDER,
+        help="Specs of packages to put in mirror")
     create_parser.add_argument(
         '-f', '--file', help="File with specs of packages to put in mirror.")
     create_parser.add_argument(
-        '-D', '--dependencies', action='store_true', help="Also fetch all dependencies")
+        '-D', '--dependencies', action='store_true',
+        help="Also fetch all dependencies")
     create_parser.add_argument(
         '-o', '--one-version-per-spec', action='store_const', const=1, default=0,
         help="Only fetch one 'preferred' version per spec, not all known versions.")
 
+    scopes = spack.config.config_scopes
+
+    # Add
     add_parser = sp.add_parser('add', help=mirror_add.__doc__)
     add_parser.add_argument('name', help="Mnemonic name for mirror.")
     add_parser.add_argument(
         'url', help="URL of mirror directory created by 'spack mirror create'.")
+    add_parser.add_argument(
+        '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+        help="Configuration scope to modify.")
 
-    remove_parser = sp.add_parser('remove', help=mirror_remove.__doc__)
+    # Remove
+    remove_parser = sp.add_parser('remove', aliases=['rm'],
+                                  help=mirror_remove.__doc__)
     remove_parser.add_argument('name')
+    remove_parser.add_argument(
+        '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+        help="Configuration scope to modify.")
 
+    # List
     list_parser = sp.add_parser('list', help=mirror_list.__doc__)
+    list_parser.add_argument(
+        '--scope', choices=scopes, default=spack.cmd.default_list_scope,
+        help="Configuration scope to read from.")
 
 
 def mirror_add(args):
@@ -77,43 +96,65 @@ def mirror_add(args):
     if url.startswith('/'):
         url = 'file://' + url
 
-    mirror_dict = { args.name : url }
-    spack.config.add_to_mirror_config({ args.name : url })
+    mirrors = spack.config.get_config('mirrors', scope=args.scope)
+    if not mirrors:
+        mirrors = syaml_dict()
+
+    for name, u in mirrors.items():
+        if name == args.name:
+            tty.die("Mirror with name %s already exists." % name)
+        if u == url:
+            tty.die("Mirror with url %s already exists." % url)
+        # Each mirror entry maps a single name to a single url.
+
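+    # Insert the new mirror first so it takes precedence over existing ones.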
+    items = [(n, u) for n, u in mirrors.items()]
+    items.insert(0, (args.name, url))
+    mirrors = syaml_dict(items)
+    spack.config.update_config('mirrors', mirrors, scope=args.scope)
 
 
 def mirror_remove(args):
     """Remove a mirror by name."""
     name = args.name
 
-    rmd_something = spack.config.remove_from_config('mirrors', name)
-    if not rmd_something:
-        tty.die("No such mirror: %s" % name)
+    mirrors = spack.config.get_config('mirrors', scope=args.scope)
+    if not mirrors:
+        mirrors = syaml_dict()
+
+    if not name in mirrors:
+        tty.die("No mirror with name %s" % name)
+
+    old_value = mirrors.pop(name)
+    spack.config.update_config('mirrors', mirrors, scope=args.scope)
+    tty.msg("Removed mirror %s with url %s." % (name, old_value))
 
 
 def mirror_list(args):
     """Print out available mirrors to the console."""
-    sec_names = spack.config.get_mirror_config()
-    if not sec_names:
+    mirrors = spack.config.get_config('mirrors', scope=args.scope)
+    if not mirrors:
         tty.msg("No mirrors configured.")
         return
 
-    max_len = max(len(s) for s in sec_names)
+    max_len = max(len(n) for n in mirrors.keys())
     fmt = "%%-%ds%%s" % (max_len + 4)
 
-    for name, val in sec_names.iteritems():
-        print fmt % (name, val)
+    for name in mirrors:
+        print fmt % (name, mirrors[name])
 
 
 def _read_specs_from_file(filename):
+    specs = []
     with open(filename, "r") as stream:
         for i, string in enumerate(stream):
             try:
                 s = Spec(string)
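+                # Accessing s.package verifies the spec names a real package.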
                 s.package
-                args.specs.append(s)
+                specs.append(s)
             except SpackError, e:
                 tty.die("Parse error in %s, line %d:" % (args.file, i+1),
                         ">>> " + string, str(e))
+    return specs
 
 
 def mirror_create(args):
@@ -130,9 +171,10 @@ def mirror_create(args):
 
     # If nothing is passed, use all packages.
     if not specs:
-        specs = [Spec(n) for n in spack.db.all_package_names()]
+        specs = [Spec(n) for n in spack.repo.all_package_names()]
         specs.sort(key=lambda s: s.format("$_$@").lower())
 
+    # If the user asked for dependencies, traverse the spec DAG to get them.
     if args.dependencies:
         new_specs = set()
         for spec in specs:
@@ -175,6 +217,7 @@ def mirror(parser, args):
     action = { 'create' : mirror_create,
                'add'    : mirror_add,
                'remove' : mirror_remove,
+               'rm'     : mirror_remove,
                'list'   : mirror_list }
 
     action[args.mirror_command](args)
diff --git a/lib/spack/spack/cmd/package-list.py b/lib/spack/spack/cmd/package-list.py
index eca9f918f10c70ee2aba884d290594d41fd7a967..5e37d5c16b545c4234c94656e24c388b322047f4 100644
--- a/lib/spack/spack/cmd/package-list.py
+++ b/lib/spack/spack/cmd/package-list.py
@@ -48,7 +48,7 @@ def rst_table(elts):
 
 def print_rst_package_list():
     """Print out information on all packages in restructured text."""
-    pkgs = sorted(spack.db.all_packages(), key=lambda s:s.name.lower())
+    pkgs = sorted(spack.repo.all_packages(), key=lambda s:s.name.lower())
 
     print ".. _package-list:"
     print
diff --git a/lib/spack/spack/cmd/patch.py b/lib/spack/spack/cmd/patch.py
index 8fc6f1383ed5e6175f7422d9c30d383bb2395efe..44fc8696db35c1077014f12d492cbd22ce0f5ce8 100644
--- a/lib/spack/spack/cmd/patch.py
+++ b/lib/spack/spack/cmd/patch.py
@@ -47,5 +47,5 @@ def patch(parser, args):
 
     specs = spack.cmd.parse_specs(args.packages, concretize=True)
     for spec in specs:
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         package.do_patch()
diff --git a/lib/spack/spack/cmd/pkg.py b/lib/spack/spack/cmd/pkg.py
index 1ebc5ce3ee52f86ee0adca6ea71d0ba97a4fe3f6..cf478d3763e1b7c9efc8f378dc784467b143203b 100644
--- a/lib/spack/spack/cmd/pkg.py
+++ b/lib/spack/spack/cmd/pkg.py
@@ -79,13 +79,13 @@ def list_packages(rev):
     git = get_git()
     relpath = spack.packages_path[len(spack.prefix + os.path.sep):] + os.path.sep
     output = git('ls-tree', '--full-tree', '--name-only', rev, relpath,
-                 return_output=True)
+                 output=str)
     return sorted(line[len(relpath):] for line in output.split('\n') if line)
 
 
 def pkg_add(args):
     for pkg_name in args.packages:
-        filename = spack.db.filename_for_package_name(pkg_name)
+        filename = spack.repo.filename_for_package_name(pkg_name)
         if not os.path.isfile(filename):
             tty.die("No such package: %s.  Path does not exist:" % pkg_name, filename)
 
diff --git a/lib/spack/spack/cmd/providers.py b/lib/spack/spack/cmd/providers.py
index 0472f9bbe4c51f1acfe575271f8b7f82266008f8..49d6ac192af076fc66fb05c724328723152c18f2 100644
--- a/lib/spack/spack/cmd/providers.py
+++ b/lib/spack/spack/cmd/providers.py
@@ -39,4 +39,4 @@ def setup_parser(subparser):
 
 def providers(parser, args):
     for spec in spack.cmd.parse_specs(args.vpkg_spec):
-        colify(sorted(spack.db.providers_for(spec)), indent=4)
+        colify(sorted(spack.repo.providers_for(spec)), indent=4)
diff --git a/lib/spack/spack/cmd/repo.py b/lib/spack/spack/cmd/repo.py
new file mode 100644
index 0000000000000000000000000000000000000000..34c755fb67f0f7800ab93332208121876f33c056
--- /dev/null
+++ b/lib/spack/spack/cmd/repo.py
@@ -0,0 +1,218 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+import re
+import shutil
+
+from external import argparse
+import llnl.util.tty as tty
+from llnl.util.filesystem import join_path, mkdirp
+
+import spack.spec
+import spack.config
+from spack.util.environment import get_path
+from spack.repository import *
+
+description = "Manage package source repositories."
+
+def setup_parser(subparser):
+    sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='repo_command')
+    scopes = spack.config.config_scopes
+
+    # Create
+    create_parser = sp.add_parser('create', help=repo_create.__doc__)
+    create_parser.add_argument(
+        'directory', help="Directory to create the repo in.")
+    create_parser.add_argument(
+        'namespace', help="Namespace to identify packages in the repository. "
+        "Defaults to the directory name.", nargs='?')
+
+    # List
+    list_parser = sp.add_parser('list', help=repo_list.__doc__)
+    list_parser.add_argument(
+        '--scope', choices=scopes, default=spack.cmd.default_list_scope,
+        help="Configuration scope to read from.")
+
+    # Add
+    add_parser = sp.add_parser('add', help=repo_add.__doc__)
+    add_parser.add_argument('path', help="Path to a Spack package repository directory.")
+    add_parser.add_argument(
+        '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+        help="Configuration scope to modify.")
+
+    # Remove
+    remove_parser = sp.add_parser('remove', help=repo_remove.__doc__, aliases=['rm'])
+    remove_parser.add_argument(
+        'path_or_namespace',
+        help="Path or namespace of a Spack package repository.")
+    remove_parser.add_argument(
+        '--scope', choices=scopes, default=spack.cmd.default_modify_scope,
+        help="Configuration scope to modify.")
+
+
+def repo_create(args):
+    """Create a new package repository."""
+    root = canonicalize_path(args.directory)
+    namespace = args.namespace
+
+    if not args.namespace:
+        namespace = os.path.basename(root)
+
+    if not re.match(r'\w[\.\w-]*$', namespace):
+        tty.die("'%s' is not a valid namespace." % namespace)
+
+    existed = False
+    if os.path.exists(root):
+        if os.path.isfile(root):
+            tty.die('File %s already exists and is not a directory' % root)
+        elif os.path.isdir(root):
+            if not os.access(root, os.R_OK | os.W_OK):
+                tty.die('Cannot create new repo in %s: cannot access directory.' % root)
+            if os.listdir(root):
+                tty.die('Cannot create new repo in %s: directory is not empty.' % root)
+        existed = True
+
+    full_path = os.path.realpath(root)
+    parent = os.path.dirname(full_path)
+    if not os.access(parent, os.R_OK | os.W_OK):
+        tty.die("Cannot create repository in %s: can't access parent!" % root)
+
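+    # A repository is a directory containing a repo.yaml that names the
+    # namespace, plus a packages/ subdirectory for package files.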
+    try:
+        config_path = os.path.join(root, repo_config_name)
+        packages_path = os.path.join(root, packages_dir_name)
+
+        mkdirp(packages_path)
+        with open(config_path, 'w') as config:
+            config.write("repo:\n")
+            config.write("  namespace: '%s'\n" % namespace)
+
+    except (IOError, OSError) as e:
+        # Try to clean up before bailing out: tty.die() exits, so the
+        # cleanup has to happen first.
+        if existed:
+            shutil.rmtree(config_path, ignore_errors=True)
+            shutil.rmtree(packages_path, ignore_errors=True)
+        else:
+            shutil.rmtree(root, ignore_errors=True)
+
+        tty.die('Failed to create new repository in %s.' % root,
+                "Caused by %s: %s" % (type(e), e))
+
+    tty.msg("Created repo with namespace '%s'." % namespace)
+    tty.msg("To register it with spack, run this command:",
+            'spack repo add %s' % full_path)
+
+
+def repo_add(args):
+    """Add a package source to Spack's configuration."""
+    path = args.path
+
+    # real_path is absolute and handles substitution.
+    canon_path = canonicalize_path(path)
+
+    # check if the path exists
+    if not os.path.exists(canon_path):
+        tty.die("No such file or directory: '%s'." % path)
+
+    # Make sure the path is a directory.
+    if not os.path.isdir(canon_path):
+        tty.die("Not a Spack repository: '%s'." % path)
+
+    # Make sure it's actually a spack repository by constructing it.
+    repo = Repo(canon_path)
+
+    # If that succeeds, finally add it to the configuration.
+    repos = spack.config.get_config('repos', args.scope)
+    if not repos:
+        repos = []
+
+    if repo.root in repos or path in repos:
+        tty.die("Repository is already registered with Spack: '%s'" % path)
+
+    repos.insert(0, canon_path)
+    spack.config.update_config('repos', repos, args.scope)
+    tty.msg("Created repo with namespace '%s'." % repo.namespace)
+
+
+def repo_remove(args):
+    """Remove a repository from Spack's configuration."""
+    repos = spack.config.get_config('repos', args.scope)
+    path_or_namespace = args.path_or_namespace
+
+    # If the argument is a path, remove that repository from config.
+    canon_path = canonicalize_path(path_or_namespace)
+    for repo_path in repos:
+        repo_canon_path = canonicalize_path(repo_path)
+        if canon_path == repo_canon_path:
+            repos.remove(repo_path)
+            spack.config.update_config('repos', repos, args.scope)
+            tty.msg("Removed repository '%s'." % repo_path)
+            return
+
+    # If it is a namespace, remove corresponding repo
+    for path in repos:
+        try:
+            repo = Repo(path)
+            if repo.namespace == path_or_namespace:
+                repos.remove(path)
+                spack.config.update_config('repos', repos, args.scope)
+                tty.msg("Removed repository '%s' with namespace %s."
+                        % (repo.root, repo.namespace))
+                return
+        except RepoError:
+            continue
+
+    tty.die("No repository with path or namespace: '%s'"
+            % path_or_namespace)
+
+
+def repo_list(args):
+    """Show registered repositories and their namespaces."""
+    roots = spack.config.get_config('repos', args.scope)
+    repos = []
+    for r in roots:
+        try:
+            repos.append(Repo(r))
+        except RepoError:
+            continue
+
+    msg = "%d package repositor" % len(repos)
+    msg += "y." if len(repos) == 1 else "ies."
+    tty.msg(msg)
+
+    if not repos:
+        return
+
+    max_ns_len = max(len(r.namespace) for r in repos)
+    fmt = "%%-%ds%%s" % (max_ns_len + 4)
+    for repo in repos:
+        print fmt % (repo.namespace, repo.root)
+
+
+def repo(parser, args):
+    action = { 'create' : repo_create,
+               'list'   : repo_list,
+               'add'    : repo_add,
+               'remove' : repo_remove,
+               'rm'     : repo_remove}
+    action[args.repo_command](args)
diff --git a/lib/spack/spack/cmd/restage.py b/lib/spack/spack/cmd/restage.py
index 703ae30a04be1772df522ea77ec545a061bc56f5..540c2ef2a564d3a10423efd63e4dce0da543da51 100644
--- a/lib/spack/spack/cmd/restage.py
+++ b/lib/spack/spack/cmd/restage.py
@@ -42,5 +42,5 @@ def restage(parser, args):
 
     specs = spack.cmd.parse_specs(args.packages, concretize=True)
     for spec in specs:
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         package.do_restage()
diff --git a/lib/spack/spack/cmd/stage.py b/lib/spack/spack/cmd/stage.py
index 7638cf31c4757f5a0a824f0ed32130d970ab842e..5786780efb43e8b9e7f1dabc3550bb8121e1b904 100644
--- a/lib/spack/spack/cmd/stage.py
+++ b/lib/spack/spack/cmd/stage.py
@@ -49,5 +49,5 @@ def stage(parser, args):
 
     specs = spack.cmd.parse_specs(args.specs, concretize=True)
     for spec in specs:
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         package.do_stage()
diff --git a/lib/spack/spack/cmd/test-install.py b/lib/spack/spack/cmd/test-install.py
index e37554155f340ed6b078ef94c25bc743ca3c5618..656873a2f036f1811a149489ac5a51748ee89e8d 100644
--- a/lib/spack/spack/cmd/test-install.py
+++ b/lib/spack/spack/cmd/test-install.py
@@ -37,20 +37,20 @@
 from spack.fetch_strategy import FetchError
 import spack.cmd
 
-description = "Treat package installations as unit tests and output formatted test results"
+description = "Run package installation as a unit test, output formatted results."
 
 def setup_parser(subparser):
     subparser.add_argument(
         '-j', '--jobs', action='store', type=int,
         help="Explicitly set number of make jobs.  Default is #cpus.")
-    
+
     subparser.add_argument(
         '-n', '--no-checksum', action='store_true', dest='no_checksum',
         help="Do not check packages against checksum")
-    
+
     subparser.add_argument(
         '-o', '--output', action='store', help="test output goes in this file")
-    
+
     subparser.add_argument(
         'package', nargs=argparse.REMAINDER, help="spec of package to install")
 
@@ -59,10 +59,10 @@ class JunitResultFormat(object):
     def __init__(self):
         self.root = ET.Element('testsuite')
         self.tests = []
-        
+
     def add_test(self, buildId, testResult, buildInfo=None):
         self.tests.append((buildId, testResult, buildInfo))
-    
+
     def write_to(self, stream):
         self.root.set('tests', '{0}'.format(len(self.tests)))
         for buildId, testResult, buildInfo in self.tests:
@@ -84,25 +84,25 @@ class TestResult(object):
     PASSED = 0
     FAILED = 1
     SKIPPED = 2
-    
+
 
 class BuildId(object):
     def __init__(self, spec):
         self.name = spec.name
         self.version = spec.version
         self.hashId = spec.dag_hash()
-    
+
     def stringId(self):
         return "-".join(str(x) for x in (self.name, self.version, self.hashId))
 
     def __hash__(self):
         return hash((self.name, self.version, self.hashId))
-    
+
     def __eq__(self, other):
         if not isinstance(other, BuildId):
             return False
-            
-        return ((self.name, self.version, self.hashId) == 
+
+        return ((self.name, self.version, self.hashId) ==
             (other.name, other.version, other.hashId))
 
 
@@ -114,19 +114,19 @@ def fetch_log(path):
 
 
 def failed_dependencies(spec):
-    return set(childSpec for childSpec in spec.dependencies.itervalues() if not 
-        spack.db.get(childSpec).installed)
+    return set(childSpec for childSpec in spec.dependencies.itervalues()
+               if not spack.repo.get(childSpec).installed)
 
 
 def create_test_output(topSpec, newInstalls, output, getLogFunc=fetch_log):
-    # Post-order traversal is not strictly required but it makes sense to output 
+    # Post-order traversal is not strictly required but it makes sense to output
     # tests for dependencies first.
     for spec in topSpec.traverse(order='post'):
         if spec not in newInstalls:
             continue
 
         failedDeps = failed_dependencies(spec)
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         if failedDeps:
             result = TestResult.SKIPPED
             dep = iter(failedDeps).next()
@@ -143,12 +143,12 @@ def create_test_output(topSpec, newInstalls, output, getLogFunc=fetch_log):
                 re.search('error:', line, re.IGNORECASE))
             errOutput = errMessages if errMessages else lines[-10:]
             errOutput = '\n'.join(itertools.chain(
-                    [spec.to_yaml(), "Errors:"], errOutput, 
+                    [spec.to_yaml(), "Errors:"], errOutput,
                     ["Build Log:", package.build_log_path]))
         else:
             result = TestResult.PASSED
             errOutput = None
-        
+
         bId = BuildId(spec)
         output.add_test(bId, result, errOutput)
 
@@ -163,18 +163,18 @@ def test_install(parser, args):
 
     if args.no_checksum:
         spack.do_checksum = False        # TODO: remove this global.
-    
+
     specs = spack.cmd.parse_specs(args.package, concretize=True)
     if len(specs) > 1:
         tty.die("Only 1 top-level package can be specified")
     topSpec = iter(specs).next()
-    
+
     newInstalls = set()
     for spec in topSpec.traverse():
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         if not package.installed:
             newInstalls.add(spec)
-    
+
     if not args.output:
         bId = BuildId(topSpec)
         outputDir = join_path(os.getcwd(), "test-output")
@@ -183,12 +183,12 @@ def test_install(parser, args):
         outputFpath = join_path(outputDir, "test-{0}.xml".format(bId.stringId()))
     else:
         outputFpath = args.output
-    
+
     for spec in topSpec.traverse(order='post'):
         # Calling do_install for the top-level package would be sufficient but
         # this attempts to keep going if any package fails (other packages which
         # are not dependents may succeed)
-        package = spack.db.get(spec)
+        package = spack.repo.get(spec)
         if (not failed_dependencies(spec)) and (not package.installed):
             try:
                 package.do_install(
@@ -202,7 +202,7 @@ def test_install(parser, args):
                 pass
             except FetchError:
                 pass
-   
+
     jrf = JunitResultFormat()
     handled = {}
     create_test_output(topSpec, newInstalls, jrf)
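
The JUnit output assembled above is a single ``testsuite`` element with one
``testcase`` per build.  A minimal, standalone sketch of that shape; the
build names and failure text here are illustrative JUnit conventions, not
output from a real install::

    import xml.etree.ElementTree as ET

    root = ET.Element('testsuite')
    root.set('tests', '2')

    # One passed and one failed build; names mimic BuildId.stringId(),
    # which joins package name, version, and DAG hash.
    passed = ET.SubElement(root, 'testcase')
    passed.set('name', 'libelf-0.8.13-abcd1234')

    failed = ET.SubElement(root, 'testcase')
    failed.set('name', 'mpileaks-1.0-ef567890')
    failure = ET.SubElement(failed, 'failure')
    failure.set('message', 'Installation failure')
    failure.text = 'Errors:\n...'

    assert '<failure message="Installation failure"' in str(ET.tostring(root))
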
diff --git a/lib/spack/spack/cmd/test.py b/lib/spack/spack/cmd/test.py
index 1669ec4cc98fc2ae9b293701f9f4253b44addc39..ddc6cb4fceeae0dd5549a9c1bb17bbe199c005af 100644
--- a/lib/spack/spack/cmd/test.py
+++ b/lib/spack/spack/cmd/test.py
@@ -22,8 +22,10 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import os
 from pprint import pprint
 
+from llnl.util.filesystem import join_path, mkdirp
 from llnl.util.tty.colify import colify
 from llnl.util.lang import list_modules
 
@@ -37,6 +39,12 @@ def setup_parser(subparser):
         'names', nargs='*', help="Names of tests to run.")
     subparser.add_argument(
         '-l', '--list', action='store_true', dest='list', help="Show available tests")
+    subparser.add_argument(
+        '--createXmlOutput', action='store_true', dest='createXmlOutput',
+        help="Create JUnit XML from test results")
+    subparser.add_argument(
+        '--xmlOutputDir', dest='xmlOutputDir',
+        help="Nose creates XML files in this directory")
     subparser.add_argument(
         '-v', '--verbose', action='store_true', dest='verbose',
         help="verbose output")
@@ -48,4 +56,14 @@ def test(parser, args):
         colify(spack.test.list_tests(), indent=2)
 
     else:
-        spack.test.run(args.names, args.verbose)
+        if not args.createXmlOutput:
+            outputDir = None
+        else:
+            if not args.xmlOutputDir:
+                outputDir = join_path(os.getcwd(), "test-output")
+            else:
+                outputDir = os.path.abspath(args.xmlOutputDir)
+
+            if not os.path.exists(outputDir):
+                mkdirp(outputDir)
+        spack.test.run(args.names, outputDir, args.verbose)
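
The flag handling above reduces to three cases; a hedged restatement,
isolated for clarity (``resolve_output_dir`` is a hypothetical name, not
part of the command)::

    import os

    def resolve_output_dir(createXmlOutput, xmlOutputDir):
        if not createXmlOutput:
            return None                  # no JUnit XML requested
        if xmlOutputDir:
            out = os.path.abspath(xmlOutputDir)
        else:
            out = os.path.join(os.getcwd(), "test-output")
        if not os.path.exists(out):
            os.makedirs(out)
        return out

    assert resolve_output_dir(False, None) is None
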
diff --git a/lib/spack/spack/cmd/uninstall.py b/lib/spack/spack/cmd/uninstall.py
index 03873bb5f80a74047f2c9b788201ff5bb913c65f..d01aa2136b09f5ff8cff8298407104dd65680c93 100644
--- a/lib/spack/spack/cmd/uninstall.py
+++ b/lib/spack/spack/cmd/uninstall.py
@@ -30,7 +30,7 @@
 
 import spack
 import spack.cmd
-import spack.packages
+import spack.repository
 from spack.cmd.find import display_specs
 from spack.package import PackageStillNeededError
 
@@ -79,10 +79,9 @@ def uninstall(parser, args):
                 try:
                     # should work if package is known to spack
                     pkgs.append(s.package)
-
-                except spack.packages.UnknownPackageError, e:
-                    # The package.py file has gone away -- but still want to
-                    # uninstall.
+                except spack.repository.UnknownPackageError, e:
+                    # The package.py file has gone away -- but still
+                    # want to uninstall.
                     spack.Package(s).do_uninstall(force=True)
 
         # Sort packages to be uninstalled by the number of installed dependents
diff --git a/lib/spack/spack/cmd/urls.py b/lib/spack/spack/cmd/urls.py
index a544b6153b6399463e46ce1c51868d85dd8ae857..ebab65f7d1919ee94635fa2221fef3b5dc30213d 100644
--- a/lib/spack/spack/cmd/urls.py
+++ b/lib/spack/spack/cmd/urls.py
@@ -41,7 +41,7 @@ def setup_parser(subparser):
 
 def urls(parser, args):
     urls = set()
-    for pkg in spack.db.all_packages():
+    for pkg in spack.repo.all_packages():
         url = getattr(pkg.__class__, 'url', None)
         if url:
             urls.add(url)
diff --git a/lib/spack/spack/cmd/versions.py b/lib/spack/spack/cmd/versions.py
index 494f13d36d5a8b1fadf6c693cb3910c65ffdec1a..bba75dae968b94d7b3fbee8766445407a85f1231 100644
--- a/lib/spack/spack/cmd/versions.py
+++ b/lib/spack/spack/cmd/versions.py
@@ -34,7 +34,7 @@ def setup_parser(subparser):
 
 
 def versions(parser, args):
-    pkg = spack.db.get(args.package)
+    pkg = spack.repo.get(args.package)
 
     safe_versions = pkg.versions
     fetched_versions = pkg.fetch_remote_versions()
diff --git a/lib/spack/spack/compiler.py b/lib/spack/spack/compiler.py
index b9abf943e80ea2204ba97cb6604a6dbbab37bc84..12c02e0ea2b37931e08c7464d43ccfb2ea813e98 100644
--- a/lib/spack/spack/compiler.py
+++ b/lib/spack/spack/compiler.py
@@ -51,7 +51,7 @@ def _verify_executables(*paths):
 def get_compiler_version(compiler_path, version_arg, regex='(.*)'):
     if not compiler_path in _version_cache:
         compiler = Executable(compiler_path)
-        output = compiler(version_arg, return_output=True, error=None)
+        output = compiler(version_arg, output=str, error=str)
 
         match = re.search(regex, output)
         _version_cache[compiler_path] = match.group(1) if match else 'unknown'
diff --git a/lib/spack/spack/compilers/__init__.py b/lib/spack/spack/compilers/__init__.py
index 66e608cf79fc101f39352f7f86753d748b6192f4..6159ef576cac40589472b77a258523c58fa7aa71 100644
--- a/lib/spack/spack/compilers/__init__.py
+++ b/lib/spack/spack/compilers/__init__.py
@@ -27,6 +27,7 @@
 """
 import imp
 import os
+import platform
 
 from llnl.util.lang import memoized, list_modules
 from llnl.util.filesystem import join_path
@@ -35,6 +36,7 @@
 import spack.error
 import spack.spec
 import spack.config
+import spack.architecture
 
 from spack.util.multiproc import parmap
 from spack.compiler import Compiler
@@ -45,50 +47,128 @@
 _imported_compilers_module = 'spack.compilers'
 _required_instance_vars = ['cc', 'cxx', 'f77', 'fc']
 
-_default_order = ['gcc', 'intel', 'pgi', 'clang', 'xlc']
+# TODO: customize order in config file
+if platform.system() == 'Darwin':
+    _default_order = ['clang', 'gcc', 'intel']
+else:
+    _default_order = ['gcc', 'intel', 'pgi', 'clang', 'xlc', 'nag']
+
 
 def _auto_compiler_spec(function):
-    def converter(cspec_like):
+    def converter(cspec_like, *args, **kwargs):
         if not isinstance(cspec_like, spack.spec.CompilerSpec):
             cspec_like = spack.spec.CompilerSpec(cspec_like)
-        return function(cspec_like)
+        return function(cspec_like, *args, **kwargs)
     return converter
 
 
-def _get_config():
-    """Get a Spack config, but make sure it has compiler configuration
-       first."""
+def _to_dict(compiler):
+    """Return a dict version of compiler suitable to insert in YAML."""
+    return {
+        str(compiler.spec) : dict(
+            (attr, getattr(compiler, attr, None))
+            for attr in _required_instance_vars)
+    }
+
+
+def get_compiler_config(arch=None, scope=None):
+    """Return the compiler configuration for the specified architecture.
+    """
     # If any configuration file has compilers, just stick with the
     # ones already configured.
-    config = spack.config.get_compilers_config()
-    existing = [spack.spec.CompilerSpec(s)
-                for s in config]
-    if existing:
-        return config
+    config = spack.config.get_config('compilers', scope=scope)
+
+    my_arch = spack.architecture.sys_type()
+    if arch is None:
+        arch = my_arch
+
+    if arch in config:
+        return config[arch]
+
+    # Only for the current arch in *highest* scope: automatically try to
+    # find compilers if none are configured yet.
+    if arch == my_arch and scope == 'user':
+        config[arch] = {}
+        compilers = find_compilers(*get_path('PATH'))
+        for compiler in compilers:
+            config[arch].update(_to_dict(compiler))
+        spack.config.update_config('compilers', config, scope=scope)
+        return config[arch]
+
+    return {}
+
+
+def add_compilers_to_config(compilers, arch=None, scope=None):
+    """Add compilers to the config for the specified architecture.
+
+    Arguments:
+      - compilers: a list of Compiler objects.
+      - arch:      arch to add compilers for.
+      - scope:     configuration scope to modify.
+    """
+    if arch is None:
+        arch = spack.architecture.sys_type()
+
+    compiler_config = get_compiler_config(arch, scope)
+    for compiler in compilers:
+        compiler_config[str(compiler.spec)] = dict(
+            (c, getattr(compiler, c, "None"))
+            for c in _required_instance_vars)
+
+    update = { arch : compiler_config }
+    spack.config.update_config('compilers', update, scope)
+
+
+@_auto_compiler_spec
+def remove_compiler_from_config(compiler_spec, arch=None, scope=None):
+    """Remove compilers from the config, by spec.
+
+    Arguments:
+      - compiler_specs: a list of CompilerSpec objects.
+      - arch:           arch to add compilers for.
+      - scope:          configuration scope to modify.
+    """
+    if arch is None:
+        arch = spack.architecture.sys_type()
 
-    compilers = find_compilers(*get_path('PATH'))
-    add_compilers_to_config('user', *compilers)
+    compiler_config = get_compiler_config(arch, scope)
+    del compiler_config[str(compiler_spec)]
+    update = { arch : compiler_config }
 
-    # After writing compilers to the user config, return a full config
-    # from all files.
-    return spack.config.get_compilers_config()
+    spack.config.update_config('compilers', update, scope)
+
+
+def all_compilers_config(arch=None, scope=None):
+    """Return a set of specs for all the compiler versions currently
+       available to build with.  These are instances of CompilerSpec.
+    """
+    # Get compilers for this architecture.
+    arch_config = get_compiler_config(arch, scope)
+
+    # Merge 'all' compilers with arch-specific ones.
+    # Arch-specific compilers have higher precedence.
+    merged_config = get_compiler_config('all', scope=scope)
+    merged_config = spack.config._merge_yaml(merged_config, arch_config)
+
+    return merged_config
+
+
+def all_compilers(arch=None, scope=None):
+    # Return compiler specs from the merged config.
+    return [spack.spec.CompilerSpec(s)
+            for s in all_compilers_config(arch, scope)]
 
 
-_cached_default_compiler = None
 def default_compiler():
-    global _cached_default_compiler
-    if _cached_default_compiler:
-        return _cached_default_compiler
     versions = []
-    for name in _default_order:  # TODO: customize order.
+    for name in _default_order:
         versions = find(name)
-        if versions: break
-
-    if not versions:
+        if versions:
+            break
+    else:
         raise NoCompilersError()
 
-    _cached_default_compiler = sorted(versions)[-1]
-    return _cached_default_compiler
+    return sorted(versions)[-1]
 
 
 def find_compilers(*path):
@@ -123,20 +203,6 @@ def find_compilers(*path):
     return clist
 
 
-def add_compilers_to_config(scope, *compilers):
-    compiler_config_tree = {}
-    for compiler in compilers:
-        compiler_entry = {}
-        for c in _required_instance_vars:
-            val = getattr(compiler, c)
-            if not val:
-                val = "None"
-            compiler_entry[c] = val
-        compiler_config_tree[str(compiler.spec)] = compiler_entry
-    spack.config.add_to_compiler_config(compiler_config_tree, scope)
-
-
-
 def supported_compilers():
     """Return a set of names of compilers supported by Spack.
 
@@ -152,27 +218,19 @@ def supported(compiler_spec):
     return compiler_spec.name in supported_compilers()
 
 
-def all_compilers():
-    """Return a set of specs for all the compiler versions currently
-       available to build with.  These are instances of CompilerSpec.
-    """
-    configuration = _get_config()
-    return [spack.spec.CompilerSpec(s) for s in configuration]
-
-
 @_auto_compiler_spec
-def find(compiler_spec):
+def find(compiler_spec, arch=None, scope=None):
     """Return specs of available compilers that match the supplied
        compiler spec.  Return an list if nothing found."""
-    return [c for c in all_compilers() if c.satisfies(compiler_spec)]
+    return [c for c in all_compilers(arch, scope) if c.satisfies(compiler_spec)]
 
 
 @_auto_compiler_spec
-def compilers_for_spec(compiler_spec):
+def compilers_for_spec(compiler_spec, arch=None, scope=None):
     """This gets all compilers that satisfy the supplied CompilerSpec.
        Returns an empty list if none are found.
     """
-    config = _get_config()
+    config = all_compilers_config(arch, scope)
 
     def get_compiler(cspec):
         items = config[str(cspec)]
@@ -191,7 +249,7 @@ def get_compiler(cspec):
 
         return cls(cspec, *compiler_paths)
 
-    matches = find(compiler_spec)
+    matches = find(compiler_spec, arch, scope)
     return [get_compiler(cspec) for cspec in matches]
 
 
diff --git a/lib/spack/spack/compilers/clang.py b/lib/spack/spack/compilers/clang.py
index 340051019cc376278bac8b3598b3064e8590a05a..e406d86a24525b1deeae0fe5646d4054aa59cf93 100644
--- a/lib/spack/spack/compilers/clang.py
+++ b/lib/spack/spack/compilers/clang.py
@@ -22,7 +22,10 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import re
+import spack.compiler as cpr
 from spack.compiler import *
+from spack.util.executable import *
 
 class Clang(Compiler):
     # Subclasses use possible names of C compiler
@@ -47,11 +50,34 @@ class Clang(Compiler):
     @classmethod
     def default_version(self, comp):
         """The '--version' option works for clang compilers.
-           Output looks like this::
+           On most platforms, output looks like this::
 
                clang version 3.1 (trunk 149096)
                Target: x86_64-unknown-linux-gnu
                Thread model: posix
+
+           On Mac OS X, the output looks like this::
+
+               Apple LLVM version 7.0.2 (clang-700.1.81)
+               Target: x86_64-apple-darwin15.2.0
+               Thread model: posix
+
         """
-        return get_compiler_version(
-            comp, '--version', r'(?:clang version|based on LLVM) ([^ )]+)')
+        if comp not in cpr._version_cache:
+            compiler = Executable(comp)
+            output = compiler('--version', output=str, error=str)
+
+            ver = 'unknown'
+            match = re.search(r'^Apple LLVM version ([^ )]+)', output)
+            if match:
+                # Apple's LLVM compiler has its own versions, so suffix them.
+                ver = match.group(1) + '-apple'
+            else:
+                # Normal clang compiler versions are left as-is
+                match = re.search(r'^clang version ([^ )]+)', output)
+                if match:
+                    ver = match.group(1)
+
+            cpr._version_cache[comp] = ver
+
+        return cpr._version_cache[comp]
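
The two regexes above can be checked in isolation; this standalone sketch
mirrors the suffixing logic against the banners quoted in the docstring::

    import re

    def clang_version(output):
        # Apple's LLVM versions get an '-apple' suffix so they don't
        # collide with mainline clang version numbers.
        match = re.search(r'^Apple LLVM version ([^ )]+)', output)
        if match:
            return match.group(1) + '-apple'
        match = re.search(r'^clang version ([^ )]+)', output)
        return match.group(1) if match else 'unknown'

    assert clang_version('Apple LLVM version 7.0.2 (clang-700.1.81)') == '7.0.2-apple'
    assert clang_version('clang version 3.1 (trunk 149096)') == '3.1'
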
diff --git a/lib/spack/spack/compilers/nag.py b/lib/spack/spack/compilers/nag.py
new file mode 100644
index 0000000000000000000000000000000000000000..527a05a090cf100f649c7dcd433a71fce5027b96
--- /dev/null
+++ b/lib/spack/spack/compilers/nag.py
@@ -0,0 +1,33 @@
+from spack.compiler import *
+
+class Nag(Compiler):
+    # Subclasses use possible names of C compiler
+    cc_names = []
+
+    # Subclasses use possible names of C++ compiler
+    cxx_names = []
+
+    # Subclasses use possible names of Fortran 77 compiler
+    f77_names = ['nagfor']
+
+    # Subclasses use possible names of Fortran 90 compiler
+    fc_names = ['nagfor']
+
+    # Named wrapper links within spack.build_env_path
+    link_paths = { # Use default wrappers for C and C++, in case they are provided in compilers.yaml
+                   'cc'  : 'cc',
+                   'cxx' : 'c++',
+                   'f77' : 'nag/nagfor',
+                   'fc'  : 'nag/nagfor' }
+
+    @classmethod
+    def default_version(self, comp):
+        """The '-V' option works for nag compilers.
+           Output looks like this::
+
+               NAG Fortran Compiler Release 6.0(Hibiya) Build 1037
+               Product NPL6A60NA for x86-64 Linux
+               Copyright 1990-2015 The Numerical Algorithms Group Ltd., Oxford, U.K.
+        """
+        return get_compiler_version(
+            comp, '-V', r'NAG Fortran Compiler Release ([0-9.]+)')
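
Against the banner quoted in the docstring, the version regex picks out
just the release number::

    import re

    banner = "NAG Fortran Compiler Release 6.0(Hibiya) Build 1037"
    match = re.search(r'NAG Fortran Compiler Release ([0-9.]+)', banner)
    assert match.group(1) == '6.0'
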
diff --git a/lib/spack/spack/config.py b/lib/spack/spack/config.py
index c127f6a28f89a89f810ee0e1eb1022a3a84f32c3..6fecde998017d040224a17b0a371ec600279adb5 100644
--- a/lib/spack/spack/config.py
+++ b/lib/spack/spack/config.py
@@ -1,5 +1,5 @@
 ##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
 # Produced at the Lawrence Livermore National Laboratory.
 #
 # This file is part of Spack.
@@ -45,11 +45,11 @@
 Configuration file format
 ===============================
 
-Configuration files are formatted using YAML syntax.
-This format is implemented by Python's
-yaml class, and it's easy to read and versatile.
+Configuration files are formatted using YAML syntax.  This format is
+implemented by PyYAML (included with Spack as an external module),
+and it's easy to read and versatile.
 
-The config files are structured as trees, like this ``compiler`` section::
+Config files are structured as trees, like this ``compiler`` section::
 
      compilers:
        chaos_5_x86_64_ib:
@@ -67,274 +67,475 @@
 categorize entries beneath them in the tree.  At the root of the tree,
 entries like ''cc'' and ''cxx'' are specified as name/value pairs.
 
-Spack returns these trees as nested dicts.  The dict for the above example
-would looks like:
-
-  { 'compilers' :
-      { 'chaos_5_x86_64_ib' :
-         { 'gcc@4.4.7' :
-             { 'cc' : '/usr/bin/gcc',
-               'cxx' : '/usr/bin/g++'
-               'f77' : '/usr/bin/gfortran'
-               'fc' : '/usr/bin/gfortran' }
-         }
-     { 'bgqos_0' :
-         { 'cc' : '/usr/local/bin/mpixlc' }
-     }
-  }
-
-Some routines, like get_mirrors_config and get_compilers_config may strip
-off the top-levels of the tree and return subtrees.
+``config.get_config()`` returns these trees as nested dicts, but it
+strips the first level off.  So, ``config.get_config('compilers')``
+would return something like this for the above example:
+
+   { 'chaos_5_x86_64_ib' :
+       { 'gcc@4.4.7' :
+           { 'cc'  : '/usr/bin/gcc',
+             'cxx' : '/usr/bin/g++',
+             'f77' : '/usr/bin/gfortran',
+             'fc'  : '/usr/bin/gfortran' } },
+     'bgqos_0' :
+       { 'cc' : '/usr/local/bin/mpixlc' } }
+
+Likewise, the ``mirrors.yaml`` file's first line must be ``mirrors:``,
+but ``get_config()`` strips that off too.
+
+Precedence
+===============================
+
+``config.py`` routines attempt to recursively merge configuration
+across scopes.  So if there are ``compilers.yaml`` files in both the
+site scope and the user scope, ``get_config('compilers')`` will return
+merged dictionaries of *all* the compilers available.  If a user
+compiler conflicts with a site compiler, Spack will overwrite the site
+configuration with the user configuration.  If both the user and site
+``mirrors.yaml`` files contain lists of mirrors, then ``get_config()``
+will return a concatenated list of mirrors, with the user config items
+first.
+
+Sometimes, it is useful to *completely* override a site setting with a
+user one.  To accomplish this, you can use *two* colons at the end of
+a key in a configuration file.  For example, this:
+
+     compilers::
+       chaos_5_x86_64_ib:
+          gcc@4.4.7:
+            cc: /usr/bin/gcc
+            cxx: /usr/bin/g++
+            f77: /usr/bin/gfortran
+            fc: /usr/bin/gfortran
+       bgqos_0:
+          xlc@12.1:
+            cc: /usr/local/bin/mpixlc
+            ...
+
+will make Spack take compilers *only* from the user configuration, and
+the site configuration will be ignored.
+
 """
 import os
-import exceptions
+import re
 import sys
-
-from ordereddict_backport import OrderedDict
-from llnl.util.lang import memoized
-import spack.error
-
+import copy
+import jsonschema
+from jsonschema import Draft4Validator, validators
 import yaml
 from yaml.error import MarkedYAMLError
+from ordereddict_backport import OrderedDict
+
 import llnl.util.tty as tty
 from llnl.util.filesystem import mkdirp
 
-_config_sections = {}
-class _ConfigCategory:
-    name = None
-    filename = None
-    merge = True
-    def __init__(self, n, f, m):
-        self.name = n
-        self.filename = f
-        self.merge = m
-        self.files_read_from = []
-        self.result_dict = {}
-        _config_sections[n] = self
-
-_ConfigCategory('compilers', 'compilers.yaml', True)
-_ConfigCategory('mirrors', 'mirrors.yaml', True)
-_ConfigCategory('view', 'views.yaml', True)
-_ConfigCategory('order', 'orders.yaml', True)
-
-"""Names of scopes and their corresponding configuration files."""
-config_scopes = [('site', os.path.join(spack.etc_path, 'spack')),
-                 ('user', os.path.expanduser('~/.spack'))]
-
-_compiler_by_arch = {}
-_read_config_file_result = {}
-def _read_config_file(filename):
-    """Read a given YAML configuration file"""
-    global _read_config_file_result
-    if filename in _read_config_file_result:
-        return _read_config_file_result[filename]
+import spack
+from spack.error import SpackError
+
+# Hacked yaml for configuration files preserves line numbers.
+import spack.util.spack_yaml as syaml
+
+
+"""Dict from section names -> schema for that section."""
+section_schemas = {
+    'compilers': {
+        '$schema': 'http://json-schema.org/schema#',
+        'title': 'Spack compiler configuration file schema',
+        'type': 'object',
+        'additionalProperties': False,
+        'patternProperties': {
+            'compilers:?': { # optional colon for overriding site config.
+                'type': 'object',
+                'default': {},
+                'additionalProperties': False,
+                'patternProperties': {
+                    r'\w[\w-]*': {           # architecture
+                        'type': 'object',
+                        'additionalProperties': False,
+                        'patternProperties': {
+                            r'\w[\w-]*@\w[\w-]*': {   # compiler spec
+                                'type': 'object',
+                                'additionalProperties': False,
+                                'required': ['cc', 'cxx', 'f77', 'fc'],
+                                'properties': {
+                                    'cc':  { 'anyOf': [ {'type' : 'string' },
+                                                        {'type' : 'null' }]},
+                                    'cxx': { 'anyOf': [ {'type' : 'string' },
+                                                        {'type' : 'null' }]},
+                                    'f77': { 'anyOf': [ {'type' : 'string' },
+                                                        {'type' : 'null' }]},
+                                    'fc':  { 'anyOf': [ {'type' : 'string' },
+                                                        {'type' : 'null' }]},
+                                },},},},},},},},
+
+    'mirrors': {
+        '$schema': 'http://json-schema.org/schema#',
+        'title': 'Spack mirror configuration file schema',
+        'type': 'object',
+        'additionalProperties': False,
+        'patternProperties': {
+            r'mirrors:?': {
+                'type': 'object',
+                'default': {},
+                'additionalProperties': False,
+                'patternProperties': {
+                    r'\w[\w-]*': {
+                        'type': 'string'},},},},},
+
+    'repos': {
+        '$schema': 'http://json-schema.org/schema#',
+        'title': 'Spack repository configuration file schema',
+        'type': 'object',
+        'additionalProperties': False,
+        'patternProperties': {
+            r'repos:?': {
+                'type': 'array',
+                'default': [],
+                'items': {
+                    'type': 'string'},},},},
+}
+
+"""OrderedDict of config scopes keyed by name.
+   Later scopes will override earlier scopes.
+"""
+config_scopes = OrderedDict()
+
+
+def validate_section_name(section):
+    """Raise a ValueError if the section is not a valid section."""
+    if section not in section_schemas:
+        raise ValueError("Invalid config section: '%s'.  Options are %s."
+                         % (section, section_schemas))
+
+
+def extend_with_default(validator_class):
+    """Add support for the 'default' attribute for properties and patternProperties.
+
+       jsonschema does not handle this out of the box -- it only
+       validates.  This allows us to set default values for configs
+       where certain fields are `None` because they're deleted or
+       commented out.
+
+    """
+    validate_properties = validator_class.VALIDATORS["properties"]
+    validate_pattern_properties = validator_class.VALIDATORS["patternProperties"]
+
+    def set_defaults(validator, properties, instance, schema):
+        for property, subschema in properties.iteritems():
+            if "default" in subschema:
+                instance.setdefault(property, subschema["default"])
+        for err in validate_properties(validator, properties, instance, schema):
+            yield err
 
+    def set_pp_defaults(validator, properties, instance, schema):
+        for property, subschema in properties.iteritems():
+            if "default" in subschema:
+                if isinstance(instance, dict):
+                    for key, val in instance.iteritems():
+                        if re.match(property, key) and val is None:
+                            instance[key] = subschema["default"]
+
+        for err in validate_pattern_properties(validator, properties, instance, schema):
+            yield err
+
+    return validators.extend(validator_class, {
+        "properties" : set_defaults,
+        "patternProperties" : set_pp_defaults
+    })
+
+
+DefaultSettingValidator = extend_with_default(Draft4Validator)
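
This is the pattern jsonschema's own documentation suggests for filling in
defaults during validation; a self-contained check of the ``properties``
half (the schema here is a toy, not one of the section schemas)::

    from jsonschema import Draft4Validator, validators

    def with_defaults(validator_class):
        validate_properties = validator_class.VALIDATORS["properties"]

        def set_defaults(validator, properties, instance, schema):
            for prop, subschema in properties.iteritems():
                if "default" in subschema:
                    instance.setdefault(prop, subschema["default"])
            for err in validate_properties(validator, properties,
                                           instance, schema):
                yield err

        return validators.extend(validator_class,
                                 {"properties": set_defaults})

    schema = {'type': 'object',
              'properties': {'repos': {'type': 'array', 'default': []}}}
    data = {}
    with_defaults(Draft4Validator)(schema).validate(data)
    assert data == {'repos': []}    # the missing key was filled in
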
+
+def validate_section(data, schema):
+    """Validate data read in from a Spack YAML file.
+
+    This leverages the line information (start_mark, end_mark) stored
+    on Spack YAML structures.
+
+    """
     try:
+        DefaultSettingValidator(schema).validate(data)
+    except jsonschema.ValidationError as e:
+        raise ConfigFormatError(e, data)
+
+
+class ConfigScope(object):
+    """This class represents a configuration scope.
+
+       A scope is one directory containing named configuration files.
+       Each file is a config "section" (e.g., mirrors, compilers, etc).
+    """
+
+    def __init__(self, name, path):
+        self.name = name           # scope name.
+        self.path = path           # path to directory containing configs.
+        self.sections = {}         # sections read from config files.
+
+        # Register in a dict of all ConfigScopes
+        # TODO: make this cleaner.  Mocking up for testing is brittle.
+        global config_scopes
+        config_scopes[name] = self
+
+    def get_section_filename(self, section):
+        validate_section_name(section)
+        return os.path.join(self.path, "%s.yaml" % section)
+
+
+    def get_section(self, section):
+        if not section in self.sections:
+            path   = self.get_section_filename(section)
+            schema = section_schemas[section]
+            data   = _read_config_file(path, schema)
+            self.sections[section] = data
+        return self.sections[section]
+
+
+    def write_section(self, section):
+        filename = self.get_section_filename(section)
+        data = self.get_section(section)
+        try:
+            mkdirp(self.path)
+            with open(filename, 'w') as f:
+                validate_section(data, section_schemas[section])
+                syaml.dump(data, stream=f, default_flow_style=False)
+        except jsonschema.ValidationError as e:
+            raise ConfigSanityError(e, data)
+        except (yaml.YAMLError, IOError) as e:
+            raise ConfigFileError("Error writing to config file: '%s'" % str(e))
+
+
+    def clear(self):
+        """Empty cached config information."""
+        self.sections = {}
+
+
+ConfigScope('site', os.path.join(spack.etc_path, 'spack'))
+ConfigScope('user', os.path.expanduser('~/.spack'))
+
+
+def highest_precedence_scope():
+    """Get the scope with highest precedence (prefs will override others)."""
+    return config_scopes.values()[-1]
+
+
+def validate_scope(scope):
+    """Ensure that scope is valid, and return a valid scope if it is None.
+
+       This should be used by routines in ``config.py`` to validate
+       scope name arguments, and to determine a default scope where no
+       scope is specified.
+
+    """
+    if scope is None:
+        # default to the scope with highest precedence.
+        return highest_precedence_scope()
+
+    elif scope in config_scopes:
+        return config_scopes[scope]
+
+    else:
+        raise ValueError("Invalid config scope: '%s'.  Must be one of %s."
+                         % (scope, config_scopes.keys()))
+
+
+def _read_config_file(filename, schema):
+    """Read a YAML configuration file."""
+    # Ignore nonexisting files.
+    if not os.path.exists(filename):
+        return None
+
+    elif not os.path.isfile(filename):
+        raise ConfigFileError(
+            "Invlaid configuration. %s exists but is not a file." % filename)
+
+    elif not os.access(filename, os.R_OK):
+        raise ConfigFileError("Config file is not readable: %s." % filename)
+
+    try:
+        tty.debug("Reading config file %s" % filename)
         with open(filename) as f:
-            ydict = yaml.load(f)
+            data = syaml.load(f)
+
+        if data:
+            validate_section(data, schema)
+        return data
+
     except MarkedYAMLError, e:
-        tty.die("Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
-    except exceptions.IOError, e:
-        _read_config_file_result[filename] = None
-        return None
-    _read_config_file_result[filename] = ydict
-    return ydict
+        raise ConfigFileError(
+            "Error parsing yaml%s: %s" % (str(e.context_mark), e.problem))
+
+    except IOError, e:
+        raise ConfigFileError(
+            "Error reading configuration file %s: %s" % (filename, str(e)))
 
 
 def clear_config_caches():
     """Clears the caches for configuration files, which will cause them
        to be re-read upon the next request"""
-    for key,s in _config_sections.iteritems():
-        s.files_read_from = []
-        s.result_dict = {}
-    spack.config._read_config_file_result = {}
-    spack.config._compiler_by_arch = {}
-    spack.compilers._cached_default_compiler = None
-
-
-def _merge_dicts(d1, d2):
-    """Recursively merges two configuration trees, with entries
-       in d2 taking precedence over d1"""
-    if not d1:
-        return d2.copy()
-    if not d2:
-        return d1
-
-    for key2, val2 in d2.iteritems():
-        if not key2 in d1:
-            d1[key2] = val2
-            continue
-        val1 = d1[key2]
-        if isinstance(val1, dict) and isinstance(val2, dict):
-            d1[key2] = _merge_dicts(val1, val2)
-            continue
-        if isinstance(val1, list) and isinstance(val2, list):
-            val1.extend(val2)
-            seen = set()
-            d1[key2] = [ x for x in val1 if not (x in seen or seen.add(x)) ]
-            continue
-        d1[key2] = val2
-    return d1
-
-
-def get_config(category_name):
-    """Get the confguration tree for the names category.  Strips off the
-       top-level category entry from the dict"""
-    global config_scopes
-    category = _config_sections[category_name]
-    if category.result_dict:
-        return category.result_dict
-
-    category.result_dict = {}
-    for scope, scope_path in config_scopes:
-        path = os.path.join(scope_path, category.filename)
-        result = _read_config_file(path)
-        if not result:
-            continue
-        if not category_name in result:
-            continue
-        category.files_read_from.insert(0, path)
-        result = result[category_name]
-        if category.merge:
-            category.result_dict = _merge_dicts(category.result_dict, result)
-        else:
-            category.result_dict = result
-    return category.result_dict
-
-
-def get_compilers_config(arch=None):
-    """Get the compiler configuration from config files for the given
-       architecture.  Strips off the architecture component of the
-       configuration"""
-    global _compiler_by_arch
-    if not arch:
-        arch = spack.architecture.sys_type()
-    if arch in _compiler_by_arch:
-        return _compiler_by_arch[arch]
-
-    cc_config = get_config('compilers')
-    if arch in cc_config and 'all' in cc_config:
-        arch_compiler = dict(cc_config[arch])
-        _compiler_by_arch[arch] = _merge_dict(arch_compiler, cc_config['all'])
-    elif arch in cc_config:
-        _compiler_by_arch[arch] = cc_config[arch]
-    elif 'all' in cc_config:
-        _compiler_by_arch[arch] = cc_config['all']
+    for scope in config_scopes.values():
+        scope.clear()
+
+
+def _merge_yaml(dest, source):
+    """Merges source into dest; entries in source take precedence over dest.
+
+    This routine may modify dest and should be assigned to dest, in
+    case dest was None to begin with, e.g.:
+
+       dest = _merge_yaml(dest, source)
+
+    Config file authors can optionally end any attribute in a dict
+    with `::` instead of `:`, and the key will override that of the
+    parent instead of merging.
+
+    """
+    def they_are(t):
+        return isinstance(dest, t) and isinstance(source, t)
+
+    # A None source overwrites dest entirely (even when dest is also None).
+    if source is None:
+        return None
+
+    # Source list is prepended (for precedence)
+    if they_are(list):
+        seen = set(source)
+        dest[:] = source + [x for x in dest if x not in seen]
+        return dest
+
+    # Source dict is merged into dest.
+    elif they_are(dict):
+        for sk, sv in source.iteritems():
+            if not sk in dest:
+                dest[sk] = copy.copy(sv)
+            else:
+                dest[sk] = _merge_yaml(dest[sk], source[sk])
+        return dest
+
+    # In any other case, overwrite with a copy of the source value.
     else:
-        _compiler_by_arch[arch] = {}
-    return _compiler_by_arch[arch]
-
-
-def get_mirror_config():
-    """Get the mirror configuration from config files"""
-    return get_config('mirrors')
-
-
-def get_config_scope_dirname(scope):
-    """For a scope return the config directory"""
-    global config_scopes
-    for s,p in config_scopes:
-        if s == scope:
-            return p
-    tty.die("Unknown scope %s.  Valid options are %s" %
-            (scope, ", ".join([s for s,p in config_scopes])))
-
-
-def get_config_scope_filename(scope, category_name):
-    """For some scope and category, get the name of the configuration file"""
-    if not category_name in _config_sections:
-        tty.die("Unknown config category %s.  Valid options are: %s" %
-                (category_name, ", ".join([s for s in _config_sections])))
-    return os.path.join(get_config_scope_dirname(scope), _config_sections[category_name].filename)
-
-
-def add_to_config(category_name, addition_dict, scope=None):
-    """Merge a new dict into a configuration tree and write the new
-       configuration to disk"""
-    global _read_config_file_result
-    get_config(category_name)
-    category = _config_sections[category_name]
-
-    #If scope is specified, use it.  Otherwise use the last config scope that
-    #we successfully parsed data from.
-    file = None
-    path = None
-    if not scope and not category.files_read_from:
-        scope = 'user'
-    if scope:
-        try:
-            dir = get_config_scope_dirname(scope)
-            if not os.path.exists(dir):
-                mkdirp(dir)
-            path = os.path.join(dir, category.filename)
-            file = open(path, 'w')
-        except exceptions.IOError, e:
-            pass
+        return copy.copy(source)
+
+
+def get_config(section, scope=None):
+    """Get configuration settings for a section.
+
+       Strips off the top-level section name from the YAML dict.
+    """
+    validate_section_name(section)
+    merged_section = syaml.syaml_dict()
+
+    if scope is None:
+        scopes = config_scopes.values()
     else:
-        for p in category.files_read_from:
-            try:
-                file = open(p, 'w')
-            except exceptions.IOError, e:
-                pass
-            if file:
-                path = p
-                break;
-    if not file:
-        tty.die('Unable to write to config file %s' % path)
-
-    #Merge the new information into the existing file info, then write to disk
-    new_dict = _read_config_file_result[path]
-    if new_dict and category_name in new_dict:
-        new_dict = new_dict[category_name]
-    new_dict = _merge_dicts(new_dict, addition_dict)
-    new_dict = { category_name : new_dict }
-    _read_config_file_result[path] = new_dict
-    yaml.dump(new_dict, stream=file, default_flow_style=False)
-    file.close()
-
-    #Merge the new information into the cached results
-    category.result_dict = _merge_dicts(category.result_dict, addition_dict)
-
-
-def add_to_mirror_config(addition_dict, scope=None):
-    """Add mirrors to the configuration files"""
-    add_to_config('mirrors', addition_dict, scope)
-
-
-def add_to_compiler_config(addition_dict, scope=None, arch=None):
-    """Add compilerss to the configuration files"""
-    if not arch:
-        arch = spack.architecture.sys_type()
-    add_to_config('compilers', { arch : addition_dict }, scope)
-    clear_config_caches()
-
-
-def remove_from_config(category_name, key_to_rm, scope=None):
-    """Remove a configuration key and write a new configuration to disk"""
-    global config_scopes
-    get_config(category_name)
-    scopes_to_rm_from = [scope] if scope else [s for s,p in config_scopes]
-    category = _config_sections[category_name]
-
-    rmd_something = False
-    for s in scopes_to_rm_from:
-        path = get_config_scope_filename(scope, category_name)
-        result = _read_config_file(path)
-        if not result:
+        scopes = [validate_scope(scope)]
+
+    for scope in scopes:
+        # read potentially cached data from the scope.
+        data = scope.get_section(section)
+
+        # Skip empty configs
+        if not data or not isinstance(data, dict):
             continue
-        if not key_to_rm in result[category_name]:
+
+        # Allow complete override of site config with '<section>::'
+        override_key = section + ':'
+        if not (section in data or override_key in data):
+            tty.warn("Skipping bad configuration file: '%s'" % scope.path)
             continue
-        with open(path, 'w') as f:
-            result[category_name].pop(key_to_rm, None)
-            yaml.dump(result, stream=f, default_flow_style=False)
-            category.result_dict.pop(key_to_rm, None)
-            rmd_something = True
-    return rmd_something
-
-
-"""Print a configuration to stdout"""
-def print_category(category_name):
-    if not category_name in _config_sections:
-        tty.die("Unknown config category %s.  Valid options are: %s" %
-                (category_name, ", ".join([s for s in _config_sections])))
-    yaml.dump(get_config(category_name), stream=sys.stdout, default_flow_style=False)
+
+        if override_key in data:
+            merged_section = data[override_key]
+        else:
+            merged_section = _merge_yaml(merged_section, data[section])
+
+    return merged_section
+
+
+def get_config_filename(scope, section):
+    """For some scope and section, get the name of the configuration file"""
+    scope = validate_scope(scope)
+    return scope.get_section_filename(section)
+
+
+def update_config(section, update_data, scope=None):
+    """Update the configuration file for a particular scope.
+
+       Overwrites contents of a section in a scope with update_data,
+       then writes out the config file.
+
+       update_data should have the top-level section name stripped off
+       (it will be re-added).  Data itself can be a list, dict, or any
+       other yaml-ish structure.
+
+    """
+    # read in the config to ensure we've got current data
+    get_config(section)
+
+    validate_section_name(section)       # validate section name
+    scope = validate_scope(scope)   # get ConfigScope object from string.
+
+    # replace the section's data in the scope's cache, then write it out.
+    scope.sections[section] = { section : update_data }
+    scope.write_section(section)
+
+
+def print_section(section):
+    """Print a configuration to stdout."""
+    try:
+        data = syaml.syaml_dict()
+        data[section] = get_config(section)
+        syaml.dump(data, stream=sys.stdout, default_flow_style=False)
+    except (yaml.YAMLError, IOError) as e:
+        raise ConfigError("Error reading configuration: %s" % section)
+
+
+class ConfigError(SpackError): pass
+class ConfigFileError(ConfigError): pass
+
+def get_path(path, data):
+    """Descend into nested `data`, following the sequence of keys in `path`."""
+    if path:
+        return get_path(path[1:], data[path[0]])
+    else:
+        return data
+
+class ConfigFormatError(ConfigError):
+    """Raised when a configuration format does not match its schema."""
+    def __init__(self, validation_error, data):
+        # Try to get line number from erroneous instance and its parent
+        instance_mark = getattr(validation_error.instance, '_start_mark', None)
+        parent_mark = getattr(validation_error.parent, '_start_mark', None)
+        path = getattr(validation_error, 'path', None)
+
+        # Try really hard to get the parent (which sometimes is not
+        # set).  This digs it out of the validated structure if it's
+        # not on the validation_error.
+        if path and not parent_mark:
+            parent_path = list(path)[:-1]
+            parent = get_path(parent_path, data)
+            if path[-1] in parent:
+                if isinstance(parent, dict):
+                    keylist = parent.keys()
+                elif isinstance(parent, list):
+                    keylist = parent
+                idx = keylist.index(path[-1])
+                parent_mark = getattr(keylist[idx], '_start_mark', None)
+
+        if instance_mark:
+            location = '%s:%d' % (instance_mark.name, instance_mark.line + 1)
+        elif parent_mark:
+            location = '%s:%d' % (parent_mark.name, parent_mark.line + 1)
+        elif path:
+            location = 'At ' + ':'.join(path)
+        else:
+            location = '<unknown line>'
+
+        message = '%s: %s' % (location, validation_error.message)
+        super(ConfigFormatError, self).__init__(message)
+
+class ConfigSanityError(ConfigFormatError):
+    """Same as ConfigFormatError, raised when config is written by Spack."""
diff --git a/lib/spack/spack/database.py b/lib/spack/spack/database.py
index a6f1cc507774e6d5bd9350c80497f5d4ee1d16ce..9cbe7de44a388342b6dbb503ac0f2c22e32030f0 100644
--- a/lib/spack/spack/database.py
+++ b/lib/spack/spack/database.py
@@ -54,7 +54,7 @@
 from spack.version import Version
 from spack.spec import Spec
 from spack.error import SpackError
-from spack.packages import UnknownPackageError
+from spack.repository import UnknownPackageError
 
 # DB goes in this directory underneath the root
 _db_dirname = '.spack-db'
@@ -215,7 +215,6 @@ def _read_spec_from_yaml(self, hash_key, installs, parent_key=None):
         # Specs from the database need to be marked concrete because
         # they represent actual installations.
         spec._mark_concrete()
-
         return spec
 
 
@@ -490,7 +489,7 @@ def remove(self, spec):
           1. Marks the spec as not installed.
           2. Removes the spec if it has no more dependents.
           3. If removed, recursively updates dependencies' ref counts
-             and remvoes them if they are no longer needed.
+             and removes them if they are no longer needed.
 
         """
         # Take a lock around the entire removal.
@@ -554,7 +553,7 @@ def query(self, query_spec=any, known=any, installed=True):
             for key, rec in self._data.items():
                 if installed is not any and rec.installed != installed:
                     continue
-                if known is not any and spack.db.exists(rec.spec.name) != known:
+                if known is not any and spack.repo.exists(rec.spec.name) != known:
                     continue
                 if query_spec is any or rec.spec.satisfies(query_spec):
                     results.append(rec.spec)
diff --git a/lib/spack/spack/directives.py b/lib/spack/spack/directives.py
index 02477bb904a0f04eb16c70688c55b2651d1483e5..9187fbd2035a7855102ecd8d899bee620db103c0 100644
--- a/lib/spack/spack/directives.py
+++ b/lib/spack/spack/directives.py
@@ -248,11 +248,10 @@ def patch(pkg, url_or_filename, level=1, when=None):
     if when is None:
         when = pkg.name
     when_spec = parse_anonymous_spec(when, pkg.name)
-
     cur_patches = pkg.patches.setdefault(when_spec, [])
     # if this spec is identical to some other, then append this
     # patch to the existing list.
-    cur_patches.append(Patch(pkg.name, url_or_filename, level))
+    cur_patches.append(Patch(pkg, url_or_filename, level))
 
 
 @directive('variants')
diff --git a/lib/spack/spack/directory_layout.py b/lib/spack/spack/directory_layout.py
index d91fbe9f4e9544db7bca0ba3908461bb1325825e..3e416a6a1fc40a605024e51c4cfffb8965041077 100644
--- a/lib/spack/spack/directory_layout.py
+++ b/lib/spack/spack/directory_layout.py
@@ -213,7 +213,6 @@ def read_spec(self, path):
 
         # Specs read from actual installations are always concrete
         spec._mark_concrete()
-
         return spec
 
 
diff --git a/lib/spack/spack/fetch_strategy.py b/lib/spack/spack/fetch_strategy.py
index 0657146bf63dac0bc0187a647821275a5b78efc5..337dd1e19885e5078957da458ce6be7e95e48b40 100644
--- a/lib/spack/spack/fetch_strategy.py
+++ b/lib/spack/spack/fetch_strategy.py
@@ -154,7 +154,7 @@ def fetch(self):
 
         # Run curl but grab the mime type from the http headers
         headers = spack.curl(
-            *curl_args, return_output=True, fail_on_error=False)
+            *curl_args, output=str, fail_on_error=False)
 
         if spack.curl.returncode != 0:
             # clean up archive on failure.
@@ -375,7 +375,7 @@ def __init__(self, **kwargs):
 
     @property
     def git_version(self):
-        vstring = self.git('--version', return_output=True).lstrip('git version ')
+        vstring = self.git('--version', output=str).lstrip('git version ')
         return Version(vstring)
 
 
@@ -518,7 +518,7 @@ def fetch(self):
 
     def _remove_untracked_files(self):
         """Removes untracked files in an svn repository."""
-        status = self.svn('status', '--no-ignore', return_output=True)
+        status = self.svn('status', '--no-ignore', output=str)
         self.svn('status', '--no-ignore')
         for line in status.split('\n'):
             if not re.match('^[I?]', line):
diff --git a/lib/spack/spack/graph.py b/lib/spack/spack/graph.py
index e8c5cfb08089548f7b425f248a796405bd76baf5..f3732dfbff8565b3cf3fa109da724e8cb1f6cd33 100644
--- a/lib/spack/spack/graph.py
+++ b/lib/spack/spack/graph.py
@@ -523,7 +523,7 @@ def quote(string):
         return '"%s"' % string
 
     if not specs:
-        specs = [p.name for p in spack.db.all_packages()]
+        specs = [p.name for p in spack.repo.all_packages()]
     else:
         roots = specs
         specs = set()
diff --git a/lib/spack/spack/mirror.py b/lib/spack/spack/mirror.py
index 1d9b0e7ef25496cd4afeabff6cd57a38c535c239..341cc4cb8858bccccc71719f5a9bfe87fed57a7f 100644
--- a/lib/spack/spack/mirror.py
+++ b/lib/spack/spack/mirror.py
@@ -78,6 +78,7 @@ def get_matching_versions(specs, **kwargs):
             continue
 
         num_versions = kwargs.get('num_versions', 0)
+        matching_spec = []
         for i, v in enumerate(reversed(sorted(pkg.versions))):
             # Generate no more than num_versions versions for each spec.
             if num_versions and i >= num_versions:
@@ -88,7 +89,11 @@ def get_matching_versions(specs, **kwargs):
                 s = Spec(pkg.name)
                 s.versions = VersionList([v])
                 s.variants = spec.variants.copy()
-                matching.append(s)
+                matching_spec.append(s)
+
+        if not matching_spec:
+            tty.warn("No known version matches spec: %s" % spec)
+        matching.extend(matching_spec)
 
     return matching
 
@@ -147,7 +152,11 @@ def create(path, specs, **kwargs):
     # Get the absolute path of the root before we start jumping around.
     mirror_root = os.path.abspath(path)
     if not os.path.isdir(mirror_root):
-        mkdirp(mirror_root)
+        try:
+            mkdirp(mirror_root)
+        except OSError as e:
+            raise MirrorError(
+                "Cannot create directory '%s':" % mirror_root, str(e))
 
     # Things to keep track of while parsing specs.
     present  = []
@@ -164,7 +173,11 @@ def create(path, specs, **kwargs):
             # create a subdirectory for the current package@version
             archive_path = os.path.abspath(join_path(mirror_root, mirror_archive_path(spec)))
             subdir = os.path.dirname(archive_path)
-            mkdirp(subdir)
+            try:
+                mkdirp(subdir)
+            except OSError as e:
+                raise MirrorError(
+                    "Cannot create directory '%s':" % subdir, str(e))
 
             if os.path.exists(archive_path):
                 tty.msg("Already added %s" % spec.format("$_$@"))
diff --git a/lib/spack/spack/package.py b/lib/spack/spack/package.py
index 84bcb15f7fe34142ccec8e7d7340e479ce7211e6..8cb947c276c4cd2163630169669bf3392becc805 100644
--- a/lib/spack/spack/package.py
+++ b/lib/spack/spack/package.py
@@ -34,7 +34,9 @@
 README.
 """
 import os
+import errno
 import re
+import shutil
 import time
 import itertools
 import subprocess
@@ -372,7 +374,7 @@ def __init__(self, spec):
         self._total_time = 0.0
 
         if self.is_extension:
-            spack.db.get(self.extendee_spec)._check_extendable()
+            spack.repo.get(self.extendee_spec)._check_extendable()
 
 
     @property
@@ -564,7 +566,7 @@ def preorder_traversal(self, visited=None, **kwargs):
                     yield spec
                 continue
 
-            for pkg in spack.db.get(name).preorder_traversal(visited, **kwargs):
+            for pkg in spack.repo.get(name).preorder_traversal(visited, **kwargs):
                 yield pkg
 
 
@@ -629,7 +631,7 @@ def remove_prefix(self):
         spack.install_layout.remove_install_directory(self.spec)
 
 
-    def do_fetch(self):
+    def do_fetch(self, mirror_only=False):
         """Creates a stage directory and downloads the taball for this package.
            Working directory will be set to the stage directory.
         """
@@ -654,7 +656,7 @@ def do_fetch(self):
                 raise FetchError(
                     "Will not fetch %s." % self.spec.format('$_$@'), checksum_msg)
 
-        self.stage.fetch()
+        self.stage.fetch(mirror_only)
 
         ##########
         # Fetch resources
@@ -675,7 +677,8 @@ def do_fetch(self):
         if spack.do_checksum and self.version in self.versions:
             self.stage.check()
 
-    def do_stage(self):
+
+    def do_stage(self, mirror_only=False):
         """Unpacks the fetched tarball, then changes into the expanded tarball
            directory."""
         if not self.spec.concrete:
@@ -689,14 +692,15 @@ def _expand_archive(stage, name=self.name):
             else:
                 tty.msg("Already staged %s in %s." % (name, stage.path))
 
-
-        self.do_fetch()
+        self.do_fetch(mirror_only)
         _expand_archive(self.stage)
 
         ##########
         # Stage resources in appropriate path
         resources = self._get_resources()
-        for resource in resources:
+        # TODO: sort so that nested resources are staged after the resources
+        # they nest inside; a better solution would be good.
+        for resource in sorted(resources, key=lambda res: len(res.destination)):
             stage = resource.fetcher.stage
             _expand_archive(stage, resource.name)
             # Turn placement into a dict with relative paths
@@ -705,11 +709,23 @@ def _expand_archive(stage, name=self.name):
                 placement = {'': placement}
             # Make the paths in the dictionary absolute and link
             for key, value in placement.iteritems():
-                link_path = join_path(self.stage.source_path, resource.destination, value)
+                target_path = join_path(self.stage.source_path, resource.destination)
+                link_path = join_path(target_path, value)
                 source_path = join_path(stage.source_path, key)
+
+                try:
+                    os.makedirs(target_path)
+                except OSError as err:
+                    if err.errno == errno.EEXIST and os.path.isdir(target_path):
+                        pass
+                    else: raise
+
+                # NOTE: a reasonable fix for the TODO above might be to have
+                # these expand in place, but expand_archive does not offer
+                # this
+
                 if not os.path.exists(link_path):
-                    # Create a symlink
-                    os.symlink(source_path, link_path)
+                    shutil.move(source_path, link_path)
         ##########
         self.stage.chdir_to_source()
 
@@ -792,6 +808,12 @@ def do_patch(self):
             touch(no_patches_file)
 
 
+    @property
+    def namespace(self):
+        namespace, dot, module = self.__module__.rpartition('.')
+        return namespace
+
+
     def do_fake_install(self):
         """Make a fake install directory contaiing a 'fake' file in bin."""
         mkdirp(self.prefix.bin)
@@ -813,10 +835,6 @@ def _resource_stage(self, resource):
         resource_stage_folder = '-'.join(pieces)
         return resource_stage_folder
 
-    def _build_logger(self, log_path):
-        """Create a context manager to log build output."""
-
-
 
     def do_install(self,
                    keep_prefix=False,  keep_stage=False, ignore_deps=False,
@@ -870,7 +888,7 @@ def cleanup():
                 tty.warn("Keeping install prefix in place despite error.",
                          "Spack will think this package is installed." +
                          "Manually remove this directory to fix:",
-                         self.prefix)
+                         self.prefix, wrap=True)
 
 
         def real_work():
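
The makedirs/EEXIST handling added to do_stage() above is the standard
Python 2 "mkdir -p" idiom; a standalone sketch (the helper name is
illustrative, not part of the patch):

    import errno
    import os

    def mkdir_p(path):
        """Create path, tolerating a directory that already exists."""
        try:
            os.makedirs(path)
        except OSError as err:
            # Re-raise anything other than "already exists as a directory".
            if err.errno != errno.EEXIST or not os.path.isdir(path):
                raise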
diff --git a/lib/spack/spack/packages.py b/lib/spack/spack/packages.py
deleted file mode 100644
index 080644fb90049afca58f0ca644781dac208784a5..0000000000000000000000000000000000000000
--- a/lib/spack/spack/packages.py
+++ /dev/null
@@ -1,210 +0,0 @@
-##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (as published by
-# the Free Software Foundation) version 2.1 dated February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-import os
-import sys
-import inspect
-import glob
-import imp
-
-import llnl.util.tty as tty
-from llnl.util.filesystem import join_path
-from llnl.util.lang import *
-
-import spack.error
-import spack.spec
-from spack.virtual import ProviderIndex
-from spack.util.naming import mod_to_class, validate_module_name
-
-# Name of module under which packages are imported
-_imported_packages_module = 'spack.packages'
-
-# Name of the package file inside a package directory
-_package_file_name = 'package.py'
-
-
-def _autospec(function):
-    """Decorator that automatically converts the argument of a single-arg
-       function to a Spec."""
-    def converter(self, spec_like, **kwargs):
-        if not isinstance(spec_like, spack.spec.Spec):
-            spec_like = spack.spec.Spec(spec_like)
-        return function(self, spec_like, **kwargs)
-    return converter
-
-
-class PackageDB(object):
-    def __init__(self, root):
-        """Construct a new package database from a root directory."""
-        self.root = root
-        self.instances = {}
-        self.provider_index = None
-
-
-    @_autospec
-    def get(self, spec, **kwargs):
-        if spec.virtual:
-            raise UnknownPackageError(spec.name)
-
-        key = hash(spec)
-        if kwargs.get('new', False):
-            if key in self.instances:
-                del self.instances[key]
-
-        if not key in self.instances:
-            package_class = self.get_class_for_package_name(spec.name)
-            try:
-                copy = spec.copy() # defensive copy.  Package owns its spec.
-                self.instances[key] = package_class(copy)
-            except Exception, e:
-                if spack.debug:
-                    sys.excepthook(*sys.exc_info())
-                raise FailedConstructorError(spec.name, e)
-
-        return self.instances[key]
-
-
-    @_autospec
-    def delete(self, spec):
-        """Force a package to be recreated."""
-        del self.instances[spec.dag_hash()]
-
-
-    def purge(self):
-        """Clear entire package instance cache."""
-        self.instances.clear()
-
-
-    @_autospec
-    def providers_for(self, vpkg_spec):
-        if self.provider_index is None:
-            self.provider_index = ProviderIndex(self.all_package_names())
-
-        providers = self.provider_index.providers_for(vpkg_spec)
-        if not providers:
-            raise UnknownPackageError(vpkg_spec.name)
-        return providers
-
-
-    @_autospec
-    def extensions_for(self, extendee_spec):
-        return [p for p in self.all_packages() if p.extends(extendee_spec)]
-
-
-    def dirname_for_package_name(self, pkg_name):
-        """Get the directory name for a particular package.  This is the
-           directory that contains its package.py file."""
-        return join_path(self.root, pkg_name)
-
-
-    def filename_for_package_name(self, pkg_name):
-        """Get the filename for the module we should load for a particular
-           package.  Packages for a pacakge DB live in
-           ``$root/<package_name>/package.py``
-
-           This will return a proper package.py path even if the
-           package doesn't exist yet, so callers will need to ensure
-           the package exists before importing.
-        """
-        validate_module_name(pkg_name)
-        pkg_dir = self.dirname_for_package_name(pkg_name)
-        return join_path(pkg_dir, _package_file_name)
-
-
-    @memoized
-    def all_package_names(self):
-        """Generator function for all packages.  This looks for
-           ``<pkg_name>/package.py`` files within the root direcotry"""
-        all_package_names = []
-        for pkg_name in os.listdir(self.root):
-            pkg_dir  = join_path(self.root, pkg_name)
-            pkg_file = join_path(pkg_dir, _package_file_name)
-            if os.path.isfile(pkg_file):
-                all_package_names.append(pkg_name)
-            all_package_names.sort()
-        return all_package_names
-
-
-    def all_packages(self):
-        for name in self.all_package_names():
-            yield self.get(name)
-
-
-    @memoized
-    def exists(self, pkg_name):
-        """Whether a package with the supplied name exists ."""
-        return os.path.exists(self.filename_for_package_name(pkg_name))
-
-
-    @memoized
-    def get_class_for_package_name(self, pkg_name):
-        """Get an instance of the class for a particular package.
-
-           This method uses Python's ``imp`` package to load python
-           source from a Spack package's ``package.py`` file.  A
-           normal python import would only load each package once, but
-           because we do this dynamically, the method needs to be
-           memoized to ensure there is only ONE package class
-           instance, per package, per database.
-        """
-        file_path = self.filename_for_package_name(pkg_name)
-
-        if os.path.exists(file_path):
-            if not os.path.isfile(file_path):
-                tty.die("Something's wrong. '%s' is not a file!" % file_path)
-            if not os.access(file_path, os.R_OK):
-                tty.die("Cannot read '%s'!" % file_path)
-        else:
-            raise UnknownPackageError(pkg_name)
-
-        class_name = mod_to_class(pkg_name)
-        try:
-            module_name = _imported_packages_module + '.' + pkg_name
-            module = imp.load_source(module_name, file_path)
-
-        except ImportError, e:
-            tty.die("Error while importing %s from %s:\n%s" % (
-                pkg_name, file_path, e.message))
-
-        cls = getattr(module, class_name)
-        if not inspect.isclass(cls):
-            tty.die("%s.%s is not a class" % (pkg_name, class_name))
-
-        return cls
-
-
-class UnknownPackageError(spack.error.SpackError):
-    """Raised when we encounter a package spack doesn't have."""
-    def __init__(self, name):
-        super(UnknownPackageError, self).__init__("Package '%s' not found." % name)
-        self.name = name
-
-
-class FailedConstructorError(spack.error.SpackError):
-    """Raised when a package's class constructor fails."""
-    def __init__(self, name, reason):
-        super(FailedConstructorError, self).__init__(
-            "Class constructor failed for package '%s'." % name,
-            str(reason))
-        self.name = name
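
The `_autospec` decorator deleted here survives in the new repository.py
below; it is what lets these APIs take either a spec string or a Spec
object. A usage sketch (the spec strings are illustrative):

    # Equivalent calls once the decorator has wrapped providers_for():
    spack.repo.providers_for('mpi@2')
    spack.repo.providers_for(spack.spec.Spec('mpi@2'))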
diff --git a/lib/spack/spack/patch.py b/lib/spack/spack/patch.py
index 42f2105f52cf01c1f0685ac6344f5aa4f1e0d34c..b82a047753d04e101b7c11376fbd14829eb180cf 100644
--- a/lib/spack/spack/patch.py
+++ b/lib/spack/spack/patch.py
@@ -41,8 +41,8 @@ class Patch(object):
     """This class describes a patch to be applied to some expanded
        source code."""
 
-    def __init__(self, pkg_name, path_or_url, level):
-        self.pkg_name = pkg_name
+    def __init__(self, pkg, path_or_url, level):
+        self.pkg_name = pkg.name
         self.path_or_url = path_or_url
         self.path = None
         self.url = None
@@ -54,7 +54,7 @@ def __init__(self, pkg_name, path_or_url, level):
         if '://' in path_or_url:
             self.url = path_or_url
         else:
-            pkg_dir = spack.db.dirname_for_package_name(pkg_name)
+            pkg_dir = spack.repo.dirname_for_package_name(self.pkg_name)
             self.path = join_path(pkg_dir, path_or_url)
             if not os.path.isfile(self.path):
-                raise NoSuchPatchFileError(pkg_name, self.path)
+                raise NoSuchPatchFileError(self.pkg_name, self.path)
diff --git a/lib/spack/spack/repository.py b/lib/spack/spack/repository.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5df1168b66e5f70403a8cf09dd3402ddda30f3f
--- /dev/null
+++ b/lib/spack/spack/repository.py
@@ -0,0 +1,752 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import os
+import exceptions
+import sys
+import inspect
+import imp
+import re
+import traceback
+from bisect import bisect_left
+from external import yaml
+
+import llnl.util.tty as tty
+from llnl.util.filesystem import join_path
+
+import spack.error
+import spack.config
+import spack.spec
+from spack.virtual import ProviderIndex
+from spack.util.naming import *
+
+#
+# Super-namespace for all packages.
+# Package modules are imported as spack.pkg.<namespace>.<pkg-name>.
+#
+repo_namespace     = 'spack.pkg'
+
+#
+# These names describe how repos should be laid out in the filesystem.
+#
+repo_config_name   = 'repo.yaml'   # Top-level filename for repo config.
+packages_dir_name  = 'packages'    # Top-level repo directory containing pkgs.
+package_file_name  = 'package.py'  # Filename for packages in a repository.
+
+# Guaranteed unused default value for some functions.
+NOT_PROVIDED = object()
+
+
+def _autospec(function):
+    """Decorator that automatically converts the argument of a single-arg
+       function to a Spec."""
+    def converter(self, spec_like, *args, **kwargs):
+        if not isinstance(spec_like, spack.spec.Spec):
+            spec_like = spack.spec.Spec(spec_like)
+        return function(self, spec_like, *args, **kwargs)
+    return converter
+
+
+def _make_namespace_module(ns):
+    module = imp.new_module(ns)
+    module.__file__ = "(spack namespace)"
+    module.__path__ = []
+    module.__package__ = ns
+    return module
+
+
+def substitute_spack_prefix(path):
+    """Replaces instances of $spack with Spack's prefix."""
+    return re.sub(r'^\$spack', spack.prefix, path)
+
+
+def canonicalize_path(path):
+    """Substitute $spack, expand user home, take abspath."""
+    path = substitute_spack_prefix(path)
+    path = os.path.expanduser(path)
+    path = os.path.abspath(path)
+    return path
+
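+# A usage sketch for canonicalize_path (the paths shown are illustrative):
+#
+#     canonicalize_path('$spack/var/spack/repos/builtin')
+#     # -> '<spack prefix>/var/spack/repos/builtin'
+#     canonicalize_path('~/my-repo')
+#     # -> '/home/<user>/my-repo'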
+
+class RepoPath(object):
+    """A RepoPath is a list of repos that function as one.
+
+       It functions exactly like a Repo, but it operates on the
+       combined results of the Repos in its list instead of on a
+       single package repository.
+    """
+    def __init__(self, *repo_dirs, **kwargs):
+        # super-namespace for all packages in the RepoPath
+        self.super_namespace = kwargs.get('namespace', repo_namespace)
+
+        self.repos = []
+        self.by_namespace = NamespaceTrie()
+        self.by_path = {}
+
+        self._all_package_names = []
+        self._provider_index = None
+
+        # If repo_dirs is empty, just use the configuration
+        if not repo_dirs:
+            repo_dirs = spack.config.get_config('repos')
+            if not repo_dirs:
+                raise NoRepoConfiguredError(
+                    "Spack configuration contains no package repositories.")
+
+        # Add each repo to this path.
+        for root in repo_dirs:
+            try:
+                repo = Repo(root, self.super_namespace)
+                self.put_last(repo)
+            except RepoError as e:
+                tty.warn("Failed to initialize repository at '%s'." % root,
+                         e.message,
+                         "To remove the bad repository, run this command:",
+                         "    spack repo rm %s" % root)
+
+
+    def swap(self, other):
+        """Convenience function to make swapping repostiories easier.
+
+        This is currently used by mock tests.
+        TODO: Maybe there is a cleaner way.
+
+        """
+        attrs = ['repos',
+                 'by_namespace',
+                 'by_path',
+                 '_all_package_names',
+                 '_provider_index']
+        for attr in attrs:
+            tmp = getattr(self, attr)
+            setattr(self, attr, getattr(other, attr))
+            setattr(other, attr, tmp)
+
+
+    def _add(self, repo):
+        """Add a repository to the namespace and path indexes.
+
+        Checks for duplicates -- two repos can't have the same root
+        directory, or provide the same namespace.
+
+        """
+        if repo.root in self.by_path:
+            raise DuplicateRepoError("Duplicate repository: '%s'" % repo.root)
+
+        if repo.namespace in self.by_namespace:
+            raise DuplicateRepoError(
+                "Package repos '%s' and '%s' both provide namespace %s."
+                % (repo.root, self.by_namespace[repo.namespace].root, repo.namespace))
+
+        # Add repo to the pkg indexes
+        self.by_namespace[repo.full_namespace] = repo
+        self.by_path[repo.root] = repo
+
+        # add names to the cached name list
+        new_pkgs = set(repo.all_package_names())
+        new_pkgs.update(set(self._all_package_names))
+        self._all_package_names = sorted(new_pkgs, key=lambda n: n.lower())
+
+
+    def put_first(self, repo):
+        """Add repo first in the search path."""
+        self._add(repo)
+        self.repos.insert(0, repo)
+
+
+    def put_last(self, repo):
+        """Add repo last in the search path."""
+        self._add(repo)
+        self.repos.append(repo)
+
+
+    def remove(self, repo):
+        """Remove a repo from the search path."""
+        if repo in self.repos:
+            self.repos.remove(repo)
+
+
+    def get_repo(self, namespace, default=NOT_PROVIDED):
+        """Get a repository by namespace.
+        Arguments
+          namespace
+            Look up this namespace in the RepoPath, and return
+            it if found.
+
+        Optional Arguments
+          default
+            If default is provided, return it when the namespace
+            isn't found.  If not, raise an UnknownNamespaceError.
+        """
+        fullspace = '%s.%s' % (self.super_namespace, namespace)
+        if fullspace not in self.by_namespace:
+            if default is NOT_PROVIDED:
+                raise UnknownNamespaceError(namespace)
+            return default
+        return self.by_namespace[fullspace]
+
+
+    def first_repo(self):
+        """Get the first repo in precedence order."""
+        return self.repos[0] if self.repos else None
+
+
+    def all_package_names(self):
+        """Return all unique package names in all repositories."""
+        return self._all_package_names
+
+
+    def all_packages(self):
+        for name in self.all_package_names():
+            yield self.get(name)
+
+
+    @_autospec
+    def providers_for(self, vpkg_spec):
+        if self._provider_index is None:
+            self._provider_index = ProviderIndex(self.all_package_names())
+
+        providers = self._provider_index.providers_for(vpkg_spec)
+        if not providers:
+            raise UnknownPackageError(vpkg_spec.name)
+        return providers
+
+
+    @_autospec
+    def extensions_for(self, extendee_spec):
+        return [p for p in self.all_packages() if p.extends(extendee_spec)]
+
+
+    def find_module(self, fullname, path=None):
+        """Implements precedence for overlaid namespaces.
+
+        Loop checks each namespace in self.repos for packages, and
+        also handles loading empty containing namespaces.
+
+        """
+        # Namespaces are added to the repo; package modules are leaves.
+        namespace, dot, module_name = fullname.rpartition('.')
+
+        # If it's a module in some repo, or if it is the repo's
+        # namespace, let the repo handle it.
+        for repo in self.repos:
+            if namespace == repo.full_namespace:
+                if repo.real_name(module_name):
+                    return repo
+            elif fullname == repo.full_namespace:
+                return repo
+
+        # No repo provides the namespace, but it is a valid prefix of
+        # something in the RepoPath.
+        if self.by_namespace.is_prefix(fullname):
+            return self
+
+        return None
+
+
+    def load_module(self, fullname):
+        """Handles loading container namespaces when necessary.
+
+        See ``Repo`` for how actual package modules are loaded.
+        """
+        if fullname in sys.modules:
+            return sys.modules[fullname]
+
+        # partition fullname into prefix and module name.
+        namespace, dot, module_name = fullname.rpartition('.')
+
+        if not self.by_namespace.is_prefix(fullname):
+            raise ImportError("No such Spack repo: %s" % fullname)
+
+        module = _make_namespace_module(namespace)
+        module.__loader__ = self
+        sys.modules[fullname] = module
+        return module
+
+
+    @_autospec
+    def repo_for_pkg(self, spec):
+        """Given a spec, get the repository for its package."""
+        # If the spec already has a namespace, then return the
+        # corresponding repo if we know about it.
+        if spec.namespace:
+            fullspace = '%s.%s' % (self.super_namespace, spec.namespace)
+            if fullspace not in self.by_namespace:
+                raise UnknownNamespaceError(spec.namespace)
+            return self.by_namespace[fullspace]
+
+        # If there's no namespace, search in the RepoPath.
+        for repo in self.repos:
+            if spec.name in repo:
+                return repo
+        else:
+            raise UnknownPackageError(spec.name)
+
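+    # Lookup sketch: an explicit namespace pins the repo; otherwise the
+    # first repo in precedence order that knows the package wins
+    # (names below are illustrative):
+    #
+    #     repo_path.repo_for_pkg(Spec('builtin.mpich'))  # by namespace
+    #     repo_path.repo_for_pkg('mpich')                # by precedence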
+
+    @_autospec
+    def get(self, spec, new=False):
+        """Find a repo that contains the supplied spec's package.
+
+           Raises UnknownPackageError if not found.
+        """
+        return self.repo_for_pkg(spec).get(spec, new=new)
+
+
+    def dirname_for_package_name(self, pkg_name):
+        return self.repo_for_pkg(pkg_name).dirname_for_package_name(pkg_name)
+
+
+    def filename_for_package_name(self, pkg_name):
+        return self.repo_for_pkg(pkg_name).filename_for_package_name(pkg_name)
+
+
+    def exists(self, pkg_name):
+        return any(repo.exists(pkg_name) for repo in self.repos)
+
+
+    def __contains__(self, pkg_name):
+        return self.exists(pkg_name)
+
+
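+# A construction-and-swap sketch (the repo root is illustrative):
+#
+#     repos = RepoPath('/path/to/my/repo')
+#     spack.repo.swap(repos)   # run against the swapped-in repositories
+#     spack.repo.swap(repos)   # a second swap restores the original state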
+
+class Repo(object):
+    """Class representing a package repository in the filesystem.
+
+    Each package repository must have a top-level configuration file
+    called `repo.yaml`.
+
+    Currently, `repo.yaml` must define:
+
+    `namespace`:
+        A Python namespace where the repository's packages should live.
+
+    """
+    def __init__(self, root, namespace=repo_namespace):
+        """Instantiate a package repository from a filesystem path.
+
+        Arguments:
+        root        The root directory of the repository.
+
+        namespace   A super-namespace that will contain the repo-defined
+                    namespace (this is generally just `spack.pkg`). The
+                    super-namespace is Spack's way of separating repositories
+                    from other python namespaces.
+
+        """
+        # Root directory, containing _repo.yaml and package dirs
+        # Allow roots to be spack-relative by starting with '$spack'
+        self.root = canonicalize_path(root)
+
+        # super-namespace for all packages in the Repo
+        self.super_namespace = namespace
+
+        # check and raise BadRepoError on fail.
+        def check(condition, msg):
+            if not condition:
+                raise BadRepoError(msg)
+
+        # Validate repository layout.
+        self.config_file   = join_path(self.root, repo_config_name)
+        check(os.path.isfile(self.config_file),
+              "No %s found in '%s'" % (repo_config_name, root))
+        self.packages_path = join_path(self.root, packages_dir_name)
+        check(os.path.isdir(self.packages_path),
+              "No directory '%s' found in '%s'" % (repo_config_name, root))
+
+        # Read configuration and validate namespace
+        config = self._read_config()
+        check('namespace' in config, '%s must define a namespace.'
+              % join_path(root, repo_config_name))
+
+        self.namespace = config['namespace']
+        check(re.match(r'[a-zA-Z][a-zA-Z0-9_.]+', self.namespace),
+              ("Invalid namespace '%s' in repo '%s'. "  % (self.namespace, self.root)) +
+              "Namespaces must be valid python identifiers separated by '.'")
+
+        # Set up 'full_namespace' to include the super-namespace
+        if self.super_namespace:
+            self.full_namespace = "%s.%s" % (self.super_namespace, self.namespace)
+        else:
+            self.full_namespace = self.namespace
+
+        # Keep name components around for checking prefixes.
+        self._names = self.full_namespace.split('.')
+
+        # These are internal cache variables.
+        self._modules = {}
+        self._classes = {}
+        self._instances = {}
+        self._provider_index = None
+        self._all_package_names = None
+
+        # make sure the namespace for packages in this repo exists.
+        self._create_namespace()
+
+
+    def _create_namespace(self):
+        """Create this repo's namespace module and insert it into sys.modules.
+
+        Ensures that modules loaded via the repo have a home, and that
+        we don't get runtime warnings from Python's module system.
+
+        """
+        parent = None
+        for l in range(1, len(self._names)+1):
+            ns = '.'.join(self._names[:l])
+            if ns not in sys.modules:
+                module = _make_namespace_module(ns)
+                module.__loader__ = self
+                sys.modules[ns] = module
+
+                # Ensure the namespace is an attribute of its parent,
+                # if it has not been set by something else already.
+                #
+                # This ensures that we can do things like:
+                #    import spack.pkg.builtin.mpich as mpich
+                if parent:
+                    modname = self._names[l-1]
+                    if not hasattr(parent, modname):
+                        setattr(parent, modname, module)
+            else:
+                # no need to set up a module, but keep track of the parent.
+                module = sys.modules[ns]
+            parent = module
+
+
+    def real_name(self, import_name):
+        """Allow users to import Spack packages using Python identifiers.
+
+        A python identifier might map to many different Spack package
+        names due to hyphen/underscore ambiguity.
+
+        Easy example:
+            num3proxy   -> 3proxy
+
+        Ambiguous:
+            foo_bar -> foo_bar, foo-bar
+
+        More ambiguous:
+            foo_bar_baz -> foo_bar_baz, foo-bar-baz, foo_bar-baz, foo-bar_baz
+        """
+        if import_name in self:
+            return import_name
+
+        options = possible_spack_module_names(import_name)
+        options.remove(import_name)
+        for name in options:
+            if name in self:
+                return name
+        return None
+
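+    # For example (assuming only 'foo-bar' exists in this repo):
+    #
+    #     repo.real_name('foo_bar')   # -> 'foo-bar'
+    #     repo.real_name('nonesuch')  # -> None when no candidate matches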
+
+    def is_prefix(self, fullname):
+        """True if fullname is a prefix of this Repo's namespace."""
+        parts = fullname.split('.')
+        return self._names[:len(parts)] == parts
+
+
+    def find_module(self, fullname, path=None):
+        """Python find_module import hook.
+
+        Returns this Repo if it can load the module; None if not.
+        """
+        if self.is_prefix(fullname):
+            return self
+
+        namespace, dot, module_name = fullname.rpartition('.')
+        if namespace == self.full_namespace:
+            if self.real_name(module_name):
+                return self
+
+        return None
+
+
+    def load_module(self, fullname):
+        """Python importer load hook.
+
+        Tries to load the module; raises an ImportError if it can't.
+        """
+        if fullname in sys.modules:
+            return sys.modules[fullname]
+
+        namespace, dot, module_name = fullname.rpartition('.')
+
+        if self.is_prefix(fullname):
+            module = _make_namespace_module(fullname)
+
+        elif namespace == self.full_namespace:
+            real_name = self.real_name(module_name)
+            if not real_name:
+                raise ImportError("No module %s in %s" % (module_name, self))
+            module = self._get_pkg_module(real_name)
+
+        else:
+            raise ImportError("No module %s in %s" % (fullname, self))
+
+        module.__loader__ = self
+        sys.modules[fullname] = module
+        return module
+
+
+    def _read_config(self):
+        """Check for a YAML config file in this db's root directory."""
+        try:
+            with open(self.config_file) as reponame_file:
+                yaml_data = yaml.load(reponame_file)
+
+                if (not yaml_data or 'repo' not in yaml_data or
+                    not isinstance(yaml_data['repo'], dict)):
+                    tty.die("Invalid %s in repository %s"
+                            % (repo_config_name, self.root))
+
+                return yaml_data['repo']
+
+        except exceptions.IOError:
+            tty.die("Error reading %s in repository at %s"
+                    % (self.config_file, self.root))
+
+
+    @_autospec
+    def get(self, spec, new=False):
+        if spec.virtual:
+            raise UnknownPackageError(spec.name)
+
+        if spec.namespace and spec.namespace != self.namespace:
+            raise UnknownPackageError("Repository %s does not contain package %s."
+                                      % (self.namespace, spec.fullname))
+
+        key = hash(spec)
+        if new or key not in self._instances:
+            package_class = self._get_pkg_class(spec.name)
+            try:
+                copy = spec.copy() # defensive copy.  Package owns its spec.
+                self._instances[key] = package_class(copy)
+            except Exception, e:
+                if spack.debug:
+                    sys.excepthook(*sys.exc_info())
+                raise FailedConstructorError(spec.fullname, *sys.exc_info())
+
+        return self._instances[key]
+
+
+    def purge(self):
+        """Clear entire package instance cache."""
+        self._instances.clear()
+
+
+    @_autospec
+    def providers_for(self, vpkg_spec):
+        if self._provider_index is None:
+            self._provider_index = ProviderIndex(self.all_package_names())
+
+        providers = self._provider_index.providers_for(vpkg_spec)
+        if not providers:
+            raise UnknownPackageError(vpkg_spec.name)
+        return providers
+
+
+    @_autospec
+    def extensions_for(self, extendee_spec):
+        return [p for p in self.all_packages() if p.extends(extendee_spec)]
+
+
+    def _check_namespace(self, spec):
+        """Check that the spec's namespace is the same as this repository's."""
+        if spec.namespace and spec.namespace != self.namespace:
+            raise UnknownNamespaceError(spec.namespace)
+
+
+    @_autospec
+    def dirname_for_package_name(self, spec):
+        """Get the directory name for a particular package.  This is the
+           directory that contains its package.py file."""
+        self._check_namespace(spec)
+        return join_path(self.packages_path, spec.name)
+
+
+    @_autospec
+    def filename_for_package_name(self, spec):
+        """Get the filename for the module we should load for a particular
+           package.  Packages for a Repo live in
+           ``$root/<package_name>/package.py``
+
+           This will return a proper package.py path even if the
+           package doesn't exist yet, so callers will need to ensure
+           the package exists before importing.
+        """
+        self._check_namespace(spec)
+        pkg_dir = self.dirname_for_package_name(spec.name)
+        return join_path(pkg_dir, package_file_name)
+
+
+    def all_package_names(self):
+        """Returns a sorted list of all package names in the Repo."""
+        if self._all_package_names is None:
+            self._all_package_names = []
+
+            for pkg_name in os.listdir(self.packages_path):
+                # Skip non-directories in the package root.
+                pkg_dir = join_path(self.packages_path, pkg_name)
+                if not os.path.isdir(pkg_dir):
+                    continue
+
+                # Skip directories without a package.py in them.
+                pkg_file = join_path(self.packages_path, pkg_name, package_file_name)
+                if not os.path.isfile(pkg_file):
+                    continue
+
+                # Warn about invalid names that look like packages.
+                if not valid_module_name(pkg_name):
+                    tty.warn("Skipping package at %s. '%s' is not a valid Spack module name."
+                             % (pkg_dir, pkg_name))
+                    continue
+
+                # All checks passed.  Add it to the list.
+                self._all_package_names.append(pkg_name)
+            self._all_package_names.sort()
+
+        return self._all_package_names
+
+
+    def all_packages(self):
+        for name in self.all_package_names():
+            yield self.get(name)
+
+
+    def exists(self, pkg_name):
+        """Whether a package with the supplied name exists."""
+        # This does a binary search in the sorted list.
+        idx = bisect_left(self.all_package_names(), pkg_name)
+        return (idx < len(self._all_package_names) and
+                self._all_package_names[idx] == pkg_name)
+
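+    # The bisect_left test above is binary search for membership in a
+    # sorted list; in isolation (the values are illustrative):
+    #
+    #     from bisect import bisect_left
+    #     names = ['hdf5', 'mpich', 'zlib']
+    #     i = bisect_left(names, 'mpich')
+    #     assert i < len(names) and names[i] == 'mpich'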
+
+    def _get_pkg_module(self, pkg_name):
+        """Create a module for a particular package.
+
+        This caches the module within this Repo *instance*.  It does
+        *not* add it to ``sys.modules``.  So, you can construct
+        multiple Repos for testing and ensure that the module will be
+        loaded once per repo.
+
+        """
+        if pkg_name not in self._modules:
+            file_path = self.filename_for_package_name(pkg_name)
+
+            if not os.path.exists(file_path):
+                raise UnknownPackageError(pkg_name, self)
+
+            if not os.path.isfile(file_path):
+                tty.die("Something's wrong. '%s' is not a file!" % file_path)
+
+            if not os.access(file_path, os.R_OK):
+                tty.die("Cannot read '%s'!" % file_path)
+
+            # e.g., spack.pkg.builtin.mpich
+            fullname = "%s.%s" % (self.full_namespace, pkg_name)
+
+            module = imp.load_source(fullname, file_path)
+            module.__package__ = self.full_namespace
+            module.__loader__ = self
+            self._modules[pkg_name] = module
+
+        return self._modules[pkg_name]
+
+
+    def _get_pkg_class(self, pkg_name):
+        """Get the class for the package out of its module.
+
+        First loads (or fetches from cache) a module for the
+        package. Then extracts the package class from the module
+        according to Spack's naming convention.
+        """
+        class_name = mod_to_class(pkg_name)
+        module = self._get_pkg_module(pkg_name)
+
+        cls = getattr(module, class_name)
+        if not inspect.isclass(cls):
+            tty.die("%s.%s is not a class" % (pkg_name, class_name))
+
+        return cls
+
+
+    def __str__(self):
+        return "[Repo '%s' at '%s']" % (self.namespace, self.root)
+
+
+    def __repr__(self):
+        return self.__str__()
+
+
+    def __contains__(self, pkg_name):
+        return self.exists(pkg_name)
+
+
+class RepoError(spack.error.SpackError):
+    """Superclass for repository-related errors."""
+
+
+class NoRepoConfiguredError(RepoError):
+    """Raised when there are no repositories configured."""
+
+
+class BadRepoError(RepoError):
+    """Raised when repo layout is invalid."""
+
+
+class DuplicateRepoError(RepoError):
+    """Raised when duplicate repos are added to a RepoPath."""
+
+
+class PackageLoadError(spack.error.SpackError):
+    """Superclass for errors related to loading packages."""
+
+
+class UnknownPackageError(PackageLoadError):
+    """Raised when we encounter a package spack doesn't have."""
+    def __init__(self, name, repo=None):
+        msg = None
+        if repo:
+            msg = "Package %s not found in repository %s." % (name, repo)
+        else:
+            msg = "Package %s not found." % name
+        super(UnknownPackageError, self).__init__(msg)
+        self.name = name
+
+
+class UnknownNamespaceError(PackageLoadError):
+    """Raised when we encounter an unknown namespace"""
+    def __init__(self, namespace):
+        super(UnknownNamespaceError, self).__init__(
+            "Unknown namespace: %s" % namespace)
+
+
+class FailedConstructorError(PackageLoadError):
+    """Raised when a package's class constructor fails."""
+    def __init__(self, name, exc_type, exc_obj, exc_tb):
+        super(FailedConstructorError, self).__init__(
+            "Class constructor failed for package '%s'." % name,
+            '\nCaused by:\n' +
+            ('%s: %s\n' % (exc_type.__name__, exc_obj)) +
+            ''.join(traceback.format_tb(exc_tb)))
+        self.name = name
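
Taken together, the layout constants and _read_config() above imply an
on-disk repository shaped like this (the 'myrepo' namespace and the mpich
package are hypothetical examples):

    myrepo/
        repo.yaml            # must contain:
                             #   repo:
                             #     namespace: myrepo
        packages/
            mpich/
                package.py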
diff --git a/lib/spack/spack/resource.py b/lib/spack/spack/resource.py
index 8d081b45c9625c7511fe9f446811bace5b328fc9..2bf92947fd3beb864a5e86de791ab12a5deb705a 100644
--- a/lib/spack/spack/resource.py
+++ b/lib/spack/spack/resource.py
@@ -6,7 +6,7 @@
 # Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
 # LLNL-CODE-647188
 #
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://llnl.github.io/spack
 # Please also see the LICENSE file for our notice and the LGPL.
 #
 # This program is free software; you can redistribute it and/or modify
@@ -22,9 +22,11 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
-"""
-Describes an optional resource needed for a build. Typically a bunch of sources that can be built in-tree within another
+"""Describes an optional resource needed for a build.
+
+Typically a bunch of sources that can be built in-tree within another
 package to enable optional features.
+
 """
 
 
diff --git a/lib/spack/spack/spec.py b/lib/spack/spack/spec.py
index 037ec97a5e8ae7910358276e29dd3cddacc4be12..10e246bf2e607d1ee4f9b4f25fd363afa3ef98a2 100644
--- a/lib/spack/spack/spec.py
+++ b/lib/spack/spack/spec.py
@@ -412,6 +412,7 @@ def __init__(self, spec_like, *dep_like, **kwargs):
         self.dependencies = other.dependencies
         self.variants = other.variants
         self.variants.spec = self
+        self.namespace = other.namespace
 
         # Specs are by default not assumed to be normal, but in some
         # cases we've read them from a file want to assume normal.
@@ -464,6 +465,13 @@ def _add_dependency(self, spec):
         self.dependencies[spec.name] = spec
         spec.dependents[self.name] = self
 
+    #
+    # Public interface
+    #
+    @property
+    def fullname(self):
+        return '%s.%s' % (self.namespace, self.name) if self.namespace else self.name
+
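+    # A sketch of the behavior (the names are illustrative):
+    #
+    #     Spec('builtin.mpich').fullname  # -> 'builtin.mpich'
+    #     Spec('mpich').fullname          # -> 'mpich' (no namespace yet)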
 
     @property
     def root(self):
@@ -486,7 +494,7 @@ def root(self):
 
     @property
     def package(self):
-        return spack.db.get(self)
+        return spack.repo.get(self)
 
 
     @property
@@ -504,7 +512,7 @@ def virtual(self):
     @staticmethod
     def is_virtual(name):
         """Test if a name is virtual without requiring a Spec."""
-        return not spack.db.exists(name)
+        return not spack.repo.exists(name)
 
 
     @property
@@ -517,11 +525,13 @@ def concrete(self):
             return True
 
         self._concrete = bool(not self.virtual
+                              and self.namespace is not None
                               and self.versions.concrete
                               and self.variants.concrete
                               and self.architecture
                               and self.compiler and self.compiler.concrete
                               and self.dependencies.concrete)
+
         return self._concrete
 
 
@@ -657,6 +667,12 @@ def to_node_dict(self):
             'dependencies' : dict((d, self.dependencies[d].dag_hash())
                                   for d in sorted(self.dependencies))
         }
+
+        # Older concrete specs do not have a namespace.  Omit for
+        # consistent hashing.
+        if not self.concrete or self.namespace:
+            d['namespace'] = self.namespace
+
         if self.compiler:
             d.update(self.compiler.to_dict())
         else:
@@ -681,6 +697,7 @@ def from_node_dict(node):
         node = node[name]
 
         spec = Spec(name)
+        spec.namespace = node.get('namespace', None)
         spec.versions = VersionList.from_dict(node)
         spec.architecture = node['arch']
 
@@ -797,7 +814,7 @@ def _expand_virtual_packages(self):
                 return changed
 
             for spec in virtuals:
-                providers = spack.db.providers_for(spec)
+                providers = spack.repo.providers_for(spec)
                 concrete = spack.concretizer.choose_provider(spec, providers)
                 concrete = concrete.copy()
                 spec._replace_with(concrete)
@@ -833,6 +850,19 @@ def concretize(self):
             changed = any(changes)
             force=True
 
+        for s in self.traverse():
+            # After concretizing, assign namespaces to anything left.
+            # Note that this doesn't count as a "change".  The repository
+            # configuration is constant throughout a spack run, and
+            # normalize and concretize evaluate Packages using Repo.get(),
+            # which respects precedence.  So, a namespace assignment isn't
+            # changing how a package name would have been interpreted and
+            # we can do it as late as possible to allow as much
+            # compatibility across repositories as possible.
+            if s.namespace is None:
+                s.namespace = spack.repo.repo_for_pkg(s.name).namespace
+
+        # Mark everything in the spec as concrete, as well.
         self._mark_concrete()
 
 
@@ -919,7 +949,7 @@ def _evaluate_dependency_conditions(self, name):
         the dependency.  If no conditions are True (and we don't
         depend on it), return None.
         """
-        pkg = spack.db.get(self.name)
+        pkg = spack.repo.get(self.fullname)
         conditions = pkg.dependencies[name]
 
         # evaluate when specs to figure out constraints on the dependency.
@@ -1047,7 +1077,7 @@ def _normalize_helper(self, visited, spec_deps, provider_index):
         any_change = False
         changed = True
 
-        pkg = spack.db.get(self.name)
+        pkg = spack.repo.get(self.fullname)
         while changed:
             changed = False
             for dep_name in pkg.dependencies:
@@ -1068,18 +1098,17 @@ def normalize(self, force=False):
            the root, and ONLY the ones that were explicitly provided are there.
            Normalization turns a partial flat spec into a DAG, where:
 
-           1. ALL dependencies of the root package are in the DAG.
-           2. Each node's dependencies dict only contains its direct deps.
+           1. Known dependencies of the root package are in the DAG.
+           2. Each node's dependencies dict only contains its known direct deps.
            3. There is only ONE unique spec for each package in the DAG.
 
               * This includes virtual packages.  If there a non-virtual
                 package that provides a virtual package that is in the spec,
                 then we replace the virtual package with the non-virtual one.
 
-           4. The spec DAG matches package DAG, including default variant values.
-
            TODO: normalize should probably implement some form of cycle detection,
            to ensure that the spec is actually a DAG.
+
         """
         if self._normal and not force:
             return False
@@ -1125,7 +1154,7 @@ def validate_names(self):
         for spec in self.traverse():
             # Don't get a package for a virtual name.
             if not spec.virtual:
-                spack.db.get(spec.name)
+                spack.repo.get(spec.fullname)
 
             # validate compiler in addition to the package name.
             if spec.compiler:
@@ -1148,6 +1177,10 @@ def constrain(self, other, deps=True):
         if not self.name == other.name:
             raise UnsatisfiableSpecNameError(self.name, other.name)
 
+        if other.namespace is not None:
+            if self.namespace is not None and other.namespace != self.namespace:
+                raise UnsatisfiableSpecNameError(self.fullname, other.fullname)
+
         if not self.versions.overlaps(other.versions):
             raise UnsatisfiableVersionSpecError(self.versions, other.versions)
 
@@ -1191,7 +1224,7 @@ def _constrain_dependencies(self, other):
 
         # TODO: might want more detail than this, e.g. specific deps
         # in violation. if this becomes a priority get rid of this
-        # check and be more specici about what's wrong.
+        # check and be more specific about what's wrong.
         if not other.satisfies_dependencies(self):
             raise UnsatisfiableDependencySpecError(other, self)
 
@@ -1264,7 +1297,7 @@ def satisfies(self, other, deps=True, strict=False):
 
         # A concrete provider can satisfy a virtual dependency.
         if not self.virtual and other.virtual:
-            pkg = spack.db.get(self.name)
+            pkg = spack.repo.get(self.fullname)
             if pkg.provides(other.name):
                 for provided, when_spec in pkg.provided.items():
                     if self.satisfies(when_spec, deps=False, strict=strict):
@@ -1276,6 +1309,11 @@ def satisfies(self, other, deps=True, strict=False):
         if self.name != other.name:
             return False
 
+        # namespaces either match, or other doesn't require one.
+        if other.namespace is not None:
+            if self.namespace is not None and self.namespace != other.namespace:
+                return False
+
         if self.versions and other.versions:
             if not self.versions.satisfies(other.versions, strict=strict):
                 return False
@@ -1375,6 +1413,7 @@ def _dup(self, other, **kwargs):
         self.dependencies = DependencyMap()
         self.variants = other.variants.copy()
         self.variants.spec = self
+        self.namespace = other.namespace
 
         # If we copy dependencies, preserve DAG structure in the new spec
         if kwargs.get('deps', True):
@@ -1493,6 +1532,7 @@ def ne_dag(self, other):
     def _cmp_node(self):
         """Comparison key for just *this node* and not its deps."""
         return (self.name,
+                self.namespace,
                 self.versions,
                 self.variants,
                 self.architecture,
@@ -1530,6 +1570,7 @@ def format(self, format_string='$_$@$%@$+$=', **kwargs):
            in the format string.  The format strings you can provide are::
 
                $_   Package name
+               $.   Full package name (with namespace)
                $@   Version
                $%   Compiler
                $%@  Compiler & compiler version
@@ -1577,6 +1618,8 @@ def write(s, c):
 
                 if c == '_':
                     out.write(fmt % self.name)
+                elif c == '.':
+                    out.write(fmt % self.fullname)
                 elif c == '@':
                     if self.versions and self.versions != _any_version:
                         write(fmt % (c + str(self.versions)), c)
@@ -1725,17 +1768,23 @@ def parse_compiler(self, text):
     def spec(self):
         """Parse a spec out of the input.  If a spec is supplied, then initialize
            and return it instead of creating a new one."""
-        self.check_identifier()
+
+        spec_namespace, dot, spec_name = self.token.value.rpartition('.')
+        if not spec_namespace:
+            spec_namespace = None
+
+        self.check_identifier(spec_name)
 
         # This will init the spec without calling __init__.
         spec = Spec.__new__(Spec)
-        spec.name = self.token.value
+        spec.name = spec_name
         spec.versions = VersionList()
         spec.variants = VariantMap(spec)
         spec.architecture = None
         spec.compiler = None
         spec.dependents   = DependencyMap()
         spec.dependencies = DependencyMap()
+        spec.namespace = spec_namespace
 
         spec._normal = False
         spec._concrete = False
@@ -1829,12 +1878,14 @@ def compiler(self):
         return compiler
 
 
-    def check_identifier(self):
+    def check_identifier(self, id=None):
         """The only identifiers that can contain '.' are versions, but version
            ids are context-sensitive so we have to check on a case-by-case
            basis. Call this if we detect a version id where it shouldn't be.
         """
-        if '.' in self.token.value:
+        if not id:
+            id = self.token.value
+        if '.' in id:
             self.last_token_error("Identifier cannot contain '.'")
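
With the parser changes above, a spec name may carry a dotted namespace
prefix. A behavioral sketch (the package and namespace names are
illustrative and assume the corresponding repo exists):

    from spack.spec import Spec

    s = Spec('builtin.mpich')
    s.name        # -> 'mpich'
    s.namespace   # -> 'builtin'
    s.fullname    # -> 'builtin.mpich'

    # satisfies() and constrain() treat an absent namespace as a wildcard:
    Spec('builtin.mpich').satisfies(Spec('mpich'))  # not blocked by namespace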
 
 
diff --git a/lib/spack/spack/stage.py b/lib/spack/spack/stage.py
index 76ca7273cb91e5b9685036587216fcae9dc825c9..79c9030e20f17d3aece1ff21271ea24ba3aaa03a 100644
--- a/lib/spack/spack/stage.py
+++ b/lib/spack/spack/stage.py
@@ -26,6 +26,7 @@
 import re
 import shutil
 import tempfile
+from urlparse import urljoin
 
 import llnl.util.tty as tty
 from llnl.util.filesystem import *
@@ -96,7 +97,6 @@ def __init__(self, url_or_fetch_strategy, **kwargs):
 
         self.name = kwargs.get('name')
         self.mirror_path = kwargs.get('mirror_path')
-
         self.tmp_root = find_tmp_root()
 
         self.path = None
@@ -239,18 +239,27 @@ def chdir(self):
             tty.die("Setup failed: no such directory: " + self.path)
 
 
-    def fetch(self):
+    def fetch(self, mirror_only=False):
         """Downloads an archive or checks out code from a repository."""
         self.chdir()
 
-        fetchers = [self.default_fetcher]
+        fetchers = []
+        if not mirror_only:
+            fetchers.append(self.default_fetcher)
 
         # TODO: move mirror logic out of here and clean it up!
         # TODO: Or @alalazo may have some ideas about how to use a
         # TODO: CompositeFetchStrategy here.
         self.skip_checksum_for_mirror = True
         if self.mirror_path:
-            urls = ["%s/%s" % (m, self.mirror_path) for m in _get_mirrors()]
+            mirrors = spack.config.get_config('mirrors')
+
+            # Join URLs of mirror roots with mirror paths. urljoin()
+            # strips everything past the final '/' in the root, so we
+            # add a '/' if one is not present.
+            mirror_roots = [root if root.endswith('/') else root + '/'
+                            for root in mirrors.values()]
+            urls = [urljoin(root, self.mirror_path) for root in mirror_roots]
 
             # If this archive is normally fetched from a tarball URL,
             # then use the same digest.  `spack mirror` ensures that
@@ -259,10 +268,11 @@ def fetch(self):
             if isinstance(self.default_fetcher, fs.URLFetchStrategy):
                 digest = self.default_fetcher.digest
 
-            # Have to skip the checkesum for things archived from
+            # Have to skip the checksum for things archived from
             # repositories.  How can this be made safer?
             self.skip_checksum_for_mirror = not bool(digest)
 
+            # Add URL strategies for all the mirrors with the digest
             for url in urls:
                 fetchers.insert(0, fs.URLFetchStrategy(url, digest))
 
@@ -370,7 +380,7 @@ def destroy(self):
 
 def _get_mirrors():
     """Get mirrors from spack configuration."""
-    config = spack.config.get_mirror_config()
+    config = spack.config.get_config('mirrors')
     return [val for name, val in config.iteritems()]
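
The trailing-slash normalization above matters because urlparse.urljoin
resolves its second argument relative to the *directory* of the first;
a quick illustration (the URLs are made up):

    from urlparse import urljoin

    urljoin('http://mirror.example.com/spack', 'mpich/mpich-3.0.4.tar.gz')
    # -> 'http://mirror.example.com/mpich/mpich-3.0.4.tar.gz'   (root lost)

    urljoin('http://mirror.example.com/spack/', 'mpich/mpich-3.0.4.tar.gz')
    # -> 'http://mirror.example.com/spack/mpich/mpich-3.0.4.tar.gz'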
 
 
diff --git a/lib/spack/spack/test/__init__.py b/lib/spack/spack/test/__init__.py
index 13cb1d2b789282aeb5461dcea7de9a9628c393c1..a569cbbf359ec533e6205f071c08e49b26753c33 100644
--- a/lib/spack/spack/test/__init__.py
+++ b/lib/spack/spack/test/__init__.py
@@ -24,7 +24,10 @@
 ##############################################################################
 import sys
 import unittest
+import nose
 
+from spack.test.tally_plugin import Tally
+from llnl.util.filesystem import join_path
 import llnl.util.tty as tty
 from llnl.util.tty.colify import colify
 
@@ -59,7 +62,9 @@
               'configure_guess',
               'unit_install',
               'lock',
-              'database']
+              'database',
+              'namespace_trie',
+              'yaml']
 
 
 def list_tests():
@@ -67,7 +72,7 @@ def list_tests():
     return test_names
 
 
-def run(names, verbose=False):
+def run(names, outputDir, verbose=False):
     """Run tests with the supplied names.  Names should be a list.  If
        it's empty, run ALL of Spack's tests."""
     verbosity = 1 if not verbose else 2
@@ -81,28 +86,31 @@ def run(names, verbose=False):
                           "Valid names are:")
                 colify(sorted(test_names), indent=4)
                 sys.exit(1)
-
-    runner = unittest.TextTestRunner(verbosity=verbosity)
-
-    testsRun = errors = failures = 0
+
+    tally = Tally()
     for test in names:
         module = 'spack.test.' + test
         print module
-        suite = unittest.defaultTestLoader.loadTestsFromName(module)
-
+
         tty.msg("Running test: %s" % test)
-        result = runner.run(suite)
-        testsRun += result.testsRun
-        errors   += len(result.errors)
-        failures += len(result.failures)
+
+        runOpts = ["--with-%s" % Tally.name]
+
+        if outputDir:
+            xmlOutputFname = "unittests-{0}.xml".format(test)
+            xmlOutputPath = join_path(outputDir, xmlOutputFname)
+            runOpts += ["--with-xunit", 
+                "--xunit-file={0}".format(xmlOutputPath)]
+        argv = [""] + runOpts + [module]
+        result = nose.run(argv=argv, addplugins=[tally])
 
-    succeeded = not errors and not failures
+    succeeded = not tally.failCount and not tally.errorCount
     tty.msg("Tests Complete.",
-            "%5d tests run" % testsRun,
-            "%5d failures" % failures,
-            "%5d errors" % errors)
+            "%5d tests run" % tally.numberOfTestsRun,
+            "%5d failures" % tally.failCount,
+            "%5d errors" % tally.errorCount)
 
-    if not errors and not failures:
+    if succeeded:
         tty.info("OK", format='g')
     else:
         tty.info("FAIL", format='r')
diff --git a/lib/spack/spack/test/cc.py b/lib/spack/spack/test/cc.py
index 4188b8d5505f50d2e35451afa48a37b67c4eee98..905af28a06d4394178f40be1cf3d757339825843 100644
--- a/lib/spack/spack/test/cc.py
+++ b/lib/spack/spack/test/cc.py
@@ -65,17 +65,17 @@ def setUp(self):
 
     def check_cc(self, command, args, expected):
         os.environ['SPACK_TEST_COMMAND'] = command
-        self.assertEqual(self.cc(*args, return_output=True).strip(), expected)
+        self.assertEqual(self.cc(*args, output=str).strip(), expected)
 
 
     def check_ld(self, command, args, expected):
         os.environ['SPACK_TEST_COMMAND'] = command
-        self.assertEqual(self.ld(*args, return_output=True).strip(), expected)
+        self.assertEqual(self.ld(*args, output=str).strip(), expected)
 
 
     def check_cpp(self, command, args, expected):
         os.environ['SPACK_TEST_COMMAND'] = command
-        self.assertEqual(self.cpp(*args, return_output=True).strip(), expected)
+        self.assertEqual(self.cpp(*args, output=str).strip(), expected)
 
 
     def test_vcheck_mode(self):
diff --git a/lib/spack/spack/test/concretize.py b/lib/spack/spack/test/concretize.py
index 2f8e0c7ec0e958d413338ba303edd320bfdb6459..7f2938aec5aa0495c9c35bec59868893fd31bea1 100644
--- a/lib/spack/spack/test/concretize.py
+++ b/lib/spack/spack/test/concretize.py
@@ -125,22 +125,22 @@ def test_concretize_with_provides_when(self):
            we ask for some advanced version.
         """
         self.assertTrue(not any(spec.satisfies('mpich2@:1.0')
-                                for spec in spack.db.providers_for('mpi@2.1')))
+                                for spec in spack.repo.providers_for('mpi@2.1')))
 
         self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
-                                for spec in spack.db.providers_for('mpi@2.2')))
+                                for spec in spack.repo.providers_for('mpi@2.2')))
 
         self.assertTrue(not any(spec.satisfies('mpich2@:1.1')
-                                for spec in spack.db.providers_for('mpi@2.2')))
+                                for spec in spack.repo.providers_for('mpi@2.2')))
 
         self.assertTrue(not any(spec.satisfies('mpich@:1')
-                                for spec in spack.db.providers_for('mpi@2')))
+                                for spec in spack.repo.providers_for('mpi@2')))
 
         self.assertTrue(not any(spec.satisfies('mpich@:1')
-                                for spec in spack.db.providers_for('mpi@3')))
+                                for spec in spack.repo.providers_for('mpi@3')))
 
         self.assertTrue(not any(spec.satisfies('mpich2')
-                                for spec in spack.db.providers_for('mpi@3')))
+                                for spec in spack.repo.providers_for('mpi@3')))
 
 
     def test_virtual_is_fully_expanded_for_callpath(self):
diff --git a/lib/spack/spack/test/config.py b/lib/spack/spack/test/config.py
index ed11e34c691487c5d7a7e0ee82a352f7713bc6ab..d8be5a855b8e9304e027fd3dd0915a0ea23316a6 100644
--- a/lib/spack/spack/test/config.py
+++ b/lib/spack/spack/test/config.py
@@ -26,49 +26,95 @@
 import shutil
 import os
 from tempfile import mkdtemp
+from ordereddict_backport import OrderedDict
 import spack
-from spack.packages import PackageDB
+import spack.config
 from spack.test.mock_packages_test import *
 
+# Some sample compiler config data
+a_comps = {
+    "all": {
+        "gcc@4.7.3" : {
+            "cc" : "/gcc473",
+            "cxx": "/g++473",
+            "f77": None,
+            "fc" : None },
+        "gcc@4.5.0" : {
+            "cc" : "/gcc450",
+            "cxx": "/g++450",
+            "f77": "/gfortran",
+            "fc" : "/gfortran" },
+        "clang@3.3"  : {
+            "cc" : "<overwritten>",
+            "cxx": "<overwritten>",
+            "f77": "<overwritten>",
+            "fc" : "<overwritten>" }
+    }
+}
+
+b_comps = {
+    "all": {
+        "icc@10.0" : {
+            "cc" : "/icc100",
+            "cxx": "/icc100",
+            "f77": None,
+            "fc" : None },
+        "icc@11.1" : {
+            "cc" : "/icc111",
+            "cxx": "/icp111",
+            "f77": "/ifort",
+            "fc" : "/ifort" },
+        "clang@3.3" : {
+            "cc" : "/clang",
+            "cxx": "/clang++",
+            "f77": None,
+            "fc" : None}
+    }
+}
+
 class ConfigTest(MockPackagesTest):
 
     def setUp(self):
-        self.initmock()
+        super(ConfigTest, self).setUp()
         self.tmp_dir = mkdtemp('.tmp', 'spack-config-test-')
-        spack.config.config_scopes = [('test_low_priority', os.path.join(self.tmp_dir, 'low')),
-                                      ('test_high_priority', os.path.join(self.tmp_dir, 'high'))]
+        spack.config.config_scopes = OrderedDict()
+        spack.config.ConfigScope('test_low_priority', os.path.join(self.tmp_dir, 'low'))
+        spack.config.ConfigScope('test_high_priority', os.path.join(self.tmp_dir, 'high'))
 
     def tearDown(self):
-        self.cleanmock()
+        super(ConfigTest, self).tearDown()
         shutil.rmtree(self.tmp_dir, True)
 
-    def check_config(self, comps):
-        config = spack.config.get_compilers_config()
-        compiler_list = ['cc', 'cxx', 'f77', 'f90']
-        for key in comps:
+
+    def check_config(self, comps, *compiler_names):
+        """Check that named compilers in comps match Spack's config."""
+        config = spack.config.get_config('compilers')
+        compiler_list = ['cc', 'cxx', 'f77', 'fc']
+        for key in compiler_names:
             for c in compiler_list:
-                if comps[key][c] == '/bad':
-                    continue
-                self.assertEqual(comps[key][c], config[key][c])
+                expected = comps['all'][key][c]
+                actual = config['all'][key][c]
+                self.assertEqual(expected, actual)
 
 
-    def test_write_key(self):
-        a_comps =  {"gcc@4.7.3" : { "cc" : "/gcc473", "cxx" : "/g++473", "f77" : None, "f90" : None },
-         "gcc@4.5.0" : { "cc" : "/gcc450", "cxx" : "/g++450", "f77" : "/gfortran", "f90" : "/gfortran" },
-         "clang@3.3"  : { "cc" : "/bad", "cxx" : "/bad", "f77" : "/bad", "f90" : "/bad" }}
+    def test_write_key_in_memory(self):
+        # Write b_comps "on top of" a_comps.
+        spack.config.update_config('compilers', a_comps, 'test_low_priority')
+        spack.config.update_config('compilers', b_comps, 'test_high_priority')
 
-        b_comps = {"icc@10.0" : { "cc" : "/icc100", "cxx" : "/icc100", "f77" : None, "f90" : None },
-         "icc@11.1" : { "cc" : "/icc111", "cxx" : "/icp111", "f77" : "/ifort", "f90" : "/ifort" },
-         "clang@3.3" : { "cc" : "/clang", "cxx" : "/clang++", "f77" :  None, "f90" : None}}
+        # Make sure the config looks how we expect.
+        self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
+        self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')
 
-        spack.config.add_to_compiler_config(a_comps, 'test_low_priority')
-        spack.config.add_to_compiler_config(b_comps, 'test_high_priority')
 
-        self.check_config(a_comps)
-        self.check_config(b_comps)
+    def test_write_key_to_disk(self):
+        # Write b_comps "on top of" a_comps.
+        spack.config.update_config('compilers', a_comps, 'test_low_priority')
+        spack.config.update_config('compilers', b_comps, 'test_high_priority')
 
+        # Clear caches so we're forced to read from disk.
         spack.config.clear_config_caches()
 
-        self.check_config(a_comps)
-        self.check_config(b_comps)
-
+        # Same check again, to ensure consistency.
+        self.check_config(a_comps, 'gcc@4.7.3', 'gcc@4.5.0')
+        self.check_config(b_comps, 'icc@10.0', 'icc@11.1', 'clang@3.3')
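
Together these two tests pin down the scope-merging contract: where keys collide, the high-priority scope wins, and entries unique to the low-priority scope survive the merge. A minimal sketch of that behavior, reusing the sample data defined above:

    spack.config.update_config('compilers', a_comps, 'test_low_priority')
    spack.config.update_config('compilers', b_comps, 'test_high_priority')
    merged = spack.config.get_config('compilers')

    # clang@3.3 appears in both scopes; the high-priority value wins,
    # replacing the '<overwritten>' placeholders from a_comps.
    assert merged['all']['clang@3.3']['cc'] == '/clang'

    # gcc@4.7.3 exists only in the low-priority scope, so it is kept.
    assert merged['all']['gcc@4.7.3']['cc'] == '/gcc473'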
diff --git a/lib/spack/spack/test/database.py b/lib/spack/spack/test/database.py
index c07d32686e481c9b61decc42c8770091b30948b7..0205f4b8ce23027af134cb7939ab26aad68ea6dc 100644
--- a/lib/spack/spack/test/database.py
+++ b/lib/spack/spack/test/database.py
@@ -79,7 +79,8 @@ class DatabaseTest(MockPackagesTest):
 
     def _mock_install(self, spec):
         s = Spec(spec)
-        pkg = spack.db.get(s.concretized())
+        s.concretize()
+        pkg = spack.repo.get(s)
         pkg.do_install(fake=True)
 
 
diff --git a/lib/spack/spack/test/directory_layout.py b/lib/spack/spack/test/directory_layout.py
index 703ac1b867219486959b49a7f319c6741ec3dc5b..925cb648eddfead76aaab13e6539eaeb343b1fcc 100644
--- a/lib/spack/spack/test/directory_layout.py
+++ b/lib/spack/spack/test/directory_layout.py
@@ -34,23 +34,27 @@
 
 import spack
 from spack.spec import Spec
-from spack.packages import PackageDB
+from spack.repository import RepoPath
 from spack.directory_layout import YamlDirectoryLayout
+from spack.test.mock_packages_test import *
+
 
 # number of packages to test (to reduce test time)
 max_packages = 10
 
 
-class DirectoryLayoutTest(unittest.TestCase):
+class DirectoryLayoutTest(MockPackagesTest):
     """Tests that a directory layout works correctly and produces a
        consistent install path."""
 
     def setUp(self):
+        super(DirectoryLayoutTest, self).setUp()
         self.tmpdir = tempfile.mkdtemp()
         self.layout = YamlDirectoryLayout(self.tmpdir)
 
 
     def tearDown(self):
+        super(DirectoryLayoutTest, self).tearDown()
         shutil.rmtree(self.tmpdir, ignore_errors=True)
         self.layout = None
 
@@ -62,7 +66,7 @@ def test_read_and_write_spec(self):
            finally that the directory can be removed by the directory
            layout.
         """
-        packages = list(spack.db.all_packages())[:max_packages]
+        packages = list(spack.repo.all_packages())[:max_packages]
 
         for pkg in packages:
             spec = pkg.spec
@@ -123,17 +127,17 @@ def test_handle_unknown_package(self):
            information about installed packages' specs to uninstall
            or query them again if the package goes away.
         """
-        mock_db = PackageDB(spack.mock_packages_path)
+        mock_db = RepoPath(spack.mock_packages_path)
 
         not_in_mock = set.difference(
-            set(spack.db.all_package_names()),
+            set(spack.repo.all_package_names()),
             set(mock_db.all_package_names()))
         packages = list(not_in_mock)[:max_packages]
 
         # Create all the packages that are not in mock.
         installed_specs = {}
         for pkg_name in packages:
-            spec = spack.db.get(pkg_name).spec
+            spec = spack.repo.get(pkg_name).spec
 
             # If a spec fails to concretize, just skip it.  If it is a
             # real error, it will be caught by concretization tests.
@@ -145,8 +149,7 @@ def test_handle_unknown_package(self):
             self.layout.create_install_directory(spec)
             installed_specs[spec] = self.layout.path_for_spec(spec)
 
-        tmp = spack.db
-        spack.db = mock_db
+        spack.repo.swap(mock_db)
 
         # Now check that even without the package files, we know
         # enough to read a spec from the spec file.
@@ -161,12 +164,12 @@ def test_handle_unknown_package(self):
             self.assertTrue(spec.eq_dag(spec_from_file))
             self.assertEqual(spec.dag_hash(), spec_from_file.dag_hash())
 
-        spack.db = tmp
+        spack.repo.swap(mock_db)
 
 
     def test_find(self):
         """Test that finding specs within an install layout works."""
-        packages = list(spack.db.all_packages())[:max_packages]
+        packages = list(spack.repo.all_packages())[:max_packages]
 
         # Create install prefixes for all packages in the list
         installed_specs = {}
diff --git a/lib/spack/spack/test/git_fetch.py b/lib/spack/spack/test/git_fetch.py
index 244680b5d0599d6de7cfefeacb4a5237476d8b0f..d84433176a89b0725665bbea084d90213c502375 100644
--- a/lib/spack/spack/test/git_fetch.py
+++ b/lib/spack/spack/test/git_fetch.py
@@ -50,16 +50,13 @@ def setUp(self):
 
         spec = Spec('git-test')
         spec.concretize()
-        self.pkg = spack.db.get(spec, new=True)
+        self.pkg = spack.repo.get(spec, new=True)
 
 
     def tearDown(self):
         """Destroy the stage space used by this test."""
         super(GitFetchTest, self).tearDown()
-
-        if self.repo.stage is not None:
-            self.repo.stage.destroy()
-
+        self.repo.destroy()
         self.pkg.do_clean()
 
 
diff --git a/lib/spack/spack/test/hg_fetch.py b/lib/spack/spack/test/hg_fetch.py
index f8c6571bda918df44f9f04fb2e1d641afea48d9f..bbcb64e4c1a35fbab5fee19141bed3a5e2a4fe62 100644
--- a/lib/spack/spack/test/hg_fetch.py
+++ b/lib/spack/spack/test/hg_fetch.py
@@ -47,16 +47,13 @@ def setUp(self):
 
         spec = Spec('hg-test')
         spec.concretize()
-        self.pkg = spack.db.get(spec, new=True)
+        self.pkg = spack.repo.get(spec, new=True)
 
 
     def tearDown(self):
         """Destroy the stage space used by this test."""
         super(HgFetchTest, self).tearDown()
-
-        if self.repo.stage is not None:
-            self.repo.stage.destroy()
-
+        self.repo.destroy()
         self.pkg.do_clean()
 
 
diff --git a/lib/spack/spack/test/install.py b/lib/spack/spack/test/install.py
index 1ef4171fb2f082f169d1b140e1ad17e934d503e7..c09bd24c8e1ab47384586fdd034db4f82d0c54a0 100644
--- a/lib/spack/spack/test/install.py
+++ b/lib/spack/spack/test/install.py
@@ -59,9 +59,7 @@ def setUp(self):
 
     def tearDown(self):
         super(InstallTest, self).tearDown()
-
-        if self.repo.stage is not None:
-            self.repo.stage.destroy()
+        self.repo.destroy()
 
         # Turn checksumming back on
         spack.do_checksum = True
@@ -78,7 +76,7 @@ def test_install_and_uninstall(self):
         self.assertTrue(spec.concrete)
 
         # Get the package
-        pkg = spack.db.get(spec)
+        pkg = spack.repo.get(spec)
 
         # Fake the URL for the package so it downloads from a file.
         pkg.fetcher = URLFetchStrategy(self.repo.url)
diff --git a/lib/spack/spack/test/make_executable.py b/lib/spack/spack/test/make_executable.py
index 09efec858057780c56c45eacc38a6342a9b3bdd2..d568a28d443cc7cd5acdc05d662d8d43c1ef35ed 100644
--- a/lib/spack/spack/test/make_executable.py
+++ b/lib/spack/spack/test/make_executable.py
@@ -56,47 +56,47 @@ def tearDown(self):
 
     def test_make_normal(self):
         make = MakeExecutable('make', 8)
-        self.assertEqual(make(return_output=True).strip(), '-j8')
-        self.assertEqual(make('install', return_output=True).strip(), '-j8 install')
+        self.assertEqual(make(output=str).strip(), '-j8')
+        self.assertEqual(make('install', output=str).strip(), '-j8 install')
 
 
     def test_make_explicit(self):
         make = MakeExecutable('make', 8)
-        self.assertEqual(make(parallel=True, return_output=True).strip(), '-j8')
-        self.assertEqual(make('install', parallel=True, return_output=True).strip(), '-j8 install')
+        self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
+        self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install')
 
 
     def test_make_one_job(self):
         make = MakeExecutable('make', 1)
-        self.assertEqual(make(return_output=True).strip(), '')
-        self.assertEqual(make('install', return_output=True).strip(), 'install')
+        self.assertEqual(make(output=str).strip(), '')
+        self.assertEqual(make('install', output=str).strip(), 'install')
 
 
     def test_make_parallel_false(self):
         make = MakeExecutable('make', 8)
-        self.assertEqual(make(parallel=False, return_output=True).strip(), '')
-        self.assertEqual(make('install', parallel=False, return_output=True).strip(), 'install')
+        self.assertEqual(make(parallel=False, output=str).strip(), '')
+        self.assertEqual(make('install', parallel=False, output=str).strip(), 'install')
 
 
     def test_make_parallel_disabled(self):
         make = MakeExecutable('make', 8)
 
         os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true'
-        self.assertEqual(make(return_output=True).strip(), '')
-        self.assertEqual(make('install', return_output=True).strip(), 'install')
+        self.assertEqual(make(output=str).strip(), '')
+        self.assertEqual(make('install', output=str).strip(), 'install')
 
         os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'
-        self.assertEqual(make(return_output=True).strip(), '')
-        self.assertEqual(make('install', return_output=True).strip(), 'install')
+        self.assertEqual(make(output=str).strip(), '')
+        self.assertEqual(make('install', output=str).strip(), 'install')
 
         # These don't disable (false and random string)
         os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false'
-        self.assertEqual(make(return_output=True).strip(), '-j8')
-        self.assertEqual(make('install', return_output=True).strip(), '-j8 install')
+        self.assertEqual(make(output=str).strip(), '-j8')
+        self.assertEqual(make('install', output=str).strip(), '-j8 install')
 
         os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar'
-        self.assertEqual(make(return_output=True).strip(), '-j8')
-        self.assertEqual(make('install', return_output=True).strip(), '-j8 install')
+        self.assertEqual(make(output=str).strip(), '-j8')
+        self.assertEqual(make('install', output=str).strip(), '-j8 install')
 
         del os.environ['SPACK_NO_PARALLEL_MAKE']
 
@@ -106,20 +106,20 @@ def test_make_parallel_precedence(self):
 
         # These should work
         os.environ['SPACK_NO_PARALLEL_MAKE'] = 'true'
-        self.assertEqual(make(parallel=True, return_output=True).strip(), '')
-        self.assertEqual(make('install', parallel=True, return_output=True).strip(), 'install')
+        self.assertEqual(make(parallel=True, output=str).strip(), '')
+        self.assertEqual(make('install', parallel=True, output=str).strip(), 'install')
 
         os.environ['SPACK_NO_PARALLEL_MAKE'] = '1'
-        self.assertEqual(make(parallel=True, return_output=True).strip(), '')
-        self.assertEqual(make('install', parallel=True, return_output=True).strip(), 'install')
+        self.assertEqual(make(parallel=True, output=str).strip(), '')
+        self.assertEqual(make('install', parallel=True, output=str).strip(), 'install')
 
         # These don't disable (false and random string)
         os.environ['SPACK_NO_PARALLEL_MAKE'] = 'false'
-        self.assertEqual(make(parallel=True, return_output=True).strip(), '-j8')
-        self.assertEqual(make('install', parallel=True, return_output=True).strip(), '-j8 install')
+        self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
+        self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install')
 
         os.environ['SPACK_NO_PARALLEL_MAKE'] = 'foobar'
-        self.assertEqual(make(parallel=True, return_output=True).strip(), '-j8')
-        self.assertEqual(make('install', parallel=True, return_output=True).strip(), '-j8 install')
+        self.assertEqual(make(parallel=True, output=str).strip(), '-j8')
+        self.assertEqual(make('install', parallel=True, output=str).strip(), '-j8 install')
 
         del os.environ['SPACK_NO_PARALLEL_MAKE']
diff --git a/lib/spack/spack/test/mirror.py b/lib/spack/spack/test/mirror.py
index 189a85fb1a867d63d589d66dd0dcafdf9b8100d7..046ec56604cc4c44b0d9abd68fd15da3cd992ece 100644
--- a/lib/spack/spack/test/mirror.py
+++ b/lib/spack/spack/test/mirror.py
@@ -44,8 +44,16 @@ def setUp(self):
         self.repos = {}
 
 
+    def tearDown(self):
+        """Destroy all the stages created by the repos in setup."""
+        super(MirrorTest, self).tearDown()
+        for repo in self.repos.values():
+            repo.destroy()
+        self.repos.clear()
+
+
     def set_up_package(self, name, MockRepoClass, url_attr):
-        """Use this to set up a mock package to be mirrored.
+        """Set up a mock package to be mirrored.
            Each package needs us to:
              1. Set up a mock repo/archive to fetch from.
              2. Point the package's version args at that repo.
@@ -55,7 +63,7 @@ def set_up_package(self, name, MockRepoClass, url_attr):
         spec.concretize()
 
         # Get the package and fix its fetch args to point to a mock repo
-        pkg = spack.db.get(spec)
+        pkg = spack.repo.get(spec)
         repo = MockRepoClass()
         self.repos[name] = repo
 
@@ -65,21 +73,14 @@ def set_up_package(self, name, MockRepoClass, url_attr):
         pkg.versions[v][url_attr] = repo.url
 
 
-    def tearDown(self):
-        """Destroy all the stages created by the repos in setup."""
-        super(MirrorTest, self).tearDown()
-
-        for name, repo in self.repos.items():
-            if repo.stage:
-                pass #repo.stage.destroy()
-
-        self.repos.clear()
-
-
     def check_mirror(self):
         stage = Stage('spack-mirror-test')
         mirror_root = join_path(stage.path, 'test-mirror')
 
+        # register mirror with spack config
+        mirrors = { 'spack-mirror-test' : 'file://' + mirror_root }
+        spack.config.update_config('mirrors', mirrors)
+
         try:
             os.chdir(stage.path)
             spack.mirror.create(
@@ -88,7 +89,7 @@ def check_mirror(self):
             # Stage directory exists
             self.assertTrue(os.path.isdir(mirror_root))
 
-            # subdirs for each package
+            # check that there are subdirs for each package
             for name in self.repos:
                 subdir = join_path(mirror_root, name)
                 self.assertTrue(os.path.isdir(subdir))
@@ -96,40 +97,37 @@ def check_mirror(self):
                 files = os.listdir(subdir)
                 self.assertEqual(len(files), 1)
 
-                # Decompress archive in the mirror
-                archive = files[0]
-                archive_path = join_path(subdir, archive)
-                decomp = decompressor_for(archive_path)
-
-                with working_dir(subdir):
-                    decomp(archive_path)
+            # Now try to fetch each package.
+            for name, mock_repo in self.repos.items():
+                spec = Spec(name).concretized()
+                pkg = spec.package
 
-                    # Find the untarred archive directory.
-                    files = os.listdir(subdir)
-                    self.assertEqual(len(files), 2)
-                    self.assertTrue(archive in files)
-                    files.remove(archive)
-
-                    expanded_archive = join_path(subdir, files[0])
-                    self.assertTrue(os.path.isdir(expanded_archive))
+                saved_checksum_setting = spack.do_checksum
+                try:
+                    # Stage the archive from the mirror and cd to it.
+                    spack.do_checksum = False
+                    pkg.do_stage(mirror_only=True)
 
                     # Compare the original repo with the expanded archive
-                    repo = self.repos[name]
-                    if not 'svn' in name:
-                        original_path = repo.path
-                    else:
-                        co = 'checked_out'
-                        svn('checkout', repo.url, co)
-                        original_path = join_path(subdir, co)
+                    original_path = mock_repo.path
+                    if 'svn' in name:
+                        # have to check out the svn repo to compare.
+                        original_path = join_path(mock_repo.path, 'checked_out')
+                        svn('checkout', mock_repo.url, original_path)
 
-                    dcmp = dircmp(original_path, expanded_archive)
+                    dcmp = dircmp(original_path, pkg.stage.source_path)
 
                     # make sure there are no new files in the expanded tarball
                     self.assertFalse(dcmp.right_only)
+
+                    # and that all original files are present.
                     self.assertTrue(all(l in exclude for l in dcmp.left_only))
 
+                finally:
+                    spack.do_checksum = saved_checksum_setting
+                    pkg.do_clean()
         finally:
-            pass #stage.destroy()
+            stage.destroy()
 
 
     def test_git_mirror(self):
diff --git a/lib/spack/spack/test/mock_packages_test.py b/lib/spack/spack/test/mock_packages_test.py
index e4e1b21b53e2b6bbba32b183fad4cffac5e4f1ff..e9f1f95df550b59c31745bd4135094814bf23d31 100644
--- a/lib/spack/spack/test/mock_packages_test.py
+++ b/lib/spack/spack/test/mock_packages_test.py
@@ -22,43 +22,96 @@
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 ##############################################################################
+import sys
+import os
+import shutil
 import unittest
+import tempfile
+from ordereddict_backport import OrderedDict
+
+from llnl.util.filesystem import mkdirp
 
 import spack
 import spack.config
-from spack.packages import PackageDB
+from spack.repository import RepoPath
 from spack.spec import Spec
 
-
-def set_pkg_dep(pkg, spec):
-    """Alters dependence information for a package.
-       Use this to mock up constraints.
-    """
-    spec = Spec(spec)
-    spack.db.get(pkg).dependencies[spec.name] = { Spec(pkg) : spec }
-
+mock_compiler_config = """\
+compilers:
+  all:
+    clang@3.3:
+      cc: /path/to/clang
+      cxx: /path/to/clang++
+      f77: None
+      fc: None
+    gcc@4.5.0:
+      cc: /path/to/gcc
+      cxx: /path/to/g++
+      f77: /path/to/gfortran
+      fc: /path/to/gfortran
+"""
 
 class MockPackagesTest(unittest.TestCase):
     def initmock(self):
         # Use the mock packages database for these tests.  This allows
         # us to set up contrived packages that don't interfere with
         # real ones.
-        self.real_db = spack.db
-        spack.db = PackageDB(spack.mock_packages_path)
+        self.db = RepoPath(spack.mock_packages_path)
+        spack.repo.swap(self.db)
 
         spack.config.clear_config_caches()
         self.real_scopes = spack.config.config_scopes
-        spack.config.config_scopes = [
-            ('site', spack.mock_site_config),
-            ('user', spack.mock_user_config)]
+
+        # Mock up temporary configuration directories
+        self.temp_config = tempfile.mkdtemp()
+        self.mock_site_config = os.path.join(self.temp_config, 'site')
+        self.mock_user_config = os.path.join(self.temp_config, 'user')
+        mkdirp(self.mock_site_config)
+        mkdirp(self.mock_user_config)
+        comp_yaml = os.path.join(self.mock_site_config, 'compilers.yaml')
+        with open(comp_yaml, 'w') as f:
+            f.write(mock_compiler_config)
+
+        # TODO: Mocking this up is kind of brittle b/c ConfigScope
+        # TODO: constructor modifies config_scopes.  Make it cleaner.
+        spack.config.config_scopes = OrderedDict()
+        spack.config.ConfigScope('site', self.mock_site_config)
+        spack.config.ConfigScope('user', self.mock_user_config)
+
+        # Store changes to the package's dependencies so we can
+        # restore later.
+        self.saved_deps = {}
+
+
+    def set_pkg_dep(self, pkg_name, spec):
+        """Alters dependence information for a package.
+
+        Adds a dependency on <spec> to pkg.
+        Use this to mock up constraints.
+        """
+        spec = Spec(spec)
+
+        # Save original dependencies before making any changes.
+        pkg = spack.repo.get(pkg_name)
+        if pkg_name not in self.saved_deps:
+            self.saved_deps[pkg_name] = (pkg, pkg.dependencies.copy())
+
+        # Change dep spec
+        pkg.dependencies[spec.name] = { Spec(pkg_name) : spec }
 
 
     def cleanmock(self):
         """Restore the real packages path after any test."""
-        spack.db = self.real_db
+        spack.repo.swap(self.db)
         spack.config.config_scopes = self.real_scopes
+        shutil.rmtree(self.temp_config, ignore_errors=True)
         spack.config.clear_config_caches()
 
+        # Restore dependency changes that happened during the test
+        for pkg_name, (pkg, deps) in self.saved_deps.items():
+            pkg.dependencies.clear()
+            pkg.dependencies.update(deps)
+
 
     def setUp(self):
         self.initmock()
@@ -66,5 +119,3 @@ def setUp(self):
 
     def tearDown(self):
         self.cleanmock()
-
-
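
With set_pkg_dep now an instance method, any dependency tweak is recorded in saved_deps and undone by cleanmock(), so tests can no longer leak constraints into one another. A typical use, matching the spec tests later in this diff:

    # Inside a MockPackagesTest subclass: force mpileaks to require
    # mpich@1.0, then check that a conflicting spec fails to normalize.
    # tearDown() restores the original dependencies automatically.
    self.set_pkg_dep('mpileaks', 'mpich@1.0')
    spec = Spec('mpileaks ^mpich@2.0 ^callpath ^dyninst ^libelf ^libdwarf')
    self.assertRaises(spack.spec.UnsatisfiableVersionSpecError, spec.normalize)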
diff --git a/lib/spack/spack/test/mock_repo.py b/lib/spack/spack/test/mock_repo.py
index c454b1f106c6e21cd75d839c96f6aca167e53d92..ed94023b0eb294ab3f9f80a3a307d81ccc07fc69 100644
--- a/lib/spack/spack/test/mock_repo.py
+++ b/lib/spack/spack/test/mock_repo.py
@@ -55,6 +55,12 @@ def __init__(self, stage_name, repo_name):
         mkdirp(self.path)
 
 
+    def destroy(self):
+        """Destroy resources associated with this mock repo."""
+        if self.stage:
+            self.stage.destroy()
+
+
 class MockArchive(MockRepo):
     """Creates a very simple archive directory with a configure script and a
        makefile that installs to a prefix.  Tars it up into an archive."""
@@ -141,7 +147,7 @@ def __init__(self):
             self.url = self.path
 
     def rev_hash(self, rev):
-        return git('rev-parse', rev, return_output=True).strip()
+        return git('rev-parse', rev, output=str).strip()
 
 
 class MockSvnRepo(MockVCSRepo):
@@ -193,4 +199,4 @@ def __init__(self):
 
     def get_rev(self):
         """Get current mercurial revision."""
-        return hg('id', '-i', return_output=True).strip()
+        return hg('id', '-i', output=str).strip()
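
The return_output=True to output=str renames here, and throughout the tests above, follow the new Executable calling convention documented at the end of this diff. A minimal sketch, assuming /bin/echo exists on the host:

    from spack.util.executable import Executable

    echo = Executable('/bin/echo')
    out = echo('hello', output=str)    # capture stdout and return it
    assert out.strip() == 'hello'
    echo('goodbye')                    # default: inherit parent's stdout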
diff --git a/lib/spack/spack/test/multimethod.py b/lib/spack/spack/test/multimethod.py
index d8d61d14c8cdf0f4b2e36f7d29617560c98110f4..7bf4ff0a0a5f69bb30012f556cf3d5995617b9ff 100644
--- a/lib/spack/spack/test/multimethod.py
+++ b/lib/spack/spack/test/multimethod.py
@@ -38,92 +38,92 @@
 class MultiMethodTest(MockPackagesTest):
 
     def test_no_version_match(self):
-        pkg = spack.db.get('multimethod@2.0')
+        pkg = spack.repo.get('multimethod@2.0')
         self.assertRaises(NoSuchMethodError, pkg.no_version_2)
 
 
     def test_one_version_match(self):
-        pkg = spack.db.get('multimethod@1.0')
+        pkg = spack.repo.get('multimethod@1.0')
         self.assertEqual(pkg.no_version_2(), 1)
 
-        pkg = spack.db.get('multimethod@3.0')
+        pkg = spack.repo.get('multimethod@3.0')
         self.assertEqual(pkg.no_version_2(), 3)
 
-        pkg = spack.db.get('multimethod@4.0')
+        pkg = spack.repo.get('multimethod@4.0')
         self.assertEqual(pkg.no_version_2(), 4)
 
 
     def test_version_overlap(self):
-        pkg = spack.db.get('multimethod@2.0')
+        pkg = spack.repo.get('multimethod@2.0')
         self.assertEqual(pkg.version_overlap(), 1)
 
-        pkg = spack.db.get('multimethod@5.0')
+        pkg = spack.repo.get('multimethod@5.0')
         self.assertEqual(pkg.version_overlap(), 2)
 
 
     def test_mpi_version(self):
-        pkg = spack.db.get('multimethod^mpich@3.0.4')
+        pkg = spack.repo.get('multimethod^mpich@3.0.4')
         self.assertEqual(pkg.mpi_version(), 3)
 
-        pkg = spack.db.get('multimethod^mpich2@1.2')
+        pkg = spack.repo.get('multimethod^mpich2@1.2')
         self.assertEqual(pkg.mpi_version(), 2)
 
-        pkg = spack.db.get('multimethod^mpich@1.0')
+        pkg = spack.repo.get('multimethod^mpich@1.0')
         self.assertEqual(pkg.mpi_version(), 1)
 
 
     def test_undefined_mpi_version(self):
-        pkg = spack.db.get('multimethod^mpich@0.4')
+        pkg = spack.repo.get('multimethod^mpich@0.4')
         self.assertEqual(pkg.mpi_version(), 1)
 
-        pkg = spack.db.get('multimethod^mpich@1.4')
+        pkg = spack.repo.get('multimethod^mpich@1.4')
         self.assertEqual(pkg.mpi_version(), 1)
 
 
     def test_default_works(self):
-        pkg = spack.db.get('multimethod%gcc')
+        pkg = spack.repo.get('multimethod%gcc')
         self.assertEqual(pkg.has_a_default(), 'gcc')
 
-        pkg = spack.db.get('multimethod%intel')
+        pkg = spack.repo.get('multimethod%intel')
         self.assertEqual(pkg.has_a_default(), 'intel')
 
-        pkg = spack.db.get('multimethod%pgi')
+        pkg = spack.repo.get('multimethod%pgi')
         self.assertEqual(pkg.has_a_default(), 'default')
 
 
     def test_architecture_match(self):
-        pkg = spack.db.get('multimethod=x86_64')
+        pkg = spack.repo.get('multimethod=x86_64')
         self.assertEqual(pkg.different_by_architecture(), 'x86_64')
 
-        pkg = spack.db.get('multimethod=ppc64')
+        pkg = spack.repo.get('multimethod=ppc64')
         self.assertEqual(pkg.different_by_architecture(), 'ppc64')
 
-        pkg = spack.db.get('multimethod=ppc32')
+        pkg = spack.repo.get('multimethod=ppc32')
         self.assertEqual(pkg.different_by_architecture(), 'ppc32')
 
-        pkg = spack.db.get('multimethod=arm64')
+        pkg = spack.repo.get('multimethod=arm64')
         self.assertEqual(pkg.different_by_architecture(), 'arm64')
 
-        pkg = spack.db.get('multimethod=macos')
+        pkg = spack.repo.get('multimethod=macos')
         self.assertRaises(NoSuchMethodError, pkg.different_by_architecture)
 
 
     def test_dependency_match(self):
-        pkg = spack.db.get('multimethod^zmpi')
+        pkg = spack.repo.get('multimethod^zmpi')
         self.assertEqual(pkg.different_by_dep(), 'zmpi')
 
-        pkg = spack.db.get('multimethod^mpich')
+        pkg = spack.repo.get('multimethod^mpich')
         self.assertEqual(pkg.different_by_dep(), 'mpich')
 
         # If we try to switch on some entirely different dep, it's ambiguous,
         # but should take the first option
-        pkg = spack.db.get('multimethod^foobar')
+        pkg = spack.repo.get('multimethod^foobar')
         self.assertEqual(pkg.different_by_dep(), 'mpich')
 
 
     def test_virtual_dep_match(self):
-        pkg = spack.db.get('multimethod^mpich2')
+        pkg = spack.repo.get('multimethod^mpich2')
         self.assertEqual(pkg.different_by_virtual_dep(), 2)
 
-        pkg = spack.db.get('multimethod^mpich@1.0')
+        pkg = spack.repo.get('multimethod^mpich@1.0')
         self.assertEqual(pkg.different_by_virtual_dep(), 1)
diff --git a/lib/spack/spack/test/namespace_trie.py b/lib/spack/spack/test/namespace_trie.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0d809004d536b0b69d64afb0b78ffa796779c51
--- /dev/null
+++ b/lib/spack/spack/test/namespace_trie.py
@@ -0,0 +1,114 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+import unittest
+from spack.util.naming import NamespaceTrie
+
+
+class NamespaceTrieTest(unittest.TestCase):
+
+    def setUp(self):
+        self.trie = NamespaceTrie()
+
+
+    def test_add_single(self):
+        self.trie['foo'] = 'bar'
+
+        self.assertTrue(self.trie.is_prefix('foo'))
+        self.assertTrue(self.trie.has_value('foo'))
+        self.assertEqual(self.trie['foo'], 'bar')
+
+
+    def test_add_multiple(self):
+        self.trie['foo.bar'] = 'baz'
+
+        self.assertFalse(self.trie.has_value('foo'))
+        self.assertTrue(self.trie.is_prefix('foo'))
+
+        self.assertTrue(self.trie.is_prefix('foo.bar'))
+        self.assertTrue(self.trie.has_value('foo.bar'))
+        self.assertEqual(self.trie['foo.bar'], 'baz')
+
+        self.assertFalse(self.trie.is_prefix('foo.bar.baz'))
+        self.assertFalse(self.trie.has_value('foo.bar.baz'))
+
+
+    def test_add_three(self):
+        # add a three-level namespace
+        self.trie['foo.bar.baz'] = 'quux'
+
+        self.assertTrue(self.trie.is_prefix('foo'))
+        self.assertFalse(self.trie.has_value('foo'))
+
+        self.assertTrue(self.trie.is_prefix('foo.bar'))
+        self.assertFalse(self.trie.has_value('foo.bar'))
+
+        self.assertTrue(self.trie.is_prefix('foo.bar.baz'))
+        self.assertTrue(self.trie.has_value('foo.bar.baz'))
+        self.assertEqual(self.trie['foo.bar.baz'], 'quux')
+
+        self.assertFalse(self.trie.is_prefix('foo.bar.baz.quux'))
+        self.assertFalse(self.trie.has_value('foo.bar.baz.quux'))
+
+        # Try to add a second element in a prefix namespace
+        self.trie['foo.bar'] = 'blah'
+
+        self.assertTrue(self.trie.is_prefix('foo'))
+        self.assertFalse(self.trie.has_value('foo'))
+
+        self.assertTrue(self.trie.is_prefix('foo.bar'))
+        self.assertTrue(self.trie.has_value('foo.bar'))
+        self.assertEqual(self.trie['foo.bar'], 'blah')
+
+        self.assertTrue(self.trie.is_prefix('foo.bar.baz'))
+        self.assertTrue(self.trie.has_value('foo.bar.baz'))
+        self.assertEqual(self.trie['foo.bar.baz'], 'quux')
+
+        self.assertFalse(self.trie.is_prefix('foo.bar.baz.quux'))
+        self.assertFalse(self.trie.has_value('foo.bar.baz.quux'))
+
+
+    def test_add_none_single(self):
+        self.trie['foo'] = None
+        self.assertTrue(self.trie.is_prefix('foo'))
+        self.assertTrue(self.trie.has_value('foo'))
+        self.assertEqual(self.trie['foo'], None)
+
+        self.assertFalse(self.trie.is_prefix('foo.bar'))
+        self.assertFalse(self.trie.has_value('foo.bar'))
+
+
+
+    def test_add_none_multiple(self):
+        self.trie['foo.bar'] = None
+
+        self.assertTrue(self.trie.is_prefix('foo'))
+        self.assertFalse(self.trie.has_value('foo'))
+
+        self.assertTrue(self.trie.is_prefix('foo.bar'))
+        self.assertTrue(self.trie.has_value('foo.bar'))
+        self.assertEqual(self.trie['foo.bar'], None)
+
+        self.assertFalse(self.trie.is_prefix('foo.bar.baz'))
+        self.assertFalse(self.trie.has_value('foo.bar.baz'))
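
These tests fully specify the trie's observable behavior: is_prefix() holds for every dotted ancestor of a stored key, while has_value() holds only where a value (possibly None) was assigned. A toy structure that satisfies them, for illustration only (not the spack.util.naming implementation):

    class SimpleTrie(object):
        """Toy dotted-namespace trie matching the semantics tested above."""
        def __init__(self):
            self._values = {}        # 'foo.bar' -> assigned value
            self._prefixes = set()   # all dotted ancestors of stored keys

        def __setitem__(self, key, value):
            parts = key.split('.')
            for i in range(1, len(parts) + 1):
                self._prefixes.add('.'.join(parts[:i]))
            self._values[key] = value

        def __getitem__(self, key):
            return self._values[key]

        def is_prefix(self, key):
            return key in self._prefixes

        def has_value(self, key):
            # membership test, so a stored None still counts as a value
            return key in self._values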
diff --git a/lib/spack/spack/test/package_sanity.py b/lib/spack/spack/test/package_sanity.py
index 370cf676ef3ce61c38648dd8346754cef5d82307..ee09040d0d892354de0e3d7fa67f7c14f81d54c3 100644
--- a/lib/spack/spack/test/package_sanity.py
+++ b/lib/spack/spack/test/package_sanity.py
@@ -28,16 +28,15 @@
 import unittest
 
 import spack
-import spack.url as url
-from spack.packages import PackageDB
+from spack.repository import RepoPath
 
 
 class PackageSanityTest(unittest.TestCase):
 
     def check_db(self):
         """Get all packages in a DB to make sure they work."""
-        for name in spack.db.all_package_names():
-            spack.db.get(name)
+        for name in spack.repo.all_package_names():
+            spack.repo.get(name)
 
 
     def test_get_all_packages(self):
@@ -47,15 +46,15 @@ def test_get_all_packages(self):
 
     def test_get_all_mock_packages(self):
         """Get the mock packages once each too."""
-        tmp = spack.db
-        spack.db = PackageDB(spack.mock_packages_path)
+        db = RepoPath(spack.mock_packages_path)
+        spack.repo.swap(db)
         self.check_db()
-        spack.db = tmp
+        spack.repo.swap(db)
 
 
     def test_url_versions(self):
         """Check URLs for regular packages, if they are explicitly defined."""
-        for pkg in spack.db.all_packages():
+        for pkg in spack.repo.all_packages():
             for v, vattrs in pkg.versions.items():
                 if 'url' in vattrs:
                     # If there is a url for the version check it.
diff --git a/lib/spack/spack/test/packages.py b/lib/spack/spack/test/packages.py
index b2daea7b7b235f4d99b052b3ed99d17c148750d0..83984dc5f657238c9c7b44701da1289f92038dc3 100644
--- a/lib/spack/spack/test/packages.py
+++ b/lib/spack/spack/test/packages.py
@@ -27,7 +27,7 @@
 from llnl.util.filesystem import join_path
 
 import spack
-import spack.packages as packages
+from spack.repository import Repo
 from spack.util.naming import mod_to_class
 from spack.test.mock_packages_test import *
 
@@ -35,27 +35,32 @@
 class PackagesTest(MockPackagesTest):
 
     def test_load_package(self):
-        pkg = spack.db.get('mpich')
+        pkg = spack.repo.get('mpich')
 
 
     def test_package_name(self):
-        pkg = spack.db.get('mpich')
+        pkg = spack.repo.get('mpich')
         self.assertEqual(pkg.name, 'mpich')
 
 
     def test_package_filename(self):
-        filename = spack.db.filename_for_package_name('mpich')
-        self.assertEqual(filename, join_path(spack.mock_packages_path, 'mpich', 'package.py'))
+        repo = Repo(spack.mock_packages_path)
+        filename = repo.filename_for_package_name('mpich')
+        self.assertEqual(filename,
+                         join_path(spack.mock_packages_path, 'packages', 'mpich', 'package.py'))
 
 
     def test_package_name(self):
-        pkg = spack.db.get('mpich')
+        pkg = spack.repo.get('mpich')
         self.assertEqual(pkg.name, 'mpich')
 
 
     def test_nonexisting_package_filename(self):
-        filename = spack.db.filename_for_package_name('some-nonexisting-package')
-        self.assertEqual(filename, join_path(spack.mock_packages_path, 'some-nonexisting-package', 'package.py'))
+        repo = Repo(spack.mock_packages_path)
+        filename = repo.filename_for_package_name('some-nonexisting-package')
+        self.assertEqual(
+            filename,
+            join_path(spack.mock_packages_path, 'packages', 'some-nonexisting-package', 'package.py'))
 
 
     def test_package_class_names(self):
@@ -64,3 +69,38 @@ def test_package_class_names(self):
         self.assertEqual('PmgrCollective', mod_to_class('pmgr-collective'))
         self.assertEqual('Pmgrcollective', mod_to_class('PmgrCollective'))
         self.assertEqual('_3db',        mod_to_class('3db'))
+
+
+    #
+    # Below tests target direct imports of spack packages from the
+    # spack.pkg namespace
+    #
+
+    def test_import_package(self):
+        import spack.pkg.builtin.mock.mpich
+
+
+    def test_import_package_as(self):
+        import spack.pkg.builtin.mock.mpich as mp
+
+
+    def test_import_class_from_package(self):
+        from spack.pkg.builtin.mock.mpich import Mpich
+
+
+    def test_import_module_from_package(self):
+        from spack.pkg.builtin.mock import mpich
+
+
+    def test_import_namespace_container_modules(self):
+        import spack.pkg
+        import spack.pkg as p
+        from spack import pkg
+
+        import spack.pkg.builtin
+        import spack.pkg.builtin as b
+        from spack.pkg import builtin
+
+        import spack.pkg.builtin.mock
+        import spack.pkg.builtin.mock as m
+        from spack.pkg.builtin import mock
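
These tests exercise modules synthesized at import time under the spack.pkg namespace; the repository machinery presumably installs a PEP 302 finder on sys.meta_path to serve them. A stripped-down illustration of that mechanism, using a hypothetical 'mockpkg' prefix rather than Spack's real importer:

    import imp
    import sys

    class MockFinder(object):
        """Serve empty synthetic modules for anything under 'mockpkg'."""
        prefix = 'mockpkg'

        def find_module(self, fullname, path=None):
            if fullname == self.prefix or fullname.startswith(self.prefix + '.'):
                return self
            return None

        def load_module(self, fullname):
            if fullname in sys.modules:
                return sys.modules[fullname]
            module = imp.new_module(fullname)
            module.__loader__ = self
            module.__path__ = []    # mark as a package so submodules resolve
            sys.modules[fullname] = module
            return module

    sys.meta_path.append(MockFinder())
    import mockpkg.builtin.mock    # resolves to empty synthetic modules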
diff --git a/lib/spack/spack/test/python_version.py b/lib/spack/spack/test/python_version.py
index 2ea5febb11d210ff16d44d635dcd64b82e4aebbe..d74d3b9b7d7e3a46977ef3377a8b700825b3ed9e 100644
--- a/lib/spack/spack/test/python_version.py
+++ b/lib/spack/spack/test/python_version.py
@@ -54,8 +54,8 @@ def pyfiles(self, *search_paths):
 
 
     def package_py_files(self):
-        for name in spack.db.all_package_names():
-            yield spack.db.filename_for_package_name(name)
+        for name in spack.repo.all_package_names():
+            yield spack.repo.filename_for_package_name(name)
 
 
     def check_python_versions(self, *files):
diff --git a/lib/spack/spack/test/spec_dag.py b/lib/spack/spack/test/spec_dag.py
index d3a4d77b32db8316a3b0453da56e646c2065b130..632f777cdeb7d0d1520d289c71a25f247e8e8420 100644
--- a/lib/spack/spack/test/spec_dag.py
+++ b/lib/spack/spack/test/spec_dag.py
@@ -40,8 +40,8 @@
 class SpecDagTest(MockPackagesTest):
 
     def test_conflicting_package_constraints(self):
-        set_pkg_dep('mpileaks', 'mpich@1.0')
-        set_pkg_dep('callpath', 'mpich@2.0')
+        self.set_pkg_dep('mpileaks', 'mpich@1.0')
+        self.set_pkg_dep('callpath', 'mpich@2.0')
 
         spec = Spec('mpileaks ^mpich ^callpath ^dyninst ^libelf ^libdwarf')
 
@@ -223,25 +223,25 @@ def test_dependents_and_dependencies_are_correct(self):
 
 
     def test_unsatisfiable_version(self):
-        set_pkg_dep('mpileaks', 'mpich@1.0')
+        self.set_pkg_dep('mpileaks', 'mpich@1.0')
         spec = Spec('mpileaks ^mpich@2.0 ^callpath ^dyninst ^libelf ^libdwarf')
         self.assertRaises(spack.spec.UnsatisfiableVersionSpecError, spec.normalize)
 
 
     def test_unsatisfiable_compiler(self):
-        set_pkg_dep('mpileaks', 'mpich%gcc')
+        self.set_pkg_dep('mpileaks', 'mpich%gcc')
         spec = Spec('mpileaks ^mpich%intel ^callpath ^dyninst ^libelf ^libdwarf')
         self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, spec.normalize)
 
 
     def test_unsatisfiable_compiler_version(self):
-        set_pkg_dep('mpileaks', 'mpich%gcc@4.6')
+        self.set_pkg_dep('mpileaks', 'mpich%gcc@4.6')
         spec = Spec('mpileaks ^mpich%gcc@4.5 ^callpath ^dyninst ^libelf ^libdwarf')
         self.assertRaises(spack.spec.UnsatisfiableCompilerSpecError, spec.normalize)
 
 
     def test_unsatisfiable_architecture(self):
-        set_pkg_dep('mpileaks', 'mpich=bgqos_0')
+        self.set_pkg_dep('mpileaks', 'mpich=bgqos_0')
         spec = Spec('mpileaks ^mpich=sles_10_ppc64 ^callpath ^dyninst ^libelf ^libdwarf')
         self.assertRaises(spack.spec.UnsatisfiableArchitectureSpecError, spec.normalize)
 
diff --git a/lib/spack/spack/test/spec_semantics.py b/lib/spack/spack/test/spec_semantics.py
index 1381556aad5c25ac977cc422af22cc81b3e18d3e..44a09cbd7fb32507e9557084944282d44fabe55a 100644
--- a/lib/spack/spack/test/spec_semantics.py
+++ b/lib/spack/spack/test/spec_semantics.py
@@ -35,7 +35,10 @@ class SpecSematicsTest(MockPackagesTest):
     # ================================================================================
     def check_satisfies(self, spec, anon_spec, concrete=False):
         left = Spec(spec, concrete=concrete)
-        right = parse_anonymous_spec(anon_spec, left.name)
+        try:
+            right = Spec(anon_spec)  # if it's not anonymous, allow it.
+        except Exception:
+            right = parse_anonymous_spec(anon_spec, left.name)
 
         # Satisfies is one-directional.
         self.assertTrue(left.satisfies(right))
@@ -48,7 +51,10 @@ def check_satisfies(self, spec, anon_spec, concrete=False):
 
     def check_unsatisfiable(self, spec, anon_spec, concrete=False):
         left = Spec(spec, concrete=concrete)
-        right = parse_anonymous_spec(anon_spec, left.name)
+        try:
+            right = Spec(anon_spec)  # if it's not anonymous, allow it.
+        except Exception:
+            right = parse_anonymous_spec(anon_spec, left.name)
 
         self.assertFalse(left.satisfies(right))
         self.assertFalse(left.satisfies(anon_spec))
@@ -88,6 +94,28 @@ def test_satisfies(self):
         self.check_satisfies('libdwarf^libelf@0.8.13', '^libelf@0:1')
 
 
+    def test_satisfies_namespace(self):
+        self.check_satisfies('builtin.mpich', 'mpich')
+        self.check_satisfies('builtin.mock.mpich', 'mpich')
+
+        # TODO: only works for deps now, but shouldn't we allow this for root spec?
+        # self.check_satisfies('builtin.mock.mpich', 'mpi')
+
+        self.check_satisfies('builtin.mock.mpich', 'builtin.mock.mpich')
+
+        self.check_unsatisfiable('builtin.mock.mpich', 'builtin.mpich')
+
+
+    def test_satisfies_namespaced_dep(self):
+        """Ensure spec from same or unspecified namespace satisfies namespace constraint."""
+        self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpich')
+
+        self.check_satisfies('mpileaks ^builtin.mock.mpich', '^mpi')
+        self.check_satisfies('mpileaks ^builtin.mock.mpich', '^builtin.mock.mpich')
+
+        self.check_unsatisfiable('mpileaks ^builtin.mock.mpich', '^builtin.mpich')
+
+
     def test_satisfies_compiler(self):
         self.check_satisfies('foo%gcc', '%gcc')
         self.check_satisfies('foo%intel', '%intel')
diff --git a/lib/spack/spack/test/svn_fetch.py b/lib/spack/spack/test/svn_fetch.py
index 9229af76d49279292867cfdf89de78e1654963f8..454a7f1d1ffefabc50c0fede7349b5de438e1ea8 100644
--- a/lib/spack/spack/test/svn_fetch.py
+++ b/lib/spack/spack/test/svn_fetch.py
@@ -49,23 +49,20 @@ def setUp(self):
 
         spec = Spec('svn-test')
         spec.concretize()
-        self.pkg = spack.db.get(spec, new=True)
+        self.pkg = spack.repo.get(spec, new=True)
 
 
     def tearDown(self):
         """Destroy the stage space used by this test."""
         super(SvnFetchTest, self).tearDown()
-
-        if self.repo.stage is not None:
-            self.repo.stage.destroy()
-
+        self.repo.destroy()
         self.pkg.do_clean()
 
 
     def assert_rev(self, rev):
         """Check that the current revision is equal to the supplied rev."""
         def get_rev():
-            output = svn('info', return_output=True)
+            output = svn('info', output=str)
             self.assertTrue("Revision" in output)
             for line in output.split('\n'):
                 match = re.match(r'Revision: (\d+)', line)
diff --git a/lib/spack/spack/test/tally_plugin.py b/lib/spack/spack/test/tally_plugin.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ca898c47ce0f01686aecbcdfaa82453918df1e9
--- /dev/null
+++ b/lib/spack/spack/test/tally_plugin.py
@@ -0,0 +1,59 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://scalability-llnl.github.io/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from nose.plugins import Plugin
+
+import os
+
+class Tally(Plugin):
+    name = 'tally'
+
+    def __init__(self):
+        super(Tally, self).__init__()
+        self.successCount = 0
+        self.failCount = 0
+        self.errorCount = 0
+
+    @property
+    def numberOfTestsRun(self):
+        """Excludes skipped tests"""
+        return self.errorCount + self.failCount + self.successCount
+
+    def options(self, parser, env=os.environ):
+        super(Tally, self).options(parser, env=env)
+
+    def configure(self, options, conf):
+        super(Tally, self).configure(options, conf)
+
+    def addSuccess(self, test):
+        self.successCount += 1
+
+    def addError(self, test, err):
+        self.errorCount += 1
+
+    def addFailure(self, test, err):
+        self.failCount += 1
+
+    def finalize(self, result):
+        pass
diff --git a/lib/spack/spack/test/unit_install.py b/lib/spack/spack/test/unit_install.py
index 41c76a6dfae8c8bad91202b55368aa50d85f2346..ccc409dd6023ea59afa4733524d136e22785fef5 100644
--- a/lib/spack/spack/test/unit_install.py
+++ b/lib/spack/spack/test/unit_install.py
@@ -26,16 +26,16 @@
 import itertools
 
 import spack
-test_install = __import__("spack.cmd.test-install", 
+test_install = __import__("spack.cmd.test-install",
     fromlist=["BuildId", "create_test_output", "TestResult"])
 
 class MockOutput(object):
     def __init__(self):
         self.results = {}
-    
+
     def add_test(self, buildId, passed=True, buildInfo=None):
         self.results[buildId] = passed
-    
+
     def write_to(self, stream):
         pass
 
@@ -45,14 +45,14 @@ def __init__(self, name, version, hashStr=None):
         self.name = name
         self.version = version
         self.hash = hashStr if hashStr else hash((name, version))
-    
+
     def traverse(self, order=None):
-        allDeps = itertools.chain.from_iterable(i.traverse() for i in 
+        allDeps = itertools.chain.from_iterable(i.traverse() for i in
             self.dependencies.itervalues())
         return set(itertools.chain([self], allDeps))
-    
+
     def dag_hash(self):
-        return self.hash 
+        return self.hash
 
     def to_yaml(self):
         return "<<<MOCK YAML {0}>>>".format(test_install.BuildId(self).stringId())
@@ -75,47 +75,51 @@ class UnitInstallTest(unittest.TestCase):
 
     def setUp(self):
         super(UnitInstallTest, self).setUp()
-        
+
         pkgX.installed = False
         pkgY.installed = False
 
+        self.saved_db = spack.repo
         pkgDb = MockPackageDb({specX:pkgX, specY:pkgY})
-        spack.db = pkgDb
+        spack.repo = pkgDb
+
 
     def tearDown(self):
         super(UnitInstallTest, self).tearDown()
-        
+
+        spack.repo = self.saved_db
+
     def test_installing_both(self):
         mo = MockOutput()
-        
+
         pkgX.installed = True
         pkgY.installed = True
-        test_install.create_test_output(specX, [specX, specY], mo, getLogFunc=test_fetch_log)
-        
-        self.assertEqual(mo.results, 
-            {bIdX:test_install.TestResult.PASSED, 
-            bIdY:test_install.TestResult.PASSED})
+        test_install.create_test_output(specX, [specX, specY], mo, getLogFunc=mock_fetch_log)
+
+        self.assertEqual(mo.results,
+            {bIdX:test_install.TestResult.PASSED,
+             bIdY:test_install.TestResult.PASSED})
+
 
     def test_dependency_already_installed(self):
         mo = MockOutput()
-        
+
         pkgX.installed = True
         pkgY.installed = True
-        test_install.create_test_output(specX, [specX], mo, getLogFunc=test_fetch_log)
-        
+        test_install.create_test_output(specX, [specX], mo, getLogFunc=mock_fetch_log)
         self.assertEqual(mo.results, {bIdX:test_install.TestResult.PASSED})
 
     #TODO: add test(s) where Y fails to install
 
+
 class MockPackageDb(object):
     def __init__(self, init=None):
         self.specToPkg = {}
         if init:
             self.specToPkg.update(init)
-        
+
     def get(self, spec):
         return self.specToPkg[spec]
 
-def test_fetch_log(path):
+def mock_fetch_log(path):
     return []
-
diff --git a/lib/spack/spack/test/url_substitution.py b/lib/spack/spack/test/url_substitution.py
index 8b90ee086ae5a43ced282ade86959247cb3d7d95..aec8baf4eaaaaae078dcf62dd6e86425e9d6b5a8 100644
--- a/lib/spack/spack/test/url_substitution.py
+++ b/lib/spack/spack/test/url_substitution.py
@@ -29,7 +29,6 @@
 
 import spack
 import spack.url as url
-from spack.packages import PackageDB
 
 
 class PackageSanityTest(unittest.TestCase):
diff --git a/lib/spack/spack/test/yaml.py b/lib/spack/spack/test/yaml.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a357b8e69697a2785225734e572f0030b25060e
--- /dev/null
+++ b/lib/spack/spack/test/yaml.py
@@ -0,0 +1,93 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""
+Test Spack's custom YAML format.
+"""
+import unittest
+import spack.util.spack_yaml as syaml
+
+test_file = """\
+config_file:
+  x86_64:
+    foo: /path/to/foo
+    bar: /path/to/bar
+    baz: /path/to/baz
+  some_list:
+    - item 1
+    - item 2
+    - item 3
+  another_list:
+    [ 1, 2, 3 ]
+  some_key: some_string
+"""
+
+test_data = {
+    'config_file' : syaml.syaml_dict([
+        ('x86_64', syaml.syaml_dict([
+            ('foo', '/path/to/foo'),
+            ('bar', '/path/to/bar'),
+            ('baz', '/path/to/baz' )])),
+        ('some_list', [ 'item 1', 'item 2', 'item 3' ]),
+        ('another_list', [ 1, 2, 3 ]),
+        ('some_key', 'some_string')
+    ])}
+
+class YamlTest(unittest.TestCase):
+
+    def setUp(self):
+        self.data = syaml.load(test_file)
+
+
+    def test_parse(self):
+        self.assertEqual(test_data, self.data)
+
+
+    def test_dict_order(self):
+        self.assertEqual(
+            ['x86_64', 'some_list', 'another_list', 'some_key'],
+            self.data['config_file'].keys())
+
+        self.assertEqual(
+            ['foo', 'bar', 'baz'],
+            self.data['config_file']['x86_64'].keys())
+
+
+    def test_line_numbers(self):
+        def check(obj, start_line, end_line):
+            self.assertEqual(obj._start_mark.line, start_line)
+            self.assertEqual(obj._end_mark.line, end_line)
+
+        check(self.data,                                  0, 12)
+        check(self.data['config_file'],                   1, 12)
+        check(self.data['config_file']['x86_64'],         2,  5)
+        check(self.data['config_file']['x86_64']['foo'],  2,  2)
+        check(self.data['config_file']['x86_64']['bar'],  3,  3)
+        check(self.data['config_file']['x86_64']['baz'],  4,  4)
+        check(self.data['config_file']['some_list'],      6,  9)
+        check(self.data['config_file']['some_list'][0],   6,  6)
+        check(self.data['config_file']['some_list'][1],   7,  7)
+        check(self.data['config_file']['some_list'][2],   8,  8)
+        check(self.data['config_file']['another_list'],  10, 10)
+        check(self.data['config_file']['some_key'],      11, 11)
diff --git a/lib/spack/spack/util/executable.py b/lib/spack/spack/util/executable.py
index 4f9958062b15523892b03bfdfbf8225165a2c511..fc27b789d062ea8ff2da153fcb6af6d2a018c8c7 100644
--- a/lib/spack/spack/util/executable.py
+++ b/lib/spack/spack/util/executable.py
@@ -55,24 +55,80 @@ def command(self):
 
 
     def __call__(self, *args, **kwargs):
-        """Run the executable with subprocess.check_output, return output."""
-        return_output = kwargs.get("return_output", False)
-        fail_on_error = kwargs.get("fail_on_error", True)
-        ignore_errors = kwargs.get("ignore_errors", ())
+        """Run this executable in a subprocess.
 
-        output        = kwargs.get("output", sys.stdout)
-        error         = kwargs.get("error", sys.stderr)
-        input         = kwargs.get("input", None)
+        Arguments
+          args
+            command line arguments to the executable to run.
+
+        Optional arguments
+
+          fail_on_error
+
+            Raise an exception if the subprocess returns an
+            error. Default is True.  When set to False, the return
+            code is available as `exe.returncode`.
+
+          ignore_errors
+
+            An optional list/tuple of error codes that can be
+            *ignored*, i.e., if these codes are returned, no exception
+            is raised even when `fail_on_error` is `True`.
+
+          output, error
+
+            These arguments allow you to specify new stdout and stderr
+            values.  They default to `None`, which means the
+            subprocess will inherit the parent's file descriptors.
+
+            You can set these to:
+            - python streams, e.g. open Python file objects, or os.devnull;
+            - filenames, which will be automatically opened for writing; or
+            - `str`, as in the Python string type. If you set these to `str`,
+               output and error will be written to pipes and returned as
+               a string.  If both `output` and `error` are set to `str`,
+               then one string is returned containing output concatenated
+               with error.
+
+          input
+
+            Same as output, error, but `str` is not an allowed value.
+
+        Deprecated arguments
+
+          return_output[=False]
+
+            Setting this to True is the same as setting output=str.
+            This argument may be removed in future Spack versions.
+
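+        Example (editor's illustrative sketch; the `echo` executable
+        below is hypothetical, not part of this change)
+
+            echo = Executable('/bin/echo')
+            echo('hello')                    # inherits the parent's stdout
+            out = echo('hello', output=str)  # captures stdout as a string
+            echo('hello', output='log.txt')  # writes stdout to 'log.txt'
+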
+        """
+        fail_on_error = kwargs.pop("fail_on_error", True)
+        ignore_errors = kwargs.pop("ignore_errors", ())
+
+        # TODO: This is deprecated.  Remove in a future version.
+        return_output = kwargs.pop("return_output", False)
+
+        # A default value of None means to keep the parent's file descriptors.
+        if return_output:
+            output = str
+        else:
+            output = kwargs.pop("output", None)
+
+        error         = kwargs.pop("error", None)
+        input         = kwargs.pop("input", None)
+        if input is str:
+            raise ValueError("Cannot use `str` as input stream.")
 
         def streamify(arg, mode):
             if isinstance(arg, basestring):
                 return open(arg, mode), True
-            elif arg is None and mode != 'r':
-                return open(os.devnull, mode), True
-            return arg, False
-        output, ostream = streamify(output, 'w')
-        error,  estream = streamify(error,  'w')
-        input,  istream = streamify(input,  'r')
+            elif arg is str:
+                return subprocess.PIPE, False
+            else:
+                return arg, False
+        ostream, close_ostream = streamify(output, 'w')
+        estream, close_estream = streamify(error,  'w')
+        istream, close_istream = streamify(input,  'r')
 
         # if they just want to ignore one error code, make it a tuple.
         if isinstance(ignore_errors, int):
@@ -93,19 +149,19 @@ def streamify(arg, mode):
 
         try:
             proc = subprocess.Popen(
-                cmd,
-                stdin=input,
-                stderr=error,
-                stdout=subprocess.PIPE if return_output else output)
+                cmd, stdin=istream, stderr=estream, stdout=ostream)
             out, err = proc.communicate()
-            self.returncode = proc.returncode
 
-            rc = proc.returncode
+            rc = self.returncode = proc.returncode
             if fail_on_error and rc != 0 and (rc not in ignore_errors):
                 raise ProcessError("Command exited with status %d:"
                                    % proc.returncode, cmd_line)
-            if return_output:
-                return out
+
+            if output is str or error is str:
+                result = ''
+                if output is str: result += out
+                if error is str:  result += err
+                return result
 
         except OSError, e:
             raise ProcessError(
@@ -120,9 +176,9 @@ def streamify(arg, mode):
                     % (proc.returncode, cmd_line))
 
         finally:
-            if ostream: output.close()
-            if estream: error.close()
-            if istream: input.close()
+            if close_ostream: ostream.close()
+            if close_estream: estream.close()
+            if close_istream: istream.close()
 
 
     def __eq__(self, other):
diff --git a/lib/spack/spack/util/naming.py b/lib/spack/spack/util/naming.py
index 782afbd4bbdb4fe8e8e87e3f14935e92edc63db1..5025f1502787a3d8a4da64eaab84291e54f03bd4 100644
--- a/lib/spack/spack/util/naming.py
+++ b/lib/spack/spack/util/naming.py
@@ -1,13 +1,22 @@
 # Need this because of spack.util.string
 from __future__ import absolute_import
 import string
+import itertools
 import re
+from StringIO import StringIO
 
 import spack
 
+__all__ = ['mod_to_class', 'spack_module_to_python_module', 'valid_module_name',
+           'valid_fully_qualified_module_name', 'validate_fully_qualified_module_name',
+           'validate_module_name', 'possible_spack_module_names', 'NamespaceTrie']
+
 # Valid module names can contain '-' but can't start with it.
 _valid_module_re = r'^\w[\w-]*$'
 
+# Valid fully qualified module names are one or more valid module
+# names separated by '.'.
+_valid_fully_qualified_module_re = r'^(\w[\w-]*)(\.\w[\w-]*)*$'
+
 
 def mod_to_class(mod_name):
     """Convert a name from module style to class name style.  Spack mostly
@@ -42,20 +51,160 @@ def mod_to_class(mod_name):
     return class_name
 
 
+def spack_module_to_python_module(mod_name):
+    """Given a Spack module name, returns the name by which it can be
+       imported in Python.
+    """
+    if re.match(r'[0-9]', mod_name):
+        mod_name = 'num' + mod_name
+
+    return mod_name.replace('-', '_')
+
+
+def possible_spack_module_names(python_mod_name):
+    """Given a Python module name, return a list of all possible spack module
+       names that could correspond to it."""
+    mod_name = re.sub(r'^num(\d)', r'\1', python_mod_name)
+
+    parts = re.split(r'(_)', mod_name)
+    options = [['_', '-']] * mod_name.count('_')
+
+    results = []
+    for subs in itertools.product(*options):
+        s = list(parts)
+        s[1::2] = subs
+        results.append(''.join(s))
+
+    return results
+
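+# Illustrative examples (editor's sketch, not part of the original change):
+#
+#   spack_module_to_python_module('3proxy')  returns 'num3proxy'
+#   possible_spack_module_names('foo_bar')   returns ['foo_bar', 'foo-bar']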
+
 def valid_module_name(mod_name):
-    """Return whether the mod_name is valid for use in Spack."""
+    """Return whether mod_name is valid for use in Spack."""
     return bool(re.match(_valid_module_re, mod_name))
 
 
+def valid_fully_qualified_module_name(mod_name):
+    """Return whether mod_name is a valid namespaced module name."""
+    return bool(re.match(_valid_fully_qualified_module_re, mod_name))
+
+
 def validate_module_name(mod_name):
     """Raise an exception if mod_name is not valid."""
     if not valid_module_name(mod_name):
         raise InvalidModuleNameError(mod_name)
 
 
+def validate_fully_qualified_module_name(mod_name):
+    """Raise an exception if mod_name is not a valid namespaced module name."""
+    if not valid_fully_qualified_module_name(mod_name):
+        raise InvalidFullyQualifiedModuleNameError(mod_name)
+
+
 class InvalidModuleNameError(spack.error.SpackError):
     """Raised when we encounter a bad module name."""
     def __init__(self, name):
         super(InvalidModuleNameError, self).__init__(
             "Invalid module name: " + name)
         self.name = name
+
+
+class InvalidFullyQualifiedModuleNameError(spack.error.SpackError):
+    """Raised when we encounter a bad full package name."""
+    def __init__(self, name):
+        super(InvalidFullyQualifiedModuleNameError, self).__init__(
+            "Invalid fully qualified package name: " + name)
+        self.name = name
+
+
+class NamespaceTrie(object):
+    class Element(object):
+        def __init__(self, value):
+            self.value = value
+
+
+    def __init__(self, separator='.'):
+        self._subspaces = {}
+        self._value = None
+        self._sep = separator
+
+
+    def __setitem__(self, namespace, value):
+        first, sep, rest = namespace.partition(self._sep)
+
+        if not first:
+            self._value = NamespaceTrie.Element(value)
+            return
+
+        if first not in self._subspaces:
+            self._subspaces[first] = NamespaceTrie()
+
+        self._subspaces[first][rest] = value
+
+
+    def _get_helper(self, namespace, full_name):
+        first, sep, rest = namespace.partition(self._sep)
+        if not first:
+            if not self._value:
+                raise KeyError("Can't find namespace '%s' in trie" % full_name)
+            return self._value.value
+        elif first not in self._subspaces:
+            raise KeyError("Can't find namespace '%s' in trie" % full_name)
+        else:
+            return self._subspaces[first]._get_helper(rest, full_name)
+
+
+    def __getitem__(self, namespace):
+        return self._get_helper(namespace, namespace)
+
+
+    def is_prefix(self, namespace):
+        """True if the namespace has a value, or if it's the prefix of one that does."""
+        first, sep, rest = namespace.partition(self._sep)
+        if not first:
+            return True
+        elif first not in self._subspaces:
+            return False
+        else:
+            return self._subspaces[first].is_prefix(rest)
+
+
+    def is_leaf(self, namespace):
+        """True if this namespace has no children in the trie."""
+        first, sep, rest = namespace.partition(self._sep)
+        if not first:
+            return not self._subspaces
+        elif first not in self._subspaces:
+            return False
+        else:
+            return self._subspaces[first].is_leaf(rest)
+
+
+    def has_value(self, namespace):
+        """True if there is a value set for the given namespace."""
+        first, sep, rest = namespace.partition(self._sep)
+        if not first:
+            return self._value is not None
+        elif first not in self._subspaces:
+            return False
+        else:
+            return self._subspaces[first].has_value(rest)
+
+
+    def __contains__(self, namespace):
+        """Returns whether a value has been set for the namespace."""
+        return self.has_value(namespace)
+
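+    # Usage sketch (editor's illustration; `repo` stands for any value):
+    #
+    #   t = NamespaceTrie()
+    #   t['builtin.mock'] = repo
+    #   t['builtin.mock']          # returns repo
+    #   'builtin' in t             # False: no value stored at the prefix
+    #   t.is_prefix('builtin')     # True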
+
+    def _str_helper(self, stream, level=0):
+        indent = (level * '    ')
+        for name in sorted(self._subspaces):
+            stream.write(indent + name + '\n')
+            if self._value:
+                stream.write(indent + '  ' + repr(self._value.value) + '\n')
+            self._subspaces[name]._str_helper(stream, level+1)
+
+
+    def __str__(self):
+        stream = StringIO()
+        self._str_helper(stream)
+        return stream.getvalue()
diff --git a/lib/spack/spack/util/spack_yaml.py b/lib/spack/spack/util/spack_yaml.py
new file mode 100644
index 0000000000000000000000000000000000000000..728e86b8ee305964b42e57980e0b527edf20f396
--- /dev/null
+++ b/lib/spack/spack/util/spack_yaml.py
@@ -0,0 +1,201 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+"""Enhanced YAML parsing for Spack.
+
+- ``load()`` preserves YAML Marks on returned objects -- this allows
+  us to access file and line information later.
+
+- Our load methods use the ``OrderedDict`` class instead of YAML's
+  default unordered dict.
+
+"""
+import yaml
+from yaml.nodes import *
+from yaml.constructor import ConstructorError
+from yaml.representer import SafeRepresenter
+from ordereddict_backport import OrderedDict
+
+# Only export load and dump
+__all__ = ['load', 'dump']
+
+# Make new classes so we can add custom attributes.
+# Also, use OrderedDict instead of just dict.
+class syaml_dict(OrderedDict):
+    def __repr__(self):
+        mappings = ('%r: %r' % (k, v) for k, v in self.items())
+        return '{%s}' % ', '.join(mappings)
+class syaml_list(list):
+    __repr__ = list.__repr__
+class syaml_str(str):
+    __repr__ = str.__repr__
+
+def mark(obj, node):
+    """Add start and end markers to an object."""
+    obj._start_mark = node.start_mark
+    obj._end_mark = node.end_mark
+
+
+class OrderedLineLoader(yaml.Loader):
+    """YAML loader that preserves order and line numbers.
+
+       Mappings read in by this loader behave like an ordered dict.
+       Sequences, mappings, and strings also have new attributes,
+       ``_start_mark`` and ``_end_mark``, that preserve YAML line
+       information in the output data.
+
+    """
+    #
+    # Override construct_yaml_* so that they build our derived types,
+    # which allows us to add new attributes to them.
+    #
+    # The standard YAML constructors return empty instances and fill
+    # in with mappings later.  We preserve this behavior.
+    #
+    def construct_yaml_str(self, node):
+        value = self.construct_scalar(node)
+        try:
+            value = value.encode('ascii')
+        except UnicodeEncodeError:
+            pass
+        value = syaml_str(value)
+        mark(value, node)
+        return value
+
+
+    def construct_yaml_seq(self, node):
+        data = syaml_list()
+        mark(data, node)
+        yield data
+        data.extend(self.construct_sequence(node))
+
+
+    def construct_yaml_map(self, node):
+        data = syaml_dict()
+        mark(data, node)
+        yield data
+        value = self.construct_mapping(node)
+        data.update(value)
+
+    #
+    # Override the ``construct_*`` routines.  These fill in the empty
+    # objects yielded by the above ``construct_yaml_*`` methods.
+    #
+    def construct_sequence(self, node, deep=False):
+        if not isinstance(node, SequenceNode):
+            raise ConstructorError(None, None,
+                    "expected a sequence node, but found %s" % node.id,
+                    node.start_mark)
+        value = syaml_list(self.construct_object(child, deep=deep)
+                           for child in node.value)
+        mark(value, node)
+        return value
+
+
+    def construct_mapping(self, node, deep=False):
+        """Store mappings as OrderedDicts instead of as regular python
+           dictionaries to preserve file ordering."""
+        if not isinstance(node, MappingNode):
+            raise ConstructorError(None, None,
+                    "expected a mapping node, but found %s" % node.id,
+                    node.start_mark)
+
+        mapping = syaml_dict()
+        for key_node, value_node in node.value:
+            key = self.construct_object(key_node, deep=deep)
+            try:
+                hash(key)
+            except TypeError, exc:
+                raise ConstructorError("while constructing a mapping", node.start_mark,
+                        "found unacceptable key (%s)" % exc, key_node.start_mark)
+            value = self.construct_object(value_node, deep=deep)
+            if key in mapping:
+                raise ConstructorError("while constructing a mapping", node.start_mark,
+                                       "found already in-use key (%s)" % key, key_node.start_mark)
+            mapping[key] = value
+
+        mark(mapping, node)
+        return mapping
+
+# register above new constructors
+OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:map', OrderedLineLoader.construct_yaml_map)
+OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:seq', OrderedLineLoader.construct_yaml_seq)
+OrderedLineLoader.add_constructor(u'tag:yaml.org,2002:str', OrderedLineLoader.construct_yaml_str)
+
+
+
+class OrderedLineDumper(yaml.Dumper):
+    """Dumper that preserves ordering and formats ``syaml_*`` objects.
+
+      This dumper preserves the insertion ordering of ``syaml_dict`` objects
+      when they're written out.  It also has some custom formatters
+      for ``syaml_*`` objects so that they are formatted like their
+      regular Python equivalents, instead of ugly YAML pyobjects.
+
+    """
+    def represent_mapping(self, tag, mapping, flow_style=None):
+        value = []
+        node = MappingNode(tag, value, flow_style=flow_style)
+        if self.alias_key is not None:
+            self.represented_objects[self.alias_key] = node
+        best_style = True
+        if hasattr(mapping, 'items'):
+            # if it's a syaml_dict, preserve OrderedDict order.
+            # Otherwise do the default thing.
+            sort = not isinstance(mapping, syaml_dict)
+            mapping = mapping.items()
+            if sort:
+                mapping.sort()
+
+        for item_key, item_value in mapping:
+            node_key = self.represent_data(item_key)
+            node_value = self.represent_data(item_value)
+            if not (isinstance(node_key, ScalarNode) and not node_key.style):
+                best_style = False
+            if not (isinstance(node_value, ScalarNode) and not node_value.style):
+                best_style = False
+            value.append((node_key, node_value))
+        if flow_style is None:
+            if self.default_flow_style is not None:
+                node.flow_style = self.default_flow_style
+            else:
+                node.flow_style = best_style
+        return node
+
+# Make our special objects look like normal YAML ones.
+OrderedLineDumper.add_representer(syaml_dict, OrderedLineDumper.represent_dict)
+OrderedLineDumper.add_representer(syaml_list, OrderedLineDumper.represent_list)
+OrderedLineDumper.add_representer(syaml_str, OrderedLineDumper.represent_str)
+
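+# Editor's note: with these representers, dump() preserves syaml_dict
+# insertion order -- e.g. dump(syaml_dict([('b', 1), ('a', 2)])) emits
+# key 'b' before key 'a', where a plain dict's keys would be sorted.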
+
+def load(*args, **kwargs):
+    """Load but modify the loader instance so that it will add __line__
+       atrributes to the returned object."""
+    kwargs['Loader'] = OrderedLineLoader
+    return yaml.load(*args, **kwargs)
+
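+# Illustrative use (editor's sketch; marks hold zero-based line numbers):
+#
+#   data = load("foo:\n  - a\n  - b\n")
+#   data['foo']._start_mark.line   # 1: the list begins on the second line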
+
+def dump(*args, **kwargs):
+    kwargs['Dumper'] = OrderedLineDumper
+    return yaml.dump(*args, **kwargs)
diff --git a/share/spack/setup-env.sh b/share/spack/setup-env.sh
index 6503728d8f256abe6087a1b459a66738b2f4d5fc..586a5b836b1245917acb87e302a47ec48eac30c4 100755
--- a/share/spack/setup-env.sh
+++ b/share/spack/setup-env.sh
@@ -58,7 +58,7 @@
 
 function spack {
     # save raw arguments into an array before butchering them
-    declare -a args=( "$@" )
+    args=( "$@" )
 
     # accumulate initial flags for main spack command
     _sp_flags=""
diff --git a/var/spack/mock_configs/site_spackconfig/compilers.yaml b/var/spack/mock_configs/site_spackconfig/compilers.yaml
deleted file mode 100644
index 0a2dc893e293bfdbee05deb35fd70893dfdebde8..0000000000000000000000000000000000000000
--- a/var/spack/mock_configs/site_spackconfig/compilers.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-compilers:
-  all:
-    clang@3.3:
-      cc: /path/to/clang
-      cxx: /path/to/clang++
-      f77: None
-      fc: None
-    gcc@4.5.0:
-      cc: /path/to/gcc
-      cxx: /path/to/g++
-      f77: /path/to/gfortran
-      fc: /path/to/gfortran
diff --git a/var/spack/packages/R/package.py b/var/spack/packages/R/package.py
deleted file mode 100644
index 2e6f65a7429ac41f251682e8c8722e4766f73f91..0000000000000000000000000000000000000000
--- a/var/spack/packages/R/package.py
+++ /dev/null
@@ -1,33 +0,0 @@
-from spack import *
-
-class R(Package):
-    """R is 'GNU S', a freely available language and environment for
-       statistical computing and graphics which provides a wide va
-       riety of statistical and graphical techniques: linear and
-       nonlinear modelling, statistical tests, time series analysis,
-       classification, clustering, etc. Please consult the R project
-       homepage for further information."""
-    homepage = "http://www.example.com"
-    url      = "http://cran.cnr.berkeley.edu/src/base/R-3/R-3.1.2.tar.gz"
-
-    version('3.1.2', '3af29ec06704cbd08d4ba8d69250ae74')
-
-    depends_on("readline")
-    depends_on("ncurses")
-    depends_on("icu")
-    depends_on("glib")
-    depends_on("zlib")
-    depends_on("libtiff")
-    depends_on("jpeg")
-    depends_on("cairo")
-    depends_on("pango")
-    depends_on("freetype")
-    depends_on("tcl")
-    depends_on("tk")
-
-    def install(self, spec, prefix):
-        configure("--prefix=%s" % prefix,
-                  "--enable-R-shlib",
-                  "--enable-BLAS-shlib")
-        make()
-        make("install")
diff --git a/var/spack/packages/boost/package.py b/var/spack/packages/boost/package.py
deleted file mode 100644
index 81dadbbf611e20589466e879e537e0ec7a81d6ff..0000000000000000000000000000000000000000
--- a/var/spack/packages/boost/package.py
+++ /dev/null
@@ -1,138 +0,0 @@
-from spack import *
-
-class Boost(Package):
-    """Boost provides free peer-reviewed portable C++ source
-       libraries, emphasizing libraries that work well with the C++
-       Standard Library.
-
-       Boost libraries are intended to be widely useful, and usable
-       across a broad spectrum of applications. The Boost license
-       encourages both commercial and non-commercial use.
-    """
-    homepage = "http://www.boost.org"
-    url      = "http://downloads.sourceforge.net/project/boost/boost/1.55.0/boost_1_55_0.tar.bz2"
-    list_url = "http://sourceforge.net/projects/boost/files/boost/"
-    list_depth = 2
-
-    version('1.59.0', '6aa9a5c6a4ca1016edd0ed1178e3cb87')
-    version('1.58.0', 'b8839650e61e9c1c0a89f371dd475546')
-    version('1.57.0', '1be49befbdd9a5ce9def2983ba3e7b76')
-    version('1.56.0', 'a744cf167b05d72335f27c88115f211d')
-    version('1.55.0', 'd6eef4b4cacb2183f2bf265a5a03a354')
-    version('1.54.0', '15cb8c0803064faef0c4ddf5bc5ca279')
-    version('1.53.0', 'a00d22605d5dbcfb4c9936a9b35bc4c2')
-    version('1.52.0', '3a855e0f919107e0ca4de4d84ad3f750')
-    version('1.51.0', '4b6bd483b692fd138aef84ed2c8eb679')
-    version('1.50.0', '52dd00be775e689f55a987baebccc462')
-    version('1.49.0', '0d202cb811f934282dea64856a175698')
-    version('1.48.0', 'd1e9a7a7f532bb031a3c175d86688d95')
-    version('1.47.0', 'a2dc343f7bc7f83f8941e47ed4a18200')
-    version('1.46.1', '7375679575f4c8db605d426fc721d506')
-    version('1.46.0', '37b12f1702319b73876b0097982087e0')
-    version('1.45.0', 'd405c606354789d0426bc07bea617e58')
-    version('1.44.0', 'f02578f5218f217a9f20e9c30e119c6a')
-    version('1.43.0', 'dd49767bfb726b0c774f7db0cef91ed1')
-    version('1.42.0', '7bf3b4eb841b62ffb0ade2b82218ebe6')
-    version('1.41.0', '8bb65e133907db727a2a825c5400d0a6')
-    version('1.40.0', 'ec3875caeac8c52c7c129802a8483bd7')
-    version('1.39.0', 'a17281fd88c48e0d866e1a12deecbcc0')
-    version('1.38.0', '5eca2116d39d61382b8f8235915cb267')
-    version('1.37.0', '8d9f990bfb7e83769fa5f1d6f065bc92')
-    version('1.36.0', '328bfec66c312150e4c2a78dcecb504b')
-    version('1.35.0', 'dce952a7214e72d6597516bcac84048b')
-    version('1.34.1', '2d938467e8a448a2c9763e0a9f8ca7e5')
-    version('1.34.0', 'ed5b9291ffad776f8757a916e1726ad0')
-
-    variant('debug', default=False, description='Switch to the debug version of Boost')
-    variant('python', default=False, description='Activate the component Boost.Python')
-    variant('mpi', default=False, description='Activate the component Boost.MPI')
-    variant('compression', default=True, description='Activate the compression Boost.iostreams')
-
-    depends_on('mpi', when='+mpi')
-    depends_on('python', when='+python')
-    depends_on('zlib', when='+compression')
-    depends_on('bzip2', when='+compression')
-    
-    def url_for_version(self, version):
-        """Handle Boost's weird URLs, which write the version two different ways."""
-        parts = [str(p) for p in Version(version)]
-        dots = ".".join(parts)
-        underscores = "_".join(parts)
-        return "http://downloads.sourceforge.net/project/boost/boost/%s/boost_%s.tar.bz2" % (
-            dots, underscores)
-
-    def determine_toolset(self):
-        toolsets = {'gcc': 'gcc',
-                    'icpc': 'intel',
-                    'clang++': 'clang'}
-
-        for cc, toolset in toolsets.iteritems():
-            if(cc in self.compiler.cxx_names):
-                return toolset
-
-        # fallback to gcc if no toolset found
-        return 'gcc'
-
-    def determine_bootstrap_options(self, spec, options):
-        options.append('--with-toolset=%s' % self.determine_toolset())
-
-        without_libs = []
-        if '~mpi' in spec:
-            without_libs.append('mpi')
-        if '~python' in spec:
-            without_libs.append('python')
-        else:
-            options.append('--with-python=%s' % (spec['python'].prefix.bin + '/python'))
-
-        if without_libs:
-            options.append('--without-libraries=%s' % ','.join(without_libs))
-
-        with open('user-config.jam', 'w') as f:
-            if '+mpi' in spec:
-                f.write('using mpi : %s ;\n' % (spec['mpi'].prefix.bin + '/mpicxx'))
-            if '+python' in spec:
-                f.write('using python : %s : %s ;\n' % (spec['python'].version,
-                                                      (spec['python'].prefix.bin + '/python')))
-
-    def determine_b2_options(self, spec, options):
-        if '+debug' in spec:
-            options.append('variant=debug')
-        else:
-            options.append('variant=release')
-
-        if '~compression' in spec:
-            options.extend(['-s NO_BZIP2=1',
-                            '-s NO_ZLIB=1',
-            ])
-
-        if '+compression' in spec:
-            options.extend(['-s BZIP2_INCLUDE=%s' % spec['bzip2'].prefix.include,
-                            '-s BZIP2_LIBPATH=%s' % spec['bzip2'].prefix.lib,
-                            '-s ZLIB_INCLUDE=%s' % spec['zlib'].prefix.include,
-                            '-s ZLIB_LIBPATH=%s' % spec['zlib'].prefix.lib])
-
-        options.extend(['toolset=%s' % self.determine_toolset(),
-                       'link=static,shared',
-                       '--layout=tagged'])
-
-    def install(self, spec, prefix):
-        # to make him find the user-config.jam
-        env['BOOST_BUILD_PATH'] = './'
-
-        bootstrap = Executable('./bootstrap.sh')
-
-        bootstrap_options = ['--prefix=%s' % prefix]
-        self.determine_bootstrap_options(spec, bootstrap_options)
-
-        bootstrap(*bootstrap_options)
-
-        # b2 used to be called bjam, before 1.47 (sigh)
-        b2name = './b2' if spec.satisfies('@1.47:') else './bjam'
-
-        b2 = Executable(b2name)
-        b2_options = ['-j %s' % make_jobs]
-
-        self.determine_b2_options(spec, b2_options)
-
-        b2('install', 'threading=single', *b2_options)
-        b2('install', 'threading=multi', *b2_options)
diff --git a/var/spack/packages/clang/package.py b/var/spack/packages/clang/package.py
deleted file mode 100644
index e46e08d5f11b7609ee5835d37f105fd07aa5a6a8..0000000000000000000000000000000000000000
--- a/var/spack/packages/clang/package.py
+++ /dev/null
@@ -1,95 +0,0 @@
-##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (as published by
-# the Free Software Foundation) version 2.1 dated February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-
-
-from spack import *
-
-import os
-import os.path
-
-class Clang(Package):
-    """The goal of the Clang project is to create a new C, C++,
-       Objective C and Objective C++ front-end for the LLVM compiler.
-    """
-    homepage = 'http://clang.llvm.org'
-    url = 'http://llvm.org/releases/3.7.0/cfe-3.7.0.src.tar.xz'
-
-    depends_on('llvm@3.7.0', when='@3.7.0')
-    depends_on('llvm@3.6.2', when='@3.6.2')
-    depends_on('llvm@3.5.1', when='@3.5.1')
-
-    version('3.7.0', '8f9d27335e7331cf0a4711e952f21f01', url='http://llvm.org/releases/3.7.0/cfe-3.7.0.src.tar.xz')
-    version('3.6.2', 'ff862793682f714bb7862325b9c06e20', url='http://llvm.org/releases/3.6.2/cfe-3.6.2.src.tar.xz')
-    version('3.5.1', '93f9532f8f7e6f1d8e5c1116907051cb', url='http://llvm.org/releases/3.5.1/cfe-3.5.1.src.tar.xz')
-
-    ##########
-    # @3.7.0
-    resource(name='clang-tools-extra',
-             url='http://llvm.org/releases/3.7.0/clang-tools-extra-3.7.0.src.tar.xz',
-             md5='d5a87dacb65d981a427a536f6964642e', destination='tools', when='@3.7.0')
-    ##########
-
-    def install(self, spec, prefix):
-        env['CXXFLAGS'] = self.compiler.cxx11_flag
-
-        with working_dir('spack-build', create=True):
-
-            options = []
-            if '@3.7.0:' in spec:
-                options.append('-DCLANG_DEFAULT_OPENMP_RUNTIME:STRING=libomp')
-            options.extend(std_cmake_args)
-
-            cmake('..',
-                  '-DCLANG_PATH_TO_LLVM_BUILD:PATH=%s' % spec['llvm'].prefix,
-                  '-DLLVM_MAIN_SRC_DIR:PATH=%s' % spec['llvm'].prefix,
-                  *options)
-            make()
-            make("install")
-            # CLang doesn't look in llvm folders for system headers...
-            self.link_llvm_directories(spec)
-
-    def link_llvm_directories(self, spec):
-
-        def clang_include_dir_at(root):
-            return join_path(root, 'include')
-
-        def clang_lib_dir_at(root):
-            return join_path(root, 'lib/clang/', str(self.version), 'include')
-
-        def do_link(source_dir, destination_dir):
-            if os.path.exists(source_dir):
-                for name in os.listdir(source_dir):
-                    source = join_path(source_dir, name)
-                    link = join_path(destination_dir, name)
-                    os.symlink(source, link)
-
-        # Link folder and files in include
-        llvm_dir = clang_include_dir_at(spec['llvm'].prefix)
-        clang_dir = clang_include_dir_at(self.prefix)
-        do_link(llvm_dir, clang_dir)
-        # Link folder and files in lib
-        llvm_dir = clang_lib_dir_at(spec['llvm'].prefix)
-        clang_dir = clang_lib_dir_at(self.prefix)
-        do_link(llvm_dir, clang_dir)
\ No newline at end of file
diff --git a/var/spack/packages/hdf5/package.py b/var/spack/packages/hdf5/package.py
deleted file mode 100644
index adac79d9bbdf7d69feca9643feafdeb0cc16cdb3..0000000000000000000000000000000000000000
--- a/var/spack/packages/hdf5/package.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from spack import *
-
-class Hdf5(Package):
-    """HDF5 is a data model, library, and file format for storing and managing
-       data. It supports an unlimited variety of datatypes, and is designed for
-       flexible and efficient I/O and for high volume and complex data.
-    """
-
-    homepage = "http://www.hdfgroup.org/HDF5/"
-    url      = "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8.13/src/hdf5-1.8.13.tar.gz"
-    list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
-    list_depth = 3
-    
-    version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618')
-    version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
-    version('1.8.13', 'c03426e9e77d7766944654280b467289')
-
-    variant('mpi', default=False, description='Enable MPI support')
-
-    depends_on("mpi", when='+mpi')
-    depends_on("zlib")
-
-    # TODO: currently hard-coded to use OpenMPI
-    def install(self, spec, prefix):
-        extra_args = []
-        if '+mpi' in spec:
-            extra_args.extend([
-                "--enable-parallel",
-                "CC=%s" % spec['mpi'].prefix.bin + "/mpicc",
-                "CXX=%s" % spec['mpi'].prefix.bin + "/mpic++",
-            ])
-
-        configure(
-            "--prefix=%s" % prefix,
-            "--with-zlib=%s" % spec['zlib'].prefix,
-            "--enable-shared",
-            *extra_args)
-
-        make()
-        make("install")
-
-    def url_for_version(self, version):
-        v = str(version)
-
-        if version == Version("1.2.2"):
-            return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + ".tar.gz"
-        elif version < Version("1.7"):
-            return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + version.up_to(2) + "/hdf5-" + v + ".tar.gz"
-        else:
-            return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + "/src/hdf5-" + v + ".tar.gz"
diff --git a/var/spack/packages/llvm/package.py b/var/spack/packages/llvm/package.py
deleted file mode 100644
index a3307584e08b2b5bc57fe98af798ca375d963579..0000000000000000000000000000000000000000
--- a/var/spack/packages/llvm/package.py
+++ /dev/null
@@ -1,72 +0,0 @@
-##############################################################################
-# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
-# Produced at the Lawrence Livermore National Laboratory.
-#
-# This file is part of Spack.
-# Written by David Beckingsale, david@llnl.gov, All rights reserved.
-# LLNL-CODE-647188
-#
-# For details, see https://github.com/llnl/spack
-# Please also see the LICENSE file for our notice and the LGPL.
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (as published by
-# the Free Software Foundation) version 2.1 dated February 1999.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
-# conditions of the GNU General Public License for more details.
-#
-# You should have received a copy of the GNU Lesser General Public License
-# along with this program; if not, write to the Free Software Foundation,
-# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-##############################################################################
-from spack import *
-
-
-class Llvm(Package):
-    """The LLVM Project is a collection of modular and reusable compiler and
-       toolchain technologies. Despite its name, LLVM has little to do with
-       traditional virtual machines, though it does provide helpful libraries
-       that can be used to build them. The name "LLVM" itself is not an acronym;
-       it is the full name of the project.
-    """
-    homepage = 'http://llvm.org/'
-    url = 'http://llvm.org/releases/3.7.0/llvm-3.7.0.src.tar.xz'
-
-    version('3.7.0', 'b98b9495e5655a672d6cb83e1a180f8e', url='http://llvm.org/releases/3.7.0/llvm-3.7.0.src.tar.xz')
-    version('3.6.2', '0c1ee3597d75280dee603bae9cbf5cc2', url='http://llvm.org/releases/3.6.2/llvm-3.6.2.src.tar.xz')
-    version('3.5.1', '2d3d8004f38852aa679e5945b8ce0b14', url='http://llvm.org/releases/3.5.1/llvm-3.5.1.src.tar.xz')
-    version('3.0', 'a8e5f5f1c1adebae7b4a654c376a6005', url='http://llvm.org/releases/3.0/llvm-3.0.tar.gz') # currently required by mesa package
-
-    depends_on('python@2.7:')
-
-    variant('libcxx', default=False, description="Builds the LLVM Standard C++ library targeting C++11")
-
-    ##########
-    # @3.7.0
-    resource(name='compiler-rt',
-             url='http://llvm.org/releases/3.7.0/compiler-rt-3.7.0.src.tar.xz', md5='383c10affd513026f08936b5525523f5',
-             destination='projects', when='@3.7.0')
-    resource(name='openmp',
-             url='http://llvm.org/releases/3.7.0/openmp-3.7.0.src.tar.xz', md5='f482c86fdead50ba246a1a2b0bbf206f',
-             destination='projects', when='@3.7.0')
-    resource(name='libcxx',
-             url='http://llvm.org/releases/3.7.0/libcxx-3.7.0.src.tar.xz', md5='46aa5175cbe1ad42d6e9c995968e56dd',
-             destination='projects', placement='libcxx', when='+libcxx@3.7.0')
-    resource(name='libcxxabi',
-             url='http://llvm.org/releases/3.7.0/libcxxabi-3.7.0.src.tar.xz', md5='5aa769e2fca79fa5335cfae8f6258772',
-             destination='projects', placement='libcxxabi', when='+libcxx@3.7.0')
-    ##########
-
-    def install(self, spec, prefix):
-        env['CXXFLAGS'] = self.compiler.cxx11_flag
-
-        with working_dir('spack-build', create=True):
-            cmake('..',
-                  '-DLLVM_REQUIRES_RTTI:BOOL=ON',
-                  '-DPYTHON_EXECUTABLE:PATH=%s/bin/python' % spec['python'].prefix,
-                  *std_cmake_args)
-            make()
-            make("install")
diff --git a/var/spack/packages/metis/package.py b/var/spack/packages/metis/package.py
deleted file mode 100644
index 7ce5ae1925d7774ea82b836efef531021aa05893..0000000000000000000000000000000000000000
--- a/var/spack/packages/metis/package.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from spack import *
-
-class Metis(Package):
-    """METIS is a set of serial programs for partitioning graphs,
-       partitioning finite element meshes, and producing fill reducing
-       orderings for sparse matrices. The algorithms implemented in
-       METIS are based on the multilevel recursive-bisection,
-       multilevel k-way, and multi-constraint partitioning schemes."""
-
-    homepage = "http://glaros.dtc.umn.edu/gkhome/metis/metis/overview"
-    url      = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz"
-
-    version('5.1.0', '5465e67079419a69e0116de24fce58fe')
-
-    depends_on('mpi')
-
-    def install(self, spec, prefix):
-        cmake(".",
-              '-DGKLIB_PATH=%s/GKlib' % pwd(),
-              '-DSHARED=1',
-              '-DCMAKE_C_COMPILER=mpicc',
-              '-DCMAKE_CXX_COMPILER=mpicxx',
-              '-DSHARED=1',
-              *std_cmake_args)
-
-        make()
-        make("install")
diff --git a/var/spack/packages/netcdf/netcdf-4.3.3-mpi.patch b/var/spack/packages/netcdf/netcdf-4.3.3-mpi.patch
deleted file mode 100644
index 46dda5fc9de0157e125f206b5ed5bfbd018b3655..0000000000000000000000000000000000000000
--- a/var/spack/packages/netcdf/netcdf-4.3.3-mpi.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-diff -Nur netcdf-4.3.3/CMakeLists.txt netcdf-4.3.3.mpi/CMakeLists.txt
---- netcdf-4.3.3/CMakeLists.txt	2015-02-12 16:44:35.000000000 -0500
-+++ netcdf-4.3.3.mpi/CMakeLists.txt	2015-10-14 16:44:41.176300658 -0400
-@@ -753,6 +753,7 @@
-     SET(USE_PARALLEL OFF CACHE BOOL "")
-     MESSAGE(STATUS "Cannot find HDF5 library built with parallel support. Disabling parallel build.")
-   ELSE()
-+    FIND_PACKAGE(MPI REQUIRED)
-     SET(USE_PARALLEL ON CACHE BOOL "")
-     SET(STATUS_PARALLEL "ON")
-   ENDIF()
-diff -Nur netcdf-4.3.3/liblib/CMakeLists.txt netcdf-4.3.3.mpi/liblib/CMakeLists.txt
---- netcdf-4.3.3/liblib/CMakeLists.txt	2015-02-12 16:44:35.000000000 -0500
-+++ netcdf-4.3.3.mpi/liblib/CMakeLists.txt	2015-10-14 16:44:57.757793634 -0400
-@@ -71,6 +71,10 @@
-   SET(TLL_LIBS ${TLL_LIBS} ${CURL_LIBRARY})
- ENDIF()
- 
-+IF(USE_PARALLEL)
-+  SET(TLL_LIBS ${TLL_LIBS} ${MPI_C_LIBRARIES})
-+ENDIF()
-+
- IF(USE_HDF4)
-   SET(TLL_LIBS ${TLL_LIBS} ${HDF4_LIBRARIES})
- ENDIF()
diff --git a/var/spack/packages/netcdf/package.py b/var/spack/packages/netcdf/package.py
deleted file mode 100644
index e1e0d836c62e87fe9e104cdf000fd079b5fedb93..0000000000000000000000000000000000000000
--- a/var/spack/packages/netcdf/package.py
+++ /dev/null
@@ -1,27 +0,0 @@
-from spack import *
-
-class Netcdf(Package):
-    """NetCDF is a set of software libraries and self-describing, machine-independent
-        data formats that support the creation, access, and sharing of array-oriented
-        scientific data."""
-
-    homepage = "http://www.unidata.ucar.edu/software/netcdf/"
-    url      = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.3.3.tar.gz"
-
-    version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')
-
-    patch('netcdf-4.3.3-mpi.patch')
-
-    # Dependencies:
-        # >HDF5
-    depends_on("hdf5")
-
-    def install(self, spec, prefix):
-        with working_dir('spack-build', create=True):
-            cmake('..',
-                "-DCMAKE_INSTALL_PREFIX:PATH=%s" % prefix,
-                "-DENABLE_DAP:BOOL=OFF", # Disable DAP.
-                "-DBUILD_SHARED_LIBS:BOOL=OFF") # Don't build shared libraries (use static libs).
-
-            make()
-            make("install")
diff --git a/var/spack/packages/parmetis/package.py b/var/spack/packages/parmetis/package.py
deleted file mode 100644
index d8cd33730477bfe4623710a4d1d8ad656ad72ce7..0000000000000000000000000000000000000000
--- a/var/spack/packages/parmetis/package.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from spack import *
-
-class Parmetis(Package):
-    """ParMETIS is an MPI-based parallel library that implements a
-       variety of algorithms for partitioning unstructured graphs,
-       meshes, and for computing fill-reducing orderings of sparse
-       matrices."""
-    homepage = "http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview"
-    url      = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/parmetis/parmetis-4.0.3.tar.gz"
-
-    version('4.0.3', 'f69c479586bf6bb7aff6a9bc0c739628')
-
-    depends_on('mpi')
-
-    def install(self, spec, prefix):
-        cmake(".",
-              '-DGKLIB_PATH=%s/metis/GKlib' % pwd(),
-              '-DMETIS_PATH=%s/metis' % pwd(),
-              '-DSHARED=1',
-              '-DCMAKE_C_COMPILER=mpicc',
-              '-DCMAKE_CXX_COMPILER=mpicxx',
-              '-DSHARED=1',
-              *std_cmake_args)
-
-        make()
-        make("install")
diff --git a/var/spack/mock_packages/a/package.py b/var/spack/repos/builtin.mock/packages/a/package.py
similarity index 100%
rename from var/spack/mock_packages/a/package.py
rename to var/spack/repos/builtin.mock/packages/a/package.py
diff --git a/var/spack/mock_packages/b/package.py b/var/spack/repos/builtin.mock/packages/b/package.py
similarity index 100%
rename from var/spack/mock_packages/b/package.py
rename to var/spack/repos/builtin.mock/packages/b/package.py
diff --git a/var/spack/mock_packages/c/package.py b/var/spack/repos/builtin.mock/packages/c/package.py
similarity index 100%
rename from var/spack/mock_packages/c/package.py
rename to var/spack/repos/builtin.mock/packages/c/package.py
diff --git a/var/spack/mock_packages/callpath/package.py b/var/spack/repos/builtin.mock/packages/callpath/package.py
similarity index 100%
rename from var/spack/mock_packages/callpath/package.py
rename to var/spack/repos/builtin.mock/packages/callpath/package.py
diff --git a/var/spack/mock_packages/direct_mpich/package.py b/var/spack/repos/builtin.mock/packages/direct_mpich/package.py
similarity index 100%
rename from var/spack/mock_packages/direct_mpich/package.py
rename to var/spack/repos/builtin.mock/packages/direct_mpich/package.py
diff --git a/var/spack/mock_packages/dyninst/package.py b/var/spack/repos/builtin.mock/packages/dyninst/package.py
similarity index 100%
rename from var/spack/mock_packages/dyninst/package.py
rename to var/spack/repos/builtin.mock/packages/dyninst/package.py
diff --git a/var/spack/mock_packages/e/package.py b/var/spack/repos/builtin.mock/packages/e/package.py
similarity index 100%
rename from var/spack/mock_packages/e/package.py
rename to var/spack/repos/builtin.mock/packages/e/package.py
diff --git a/var/spack/mock_packages/fake/package.py b/var/spack/repos/builtin.mock/packages/fake/package.py
similarity index 100%
rename from var/spack/mock_packages/fake/package.py
rename to var/spack/repos/builtin.mock/packages/fake/package.py
diff --git a/var/spack/mock_packages/git-test/package.py b/var/spack/repos/builtin.mock/packages/git-test/package.py
similarity index 100%
rename from var/spack/mock_packages/git-test/package.py
rename to var/spack/repos/builtin.mock/packages/git-test/package.py
diff --git a/var/spack/mock_packages/hg-test/package.py b/var/spack/repos/builtin.mock/packages/hg-test/package.py
similarity index 100%
rename from var/spack/mock_packages/hg-test/package.py
rename to var/spack/repos/builtin.mock/packages/hg-test/package.py
diff --git a/var/spack/mock_packages/indirect_mpich/package.py b/var/spack/repos/builtin.mock/packages/indirect_mpich/package.py
similarity index 100%
rename from var/spack/mock_packages/indirect_mpich/package.py
rename to var/spack/repos/builtin.mock/packages/indirect_mpich/package.py
diff --git a/var/spack/mock_packages/libdwarf/package.py b/var/spack/repos/builtin.mock/packages/libdwarf/package.py
similarity index 100%
rename from var/spack/mock_packages/libdwarf/package.py
rename to var/spack/repos/builtin.mock/packages/libdwarf/package.py
diff --git a/var/spack/mock_packages/libelf/package.py b/var/spack/repos/builtin.mock/packages/libelf/package.py
similarity index 100%
rename from var/spack/mock_packages/libelf/package.py
rename to var/spack/repos/builtin.mock/packages/libelf/package.py
diff --git a/var/spack/mock_packages/mpich/package.py b/var/spack/repos/builtin.mock/packages/mpich/package.py
similarity index 100%
rename from var/spack/mock_packages/mpich/package.py
rename to var/spack/repos/builtin.mock/packages/mpich/package.py
diff --git a/var/spack/mock_packages/mpich2/package.py b/var/spack/repos/builtin.mock/packages/mpich2/package.py
similarity index 100%
rename from var/spack/mock_packages/mpich2/package.py
rename to var/spack/repos/builtin.mock/packages/mpich2/package.py
diff --git a/var/spack/mock_packages/mpileaks/package.py b/var/spack/repos/builtin.mock/packages/mpileaks/package.py
similarity index 100%
rename from var/spack/mock_packages/mpileaks/package.py
rename to var/spack/repos/builtin.mock/packages/mpileaks/package.py
diff --git a/var/spack/mock_packages/multimethod/package.py b/var/spack/repos/builtin.mock/packages/multimethod/package.py
similarity index 100%
rename from var/spack/mock_packages/multimethod/package.py
rename to var/spack/repos/builtin.mock/packages/multimethod/package.py
diff --git a/var/spack/mock_packages/netlib-blas/package.py b/var/spack/repos/builtin.mock/packages/netlib-blas/package.py
similarity index 100%
rename from var/spack/mock_packages/netlib-blas/package.py
rename to var/spack/repos/builtin.mock/packages/netlib-blas/package.py
diff --git a/var/spack/mock_packages/netlib-lapack/package.py b/var/spack/repos/builtin.mock/packages/netlib-lapack/package.py
similarity index 100%
rename from var/spack/mock_packages/netlib-lapack/package.py
rename to var/spack/repos/builtin.mock/packages/netlib-lapack/package.py
diff --git a/var/spack/mock_packages/openblas/package.py b/var/spack/repos/builtin.mock/packages/openblas/package.py
similarity index 100%
rename from var/spack/mock_packages/openblas/package.py
rename to var/spack/repos/builtin.mock/packages/openblas/package.py
diff --git a/var/spack/mock_packages/optional-dep-test-2/package.py b/var/spack/repos/builtin.mock/packages/optional-dep-test-2/package.py
similarity index 100%
rename from var/spack/mock_packages/optional-dep-test-2/package.py
rename to var/spack/repos/builtin.mock/packages/optional-dep-test-2/package.py
diff --git a/var/spack/mock_packages/optional-dep-test-3/package.py b/var/spack/repos/builtin.mock/packages/optional-dep-test-3/package.py
similarity index 100%
rename from var/spack/mock_packages/optional-dep-test-3/package.py
rename to var/spack/repos/builtin.mock/packages/optional-dep-test-3/package.py
diff --git a/var/spack/mock_packages/optional-dep-test/package.py b/var/spack/repos/builtin.mock/packages/optional-dep-test/package.py
similarity index 100%
rename from var/spack/mock_packages/optional-dep-test/package.py
rename to var/spack/repos/builtin.mock/packages/optional-dep-test/package.py
diff --git a/var/spack/mock_packages/svn-test/package.py b/var/spack/repos/builtin.mock/packages/svn-test/package.py
similarity index 100%
rename from var/spack/mock_packages/svn-test/package.py
rename to var/spack/repos/builtin.mock/packages/svn-test/package.py
diff --git a/var/spack/mock_packages/trivial_install_test_package/package.py b/var/spack/repos/builtin.mock/packages/trivial_install_test_package/package.py
similarity index 100%
rename from var/spack/mock_packages/trivial_install_test_package/package.py
rename to var/spack/repos/builtin.mock/packages/trivial_install_test_package/package.py
diff --git a/var/spack/mock_packages/zmpi/package.py b/var/spack/repos/builtin.mock/packages/zmpi/package.py
similarity index 100%
rename from var/spack/mock_packages/zmpi/package.py
rename to var/spack/repos/builtin.mock/packages/zmpi/package.py
diff --git a/var/spack/repos/builtin.mock/repo.yaml b/var/spack/repos/builtin.mock/repo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30b068da13d7f2bdb315300139cd5172868b199e
--- /dev/null
+++ b/var/spack/repos/builtin.mock/repo.yaml
@@ -0,0 +1,2 @@
+repo:
+  namespace: builtin.mock
diff --git a/var/spack/packages/ImageMagick/package.py b/var/spack/repos/builtin/packages/ImageMagick/package.py
similarity index 100%
rename from var/spack/packages/ImageMagick/package.py
rename to var/spack/repos/builtin/packages/ImageMagick/package.py
diff --git a/var/spack/packages/Mitos/package.py b/var/spack/repos/builtin/packages/Mitos/package.py
similarity index 100%
rename from var/spack/packages/Mitos/package.py
rename to var/spack/repos/builtin/packages/Mitos/package.py
diff --git a/var/spack/repos/builtin/packages/R/package.py b/var/spack/repos/builtin/packages/R/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..2471dff09b6d24cbbb0d5e8ca267283ffdbe4b41
--- /dev/null
+++ b/var/spack/repos/builtin/packages/R/package.py
@@ -0,0 +1,49 @@
+from spack import *
+
+
+class R(Package):
+    """
+    R is 'GNU S', a freely available language and environment for statistical computing and graphics which provides a
+    wide variety of statistical and graphical techniques: linear and nonlinear modelling, statistical tests, time series
+    analysis, classification, clustering, etc. Please consult the R project homepage for further information.
+    """
+    homepage = "https://www.r-project.org"
+    url = "http://cran.cnr.berkeley.edu/src/base/R-3/R-3.1.2.tar.gz"
+
+    version('3.2.3', '1ba3dac113efab69e706902810cc2970')
+    version('3.2.2', '57cef5c2e210a5454da1979562a10e5b')
+    version('3.2.1', 'c2aac8b40f84e08e7f8c9068de9239a3')
+    version('3.2.0', '66fa17ad457d7e618191aa0f52fc402e')
+    version('3.1.3', '53a85b884925aa6b5811dfc361d73fc4')
+    version('3.1.2', '3af29ec06704cbd08d4ba8d69250ae74')
+
+    variant('external-lapack', default=False, description='Links to externally installed BLAS/LAPACK')
+
+    # Virtual dependencies
+    depends_on('blas', when='+external-lapack')
+    depends_on('lapack', when='+external-lapack')
+
+    # Concrete dependencies
+    depends_on('readline')
+    depends_on('ncurses')
+    depends_on('icu')
+    depends_on('glib')
+    depends_on('zlib')
+    depends_on('libtiff')
+    depends_on('jpeg')
+    depends_on('cairo')
+    depends_on('pango')
+    depends_on('freetype')
+    depends_on('tcl')
+    depends_on('tk')
+
+    def install(self, spec, prefix):
+        options = ['--prefix=%s' % prefix,
+                   '--enable-R-shlib',
+                   '--enable-BLAS-shlib']
+        if '+external-lapack' in spec:
+            options.extend(['--with-blas', '--with-lapack'])
+
+        configure(*options)
+        make()
+        make('install')
diff --git a/var/spack/packages/SAMRAI/no-tool-build.patch b/var/spack/repos/builtin/packages/SAMRAI/no-tool-build.patch
similarity index 100%
rename from var/spack/packages/SAMRAI/no-tool-build.patch
rename to var/spack/repos/builtin/packages/SAMRAI/no-tool-build.patch
diff --git a/var/spack/packages/SAMRAI/package.py b/var/spack/repos/builtin/packages/SAMRAI/package.py
similarity index 100%
rename from var/spack/packages/SAMRAI/package.py
rename to var/spack/repos/builtin/packages/SAMRAI/package.py
diff --git a/var/spack/packages/activeharmony/package.py b/var/spack/repos/builtin/packages/activeharmony/package.py
similarity index 100%
rename from var/spack/packages/activeharmony/package.py
rename to var/spack/repos/builtin/packages/activeharmony/package.py
diff --git a/var/spack/packages/adept-utils/package.py b/var/spack/repos/builtin/packages/adept-utils/package.py
similarity index 100%
rename from var/spack/packages/adept-utils/package.py
rename to var/spack/repos/builtin/packages/adept-utils/package.py
diff --git a/var/spack/packages/apex/package.py b/var/spack/repos/builtin/packages/apex/package.py
similarity index 100%
rename from var/spack/packages/apex/package.py
rename to var/spack/repos/builtin/packages/apex/package.py
diff --git a/var/spack/packages/arpack/package.py b/var/spack/repos/builtin/packages/arpack/package.py
similarity index 100%
rename from var/spack/packages/arpack/package.py
rename to var/spack/repos/builtin/packages/arpack/package.py
diff --git a/var/spack/packages/asciidoc/package.py b/var/spack/repos/builtin/packages/asciidoc/package.py
similarity index 100%
rename from var/spack/packages/asciidoc/package.py
rename to var/spack/repos/builtin/packages/asciidoc/package.py
diff --git a/var/spack/packages/atk/package.py b/var/spack/repos/builtin/packages/atk/package.py
similarity index 100%
rename from var/spack/packages/atk/package.py
rename to var/spack/repos/builtin/packages/atk/package.py
diff --git a/var/spack/packages/atlas/package.py b/var/spack/repos/builtin/packages/atlas/package.py
similarity index 100%
rename from var/spack/packages/atlas/package.py
rename to var/spack/repos/builtin/packages/atlas/package.py
diff --git a/var/spack/repos/builtin/packages/atop/package.py b/var/spack/repos/builtin/packages/atop/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..346ab0763c20eda0c18d191d80a7d426c4da4b19
--- /dev/null
+++ b/var/spack/repos/builtin/packages/atop/package.py
@@ -0,0 +1,16 @@
+from spack import *
+
+class Atop(Package):
+    """Atop is an ASCII full-screen performance monitor for Linux"""
+    homepage = "http://www.atoptool.nl/index.php"
+    url      = "http://www.atoptool.nl/download/atop-2.2-3.tar.gz"
+
+    version('2.2-3', '034dc1544f2ec4e4d2c739d320dc326d')
+
+    def install(self, spec, prefix):
+        make()
+        mkdirp(prefix.bin)
+        install("atop", join_path(prefix.bin, "atop"))
+        mkdirp(join_path(prefix.man, "man1"))
+        install(join_path("man", "atop.1"),
+                join_path(prefix.man, "man1", "atop.1"))
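
For readers new to the build helpers used in this atop recipe: join_path wraps os.path.join, mkdirp behaves like mkdir -p, and prefix.bin / prefix.man are shorthand for subdirectories of the install prefix. A rough plain-Python approximation of the directory handling (a sketch, not Spack's actual implementation):

    import os

    def mkdirp(path):
        # Like 'mkdir -p': create intermediate directories, tolerate existing ones.
        if not os.path.isdir(path):
            os.makedirs(path)

    prefix = '/tmp/example-prefix'   # hypothetical prefix for illustration
    mkdirp(os.path.join(prefix, 'bin'))
    mkdirp(os.path.join(prefix, 'man', 'man1'))
    # install(src, dst) in the recipe above then copies each file into place.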
diff --git a/var/spack/packages/autoconf/package.py b/var/spack/repos/builtin/packages/autoconf/package.py
similarity index 100%
rename from var/spack/packages/autoconf/package.py
rename to var/spack/repos/builtin/packages/autoconf/package.py
diff --git a/var/spack/packages/automaded/package.py b/var/spack/repos/builtin/packages/automaded/package.py
similarity index 100%
rename from var/spack/packages/automaded/package.py
rename to var/spack/repos/builtin/packages/automaded/package.py
diff --git a/var/spack/packages/automake/package.py b/var/spack/repos/builtin/packages/automake/package.py
similarity index 100%
rename from var/spack/packages/automake/package.py
rename to var/spack/repos/builtin/packages/automake/package.py
diff --git a/var/spack/packages/bear/package.py b/var/spack/repos/builtin/packages/bear/package.py
similarity index 100%
rename from var/spack/packages/bear/package.py
rename to var/spack/repos/builtin/packages/bear/package.py
diff --git a/var/spack/packages/bib2xhtml/package.py b/var/spack/repos/builtin/packages/bib2xhtml/package.py
similarity index 100%
rename from var/spack/packages/bib2xhtml/package.py
rename to var/spack/repos/builtin/packages/bib2xhtml/package.py
diff --git a/var/spack/packages/binutils/binutilskrell-2.24.patch b/var/spack/repos/builtin/packages/binutils/binutilskrell-2.24.patch
similarity index 100%
rename from var/spack/packages/binutils/binutilskrell-2.24.patch
rename to var/spack/repos/builtin/packages/binutils/binutilskrell-2.24.patch
diff --git a/var/spack/repos/builtin/packages/binutils/cr16.patch b/var/spack/repos/builtin/packages/binutils/cr16.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2727c70b23857a73fc10df61c91abe5af91fd445
--- /dev/null
+++ b/var/spack/repos/builtin/packages/binutils/cr16.patch
@@ -0,0 +1,26 @@
+--- old/opcodes/cr16-dis.c	2014-10-14 03:32:04.000000000 -0400
++++ new/opcodes/cr16-dis.c	2016-01-14 21:54:26.000000000 -0500
+@@ -78,7 +78,7 @@
+ REG_ARG_TYPE;
+ 
+ /* Current opcode table entry we're disassembling.  */
+-const inst *instruction;
++extern const inst *instruction;
+ /* Current instruction we're disassembling.  */
+ ins cr16_currInsn;
+ /* The current instruction is read into 3 consecutive words.  */
+@@ -86,12 +86,12 @@
+ /* Contains all words in appropriate order.  */
+ ULONGLONG cr16_allWords;
+ /* Holds the current processed argument number.  */
+-int processing_argument_number;
++extern int processing_argument_number;
+ /* Nonzero means a IMM4 instruction.  */
+ int imm4flag;
+ /* Nonzero means the instruction's original size is
+    incremented (escape sequence is used).  */
+-int size_changed;
++extern int size_changed;
+ 
+ 
+ /* Print the constant expression length.  */
diff --git a/var/spack/packages/binutils/package.py b/var/spack/repos/builtin/packages/binutils/package.py
similarity index 89%
rename from var/spack/packages/binutils/package.py
rename to var/spack/repos/builtin/packages/binutils/package.py
index 123f4598f696a52495042a2f367421ed4bdbd92e..de04221e33ab4a6b6eb012460c3f28b1c5c84048 100644
--- a/var/spack/packages/binutils/package.py
+++ b/var/spack/repos/builtin/packages/binutils/package.py
@@ -11,8 +11,11 @@ class Binutils(Package):
 
     # Add a patch that creates binutils libiberty_pic.a which is preferred by OpenSpeedShop and cbtf-krell
     variant('krellpatch', default=False, description="build with openspeedshop based patch.")
+    variant('gold', default=True, description="build the gold linker")
     patch('binutilskrell-2.24.patch', when='@2.24+krellpatch')
 
+    patch('cr16.patch')
+
     variant('libiberty', default=False, description='Also install libiberty.')
 
     def install(self, spec, prefix):
@@ -26,6 +29,9 @@ def install(self, spec, prefix):
             '--enable-targets=all',
             '--with-sysroot=/']
 
+        if '+gold' in spec:
+            configure_args.append('--enable-gold')
+
         if '+libiberty' in spec:
             configure_args.append('--enable-install-libiberty')
 
diff --git a/var/spack/packages/bison/package.py b/var/spack/repos/builtin/packages/bison/package.py
similarity index 100%
rename from var/spack/packages/bison/package.py
rename to var/spack/repos/builtin/packages/bison/package.py
diff --git a/var/spack/repos/builtin/packages/boost/boost_11856.patch b/var/spack/repos/builtin/packages/boost/boost_11856.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3b4052ca187396ce0cf83785bf7cd7309b0009c7
--- /dev/null
+++ b/var/spack/repos/builtin/packages/boost/boost_11856.patch
@@ -0,0 +1,34 @@
+--- a/libs/container/src/pool_resource.cpp	2015-11-06 12:49:55.000000000 -0800
++++ b/libs/container/src/pool_resource.cpp	2015-12-22 07:54:36.202131121 -0800
+@@ -32,11 +32,11 @@
+ class pool_data_t
+    : public block_slist_base<>
+ {
+-   typedef block_slist_base<> block_slist_base;
++   typedef block_slist_base<> block_slist_base_t;
+ 
+    public:
+    explicit pool_data_t(std::size_t initial_blocks_per_chunk)
+-      : block_slist_base(), next_blocks_per_chunk(initial_blocks_per_chunk)
++      : block_slist_base_t(), next_blocks_per_chunk(initial_blocks_per_chunk)
+    {  slist_algo::init_header(&free_slist);  }
+ 
+    void *allocate_block() BOOST_NOEXCEPT
+@@ -59,7 +59,7 @@
+    void release(memory_resource &upstream)
+    {
+       slist_algo::init_header(&free_slist);
+-      this->block_slist_base::release(upstream);
++      this->block_slist_base_t::release(upstream);
+       next_blocks_per_chunk = pool_options_minimum_max_blocks_per_chunk;
+    }
+ 
+@@ -72,7 +72,7 @@
+       
+       //Minimum block size is at least max_align, so all pools allocate sizes that are multiple of max_align,
+       //meaning that all blocks are max_align-aligned.
+-      char *p = static_cast<char *>(block_slist_base::allocate(blocks_per_chunk*pool_block, mr));
++      char *p = static_cast<char *>(block_slist_base_t::allocate(blocks_per_chunk*pool_block, mr));
+ 
+       //Create header types. This is no-throw
+       for(std::size_t i = 0, max = blocks_per_chunk; i != max; ++i){
diff --git a/var/spack/repos/builtin/packages/boost/package.py b/var/spack/repos/builtin/packages/boost/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb1f5daee7f371e33d0f69a7a758e93c5bd63f0d
--- /dev/null
+++ b/var/spack/repos/builtin/packages/boost/package.py
@@ -0,0 +1,210 @@
+from spack import *
+import spack
+
+class Boost(Package):
+    """Boost provides free peer-reviewed portable C++ source
+       libraries, emphasizing libraries that work well with the C++
+       Standard Library.
+
+       Boost libraries are intended to be widely useful, and usable
+       across a broad spectrum of applications. The Boost license
+       encourages both commercial and non-commercial use.
+    """
+    homepage = "http://www.boost.org"
+    url      = "http://downloads.sourceforge.net/project/boost/boost/1.55.0/boost_1_55_0.tar.bz2"
+    list_url = "http://sourceforge.net/projects/boost/files/boost/"
+    list_depth = 2
+
+    version('1.60.0', '65a840e1a0b13a558ff19eeb2c4f0cbe')
+    version('1.59.0', '6aa9a5c6a4ca1016edd0ed1178e3cb87')
+    version('1.58.0', 'b8839650e61e9c1c0a89f371dd475546')
+    version('1.57.0', '1be49befbdd9a5ce9def2983ba3e7b76')
+    version('1.56.0', 'a744cf167b05d72335f27c88115f211d')
+    version('1.55.0', 'd6eef4b4cacb2183f2bf265a5a03a354')
+    version('1.54.0', '15cb8c0803064faef0c4ddf5bc5ca279')
+    version('1.53.0', 'a00d22605d5dbcfb4c9936a9b35bc4c2')
+    version('1.52.0', '3a855e0f919107e0ca4de4d84ad3f750')
+    version('1.51.0', '4b6bd483b692fd138aef84ed2c8eb679')
+    version('1.50.0', '52dd00be775e689f55a987baebccc462')
+    version('1.49.0', '0d202cb811f934282dea64856a175698')
+    version('1.48.0', 'd1e9a7a7f532bb031a3c175d86688d95')
+    version('1.47.0', 'a2dc343f7bc7f83f8941e47ed4a18200')
+    version('1.46.1', '7375679575f4c8db605d426fc721d506')
+    version('1.46.0', '37b12f1702319b73876b0097982087e0')
+    version('1.45.0', 'd405c606354789d0426bc07bea617e58')
+    version('1.44.0', 'f02578f5218f217a9f20e9c30e119c6a')
+    version('1.43.0', 'dd49767bfb726b0c774f7db0cef91ed1')
+    version('1.42.0', '7bf3b4eb841b62ffb0ade2b82218ebe6')
+    version('1.41.0', '8bb65e133907db727a2a825c5400d0a6')
+    version('1.40.0', 'ec3875caeac8c52c7c129802a8483bd7')
+    version('1.39.0', 'a17281fd88c48e0d866e1a12deecbcc0')
+    version('1.38.0', '5eca2116d39d61382b8f8235915cb267')
+    version('1.37.0', '8d9f990bfb7e83769fa5f1d6f065bc92')
+    version('1.36.0', '328bfec66c312150e4c2a78dcecb504b')
+    version('1.35.0', 'dce952a7214e72d6597516bcac84048b')
+    version('1.34.1', '2d938467e8a448a2c9763e0a9f8ca7e5')
+    version('1.34.0', 'ed5b9291ffad776f8757a916e1726ad0')
+
+    default_install_libs = set(['atomic',
+        'chrono',
+        'date_time',
+        'filesystem',
+        'graph',
+        'iostreams',
+        'locale',
+        'log',
+        'math',
+        'program_options',
+        'random',
+        'regex',
+        'serialization',
+        'signals',
+        'system',
+        'test',
+        'thread',
+        'wave'])
+
+    # mpi/python are not installed by default because they pull in many
+    # dependencies and/or because there is a great deal of customization
+    # possible (and it would be difficult to choose sensible defaults)
+    default_noinstall_libs = set(['mpi', 'python'])
+
+    all_libs = default_install_libs | default_noinstall_libs
+
+    for lib in all_libs:
+        variant(lib, default=(lib not in default_noinstall_libs),
+            description="Compile with {0} library".format(lib))
+
+    variant('debug', default=False, description='Switch to the debug version of Boost')
+    variant('shared', default=True, description="Additionally build shared libraries")
+    variant('multithreaded', default=True, description="Build multi-threaded versions of libraries")
+    variant('singlethreaded', default=True, description="Build single-threaded versions of libraries")
+    variant('icu_support', default=False, description="Include ICU support (for regex/locale libraries)")
+
+    depends_on('icu', when='+icu_support')
+    depends_on('python', when='+python')
+    depends_on('mpi', when='+mpi')
+    depends_on('bzip2', when='+iostreams')
+    depends_on('zlib', when='+iostreams')
+
+    # Patch fix from https://svn.boost.org/trac/boost/ticket/11856
+    patch('boost_11856.patch', when='@1.60.0%gcc@4.4.7')
+
+    def url_for_version(self, version):
+        """Handle Boost's weird URLs, which write the version two different ways."""
+        parts = [str(p) for p in Version(version)]
+        dots = ".".join(parts)
+        underscores = "_".join(parts)
+        return "http://downloads.sourceforge.net/project/boost/boost/%s/boost_%s.tar.bz2" % (
+            dots, underscores)
+
+    def determine_toolset(self, spec):
+        if spec.satisfies("=darwin-x86_64"):
+            return 'darwin'
+
+        toolsets = {'g++': 'gcc',
+                    'icpc': 'intel',
+                    'clang++': 'clang'}
+
+        for cc, toolset in toolsets.iteritems():
+            if cc in self.compiler.cxx_names:
+                return toolset
+
+        # fallback to gcc if no toolset found
+        return 'gcc'
+
+    def determine_bootstrap_options(self, spec, withLibs, options):
+        boostToolsetId = self.determine_toolset(spec)
+        options.append('--with-toolset=%s' % boostToolsetId)
+        options.append("--with-libraries=%s" % ','.join(withLibs))
+
+        if '+python' in spec:
+            options.append('--with-python=%s' %
+                join_path(spec['python'].prefix.bin, 'python'))
+
+        with open('user-config.jam', 'w') as f:
+            compiler_wrapper = join_path(spack.build_env_path, 'c++')
+            f.write("using {0} : : {1} ;\n".format(boostToolsetId, 
+                compiler_wrapper))
+            
+            if '+mpi' in spec:
+                f.write('using mpi : %s ;\n' %
+                    join_path(spec['mpi'].prefix.bin, 'mpicxx'))
+            if '+python' in spec:
+                f.write('using python : %s : %s ;\n' %
+                    (spec['python'].version,
+                    join_path(spec['python'].prefix.bin, 'python')))
+
+    def determine_b2_options(self, spec, options):
+        if '+debug' in spec:
+            options.append('variant=debug')
+        else:
+            options.append('variant=release')
+
+        if '+icu_support' in spec:
+            options.extend(['-s', 'ICU_PATH=%s' % spec['icu'].prefix])
+
+        if '+iostreams' in spec:
+            options.extend([
+                '-s', 'BZIP2_INCLUDE=%s' % spec['bzip2'].prefix.include,
+                '-s', 'BZIP2_LIBPATH=%s' % spec['bzip2'].prefix.lib,
+                '-s', 'ZLIB_INCLUDE=%s' % spec['zlib'].prefix.include,
+                '-s', 'ZLIB_LIBPATH=%s' % spec['zlib'].prefix.lib,
+                ])
+
+        linkTypes = ['static']
+        if '+shared' in spec:
+            linkTypes.append('shared')
+
+        threadingOpts = []
+        if '+multithreaded' in spec:
+            threadingOpts.append('multi')
+        if '+singlethreaded' in spec:
+            threadingOpts.append('single')
+        if not threadingOpts:
+            raise RuntimeError("At least one of {singlethreaded, multithreaded} must be enabled")
+
+        options.extend([
+            'toolset=%s' % self.determine_toolset(spec),
+            'link=%s' % ','.join(linkTypes),
+            '--layout=tagged'])
+
+        return threadingOpts
+
+    def install(self, spec, prefix):
+        withLibs = list()
+        for lib in Boost.all_libs:
+            if "+{0}".format(lib) in spec:
+                withLibs.append(lib)
+        if not withLibs:
+            # If no libraries are specified for compilation, there is nothing
+            # to configure or build; just copy the headers to the prefix directory.
+            src = join_path(self.stage.source_path, 'boost')
+            mkdirp(join_path(prefix, 'include'))
+            dst = join_path(prefix, 'include', 'boost')
+            install_tree(src, dst)
+            return
+
+        # to make Boost find the user-config.jam
+        env['BOOST_BUILD_PATH'] = './'
+
+        bootstrap = Executable('./bootstrap.sh')
+
+        bootstrap_options = ['--prefix=%s' % prefix]
+        self.determine_bootstrap_options(spec, withLibs, bootstrap_options)
+
+        bootstrap(*bootstrap_options)
+
+        # b2 used to be called bjam, before 1.47 (sigh)
+        b2name = './b2' if spec.satisfies('@1.47:') else './bjam'
+
+        b2 = Executable(b2name)
+        b2_options = ['-j', '%s' % make_jobs]
+
+        threadingOpts = self.determine_b2_options(spec, b2_options)
+
+        # In theory it could be done on one call but it fails on
+        # Boost.MPI if the threading options are not separated.
+        for threadingOpt in threadingOpts:
+            b2('install', 'threading=%s' % threadingOpt, *b2_options)
+
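
Two details of the Boost recipe deserve a gloss. First, the final loop invokes b2 once per threading tag because, as the comment notes, Boost.MPI fails when 'multi' and 'single' are requested in a single run. Second, url_for_version handles Boost's mixed naming: the download directory uses a dotted version while the tarball name uses underscores. A standalone sketch of that transformation (illustration only; the real hook receives a Spack Version object):

    def boost_url(version_string):
        parts = version_string.split('.')
        dots = '.'.join(parts)
        underscores = '_'.join(parts)
        return ('http://downloads.sourceforge.net/project/boost/boost/'
                '%s/boost_%s.tar.bz2' % (dots, underscores))

    assert boost_url('1.55.0').endswith('/1.55.0/boost_1_55_0.tar.bz2')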
diff --git a/var/spack/packages/bowtie2/bowtie2-2.5.patch b/var/spack/repos/builtin/packages/bowtie2/bowtie2-2.5.patch
similarity index 100%
rename from var/spack/packages/bowtie2/bowtie2-2.5.patch
rename to var/spack/repos/builtin/packages/bowtie2/bowtie2-2.5.patch
diff --git a/var/spack/packages/bowtie2/package.py b/var/spack/repos/builtin/packages/bowtie2/package.py
similarity index 100%
rename from var/spack/packages/bowtie2/package.py
rename to var/spack/repos/builtin/packages/bowtie2/package.py
diff --git a/var/spack/packages/boxlib/package.py b/var/spack/repos/builtin/packages/boxlib/package.py
similarity index 100%
rename from var/spack/packages/boxlib/package.py
rename to var/spack/repos/builtin/packages/boxlib/package.py
diff --git a/var/spack/packages/bzip2/package.py b/var/spack/repos/builtin/packages/bzip2/package.py
similarity index 100%
rename from var/spack/packages/bzip2/package.py
rename to var/spack/repos/builtin/packages/bzip2/package.py
diff --git a/var/spack/packages/cairo/package.py b/var/spack/repos/builtin/packages/cairo/package.py
similarity index 89%
rename from var/spack/packages/cairo/package.py
rename to var/spack/repos/builtin/packages/cairo/package.py
index e1ac8aaa7d76ed99c3797c82dc9db93fba0a2865..8255e869bea222285418c162e16e96200b18f1c8 100644
--- a/var/spack/packages/cairo/package.py
+++ b/var/spack/repos/builtin/packages/cairo/package.py
@@ -14,6 +14,7 @@ class Cairo(Package):
 
     def install(self, spec, prefix):
         configure("--prefix=%s" % prefix,
+                  "--disable-trace", # can cause problems with libiberty
                   "--enable-tee")
         make()
         make("install")
diff --git a/var/spack/packages/callpath/package.py b/var/spack/repos/builtin/packages/callpath/package.py
similarity index 100%
rename from var/spack/packages/callpath/package.py
rename to var/spack/repos/builtin/packages/callpath/package.py
diff --git a/var/spack/packages/cblas/package.py b/var/spack/repos/builtin/packages/cblas/package.py
similarity index 100%
rename from var/spack/packages/cblas/package.py
rename to var/spack/repos/builtin/packages/cblas/package.py
diff --git a/var/spack/packages/cbtf-argonavis/package.py b/var/spack/repos/builtin/packages/cbtf-argonavis/package.py
similarity index 100%
rename from var/spack/packages/cbtf-argonavis/package.py
rename to var/spack/repos/builtin/packages/cbtf-argonavis/package.py
diff --git a/var/spack/packages/cbtf-krell/package.py b/var/spack/repos/builtin/packages/cbtf-krell/package.py
similarity index 100%
rename from var/spack/packages/cbtf-krell/package.py
rename to var/spack/repos/builtin/packages/cbtf-krell/package.py
diff --git a/var/spack/packages/cbtf-lanl/package.py b/var/spack/repos/builtin/packages/cbtf-lanl/package.py
similarity index 100%
rename from var/spack/packages/cbtf-lanl/package.py
rename to var/spack/repos/builtin/packages/cbtf-lanl/package.py
diff --git a/var/spack/packages/cbtf/package.py b/var/spack/repos/builtin/packages/cbtf/package.py
similarity index 100%
rename from var/spack/packages/cbtf/package.py
rename to var/spack/repos/builtin/packages/cbtf/package.py
diff --git a/var/spack/repos/builtin/packages/cereal/Werror.patch b/var/spack/repos/builtin/packages/cereal/Werror.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d39eaaffdb8f2f3ba6ca0a20d8e2a6f7175d210e
--- /dev/null
+++ b/var/spack/repos/builtin/packages/cereal/Werror.patch
@@ -0,0 +1,33 @@
+--- old/sandbox/CMakeLists.txt
++++ new/sandbox/CMakeLists.txt
+@@ -4,9 +4,11 @@
+ add_executable(sandbox_json sandbox_json.cpp)
+ add_executable(sandbox_rtti sandbox_rtti.cpp)
+
++if(Boost_FOUND)
+ add_executable(sandbox_vs sandbox_vs.cpp)
+ target_link_libraries(sandbox_vs sandbox_vs_dll)
+ include_directories(sandbox_shared_lib)
++endif(Boost_FOUND)
+
+ if(Boost_FOUND)
+   add_executable(performance performance.cpp)
+--- old/include/cereal/types/common.hpp
++++ new/include/cereal/types/common.hpp
+@@ -106,14 +106,16 @@
+     t = reinterpret_cast<typename common_detail::is_enum<T>::type const &>( value );
+   }
+
++#ifndef CEREAL_ENABLE_RAW_POINTER_SERIALIZATION
+   //! Serialization for raw pointers
+   /*! This exists only to throw a static_assert to let users know we don't support raw pointers. */
+   template <class Archive, class T> inline
+   void CEREAL_SERIALIZE_FUNCTION_NAME( Archive &, T * & )
+   {
+     static_assert(cereal::traits::detail::delay_static_assert<T>::value,
+       "Cereal does not support serializing raw pointers - please use a smart pointer");
+   }
++#endif
+
+   //! Serialization for C style arrays
+   template <class Archive, class T> inline
diff --git a/var/spack/repos/builtin/packages/cereal/package.py b/var/spack/repos/builtin/packages/cereal/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..a83927456f1dc798e4a459990702f057ec0b4952
--- /dev/null
+++ b/var/spack/repos/builtin/packages/cereal/package.py
@@ -0,0 +1,34 @@
+from spack import *
+import shutil
+
+class Cereal(Package):
+    """cereal is a header-only C++11 serialization library. cereal takes arbitrary data types and reversibly turns them into different representations, such as compact binary encodings, XML, or JSON. cereal was designed to be fast, light-weight, and easy to extend - it has no external dependencies and can be easily bundled with other code or used standalone."""
+    homepage = "http://uscilab.github.io/cereal/"
+    url      = "https://github.com/USCiLab/cereal/archive/v1.1.2.tar.gz"
+
+    version('1.1.2', '34d4ad174acbff005c36d4d10e48cbb9')
+    version('1.1.1', '0ceff308c38f37d5b5f6df3927451c27')
+    version('1.1.0', '9f2d5f72e935c54f4c6d23e954ce699f')
+    version('1.0.0', 'd1bacca70a95cec0ddbff68b0871296b')
+    version('0.9.1', '8872d4444ff274ce6cd1ed364d0fc0ad')
+
+    patch("Werror.patch")
+
+    depends_on("cmake @2.6.2:")
+
+    def install(self, spec, prefix):
+        # Don't use -Werror
+        filter_file(r'-Werror', '', 'CMakeLists.txt')
+
+        # configure
+        # Boost is only used for self-tests, which we are not running (yet?)
+        cmake('.', '-DCMAKE_DISABLE_FIND_PACKAGE_Boost=TRUE', *std_cmake_args)
+
+        # Build
+        make()
+
+        # Install
+        shutil.rmtree(join_path(prefix, 'doc'), ignore_errors=True)
+        shutil.rmtree(join_path(prefix, 'include'), ignore_errors=True)
+        shutil.copytree('doc', join_path(prefix, 'doc'), symlinks=True)
+        shutil.copytree('include', join_path(prefix, 'include'), symlinks=True)
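
The cereal recipe relies on filter_file to strip -Werror before configuring; filter_file performs an in-place regex substitution over a file. A rough standalone equivalent of the behavior used here (a sketch; Spack's own version additionally handles details such as backups and escaping):

    import re

    def filter_file_sketch(regex, repl, filename):
        # Read the whole file, apply the substitution, write it back in place.
        with open(filename) as f:
            text = f.read()
        with open(filename, 'w') as f:
            f.write(re.sub(regex, repl, text))

    # Usage mirroring the recipe above:
    # filter_file_sketch(r'-Werror', '', 'CMakeLists.txt')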
diff --git a/var/spack/packages/cfitsio/package.py b/var/spack/repos/builtin/packages/cfitsio/package.py
similarity index 100%
rename from var/spack/packages/cfitsio/package.py
rename to var/spack/repos/builtin/packages/cfitsio/package.py
diff --git a/var/spack/repos/builtin/packages/cgal/package.py b/var/spack/repos/builtin/packages/cgal/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..97356433be1f3c303ae2d16c2f0bdd7cda6c2eb2
--- /dev/null
+++ b/var/spack/repos/builtin/packages/cgal/package.py
@@ -0,0 +1,73 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+
+class Cgal(Package):
+    """
+    CGAL is a software project that provides easy access to efficient and reliable geometric algorithms in the form of
+    a C++ library. CGAL is used in various areas needing geometric computation, such as geographic information systems,
+    computer aided design, molecular biology, medical imaging, computer graphics, and robotics.
+    """
+    homepage = 'http://www.cgal.org/'
+    url = 'https://github.com/CGAL/cgal/archive/releases/CGAL-4.7.tar.gz'
+
+    version('4.7', '4826714810f3b4c65cac96b90fb03b67')
+    version('4.6.3', 'e8ee2ecc8d2b09b94a121c09257b576d')
+
+    # Installation instructions : http://doc.cgal.org/latest/Manual/installation.html
+    variant('shared', default=True, description='Enables the build of shared libraries')
+    variant('debug', default=False, description='Builds a debug version of the libraries')
+
+    depends_on('boost')
+    depends_on('mpfr')
+    depends_on('gmp')
+    depends_on('zlib')
+
+    # FIXME : Qt5 dependency missing (needs Qt5 and OpenGL)
+    # FIXME : Optional third party libraries missing
+
+    def install(self, spec, prefix):
+
+        options = []
+        options.extend(std_cmake_args)
+        # CGAL supports only Release and Debug build type. Any other build type will raise an error at configure time
+        if '+debug' in spec:
+            options.append('-DCMAKE_BUILD_TYPE:STRING=Debug')
+        else:
+            options.append('-DCMAKE_BUILD_TYPE:STRING=Release')
+
+        if '+shared' in spec:
+            options.append('-DBUILD_SHARED_LIBS:BOOL=ON')
+        else:
+            options.append('-DBUILD_SHARED_LIBS:BOOL=OFF')
+
+        build_directory = join_path(self.stage.path, 'spack-build')
+        source_directory = self.stage.source_path
+        with working_dir(build_directory, create=True):
+            cmake(source_directory, *options)
+            make()
+            make("install")
diff --git a/var/spack/packages/cgm/package.py b/var/spack/repos/builtin/packages/cgm/package.py
similarity index 100%
rename from var/spack/packages/cgm/package.py
rename to var/spack/repos/builtin/packages/cgm/package.py
diff --git a/var/spack/packages/cityhash/package.py b/var/spack/repos/builtin/packages/cityhash/package.py
similarity index 100%
rename from var/spack/packages/cityhash/package.py
rename to var/spack/repos/builtin/packages/cityhash/package.py
diff --git a/var/spack/packages/cleverleaf/package.py b/var/spack/repos/builtin/packages/cleverleaf/package.py
similarity index 100%
rename from var/spack/packages/cleverleaf/package.py
rename to var/spack/repos/builtin/packages/cleverleaf/package.py
diff --git a/var/spack/packages/cloog/package.py b/var/spack/repos/builtin/packages/cloog/package.py
similarity index 100%
rename from var/spack/packages/cloog/package.py
rename to var/spack/repos/builtin/packages/cloog/package.py
diff --git a/var/spack/packages/cmake/package.py b/var/spack/repos/builtin/packages/cmake/package.py
similarity index 90%
rename from var/spack/packages/cmake/package.py
rename to var/spack/repos/builtin/packages/cmake/package.py
index cb54c92d692db206f7cb28cd62e9bb4b60bf2873..f67ae21ebd0c1b3b92ed92cbaa5b809e556e9dcd 100644
--- a/var/spack/packages/cmake/package.py
+++ b/var/spack/repos/builtin/packages/cmake/package.py
@@ -31,15 +31,16 @@ class Cmake(Package):
 
     version('2.8.10.2', '097278785da7182ec0aea8769d06860c',
             url = 'http://www.cmake.org/files/v2.8/cmake-2.8.10.2.tar.gz')
- 
+
     version('3.0.2', 'db4c687a31444a929d2fdc36c4dfb95f',
             url = 'http://www.cmake.org/files/v3.0/cmake-3.0.2.tar.gz')
- 
+
     version('3.4.0', 'cd3034e0a44256a0917e254167217fc8',
-            url = 'https://cmake.org/files/v3.4/cmake-3.4.0.tar.gz')
+            url = 'http://cmake.org/files/v3.4/cmake-3.4.0.tar.gz')
+
+    variant('ncurses', default=True, description='Enables the build of the ncurses GUI')
 
-#    version('3.0.1', 'e2e05d84cb44a42f1371d9995631dcf5')
-#    version('3.0.0', '21a1c85e1a3b803c4b48e7ff915a863e')
+    depends_on('ncurses', when='+ncurses')
 
     def install(self, spec, prefix):
         configure('--prefix='   + prefix,
diff --git a/var/spack/repos/builtin/packages/cmocka/package.py b/var/spack/repos/builtin/packages/cmocka/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..7377016a6b0166b589c885b4b8a13d05f789a31e
--- /dev/null
+++ b/var/spack/repos/builtin/packages/cmocka/package.py
@@ -0,0 +1,16 @@
+from spack import *
+
+class Cmocka(Package):
+    """Unit-testing framework in pure C"""
+    homepage = "https://cmocka.org/"
+    url      = "https://cmocka.org/files/1.0/cmocka-1.0.1.tar.xz"
+
+    version('1.0.1', 'ed861e501a21a92b2af63e466df2015e')
+    parallel = False
+
+    def install(self, spec, prefix):
+        with working_dir('spack-build', create=True):
+            cmake('..', *std_cmake_args)
+
+            make()
+            make("install")
diff --git a/var/spack/packages/coreutils/package.py b/var/spack/repos/builtin/packages/coreutils/package.py
similarity index 100%
rename from var/spack/packages/coreutils/package.py
rename to var/spack/repos/builtin/packages/coreutils/package.py
diff --git a/var/spack/packages/cppcheck/package.py b/var/spack/repos/builtin/packages/cppcheck/package.py
similarity index 100%
rename from var/spack/packages/cppcheck/package.py
rename to var/spack/repos/builtin/packages/cppcheck/package.py
diff --git a/var/spack/packages/cram/package.py b/var/spack/repos/builtin/packages/cram/package.py
similarity index 100%
rename from var/spack/packages/cram/package.py
rename to var/spack/repos/builtin/packages/cram/package.py
diff --git a/var/spack/packages/cscope/package.py b/var/spack/repos/builtin/packages/cscope/package.py
similarity index 100%
rename from var/spack/packages/cscope/package.py
rename to var/spack/repos/builtin/packages/cscope/package.py
diff --git a/var/spack/packages/cube/package.py b/var/spack/repos/builtin/packages/cube/package.py
similarity index 100%
rename from var/spack/packages/cube/package.py
rename to var/spack/repos/builtin/packages/cube/package.py
diff --git a/var/spack/packages/curl/package.py b/var/spack/repos/builtin/packages/curl/package.py
similarity index 100%
rename from var/spack/packages/curl/package.py
rename to var/spack/repos/builtin/packages/curl/package.py
diff --git a/var/spack/packages/czmq/package.py b/var/spack/repos/builtin/packages/czmq/package.py
similarity index 100%
rename from var/spack/packages/czmq/package.py
rename to var/spack/repos/builtin/packages/czmq/package.py
diff --git a/var/spack/packages/damselfly/package.py b/var/spack/repos/builtin/packages/damselfly/package.py
similarity index 100%
rename from var/spack/packages/damselfly/package.py
rename to var/spack/repos/builtin/packages/damselfly/package.py
diff --git a/var/spack/packages/dbus/package.py b/var/spack/repos/builtin/packages/dbus/package.py
similarity index 100%
rename from var/spack/packages/dbus/package.py
rename to var/spack/repos/builtin/packages/dbus/package.py
diff --git a/var/spack/packages/docbook-xml/package.py b/var/spack/repos/builtin/packages/docbook-xml/package.py
similarity index 100%
rename from var/spack/packages/docbook-xml/package.py
rename to var/spack/repos/builtin/packages/docbook-xml/package.py
diff --git a/var/spack/packages/doxygen/package.py b/var/spack/repos/builtin/packages/doxygen/package.py
similarity index 100%
rename from var/spack/packages/doxygen/package.py
rename to var/spack/repos/builtin/packages/doxygen/package.py
diff --git a/var/spack/packages/dri2proto/package.py b/var/spack/repos/builtin/packages/dri2proto/package.py
similarity index 100%
rename from var/spack/packages/dri2proto/package.py
rename to var/spack/repos/builtin/packages/dri2proto/package.py
diff --git a/var/spack/packages/dtcmp/package.py b/var/spack/repos/builtin/packages/dtcmp/package.py
similarity index 100%
rename from var/spack/packages/dtcmp/package.py
rename to var/spack/repos/builtin/packages/dtcmp/package.py
diff --git a/var/spack/packages/dyninst/package.py b/var/spack/repos/builtin/packages/dyninst/package.py
similarity index 100%
rename from var/spack/packages/dyninst/package.py
rename to var/spack/repos/builtin/packages/dyninst/package.py
diff --git a/var/spack/repos/builtin/packages/eigen/package.py b/var/spack/repos/builtin/packages/eigen/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..44ee6819f5861e7b503b383620e62cf3a735fff3
--- /dev/null
+++ b/var/spack/repos/builtin/packages/eigen/package.py
@@ -0,0 +1,68 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by David Beckingsale, david@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+
+class Eigen(Package):
+    """
+    Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms
+    """
+
+    homepage = 'http://eigen.tuxfamily.org/'
+    url = 'http://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2'
+
+    version('3.2.7', 'cc1bacbad97558b97da6b77c9644f184', url='http://bitbucket.org/eigen/eigen/get/3.2.7.tar.bz2')
+
+    variant('debug', default=False, description='Builds the library in debug mode')
+
+    variant('metis', default=True, description='Enables metis backend')
+    variant('scotch', default=True, description='Enables scotch backend')
+    variant('fftw', default=True, description='Enables FFTW backend')
+
+    # TODO : dependency on SuiteSparse, googlehash, superlu, adolc missing
+
+    depends_on('metis', when='+metis')
+    depends_on('scotch', when='+scotch')
+    depends_on('fftw', when='+fftw')
+
+    depends_on('mpfr@2.3.0:')  # Eigen 3.2.7 requires at least 2.3.0
+    depends_on('gmp')
+
+    def install(self, spec, prefix):
+
+        options = []
+        options.extend(std_cmake_args)
+
+        build_directory = join_path(self.stage.path, 'spack-build')
+        source_directory = self.stage.source_path
+
+        if '+debug' in spec:
+            options.append('-DCMAKE_BUILD_TYPE:STRING=Debug')
+
+        with working_dir(build_directory, create=True):
+            cmake(source_directory, *options)
+            make()
+            make("install")
diff --git a/var/spack/packages/elfutils/package.py b/var/spack/repos/builtin/packages/elfutils/package.py
similarity index 100%
rename from var/spack/packages/elfutils/package.py
rename to var/spack/repos/builtin/packages/elfutils/package.py
diff --git a/var/spack/repos/builtin/packages/elpa/package.py b/var/spack/repos/builtin/packages/elpa/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ade5b0b37b466f44a9cbba95c0ccb9905e3bfb6
--- /dev/null
+++ b/var/spack/repos/builtin/packages/elpa/package.py
@@ -0,0 +1,55 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+
+class Elpa(Package):
+    """
+    Eigenvalue solvers for Petaflop-Applications (ELPA)
+    """
+
+    homepage = 'http://elpa.mpcdf.mpg.de/'
+    url = 'http://elpa.mpcdf.mpg.de/elpa-2015.11.001.tar.gz'
+
+    version('2015.11.001', 'de0f35b7ee7c971fd0dca35c900b87e6', url='http://elpa.mpcdf.mpg.de/elpa-2015.11.001.tar.gz')
+
+    variant('openmp', default=False, description='Activates OpenMP support')
+
+    depends_on('mpi')
+    depends_on('blas')
+    depends_on('lapack')
+    depends_on('scalapack')
+
+    def install(self, spec, prefix):
+
+        options = ["--prefix=%s" % prefix]
+
+        if '+openmp' in spec:
+            options.append("--enable-openmp")
+
+        configure(*options)
+        make()
+        make("install")
diff --git a/var/spack/packages/expat/package.py b/var/spack/repos/builtin/packages/expat/package.py
similarity index 100%
rename from var/spack/packages/expat/package.py
rename to var/spack/repos/builtin/packages/expat/package.py
diff --git a/var/spack/packages/extrae/package.py b/var/spack/repos/builtin/packages/extrae/package.py
similarity index 100%
rename from var/spack/packages/extrae/package.py
rename to var/spack/repos/builtin/packages/extrae/package.py
diff --git a/var/spack/packages/exuberant-ctags/package.py b/var/spack/repos/builtin/packages/exuberant-ctags/package.py
similarity index 100%
rename from var/spack/packages/exuberant-ctags/package.py
rename to var/spack/repos/builtin/packages/exuberant-ctags/package.py
diff --git a/var/spack/packages/fftw/package.py b/var/spack/repos/builtin/packages/fftw/package.py
similarity index 62%
rename from var/spack/packages/fftw/package.py
rename to var/spack/repos/builtin/packages/fftw/package.py
index 5f71762c4ff7332218a909db722c24fea11525a8..4d2b9642426fdb9216a917f41d759db9265d7596 100644
--- a/var/spack/packages/fftw/package.py
+++ b/var/spack/repos/builtin/packages/fftw/package.py
@@ -39,54 +39,21 @@ class Fftw(Package):
 
     version('3.3.4', '2edab8c06b24feeb3b82bbb3ebf3e7b3')
 
-    ##########
-    # Floating point precision
-    FLOAT = 'float'
-    LONG_DOUBLE = 'long_double'
-    QUAD_PRECISION = 'quad'
-    PRECISION_OPTIONS = {
-        FLOAT: '--enable-float',
-        LONG_DOUBLE: '--enable--long-double',
-        QUAD_PRECISION: '--enable-quad-precision'
-    }
-    variant(FLOAT, default=False, description='Produces a single precision version of the library')
-    variant(LONG_DOUBLE, default=False, description='Produces a long double precision version of the library')
-    variant(QUAD_PRECISION, default=False, description='Produces a quad precision version of the library (works only with GCC and libquadmath)')
-    ##########
+    variant('float', default=True, description='Produces a single precision version of the library')
+    variant('long_double', default=True, description='Produces a long double precision version of the library')
+    variant('quad', default=False, description='Produces a quad precision version of the library (works only with GCC and libquadmath)')
 
     variant('mpi', default=False, description='Activate MPI support')
 
     depends_on('mpi', when='+mpi')
 
-    @staticmethod
-    def enabled(x):
-        """
-        Given a variant name returns the string that means the variant is enabled
-
-        :param x: variant name
-        """
-        # FIXME : duplicated from MVAPICH2
-        return '+' + x
-
-    def check_fortran_availability(self, options):
-        if not self.compiler.f77 or not self.compiler.fc:
-            options.append("--disable-fortran")
-
-    def set_floating_point_precision(self, spec, options):
-        l = [option for variant, option in Fftw.PRECISION_OPTIONS.iteritems() if self.enabled(variant) in spec]
-        if len(l) > 1:
-            raise RuntimeError('At most one floating point precision variant may activated per build.')
-        options.extend(l)
-
     def install(self, spec, prefix):
-
         options = ['--prefix=%s' % prefix,
                    '--enable-shared',
                    '--enable-threads',
                    '--enable-openmp']
-        self.check_fortran_availability(options)
-        self.set_floating_point_precision(spec, options)
-
+        if not self.compiler.f77 or not self.compiler.fc:
+            options.append("--disable-fortran")
         if '+mpi' in spec:
             options.append('--enable-mpi')
 
@@ -94,3 +61,15 @@ def install(self, spec, prefix):
         make()
         make("install")
 
+        if '+float' in spec:
+            configure('--enable-float', *options)
+            make()
+            make("install")
+        if '+long_double' in spec:
+            configure('--enable-long-double', *options)
+            make()
+            make("install")
+        if '+quad' in spec:
+            configure('--enable-quad-precision', *options)
+            make()
+            make("install")
diff --git a/var/spack/packages/fish/package.py b/var/spack/repos/builtin/packages/fish/package.py
similarity index 100%
rename from var/spack/packages/fish/package.py
rename to var/spack/repos/builtin/packages/fish/package.py
diff --git a/var/spack/packages/flex/package.py b/var/spack/repos/builtin/packages/flex/package.py
similarity index 100%
rename from var/spack/packages/flex/package.py
rename to var/spack/repos/builtin/packages/flex/package.py
diff --git a/var/spack/repos/builtin/packages/fltk/font.patch b/var/spack/repos/builtin/packages/fltk/font.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7706a1b4eeef535e851be646890c3f5527c4cc95
--- /dev/null
+++ b/var/spack/repos/builtin/packages/fltk/font.patch
@@ -0,0 +1,44 @@
+Index: FL/x.H
+===================================================================
+--- a/FL/x.H	(revision 10476)
++++ b/FL/x.H	(working copy)
+@@ -132,6 +132,7 @@
+   XFontStruct *ptr;
+ };
+ extern FL_EXPORT Fl_XFont_On_Demand fl_xfont;
++extern FL_EXPORT XFontStruct* fl_core_font();
+
+ // this object contains all X-specific stuff about a window:
+ // Warning: this object is highly subject to change!
+Index: src/fl_font.cxx
+===================================================================
+--- a/src/fl_font.cxx	(revision 10476)
++++ b/src/fl_font.cxx	(working copy)
+@@ -55,6 +55,14 @@
+ #  include "fl_font_x.cxx"
+ #endif // WIN32
+
++#ifdef WIN32
++#elif defined(__APPLE__)
++#else
++XFontStruct *fl_core_font()
++{
++  return fl_xfont.value();
++}
++#endif
+
+ double fl_width(const char* c) {
+   if (c) return fl_width(c, (int) strlen(c));
+Index: src/gl_draw.cxx
+===================================================================
+--- a/src/gl_draw.cxx	(revision 10476)
++++ b/src/gl_draw.cxx	(working copy)
+@@ -84,7 +84,7 @@
+  * then sorting through them at draw time (for normal X rendering) to find which one can
+  * render the current glyph... But for now, just use the first font in the list for GL...
+  */
+-    XFontStruct *font = fl_xfont;
++    XFontStruct *font = fl_core_font();
+     int base = font->min_char_or_byte2;
+     int count = font->max_char_or_byte2-base+1;
+     fl_fontsize->listbase = glGenLists(256);
diff --git a/var/spack/repos/builtin/packages/fltk/package.py b/var/spack/repos/builtin/packages/fltk/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b462f83f8df4ff6cd234b4bbf8b144918b55654
--- /dev/null
+++ b/var/spack/repos/builtin/packages/fltk/package.py
@@ -0,0 +1,58 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+
+
+class Fltk(Package):
+    """
+    FLTK (pronounced "fulltick") is a cross-platform C++ GUI toolkit for UNIX/Linux (X11), Microsoft Windows, and
+    MacOS X. FLTK provides modern GUI functionality without the bloat and supports 3D graphics via OpenGL and its
+    built-in GLUT emulation.
+
+    FLTK is designed to be small and modular enough to be statically linked, but works fine as a shared library. FLTK
+    also includes an excellent UI builder called FLUID that can be used to create applications in minutes.
+    """
+    homepage = 'http://www.fltk.org/'
+    url = 'http://fltk.org/pub/fltk/1.3.3/fltk-1.3.3-source.tar.gz'
+
+    version('1.3.3', '9ccdb0d19dc104b87179bd9fd10822e3')
+
+    patch('font.patch', when='@1.3.3')
+
+    variant('shared', default=True, description='Enables the build of shared libraries')
+
+    def install(self, spec, prefix):
+        options = ['--prefix=%s' % prefix,
+                   '--enable-localjpeg',
+                   '--enable-localpng',
+                   '--enable-localzlib']
+
+        if '+shared' in spec:
+            options.append('--enable-shared')
+
+        # FLTK needs to be built in-source
+        configure(*options)
+        make()
+        make('install')
diff --git a/var/spack/packages/flux/package.py b/var/spack/repos/builtin/packages/flux/package.py
similarity index 100%
rename from var/spack/packages/flux/package.py
rename to var/spack/repos/builtin/packages/flux/package.py
diff --git a/var/spack/packages/fontconfig/package.py b/var/spack/repos/builtin/packages/fontconfig/package.py
similarity index 100%
rename from var/spack/packages/fontconfig/package.py
rename to var/spack/repos/builtin/packages/fontconfig/package.py
diff --git a/var/spack/packages/freetype/package.py b/var/spack/repos/builtin/packages/freetype/package.py
similarity index 100%
rename from var/spack/packages/freetype/package.py
rename to var/spack/repos/builtin/packages/freetype/package.py
diff --git a/var/spack/packages/gasnet/package.py b/var/spack/repos/builtin/packages/gasnet/package.py
similarity index 100%
rename from var/spack/packages/gasnet/package.py
rename to var/spack/repos/builtin/packages/gasnet/package.py
diff --git a/var/spack/packages/gcc/package.py b/var/spack/repos/builtin/packages/gcc/package.py
similarity index 95%
rename from var/spack/packages/gcc/package.py
rename to var/spack/repos/builtin/packages/gcc/package.py
index 7ec160d5957513f3136d13c6443a62469570aa29..3e5895cfb83e127edf9f174278abc287cac58777 100644
--- a/var/spack/packages/gcc/package.py
+++ b/var/spack/repos/builtin/packages/gcc/package.py
@@ -48,11 +48,14 @@ class Gcc(Package):
     version('4.7.4', '4c696da46297de6ae77a82797d2abe28')
     version('4.6.4', 'b407a3d1480c11667f293bfb1f17d1a4')
     version('4.5.4', '27e459c2566b8209ab064570e1b378f7')
-    
+
+    variant('gold', default=True, description="Build the gold linker plugin for ld-based LTO")
+
     depends_on("mpfr")
     depends_on("gmp")
     depends_on("mpc")     # when @4.5:
-    depends_on("binutils~libiberty")
+    depends_on("binutils~libiberty", when='~gold')
+    depends_on("binutils~libiberty+gold", when='+gold')
 
     # Save these until we can do optional deps.
     depends_on("isl", when=DEPENDS_ON_ISL_PREDICATE)
@@ -99,7 +102,7 @@ def install(self, spec, prefix):
             configure(*options)
             make()
             make("install")
-            
+
         self.write_rpath_specs()
 
 
@@ -118,7 +121,7 @@ def write_rpath_specs(self):
             return
 
         gcc = Executable(join_path(self.prefix.bin, 'gcc'))
-        lines = gcc('-dumpspecs', return_output=True).strip().split("\n")
+        lines = gcc('-dumpspecs', output=str).strip().split("\n")
         specs_file = join_path(self.spec_dir, 'specs')
         with closing(open(specs_file, 'w')) as out:
             for line in lines:
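
Besides the gold variant, which forwards to the binutils dependency (+gold builds against binutils~libiberty+gold), the gcc hunk tracks an API migration in Spack's Executable wrapper: return_output=True became output=str, meaning "capture stdout and return it as a string". A rough sketch of that calling convention (an approximation, not Spack's implementation):

    import subprocess

    def run(cmd, output=None):
        # When output is the str type, capture and return stdout;
        # otherwise let the child write to the terminal as usual.
        if output is str:
            return subprocess.check_output(cmd, universal_newlines=True)
        subprocess.check_call(cmd)

    # e.g. lines = run(['gcc', '-dumpspecs'], output=str).strip().split('\n')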
diff --git a/var/spack/packages/gdb/package.py b/var/spack/repos/builtin/packages/gdb/package.py
similarity index 80%
rename from var/spack/packages/gdb/package.py
rename to var/spack/repos/builtin/packages/gdb/package.py
index fd567f346b48cd019db09c9e1b13ba130afcd035..dd02b426b94674952de76bd7a0473e04725ab994 100644
--- a/var/spack/packages/gdb/package.py
+++ b/var/spack/repos/builtin/packages/gdb/package.py
@@ -6,7 +6,7 @@
 # Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
 # LLNL-CODE-647188
 #
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://llnl.github.io/spack
 # Please also see the LICENSE file for our notice and the LGPL.
 #
 # This program is free software; you can redistribute it and/or modify
@@ -32,13 +32,13 @@ class Gdb(Package):
     -- or what another program was doing at the moment it crashed.
     """
     homepage = "https://www.gnu.org/software/gdb"
-    url = "http://ftp.gnu.org/gnu/gdb/gdb-7.10.tar.xz"
+    url = "http://ftp.gnu.org/gnu/gdb/gdb-7.10.tar.gz"
 
-    version('7.10.1', '39e654460c9cdd80200a29ac020cfe11')
-    version('7.10', '2a35bac41fa8e10bf04f3a0dd7f7f363')
-    version('7.9.1', '35374c77a70884eb430c97061053a36e')
-    version('7.9', 'e6279f26559d839f0b4218a482bcb43e')
-    version('7.8.2', 'a80cf252ed2e775d4e4533341bbf2459')
+    version('7.10.1', 'b93a2721393e5fa226375b42d567d90b')
+    version('7.10', 'fa6827ad0fd2be1daa418abb11a54d86')
+    version('7.9.1', 'f3b97de919a9dba84490b2e076ec4cb0')
+    version('7.9', '8f8ced422fe462a00e0135a643544f17')
+    version('7.8.2', '8b0ea8b3559d3d90b3ff4952f0aeafbc')
 
     depends_on('texinfo')
 
diff --git a/var/spack/packages/gdk-pixbuf/package.py b/var/spack/repos/builtin/packages/gdk-pixbuf/package.py
similarity index 100%
rename from var/spack/packages/gdk-pixbuf/package.py
rename to var/spack/repos/builtin/packages/gdk-pixbuf/package.py
diff --git a/var/spack/packages/geos/package.py b/var/spack/repos/builtin/packages/geos/package.py
similarity index 100%
rename from var/spack/packages/geos/package.py
rename to var/spack/repos/builtin/packages/geos/package.py
diff --git a/var/spack/packages/gflags/package.py b/var/spack/repos/builtin/packages/gflags/package.py
similarity index 100%
rename from var/spack/packages/gflags/package.py
rename to var/spack/repos/builtin/packages/gflags/package.py
diff --git a/var/spack/packages/ghostscript/package.py b/var/spack/repos/builtin/packages/ghostscript/package.py
similarity index 100%
rename from var/spack/packages/ghostscript/package.py
rename to var/spack/repos/builtin/packages/ghostscript/package.py
diff --git a/var/spack/packages/git/package.py b/var/spack/repos/builtin/packages/git/package.py
similarity index 81%
rename from var/spack/packages/git/package.py
rename to var/spack/repos/builtin/packages/git/package.py
index 28c7aa8161f1daa1bac267d93e172f65cdc1c200..ddc5078c4d34b1d3ad7cd74a35ec5d29c0732927 100644
--- a/var/spack/packages/git/package.py
+++ b/var/spack/repos/builtin/packages/git/package.py
@@ -5,14 +5,14 @@ class Git(Package):
        system designed to handle everything from small to very large
        projects with speed and efficiency."""
     homepage = "http://git-scm.com"
-    url      = "https://www.kernel.org/pub/software/scm/git/git-2.2.1.tar.xz"
-
-    version('2.6.3', '5a6375349c3f13c8dbbabfc327bae429')
-    version('2.6.2', '32ae5ad29763fc927bfcaeab55385fd9')
-    version('2.6.1', 'dd4a3a7fe96598c553edd39d40c9c290')
-    version('2.6.0', '6b7d43d615fb3f0dfecf4d131e23f438')
-    version('2.5.4', 'ec118fcd1cf984edc17eb6588b78e81b')
-    version('2.2.1', '43e01f9d96ba8c11611e0eef0d9f9f28')
+    url      = "https://www.kernel.org/pub/software/scm/git/git-2.2.1.tar.gz"
+
+    version('2.6.3', 'b711be7628a4a2c25f38d859ee81b423')
+    version('2.6.2', 'da293290da69f45a86a311ad3cd43dc8')
+    version('2.6.1', '4c62ee9c5991fe93d99cf2a6b68397fd')
+    version('2.6.0', 'eb76a07148d94802a1745d759716a57e')
+    version('2.5.4', '3eca2390cf1fa698b48e2a233563a76b')
+    version('2.2.1', 'ff41fdb094eed1ec430aed8ee9b9849c')
 
 
     # Git compiles with curl support by default on but if your system
diff --git a/var/spack/packages/glib/package.py b/var/spack/repos/builtin/packages/glib/package.py
similarity index 94%
rename from var/spack/packages/glib/package.py
rename to var/spack/repos/builtin/packages/glib/package.py
index baca1a5a459b2cd3be3c233d9ded27b69dd8b26b..67ead5f9416b8ece41d191c6cc799232f5f8fcda 100644
--- a/var/spack/packages/glib/package.py
+++ b/var/spack/repos/builtin/packages/glib/package.py
@@ -16,4 +16,4 @@ class Glib(Package):
     def install(self, spec, prefix):
         configure("--prefix=%s" % prefix)
         make()
-        make("install")
+        make("install", parallel=False)
diff --git a/var/spack/packages/glm/package.py b/var/spack/repos/builtin/packages/glm/package.py
similarity index 100%
rename from var/spack/packages/glm/package.py
rename to var/spack/repos/builtin/packages/glm/package.py
diff --git a/var/spack/packages/global/package.py b/var/spack/repos/builtin/packages/global/package.py
similarity index 100%
rename from var/spack/packages/global/package.py
rename to var/spack/repos/builtin/packages/global/package.py
diff --git a/var/spack/packages/glog/package.py b/var/spack/repos/builtin/packages/glog/package.py
similarity index 100%
rename from var/spack/packages/glog/package.py
rename to var/spack/repos/builtin/packages/glog/package.py
diff --git a/var/spack/repos/builtin/packages/glpk/package.py b/var/spack/repos/builtin/packages/glpk/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..855f459fb38c39cfb659a048dd31ae356d01d44e
--- /dev/null
+++ b/var/spack/repos/builtin/packages/glpk/package.py
@@ -0,0 +1,53 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+
+class Glpk(Package):
+    """
+    The GLPK (GNU Linear Programming Kit) package is intended for solving large-scale linear programming (LP), mixed
+    integer programming (MIP), and other related problems. It is a set of routines written in ANSI C and organized in
+    the form of a callable library
+    """
+    homepage = "https://www.gnu.org/software/glpk"
+    url = "http://ftp.gnu.org/gnu/glpk/glpk-4.57.tar.gz"
+
+    version('4.57', '237531a54f73155842f8defe51aedb0f')
+
+    variant('gmp', default=False, description='Activates support for GMP library')
+
+    depends_on('gmp', when='+gmp')
+
+    def install(self, spec, prefix):
+
+        options = ['--prefix=%s' % prefix]
+
+        if '+gmp' in spec:
+            options.append('--with-gmp')
+
+        configure(*options)
+        make()
+        make("install")
diff --git a/var/spack/packages/gmp/package.py b/var/spack/repos/builtin/packages/gmp/package.py
similarity index 100%
rename from var/spack/packages/gmp/package.py
rename to var/spack/repos/builtin/packages/gmp/package.py
diff --git a/var/spack/repos/builtin/packages/gmsh/package.py b/var/spack/repos/builtin/packages/gmsh/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d759303cbb0301fe83b04ae97d9c1eec1b7de89
--- /dev/null
+++ b/var/spack/repos/builtin/packages/gmsh/package.py
@@ -0,0 +1,84 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+
+
+class Gmsh(Package):
+    """
+    Gmsh is a free 3D finite element grid generator with a built-in CAD engine and post-processor. Its design goal is
+    to provide a fast, light and user-friendly meshing tool with parametric input and advanced visualization
+    capabilities. Gmsh is built around four modules: geometry, mesh, solver and post-processing. The specification of
+    any input to these modules is done either interactively using the graphical user interface or in ASCII text files
+    using Gmsh's own scripting language.
+    """
+    homepage = 'http://gmsh.info'
+    url = 'http://gmsh.info/src/gmsh-2.11.0-source.tgz'
+
+    version('2.11.0', 'f15b6e7ac9ca649c9a74440e1259d0db')
+
+    # FIXME: missing dependencies on gmm, PETSc, TetGen
+
+    variant('shared', default=True, description='Enables the build of shared libraries')
+    variant('debug', default=False, description='Builds the library in debug mode')
+    variant('mpi', default=False, description='Builds MPI support for parser and solver')
+    variant('fltk', default=False, description='Enables the build of the FLTK GUI')
+    variant('hdf5', default=False, description='Enables HDF5 support')
+    variant('compression', default=True, description='Enables IO compression through zlib')
+
+    depends_on('blas')
+    depends_on('lapack')
+    depends_on('gmp')
+    depends_on('mpi', when='+mpi')
+    depends_on('fltk', when='+fltk')  # Assumes OpenGL with GLU is already provided by the system
+    depends_on('hdf5', when='+hdf5')
+    depends_on('zlib', when='+compression')
+
+    def install(self, spec, prefix):
+
+        options = []
+        options.extend(std_cmake_args)
+
+        build_directory = join_path(self.stage.path, 'spack-build')
+        source_directory = self.stage.source_path
+
+        if '+shared' in spec:
+            options.extend(['-DENABLE_BUILD_SHARED:BOOL=ON',
+                            '-DENABLE_BUILD_DYNAMIC:BOOL=ON'])  # Builds dynamic executable and installs shared library
+        else:
+            options.append('-DENABLE_BUILD_LIB:BOOL=ON')  # Builds and installs static library
+
+        if '+debug' in spec:
+            options.append('-DCMAKE_BUILD_TYPE:STRING=Debug')
+
+        if '+mpi' in spec:
+            options.append('-DENABLE_MPI:BOOL=ON')
+
+        if '+compression' in spec:
+            options.append('-DENABLE_COMPRESSED_IO:BOOL=ON')
+
+        with working_dir(build_directory, create=True):
+            cmake(source_directory, *options)
+            make()
+            make('install')
diff --git a/var/spack/packages/gnuplot/package.py b/var/spack/repos/builtin/packages/gnuplot/package.py
similarity index 100%
rename from var/spack/packages/gnuplot/package.py
rename to var/spack/repos/builtin/packages/gnuplot/package.py
diff --git a/var/spack/packages/gnutls/package.py b/var/spack/repos/builtin/packages/gnutls/package.py
similarity index 100%
rename from var/spack/packages/gnutls/package.py
rename to var/spack/repos/builtin/packages/gnutls/package.py
diff --git a/var/spack/packages/gperf/package.py b/var/spack/repos/builtin/packages/gperf/package.py
similarity index 100%
rename from var/spack/packages/gperf/package.py
rename to var/spack/repos/builtin/packages/gperf/package.py
diff --git a/var/spack/packages/gperftools/package.py b/var/spack/repos/builtin/packages/gperftools/package.py
similarity index 90%
rename from var/spack/packages/gperftools/package.py
rename to var/spack/repos/builtin/packages/gperftools/package.py
index 0ba44c9329684a8880980582a0263b33d8532950..22b2e6c424e9f66e69ccb8614f8a3a1716bf021d 100644
--- a/var/spack/packages/gperftools/package.py
+++ b/var/spack/repos/builtin/packages/gperftools/package.py
@@ -30,8 +30,11 @@ class Gperftools(Package):
     homepage = "https://code.google.com/p/gperftools"
     url      = "https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz"
 
+    version('2.4', '2171cea3bbe053036fb5d5d25176a160', url="https://github.com/gperftools/gperftools/releases/download/gperftools-2.4/gperftools-2.4.tar.gz")
     version('2.3', 'f54dd119f0e46ac1f13264f8d97adf90', url="https://googledrive.com/host/0B6NtGsLhIcf7MWxMMF9JdTN3UVk/gperftools-2.3.tar.gz")
 
+    depends_on("libunwind")
+
     def install(self, spec, prefix):
         configure("--prefix=" + prefix)
         make()
diff --git a/var/spack/packages/graphlib/package.py b/var/spack/repos/builtin/packages/graphlib/package.py
similarity index 100%
rename from var/spack/packages/graphlib/package.py
rename to var/spack/repos/builtin/packages/graphlib/package.py
diff --git a/var/spack/packages/graphviz/package.py b/var/spack/repos/builtin/packages/graphviz/package.py
similarity index 100%
rename from var/spack/packages/graphviz/package.py
rename to var/spack/repos/builtin/packages/graphviz/package.py
diff --git a/var/spack/packages/gsl/package.py b/var/spack/repos/builtin/packages/gsl/package.py
similarity index 100%
rename from var/spack/packages/gsl/package.py
rename to var/spack/repos/builtin/packages/gsl/package.py
diff --git a/var/spack/packages/gtkplus/package.py b/var/spack/repos/builtin/packages/gtkplus/package.py
similarity index 100%
rename from var/spack/packages/gtkplus/package.py
rename to var/spack/repos/builtin/packages/gtkplus/package.py
diff --git a/var/spack/packages/harfbuzz/package.py b/var/spack/repos/builtin/packages/harfbuzz/package.py
similarity index 100%
rename from var/spack/packages/harfbuzz/package.py
rename to var/spack/repos/builtin/packages/harfbuzz/package.py
diff --git a/var/spack/repos/builtin/packages/hdf/package.py b/var/spack/repos/builtin/packages/hdf/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac6435f2a24cec090656ffa63d86815a2742ed80
--- /dev/null
+++ b/var/spack/repos/builtin/packages/hdf/package.py
@@ -0,0 +1,45 @@
+from spack import *
+
+class Hdf(Package):
+    """HDF4 (also known as HDF) is a library and multi-object
+    file format for storing and managing data between machines."""
+
+    homepage = "https://www.hdfgroup.org/products/hdf4/"
+    url      = "https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.11/src/hdf-4.2.11.tar.gz"
+    list_url = "https://www.hdfgroup.org/ftp/HDF/releases/"
+    list_depth = 3
+
+    version('4.2.11', '063f9928f3a19cc21367b71c3b8bbf19')
+
+    variant('szip', default=False, description="Enable szip support")
+
+    depends_on("jpeg")
+    depends_on("szip", when='+szip')
+    depends_on("zlib")
+
+
+    def url_for_version(self, version):
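+        # e.g. 4.2.11 -> https://www.hdfgroup.org/ftp/HDF/releases/HDF4.2.11/src/hdf-4.2.11.tar.gz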
+       return "https://www.hdfgroup.org/ftp/HDF/releases/HDF" + str(version) + "/src/hdf-" + str(version) + ".tar.gz"
+
+
+    def install(self, spec, prefix):
+        config_args = [
+            'CFLAGS=-fPIC',
+            '--prefix=%s' % prefix,
+            '--with-jpeg=%s' % spec['jpeg'].prefix,
+            '--with-zlib=%s' % spec['zlib'].prefix,
+            '--disable-netcdf',  # must be disabled to build NetCDF with HDF4 support
+            '--enable-fortran',
+            '--disable-shared',  # Fortran and shared libraries are not compatible
+            '--enable-static',
+            '--enable-production'
+        ]
+
+        # SZip support
+        if '+szip' in spec:
+            config_args.append('--with-szlib=%s' % spec['szip'].prefix)
+
+        configure(*config_args)
+
+        make()
+        make("install")
diff --git a/var/spack/repos/builtin/packages/hdf5/package.py b/var/spack/repos/builtin/packages/hdf5/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..5321a191f0e4f1c80d7abc3cf0be0fae33742940
--- /dev/null
+++ b/var/spack/repos/builtin/packages/hdf5/package.py
@@ -0,0 +1,135 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+
+class Hdf5(Package):
+    """HDF5 is a data model, library, and file format for storing and managing
+       data. It supports an unlimited variety of datatypes, and is designed for
+       flexible and efficient I/O and for high volume and complex data.
+    """
+
+    homepage = "http://www.hdfgroup.org/HDF5/"
+    url = "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-1.8.13/src/hdf5-1.8.13.tar.gz"
+    list_url = "http://www.hdfgroup.org/ftp/HDF5/releases"
+    list_depth = 3
+
+    version('1.8.16', 'b8ed9a36ae142317f88b0c7ef4b9c618')
+    version('1.8.15', '03cccb5b33dbe975fdcd8ae9dc021f24')
+    version('1.8.13', 'c03426e9e77d7766944654280b467289')
+
+    variant('debug', default=False, description='Builds a debug version of the library')
+
+    variant('cxx', default=True, description='Enable C++ support')
+    variant('fortran', default=True, description='Enable Fortran support')
+    variant('unsupported', default=False, description='Enables unsupported configuration options')
+
+    variant('mpi', default=False, description='Enable MPI support')
+    variant('szip', default=False, description='Enable szip support')
+    variant('threadsafe', default=False, description='Enable thread-safe capabilities')
+
+    depends_on("mpi", when='+mpi')
+    depends_on("szip", when='+szip')
+    depends_on("zlib")
+
+    def validate(self, spec):
+        """
+        Checks if incompatible variants have been activated at the same time
+
+        :param spec: spec of the package
+        :raises RuntimeError: in case of inconsistencies
+        """
+        if '+fortran' in spec and not self.compiler.fc:
+            msg = 'cannot build a Fortran variant without a Fortran compiler'
+            raise RuntimeError(msg)
+
+        if '+threadsafe' in spec and ('+cxx' in spec or '+fortran' in spec):
+            raise RuntimeError("cannot use variant +threadsafe with either +cxx or +fortran")
+
+    def install(self, spec, prefix):
+        self.validate(spec)
+        # Handle compilation after spec validation
+        extra_args = []
+        if '+debug' in spec:
+            extra_args.append('--enable-debug=all')
+        else:
+            extra_args.append('--enable-production')
+
+        if '+unsupported' in spec:
+            extra_args.append("--enable-unsupported")
+
+        if '+cxx' in spec:
+            extra_args.append('--enable-cxx')
+
+        if '+fortran' in spec:
+            extra_args.extend([
+                '--enable-fortran',
+                '--enable-fortran2003'
+            ])
+
+        if '+mpi' in spec:
+            # The HDF5 configure script warns if cxx and mpi are enabled
+            # together. There doesn't seem to be a real reason for this, except
+            # that parts of the MPI interface are not accessible via the C++
+            # interface. Since they are still accessible via the C interface,
+            # this is not actually a problem.
+            extra_args.extend([
+                "--enable-parallel",
+                "CC=%s/mpicc" % spec['mpi'].prefix.bin,
+            ])
+
+            if '+cxx' in spec:
+                extra_args.append("CXX=%s/mpic++" % spec['mpi'].prefix.bin)
+
+            if '+fortran' in spec:
+                extra_args.append("FC=%s/mpifort" % spec['mpi'].prefix.bin)
+
+        if '+szip' in spec:
+            extra_args.append("--with-szlib=%s" % spec['szip'].prefix)
+
+        if '+threadsafe' in spec:
+            extra_args.extend([
+                '--enable-threadsafe',
+                '--disable-hl',
+            ])
+
+        configure(
+            "--prefix=%s" % prefix,
+            "--with-zlib=%s" % spec['zlib'].prefix,
+            "--enable-shared",  # TODO : this should be enabled by default, remove it?
+            *extra_args)
+        make()
+        make("install")
+
+    def url_for_version(self, version):
+        v = str(version)
+
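+        # The HDF Group changed its release layout over time: 1.2.2 sits at
+        # the top level, releases before 1.7 live in a minor-version
+        # directory, and newer ones add a src/ subdirectory.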
+        if version == Version("1.2.2"):
+            return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + ".tar.gz"
+        elif version < Version("1.7"):
+            return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + version.up_to(2) + "/hdf5-" + v + ".tar.gz"
+        else:
+            return "http://www.hdfgroup.org/ftp/HDF5/releases/hdf5-" + v + "/src/hdf5-" + v + ".tar.gz"
diff --git a/var/spack/packages/hwloc/package.py b/var/spack/repos/builtin/packages/hwloc/package.py
similarity index 71%
rename from var/spack/packages/hwloc/package.py
rename to var/spack/repos/builtin/packages/hwloc/package.py
index 7ebede76a3b5900079f8274569059c7f8740f986..60b315119ba8999ca0064426768b8e4870290a1b 100644
--- a/var/spack/packages/hwloc/package.py
+++ b/var/spack/repos/builtin/packages/hwloc/package.py
@@ -14,15 +14,18 @@ class Hwloc(Package):
        efficiently."""
     homepage = "http://www.open-mpi.org/projects/hwloc/"
     url      = "http://www.open-mpi.org/software/hwloc/v1.9/downloads/hwloc-1.9.tar.gz"
+    list_url = "http://www.open-mpi.org/software/hwloc/"
+    list_depth = 3
 
-    version('1.11.2', '486169cbe111cdea57be12638828ebbf',
-            url='http://www.open-mpi.org/software/hwloc/v1.11/downloads/hwloc-1.11.2.tar.bz2')
-    version('1.11.1', '002742efd3a8431f98d6315365a2b543',
-            url='http://www.open-mpi.org/software/hwloc/v1.11/downloads/hwloc-1.11.1.tar.bz2')
-    version('1.9', '1f9f9155682fe8946a97c08896109508')
+    version('1.11.2', '486169cbe111cdea57be12638828ebbf')
+    version('1.11.1', '002742efd3a8431f98d6315365a2b543')
+    version('1.9',    '1f9f9155682fe8946a97c08896109508')
 
     depends_on('libpciaccess')
 
+    def url_for_version(self, version):
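+        # e.g. 1.11.2 -> http://www.open-mpi.org/software/hwloc/v1.11/downloads/hwloc-1.11.2.tar.gz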
+        return "http://www.open-mpi.org/software/hwloc/v%s/downloads/hwloc-%s.tar.gz" % (version.up_to(2), version)
+
     def install(self, spec, prefix):
         configure("--prefix=%s" % prefix)
 
diff --git a/var/spack/packages/hypre/package.py b/var/spack/repos/builtin/packages/hypre/package.py
similarity index 100%
rename from var/spack/packages/hypre/package.py
rename to var/spack/repos/builtin/packages/hypre/package.py
diff --git a/var/spack/packages/icu/package.py b/var/spack/repos/builtin/packages/icu/package.py
similarity index 100%
rename from var/spack/packages/icu/package.py
rename to var/spack/repos/builtin/packages/icu/package.py
diff --git a/var/spack/packages/icu4c/package.py b/var/spack/repos/builtin/packages/icu4c/package.py
similarity index 100%
rename from var/spack/packages/icu4c/package.py
rename to var/spack/repos/builtin/packages/icu4c/package.py
diff --git a/var/spack/packages/isl/package.py b/var/spack/repos/builtin/packages/isl/package.py
similarity index 100%
rename from var/spack/packages/isl/package.py
rename to var/spack/repos/builtin/packages/isl/package.py
diff --git a/var/spack/packages/jdk/package.py b/var/spack/repos/builtin/packages/jdk/package.py
similarity index 100%
rename from var/spack/packages/jdk/package.py
rename to var/spack/repos/builtin/packages/jdk/package.py
diff --git a/var/spack/packages/jemalloc/package.py b/var/spack/repos/builtin/packages/jemalloc/package.py
similarity index 100%
rename from var/spack/packages/jemalloc/package.py
rename to var/spack/repos/builtin/packages/jemalloc/package.py
diff --git a/var/spack/packages/jpeg/package.py b/var/spack/repos/builtin/packages/jpeg/package.py
similarity index 100%
rename from var/spack/packages/jpeg/package.py
rename to var/spack/repos/builtin/packages/jpeg/package.py
diff --git a/var/spack/packages/judy/package.py b/var/spack/repos/builtin/packages/judy/package.py
similarity index 100%
rename from var/spack/packages/judy/package.py
rename to var/spack/repos/builtin/packages/judy/package.py
diff --git a/var/spack/repos/builtin/packages/julia/gc.patch b/var/spack/repos/builtin/packages/julia/gc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6db69c6c1b078f4608fe31b455c0a8fdb1f16068
--- /dev/null
+++ b/var/spack/repos/builtin/packages/julia/gc.patch
@@ -0,0 +1,11 @@
+--- julia/src/gc.c
++++ julia/src/gc.c
+@@ -162,7 +162,7 @@
+ // A region is contiguous storage for up to REGION_PG_COUNT naturally aligned GC_PAGE_SZ pages
+ // It uses a very naive allocator (see malloc_page & free_page)
+ #if defined(_P64) && !defined(_COMPILER_MICROSOFT_)
+-#define REGION_PG_COUNT 16*8*4096 // 8G because virtual memory is cheap
++#define REGION_PG_COUNT 8*4096 // 512M
+ #else
+ #define REGION_PG_COUNT 8*4096 // 512M
+ #endif
diff --git a/var/spack/packages/julia/package.py b/var/spack/repos/builtin/packages/julia/package.py
similarity index 94%
rename from var/spack/packages/julia/package.py
rename to var/spack/repos/builtin/packages/julia/package.py
index 9fd946c90543e9d99a97757cf6581f33596a0640..6900af38e4f13941f4fc0d10c51b0766f7632526 100644
--- a/var/spack/packages/julia/package.py
+++ b/var/spack/repos/builtin/packages/julia/package.py
@@ -6,13 +6,17 @@ class Julia(Package):
     homepage = "http://julialang.org"
     url      = "http://github.com/JuliaLang/julia/releases/download/v0.4.2/julia-0.4.2.tar.gz"
 
+    version('0.4.3', '7b9f096798fca4bef262a64674bc2b52')
     version('0.4.2', 'ccfeb4f4090c8b31083f5e1ccb03eb06')
 
+    patch('gc.patch')
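+    # gc.patch shrinks the 64-bit GC region size from 8G to 512M of virtual memory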
+
     # Build-time dependencies
-    # depends_on("cmake")
+    depends_on("cmake @2.8:")
     # depends_on("awk")
     # depends_on("m4")
     # depends_on("pkg-config")
+    depends_on("python @2.6:2.9")
 
     # I think that Julia requires the dependencies above, but it builds fine (on
     # my system) without these. We should enable them as necessary.
diff --git a/var/spack/packages/launchmon/package.py b/var/spack/repos/builtin/packages/launchmon/package.py
similarity index 82%
rename from var/spack/packages/launchmon/package.py
rename to var/spack/repos/builtin/packages/launchmon/package.py
index f97384a249fc347f7e8ec99528110745f82f7a3f..aec2fd6fa7eab908921b0e554cb209d7ef4f75d2 100644
--- a/var/spack/packages/launchmon/package.py
+++ b/var/spack/repos/builtin/packages/launchmon/package.py
@@ -37,6 +37,16 @@ class Launchmon(Package):
     depends_on('automake')
     depends_on('libtool')
 
+
+    def patch(self):
+        # This patch makes libgcrypt compile correctly with newer gcc versions.
+        mf = FileFilter('tools/libgcrypt/tests/Makefile.in')
+        mf.filter(r'(basic_LDADD\s*=\s*.*)',     r'\1 -lgpg-error')
+        mf.filter(r'(tsexp_LDADD\s*=\s*.*)',     r'\1 -lgpg-error')
+        mf.filter(r'(keygen_LDADD\s*=\s*.*)',    r'\1 -lgpg-error')
+        mf.filter(r'(benchmark_LDADD\s*=\s*.*)', r'\1 -lgpg-error')
+
+
     def install(self, spec, prefix):
         configure(
             "--prefix=" + prefix,
diff --git a/var/spack/packages/launchmon/patch.lmon_install_dir b/var/spack/repos/builtin/packages/launchmon/patch.lmon_install_dir
similarity index 100%
rename from var/spack/packages/launchmon/patch.lmon_install_dir
rename to var/spack/repos/builtin/packages/launchmon/patch.lmon_install_dir
diff --git a/var/spack/packages/lcms/package.py b/var/spack/repos/builtin/packages/lcms/package.py
similarity index 100%
rename from var/spack/packages/lcms/package.py
rename to var/spack/repos/builtin/packages/lcms/package.py
diff --git a/var/spack/packages/leveldb/package.py b/var/spack/repos/builtin/packages/leveldb/package.py
similarity index 100%
rename from var/spack/packages/leveldb/package.py
rename to var/spack/repos/builtin/packages/leveldb/package.py
diff --git a/var/spack/packages/libNBC/package.py b/var/spack/repos/builtin/packages/libNBC/package.py
similarity index 100%
rename from var/spack/packages/libNBC/package.py
rename to var/spack/repos/builtin/packages/libNBC/package.py
diff --git a/var/spack/packages/libarchive/package.py b/var/spack/repos/builtin/packages/libarchive/package.py
similarity index 100%
rename from var/spack/packages/libarchive/package.py
rename to var/spack/repos/builtin/packages/libarchive/package.py
diff --git a/var/spack/packages/libcerf/package.py b/var/spack/repos/builtin/packages/libcerf/package.py
similarity index 100%
rename from var/spack/packages/libcerf/package.py
rename to var/spack/repos/builtin/packages/libcerf/package.py
diff --git a/var/spack/packages/libcircle/package.py b/var/spack/repos/builtin/packages/libcircle/package.py
similarity index 100%
rename from var/spack/packages/libcircle/package.py
rename to var/spack/repos/builtin/packages/libcircle/package.py
diff --git a/var/spack/packages/libdrm/package.py b/var/spack/repos/builtin/packages/libdrm/package.py
similarity index 100%
rename from var/spack/packages/libdrm/package.py
rename to var/spack/repos/builtin/packages/libdrm/package.py
diff --git a/var/spack/packages/libdwarf/package.py b/var/spack/repos/builtin/packages/libdwarf/package.py
similarity index 100%
rename from var/spack/packages/libdwarf/package.py
rename to var/spack/repos/builtin/packages/libdwarf/package.py
diff --git a/var/spack/packages/libedit/package.py b/var/spack/repos/builtin/packages/libedit/package.py
similarity index 100%
rename from var/spack/packages/libedit/package.py
rename to var/spack/repos/builtin/packages/libedit/package.py
diff --git a/var/spack/packages/libelf/package.py b/var/spack/repos/builtin/packages/libelf/package.py
similarity index 100%
rename from var/spack/packages/libelf/package.py
rename to var/spack/repos/builtin/packages/libelf/package.py
diff --git a/var/spack/packages/libevent/package.py b/var/spack/repos/builtin/packages/libevent/package.py
similarity index 100%
rename from var/spack/packages/libevent/package.py
rename to var/spack/repos/builtin/packages/libevent/package.py
diff --git a/var/spack/packages/libffi/package.py b/var/spack/repos/builtin/packages/libffi/package.py
similarity index 100%
rename from var/spack/packages/libffi/package.py
rename to var/spack/repos/builtin/packages/libffi/package.py
diff --git a/var/spack/packages/libgcrypt/package.py b/var/spack/repos/builtin/packages/libgcrypt/package.py
similarity index 100%
rename from var/spack/packages/libgcrypt/package.py
rename to var/spack/repos/builtin/packages/libgcrypt/package.py
diff --git a/var/spack/packages/libgd/package.py b/var/spack/repos/builtin/packages/libgd/package.py
similarity index 100%
rename from var/spack/packages/libgd/package.py
rename to var/spack/repos/builtin/packages/libgd/package.py
diff --git a/var/spack/packages/libgpg-error/package.py b/var/spack/repos/builtin/packages/libgpg-error/package.py
similarity index 100%
rename from var/spack/packages/libgpg-error/package.py
rename to var/spack/repos/builtin/packages/libgpg-error/package.py
diff --git a/var/spack/packages/libjpeg-turbo/package.py b/var/spack/repos/builtin/packages/libjpeg-turbo/package.py
similarity index 100%
rename from var/spack/packages/libjpeg-turbo/package.py
rename to var/spack/repos/builtin/packages/libjpeg-turbo/package.py
diff --git a/var/spack/packages/libjson-c/package.py b/var/spack/repos/builtin/packages/libjson-c/package.py
similarity index 100%
rename from var/spack/packages/libjson-c/package.py
rename to var/spack/repos/builtin/packages/libjson-c/package.py
diff --git a/var/spack/packages/libmng/package.py b/var/spack/repos/builtin/packages/libmng/package.py
similarity index 100%
rename from var/spack/packages/libmng/package.py
rename to var/spack/repos/builtin/packages/libmng/package.py
diff --git a/var/spack/packages/libmonitor/libmonitorkrell-0000.patch b/var/spack/repos/builtin/packages/libmonitor/libmonitorkrell-0000.patch
similarity index 100%
rename from var/spack/packages/libmonitor/libmonitorkrell-0000.patch
rename to var/spack/repos/builtin/packages/libmonitor/libmonitorkrell-0000.patch
diff --git a/var/spack/packages/libmonitor/libmonitorkrell-0001.patch b/var/spack/repos/builtin/packages/libmonitor/libmonitorkrell-0001.patch
similarity index 100%
rename from var/spack/packages/libmonitor/libmonitorkrell-0001.patch
rename to var/spack/repos/builtin/packages/libmonitor/libmonitorkrell-0001.patch
diff --git a/var/spack/packages/libmonitor/libmonitorkrell-0002.patch b/var/spack/repos/builtin/packages/libmonitor/libmonitorkrell-0002.patch
similarity index 100%
rename from var/spack/packages/libmonitor/libmonitorkrell-0002.patch
rename to var/spack/repos/builtin/packages/libmonitor/libmonitorkrell-0002.patch
diff --git a/var/spack/packages/libmonitor/package.py b/var/spack/repos/builtin/packages/libmonitor/package.py
similarity index 100%
rename from var/spack/packages/libmonitor/package.py
rename to var/spack/repos/builtin/packages/libmonitor/package.py
diff --git a/var/spack/packages/libpciaccess/package.py b/var/spack/repos/builtin/packages/libpciaccess/package.py
similarity index 63%
rename from var/spack/packages/libpciaccess/package.py
rename to var/spack/repos/builtin/packages/libpciaccess/package.py
index 403bafbbd2ccc407390affdf5878ba27f86214ce..0c0847d3232d62431c4db6e9691ca148152f2cb3 100644
--- a/var/spack/packages/libpciaccess/package.py
+++ b/var/spack/repos/builtin/packages/libpciaccess/package.py
@@ -5,12 +5,10 @@ class Libpciaccess(Package):
     """Generic PCI access library."""
 
     homepage = "http://cgit.freedesktop.org/xorg/lib/libpciaccess/"
-    url      = "http://cgit.freedesktop.org/xorg/lib/libpciaccess/"
+    url      = "http://xorg.freedesktop.org/archive/individual/lib/libpciaccess-0.13.4.tar.bz2"
 
-    version('0.13.4', git='http://anongit.freedesktop.org/git/xorg/lib/libpciaccess.git',
-            tag='libpciaccess-0.13.4')
+    version('0.13.4', 'ace78aec799b1cf6dfaea55d3879ed9f')
 
-    depends_on('autoconf')
     depends_on('libtool')
 
     def install(self, spec, prefix):
@@ -20,9 +18,6 @@ def install(self, spec, prefix):
             mkdir(prefix.lib)
             return
 
-        from subprocess import call
-        call(["./autogen.sh"])
         configure("--prefix=%s" % prefix)
-
         make()
         make("install")
diff --git a/var/spack/packages/libpng/package.py b/var/spack/repos/builtin/packages/libpng/package.py
similarity index 100%
rename from var/spack/packages/libpng/package.py
rename to var/spack/repos/builtin/packages/libpng/package.py
diff --git a/var/spack/packages/libsodium/package.py b/var/spack/repos/builtin/packages/libsodium/package.py
similarity index 100%
rename from var/spack/packages/libsodium/package.py
rename to var/spack/repos/builtin/packages/libsodium/package.py
diff --git a/var/spack/packages/libtiff/package.py b/var/spack/repos/builtin/packages/libtiff/package.py
similarity index 100%
rename from var/spack/packages/libtiff/package.py
rename to var/spack/repos/builtin/packages/libtiff/package.py
diff --git a/var/spack/packages/libtool/package.py b/var/spack/repos/builtin/packages/libtool/package.py
similarity index 87%
rename from var/spack/packages/libtool/package.py
rename to var/spack/repos/builtin/packages/libtool/package.py
index a07daf978115a8f1a63af32e246431ae540b8771..82a54953b23e29f49900e192c32823aa1cd4699c 100644
--- a/var/spack/packages/libtool/package.py
+++ b/var/spack/repos/builtin/packages/libtool/package.py
@@ -5,6 +5,7 @@ class Libtool(Package):
     homepage = "https://www.gnu.org/software/libtool/"
     url      = "http://ftpmirror.gnu.org/libtool/libtool-2.4.2.tar.gz"
 
+    version('2.4.6' , 'addf44b646ddb4e3919805aa88fa7c5e')
     version('2.4.2' , 'd2f3b7d4627e69e13514a40e72a24d50')
 
     def install(self, spec, prefix):
diff --git a/var/spack/packages/libunwind/package.py b/var/spack/repos/builtin/packages/libunwind/package.py
similarity index 100%
rename from var/spack/packages/libunwind/package.py
rename to var/spack/repos/builtin/packages/libunwind/package.py
diff --git a/var/spack/packages/libuuid/package.py b/var/spack/repos/builtin/packages/libuuid/package.py
similarity index 100%
rename from var/spack/packages/libuuid/package.py
rename to var/spack/repos/builtin/packages/libuuid/package.py
diff --git a/var/spack/packages/libxcb/package.py b/var/spack/repos/builtin/packages/libxcb/package.py
similarity index 69%
rename from var/spack/packages/libxcb/package.py
rename to var/spack/repos/builtin/packages/libxcb/package.py
index 16a5525c0d328ab061cf9ef85868fe2a76afbab4..1dd5954c9978bb2ad4bad9f53aba8b2ff39c5aaf 100644
--- a/var/spack/packages/libxcb/package.py
+++ b/var/spack/repos/builtin/packages/libxcb/package.py
@@ -1,9 +1,9 @@
 from spack import *
 
 class Libxcb(Package):
-    """The X protocol C-language Binding (XCB) is a replacement 
-    for Xlib featuring a small footprint, latency hiding, direct 
-    access to the protocol, improved threading support, and 
+    """The X protocol C-language Binding (XCB) is a replacement
+    for Xlib featuring a small footprint, latency hiding, direct
+    access to the protocol, improved threading support, and
     extensibility."""
 
     homepage = "http://xcb.freedesktop.org/"
@@ -14,6 +14,10 @@ class Libxcb(Package):
     depends_on("python")
     depends_on("xcb-proto")
 
+    def patch(self):
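+        # strip the struct tag from the xcb_auth_info_t typedef in src/xcb.h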
+        filter_file('typedef struct xcb_auth_info_t {', 'typedef struct {', 'src/xcb.h')
+
+
     def install(self, spec, prefix):
         configure("--prefix=%s" % prefix)
 
diff --git a/var/spack/packages/libxml2/package.py b/var/spack/repos/builtin/packages/libxml2/package.py
similarity index 100%
rename from var/spack/packages/libxml2/package.py
rename to var/spack/repos/builtin/packages/libxml2/package.py
diff --git a/var/spack/packages/libxshmfence/package.py b/var/spack/repos/builtin/packages/libxshmfence/package.py
similarity index 100%
rename from var/spack/packages/libxshmfence/package.py
rename to var/spack/repos/builtin/packages/libxshmfence/package.py
diff --git a/var/spack/packages/libxslt/package.py b/var/spack/repos/builtin/packages/libxslt/package.py
similarity index 100%
rename from var/spack/packages/libxslt/package.py
rename to var/spack/repos/builtin/packages/libxslt/package.py
diff --git a/var/spack/packages/llvm-lld/package.py b/var/spack/repos/builtin/packages/llvm-lld/package.py
similarity index 100%
rename from var/spack/packages/llvm-lld/package.py
rename to var/spack/repos/builtin/packages/llvm-lld/package.py
diff --git a/var/spack/repos/builtin/packages/llvm/package.py b/var/spack/repos/builtin/packages/llvm/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2b2c6ecccb3776654ab5c5c3a3d3ddfb5da3cea
--- /dev/null
+++ b/var/spack/repos/builtin/packages/llvm/package.py
@@ -0,0 +1,218 @@
+##############################################################################
+# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by David Beckingsale, david@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+from spack import *
+import os, shutil
+
+
+class Llvm(Package):
+    """The LLVM Project is a collection of modular and reusable compiler and
+       toolchain technologies. Despite its name, LLVM has little to do with
+       traditional virtual machines, though it does provide helpful libraries
+       that can be used to build them. The name "LLVM" itself is not an acronym;
+       it is the full name of the project.
+    """
+    homepage = 'http://llvm.org/'
+    url = 'http://llvm.org/releases/3.7.0/llvm-3.7.0.src.tar.xz'
+
+    version('3.0', 'a8e5f5f1c1adebae7b4a654c376a6005', url='http://llvm.org/releases/3.0/llvm-3.0.tar.gz') # currently required by mesa package
+
+    variant('debug', default=False, description="Build a debug version of LLVM; this increases binary size by an order of magnitude, so make sure you have 20-30 GB of space available")
+    variant('clang', default=True, description="Build the LLVM C/C++/Objective-C compiler frontend")
+    variant('lldb', default=True, description="Build the LLVM debugger")
+    variant('internal_unwind', default=True, description="Build the LLVM libunwind")
+    variant('polly', default=True, description="Build the LLVM polyhedral optimization plugin; only builds for 3.7.0+")
+    variant('libcxx', default=True, description="Build the LLVM C++ standard library")
+    variant('compiler-rt', default=True, description="Build the LLVM compiler runtime, including sanitizers")
+    variant('gold', default=True, description="Add support for LTO with the gold linker plugin")
+
+
+    # Build dependency
+    depends_on('cmake @2.8.12.2:')
+
+    # Universal dependency
+    depends_on('python@2.7:')
+
+    # lldb dependencies
+    depends_on('ncurses', when='+lldb')
+    depends_on('swig', when='+lldb')
+    depends_on('libedit', when='+lldb')
+
+    # gold support
+    depends_on('binutils+gold', when='+gold')
+
+    # polly plugin
+    depends_on('gmp', when='@:3.6.999 +polly')
+    depends_on('isl', when='@:3.6.999 +polly')
+
+    base_url = 'http://llvm.org/releases/%%(version)s/%(pkg)s-%%(version)s.src.tar.xz'
+    llvm_url = base_url % {'pkg': 'llvm'}
+
+    resources = {
+                    'compiler-rt' : {
+                        'url' : base_url % { 'pkg' : 'compiler-rt'},
+                        'destination' : 'projects',
+                        'placement' : 'compiler-rt',
+                    },
+                    'openmp' : {
+                        'url' : base_url % { 'pkg' : 'openmp'},
+                        'destination' : 'projects',
+                        'placement' : 'openmp',
+                    },
+                    'libcxx' : {
+                        'url' : base_url % { 'pkg' : 'libcxx'},
+                        'destination' : 'projects',
+                        'placement' : 'libcxx',
+                    },
+                    'libcxxabi' : {
+                        'url' :  base_url % { 'pkg' : 'libcxxabi'},
+                        'destination' : 'projects',
+                        'placement' : 'libcxxabi',
+                    },
+                    'clang' : {
+                        'url' :  base_url % { 'pkg' : 'cfe'},
+                        'destination' : 'tools',
+                        'placement' : 'clang',
+                    },
+                    'clang-tools-extra' : {
+                        'url' :  base_url % { 'pkg' : 'clang-tools-extra'},
+                        'destination' : 'tools/clang/tools',
+                        'placement' : 'extra',
+                    },
+                    'lldb' : {
+                        'url' :  base_url % { 'pkg' : 'lldb'},
+                        'destination' : 'tools',
+                        'placement' : 'lldb',
+                    },
+                    'polly' : {
+                        'url' :  base_url % { 'pkg' : 'polly'},
+                        'destination' : 'tools',
+                        'placement' : 'polly',
+                    },
+                    'llvm-libunwind' : {
+                        'url' :  base_url % { 'pkg' : 'libunwind'},
+                        'destination' : 'projects',
+                        'placement' : 'libunwind',
+                    },
+                }
+    releases = [
+                  {
+                    'version' : '3.7.0',
+                    'md5':'b98b9495e5655a672d6cb83e1a180f8e',
+                    'resources' : {
+                        'compiler-rt' : '383c10affd513026f08936b5525523f5',
+                        'openmp' : 'f482c86fdead50ba246a1a2b0bbf206f',
+                        'polly' : '32f93ffc9cc7e042df22089761558f8b',
+                        'libcxx' : '46aa5175cbe1ad42d6e9c995968e56dd',
+                        'libcxxabi' : '5aa769e2fca79fa5335cfae8f6258772',
+                        'clang' : '8f9d27335e7331cf0a4711e952f21f01',
+                        'clang-tools-extra' : 'd5a87dacb65d981a427a536f6964642e',
+                        'lldb' : 'e5931740400d1dc3e7db4c7ba2ceff68',
+                        'llvm-libunwind' : '9a75392eb7eb8ed5c0840007e212baf5',
+                        }
+                  },
+                  {
+                    'version' : '3.6.2',
+                    'md5':'0c1ee3597d75280dee603bae9cbf5cc2',
+                    'resources' : {
+                        'compiler-rt' : 'e3bc4eb7ba8c39a6fe90d6c988927f3c',
+                        'openmp' : '65dd5863b9b270960a96817e9152b123',
+                        'libcxx' : '22214c90697636ef960a49aef7c1823a',
+                        'libcxxabi' : '17518e361e4e228f193dd91e8ef54ba2',
+                        'clang' : 'ff862793682f714bb7862325b9c06e20',
+                        'clang-tools-extra' : '3ebc1dc41659fcec3db1b47d81575e06',
+                        'lldb' : '51e5eb552f777b950bb0ff326e60d5f0',
+                        }
+                  },
+                  {
+                    'version' : '3.5.1',
+                    'md5':'2d3d8004f38852aa679e5945b8ce0b14',
+                    'resources' : {
+                        'compiler-rt' : 'd626cfb8a9712cb92b820798ab5bc1f8',
+                        'openmp' : '121ddb10167d7fc38b1f7e4b029cf059',
+                        'libcxx' : '406f09b1dab529f3f7879f4d548329d2',
+                        'libcxxabi' : 'b22c707e8d474a99865ad3c521c3d464',
+                        'clang' : '93f9532f8f7e6f1d8e5c1116907051cb',
+                        'clang-tools-extra' : 'f13f31ed3038acadc6fa63fef812a246',
+                        'lldb' : 'cc5ea8a414c62c33e760517f8929a204',
+                        }
+                  },
+               ]
+
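+    # Register a version() for each release above and attach its resources
+    # (clang, lldb, polly, ...) with a when='@<version>' constraint so each
+    # resource is fetched only for its matching LLVM release.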
+    for release in releases:
+        version(release['version'], release['md5'], url=llvm_url % release)
+
+        for name, md5 in release['resources'].items():
+            resource(name=name,
+                     url=resources[name]['url'] % release,
+                     md5=md5,
+                     destination=resources[name]['destination'],
+                     when='@%(version)s' % release,
+                     placement=resources[name].get('placement', None))
+
+    def install(self, spec, prefix):
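+        # Build LLVM in C++11 mode; cxx11_flag resolves to the right switch
+        # for the compiler in use (e.g. -std=c++11 for gcc and clang)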
+        env['CXXFLAGS'] = self.compiler.cxx11_flag
+        cmake_args = [arg for arg in std_cmake_args if 'BUILD_TYPE' not in arg]
+
+        build_type = 'RelWithDebInfo' if '+debug' in spec else 'Release'
+        cmake_args.extend([
+                '..',
+                '-DCMAKE_BUILD_TYPE=' + build_type,
+                '-DLLVM_REQUIRES_RTTI:BOOL=ON',
+                '-DCLANG_DEFAULT_OPENMP_RUNTIME:STRING=libomp',
+                '-DPYTHON_EXECUTABLE:PATH=%s/bin/python' % spec['python'].prefix ])
+
+        if '+gold' in spec:
+            cmake_args.append('-DLLVM_BINUTILS_INCDIR=' + os.path.join(spec['binutils'].prefix, 'include'))
+        if '+polly' in spec:
+            cmake_args.append('-DLINK_POLLY_INTO_TOOLS:Bool=ON')
+        else:
+            cmake_args.append('-DLLVM_EXTERNAL_POLLY_BUILD:Bool=OFF')
+
+        if '+clang' not in spec:
+            cmake_args.append('-DLLVM_EXTERNAL_CLANG_BUILD:Bool=OFF')
+        if '+lldb' not in spec:
+            cmake_args.append('-DLLVM_EXTERNAL_LLDB_BUILD:Bool=OFF')
+        if '+internal_unwind' not in spec:
+            cmake_args.append('-DLLVM_EXTERNAL_LIBUNWIND_BUILD:Bool=OFF')
+        if '+libcxx' not in spec:
+            cmake_args.append('-DLLVM_EXTERNAL_LIBCXX_BUILD:Bool=OFF')
+            cmake_args.append('-DLLVM_EXTERNAL_LIBCXXABI_BUILD:Bool=OFF')
+        if '+compiler-rt' not in spec:
+            cmake_args.append('-DLLVM_EXTERNAL_COMPILER_RT_BUILD:Bool=OFF')
+
+        if '+clang' not in spec:
+            if '+clang_extra' in spec:
+                raise RuntimeError('The clang_extra variant requires the clang variant to be selected')
+            if '+lldb' in spec:
+                raise RuntimeError('The lldb variant requires the clang variant to be selected')
+
+        with working_dir('spack-build', create=True):
+            cmake(*cmake_args)
+            make()
+            make("install")
+            query_path = os.path.join('bin', 'clang-query')
+            # Manually install clang-query, because llvm doesn't...
+            if os.path.exists(query_path):
+                shutil.copy(query_path, os.path.join(prefix, 'bin'))
diff --git a/var/spack/packages/lmdb/package.py b/var/spack/repos/builtin/packages/lmdb/package.py
similarity index 100%
rename from var/spack/packages/lmdb/package.py
rename to var/spack/repos/builtin/packages/lmdb/package.py
diff --git a/var/spack/packages/lmod/package.py b/var/spack/repos/builtin/packages/lmod/package.py
similarity index 100%
rename from var/spack/packages/lmod/package.py
rename to var/spack/repos/builtin/packages/lmod/package.py
diff --git a/var/spack/packages/lua/package.py b/var/spack/repos/builtin/packages/lua/package.py
similarity index 70%
rename from var/spack/packages/lua/package.py
rename to var/spack/repos/builtin/packages/lua/package.py
index 6d8f7806d98cf58e2ca74527b75dd71092d10112..ca8cfc536576a3b0f39c9658845c4a21520867a6 100644
--- a/var/spack/packages/lua/package.py
+++ b/var/spack/repos/builtin/packages/lua/package.py
@@ -6,6 +6,7 @@ class Lua(Package):
     homepage = "http://www.lua.org"
     url      = "http://www.lua.org/ftp/lua-5.1.5.tar.gz"
 
+    version('5.3.2', '33278c2ab5ee3c1a875be8d55c1ca2a1')
     version('5.3.1', '797adacada8d85761c079390ff1d9961')
     version('5.3.0', 'a1b0a7e92d0c85bbff7a8d27bf29f8af')
     version('5.2.4', '913fdb32207046b273fdb17aad70be13')
@@ -18,11 +19,16 @@ class Lua(Package):
     version('5.1.3', 'a70a8dfaa150e047866dc01a46272599')
 
     depends_on('ncurses')
+    depends_on('readline')
 
     def install(self, spec, prefix):
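+        # Lua's top-level Makefile dispatches on a platform target; pick
+        # macosx or linux to match the host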
+        if spec.satisfies("=darwin-i686") or spec.satisfies("=darwin-x86_64"):
+            target = 'macosx'
+        else:
+            target = 'linux'
         make('INSTALL_TOP=%s' % prefix,
-             'MYLDFLAGS="-L%s/lib -Wl,-rpath,%s"' % (spec['ncurses'].prefix,spec['ncurses'].prefix),
-             'linux')
+             'MYLDFLAGS=-L%s -lncurses' % spec['ncurses'].prefix.lib,
+             target)
         make('INSTALL_TOP=%s' % prefix,
-             'MYLDFLAGS="-L%s/lib -Wl,-rpath,%s"' % (spec['ncurses'].prefix,spec['ncurses'].prefix),
+             'MYLDFLAGS=-L%s -lncurses' % spec['ncurses'].prefix.lib,
              'install')
diff --git a/var/spack/packages/lwgrp/package.py b/var/spack/repos/builtin/packages/lwgrp/package.py
similarity index 100%
rename from var/spack/packages/lwgrp/package.py
rename to var/spack/repos/builtin/packages/lwgrp/package.py
diff --git a/var/spack/packages/lwm2/package.py b/var/spack/repos/builtin/packages/lwm2/package.py
similarity index 100%
rename from var/spack/packages/lwm2/package.py
rename to var/spack/repos/builtin/packages/lwm2/package.py
diff --git a/var/spack/packages/matio/package.py b/var/spack/repos/builtin/packages/matio/package.py
similarity index 100%
rename from var/spack/packages/matio/package.py
rename to var/spack/repos/builtin/packages/matio/package.py
diff --git a/var/spack/repos/builtin/packages/mbedtls/package.py b/var/spack/repos/builtin/packages/mbedtls/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..3da00cf417f70216dbcbc0cb073127138710dedf
--- /dev/null
+++ b/var/spack/repos/builtin/packages/mbedtls/package.py
@@ -0,0 +1,22 @@
+from spack import *
+
+class Mbedtls(Package):
+    """
+    mbed TLS (formerly known as PolarSSL) makes it trivially easy for
+    developers to include cryptographic and SSL/TLS capabilities in their
+    (embedded) products, facilitating this functionality with a minimal
+    coding footprint.
+    """
+    homepage = "https://tls.mbed.org"
+    url      = "https://github.com/ARMmbed/mbedtls/archive/mbedtls-2.2.1.tar.gz"
+
+    version('2.2.1', '73a38f96898d6d03e32f55dd9f9a67be')
+    version('2.2.0', 'eaf4586c1ef93ae872e606b6c1203942')
+    version('2.1.4', '40cdf67b6c6d92c9cbcfd552d39ea3ae')
+    version('2.1.3', '7eb4cf1dfa68578a2c8dbd0b6fa752dd')
+    version('1.3.16', '4144d7320c691f721aeb9e67a1bc38e0')
+
+    depends_on('cmake')
+
+    def install(self, spec, prefix):
+        cmake('.', *std_cmake_args)
+
+        make()
+        make("install")
diff --git a/var/spack/packages/memaxes/package.py b/var/spack/repos/builtin/packages/memaxes/package.py
similarity index 100%
rename from var/spack/packages/memaxes/package.py
rename to var/spack/repos/builtin/packages/memaxes/package.py
diff --git a/var/spack/packages/mesa/package.py b/var/spack/repos/builtin/packages/mesa/package.py
similarity index 100%
rename from var/spack/packages/mesa/package.py
rename to var/spack/repos/builtin/packages/mesa/package.py
diff --git a/var/spack/repos/builtin/packages/metis/package.py b/var/spack/repos/builtin/packages/metis/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfc4de7d1d129b66290f1f8a833aa422cff1e1e
--- /dev/null
+++ b/var/spack/repos/builtin/packages/metis/package.py
@@ -0,0 +1,83 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+
+class Metis(Package):
+    """
+    METIS is a set of serial programs for partitioning graphs, partitioning finite element meshes, and producing fill
+    reducing orderings for sparse matrices. The algorithms implemented in METIS are based on the multilevel
+    recursive-bisection, multilevel k-way, and multi-constraint partitioning schemes.
+    """
+
+    homepage = 'http://glaros.dtc.umn.edu/gkhome/metis/metis/overview'
+    url = "http://glaros.dtc.umn.edu/gkhome/fetch/sw/metis/metis-5.1.0.tar.gz"
+
+    version('5.1.0', '5465e67079419a69e0116de24fce58fe')
+
+    variant('shared', default=True, description='Enables the build of shared libraries')
+    variant('debug', default=False, description='Builds the library in debug mode')
+    variant('gdb', default=False, description='Enables gdb support')
+
+    variant('idx64', default=False, description='Use int64_t as default index type')
+    variant('double', default=False, description='Use double precision floating point types')
+
+    depends_on('cmake @2.8:')  # build-time dependency
+
+    depends_on('gdb', when='+gdb')
+
+    def install(self, spec, prefix):
+
+        options = []
+        options.extend(std_cmake_args)
+
+        build_directory = join_path(self.stage.path, 'spack-build')
+        source_directory = self.stage.source_path
+
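+        # GKlib ships inside the METIS source tree; point CMake at it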
+        options.append('-DGKLIB_PATH:PATH={metis_source}/GKlib'.format(metis_source=source_directory))
+
+        if '+shared' in spec:
+            options.append('-DSHARED:BOOL=ON')
+
+        if '+debug' in spec:
+            options.extend(['-DDEBUG:BOOL=ON',
+                            '-DCMAKE_BUILD_TYPE:STRING=Debug'])
+
+        if '+gdb' in spec:
+            options.append('-DGDB:BOOL=ON')
+
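+        # METIS fixes its index and real widths at compile time via
+        # #defines in metis.h, so rewrite the header before configuring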
+        metis_header = join_path(source_directory, 'include', 'metis.h')
+
+        if '+idx64' in spec:
+            filter_file('IDXTYPEWIDTH 32', 'IDXTYPEWIDTH 64', metis_header)
+
+        if '+double' in spec:
+            filter_file('REALTYPEWIDTH 32', 'REALTYPEWIDTH 64', metis_header)
+
+        with working_dir(build_directory, create=True):
+            cmake(source_directory, *options)
+            make()
+            make("install")
\ No newline at end of file
diff --git a/var/spack/packages/mpc/package.py b/var/spack/repos/builtin/packages/mpc/package.py
similarity index 100%
rename from var/spack/packages/mpc/package.py
rename to var/spack/repos/builtin/packages/mpc/package.py
diff --git a/var/spack/packages/mpe2/mpe2.patch b/var/spack/repos/builtin/packages/mpe2/mpe2.patch
similarity index 100%
rename from var/spack/packages/mpe2/mpe2.patch
rename to var/spack/repos/builtin/packages/mpe2/mpe2.patch
diff --git a/var/spack/packages/mpe2/package.py b/var/spack/repos/builtin/packages/mpe2/package.py
similarity index 100%
rename from var/spack/packages/mpe2/package.py
rename to var/spack/repos/builtin/packages/mpe2/package.py
diff --git a/var/spack/packages/mpfr/package.py b/var/spack/repos/builtin/packages/mpfr/package.py
similarity index 96%
rename from var/spack/packages/mpfr/package.py
rename to var/spack/repos/builtin/packages/mpfr/package.py
index 0f2baac0041c04e1b3a40589384b1a42dd2982ee..a1bd7529cfe0bb621e88ed5571d0436bdc69bc8f 100644
--- a/var/spack/packages/mpfr/package.py
+++ b/var/spack/repos/builtin/packages/mpfr/package.py
@@ -31,7 +31,7 @@ class Mpfr(Package):
     url      = "http://www.mpfr.org/mpfr-current/mpfr-3.1.3.tar.bz2"
 
     version('3.1.3', '5fdfa3cfa5c86514ee4a241a1affa138')
-    # version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')
+    version('3.1.2', 'ee2c3ac63bf0c2359bf08fc3ee094c19')
 
     depends_on('gmp')
 
diff --git a/var/spack/packages/mpibash/mpibash-4.3.patch b/var/spack/repos/builtin/packages/mpibash/mpibash-4.3.patch
similarity index 100%
rename from var/spack/packages/mpibash/mpibash-4.3.patch
rename to var/spack/repos/builtin/packages/mpibash/mpibash-4.3.patch
diff --git a/var/spack/packages/mpibash/package.py b/var/spack/repos/builtin/packages/mpibash/package.py
similarity index 100%
rename from var/spack/packages/mpibash/package.py
rename to var/spack/repos/builtin/packages/mpibash/package.py
diff --git a/var/spack/packages/mpich/package.py b/var/spack/repos/builtin/packages/mpich/package.py
similarity index 91%
rename from var/spack/packages/mpich/package.py
rename to var/spack/repos/builtin/packages/mpich/package.py
index 7cfa0a3b619d53268c9bd47db4e8380671a97836..c856cfe277fe8b8308a35dc38ae6ec8ba246fc3f 100644
--- a/var/spack/packages/mpich/package.py
+++ b/var/spack/repos/builtin/packages/mpich/package.py
@@ -33,13 +33,16 @@ class Mpich(Package):
     list_url   = "http://www.mpich.org/static/downloads/"
     list_depth = 2
 
+    version('3.2',   'f414cfa77099cd1fa1a5ae4e22db508a')
     version('3.1.4', '2ab544607986486562e076b83937bba2')
     version('3.1.3', '93cb17f91ac758cbf9174ecb03563778')
     version('3.1.2', '7fbf4b81dcb74b07ae85939d1ceee7f1')
     version('3.1.1', '40dc408b1e03cc36d80209baaa2d32b7')
-    version('3.1', '5643dd176499bfb7d25079aaff25f2ec')
+    version('3.1',   '5643dd176499bfb7d25079aaff25f2ec')
     version('3.0.4', '9c5d5d4fe1e17dd12153f40bc5b6dbc0')
 
+    variant('verbs', default=False, description='Build support for OpenFabrics verbs.')
+
     provides('mpi@:3.0', when='@3:')
     provides('mpi@:1.3', when='@1:')
 
@@ -55,6 +58,12 @@ def install(self, spec, prefix):
         config_args = ["--prefix=" + prefix,
                        "--enable-shared"]
 
+        # Variants
+        if '+verbs' in spec:
+            config_args.append("--with-ibverbs")
+        else:
+            config_args.append("--without-ibverbs")
+
         # TODO: Spack should make it so that you can't actually find
         # these compilers if they're "disabled" for the current
         # compiler configuration.
diff --git a/var/spack/packages/mpileaks/package.py b/var/spack/repos/builtin/packages/mpileaks/package.py
similarity index 100%
rename from var/spack/packages/mpileaks/package.py
rename to var/spack/repos/builtin/packages/mpileaks/package.py
diff --git a/var/spack/packages/mrnet/package.py b/var/spack/repos/builtin/packages/mrnet/package.py
similarity index 100%
rename from var/spack/packages/mrnet/package.py
rename to var/spack/repos/builtin/packages/mrnet/package.py
diff --git a/var/spack/repos/builtin/packages/mumps/Makefile.inc b/var/spack/repos/builtin/packages/mumps/Makefile.inc
new file mode 100644
index 0000000000000000000000000000000000000000..2e6a041878ce35d1344fa0cf759dff0435621dd8
--- /dev/null
+++ b/var/spack/repos/builtin/packages/mumps/Makefile.inc
@@ -0,0 +1,38 @@
+LPORDDIR = $(topdir)/PORD/lib/
+IPORD = -I$(topdir)/PORD/include/
+LPORD = -L$(LPORDDIR) -lpord
+
+ORDERINGSC = $(ORDERINGSF)
+LORDERINGS = $(LMETIS) $(LPORD) $(LSCOTCH)
+IORDERINGSF = $(ISCOTCH)
+IORDERINGSC = $(IMETIS) $(IPORD) $(ISCOTCH)
+
+PLAT    =
+LIBEXT  = .a
+OUTC    = -o 
+OUTF    = -o
+RM = /bin/rm -f
+AR = ar vr 
+RANLIB = ranlib
+
+INCSEQ  = -I$(topdir)/libseq
+LIBSEQ  = -L$(topdir)/libseq -lmpiseq
+
+INCPAR  =
+LIBPAR  = $(SCALAP)
+
+LIBOTHERS = -lpthread
+
+#Sequential:
+ifeq ($(MUMPS_TYPE),seq)
+INCS = $(INCSEQ)
+LIBS = $(LIBSEQ)
+LIBSEQNEEDED = libseqneeded
+endif
+
+#Parallel:
+ifeq ($(MUMPS_TYPE),par)
+INCS = $(INCPAR)
+LIBS = $(LIBPAR)
+LIBSEQNEEDED =
+endif
diff --git a/var/spack/repos/builtin/packages/mumps/package.py b/var/spack/repos/builtin/packages/mumps/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..44a37903cc8dc9d05134d2e3c212d5eced651c5a
--- /dev/null
+++ b/var/spack/repos/builtin/packages/mumps/package.py
@@ -0,0 +1,139 @@
+from spack import *
+import os
+
+
+class Mumps(Package):
+    """MUMPS: a MUltifrontal Massively Parallel sparse direct Solver"""
+
+    homepage = "http://mumps.enseeiht.fr"
+    url      = "http://mumps.enseeiht.fr/MUMPS_5.0.1.tar.gz"
+
+    version('5.0.1', 'b477573fdcc87babe861f62316833db0')
+
+    variant('mpi', default=True, description='Activate the compilation of MUMPS with the MPI support')
+    variant('scotch', default=False, description='Activate Scotch as a possible ordering library')
+    variant('ptscotch', default=False, description='Activate PT-Scotch as a possible ordering library')
+    variant('metis', default=False, description='Activate Metis as a possible ordering library')
+    variant('parmetis', default=False, description='Activate Parmetis as a possible ordering library')
+    variant('double', default=True, description='Activate the compilation of dmumps')
+    variant('float', default=True, description='Activate the compilation of smumps')
+    variant('complex', default=True, description='Activate the compilation of cmumps and/or zmumps')
+    variant('idx64', default=False, description='Use int64_t/integer*8 as default index type')
+
+    
+    depends_on('scotch + esmumps', when='~ptscotch+scotch')
+    depends_on('scotch + esmumps + mpi', when='+ptscotch')
+    depends_on('metis', when='~parmetis+metis')
+    depends_on('parmetis', when="+parmetis")
+    depends_on('blas')
+    depends_on('lapack')
+    depends_on('scalapack', when='+mpi')
+    depends_on('mpi', when='+mpi')
+
+    # This function is not a patch function because, when scalapack is
+    # needed, it uses self.spec['scalapack'].fc_link, which is set by
+    # setup_dependent_environment in scalapack. That happens after
+    # patch and before install.
+    # def patch(self):
+    def write_makefile_inc(self):
+        if ('+parmetis' in self.spec or '+ptscotch' in self.spec) and '+mpi' not in self.spec:
+            raise RuntimeError('You cannot use the variants parmetis or ptscotch without mpi')
+        
+        makefile_conf = ["LIBBLAS = -L%s -lblas" % self.spec['blas'].prefix.lib]
+
+        orderings = ['-Dpord']
+        
+        if '+ptscotch' in self.spec or '+scotch' in self.spec:
+            join_lib = ' -l%s' % ('pt' if '+ptscotch' in self.spec else '')
+            makefile_conf.extend(
+                ["ISCOTCH = -I%s" % self.spec['scotch'].prefix.include,
+                 "LSCOTCH = -L%s %s%s" % (self.spec['scotch'].prefix.lib,
+                                          join_lib,
+                                          join_lib.join(['esmumps', 'scotch', 'scotcherr']))])
+            orderings.append('-Dscotch')
+            if '+ptscotch' in self.spec:
+                orderings.append('-Dptscotch')
+
+        if '+parmetis' in self.spec or '+metis' in self.spec:
+            libname = 'parmetis' if '+parmetis' in self.spec else 'metis'
+            makefile_conf.extend(
+                ["IMETIS = -I%s" % self.spec[libname].prefix.include,
+                 "LMETIS = -L%s -l%s" % (self.spec[libname].prefix.lib, libname)])
+
+            orderings.append('-Dmetis')
+            if '+parmetis' in self.spec:
+                orderings.append('-Dparmetis')
+
+        makefile_conf.append("ORDERINGSF = %s" % (' '.join(orderings)))
+
+        # TODO: test this part; it needs a full blas, scalapack and
+        # partitioning environment with 64-bit integers
+        if '+idx64' in self.spec:
+            makefile_conf.extend(
+                # These Fortran flags most probably work only with the
+                # Intel and GNU compilers; this is perhaps something
+                # the compiler should provide
+                ['OPTF    = -O  -DALLOW_NON_INIT %s' % ('-fdefault-integer-8' if self.compiler.name == "gcc" else '-i8'),
+                 'OPTL    = -O ',
+                 'OPTC    = -O -DINTSIZE64'])
+        else:
+            makefile_conf.extend(
+                ['OPTF    = -O  -DALLOW_NON_INIT',
+                 'OPTL    = -O ',
+                 'OPTC    = -O '])
+
+
+        if '+mpi' in self.spec:
+            makefile_conf.extend(
+                ["CC = %s" % join_path(self.spec['mpi'].prefix.bin, 'mpicc'),
+                 "FC = %s" % join_path(self.spec['mpi'].prefix.bin, 'mpif90'),
+                 "FL = %s" % join_path(self.spec['mpi'].prefix.bin, 'mpif90'),
+                 "SCALAP = %s" % self.spec['scalapack'].fc_link,
+                 "MUMPS_TYPE = par"])
+        else:
+            makefile_conf.extend(
+                ["CC = cc",
+                 "FC = fc",
+                 "FL = fc",
+                 "MUMPS_TYPE = seq"])
+
+        # TODO: change the value to the correct one according to the
+        # compiler possible values are -DAdd_, -DAdd__ and/or -DUPPER
+        makefile_conf.append("CDEFS   = -DAdd_")
+
+        
+        makefile_inc_template = join_path(os.path.dirname(self.module.__file__),
+                                          'Makefile.inc')
+        with open(makefile_inc_template, "r") as fh:
+            makefile_conf.extend(fh.read().split('\n'))
+        
+        with working_dir('.'):
+            with open("Makefile.inc", "w") as fh:
+                makefile_inc = '\n'.join(makefile_conf)
+                fh.write(makefile_inc)
+
+
+
+    def install(self, spec, prefix):
+        make_libs = []
+
+        # The choice to compile the [sdcz]examples targets is to have a
+        # kind of sanity check on the generated libraries.
+        if '+float' in spec:
+            make_libs.append('sexamples')
+            if '+complex' in spec:
+                make_libs.append('cexamples')
+
+        if '+double' in spec:
+            make_libs.append('dexamples')
+            if '+complex' in spec:
+                make_libs.append('zexamples')
+
+        self.write_makefile_inc()
+                
+        make(*make_libs)
+
+        install_tree('lib', prefix.lib)
+        install_tree('include', prefix.include)
+        if '~mpi' in spec:
+            install('libseq/libmpiseq.a', prefix.lib)
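To summarize the control flow of write_makefile_inc above: it conditionally collects Makefile variable assignments driven by the enabled variants, then appends the static Makefile.inc template shipped with the package. A standalone sketch of that pattern, with placeholder variant names, paths, and template lines (not the actual recipe code):

```python
# Standalone sketch of the write_makefile_inc pattern (placeholder
# variant names and paths; not the actual Spack package code).
def build_makefile_inc(variants, template_lines):
    conf = ["LIBBLAS = -L/opt/blas/lib -lblas"]   # hypothetical prefix
    orderings = ["-Dpord"]                        # PORD is always available
    if "metis" in variants:
        orderings.append("-Dmetis")
    conf.append("ORDERINGSF = %s" % " ".join(orderings))
    # MUMPS_TYPE selects the seq/par branch of the template above
    conf.append("MUMPS_TYPE = %s" % ("par" if "mpi" in variants else "seq"))
    return "\n".join(conf + template_lines)

print(build_makefile_inc({"mpi", "metis"}, ["LIBEXT  = .a"]))
```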
diff --git a/var/spack/packages/munge/package.py b/var/spack/repos/builtin/packages/munge/package.py
similarity index 100%
rename from var/spack/packages/munge/package.py
rename to var/spack/repos/builtin/packages/munge/package.py
diff --git a/var/spack/packages/muster/package.py b/var/spack/repos/builtin/packages/muster/package.py
similarity index 100%
rename from var/spack/packages/muster/package.py
rename to var/spack/repos/builtin/packages/muster/package.py
diff --git a/var/spack/packages/mvapich2/ad_lustre_rwcontig_open_source.patch b/var/spack/repos/builtin/packages/mvapich2/ad_lustre_rwcontig_open_source.patch
similarity index 100%
rename from var/spack/packages/mvapich2/ad_lustre_rwcontig_open_source.patch
rename to var/spack/repos/builtin/packages/mvapich2/ad_lustre_rwcontig_open_source.patch
diff --git a/var/spack/packages/mvapich2/package.py b/var/spack/repos/builtin/packages/mvapich2/package.py
similarity index 86%
rename from var/spack/packages/mvapich2/package.py
rename to var/spack/repos/builtin/packages/mvapich2/package.py
index 23a11b3171ab5ffc459fdfbe7fad4ae917bec4c3..af5ed1b08886977fbc0ab028af74351718d4765d 100644
--- a/var/spack/packages/mvapich2/package.py
+++ b/var/spack/repos/builtin/packages/mvapich2/package.py
@@ -4,15 +4,13 @@
 class Mvapich2(Package):
     """MVAPICH2 is an MPI implementation for Infiniband networks."""
     homepage = "http://mvapich.cse.ohio-state.edu/"
+    url = "http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.2b.tar.gz"
 
-    version('2.2a', 'b8ceb4fc5f5a97add9b3ff1b9cbe39d2',
-            url='http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.2a.tar.gz')
+    version('2.2b', '5651e8b7a72d7c77ca68da48f3a5d108')
+    version('2.2a', 'b8ceb4fc5f5a97add9b3ff1b9cbe39d2')
+    version('2.0',  '9fbb68a4111a8b6338e476dc657388b4')
+    version('1.9',  '5dc58ed08fd3142c260b70fe297e127c')
 
-    version('2.0', '9fbb68a4111a8b6338e476dc657388b4',
-            url='http://mvapich.cse.ohio-state.edu/download/mvapich/mv2/mvapich2-2.0.tar.gz')
-
-    version('1.9', '5dc58ed08fd3142c260b70fe297e127c',
-            url="http://mvapich.cse.ohio-state.edu/download/mvapich2/mv2/mvapich2-1.9.tgz")
     patch('ad_lustre_rwcontig_open_source.patch', when='@1.9')
 
     provides('mpi@:2.2', when='@1.9')  # MVAPICH2-1.9 supports MPI 2.2
@@ -41,16 +39,25 @@ class Mvapich2(Package):
     NEMESISIBTCP = 'nemesisibtcp'
     NEMESISIB = 'nemesisib'
     NEMESIS = 'nemesis'
+    MRAIL = 'mrail'
-    SUPPORTED_NETWORKS = (PSM, SOCK, NEMESIS, NEMESISIB, NEMESISIBTCP)
+    SUPPORTED_NETWORKS = (PSM, SOCK, NEMESIS, NEMESISIB, NEMESISIBTCP, MRAIL)
     variant(PSM, default=False, description='Configures a build for QLogic PSM-CH3')
     variant(SOCK, default=False, description='Configures a build for TCP/IP-CH3')
     variant(NEMESISIBTCP, default=False, description='Configures a build for both OFA-IB-Nemesis and TCP/IP-Nemesis')
     variant(NEMESISIB, default=False, description='Configures a build for OFA-IB-Nemesis')
     variant(NEMESIS, default=False, description='Configures a build for TCP/IP-Nemesis')
+    variant(MRAIL, default=False, description='Configures a build for OFA-IB-CH3')
     ##########
 
     # FIXME : CUDA support is missing
 
+    def url_for_version(self, version):
+        base_url = "http://mvapich.cse.ohio-state.edu/download"
+        if version < Version('2.0'):
+            return "%s/mvapich2/mv2/mvapich2-%s.tar.gz" % (base_url, version)
+        else:
+            return "%s/mvapich/mv2/mvapich2-%s.tar.gz"  % (base_url, version)
+
     @staticmethod
     def enabled(x):
         """
@@ -117,7 +124,7 @@ def set_network_type(self, spec, configure_args):
         if count > 1:
             raise RuntimeError('network variants are mutually exclusive (only one can be selected at a time)')
 
-        # From here on I can suppose that ony one variant has been selected
+        # From here on I can suppose that only one variant has been selected
         if self.enabled(Mvapich2.PSM) in spec:
             network_options = ["--with-device=ch3:psm"]
         elif self.enabled(Mvapich2.SOCK) in spec:
@@ -128,7 +135,7 @@ def set_network_type(self, spec, configure_args):
             network_options = ["--with-device=ch3:nemesis:ib"]
         elif self.enabled(Mvapich2.NEMESIS) in spec:
             network_options = ["--with-device=ch3:nemesis"]
-        else:
+        elif self.enabled(Mvapich2.MRAIL) in spec:
             network_options = ["--with-device=ch3:mrail", "--with-rdma=gen2"]
 
         configure_args.extend(network_options)
@@ -141,7 +148,14 @@ def install(self, spec, prefix):
             "--enable-romio",
             "--disable-silent-rules",
         ]
-        if not self.compiler.f77 and not self.compiler.fc:
+
+        if self.compiler.f77 and self.compiler.fc:
+            configure_args.append("--enable-fortran=all")
+        elif self.compiler.f77:
+            configure_args.append("--enable-fortran=f77")
+        elif self.compiler.fc:
+            configure_args.append("--enable-fortran=fc")
+        else:
             configure_args.append("--enable-fortran=none")
 
         # Set the type of the build (debug, release)
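The url_for_version hook above encodes the fact that the MVAPICH2 download tree moved at the 2.0 boundary. A standalone sketch of the same threshold logic, assuming purely numeric versions (letter-suffixed releases such as 2.2b are ignored here for simplicity; the helpers are hypothetical, not Spack's Version class):

```python
# Standalone sketch (hypothetical helpers, not Spack's Version class):
# MVAPICH2 moved between download directories at the 2.0 boundary, so
# the URL is picked by comparing version tuples.
def version_tuple(v):
    return tuple(int(p) for p in v.split("."))

def mvapich2_url(v):
    base = "http://mvapich.cse.ohio-state.edu/download"
    subdir = "mvapich2/mv2" if version_tuple(v) < (2, 0) else "mvapich/mv2"
    return "%s/%s/mvapich2-%s.tar.gz" % (base, subdir, v)

assert "/mvapich2/mv2/" in mvapich2_url("1.9")
assert "/mvapich/mv2/" in mvapich2_url("2.0")
```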
diff --git a/var/spack/packages/nasm/package.py b/var/spack/repos/builtin/packages/nasm/package.py
similarity index 100%
rename from var/spack/packages/nasm/package.py
rename to var/spack/repos/builtin/packages/nasm/package.py
diff --git a/var/spack/packages/ncdu/package.py b/var/spack/repos/builtin/packages/ncdu/package.py
similarity index 100%
rename from var/spack/packages/ncdu/package.py
rename to var/spack/repos/builtin/packages/ncdu/package.py
diff --git a/var/spack/packages/ncurses/package.py b/var/spack/repos/builtin/packages/ncurses/package.py
similarity index 66%
rename from var/spack/packages/ncurses/package.py
rename to var/spack/repos/builtin/packages/ncurses/package.py
index 31f53b6c43e63d98a7a1df9575045bafc4d7a63a..8dc808caaccafe5d51ec14e2232262e4990ecb3e 100644
--- a/var/spack/packages/ncurses/package.py
+++ b/var/spack/repos/builtin/packages/ncurses/package.py
@@ -17,19 +17,14 @@ class Ncurses(Package):
     patch('patch_gcc_5.txt', when='%gcc@5.0:')
 
     def install(self, spec, prefix):
-        configure("--prefix=%s" % prefix,
-                  "--with-shared",
-                  "--enable-widec",
-                  "--disable-pc-files",
-                  "--without-ada")
+        opts = [
+            "--prefix=%s" % prefix,
+            "--with-shared",
+            "--with-cxx-shared",
+            "--enable-widec",
+            "--enable-overwrite",
+            "--disable-lib-suffixes",
+            "--without-ada"]
+        configure(*opts)
         make()
         make("install")
-
-        configure("--prefix=%s" % prefix,
-                  "--with-shared",
-                  "--disable-widec",
-                  "--disable-pc-files",
-                  "--without-ada")
-        make()
-        make("install")
-
diff --git a/var/spack/packages/ncurses/patch_gcc_5.txt b/var/spack/repos/builtin/packages/ncurses/patch_gcc_5.txt
similarity index 100%
rename from var/spack/packages/ncurses/patch_gcc_5.txt
rename to var/spack/repos/builtin/packages/ncurses/patch_gcc_5.txt
diff --git a/var/spack/repos/builtin/packages/netcdf/package.py b/var/spack/repos/builtin/packages/netcdf/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..3cd0b2ee7a893a5200a75692eaf14a683062ec3d
--- /dev/null
+++ b/var/spack/repos/builtin/packages/netcdf/package.py
@@ -0,0 +1,80 @@
+from spack import *
+
+class Netcdf(Package):
+    """NetCDF is a set of software libraries and self-describing, machine-independent
+    data formats that support the creation, access, and sharing of array-oriented
+    scientific data."""
+
+    homepage = "http://www.unidata.ucar.edu/software/netcdf/"
+    url      = "ftp://ftp.unidata.ucar.edu/pub/netcdf/netcdf-4.3.3.tar.gz"
+
+    version('4.4.0', 'f01cb26a0126dd9a6224e76472d25f6c')
+    version('4.3.3', '5fbd0e108a54bd82cb5702a73f56d2ae')
+
+    variant('fortran', default=False, description="Download and install NetCDF-Fortran")
+    variant('hdf4',    default=False, description="Enable HDF4 support")
+
+    # Dependencies:
+    depends_on("curl")  # required for DAP support
+    depends_on("hdf", when='+hdf4')
+    depends_on("hdf5")  # required for NetCDF-4 support
+    depends_on("zlib")  # required for NetCDF-4 support
+
+    def install(self, spec, prefix):
+        # Environment variables
+        CPPFLAGS = []
+        LDFLAGS  = []
+        LIBS     = []
+
+        config_args = [
+            "--prefix=%s" % prefix,
+            "--enable-fsync",
+            "--enable-v2",
+            "--enable-utilities",
+            "--enable-shared",
+            "--enable-static",
+            "--enable-largefile",
+            # necessary for HDF5 support
+            "--enable-netcdf-4",
+            "--enable-dynamic-loading",
+            # necessary for DAP support
+            "--enable-dap"
+        ]
+
+        CPPFLAGS.append("-I%s/include" % spec['hdf5'].prefix)
+        LDFLAGS.append( "-L%s/lib"     % spec['hdf5'].prefix)
+
+        # HDF4 support
+        # As of NetCDF 4.1.3, "--with-hdf4=..." is no longer a valid option
+        # You must use the environment variables CPPFLAGS and LDFLAGS
+        if '+hdf4' in spec:
+            config_args.append("--enable-hdf4")
+            CPPFLAGS.append("-I%s/include" % spec['hdf'].prefix)
+            LDFLAGS.append( "-L%s/lib"     % spec['hdf'].prefix)
+            LIBS.append(    "-l%s"         % "jpeg")
+
+        if 'szip' in spec:
+            CPPFLAGS.append("-I%s/include" % spec['szip'].prefix)
+            LDFLAGS.append( "-L%s/lib"     % spec['szip'].prefix)
+            LIBS.append(    "-l%s"         % "sz")
+
+        # Fortran support
+        # In version 4.2+, NetCDF-C and NetCDF-Fortran have split.
+        # They can be installed separately, but this bootstrap procedure
+        # should be able to install both at the same time.
+        # Note: this is a new experimental feature.
+        if '+fortran' in spec:
+            config_args.append("--enable-remote-fortran-bootstrap")
+
+        config_args.append('CPPFLAGS=%s' % ' '.join(CPPFLAGS))
+        config_args.append('LDFLAGS=%s'  % ' '.join(LDFLAGS))
+        config_args.append('LIBS=%s'     % ' '.join(LIBS))
+
+        configure(*config_args)
+        make()
+        make("install")
+
+        # After installing NetCDF-C, install NetCDF-Fortran
+        if '+fortran' in spec:
+            make("build-netcdf-fortran")
+            make("install-netcdf-fortran")
diff --git a/var/spack/packages/netgauge/package.py b/var/spack/repos/builtin/packages/netgauge/package.py
similarity index 100%
rename from var/spack/packages/netgauge/package.py
rename to var/spack/repos/builtin/packages/netgauge/package.py
diff --git a/var/spack/packages/netlib-blas/package.py b/var/spack/repos/builtin/packages/netlib-blas/package.py
similarity index 100%
rename from var/spack/packages/netlib-blas/package.py
rename to var/spack/repos/builtin/packages/netlib-blas/package.py
diff --git a/var/spack/packages/netlib-lapack/package.py b/var/spack/repos/builtin/packages/netlib-lapack/package.py
similarity index 100%
rename from var/spack/packages/netlib-lapack/package.py
rename to var/spack/repos/builtin/packages/netlib-lapack/package.py
diff --git a/var/spack/repos/builtin/packages/netlib-scalapack/package.py b/var/spack/repos/builtin/packages/netlib-scalapack/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..5be91c4a40c0e6b6fea35dec862e31b79f2756df
--- /dev/null
+++ b/var/spack/repos/builtin/packages/netlib-scalapack/package.py
@@ -0,0 +1,50 @@
+from spack import *
+
+class NetlibScalapack(Package):
+    """ScaLAPACK is a library of high-performance linear algebra routines for parallel distributed memory machines"""
+    
+    homepage = "http://www.netlib.org/scalapack/"
+    url      = "http://www.netlib.org/scalapack/scalapack-2.0.2.tgz"
+
+    version('2.0.2', '2f75e600a2ba155ed9ce974a1c4b536f')
+    version('2.0.1', '17b8cde589ea0423afe1ec43e7499161')
+    version('2.0.0', '9e76ae7b291be27faaad47cfc256cbfe')
+    # versions before 2.0.0 do not use cmake and require blacs as a
+    # separate package
+    
+    variant('shared', default=True, description='Build the shared library version')
+    variant('fpic', default=False, description="Build with -fpic compiler option")
+    
+    provides('scalapack')
+    
+    depends_on('mpi')
+    depends_on('lapack')
+    
+    def install(self, spec, prefix):       
+        options = [
+            "-DBUILD_SHARED_LIBS:BOOL=%s" % 'ON' if '+shared' in spec else 'OFF',
+            "-DBUILD_STATIC_LIBS:BOOL=%s" % 'OFF' if '+shared' in spec else 'ON',
+            "-DUSE_OPTIMIZED_LAPACK_BLAS:BOOL=ON", # forces scalapack to use find_package(LAPACK)
+            ]
+
+        if '+fpic' in spec:
+            options.extend([
+                "-DCMAKE_C_FLAGS=-fPIC",
+                "-DCMAKE_Fortran_FLAGS=-fPIC"
+            ])
+           
+        options.extend(std_cmake_args)
+        
+        with working_dir('spack-build', create=True):
+            cmake('..', *options)
+            make()
+            make("install")
+
+    def setup_dependent_environment(self, module, spec, dependent_spec):
+        # TODO treat OS that are not Linux...
+        lib_suffix = '.so' if '+shared' in spec['scalapack'] else '.a'
+
+        spec['scalapack'].fc_link = '-L%s -lscalapack' % spec['scalapack'].prefix.lib
+        spec['scalapack'].cc_link = spec['scalapack'].fc_link
+        spec['scalapack'].libraries = [join_path(spec['scalapack'].prefix.lib,
+                                                 'libscalapack%s' % lib_suffix)]
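One Python subtlety in the options list above is worth spelling out: a conditional expression binds more loosely than the % operator, so the parentheses around the 'ON'/'OFF' choice are required; without them the else branch would discard the flag name entirely. A short demonstration:

```python
# Why the parentheses matter: the conditional expression binds more
# loosely than %, so the unparenthesized form drops the flag name.
shared = False
wrong = "-DBUILD_SHARED_LIBS:BOOL=%s" % 'ON' if shared else 'OFF'
right = "-DBUILD_SHARED_LIBS:BOOL=%s" % ('ON' if shared else 'OFF')
assert wrong == 'OFF'                           # flag name lost
assert right == '-DBUILD_SHARED_LIBS:BOOL=OFF'  # intended result
```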
diff --git a/var/spack/packages/nettle/package.py b/var/spack/repos/builtin/packages/nettle/package.py
similarity index 100%
rename from var/spack/packages/nettle/package.py
rename to var/spack/repos/builtin/packages/nettle/package.py
diff --git a/var/spack/packages/ninja/package.py b/var/spack/repos/builtin/packages/ninja/package.py
similarity index 100%
rename from var/spack/packages/ninja/package.py
rename to var/spack/repos/builtin/packages/ninja/package.py
diff --git a/var/spack/packages/ompss/package.py b/var/spack/repos/builtin/packages/ompss/package.py
similarity index 100%
rename from var/spack/packages/ompss/package.py
rename to var/spack/repos/builtin/packages/ompss/package.py
diff --git a/var/spack/packages/ompt-openmp/package.py b/var/spack/repos/builtin/packages/ompt-openmp/package.py
similarity index 100%
rename from var/spack/packages/ompt-openmp/package.py
rename to var/spack/repos/builtin/packages/ompt-openmp/package.py
diff --git a/var/spack/packages/opari2/package.py b/var/spack/repos/builtin/packages/opari2/package.py
similarity index 100%
rename from var/spack/packages/opari2/package.py
rename to var/spack/repos/builtin/packages/opari2/package.py
diff --git a/var/spack/packages/openblas/package.py b/var/spack/repos/builtin/packages/openblas/package.py
similarity index 81%
rename from var/spack/packages/openblas/package.py
rename to var/spack/repos/builtin/packages/openblas/package.py
index e01467c05ae720117a67cff210e95f4c6c35c642..9c8fa1c694b6c8544c691bba5c3742301aa1aa11 100644
--- a/var/spack/packages/openblas/package.py
+++ b/var/spack/repos/builtin/packages/openblas/package.py
@@ -19,3 +19,7 @@ def install(self, spec, prefix):
         with working_dir(prefix.lib):
             symlink('libopenblas.a', 'blas.a')
             symlink('libopenblas.a', 'libblas.a')
+
+        # Lapack virtual package should provide liblapack.a
+        with working_dir(prefix.lib):
+            symlink('libopenblas.a', 'liblapack.a')
diff --git a/var/spack/packages/openmpi/ad_lustre_rwcontig_open_source.patch b/var/spack/repos/builtin/packages/openmpi/ad_lustre_rwcontig_open_source.patch
similarity index 100%
rename from var/spack/packages/openmpi/ad_lustre_rwcontig_open_source.patch
rename to var/spack/repos/builtin/packages/openmpi/ad_lustre_rwcontig_open_source.patch
diff --git a/var/spack/packages/openmpi/configure.patch b/var/spack/repos/builtin/packages/openmpi/configure.patch
similarity index 100%
rename from var/spack/packages/openmpi/configure.patch
rename to var/spack/repos/builtin/packages/openmpi/configure.patch
diff --git a/var/spack/packages/openmpi/llnl-platforms.patch b/var/spack/repos/builtin/packages/openmpi/llnl-platforms.patch
similarity index 100%
rename from var/spack/packages/openmpi/llnl-platforms.patch
rename to var/spack/repos/builtin/packages/openmpi/llnl-platforms.patch
diff --git a/var/spack/packages/openmpi/package.py b/var/spack/repos/builtin/packages/openmpi/package.py
similarity index 65%
rename from var/spack/packages/openmpi/package.py
rename to var/spack/repos/builtin/packages/openmpi/package.py
index be2202fbbd7f9fd51b2272de76cf0eb66abc17c8..e4484af8c5ed9d8424e92efd2be3d64ff8d19bbf 100644
--- a/var/spack/packages/openmpi/package.py
+++ b/var/spack/repos/builtin/packages/openmpi/package.py
@@ -13,41 +13,61 @@ class Openmpi(Package):
     """
 
     homepage = "http://www.open-mpi.org"
+    url = "http://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-1.10.1.tar.bz2"
+    list_url = "http://www.open-mpi.org/software/ompi/"
+    list_depth = 3
 
-    version('1.10.1', 'f0fcd77ed345b7eafb431968124ba16e',
-            url = "http://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-1.10.1.tar.bz2")
-    version('1.10.0', '280cf952de68369cebaca886c5ce0304',
-            url = "http://www.open-mpi.org/software/ompi/v1.10/downloads/openmpi-1.10.0.tar.bz2")
-    version('1.8.8', '0dab8e602372da1425e9242ae37faf8c',
-            url = 'http://www.open-mpi.org/software/ompi/v1.8/downloads/openmpi-1.8.8.tar.bz2')
-    version('1.6.5', '03aed2a4aa4d0b27196962a2a65fc475',
-            url = "http://www.open-mpi.org/software/ompi/v1.6/downloads/openmpi-1.6.5.tar.bz2")
+    version('1.10.2', 'b2f43d9635d2d52826e5ef9feb97fd4c')
+    version('1.10.1', 'f0fcd77ed345b7eafb431968124ba16e')
+    version('1.10.0', '280cf952de68369cebaca886c5ce0304')
+    version('1.8.8', '0dab8e602372da1425e9242ae37faf8c')
+    version('1.6.5', '03aed2a4aa4d0b27196962a2a65fc475')
 
     patch('ad_lustre_rwcontig_open_source.patch', when="@1.6.5")
     patch('llnl-platforms.patch', when="@1.6.5")
-    patch('configure.patch', when="@1.10.0:")
+    patch('configure.patch', when="@1.10.0:1.10.1")
 
-    provides('mpi@:2.2', when='@1.6.5')    # Open MPI 1.6.5 supports MPI-2.2
-    provides('mpi@:3.0', when='@1.8.8')    # Open MPI 1.8.8 supports MPI-3.0
-    provides('mpi@:3.0', when='@1.10.0')   # Open MPI 1.10.0 supports MPI-3.0
-    provides('mpi@:3.0', when='@1.10.1')   # Open MPI 1.10.1 supports MPI-3.0
+    variant('psm', default=False, description='Build support for the PSM library.')
+    variant('verbs', default=False, description='Build support for OpenFabrics verbs.')
 
+    # TODO : variant support for other schedulers is missing
+    variant('tm', default=False, description='Build TM (Torque, PBSPro, and compatible) support')
+
+    provides('mpi@:2.2', when='@1.6.5')
+    provides('mpi@:3.0', when='@1.7.5:')
 
     depends_on('hwloc')
 
+    def url_for_version(self, version):
+        return "http://www.open-mpi.org/software/ompi/v%s/downloads/openmpi-%s.tar.bz2" % (version.up_to(2), version)
 
     def setup_dependent_environment(self, module, spec, dep_spec):
         """For dependencies, make mpicc's use spack wrapper."""
-        os.environ['OMPI_CC']  = 'cc'
+        os.environ['OMPI_CC'] = 'cc'
         os.environ['OMPI_CXX'] = 'c++'
         os.environ['OMPI_FC'] = 'f90'
         os.environ['OMPI_F77'] = 'f77'
 
-
     def install(self, spec, prefix):
-        config_args = ["--prefix=%s" % prefix]
-
-        config_args.append("--with-hwloc=%s" % spec['hwloc'].prefix)
+        config_args = ["--prefix=%s" % prefix,
+                       "--with-hwloc=%s" % spec['hwloc'].prefix,
+                       "--enable-shared",
+                       "--enable-static"]
+
+        # Variants
+        if '+tm' in spec:
+            config_args.append("--with-tm")  # necessary for Torque support
+
+        if '+psm' in spec:
+            config_args.append("--with-psm")
+
+        if '+verbs' in spec:
+            # Up through version 1.6, this option was named --with-openib
+            if spec.satisfies('@:1.6'):
+                config_args.append("--with-openib")
+            # In version 1.7, it was renamed to --with-verbs
+            elif spec.satisfies('@1.7:'):
+                config_args.append("--with-verbs")
 
         # TODO: use variants for this, e.g. +lanl, +llnl, etc.
         # use this for LANL builds, but for LLNL builds, we need:
@@ -67,7 +87,6 @@ def install(self, spec, prefix):
 
         self.filter_compilers()
 
-
     def filter_compilers(self):
         """Run after install to make the MPI compilers use the
            compilers that Spack built the package with.
@@ -76,7 +95,7 @@ def filter_compilers(self):
            to Spack's generic cc, c++ and f90.  We want them to
            be bound to whatever compiler they were built with.
         """
-        kwargs = { 'ignore_absent' : True, 'backup' : False, 'string' : False }
+        kwargs = {'ignore_absent': True, 'backup': False, 'string': False}
         dir = os.path.join(self.prefix, 'share/openmpi/')
 
         cc_wrappers = ['mpicc-vt-wrapper-data.txt', 'mpicc-wrapper-data.txt',
@@ -114,5 +133,3 @@ def filter_compilers(self):
             if not os.path.islink(path):
                 filter_file('compiler=.*', 'compiler=%s' % self.compiler.fc,
                             path, **kwargs)
-
-
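Open MPI's url_for_version above relies on version.up_to(2) because tarballs live under a directory named after the first two version components (v1.10, v1.8, and so on). A standalone sketch with a hypothetical up_to helper (not Spack's Version API):

```python
# Standalone sketch (hypothetical up_to helper, not Spack's Version
# API): Open MPI stores tarballs under a directory named after the
# first two version components.
def up_to(version, n):
    return ".".join(version.split(".")[:n])

def openmpi_url(version):
    return ("http://www.open-mpi.org/software/ompi/v%s/downloads/"
            "openmpi-%s.tar.bz2") % (up_to(version, 2), version)

assert "/v1.10/downloads/openmpi-1.10.2" in openmpi_url("1.10.2")
print(openmpi_url("1.8.8"))
```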
diff --git a/var/spack/packages/openspeedshop/package.py b/var/spack/repos/builtin/packages/openspeedshop/package.py
similarity index 100%
rename from var/spack/packages/openspeedshop/package.py
rename to var/spack/repos/builtin/packages/openspeedshop/package.py
diff --git a/var/spack/packages/openssl/package.py b/var/spack/repos/builtin/packages/openssl/package.py
similarity index 63%
rename from var/spack/packages/openssl/package.py
rename to var/spack/repos/builtin/packages/openssl/package.py
index 1fa288a36dc155afec47489d0089967fe3c376f1..8f0427796be64ee9c403cf7e1517d83bc9972093 100644
--- a/var/spack/packages/openssl/package.py
+++ b/var/spack/repos/builtin/packages/openssl/package.py
@@ -19,12 +19,24 @@ class Openssl(Package):
     parallel = False
 
     def install(self, spec, prefix):
+        # OpenSSL uses a variable APPS in its Makefile. If it happens to be set
+        # in the environment, then this will override what is set in the
+        # Makefile, leading to build errors.
+        env.pop('APPS', None)
+        if spec.satisfies("=darwin-x86_64") or spec.satisfies("=ppc64"):
+            # This needs to be done for all 64-bit architectures (except Linux,
+            # where it happens automatically?)
+            env['KERNEL_BITS'] = '64'
         config = Executable("./config")
         config("--prefix=%s" % prefix,
-               "--openssldir=%s/etc/openssl" % prefix,
+               "--openssldir=%s" % join_path(prefix, 'etc', 'openssl'),
                "zlib",
                "no-krb5",
                "shared")
+        # Remove non-standard compiler options if present. These options are
+        # present e.g. on Darwin. They are non-standard, i.e. most compilers
+        # (e.g. gcc) will not accept them.
+        filter_file(r'-arch x86_64', '', 'Makefile')
 
         make()
         make("install")
diff --git a/var/spack/packages/otf/package.py b/var/spack/repos/builtin/packages/otf/package.py
similarity index 100%
rename from var/spack/packages/otf/package.py
rename to var/spack/repos/builtin/packages/otf/package.py
diff --git a/var/spack/packages/otf2/package.py b/var/spack/repos/builtin/packages/otf2/package.py
similarity index 100%
rename from var/spack/packages/otf2/package.py
rename to var/spack/repos/builtin/packages/otf2/package.py
diff --git a/var/spack/packages/pango/package.py b/var/spack/repos/builtin/packages/pango/package.py
similarity index 100%
rename from var/spack/packages/pango/package.py
rename to var/spack/repos/builtin/packages/pango/package.py
diff --git a/var/spack/packages/papi/package.py b/var/spack/repos/builtin/packages/papi/package.py
similarity index 100%
rename from var/spack/packages/papi/package.py
rename to var/spack/repos/builtin/packages/papi/package.py
diff --git a/var/spack/packages/paraver/package.py b/var/spack/repos/builtin/packages/paraver/package.py
similarity index 100%
rename from var/spack/packages/paraver/package.py
rename to var/spack/repos/builtin/packages/paraver/package.py
diff --git a/var/spack/packages/paraview/package.py b/var/spack/repos/builtin/packages/paraview/package.py
similarity index 94%
rename from var/spack/packages/paraview/package.py
rename to var/spack/repos/builtin/packages/paraview/package.py
index 1d99b348997b7518b9fd9c7edb7870000bf366e0..aaab352e66294a846ae70dcf2102a7408e14cca3 100644
--- a/var/spack/packages/paraview/package.py
+++ b/var/spack/repos/builtin/packages/paraview/package.py
@@ -60,6 +60,10 @@ def nfeature_to_bool(feature):
 
             feature_args.extend(std_cmake_args)
 
+            if 'darwin' in self.spec.architecture:
+                feature_args.append('-DVTK_USE_X:BOOL=OFF')
+                feature_args.append('-DPARAVIEW_DO_UNIX_STYLE_INSTALLS:BOOL=ON')
+
             cmake('..',
                 '-DCMAKE_INSTALL_PREFIX:PATH=%s' % prefix,
                 '-DBUILD_TESTING:BOOL=OFF',
diff --git a/var/spack/repos/builtin/packages/parmetis/package.py b/var/spack/repos/builtin/packages/parmetis/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..c897dec7e44b4768e4e9abe9e94f5b41b4a4a1a1
--- /dev/null
+++ b/var/spack/repos/builtin/packages/parmetis/package.py
@@ -0,0 +1,95 @@
+##############################################################################
+# Copyright (c) 2013-2015, Lawrence Livermore National Security, LLC.
+# Produced at the Lawrence Livermore National Laboratory.
+#
+# This file is part of Spack.
+# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
+# LLNL-CODE-647188
+#
+# For details, see https://github.com/llnl/spack
+# Please also see the LICENSE file for our notice and the LGPL.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License (as published by
+# the Free Software Foundation) version 2.1 dated February 1999.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
+# conditions of the GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+##############################################################################
+
+from spack import *
+
+# FIXME : a lot of code is duplicated from packages/metis/package.py. Inheriting from there may reduce
+# FIXME : the installation rules to just a few lines
+
+
+class Parmetis(Package):
+    """
+    ParMETIS is an MPI-based parallel library that implements a variety of algorithms for partitioning unstructured
+    graphs, meshes, and for computing fill-reducing orderings of sparse matrices.
+    """
+    homepage = 'http://glaros.dtc.umn.edu/gkhome/metis/parmetis/overview'
+    url = 'http://glaros.dtc.umn.edu/gkhome/fetch/sw/parmetis/parmetis-4.0.3.tar.gz'
+
+    version('4.0.3', 'f69c479586bf6bb7aff6a9bc0c739628')
+
+    variant('shared', default=True, description='Enables the build of shared libraries')
+    variant('debug', default=False, description='Builds the library in debug mode')
+    variant('gdb', default=False, description='Enables gdb support')
+
+    variant('idx64', default=False, description='Use int64_t as default index type')
+    variant('double', default=False, description='Use double precision floating point types')
+
+    depends_on('cmake @2.8:')  # build dependency
+    depends_on('mpi')
+
+    # FIXME : this should conflict with metis as it builds its own version internally
+
+    depends_on('gdb', when='+gdb')
+
+    def install(self, spec, prefix):
+        options = []
+        options.extend(std_cmake_args)
+
+        build_directory = join_path(self.stage.path, 'spack-build')
+        source_directory = self.stage.source_path
+        metis_source = join_path(source_directory, 'metis')
+
+        # FIXME : Once a contract is defined, MPI compilers should be retrieved indirectly via spec['mpi'] in case
+        # FIXME : they use a non-standard name
+        options.extend(['-DGKLIB_PATH:PATH={metis_source}/GKlib'.format(metis_source=metis_source),
+                        '-DMETIS_PATH:PATH={metis_source}'.format(metis_source=metis_source),
+                        '-DCMAKE_C_COMPILER:STRING=mpicc',
+                        '-DCMAKE_CXX_COMPILER:STRING=mpicxx'])
+
+        if '+shared' in spec:
+            options.append('-DSHARED:BOOL=ON')
+
+        if '+debug' in spec:
+            options.extend(['-DDEBUG:BOOL=ON',
+                            '-DCMAKE_BUILD_TYPE:STRING=Debug'])
+
+        if '+gdb' in spec:
+            options.append('-DGDB:BOOL=ON')
+
+        metis_header = join_path(metis_source, 'include', 'metis.h')
+
+        if '+idx64' in spec:
+            filter_file('IDXTYPEWIDTH 32', 'IDXTYPEWIDTH 64', metis_header)
+
+        if '+double' in spec:
+            filter_file('REALTYPEWIDTH 32', 'REALTYPEWIDTH 64', metis_header)
+
+        with working_dir(build_directory, create=True):
+            cmake(source_directory, *options)
+            make()
+            make("install")
+            # The ParMETIS build system doesn't allow an external METIS to be used, but it also
+            # doesn't copy the required metis header
+            install(metis_header, self.prefix.include)
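The two filter_file calls above work by rewriting constants in metis.h before the build, since the bundled METIS has no configure switch for index or real width. A standalone sketch using re.sub in place of Spack's filter_file, on an illustrative header fragment:

```python
# Standalone sketch of the header rewrite above (re.sub standing in
# for Spack's filter_file): switch METIS to 64-bit indices and reals
# before building.
import re

header = "#define IDXTYPEWIDTH 32\n#define REALTYPEWIDTH 32\n"
header = re.sub('IDXTYPEWIDTH 32', 'IDXTYPEWIDTH 64', header)
header = re.sub('REALTYPEWIDTH 32', 'REALTYPEWIDTH 64', header)
assert 'IDXTYPEWIDTH 64' in header and 'REALTYPEWIDTH 64' in header
```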
diff --git a/var/spack/packages/parpack/package.py b/var/spack/repos/builtin/packages/parpack/package.py
similarity index 100%
rename from var/spack/packages/parpack/package.py
rename to var/spack/repos/builtin/packages/parpack/package.py
diff --git a/var/spack/repos/builtin/packages/patchelf/package.py b/var/spack/repos/builtin/packages/patchelf/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..036dc6bd17b4e4e4a01614f74b86dcd32c6d6453
--- /dev/null
+++ b/var/spack/repos/builtin/packages/patchelf/package.py
@@ -0,0 +1,16 @@
+from spack import *
+
+class Patchelf(Package):
+    """PatchELF is a small utility to modify the dynamic linker and RPATH of ELF executables."""
+
+    homepage = "https://nixos.org/patchelf.html"
+    url      = "http://nixos.org/releases/patchelf/patchelf-0.8/patchelf-0.8.tar.gz"
+    list_url = "http://nixos.org/releases/patchelf/"
+    list_depth = 2
+
+    version('0.8', '407b229e6a681ffb0e2cdd5915cb2d01')
+
+    def install(self, spec, prefix):
+        configure('--prefix=%s' % prefix)
+        make()
+        make("install")
diff --git a/var/spack/packages/pcre/package.py b/var/spack/repos/builtin/packages/pcre/package.py
similarity index 100%
rename from var/spack/packages/pcre/package.py
rename to var/spack/repos/builtin/packages/pcre/package.py
diff --git a/var/spack/packages/pcre2/package.py b/var/spack/repos/builtin/packages/pcre2/package.py
similarity index 100%
rename from var/spack/packages/pcre2/package.py
rename to var/spack/repos/builtin/packages/pcre2/package.py
diff --git a/var/spack/packages/pdt/package.py b/var/spack/repos/builtin/packages/pdt/package.py
similarity index 100%
rename from var/spack/packages/pdt/package.py
rename to var/spack/repos/builtin/packages/pdt/package.py
diff --git a/var/spack/packages/petsc/package.py b/var/spack/repos/builtin/packages/petsc/package.py
similarity index 91%
rename from var/spack/packages/petsc/package.py
rename to var/spack/repos/builtin/packages/petsc/package.py
index f3ed3d72ec5073603e1bff99253ee15e9d9e4a5a..87f700629df8c5401a5aa5199256113634ce4934 100644
--- a/var/spack/packages/petsc/package.py
+++ b/var/spack/repos/builtin/packages/petsc/package.py
@@ -12,6 +12,8 @@ class Petsc(Package):
     version('3.5.2', 'ad170802b3b058b5deb9cd1f968e7e13')
     version('3.5.1', 'a557e029711ebf425544e117ffa44d8f')
 
+    depends_on("python @2.6:2.9")   # requires Python for building
+
     depends_on("boost")
     depends_on("blas")
     depends_on("lapack")
@@ -23,9 +25,6 @@ class Petsc(Package):
 
     def install(self, spec, prefix):
         configure("--prefix=%s" % prefix,
-                  "CC=cc",
-                  "CXX=c++",
-                  "FC=f90",
                   "--with-blas-lib=%s/libblas.a"     % spec['blas'].prefix.lib,
                   "--with-lapack-lib=%s/liblapack.a" % spec['lapack'].prefix.lib,
                   "--with-boost-dir=%s"              % spec['boost'].prefix,
@@ -33,6 +32,7 @@ def install(self, spec, prefix):
                   "--with-parmetis-dir=%s"           % spec['parmetis'].prefix,
                   "--with-metis-dir=%s"              % spec['metis'].prefix,
                   "--with-hdf5-dir=%s"               % spec['hdf5'].prefix,
+                  "--with-mpi-dir=%s"                % spec['mpi'].prefix,
                   "--with-shared-libraries=0")
 
         # PETSc has its own way of doing parallel make.
diff --git a/var/spack/packages/pidx/package.py b/var/spack/repos/builtin/packages/pidx/package.py
similarity index 100%
rename from var/spack/packages/pidx/package.py
rename to var/spack/repos/builtin/packages/pidx/package.py
diff --git a/var/spack/packages/pixman/package.py b/var/spack/repos/builtin/packages/pixman/package.py
similarity index 100%
rename from var/spack/packages/pixman/package.py
rename to var/spack/repos/builtin/packages/pixman/package.py
diff --git a/var/spack/packages/pkg-config/package.py b/var/spack/repos/builtin/packages/pkg-config/package.py
similarity index 100%
rename from var/spack/packages/pkg-config/package.py
rename to var/spack/repos/builtin/packages/pkg-config/package.py
diff --git a/var/spack/packages/pmgr_collective/package.py b/var/spack/repos/builtin/packages/pmgr_collective/package.py
similarity index 100%
rename from var/spack/packages/pmgr_collective/package.py
rename to var/spack/repos/builtin/packages/pmgr_collective/package.py
diff --git a/var/spack/packages/postgresql/package.py b/var/spack/repos/builtin/packages/postgresql/package.py
similarity index 100%
rename from var/spack/packages/postgresql/package.py
rename to var/spack/repos/builtin/packages/postgresql/package.py
diff --git a/var/spack/packages/ppl/package.py b/var/spack/repos/builtin/packages/ppl/package.py
similarity index 100%
rename from var/spack/packages/ppl/package.py
rename to var/spack/repos/builtin/packages/ppl/package.py
diff --git a/var/spack/packages/protobuf/package.py b/var/spack/repos/builtin/packages/protobuf/package.py
similarity index 100%
rename from var/spack/packages/protobuf/package.py
rename to var/spack/repos/builtin/packages/protobuf/package.py
diff --git a/var/spack/repos/builtin/packages/py-astropy/package.py b/var/spack/repos/builtin/packages/py-astropy/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..d138a514f68e7ffe8e05fd4c784c09224b17d4cb
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-astropy/package.py
@@ -0,0 +1,28 @@
+from spack import *
+
+class PyAstropy(Package):
+    """
+    The Astropy Project is a community effort to develop a single core
+    package for Astronomy in Python and foster interoperability between
+    Python astronomy packages.
+    """
+    homepage = 'http://www.astropy.org/'
+
+    version('1.1.post1', 'b52919f657a37d45cc45f5cb0f58c44d')
+
+    def url_for_version(self, version):
+        return 'https://pypi.python.org/packages/source/a/astropy/astropy-{0}.tar.gz'.format(version)
+
+    extends('python')
+
+    depends_on('cfitsio')
+    depends_on('expat')
+    depends_on('py-h5py')
+    depends_on('py-numpy')
+    depends_on('py-scipy')
+
+    def install(self, spec, prefix):
+        python('setup.py', 'build', '--use-system-cfitsio',
+                                    '--use-system-expat')
+        python('setup.py', 'install', '--prefix=' + prefix)
+
diff --git a/var/spack/packages/py-basemap/package.py b/var/spack/repos/builtin/packages/py-basemap/package.py
similarity index 100%
rename from var/spack/packages/py-basemap/package.py
rename to var/spack/repos/builtin/packages/py-basemap/package.py
diff --git a/var/spack/packages/py-biopython/package.py b/var/spack/repos/builtin/packages/py-biopython/package.py
similarity index 100%
rename from var/spack/packages/py-biopython/package.py
rename to var/spack/repos/builtin/packages/py-biopython/package.py
diff --git a/var/spack/packages/py-blessings/package.py b/var/spack/repos/builtin/packages/py-blessings/package.py
similarity index 100%
rename from var/spack/packages/py-blessings/package.py
rename to var/spack/repos/builtin/packages/py-blessings/package.py
diff --git a/var/spack/packages/py-cffi/package.py b/var/spack/repos/builtin/packages/py-cffi/package.py
similarity index 100%
rename from var/spack/packages/py-cffi/package.py
rename to var/spack/repos/builtin/packages/py-cffi/package.py
diff --git a/var/spack/packages/py-coverage/package.py b/var/spack/repos/builtin/packages/py-coverage/package.py
similarity index 100%
rename from var/spack/packages/py-coverage/package.py
rename to var/spack/repos/builtin/packages/py-coverage/package.py
diff --git a/var/spack/packages/py-cython/package.py b/var/spack/repos/builtin/packages/py-cython/package.py
similarity index 100%
rename from var/spack/packages/py-cython/package.py
rename to var/spack/repos/builtin/packages/py-cython/package.py
diff --git a/var/spack/packages/py-dateutil/package.py b/var/spack/repos/builtin/packages/py-dateutil/package.py
similarity index 100%
rename from var/spack/packages/py-dateutil/package.py
rename to var/spack/repos/builtin/packages/py-dateutil/package.py
diff --git a/var/spack/packages/py-epydoc/package.py b/var/spack/repos/builtin/packages/py-epydoc/package.py
similarity index 100%
rename from var/spack/packages/py-epydoc/package.py
rename to var/spack/repos/builtin/packages/py-epydoc/package.py
diff --git a/var/spack/repos/builtin/packages/py-funcsigs/package.py b/var/spack/repos/builtin/packages/py-funcsigs/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..a42889028870868c7d167c26087705eee77d6b35
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-funcsigs/package.py
@@ -0,0 +1,19 @@
+from spack import *
+import os
+
+class PyFuncsigs(Package):
+    """Python function signatures from PEP362 for Python 2.6, 2.7 and 3.2."""
+    homepage = "https://pypi.python.org/pypi/funcsigs"
+    url      = "https://pypi.python.org/packages/source/f/funcsigs/funcsigs-0.4.tar.gz"
+
+    version('0.4', 'fb1d031f284233e09701f6db1281c2a5')
+
+    extends('python')
+
+    depends_on('py-setuptools')
+
+    def install(self, spec, prefix):
+        python('setup.py', 'install', '--prefix=%s' % prefix)
+
+
+
diff --git a/var/spack/packages/py-genders/package.py b/var/spack/repos/builtin/packages/py-genders/package.py
similarity index 100%
rename from var/spack/packages/py-genders/package.py
rename to var/spack/repos/builtin/packages/py-genders/package.py
diff --git a/var/spack/packages/py-gnuplot/package.py b/var/spack/repos/builtin/packages/py-gnuplot/package.py
similarity index 100%
rename from var/spack/packages/py-gnuplot/package.py
rename to var/spack/repos/builtin/packages/py-gnuplot/package.py
diff --git a/var/spack/packages/py-h5py/package.py b/var/spack/repos/builtin/packages/py-h5py/package.py
similarity index 100%
rename from var/spack/packages/py-h5py/package.py
rename to var/spack/repos/builtin/packages/py-h5py/package.py
diff --git a/var/spack/packages/py-ipython/package.py b/var/spack/repos/builtin/packages/py-ipython/package.py
similarity index 100%
rename from var/spack/packages/py-ipython/package.py
rename to var/spack/repos/builtin/packages/py-ipython/package.py
diff --git a/var/spack/packages/py-libxml2/package.py b/var/spack/repos/builtin/packages/py-libxml2/package.py
similarity index 100%
rename from var/spack/packages/py-libxml2/package.py
rename to var/spack/repos/builtin/packages/py-libxml2/package.py
diff --git a/var/spack/packages/py-lockfile/package.py b/var/spack/repos/builtin/packages/py-lockfile/package.py
similarity index 100%
rename from var/spack/packages/py-lockfile/package.py
rename to var/spack/repos/builtin/packages/py-lockfile/package.py
diff --git a/var/spack/packages/py-mako/package.py b/var/spack/repos/builtin/packages/py-mako/package.py
similarity index 100%
rename from var/spack/packages/py-mako/package.py
rename to var/spack/repos/builtin/packages/py-mako/package.py
diff --git a/var/spack/packages/py-matplotlib/package.py b/var/spack/repos/builtin/packages/py-matplotlib/package.py
similarity index 91%
rename from var/spack/packages/py-matplotlib/package.py
rename to var/spack/repos/builtin/packages/py-matplotlib/package.py
index a5fee39d42e19139b39094b9c95262543ab89835..2167735fb89b78f1282261c0ea1f57d82e68f9a1 100644
--- a/var/spack/packages/py-matplotlib/package.py
+++ b/var/spack/repos/builtin/packages/py-matplotlib/package.py
@@ -12,7 +12,7 @@ class PyMatplotlib(Package):
     variant('gui', default=False, description='Enable GUI')
     variant('ipython', default=False, description='Enable ipython support')
 
-    extends('python', ignore=r'bin/nosetests.*$')
+    extends('python', ignore=r'bin/nosetests.*$|bin/pbr$')
 
     depends_on('py-pyside', when='+gui')
     depends_on('py-ipython', when='+ipython')
@@ -22,7 +22,11 @@ class PyMatplotlib(Package):
     depends_on('py-pytz')
     depends_on('py-nose')
     depends_on('py-numpy')
+    depends_on('py-mock')
+    depends_on('py-pbr')
+    depends_on('py-funcsigs')
 
+    depends_on('freetype')
     depends_on('qt', when='+gui')
     depends_on('bzip2')
     depends_on('tcl', when='+gui')
diff --git a/var/spack/packages/py-mock/package.py b/var/spack/repos/builtin/packages/py-mock/package.py
similarity index 95%
rename from var/spack/packages/py-mock/package.py
rename to var/spack/repos/builtin/packages/py-mock/package.py
index 3b08428ba0c09cbfef999dfaf3f1f22f5065e7d1..e89af8802aa893045b8323ffc1e69e1e7ce2423a 100644
--- a/var/spack/packages/py-mock/package.py
+++ b/var/spack/repos/builtin/packages/py-mock/package.py
@@ -11,6 +11,7 @@ class PyMock(Package):
     version('1.3.0', '73ee8a4afb3ff4da1b4afa287f39fdeb')
 
     extends('python')
+    depends_on('py-pbr')
     depends_on('py-setuptools@17.1:')
 
     def install(self, spec, prefix):
diff --git a/var/spack/packages/py-mpi4py/package.py b/var/spack/repos/builtin/packages/py-mpi4py/package.py
similarity index 100%
rename from var/spack/packages/py-mpi4py/package.py
rename to var/spack/repos/builtin/packages/py-mpi4py/package.py
diff --git a/var/spack/packages/py-mx/package.py b/var/spack/repos/builtin/packages/py-mx/package.py
similarity index 100%
rename from var/spack/packages/py-mx/package.py
rename to var/spack/repos/builtin/packages/py-mx/package.py
diff --git a/var/spack/packages/py-mysqldb1/package.py b/var/spack/repos/builtin/packages/py-mysqldb1/package.py
similarity index 100%
rename from var/spack/packages/py-mysqldb1/package.py
rename to var/spack/repos/builtin/packages/py-mysqldb1/package.py
diff --git a/var/spack/packages/py-nose/package.py b/var/spack/repos/builtin/packages/py-nose/package.py
similarity index 100%
rename from var/spack/packages/py-nose/package.py
rename to var/spack/repos/builtin/packages/py-nose/package.py
diff --git a/var/spack/repos/builtin/packages/py-numexpr/package.py b/var/spack/repos/builtin/packages/py-numexpr/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..89f8a525b12f5abe27d24c69b5ba2ec01ab1096d
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-numexpr/package.py
@@ -0,0 +1,15 @@
+from spack import *
+import re
+
+class PyNumexpr(Package):
+    """Fast numerical expression evaluator for NumPy"""
+    homepage = "https://pypi.python.org/pypi/numexpr"
+    url      = "https://pypi.python.org/packages/source/n/numexpr/numexpr-2.4.6.tar.gz"
+
+    version('2.4.6', '17ac6fafc9ea1ce3eb970b9abccb4fbd')
+
+    extends('python')
+    depends_on('py-numpy')
+
+    def install(self, spec, prefix):
+        python('setup.py', 'install', '--prefix=%s' % prefix)
diff --git a/var/spack/packages/py-numpy/package.py b/var/spack/repos/builtin/packages/py-numpy/package.py
similarity index 56%
rename from var/spack/packages/py-numpy/package.py
rename to var/spack/repos/builtin/packages/py-numpy/package.py
index 4c085fba6e582d96a57dc419c31720973bb07df3..03548111863f49e3a0e38556abf0f03a6aeeecae 100644
--- a/var/spack/packages/py-numpy/package.py
+++ b/var/spack/repos/builtin/packages/py-numpy/package.py
@@ -7,15 +7,18 @@ class PyNumpy(Package):
 
     version('1.9.1', '78842b73560ec378142665e712ae4ad9')
     version('1.9.2', 'a1ed53432dbcd256398898d35bc8e645')
-    
+
+    variant('blas', default=True)
+
     extends('python')
     depends_on('py-nose')
-    depends_on('netlib-blas+fpic')
-    depends_on('netlib-lapack+shared')
+    depends_on('netlib-blas+fpic', when='+blas')
+    depends_on('netlib-lapack+shared', when='+blas')
 
     def install(self, spec, prefix):
-        with open('site.cfg', 'w') as f:
-            f.write('[DEFAULT]\n')
-            f.write('libraries=lapack,blas\n')
-            f.write('library_dirs=%s/lib:%s/lib\n' % (spec['blas'].prefix, spec['lapack'].prefix))
+        if '+blas' in spec:
+            with open('site.cfg', 'w') as f:
+                f.write('[DEFAULT]\n')
+                f.write('libraries=lapack,blas\n')
+                f.write('library_dirs=%s/lib:%s/lib\n' % (spec['blas'].prefix, spec['lapack'].prefix))
         python('setup.py', 'install', '--prefix=%s' % prefix)
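For reference, this is the shape of the site.cfg the recipe writes when '+blas' is enabled; a standalone sketch with placeholder prefixes standing in for spec['blas'].prefix and spec['lapack'].prefix:

```python
# Sketch of the site.cfg generated when '+blas' is enabled (the two
# prefixes are placeholders, not real install prefixes).
blas_prefix, lapack_prefix = '/opt/blas', '/opt/lapack'
with open('site.cfg', 'w') as f:
    f.write('[DEFAULT]\n')
    f.write('libraries=lapack,blas\n')
    f.write('library_dirs=%s/lib:%s/lib\n' % (blas_prefix, lapack_prefix))
```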
diff --git a/var/spack/packages/py-pandas/package.py b/var/spack/repos/builtin/packages/py-pandas/package.py
similarity index 100%
rename from var/spack/packages/py-pandas/package.py
rename to var/spack/repos/builtin/packages/py-pandas/package.py
diff --git a/var/spack/repos/builtin/packages/py-pbr/package.py b/var/spack/repos/builtin/packages/py-pbr/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..02957483d4ec8aa55251feff8894cdac1a314026
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-pbr/package.py
@@ -0,0 +1,18 @@
+from spack import *
+import os
+
+class PyPbr(Package):
+    """PBR is a library that injects some useful and sensible default behaviors into your setuptools run."""
+    homepage = "https://pypi.python.org/pypi/pbr"
+    url      = "https://pypi.python.org/packages/source/p/pbr/pbr-1.8.1.tar.gz"
+
+    version('1.8.1', 'c8f9285e1a4ca6f9654c529b158baa3a')
+
+    extends('python')
+
+    depends_on('py-setuptools')
+
+    def install(self, spec, prefix):
+        python('setup.py', 'install', '--prefix=%s' % prefix)
+
+
diff --git a/var/spack/repos/builtin/packages/py-periodictable/package.py b/var/spack/repos/builtin/packages/py-periodictable/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a495a1cc83bd765d48ce8903ec674fd94dda0bc
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-periodictable/package.py
@@ -0,0 +1,17 @@
+from spack import *
+
+class PyPeriodictable(Package):
+    """nose extends the test loading and running features of unittest,
+    making it easier to write, find and run tests."""
+
+    homepage = "https://pypi.python.org/pypi/periodictable"
+    url      = "https://pypi.python.org/packages/source/p/periodictable/periodictable-1.4.1.tar.gz"
+
+    version('1.4.1', '7246b63cc0b6b1be6e86b6616f9e866e')
+
+    depends_on('py-numpy')
+    depends_on('py-pyparsing')
+    extends('python')
+
+    def install(self, spec, prefix):
+        python('setup.py', 'install', '--prefix=%s' % prefix)
diff --git a/var/spack/packages/py-pexpect/package.py b/var/spack/repos/builtin/packages/py-pexpect/package.py
similarity index 100%
rename from var/spack/packages/py-pexpect/package.py
rename to var/spack/repos/builtin/packages/py-pexpect/package.py
diff --git a/var/spack/packages/py-pil/package.py b/var/spack/repos/builtin/packages/py-pil/package.py
similarity index 100%
rename from var/spack/packages/py-pil/package.py
rename to var/spack/repos/builtin/packages/py-pil/package.py
diff --git a/var/spack/packages/py-pillow/package.py b/var/spack/repos/builtin/packages/py-pillow/package.py
similarity index 100%
rename from var/spack/packages/py-pillow/package.py
rename to var/spack/repos/builtin/packages/py-pillow/package.py
diff --git a/var/spack/packages/py-pmw/package.py b/var/spack/repos/builtin/packages/py-pmw/package.py
similarity index 100%
rename from var/spack/packages/py-pmw/package.py
rename to var/spack/repos/builtin/packages/py-pmw/package.py
diff --git a/var/spack/packages/py-pychecker/package.py b/var/spack/repos/builtin/packages/py-pychecker/package.py
similarity index 100%
rename from var/spack/packages/py-pychecker/package.py
rename to var/spack/repos/builtin/packages/py-pychecker/package.py
diff --git a/var/spack/packages/py-pycparser/package.py b/var/spack/repos/builtin/packages/py-pycparser/package.py
similarity index 100%
rename from var/spack/packages/py-pycparser/package.py
rename to var/spack/repos/builtin/packages/py-pycparser/package.py
diff --git a/var/spack/packages/py-pyelftools/package.py b/var/spack/repos/builtin/packages/py-pyelftools/package.py
similarity index 100%
rename from var/spack/packages/py-pyelftools/package.py
rename to var/spack/repos/builtin/packages/py-pyelftools/package.py
diff --git a/var/spack/packages/py-pygments/package.py b/var/spack/repos/builtin/packages/py-pygments/package.py
similarity index 100%
rename from var/spack/packages/py-pygments/package.py
rename to var/spack/repos/builtin/packages/py-pygments/package.py
diff --git a/var/spack/packages/py-pylint/package.py b/var/spack/repos/builtin/packages/py-pylint/package.py
similarity index 100%
rename from var/spack/packages/py-pylint/package.py
rename to var/spack/repos/builtin/packages/py-pylint/package.py
diff --git a/var/spack/packages/py-pypar/package.py b/var/spack/repos/builtin/packages/py-pypar/package.py
similarity index 100%
rename from var/spack/packages/py-pypar/package.py
rename to var/spack/repos/builtin/packages/py-pypar/package.py
diff --git a/var/spack/packages/py-pyparsing/package.py b/var/spack/repos/builtin/packages/py-pyparsing/package.py
similarity index 100%
rename from var/spack/packages/py-pyparsing/package.py
rename to var/spack/repos/builtin/packages/py-pyparsing/package.py
diff --git a/var/spack/packages/py-pyqt/package.py b/var/spack/repos/builtin/packages/py-pyqt/package.py
similarity index 100%
rename from var/spack/packages/py-pyqt/package.py
rename to var/spack/repos/builtin/packages/py-pyqt/package.py
diff --git a/var/spack/packages/py-pyside/package.py b/var/spack/repos/builtin/packages/py-pyside/package.py
similarity index 100%
rename from var/spack/packages/py-pyside/package.py
rename to var/spack/repos/builtin/packages/py-pyside/package.py
diff --git a/var/spack/repos/builtin/packages/py-pytables/package.py b/var/spack/repos/builtin/packages/py-pytables/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5b1e78ab3bd3be89d2971541012a7558da9fa0a
--- /dev/null
+++ b/var/spack/repos/builtin/packages/py-pytables/package.py
@@ -0,0 +1,22 @@
+from spack import *
+import os
+
+class PyPytables(Package):
+    """PyTables is a package for managing hierarchical datasets, designed
+    to efficiently and easily cope with extremely large amounts of data."""
+    homepage = "http://www.pytables.org/"
+    url      = "https://github.com/PyTables/PyTables/archive/v.3.2.2.tar.gz"
+
+    version('3.2.2', '7cbb0972e4d6580f629996a5bed92441')
+
+    extends('python')
+    depends_on('hdf5')
+    depends_on('py-numpy')
+    depends_on('py-numexpr')
+    depends_on('py-cython')
+
+    def install(self, spec, prefix):
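+        # PyTables' setup.py reads HDF5_DIR to locate the HDF5 headers and
+        # libraries; point it at the hdf5 dependency's install prefix.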
+        env["HDF5_DIR"] = spec['hdf5'].prefix
+        python('setup.py', 'install', '--prefix=%s' % prefix)
diff --git a/var/spack/packages/py-python-daemon/package.py b/var/spack/repos/builtin/packages/py-python-daemon/package.py
similarity index 100%
rename from var/spack/packages/py-python-daemon/package.py
rename to var/spack/repos/builtin/packages/py-python-daemon/package.py
diff --git a/var/spack/packages/py-pytz/package.py b/var/spack/repos/builtin/packages/py-pytz/package.py
similarity index 100%
rename from var/spack/packages/py-pytz/package.py
rename to var/spack/repos/builtin/packages/py-pytz/package.py
diff --git a/var/spack/packages/py-rpy2/package.py b/var/spack/repos/builtin/packages/py-rpy2/package.py
similarity index 100%
rename from var/spack/packages/py-rpy2/package.py
rename to var/spack/repos/builtin/packages/py-rpy2/package.py
diff --git a/var/spack/packages/py-scientificpython/package.py b/var/spack/repos/builtin/packages/py-scientificpython/package.py
similarity index 100%
rename from var/spack/packages/py-scientificpython/package.py
rename to var/spack/repos/builtin/packages/py-scientificpython/package.py
diff --git a/var/spack/packages/py-scikit-learn/package.py b/var/spack/repos/builtin/packages/py-scikit-learn/package.py
similarity index 100%
rename from var/spack/packages/py-scikit-learn/package.py
rename to var/spack/repos/builtin/packages/py-scikit-learn/package.py
diff --git a/var/spack/packages/py-scipy/package.py b/var/spack/repos/builtin/packages/py-scipy/package.py
similarity index 100%
rename from var/spack/packages/py-scipy/package.py
rename to var/spack/repos/builtin/packages/py-scipy/package.py
diff --git a/var/spack/packages/py-setuptools/package.py b/var/spack/repos/builtin/packages/py-setuptools/package.py
similarity index 91%
rename from var/spack/packages/py-setuptools/package.py
rename to var/spack/repos/builtin/packages/py-setuptools/package.py
index 760ad4d6dbfb7d66cf3d78835abc8ddad9d708c6..26c048bfd423e42bfd33194cf4c95a01c1c2b66b 100644
--- a/var/spack/packages/py-setuptools/package.py
+++ b/var/spack/repos/builtin/packages/py-setuptools/package.py
@@ -8,6 +8,7 @@ class PySetuptools(Package):
     version('11.3.1', '01f69212e019a2420c1693fb43593930')
     version('16.0', '0ace0b96233516fc5f7c857d086aa3ad')
     version('18.1', 'f72e87f34fbf07f299f6cb46256a0b06')
+    version('19.2', '78353b1f80375ca5e088f4b4627ffe03')
 
     extends('python')
 
diff --git a/var/spack/packages/py-shiboken/package.py b/var/spack/repos/builtin/packages/py-shiboken/package.py
similarity index 100%
rename from var/spack/packages/py-shiboken/package.py
rename to var/spack/repos/builtin/packages/py-shiboken/package.py
diff --git a/var/spack/packages/py-sip/package.py b/var/spack/repos/builtin/packages/py-sip/package.py
similarity index 100%
rename from var/spack/packages/py-sip/package.py
rename to var/spack/repos/builtin/packages/py-sip/package.py
diff --git a/var/spack/packages/py-six/package.py b/var/spack/repos/builtin/packages/py-six/package.py
similarity index 100%
rename from var/spack/packages/py-six/package.py
rename to var/spack/repos/builtin/packages/py-six/package.py
diff --git a/var/spack/packages/py-sphinx/package.py b/var/spack/repos/builtin/packages/py-sphinx/package.py
similarity index 100%
rename from var/spack/packages/py-sphinx/package.py
rename to var/spack/repos/builtin/packages/py-sphinx/package.py
diff --git a/var/spack/packages/py-sympy/package.py b/var/spack/repos/builtin/packages/py-sympy/package.py
similarity index 100%
rename from var/spack/packages/py-sympy/package.py
rename to var/spack/repos/builtin/packages/py-sympy/package.py
diff --git a/var/spack/packages/py-tappy/package.py b/var/spack/repos/builtin/packages/py-tappy/package.py
similarity index 100%
rename from var/spack/packages/py-tappy/package.py
rename to var/spack/repos/builtin/packages/py-tappy/package.py
diff --git a/var/spack/packages/py-twisted/package.py b/var/spack/repos/builtin/packages/py-twisted/package.py
similarity index 100%
rename from var/spack/packages/py-twisted/package.py
rename to var/spack/repos/builtin/packages/py-twisted/package.py
diff --git a/var/spack/packages/py-urwid/package.py b/var/spack/repos/builtin/packages/py-urwid/package.py
similarity index 100%
rename from var/spack/packages/py-urwid/package.py
rename to var/spack/repos/builtin/packages/py-urwid/package.py
diff --git a/var/spack/packages/py-virtualenv/package.py b/var/spack/repos/builtin/packages/py-virtualenv/package.py
similarity index 100%
rename from var/spack/packages/py-virtualenv/package.py
rename to var/spack/repos/builtin/packages/py-virtualenv/package.py
diff --git a/var/spack/packages/py-yapf/package.py b/var/spack/repos/builtin/packages/py-yapf/package.py
similarity index 100%
rename from var/spack/packages/py-yapf/package.py
rename to var/spack/repos/builtin/packages/py-yapf/package.py
diff --git a/var/spack/packages/python/package.py b/var/spack/repos/builtin/packages/python/package.py
similarity index 95%
rename from var/spack/packages/python/package.py
rename to var/spack/repos/builtin/packages/python/package.py
index eae2566b57350ad93192f09b6186e269cc76024b..a1ce06feb036f1f487dcb2d2b58edf046c08d7a1 100644
--- a/var/spack/packages/python/package.py
+++ b/var/spack/repos/builtin/packages/python/package.py
@@ -11,15 +11,18 @@
 class Python(Package):
     """The Python programming language."""
     homepage = "http://www.python.org"
-    url      = "http://www.python.org/ftp/python/2.7.8/Python-2.7.8.tar.xz"
+    url      = "http://www.python.org/ftp/python/2.7.8/Python-2.7.8.tgz"
 
     extendable = True
 
-    version('2.7.8', 'd235bdfa75b8396942e360a70487ee00')
-    version('2.7.10', 'c685ef0b8e9f27b5e3db5db12b268ac6')
-    version('2.7.11', '1dbcc848b4cd8399a8199d000f9f823c', preferred=True)
-    version('3.5.0', 'd149d2812f10cbe04c042232e7964171')
-    version('3.5.1', 'e9ea6f2623fffcdd871b7b19113fde80')
+    version('3.5.1', 'be78e48cdfc1a7ad90efff146dce6cfe')
+    version('3.5.0', 'a56c0c0b45d75a0ec9c6dee933c41c36')
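+    # preferred=True: the concretizer defaults to 2.7.11 rather than the
+    # newest listed version.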
+    version('2.7.11', '6b6076ec9e93f05dd63e47eb9c15728b', preferred=True)
+    version('2.7.10', 'd7547558fd673bd9d38e2108c6b42521')
+    version('2.7.9', '5eebcaa0030dc4061156d3429657fb83')
+    version('2.7.8', 'd4bca0159acb0b44a781292b5231936f')
 
     depends_on("openssl")
     depends_on("bzip2")
diff --git a/var/spack/packages/qhull/package.py b/var/spack/repos/builtin/packages/qhull/package.py
similarity index 93%
rename from var/spack/packages/qhull/package.py
rename to var/spack/repos/builtin/packages/qhull/package.py
index 9da4078a702f958c6e86ce2b84667bc52d13fb7a..f6712ced38c32a157f4844fe2da5f7c1ee1230bc 100644
--- a/var/spack/packages/qhull/package.py
+++ b/var/spack/repos/builtin/packages/qhull/package.py
@@ -20,6 +20,9 @@ class Qhull(Package):
     version('1.0', 'd0f978c0d8dfb2e919caefa56ea2953c',
             url="http://www.qhull.org/download/qhull-2012.1-src.tgz")
 
+    # https://github.com/qhull/qhull/pull/5
+    patch('qhull-iterator.patch')
+
     def install(self, spec, prefix):
         with working_dir('spack-build', create=True):
             cmake('..', *std_cmake_args)
diff --git a/var/spack/repos/builtin/packages/qhull/qhull-iterator.patch b/var/spack/repos/builtin/packages/qhull/qhull-iterator.patch
new file mode 100644
index 0000000000000000000000000000000000000000..88e931d84f2f0e7dfdb9a50b466cecbfbd2722e5
--- /dev/null
+++ b/var/spack/repos/builtin/packages/qhull/qhull-iterator.patch
@@ -0,0 +1,45 @@
+From 93f4b306c54bb5be7724dcc19c6e747b62ac76dd Mon Sep 17 00:00:00 2001
+From: Ben Boeckel <mathstuf@gmail.com>
+Date: Thu, 28 May 2015 11:12:25 -0400
+Subject: [PATCH] iterator: use the header
+
+Standard libraries are doing funky things with inline namespaces which
+make these declarations impossible to get right. Just include the
+header.
+---
+ src/libqhullcpp/QhullIterator.h   | 3 +--
+ src/libqhullcpp/QhullLinkedList.h | 5 +----
+ 2 files changed, 2 insertions(+), 6 deletions(-)
+
+diff --git a/src/libqhullcpp/QhullIterator.h b/src/libqhullcpp/QhullIterator.h
+index 9dde894..49f3a3b 100644
+--- a/src/libqhullcpp/QhullIterator.h
++++ b/src/libqhullcpp/QhullIterator.h
+@@ -14,10 +14,9 @@ extern "C" {
+ }
+ 
+ #include <assert.h>
++#include <iterator>
+ #include <string>
+ #include <vector>
+-//! Avoid dependence on <iterator>
+-namespace std { struct bidirectional_iterator_tag; struct random_access_iterator_tag; }
+ 
+ namespace orgQhull {
+ 
+diff --git a/src/libqhullcpp/QhullLinkedList.h b/src/libqhullcpp/QhullLinkedList.h
+index d828ac6..00b9008 100644
+--- a/src/libqhullcpp/QhullLinkedList.h
++++ b/src/libqhullcpp/QhullLinkedList.h
+@@ -9,10 +9,7 @@
+ #ifndef QHULLLINKEDLIST_H
+ #define QHULLLINKEDLIST_H
+ 
+-namespace std {
+-    struct bidirectional_iterator_tag;
+-    struct random_access_iterator_tag;
+-}//std
++#include <iterator>
+ 
+ #include "QhullError.h"
+ extern "C" {
diff --git a/var/spack/packages/qt/package.py b/var/spack/repos/builtin/packages/qt/package.py
similarity index 100%
rename from var/spack/packages/qt/package.py
rename to var/spack/repos/builtin/packages/qt/package.py
diff --git a/var/spack/packages/qt/qt3krell.patch b/var/spack/repos/builtin/packages/qt/qt3krell.patch
similarity index 100%
rename from var/spack/packages/qt/qt3krell.patch
rename to var/spack/repos/builtin/packages/qt/qt3krell.patch
diff --git a/var/spack/packages/qthreads/package.py b/var/spack/repos/builtin/packages/qthreads/package.py
similarity index 100%
rename from var/spack/packages/qthreads/package.py
rename to var/spack/repos/builtin/packages/qthreads/package.py
diff --git a/var/spack/packages/ravel/package.py b/var/spack/repos/builtin/packages/ravel/package.py
similarity index 100%
rename from var/spack/packages/ravel/package.py
rename to var/spack/repos/builtin/packages/ravel/package.py
diff --git a/var/spack/packages/readline/package.py b/var/spack/repos/builtin/packages/readline/package.py
similarity index 100%
rename from var/spack/packages/readline/package.py
rename to var/spack/repos/builtin/packages/readline/package.py
diff --git a/var/spack/packages/rose/add_spack_compiler_recognition.patch b/var/spack/repos/builtin/packages/rose/add_spack_compiler_recognition.patch
similarity index 100%
rename from var/spack/packages/rose/add_spack_compiler_recognition.patch
rename to var/spack/repos/builtin/packages/rose/add_spack_compiler_recognition.patch
diff --git a/var/spack/packages/rose/package.py b/var/spack/repos/builtin/packages/rose/package.py
similarity index 100%
rename from var/spack/packages/rose/package.py
rename to var/spack/repos/builtin/packages/rose/package.py
diff --git a/var/spack/packages/rsync/package.py b/var/spack/repos/builtin/packages/rsync/package.py
similarity index 100%
rename from var/spack/packages/rsync/package.py
rename to var/spack/repos/builtin/packages/rsync/package.py
diff --git a/var/spack/packages/ruby/package.py b/var/spack/repos/builtin/packages/ruby/package.py
similarity index 100%
rename from var/spack/packages/ruby/package.py
rename to var/spack/repos/builtin/packages/ruby/package.py
diff --git a/var/spack/packages/samtools/package.py b/var/spack/repos/builtin/packages/samtools/package.py
similarity index 100%
rename from var/spack/packages/samtools/package.py
rename to var/spack/repos/builtin/packages/samtools/package.py
diff --git a/var/spack/packages/samtools/samtools1.2.patch b/var/spack/repos/builtin/packages/samtools/samtools1.2.patch
similarity index 100%
rename from var/spack/packages/samtools/samtools1.2.patch
rename to var/spack/repos/builtin/packages/samtools/samtools1.2.patch
diff --git a/var/spack/packages/scalasca/package.py b/var/spack/repos/builtin/packages/scalasca/package.py
similarity index 100%
rename from var/spack/packages/scalasca/package.py
rename to var/spack/repos/builtin/packages/scalasca/package.py
diff --git a/var/spack/packages/scorep/package.py b/var/spack/repos/builtin/packages/scorep/package.py
similarity index 100%
rename from var/spack/packages/scorep/package.py
rename to var/spack/repos/builtin/packages/scorep/package.py
diff --git a/var/spack/packages/scotch/package.py b/var/spack/repos/builtin/packages/scotch/package.py
similarity index 100%
rename from var/spack/packages/scotch/package.py
rename to var/spack/repos/builtin/packages/scotch/package.py
diff --git a/var/spack/packages/scr/package.py b/var/spack/repos/builtin/packages/scr/package.py
similarity index 100%
rename from var/spack/packages/scr/package.py
rename to var/spack/repos/builtin/packages/scr/package.py
diff --git a/var/spack/packages/silo/package.py b/var/spack/repos/builtin/packages/silo/package.py
similarity index 100%
rename from var/spack/packages/silo/package.py
rename to var/spack/repos/builtin/packages/silo/package.py
diff --git a/var/spack/packages/snappy/package.py b/var/spack/repos/builtin/packages/snappy/package.py
similarity index 100%
rename from var/spack/packages/snappy/package.py
rename to var/spack/repos/builtin/packages/snappy/package.py
diff --git a/var/spack/packages/sparsehash/package.py b/var/spack/repos/builtin/packages/sparsehash/package.py
similarity index 100%
rename from var/spack/packages/sparsehash/package.py
rename to var/spack/repos/builtin/packages/sparsehash/package.py
diff --git a/var/spack/packages/spindle/package.py b/var/spack/repos/builtin/packages/spindle/package.py
similarity index 100%
rename from var/spack/packages/spindle/package.py
rename to var/spack/repos/builtin/packages/spindle/package.py
diff --git a/var/spack/packages/spot/package.py b/var/spack/repos/builtin/packages/spot/package.py
similarity index 100%
rename from var/spack/packages/spot/package.py
rename to var/spack/repos/builtin/packages/spot/package.py
diff --git a/var/spack/packages/sqlite/package.py b/var/spack/repos/builtin/packages/sqlite/package.py
similarity index 100%
rename from var/spack/packages/sqlite/package.py
rename to var/spack/repos/builtin/packages/sqlite/package.py
diff --git a/var/spack/packages/stat/configure_mpicxx.patch b/var/spack/repos/builtin/packages/stat/configure_mpicxx.patch
similarity index 100%
rename from var/spack/packages/stat/configure_mpicxx.patch
rename to var/spack/repos/builtin/packages/stat/configure_mpicxx.patch
diff --git a/var/spack/packages/stat/package.py b/var/spack/repos/builtin/packages/stat/package.py
similarity index 100%
rename from var/spack/packages/stat/package.py
rename to var/spack/repos/builtin/packages/stat/package.py
diff --git a/var/spack/packages/sundials/package.py b/var/spack/repos/builtin/packages/sundials/package.py
similarity index 100%
rename from var/spack/packages/sundials/package.py
rename to var/spack/repos/builtin/packages/sundials/package.py
diff --git a/var/spack/packages/swig/package.py b/var/spack/repos/builtin/packages/swig/package.py
similarity index 100%
rename from var/spack/packages/swig/package.py
rename to var/spack/repos/builtin/packages/swig/package.py
diff --git a/var/spack/repos/builtin/packages/szip/package.py b/var/spack/repos/builtin/packages/szip/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..c48c5b431e135ae9a4d66f79f65e93a4793c20c4
--- /dev/null
+++ b/var/spack/repos/builtin/packages/szip/package.py
@@ -0,0 +1,23 @@
+from spack import *
+
+class Szip(Package):
+    """Szip is an implementation of the extended-Rice lossless compression algorithm.
+    It provides lossless compression of scientific data, and is provided with HDF
+    software products."""
+
+    homepage = "https://www.hdfgroup.org/doc_resource/SZIP/"
+    url      = "http://www.hdfgroup.org/ftp/lib-external/szip/2.1/src/szip-2.1.tar.gz"
+
+    version('2.1', '902f831bcefb69c6b635374424acbead')
+
+    def install(self, spec, prefix):
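+        # Build in "production" (optimized) mode, as both shared and static
+        # libraries, with encoding (compression) support enabled.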
+        configure('--prefix=%s' % prefix,
+                  '--enable-production',
+                  '--enable-shared',
+                  '--enable-static',
+                  '--enable-encoding')
+
+        make()
+        make("install")
diff --git a/var/spack/repos/builtin/packages/tar/package.py b/var/spack/repos/builtin/packages/tar/package.py
new file mode 100644
index 0000000000000000000000000000000000000000..539174017c91e09e42a59a18741afb48c2521fb6
--- /dev/null
+++ b/var/spack/repos/builtin/packages/tar/package.py
@@ -0,0 +1,14 @@
+from spack import *
+
+class Tar(Package):
+    """GNU Tar provides the ability to create tar archives, as well as various other kinds of manipulation."""
+    homepage = "https://www.gnu.org/software/tar/"
+    url      = "http://ftp.gnu.org/gnu/tar/tar-1.28.tar.gz"
+
+    version('1.28', '6ea3dbea1f2b0409b234048e021a9fd7')
+
+    def install(self, spec, prefix):
+        configure("--prefix=%s" % prefix)
+        make()
+        make('install')
diff --git a/var/spack/packages/task/package.py b/var/spack/repos/builtin/packages/task/package.py
similarity index 100%
rename from var/spack/packages/task/package.py
rename to var/spack/repos/builtin/packages/task/package.py
diff --git a/var/spack/packages/taskd/package.py b/var/spack/repos/builtin/packages/taskd/package.py
similarity index 100%
rename from var/spack/packages/taskd/package.py
rename to var/spack/repos/builtin/packages/taskd/package.py
diff --git a/var/spack/packages/tau/package.py b/var/spack/repos/builtin/packages/tau/package.py
similarity index 100%
rename from var/spack/packages/tau/package.py
rename to var/spack/repos/builtin/packages/tau/package.py
diff --git a/var/spack/packages/tcl/package.py b/var/spack/repos/builtin/packages/tcl/package.py
similarity index 100%
rename from var/spack/packages/tcl/package.py
rename to var/spack/repos/builtin/packages/tcl/package.py
diff --git a/var/spack/packages/texinfo/package.py b/var/spack/repos/builtin/packages/texinfo/package.py
similarity index 83%
rename from var/spack/packages/texinfo/package.py
rename to var/spack/repos/builtin/packages/texinfo/package.py
index 460db65b3eca34c673b093119f5a6763e8113b87..a83c10c0c1922f75cb4b87b85003612cefdfe308 100644
--- a/var/spack/packages/texinfo/package.py
+++ b/var/spack/repos/builtin/packages/texinfo/package.py
@@ -6,7 +6,7 @@
 # Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
 # LLNL-CODE-647188
 #
-# For details, see https://scalability-llnl.github.io/spack
+# For details, see https://llnl.github.io/spack
 # Please also see the LICENSE file for our notice and the LGPL.
 #
 # This program is free software; you can redistribute it and/or modify
@@ -33,12 +33,12 @@ class Texinfo(Package):
-    used by many non-GNU projects as well.FIXME: put a proper description of your package here.
+    used by many non-GNU projects as well.
     """
     homepage = "https://www.gnu.org/software/texinfo/"
-    url      = "http://ftp.gnu.org/gnu/texinfo/texinfo-6.0.tar.xz"
+    url      = "http://ftp.gnu.org/gnu/texinfo/texinfo-6.0.tar.gz"
 
-    version('6.0', '02818e62a5b8ae0213a7ff572991bb50')
-    version('5.2', 'cb489df8a7ee9d10a236197aefdb32c5')
-    version('5.1', '52ee905a3b705020d2a1b6ec36d53ca6')
-    version('5.0', 'ef2fad34c71ddc95b20c7d6a08c0d7a6')
+    version('6.0', 'e1a2ef5dce5018b53f0f6eed45b247a7')
+    version('5.2', '1b8f98b80a8e6c50422125e07522e8db')
+    version('5.1', '54e250014fe698fb4832016158747c03')
+    version('5.0', '918432285abe6fe96c98355594c5656a')
 
     def install(self, spec, prefix):
         configure('--prefix=%s' % prefix)
diff --git a/var/spack/packages/the_silver_searcher/package.py b/var/spack/repos/builtin/packages/the_silver_searcher/package.py
similarity index 100%
rename from var/spack/packages/the_silver_searcher/package.py
rename to var/spack/repos/builtin/packages/the_silver_searcher/package.py
diff --git a/var/spack/packages/thrift/package.py b/var/spack/repos/builtin/packages/thrift/package.py
similarity index 100%
rename from var/spack/packages/thrift/package.py
rename to var/spack/repos/builtin/packages/thrift/package.py
diff --git a/var/spack/packages/tk/package.py b/var/spack/repos/builtin/packages/tk/package.py
similarity index 100%
rename from var/spack/packages/tk/package.py
rename to var/spack/repos/builtin/packages/tk/package.py
diff --git a/var/spack/packages/tmux/package.py b/var/spack/repos/builtin/packages/tmux/package.py
similarity index 100%
rename from var/spack/packages/tmux/package.py
rename to var/spack/repos/builtin/packages/tmux/package.py
diff --git a/var/spack/packages/tmuxinator/package.py b/var/spack/repos/builtin/packages/tmuxinator/package.py
similarity index 100%
rename from var/spack/packages/tmuxinator/package.py
rename to var/spack/repos/builtin/packages/tmuxinator/package.py
diff --git a/var/spack/packages/trilinos/package.py b/var/spack/repos/builtin/packages/trilinos/package.py
similarity index 100%
rename from var/spack/packages/trilinos/package.py
rename to var/spack/repos/builtin/packages/trilinos/package.py
diff --git a/var/spack/packages/uncrustify/package.py b/var/spack/repos/builtin/packages/uncrustify/package.py
similarity index 100%
rename from var/spack/packages/uncrustify/package.py
rename to var/spack/repos/builtin/packages/uncrustify/package.py
diff --git a/var/spack/packages/util-linux/package.py b/var/spack/repos/builtin/packages/util-linux/package.py
similarity index 100%
rename from var/spack/packages/util-linux/package.py
rename to var/spack/repos/builtin/packages/util-linux/package.py
diff --git a/var/spack/packages/valgrind/package.py b/var/spack/repos/builtin/packages/valgrind/package.py
similarity index 100%
rename from var/spack/packages/valgrind/package.py
rename to var/spack/repos/builtin/packages/valgrind/package.py
diff --git a/var/spack/packages/vim/package.py b/var/spack/repos/builtin/packages/vim/package.py
similarity index 100%
rename from var/spack/packages/vim/package.py
rename to var/spack/repos/builtin/packages/vim/package.py
diff --git a/var/spack/packages/vtk/package.py b/var/spack/repos/builtin/packages/vtk/package.py
similarity index 100%
rename from var/spack/packages/vtk/package.py
rename to var/spack/repos/builtin/packages/vtk/package.py
diff --git a/var/spack/packages/wget/package.py b/var/spack/repos/builtin/packages/wget/package.py
similarity index 76%
rename from var/spack/packages/wget/package.py
rename to var/spack/repos/builtin/packages/wget/package.py
index c8fd025122972a8025e13dcad8e5ed640bf5004b..55728b0515723d2d68702164dccbbdb31885e7f9 100644
--- a/var/spack/packages/wget/package.py
+++ b/var/spack/repos/builtin/packages/wget/package.py
@@ -8,9 +8,10 @@ class Wget(Package):
        etc."""
 
     homepage = "http://www.gnu.org/software/wget/"
-    url      = "http://ftp.gnu.org/gnu/wget/wget-1.16.tar.xz"
+    url      = "http://ftp.gnu.org/gnu/wget/wget-1.16.tar.gz"
 
-    version('1.16', 'fe102975ab3a6c049777883f1bb9ad07')
+    version('1.17', 'c4c4727766f24ac716936275014a0536')
+    version('1.16', '293a37977c41b5522f781d3a3a078426')
 
     depends_on("openssl")
 
diff --git a/var/spack/packages/wx/package.py b/var/spack/repos/builtin/packages/wx/package.py
similarity index 100%
rename from var/spack/packages/wx/package.py
rename to var/spack/repos/builtin/packages/wx/package.py
diff --git a/var/spack/packages/wxpropgrid/package.py b/var/spack/repos/builtin/packages/wxpropgrid/package.py
similarity index 100%
rename from var/spack/packages/wxpropgrid/package.py
rename to var/spack/repos/builtin/packages/wxpropgrid/package.py
diff --git a/var/spack/packages/xcb-proto/package.py b/var/spack/repos/builtin/packages/xcb-proto/package.py
similarity index 100%
rename from var/spack/packages/xcb-proto/package.py
rename to var/spack/repos/builtin/packages/xcb-proto/package.py
diff --git a/var/spack/packages/xerces-c/package.py b/var/spack/repos/builtin/packages/xerces-c/package.py
similarity index 100%
rename from var/spack/packages/xerces-c/package.py
rename to var/spack/repos/builtin/packages/xerces-c/package.py
diff --git a/var/spack/packages/xz/package.py b/var/spack/repos/builtin/packages/xz/package.py
similarity index 100%
rename from var/spack/packages/xz/package.py
rename to var/spack/repos/builtin/packages/xz/package.py
diff --git a/var/spack/packages/yasm/package.py b/var/spack/repos/builtin/packages/yasm/package.py
similarity index 100%
rename from var/spack/packages/yasm/package.py
rename to var/spack/repos/builtin/packages/yasm/package.py
diff --git a/var/spack/packages/zeromq/package.py b/var/spack/repos/builtin/packages/zeromq/package.py
similarity index 100%
rename from var/spack/packages/zeromq/package.py
rename to var/spack/repos/builtin/packages/zeromq/package.py
diff --git a/var/spack/packages/zlib/package.py b/var/spack/repos/builtin/packages/zlib/package.py
similarity index 100%
rename from var/spack/packages/zlib/package.py
rename to var/spack/repos/builtin/packages/zlib/package.py
diff --git a/var/spack/packages/zsh/package.py b/var/spack/repos/builtin/packages/zsh/package.py
similarity index 100%
rename from var/spack/packages/zsh/package.py
rename to var/spack/repos/builtin/packages/zsh/package.py
diff --git a/var/spack/repos/builtin/repo.yaml b/var/spack/repos/builtin/repo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..54b282db6bedfaa0bd2958a96124625f3dfe1778
--- /dev/null
+++ b/var/spack/repos/builtin/repo.yaml
@@ -0,0 +1,3 @@
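+# Each package repository declares its (unique) namespace here.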
+repo:
+  namespace: builtin